index
int64
repo_id
string
file_path
string
content
string
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/ReauthenticationContext.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.network; import java.util.Objects; /** * Defines the context in which an {@link Authenticator} is to be created during * a re-authentication. */ public class ReauthenticationContext { private final NetworkReceive networkReceive; private final Authenticator previousAuthenticator; private final long reauthenticationBeginNanos; /** * Constructor * * @param previousAuthenticator * the mandatory {@link Authenticator} that was previously used to * authenticate the channel * @param networkReceive * the applicable {@link NetworkReceive} instance, if any. For the * client side this may be a response that has been partially read, a * non-null instance that has had no data read into it yet, or null; * if it is non-null then this is the instance that data should * initially be read into during re-authentication. For the server * side this is mandatory and it must contain the * {@code SaslHandshakeRequest} that has been received on the server * and that initiates re-authentication. * * @param nowNanos * the current time. The value is in nanoseconds as per * {@code System.nanoTime()} and is therefore only useful when * compared to such a value -- it's absolute value is meaningless. 
* This defines the moment when re-authentication begins. */ public ReauthenticationContext(Authenticator previousAuthenticator, NetworkReceive networkReceive, long nowNanos) { this.previousAuthenticator = Objects.requireNonNull(previousAuthenticator); this.networkReceive = networkReceive; this.reauthenticationBeginNanos = nowNanos; } /** * Return the applicable {@link NetworkReceive} instance, if any. For the client * side this may be a response that has been partially read, a non-null instance * that has had no data read into it yet, or null; if it is non-null then this * is the instance that data should initially be read into during * re-authentication. For the server side this is mandatory and it must contain * the {@code SaslHandshakeRequest} that has been received on the server and * that initiates re-authentication. * * @return the applicable {@link NetworkReceive} instance, if any */ public NetworkReceive networkReceive() { return networkReceive; } /** * Return the always non-null {@link Authenticator} that was previously used to * authenticate the channel * * @return the always non-null {@link Authenticator} that was previously used to * authenticate the channel */ public Authenticator previousAuthenticator() { return previousAuthenticator; } /** * Return the time when re-authentication began. The value is in nanoseconds as * per {@code System.nanoTime()} and is therefore only useful when compared to * such a value -- it's absolute value is meaningless. * * @return the time when re-authentication began */ public long reauthenticationBeginNanos() { return reauthenticationBeginNanos; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/Receive.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.network; import java.io.Closeable; import java.io.IOException; import java.nio.channels.ScatteringByteChannel; /** * This interface models the in-progress reading of data from a channel to a source identified by an integer id */ public interface Receive extends Closeable { /** * The numeric id of the source from which we are receiving data. */ String source(); /** * Are we done receiving data? */ boolean complete(); /** * Read bytes into this receive from the given channel * @param channel The channel to read from * @return The number of bytes read * @throws IOException If the reading fails */ long readFrom(ScatteringByteChannel channel) throws IOException; /** * Do we know yet how much memory we require to fully read this */ boolean requiredMemoryAmountKnown(); /** * Has the underlying memory required to complete reading been allocated yet? */ boolean memoryAllocated(); }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/SaslChannelBuilder.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.network; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.config.SaslConfigs; import org.apache.kafka.common.config.SslConfigs; import org.apache.kafka.common.config.internals.BrokerSecurityConfigs; import org.apache.kafka.common.memory.MemoryPool; import org.apache.kafka.common.requests.ApiVersionsResponse; import org.apache.kafka.common.security.JaasContext; import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler; import org.apache.kafka.common.security.auth.Login; import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.security.authenticator.CredentialCache; import org.apache.kafka.common.security.authenticator.DefaultLogin; import org.apache.kafka.common.security.authenticator.LoginManager; import org.apache.kafka.common.security.authenticator.SaslClientAuthenticator; import org.apache.kafka.common.security.authenticator.SaslClientCallbackHandler; import org.apache.kafka.common.security.authenticator.SaslServerAuthenticator; import org.apache.kafka.common.security.authenticator.SaslServerCallbackHandler; import 
org.apache.kafka.common.security.kerberos.KerberosClientCallbackHandler; import org.apache.kafka.common.security.kerberos.KerberosLogin; import org.apache.kafka.common.security.kerberos.KerberosName; import org.apache.kafka.common.security.kerberos.KerberosShortNamer; import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule; import org.apache.kafka.common.security.oauthbearer.internals.OAuthBearerRefreshingLogin; import org.apache.kafka.common.security.oauthbearer.internals.OAuthBearerSaslClientCallbackHandler; import org.apache.kafka.common.security.oauthbearer.internals.unsecured.OAuthBearerUnsecuredValidatorCallbackHandler; import org.apache.kafka.common.security.plain.internals.PlainSaslServer; import org.apache.kafka.common.security.plain.internals.PlainServerCallbackHandler; import org.apache.kafka.common.security.scram.ScramCredential; import org.apache.kafka.common.security.scram.internals.ScramMechanism; import org.apache.kafka.common.security.scram.internals.ScramServerCallbackHandler; import org.apache.kafka.common.security.ssl.SslFactory; import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.ietf.jgss.GSSContext; import org.ietf.jgss.GSSCredential; import org.ietf.jgss.GSSException; import org.ietf.jgss.GSSManager; import org.ietf.jgss.GSSName; import org.ietf.jgss.Oid; import org.slf4j.Logger; import javax.security.auth.Subject; import javax.security.auth.kerberos.KerberosPrincipal; import java.io.IOException; import java.net.Socket; import java.nio.channels.SelectionKey; import java.nio.channels.SocketChannel; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Supplier; public class SaslChannelBuilder implements ChannelBuilder, ListenerReconfigurable { static 
final String GSS_NATIVE_PROP = "sun.security.jgss.native"; private final SecurityProtocol securityProtocol; private final ListenerName listenerName; private final boolean isInterBrokerListener; private final String clientSaslMechanism; private final Mode mode; private final Map<String, JaasContext> jaasContexts; private final boolean handshakeRequestEnable; private final CredentialCache credentialCache; private final DelegationTokenCache tokenCache; private final Map<String, LoginManager> loginManagers; private final Map<String, Subject> subjects; private final Supplier<ApiVersionsResponse> apiVersionSupplier; private SslFactory sslFactory; private Map<String, ?> configs; private final String sslClientAuthOverride; private KerberosShortNamer kerberosShortNamer; private Map<String, AuthenticateCallbackHandler> saslCallbackHandlers; private Map<String, Long> connectionsMaxReauthMsByMechanism; private final Time time; private final LogContext logContext; private final Logger log; public SaslChannelBuilder(Mode mode, Map<String, JaasContext> jaasContexts, SecurityProtocol securityProtocol, ListenerName listenerName, boolean isInterBrokerListener, String clientSaslMechanism, boolean handshakeRequestEnable, CredentialCache credentialCache, DelegationTokenCache tokenCache, String sslClientAuthOverride, Time time, LogContext logContext, Supplier<ApiVersionsResponse> apiVersionSupplier) { this.mode = mode; this.jaasContexts = jaasContexts; this.loginManagers = new HashMap<>(jaasContexts.size()); this.subjects = new HashMap<>(jaasContexts.size()); this.securityProtocol = securityProtocol; this.listenerName = listenerName; this.isInterBrokerListener = isInterBrokerListener; this.handshakeRequestEnable = handshakeRequestEnable; this.clientSaslMechanism = clientSaslMechanism; this.credentialCache = credentialCache; this.tokenCache = tokenCache; this.sslClientAuthOverride = sslClientAuthOverride; this.saslCallbackHandlers = new HashMap<>(); this.connectionsMaxReauthMsByMechanism 
= new HashMap<>(); this.time = time; this.logContext = logContext; this.log = logContext.logger(getClass()); this.apiVersionSupplier = apiVersionSupplier; if (mode == Mode.SERVER && apiVersionSupplier == null) { throw new IllegalArgumentException("Server channel builder must provide an ApiVersionResponse supplier"); } } @SuppressWarnings("unchecked") @Override public void configure(Map<String, ?> configs) throws KafkaException { try { this.configs = configs; if (mode == Mode.SERVER) { createServerCallbackHandlers(configs); createConnectionsMaxReauthMsMap(configs); } else createClientCallbackHandler(configs); for (Map.Entry<String, AuthenticateCallbackHandler> entry : saslCallbackHandlers.entrySet()) { String mechanism = entry.getKey(); entry.getValue().configure(configs, mechanism, jaasContexts.get(mechanism).configurationEntries()); } Class<? extends Login> defaultLoginClass = defaultLoginClass(); if (mode == Mode.SERVER && jaasContexts.containsKey(SaslConfigs.GSSAPI_MECHANISM)) { String defaultRealm; try { defaultRealm = defaultKerberosRealm(); } catch (Exception ke) { defaultRealm = ""; } List<String> principalToLocalRules = (List<String>) configs.get(BrokerSecurityConfigs.SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_CONFIG); if (principalToLocalRules != null) kerberosShortNamer = KerberosShortNamer.fromUnparsedRules(defaultRealm, principalToLocalRules); } for (Map.Entry<String, JaasContext> entry : jaasContexts.entrySet()) { String mechanism = entry.getKey(); // With static JAAS configuration, use KerberosLogin if Kerberos is enabled. 
With dynamic JAAS configuration, // use KerberosLogin only for the LoginContext corresponding to GSSAPI LoginManager loginManager = LoginManager.acquireLoginManager(entry.getValue(), mechanism, defaultLoginClass, configs); loginManagers.put(mechanism, loginManager); Subject subject = loginManager.subject(); subjects.put(mechanism, subject); if (mode == Mode.SERVER && mechanism.equals(SaslConfigs.GSSAPI_MECHANISM)) maybeAddNativeGssapiCredentials(subject); } if (this.securityProtocol == SecurityProtocol.SASL_SSL) { // Disable SSL client authentication as we are using SASL authentication this.sslFactory = new SslFactory(mode, sslClientAuthOverride, isInterBrokerListener); this.sslFactory.configure(configs); } } catch (Throwable e) { close(); throw new KafkaException(e); } } @Override public Set<String> reconfigurableConfigs() { return securityProtocol == SecurityProtocol.SASL_SSL ? SslConfigs.RECONFIGURABLE_CONFIGS : Collections.emptySet(); } @Override public void validateReconfiguration(Map<String, ?> configs) { if (this.securityProtocol == SecurityProtocol.SASL_SSL) sslFactory.validateReconfiguration(configs); } @Override public void reconfigure(Map<String, ?> configs) { if (this.securityProtocol == SecurityProtocol.SASL_SSL) sslFactory.reconfigure(configs); } @Override public ListenerName listenerName() { return listenerName; } @Override public KafkaChannel buildChannel(String id, SelectionKey key, int maxReceiveSize, MemoryPool memoryPool, ChannelMetadataRegistry metadataRegistry) throws KafkaException { try { SocketChannel socketChannel = (SocketChannel) key.channel(); Socket socket = socketChannel.socket(); TransportLayer transportLayer = buildTransportLayer(id, key, socketChannel, metadataRegistry); Supplier<Authenticator> authenticatorCreator; if (mode == Mode.SERVER) { authenticatorCreator = () -> buildServerAuthenticator(configs, Collections.unmodifiableMap(saslCallbackHandlers), id, transportLayer, Collections.unmodifiableMap(subjects), 
Collections.unmodifiableMap(connectionsMaxReauthMsByMechanism), metadataRegistry); } else { LoginManager loginManager = loginManagers.get(clientSaslMechanism); authenticatorCreator = () -> buildClientAuthenticator(configs, saslCallbackHandlers.get(clientSaslMechanism), id, socket.getInetAddress().getHostName(), loginManager.serviceName(), transportLayer, subjects.get(clientSaslMechanism)); } return new KafkaChannel(id, transportLayer, authenticatorCreator, maxReceiveSize, memoryPool != null ? memoryPool : MemoryPool.NONE, metadataRegistry); } catch (Exception e) { throw new KafkaException(e); } } @Override public void close() { for (LoginManager loginManager : loginManagers.values()) loginManager.release(); loginManagers.clear(); for (AuthenticateCallbackHandler handler : saslCallbackHandlers.values()) handler.close(); if (sslFactory != null) sslFactory.close(); } // Visible to override for testing protected TransportLayer buildTransportLayer(String id, SelectionKey key, SocketChannel socketChannel, ChannelMetadataRegistry metadataRegistry) throws IOException { if (this.securityProtocol == SecurityProtocol.SASL_SSL) { return SslTransportLayer.create(id, key, sslFactory.createSslEngine(socketChannel.socket()), metadataRegistry); } else { return new PlaintextTransportLayer(key); } } // Visible to override for testing protected SaslServerAuthenticator buildServerAuthenticator(Map<String, ?> configs, Map<String, AuthenticateCallbackHandler> callbackHandlers, String id, TransportLayer transportLayer, Map<String, Subject> subjects, Map<String, Long> connectionsMaxReauthMsByMechanism, ChannelMetadataRegistry metadataRegistry) { return new SaslServerAuthenticator(configs, callbackHandlers, id, subjects, kerberosShortNamer, listenerName, securityProtocol, transportLayer, connectionsMaxReauthMsByMechanism, metadataRegistry, time, apiVersionSupplier); } // Visible to override for testing protected SaslClientAuthenticator buildClientAuthenticator(Map<String, ?> configs, 
AuthenticateCallbackHandler callbackHandler, String id, String serverHost, String servicePrincipal, TransportLayer transportLayer, Subject subject) { return new SaslClientAuthenticator(configs, callbackHandler, id, subject, servicePrincipal, serverHost, clientSaslMechanism, handshakeRequestEnable, transportLayer, time, logContext); } // Package private for testing Map<String, LoginManager> loginManagers() { return loginManagers; } private static String defaultKerberosRealm() { // see https://issues.apache.org/jira/browse/HADOOP-10848 for details return new KerberosPrincipal("tmp", 1).getRealm(); } private void createClientCallbackHandler(Map<String, ?> configs) { @SuppressWarnings("unchecked") Class<? extends AuthenticateCallbackHandler> clazz = (Class<? extends AuthenticateCallbackHandler>) configs.get(SaslConfigs.SASL_CLIENT_CALLBACK_HANDLER_CLASS); if (clazz == null) clazz = clientCallbackHandlerClass(); AuthenticateCallbackHandler callbackHandler = Utils.newInstance(clazz); saslCallbackHandlers.put(clientSaslMechanism, callbackHandler); } private void createServerCallbackHandlers(Map<String, ?> configs) { for (String mechanism : jaasContexts.keySet()) { AuthenticateCallbackHandler callbackHandler; String prefix = ListenerName.saslMechanismPrefix(mechanism); @SuppressWarnings("unchecked") Class<? extends AuthenticateCallbackHandler> clazz = (Class<? 
extends AuthenticateCallbackHandler>) configs.get(prefix + BrokerSecurityConfigs.SASL_SERVER_CALLBACK_HANDLER_CLASS); if (clazz != null) callbackHandler = Utils.newInstance(clazz); else if (mechanism.equals(PlainSaslServer.PLAIN_MECHANISM)) callbackHandler = new PlainServerCallbackHandler(); else if (ScramMechanism.isScram(mechanism)) callbackHandler = new ScramServerCallbackHandler(credentialCache.cache(mechanism, ScramCredential.class), tokenCache); else if (mechanism.equals(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)) callbackHandler = new OAuthBearerUnsecuredValidatorCallbackHandler(); else callbackHandler = new SaslServerCallbackHandler(); saslCallbackHandlers.put(mechanism, callbackHandler); } } private void createConnectionsMaxReauthMsMap(Map<String, ?> configs) { for (String mechanism : jaasContexts.keySet()) { String prefix = ListenerName.saslMechanismPrefix(mechanism); Long connectionsMaxReauthMs = (Long) configs.get(prefix + BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS); if (connectionsMaxReauthMs == null) connectionsMaxReauthMs = (Long) configs.get(BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS); if (connectionsMaxReauthMs != null) connectionsMaxReauthMsByMechanism.put(mechanism, connectionsMaxReauthMs); } } protected Class<? extends Login> defaultLoginClass() { if (jaasContexts.containsKey(SaslConfigs.GSSAPI_MECHANISM)) return KerberosLogin.class; if (OAuthBearerLoginModule.OAUTHBEARER_MECHANISM.equals(clientSaslMechanism)) return OAuthBearerRefreshingLogin.class; return DefaultLogin.class; } private Class<? 
extends AuthenticateCallbackHandler> clientCallbackHandlerClass() { switch (clientSaslMechanism) { case SaslConfigs.GSSAPI_MECHANISM: return KerberosClientCallbackHandler.class; case OAuthBearerLoginModule.OAUTHBEARER_MECHANISM: return OAuthBearerSaslClientCallbackHandler.class; default: return SaslClientCallbackHandler.class; } } // As described in http://docs.oracle.com/javase/8/docs/technotes/guides/security/jgss/jgss-features.html: // "To enable Java GSS to delegate to the native GSS library and its list of native mechanisms, // set the system property "sun.security.jgss.native" to true" // "In addition, when performing operations as a particular Subject, for example, Subject.doAs(...) // or Subject.doAsPrivileged(...), the to-be-used GSSCredential should be added to Subject's // private credential set. Otherwise, the GSS operations will fail since no credential is found." private void maybeAddNativeGssapiCredentials(Subject subject) { boolean usingNativeJgss = Boolean.getBoolean(GSS_NATIVE_PROP); if (usingNativeJgss && subject.getPrivateCredentials(GSSCredential.class).isEmpty()) { final String servicePrincipal = SaslClientAuthenticator.firstPrincipal(subject); KerberosName kerberosName; try { kerberosName = KerberosName.parse(servicePrincipal); } catch (IllegalArgumentException e) { throw new KafkaException("Principal has name with unexpected format " + servicePrincipal); } final String servicePrincipalName = kerberosName.serviceName(); final String serviceHostname = kerberosName.hostName(); try { GSSManager manager = gssManager(); // This Oid is used to represent the Kerberos version 5 GSS-API mechanism. It is defined in // RFC 1964. 
Oid krb5Mechanism = new Oid("1.2.840.113554.1.2.2"); GSSName gssName = manager.createName(servicePrincipalName + "@" + serviceHostname, GSSName.NT_HOSTBASED_SERVICE); GSSCredential cred = manager.createCredential(gssName, GSSContext.INDEFINITE_LIFETIME, krb5Mechanism, GSSCredential.ACCEPT_ONLY); subject.getPrivateCredentials().add(cred); log.info("Configured native GSSAPI private credentials for {}@{}", serviceHostname, serviceHostname); } catch (GSSException ex) { log.warn("Cannot add private credential to subject; clients authentication may fail", ex); } } } // Visibility to override for testing protected GSSManager gssManager() { return GSSManager.getInstance(); } // Visibility for testing protected Subject subject(String saslMechanism) { return subjects.get(saslMechanism); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/Selectable.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.network; import java.io.IOException; import java.net.InetSocketAddress; import java.util.Collection; import java.util.List; import java.util.Map; /** * An interface for asynchronous, multi-channel network I/O */ public interface Selectable { /** * See {@link #connect(String, InetSocketAddress, int, int) connect()} */ int USE_DEFAULT_BUFFER_SIZE = -1; /** * Begin establishing a socket connection to the given address identified by the given address * @param id The id for this connection * @param address The address to connect to * @param sendBufferSize The send buffer for the socket * @param receiveBufferSize The receive buffer for the socket * @throws IOException If we cannot begin connecting */ void connect(String id, InetSocketAddress address, int sendBufferSize, int receiveBufferSize) throws IOException; /** * Wakeup this selector if it is blocked on I/O */ void wakeup(); /** * Close this selector */ void close(); /** * Close the connection identified by the given id */ void close(String id); /** * Queue the given request for sending in the subsequent {@link #poll(long) poll()} calls * @param send The request to send */ void send(NetworkSend send); /** * Do I/O. 
Reads, writes, connection establishment, etc. * @param timeout The amount of time to block if there is nothing to do * @throws IOException */ void poll(long timeout) throws IOException; /** * The list of sends that completed on the last {@link #poll(long) poll()} call. */ List<NetworkSend> completedSends(); /** * The collection of receives that completed on the last {@link #poll(long) poll()} call. */ Collection<NetworkReceive> completedReceives(); /** * The connections that finished disconnecting on the last {@link #poll(long) poll()} * call. Channel state indicates the local channel state at the time of disconnection. */ Map<String, ChannelState> disconnected(); /** * The list of connections that completed their connection on the last {@link #poll(long) poll()} * call. */ List<String> connected(); /** * Disable reads from the given connection * @param id The id for the connection */ void mute(String id); /** * Re-enable reads from the given connection * @param id The id for the connection */ void unmute(String id); /** * Disable reads from all connections */ void muteAll(); /** * Re-enable reads from all connections */ void unmuteAll(); /** * returns true if a channel is ready * @param id The id for the connection */ boolean isChannelReady(String id); }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/Selector.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.network; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.memory.MemoryPool; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.internals.IntGaugeSuite; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.CumulativeSum; import org.apache.kafka.common.metrics.stats.Max; import org.apache.kafka.common.metrics.stats.Meter; import org.apache.kafka.common.metrics.stats.SampledStat; import org.apache.kafka.common.metrics.stats.WindowedCount; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import java.io.IOException; import java.net.InetSocketAddress; import java.net.Socket; import java.nio.channels.CancelledKeyException; import java.nio.channels.SelectionKey; import java.nio.channels.SocketChannel; import java.nio.channels.UnresolvedAddressException; import java.util.ArrayList; import java.util.Collection; 
import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; /** * A nioSelector interface for doing non-blocking multi-connection network I/O. * <p> * This class works with {@link NetworkSend} and {@link NetworkReceive} to transmit size-delimited network requests and * responses. * <p> * A connection can be added to the nioSelector associated with an integer id by doing * * <pre> * nioSelector.connect(&quot;42&quot;, new InetSocketAddress(&quot;google.com&quot;, server.port), 64000, 64000); * </pre> * * The connect call does not block on the creation of the TCP connection, so the connect method only begins initiating * the connection. The successful invocation of this method does not mean a valid connection has been established. * * Sending requests, receiving responses, processing connection completions, and disconnections on the existing * connections are all done using the <code>poll()</code> call. * * <pre> * nioSelector.send(new NetworkSend(myDestination, myBytes)); * nioSelector.send(new NetworkSend(myOtherDestination, myOtherBytes)); * nioSelector.poll(TIMEOUT_MS); * </pre> * * The nioSelector maintains several lists that are reset by each call to <code>poll()</code> which are available via * various getters. These are reset by each call to <code>poll()</code>. * * This class is not thread safe! 
*/
public class Selector implements Selectable, AutoCloseable {

    public static final long NO_IDLE_TIMEOUT_MS = -1;
    public static final int NO_FAILED_AUTHENTICATION_DELAY = 0;

    // Governs how close() treats outstanding buffered receives and whether the
    // channel is reported via disconnected().
    private enum CloseMode {
        GRACEFUL(true),            // process outstanding buffered receives, notify disconnect
        NOTIFY_ONLY(true),         // discard any outstanding receives, notify disconnect
        DISCARD_NO_NOTIFY(false);  // discard any outstanding receives, no disconnect notification

        boolean notifyDisconnect;

        CloseMode(boolean notifyDisconnect) {
            this.notifyDisconnect = notifyDisconnect;
        }
    }

    private final Logger log;
    private final java.nio.channels.Selector nioSelector;
    // Open channels keyed by connection id (see registerChannel).
    private final Map<String, KafkaChannel> channels;
    // Channels muted via mute()/muteAll(), as opposed to channels muted internally due to memory pressure.
    private final Set<KafkaChannel> explicitlyMutedChannels;
    private boolean outOfMemory;
    private final List<NetworkSend> completedSends;
    // Keyed by connection id; at most one completed receive per channel per poll (see addToCompletedReceives).
    private final LinkedHashMap<String, NetworkReceive> completedReceives;
    private final Set<SelectionKey> immediatelyConnectedKeys;
    // Channels that were disconnected but are retained until their buffered receives are processed.
    private final Map<String, KafkaChannel> closingChannels;
    // Keys whose channels still hold undrained data in intermediary (e.g. SSL) buffers.
    private Set<SelectionKey> keysWithBufferedRead;
    private final Map<String, ChannelState> disconnected;
    private final List<String> connected;
    private final List<String> failedSends;
    private final Time time;
    private final SelectorMetrics sensors;
    private final ChannelBuilder channelBuilder;
    private final int maxReceiveSize;
    private final boolean recordTimePerConnection;
    // Null when idle-connection expiry is disabled (connectionMaxIdleMs < 0).
    private final IdleExpiryManager idleExpiryManager;
    // Null unless failedAuthenticationDelayMs > NO_FAILED_AUTHENTICATION_DELAY.
    private final LinkedHashMap<String, DelayedAuthenticationFailureClose> delayedClosingChannels;
    private final MemoryPool memoryPool;
    // 10% of the pool size; below this we shuffle key handling order to avoid read starvation.
    private final long lowMemThreshold;
    private final int failedAuthenticationDelayMs;

    //indicates if the previous call to poll was able to make progress in reading already-buffered data.
    //this is used to prevent tight loops when memory is not available to read any more data
    private boolean madeReadProgressLastPoll = true;

    /**
     * Create a new nioSelector
     * @param maxReceiveSize Max size in bytes of a single network receive (use {@link NetworkReceive#UNLIMITED} for no limit)
     * @param connectionMaxIdleMs Max idle connection time (use {@link #NO_IDLE_TIMEOUT_MS} to disable idle timeout)
     * @param failedAuthenticationDelayMs Minimum time by which failed authentication response and channel close should be delayed by.
     *                                    Use {@link #NO_FAILED_AUTHENTICATION_DELAY} to disable this delay.
     * @param metrics Registry for Selector metrics
     * @param time Time implementation
     * @param metricGrpPrefix Prefix for the group of metrics registered by Selector
     * @param metricTags Additional tags to add to metrics registered by Selector
     * @param metricsPerConnection Whether or not to enable per-connection metrics
     * @param channelBuilder Channel builder for every new connection
     * @param logContext Context for logging with additional info
     */
    public Selector(int maxReceiveSize,
            long connectionMaxIdleMs,
            int failedAuthenticationDelayMs,
            Metrics metrics,
            Time time,
            String metricGrpPrefix,
            Map<String, String> metricTags,
            boolean metricsPerConnection,
            boolean recordTimePerConnection,
            ChannelBuilder channelBuilder,
            MemoryPool memoryPool,
            LogContext logContext) {
        try {
            this.nioSelector = java.nio.channels.Selector.open();
        } catch (IOException e) {
            throw new KafkaException(e);
        }
        this.maxReceiveSize = maxReceiveSize;
        this.time = time;
        this.channels = new HashMap<>();
        this.explicitlyMutedChannels = new HashSet<>();
        this.outOfMemory = false;
        this.completedSends = new ArrayList<>();
        this.completedReceives = new LinkedHashMap<>();
        this.immediatelyConnectedKeys = new HashSet<>();
        this.closingChannels = new HashMap<>();
        this.keysWithBufferedRead = new HashSet<>();
        this.connected = new ArrayList<>();
        this.disconnected = new HashMap<>();
        this.failedSends = new ArrayList<>();
        this.log = logContext.logger(Selector.class);
        this.sensors = new SelectorMetrics(metrics, metricGrpPrefix, metricTags, metricsPerConnection);
        this.channelBuilder = channelBuilder;
        this.recordTimePerConnection = recordTimePerConnection;
        this.idleExpiryManager = connectionMaxIdleMs < 0 ? null : new IdleExpiryManager(time, connectionMaxIdleMs);
        this.memoryPool = memoryPool;
        this.lowMemThreshold = (long) (0.1 * this.memoryPool.size());
        this.failedAuthenticationDelayMs = failedAuthenticationDelayMs;
        this.delayedClosingChannels = (failedAuthenticationDelayMs > NO_FAILED_AUTHENTICATION_DELAY) ? new LinkedHashMap<String, DelayedAuthenticationFailureClose>() : null;
    }

    // Convenience constructor: no failed-authentication close delay.
    public Selector(int maxReceiveSize,
            long connectionMaxIdleMs,
            Metrics metrics,
            Time time,
            String metricGrpPrefix,
            Map<String, String> metricTags,
            boolean metricsPerConnection,
            boolean recordTimePerConnection,
            ChannelBuilder channelBuilder,
            MemoryPool memoryPool,
            LogContext logContext) {
        this(maxReceiveSize, connectionMaxIdleMs, NO_FAILED_AUTHENTICATION_DELAY, metrics, time, metricGrpPrefix, metricTags,
                metricsPerConnection, recordTimePerConnection, channelBuilder, memoryPool, logContext);
    }

    // Convenience constructor: no per-connection time recording, unbounded memory pool.
    public Selector(int maxReceiveSize,
            long connectionMaxIdleMs,
            int failedAuthenticationDelayMs,
            Metrics metrics,
            Time time,
            String metricGrpPrefix,
            Map<String, String> metricTags,
            boolean metricsPerConnection,
            ChannelBuilder channelBuilder,
            LogContext logContext) {
        this(maxReceiveSize, connectionMaxIdleMs, failedAuthenticationDelayMs, metrics, time, metricGrpPrefix, metricTags,
                metricsPerConnection, false, channelBuilder, MemoryPool.NONE, logContext);
    }

    public Selector(int maxReceiveSize,
            long connectionMaxIdleMs,
            Metrics metrics,
            Time time,
            String metricGrpPrefix,
            Map<String, String> metricTags,
            boolean metricsPerConnection,
            ChannelBuilder channelBuilder,
            LogContext logContext) {
        this(maxReceiveSize, connectionMaxIdleMs, NO_FAILED_AUTHENTICATION_DELAY, metrics, time, metricGrpPrefix, metricTags,
                metricsPerConnection, channelBuilder, logContext);
    }

    // Convenience constructor: unlimited receive size, per-connection metrics enabled.
    public Selector(long connectionMaxIdleMS, Metrics metrics, Time time, String metricGrpPrefix, ChannelBuilder channelBuilder, LogContext logContext) {
        this(NetworkReceive.UNLIMITED, connectionMaxIdleMS, metrics, time, metricGrpPrefix, Collections.emptyMap(), true, channelBuilder, logContext);
    }

    public Selector(long connectionMaxIdleMS, int failedAuthenticationDelayMs, Metrics metrics, Time time, String metricGrpPrefix, ChannelBuilder channelBuilder, LogContext logContext) {
        this(NetworkReceive.UNLIMITED, connectionMaxIdleMS, failedAuthenticationDelayMs, metrics, time, metricGrpPrefix, Collections.<String, String>emptyMap(), true, channelBuilder, logContext);
    }

    /**
     * Begin connecting to the given address and add the connection to this nioSelector associated with the given id
     * number.
     * <p>
     * Note that this call only initiates the connection, which will be completed on a future {@link #poll(long)}
     * call. Check {@link #connected()} to see which (if any) connections have completed after a given poll call.
     * @param id The id for the new connection
     * @param address The address to connect to
     * @param sendBufferSize The send buffer for the new connection
     * @param receiveBufferSize The receive buffer for the new connection
     * @throws IllegalStateException if there is already a connection for that id
     * @throws IOException if DNS resolution fails on the hostname or if the broker is down
     */
    @Override
    public void connect(String id, InetSocketAddress address, int sendBufferSize, int receiveBufferSize) throws IOException {
        ensureNotRegistered(id);
        SocketChannel socketChannel = SocketChannel.open();
        SelectionKey key = null;
        try {
            configureSocketChannel(socketChannel, sendBufferSize, receiveBufferSize);
            boolean connected = doConnect(socketChannel, address);
            key = registerChannel(id, socketChannel, SelectionKey.OP_CONNECT);

            if (connected) {
                // OP_CONNECT won't trigger for immediately connected channels
                log.debug("Immediately connected to node {}", id);
                immediatelyConnectedKeys.add(key);
                key.interestOps(0);
            }
        } catch (IOException | RuntimeException e) {
            // Roll back any partial registration so a retry with the same id can succeed.
            if (key != null)
                immediatelyConnectedKeys.remove(key);
            channels.remove(id);
            socketChannel.close();
            throw e;
        }
    }

    // Visible to allow test cases to override. In particular, we use this to implement a blocking connect
    // in order to simulate "immediately connected" sockets.
    protected boolean doConnect(SocketChannel channel, InetSocketAddress address) throws IOException {
        try {
            return channel.connect(address);
        } catch (UnresolvedAddressException e) {
            // Wrap so callers see a checked IOException with the failing address.
            throw new IOException("Can't resolve address: " + address, e);
        }
    }

    // Apply non-blocking mode, keep-alive, buffer sizes and TCP_NODELAY to a new socket.
    private void configureSocketChannel(SocketChannel socketChannel, int sendBufferSize, int receiveBufferSize) throws IOException {
        socketChannel.configureBlocking(false);
        Socket socket = socketChannel.socket();
        socket.setKeepAlive(true);
        if (sendBufferSize != Selectable.USE_DEFAULT_BUFFER_SIZE)
            socket.setSendBufferSize(sendBufferSize);
        if (receiveBufferSize != Selectable.USE_DEFAULT_BUFFER_SIZE)
            socket.setReceiveBufferSize(receiveBufferSize);
        socket.setTcpNoDelay(true);
    }

    /**
     * Register the nioSelector with an existing channel
     * Use this on server-side, when a connection is accepted by a different thread but processed by the Selector
     * <p>
     * If a connection already exists with the same connection id in `channels` or `closingChannels`,
     * an exception is thrown. Connection ids must be chosen to avoid conflict when remote ports are reused.
     * Kafka brokers add an incrementing index to the connection id to avoid reuse in the timing window
     * where an existing connection may not yet have been closed by the broker when a new connection with
     * the same remote host:port is processed.
     * </p><p>
     * If a `KafkaChannel` cannot be created for this connection, the `socketChannel` is closed
     * and its selection key cancelled.
     * </p>
     */
    public void register(String id, SocketChannel socketChannel) throws IOException {
        ensureNotRegistered(id);
        registerChannel(id, socketChannel, SelectionKey.OP_READ);
        this.sensors.connectionCreated.record();
        // Default to empty client information as the ApiVersionsRequest is not
        // mandatory. In this case, we still want to account for the connection.
        ChannelMetadataRegistry metadataRegistry = this.channel(id).channelMetadataRegistry();
        if (metadataRegistry.clientInformation() == null)
            metadataRegistry.registerClientInformation(ClientInformation.EMPTY);
    }

    // Reject duplicate connection ids in both open and still-closing channels.
    private void ensureNotRegistered(String id) {
        if (this.channels.containsKey(id))
            throw new IllegalStateException("There is already a connection for id " + id);
        if (this.closingChannels.containsKey(id))
            throw new IllegalStateException("There is already a connection for id " + id + " that is still being closed");
    }

    // Register the socket with the nio selector, build the KafkaChannel, and track it by id.
    protected SelectionKey registerChannel(String id, SocketChannel socketChannel, int interestedOps) throws IOException {
        SelectionKey key = socketChannel.register(nioSelector, interestedOps);
        KafkaChannel channel = buildAndAttachKafkaChannel(socketChannel, id, key);
        this.channels.put(id, channel);
        if (idleExpiryManager != null)
            idleExpiryManager.update(channel.id(), time.nanoseconds());
        return key;
    }

    // On failure, closes the socket and cancels the key before rethrowing as IOException.
    private KafkaChannel buildAndAttachKafkaChannel(SocketChannel socketChannel, String id, SelectionKey key) throws IOException {
        try {
            KafkaChannel channel = channelBuilder.buildChannel(id, key, maxReceiveSize, memoryPool,
                new SelectorChannelMetadataRegistry());
            key.attach(channel);
            return channel;
        } catch (Exception e) {
            try {
                socketChannel.close();
            } finally {
                key.cancel();
            }
            throw new IOException("Channel could not be created for socket " + socketChannel, e);
        }
    }

    /**
     * Interrupt the nioSelector if it is blocked waiting to do I/O.
     */
    @Override
    public void wakeup() {
        this.nioSelector.wakeup();
    }

    /**
     * Close this selector and all associated connections
     */
    @Override
    public void close() {
        List<String> connections = new ArrayList<>(channels.keySet());
        AtomicReference<Throwable> firstException = new AtomicReference<>();
        Utils.closeAllQuietly(firstException, "release connections",
                connections.stream().map(id -> (AutoCloseable) () -> close(id)).toArray(AutoCloseable[]::new));
        // If there is any exception thrown in close(id), we should still be able
        // to close the remaining objects, especially the sensors because keeping
        // the sensors may lead to failure to start up the ReplicaFetcherThread if
        // the old sensors with the same names has not yet been cleaned up.
        Utils.closeQuietly(nioSelector, "nioSelector", firstException);
        Utils.closeQuietly(sensors, "sensors", firstException);
        Utils.closeQuietly(channelBuilder, "channelBuilder", firstException);
        Throwable exception = firstException.get();
        if (exception instanceof RuntimeException && !(exception instanceof SecurityException)) {
            throw (RuntimeException) exception;
        }
    }

    /**
     * Queue the given request for sending in the subsequent {@link #poll(long)} calls
     * @param send The request to send
     */
    public void send(NetworkSend send) {
        String connectionId = send.destinationId();
        KafkaChannel channel = openOrClosingChannelOrFail(connectionId);
        if (closingChannels.containsKey(connectionId)) {
            // ensure notification via `disconnected`, leave channel in the state in which closing was triggered
            this.failedSends.add(connectionId);
        } else {
            try {
                channel.setSend(send);
            } catch (Exception e) {
                // update the state for consistency, the channel will be discarded after `close`
                channel.state(ChannelState.FAILED_SEND);
                // ensure notification via `disconnected` when `failedSends` are processed in the next poll
                this.failedSends.add(connectionId);
                close(channel, CloseMode.DISCARD_NO_NOTIFY);
                if (!(e instanceof CancelledKeyException)) {
                    log.error("Unexpected exception during send, closing connection {} and rethrowing exception {}",
                            connectionId, e);
                    throw e;
                }
            }
        }
    }

    /**
     * Do whatever I/O can be done on each connection without blocking. This includes completing connections, completing
     * disconnections, initiating new sends, or making progress on in-progress sends or receives.
     *
     * When this call is completed the user can check for completed sends, receives, connections or disconnects using
     * {@link #completedSends()}, {@link #completedReceives()}, {@link #connected()}, {@link #disconnected()}. These
     * lists will be cleared at the beginning of each `poll` call and repopulated by the call if there is
     * any completed I/O.
     *
     * In the "Plaintext" setting, we are using socketChannel to read & write to the network. But for the "SSL" setting,
     * we encrypt the data before we use socketChannel to write data to the network, and decrypt before we return the responses.
     * This requires additional buffers to be maintained as we are reading from network, since the data on the wire is encrypted
     * we won't be able to read exact no.of bytes as kafka protocol requires. We read as many bytes as we can, up to SSLEngine's
     * application buffer size. This means we might be reading additional bytes than the requested size.
     * If there is no further data to read from socketChannel selector won't invoke that channel and we have additional bytes
     * in the buffer. To overcome this issue we added "keysWithBufferedRead" map which tracks channels which have data in the SSL
     * buffers. If there are channels with buffered data that can be processed, we set "timeout" to 0 and process the data even
     * if there is no more data to read from the socket.
     *
     * At most one entry is added to "completedReceives" for a channel in each poll. This is necessary to guarantee that
     * requests from a channel are processed on the broker in the order they are sent.
     * Since outstanding requests added
     * by SocketServer to the request queue may be processed by different request handler threads, requests on each
     * channel must be processed one-at-a-time to guarantee ordering.
     *
     * @param timeout The amount of time to wait, in milliseconds, which must be non-negative
     * @throws IllegalArgumentException If `timeout` is negative
     * @throws IllegalStateException If a send is given for which we have no existing connection or for which there is
     *                               already an in-progress send
     */
    @Override
    public void poll(long timeout) throws IOException {
        if (timeout < 0)
            throw new IllegalArgumentException("timeout should be >= 0");

        boolean madeReadProgressLastCall = madeReadProgressLastPoll;
        clear();

        boolean dataInBuffers = !keysWithBufferedRead.isEmpty();

        // Don't block if there is work that can be done immediately (buffered data made
        // progress last time, or sockets connected without waiting for OP_CONNECT).
        if (!immediatelyConnectedKeys.isEmpty() || (madeReadProgressLastCall && dataInBuffers))
            timeout = 0;

        if (!memoryPool.isOutOfMemory() && outOfMemory) {
            //we have recovered from memory pressure. unmute any channel not explicitly muted for other reasons
            log.trace("Broker no longer low on memory - unmuting incoming sockets");
            for (KafkaChannel channel : channels.values()) {
                if (channel.isInMutableState() && !explicitlyMutedChannels.contains(channel)) {
                    channel.maybeUnmute();
                }
            }
            outOfMemory = false;
        }

        /* check ready keys */
        long startSelect = time.nanoseconds();
        int numReadyKeys = select(timeout);
        long endSelect = time.nanoseconds();
        this.sensors.selectTime.record(endSelect - startSelect, time.milliseconds(), false);

        if (numReadyKeys > 0 || !immediatelyConnectedKeys.isEmpty() || dataInBuffers) {
            Set<SelectionKey> readyKeys = this.nioSelector.selectedKeys();

            // Poll from channels that have buffered data (but nothing more from the underlying socket)
            if (dataInBuffers) {
                keysWithBufferedRead.removeAll(readyKeys); //so no channel gets polled twice
                Set<SelectionKey> toPoll = keysWithBufferedRead;
                keysWithBufferedRead = new HashSet<>(); //poll() calls will repopulate if needed
                pollSelectionKeys(toPoll, false, endSelect);
            }

            // Poll from channels where the underlying socket has more data
            pollSelectionKeys(readyKeys, false, endSelect);
            // Clear all selected keys so that they are excluded from the ready count for the next select
            readyKeys.clear();

            pollSelectionKeys(immediatelyConnectedKeys, true, endSelect);
            immediatelyConnectedKeys.clear();
        } else {
            madeReadProgressLastPoll = true; //no work is also "progress"
        }

        long endIo = time.nanoseconds();
        this.sensors.ioTime.record(endIo - endSelect, time.milliseconds(), false);

        // Close channels that were delayed and are now ready to be closed
        completeDelayedChannelClose(endIo);

        // we use the time at the end of select to ensure that we don't close any connections that
        // have just been processed in pollSelectionKeys
        maybeCloseOldestConnection(endSelect);
    }

    /**
     * handle any ready I/O on a set of selection keys
     * @param selectionKeys set of keys to handle
     * @param isImmediatelyConnected true if running over a set of keys for just-connected sockets
     * @param currentTimeNanos time at which set of keys was determined
     */
    // package-private for testing
    void pollSelectionKeys(Set<SelectionKey> selectionKeys,
                           boolean isImmediatelyConnected,
                           long currentTimeNanos) {
        for (SelectionKey key : determineHandlingOrder(selectionKeys)) {
            KafkaChannel channel = channel(key);
            long channelStartTimeNanos = recordTimePerConnection ? time.nanoseconds() : 0;
            boolean sendFailed = false;
            String nodeId = channel.id();

            // register all per-connection metrics at once
            sensors.maybeRegisterConnectionMetrics(nodeId);
            if (idleExpiryManager != null)
                idleExpiryManager.update(nodeId, currentTimeNanos);

            try {
                /* complete any connections that have finished their handshake (either normally or immediately) */
                if (isImmediatelyConnected || key.isConnectable()) {
                    if (channel.finishConnect()) {
                        this.connected.add(nodeId);
                        this.sensors.connectionCreated.record();

                        SocketChannel socketChannel = (SocketChannel) key.channel();
                        log.debug("Created socket with SO_RCVBUF = {}, SO_SNDBUF = {}, SO_TIMEOUT = {} to node {}",
                                socketChannel.socket().getReceiveBufferSize(),
                                socketChannel.socket().getSendBufferSize(),
                                socketChannel.socket().getSoTimeout(),
                                nodeId);
                    } else {
                        // Connection not yet complete; revisit this key on a later poll.
                        continue;
                    }
                }

                /* if channel is not ready finish prepare */
                if (channel.isConnected() && !channel.ready()) {
                    channel.prepare();
                    if (channel.ready()) {
                        long readyTimeMs = time.milliseconds();
                        // successfulAuthentications() > 1 means this prepare completed a re-authentication.
                        boolean isReauthentication = channel.successfulAuthentications() > 1;
                        if (isReauthentication) {
                            sensors.successfulReauthentication.record(1.0, readyTimeMs);
                            if (channel.reauthenticationLatencyMs() == null)
                                log.warn(
                                    "Should never happen: re-authentication latency for a re-authenticated channel was null; continuing...");
                            else
                                sensors.reauthenticationLatency
                                    .record(channel.reauthenticationLatencyMs().doubleValue(), readyTimeMs);
                        } else {
                            sensors.successfulAuthentication.record(1.0, readyTimeMs);
                            if (!channel.connectedClientSupportsReauthentication())
                                sensors.successfulAuthenticationNoReauth.record(1.0, readyTimeMs);
                        }
                        log.debug("Successfully {}authenticated with {}", isReauthentication ?
                            "re-" : "", channel.socketDescription());
                    }
                }

                if (channel.ready() && channel.state() == ChannelState.NOT_CONNECTED)
                    channel.state(ChannelState.READY);
                // A response that arrived while re-authentication was in flight is surfaced now.
                Optional<NetworkReceive> responseReceivedDuringReauthentication = channel.pollResponseReceivedDuringReauthentication();
                responseReceivedDuringReauthentication.ifPresent(receive -> {
                    long currentTimeMs = time.milliseconds();
                    addToCompletedReceives(channel, receive, currentTimeMs);
                });

                //if channel is ready and has bytes to read from socket or buffer, and has no
                //previous completed receive then read from it
                if (channel.ready() && (key.isReadable() || channel.hasBytesBuffered()) && !hasCompletedReceive(channel)
                        && !explicitlyMutedChannels.contains(channel)) {
                    attemptRead(channel);
                }

                if (channel.hasBytesBuffered() && !explicitlyMutedChannels.contains(channel)) {
                    //this channel has bytes enqueued in intermediary buffers that we could not read
                    //(possibly because no memory). it may be the case that the underlying socket will
                    //not come up in the next poll() and so we need to remember this channel for the
                    //next poll call otherwise data may be stuck in said buffers forever. If we attempt
                    //to process buffered data and no progress is made, the channel buffered status is
                    //cleared to avoid the overhead of checking every time.
                    keysWithBufferedRead.add(key);
                }

                /* if channel is ready write to any sockets that have space in their buffer and for which we have data */

                long nowNanos = channelStartTimeNanos != 0 ? channelStartTimeNanos : currentTimeNanos;
                try {
                    attemptWrite(key, channel, nowNanos);
                } catch (Exception e) {
                    // Remember that the failure happened on the write path so close() below
                    // uses NOTIFY_ONLY instead of draining buffered receives.
                    sendFailed = true;
                    throw e;
                }

                /* cancel any defunct sockets */
                if (!key.isValid())
                    close(channel, CloseMode.GRACEFUL);

            } catch (Exception e) {
                String desc = String.format("%s (channelId=%s)", channel.socketDescription(), channel.id());
                if (e instanceof IOException) {
                    log.debug("Connection with {} disconnected", desc, e);
                } else if (e instanceof AuthenticationException) {
                    // successfulAuthentications() > 0 here means a prior authentication succeeded,
                    // so this failure occurred during re-authentication.
                    boolean isReauthentication = channel.successfulAuthentications() > 0;
                    if (isReauthentication)
                        sensors.failedReauthentication.record();
                    else
                        sensors.failedAuthentication.record();
                    String exceptionMessage = e.getMessage();
                    if (e instanceof DelayedResponseAuthenticationException)
                        exceptionMessage = e.getCause().getMessage();
                    log.info("Failed {}authentication with {} ({})", isReauthentication ? "re-" : "",
                        desc, exceptionMessage);
                } else {
                    log.warn("Unexpected error from {}; closing connection", desc, e);
                }

                if (e instanceof DelayedResponseAuthenticationException)
                    maybeDelayCloseOnAuthenticationFailure(channel);
                else
                    close(channel, sendFailed ? CloseMode.NOTIFY_ONLY : CloseMode.GRACEFUL);
            } finally {
                maybeRecordTimePerConnection(channel, channelStartTimeNanos);
            }
        }
    }

    // Write only when a send is queued, the channel is ready/writable, and client-side
    // re-authentication did not just begin (which would defer the send).
    private void attemptWrite(SelectionKey key, KafkaChannel channel, long nowNanos) throws IOException {
        if (channel.hasSend()
                && channel.ready()
                && key.isWritable()
                && !channel.maybeBeginClientReauthentication(() -> nowNanos)) {
            write(channel);
        }
    }

    // package-private for testing
    void write(KafkaChannel channel) throws IOException {
        String nodeId = channel.id();
        long bytesSent = channel.write();
        NetworkSend send = channel.maybeCompleteSend();
        // We may complete the send with bytesSent < 1 if `TransportLayer.hasPendingWrites` was true and `channel.write()`
        // caused the pending writes to be written to the socket channel buffer
        if (bytesSent > 0 || send != null) {
            long currentTimeMs = time.milliseconds();
            if (bytesSent > 0)
                this.sensors.recordBytesSent(nodeId, bytesSent, currentTimeMs);
            if (send != null) {
                this.completedSends.add(send);
                this.sensors.recordCompletedSend(nodeId, send.size(), currentTimeMs);
            }
        }
    }

    private Collection<SelectionKey> determineHandlingOrder(Set<SelectionKey> selectionKeys) {
        //it is possible that the iteration order over selectionKeys is the same every invocation.
        //this may cause starvation of reads when memory is low. to address this we shuffle the keys if memory is low.
        if (!outOfMemory && memoryPool.availableMemory() < lowMemThreshold) {
            List<SelectionKey> shuffledKeys = new ArrayList<>(selectionKeys);
            Collections.shuffle(shuffledKeys);
            return shuffledKeys;
        } else {
            return selectionKeys;
        }
    }

    private void attemptRead(KafkaChannel channel) throws IOException {
        String nodeId = channel.id();

        long bytesReceived = channel.read();
        if (bytesReceived != 0) {
            long currentTimeMs = time.milliseconds();
            sensors.recordBytesReceived(nodeId, bytesReceived, currentTimeMs);
            madeReadProgressLastPoll = true;

            NetworkReceive receive = channel.maybeCompleteReceive();
            if (receive != null) {
                addToCompletedReceives(channel, receive, currentTimeMs);
            }
        }
        if (channel.isMuted()) {
            outOfMemory = true; //channel has muted itself due to memory pressure.
        } else {
            madeReadProgressLastPoll = true;
        }
    }

    // Returns true if the closing channel still has (or may still produce) a receive to process.
    private boolean maybeReadFromClosingChannel(KafkaChannel channel) {
        boolean hasPending;
        if (channel.state().state() != ChannelState.State.READY)
            hasPending = false;
        else if (explicitlyMutedChannels.contains(channel) || hasCompletedReceive(channel))
            hasPending = true;
        else {
            try {
                attemptRead(channel);
                hasPending = hasCompletedReceive(channel);
            } catch (Exception e) {
                // Channel is already closing; a failed read just means nothing more to drain.
                log.trace("Read from closing channel failed, ignoring exception", e);
                hasPending = false;
            }
        }
        return hasPending;
    }

    // Record time spent in pollSelectionKeys for channel (moved into a method to keep checkstyle happy)
    private void maybeRecordTimePerConnection(KafkaChannel channel, long startTimeNanos) {
        if (recordTimePerConnection)
            channel.addNetworkThreadTimeNanos(time.nanoseconds() - startTimeNanos);
    }

    @Override
    public List<NetworkSend> completedSends() {
        return this.completedSends;
    }

    @Override
    public Collection<NetworkReceive> completedReceives() {
        return this.completedReceives.values();
    }

    @Override
    public Map<String, ChannelState> disconnected() {
        return this.disconnected;
    }

    @Override
    public List<String> connected() {
        return this.connected;
    }

    @Override
    public void mute(String id) {
        KafkaChannel channel = openOrClosingChannelOrFail(id);
        mute(channel);
    }

    private void mute(KafkaChannel channel) {
        channel.mute();
        explicitlyMutedChannels.add(channel);
        // A muted channel must not be polled for its buffered data either.
        keysWithBufferedRead.remove(channel.selectionKey());
    }

    @Override
    public void unmute(String id) {
        KafkaChannel channel = openOrClosingChannelOrFail(id);
        unmute(channel);
    }

    private void unmute(KafkaChannel channel) {
        // Remove the channel from explicitlyMutedChannels only if the channel has been actually unmuted.
        if (channel.maybeUnmute()) {
            explicitlyMutedChannels.remove(channel);
            if (channel.hasBytesBuffered()) {
                keysWithBufferedRead.add(channel.selectionKey());
                madeReadProgressLastPoll = true;
            }
        }
    }

    @Override
    public void muteAll() {
        for (KafkaChannel channel : this.channels.values())
            mute(channel);
    }

    @Override
    public void unmuteAll() {
        for (KafkaChannel channel : this.channels.values())
            unmute(channel);
    }

    // package-private for testing
    void completeDelayedChannelClose(long currentTimeNanos) {
        if (delayedClosingChannels == null)
            return;

        // Entries are in insertion (oldest-first) order; stop at the first not yet due.
        while (!delayedClosingChannels.isEmpty()) {
            DelayedAuthenticationFailureClose delayedClose = delayedClosingChannels.values().iterator().next();
            if (!delayedClose.tryClose(currentTimeNanos))
                break;
        }
    }

    private void maybeCloseOldestConnection(long currentTimeNanos) {
        if (idleExpiryManager == null)
            return;

        Map.Entry<String, Long> expiredConnection = idleExpiryManager.pollExpiredConnection(currentTimeNanos);
        if (expiredConnection != null) {
            String connectionId = expiredConnection.getKey();
            KafkaChannel channel = this.channels.get(connectionId);
            if (channel != null) {
                if (log.isTraceEnabled())
                    log.trace("About to close the idle connection from {} due to being idle for {} millis",
                            connectionId, (currentTimeNanos - expiredConnection.getValue()) / 1000 / 1000);
                channel.state(ChannelState.EXPIRED);
                close(channel, CloseMode.GRACEFUL);
            }
        }
    }

    /**
     * Clears completed receives.
     * This is used by SocketServer to remove references to
     * receive buffers after processing completed receives, without waiting for the next
     * poll().
     */
    public void clearCompletedReceives() {
        this.completedReceives.clear();
    }

    /**
     * Clears completed sends. This is used by SocketServer to remove references to
     * send buffers after processing completed sends, without waiting for the next
     * poll().
     */
    public void clearCompletedSends() {
        this.completedSends.clear();
    }

    /**
     * Clears all the results from the previous poll. This is invoked by Selector at the start of
     * a poll() when all the results from the previous poll are expected to have been handled.
     * <p>
     * SocketServer uses {@link #clearCompletedSends()} and {@link #clearCompletedReceives()} to
     * clear `completedSends` and `completedReceives` as soon as they are processed to avoid
     * holding onto large request/response buffers from multiple connections longer than necessary.
     * Clients rely on Selector invoking {@link #clear()} at the start of each poll() since memory usage
     * is less critical and clearing once-per-poll provides the flexibility to process these results in
     * any order before the next poll.
     */
    private void clear() {
        this.completedSends.clear();
        this.completedReceives.clear();
        this.connected.clear();
        this.disconnected.clear();

        // Remove closed channels after all their buffered receives have been processed or if a send was requested
        for (Iterator<Map.Entry<String, KafkaChannel>> it = closingChannels.entrySet().iterator(); it.hasNext(); ) {
            KafkaChannel channel = it.next().getValue();
            boolean sendFailed = failedSends.remove(channel.id());
            boolean hasPending = false;
            if (!sendFailed)
                hasPending = maybeReadFromClosingChannel(channel);
            if (!hasPending) {
                doClose(channel, true);
                it.remove();
            }
        }

        for (String channel : this.failedSends)
            this.disconnected.put(channel, ChannelState.FAILED_SEND);
        this.failedSends.clear();
        this.madeReadProgressLastPoll = false;
    }

    /**
     * Check for data, waiting up to the given timeout.
     *
     * @param timeoutMs Length of time to wait, in milliseconds, which must be non-negative
     * @return The number of keys ready
     */
    private int select(long timeoutMs) throws IOException {
        if (timeoutMs < 0L)
            throw new IllegalArgumentException("timeout should be >= 0");

        // selectNow() is non-blocking; select(ms) blocks up to the timeout.
        if (timeoutMs == 0L)
            return this.nioSelector.selectNow();
        else
            return this.nioSelector.select(timeoutMs);
    }

    /**
     * Close the connection identified by the given id
     */
    public void close(String id) {
        KafkaChannel channel = this.channels.get(id);
        if (channel != null) {
            // There is no disconnect notification for local close, but updating
            // channel state here anyway to avoid confusion.
            channel.state(ChannelState.LOCAL_CLOSE);
            close(channel, CloseMode.DISCARD_NO_NOTIFY);
        } else {
            KafkaChannel closingChannel = this.closingChannels.remove(id);
            // Close any closing channel, leave the channel in the state in which closing was triggered
            if (closingChannel != null)
                doClose(closingChannel, false);
        }
    }

    // Schedule an authentication-failure close for later, or close immediately when delays are disabled.
    private void maybeDelayCloseOnAuthenticationFailure(KafkaChannel channel) {
        DelayedAuthenticationFailureClose delayedClose = new DelayedAuthenticationFailureClose(channel, failedAuthenticationDelayMs);
        if (delayedClosingChannels != null)
            delayedClosingChannels.put(channel.id(), delayedClose);
        else
            delayedClose.closeNow();
    }

    private void handleCloseOnAuthenticationFailure(KafkaChannel channel) {
        try {
            channel.completeCloseOnAuthenticationFailure();
        } catch (Exception e) {
            log.error("Exception handling close on authentication failure node {}", channel.id(), e);
        } finally {
            close(channel, CloseMode.GRACEFUL);
        }
    }

    /**
     * Begin closing this connection.
     * If 'closeMode' is `CloseMode.GRACEFUL`, the channel is disconnected here, but outstanding receives
     * are processed. The channel is closed when there are no outstanding receives or if a send is
     * requested. For other values of `closeMode`, outstanding receives are discarded and the channel
     * is closed immediately.
     *
     * The channel will be added to disconnect list when it is actually closed if `closeMode.notifyDisconnect`
     * is true.
     */
    private void close(KafkaChannel channel, CloseMode closeMode) {
        channel.disconnect();

        // Ensure that `connected` does not have closed channels. This could happen if `prepare` throws an exception
        // in the `poll` invocation when `finishConnect` succeeds
        connected.remove(channel.id());

        // Keep track of closed channels with pending receives so that all received records
        // may be processed. For example, when producer with acks=0 sends some records and
        // closes its connections, a single poll() in the broker may receive records and
        // handle close(). When the remote end closes its connection, the channel is retained until
        // a send fails or all outstanding receives are processed. Mute state of disconnected channels
        // are tracked to ensure that requests are processed one-by-one by the broker to preserve ordering.
        if (closeMode == CloseMode.GRACEFUL && maybeReadFromClosingChannel(channel)) {
            closingChannels.put(channel.id(), channel);
            log.debug("Tracking closing connection {} to process outstanding requests", channel.id());
        } else {
            doClose(channel, closeMode.notifyDisconnect);
        }
        this.channels.remove(channel.id());

        if (delayedClosingChannels != null)
            delayedClosingChannels.remove(channel.id());

        if (idleExpiryManager != null)
            idleExpiryManager.remove(channel.id());
    }

    // Final teardown: close the socket, cancel the key, drop auxiliary tracking, record metrics.
    private void doClose(KafkaChannel channel, boolean notifyDisconnect) {
        SelectionKey key = channel.selectionKey();
        try {
            immediatelyConnectedKeys.remove(key);
            keysWithBufferedRead.remove(key);
            channel.close();
        } catch (IOException e) {
            log.error("Exception closing connection to node {}:", channel.id(), e);
        } finally {
            // Detach the channel so the cancelled key does not pin it in memory.
            key.cancel();
            key.attach(null);
        }

        this.sensors.connectionClosed.record();
        this.explicitlyMutedChannels.remove(channel);
        if (notifyDisconnect)
            this.disconnected.put(channel.id(), channel.state());
    }

    /**
     * check if channel is ready
     */
    @Override
    public boolean isChannelReady(String id) {
        KafkaChannel channel = this.channels.get(id);
        return channel != null && channel.ready();
    }

    // Look up a channel among both open and closing channels; fail loudly if absent.
    private KafkaChannel openOrClosingChannelOrFail(String id) {
        KafkaChannel channel = this.channels.get(id);
        if (channel == null)
            channel = this.closingChannels.get(id);
        if (channel == null)
            throw new IllegalStateException("Attempt to retrieve channel for which there is no connection. Connection id " + id + " existing connections " + channels.keySet());
        return channel;
    }

    /**
     * Return the selector channels.
     */
    public List<KafkaChannel> channels() {
        return new ArrayList<>(channels.values());
    }

    /**
     * Return the channel associated with this connection or `null` if there is no channel associated with the
     * connection.
     */
    public KafkaChannel channel(String id) {
        return this.channels.get(id);
    }

    /**
     * Return the channel with the specified id if it was disconnected, but not yet closed
     * since there are outstanding messages to be processed.
     */
    public KafkaChannel closingChannel(String id) {
        return closingChannels.get(id);
    }

    /**
     * Returns the lowest priority channel chosen using the following sequence:
     * 1) If one or more channels are in closing state, return any one of them
     * 2) If idle expiry manager is enabled, return the least recently updated channel
     * 3) Otherwise return any of the channels
     *
     * This method is used to close a channel to accommodate a new channel on the inter-broker listener
     * when broker-wide `max.connections` limit is enabled.
     */
    public KafkaChannel lowestPriorityChannel() {
        KafkaChannel channel = null;
        if (!closingChannels.isEmpty()) {
            channel = closingChannels.values().iterator().next();
        } else if (idleExpiryManager != null && !idleExpiryManager.lruConnections.isEmpty()) {
            // lruConnections iteration order yields the least recently used connection first
            // (presumably an access-ordered map — confirm against IdleExpiryManager).
            String channelId = idleExpiryManager.lruConnections.keySet().iterator().next();
            channel = channel(channelId);
        } else if (!channels.isEmpty()) {
            channel = channels.values().iterator().next();
        }
        return channel;
    }

    /**
     * Get the channel associated with selectionKey
     */
    private KafkaChannel channel(SelectionKey key) {
        return (KafkaChannel) key.attachment();
    }

    /**
     * Check if given channel has a completed receive
     */
    private boolean hasCompletedReceive(KafkaChannel channel) {
        return completedReceives.containsKey(channel.id());
    }

    /**
     * adds a receive to completed receives
     */
    private void addToCompletedReceives(KafkaChannel channel, NetworkReceive networkReceive, long currentTimeMs) {
        // Enforce the at-most-one-receive-per-channel-per-poll invariant described on poll().
        if (hasCompletedReceive(channel))
            throw new IllegalStateException("Attempting to add second completed receive to channel " + channel.id());

        this.completedReceives.put(channel.id(), networkReceive);
        sensors.recordCompletedReceive(channel.id(), networkReceive.size(), currentTimeMs);
    }

    // only for testing
    public Set<SelectionKey> keys() {
        return new HashSet<>(nioSelector.keys());
    }

    // Tracks per-channel cipher and client metadata and keeps the corresponding
    // connection-count sensors in sync as registrations change.
    class SelectorChannelMetadataRegistry implements ChannelMetadataRegistry {
        private CipherInformation cipherInformation;
        private ClientInformation clientInformation;

        @Override
        public void registerCipherInformation(final CipherInformation cipherInformation) {
            if (this.cipherInformation != null) {
                if (this.cipherInformation.equals(cipherInformation))
                    return;
                // Cipher changed: decrement the count for the old cipher before counting the new one.
                sensors.connectionsByCipher.decrement(this.cipherInformation);
            }
            this.cipherInformation = cipherInformation;
            sensors.connectionsByCipher.increment(cipherInformation);
        }

        @Override
        public CipherInformation cipherInformation() {
            return cipherInformation;
        }

        @Override
        public void registerClientInformation(final
ClientInformation clientInformation) { if (this.clientInformation != null) { if (this.clientInformation.equals(clientInformation)) return; sensors.connectionsByClient.decrement(this.clientInformation); } this.clientInformation = clientInformation; sensors.connectionsByClient.increment(clientInformation); } @Override public ClientInformation clientInformation() { return clientInformation; } @Override public void close() { if (this.cipherInformation != null) { sensors.connectionsByCipher.decrement(this.cipherInformation); this.cipherInformation = null; } if (this.clientInformation != null) { sensors.connectionsByClient.decrement(this.clientInformation); this.clientInformation = null; } } } class SelectorMetrics implements AutoCloseable { private final Metrics metrics; private final Map<String, String> metricTags; private final boolean metricsPerConnection; private final String metricGrpName; private final String perConnectionMetricGrpName; public final Sensor connectionClosed; public final Sensor connectionCreated; public final Sensor successfulAuthentication; public final Sensor successfulReauthentication; public final Sensor successfulAuthenticationNoReauth; public final Sensor reauthenticationLatency; public final Sensor failedAuthentication; public final Sensor failedReauthentication; public final Sensor bytesTransferred; public final Sensor bytesSent; public final Sensor requestsSent; public final Sensor bytesReceived; public final Sensor responsesReceived; public final Sensor selectTime; public final Sensor ioTime; public final IntGaugeSuite<CipherInformation> connectionsByCipher; public final IntGaugeSuite<ClientInformation> connectionsByClient; /* Names of metrics that are not registered through sensors */ private final List<MetricName> topLevelMetricNames = new ArrayList<>(); private final List<Sensor> sensors = new ArrayList<>(); public SelectorMetrics(Metrics metrics, String metricGrpPrefix, Map<String, String> metricTags, boolean metricsPerConnection) { 
this.metrics = metrics; this.metricTags = metricTags; this.metricsPerConnection = metricsPerConnection; this.metricGrpName = metricGrpPrefix + "-metrics"; this.perConnectionMetricGrpName = metricGrpPrefix + "-node-metrics"; StringBuilder tagsSuffix = new StringBuilder(); for (Map.Entry<String, String> tag: metricTags.entrySet()) { tagsSuffix.append(tag.getKey()); tagsSuffix.append("-"); tagsSuffix.append(tag.getValue()); } this.connectionClosed = sensor("connections-closed:" + tagsSuffix); this.connectionClosed.add(createMeter(metrics, metricGrpName, metricTags, "connection-close", "connections closed")); this.connectionCreated = sensor("connections-created:" + tagsSuffix); this.connectionCreated.add(createMeter(metrics, metricGrpName, metricTags, "connection-creation", "new connections established")); this.successfulAuthentication = sensor("successful-authentication:" + tagsSuffix); this.successfulAuthentication.add(createMeter(metrics, metricGrpName, metricTags, "successful-authentication", "connections with successful authentication")); this.successfulReauthentication = sensor("successful-reauthentication:" + tagsSuffix); this.successfulReauthentication.add(createMeter(metrics, metricGrpName, metricTags, "successful-reauthentication", "successful re-authentication of connections")); this.successfulAuthenticationNoReauth = sensor("successful-authentication-no-reauth:" + tagsSuffix); MetricName successfulAuthenticationNoReauthMetricName = metrics.metricName( "successful-authentication-no-reauth-total", metricGrpName, "The total number of connections with successful authentication where the client does not support re-authentication", metricTags); this.successfulAuthenticationNoReauth.add(successfulAuthenticationNoReauthMetricName, new CumulativeSum()); this.failedAuthentication = sensor("failed-authentication:" + tagsSuffix); this.failedAuthentication.add(createMeter(metrics, metricGrpName, metricTags, "failed-authentication", "connections with failed 
authentication")); this.failedReauthentication = sensor("failed-reauthentication:" + tagsSuffix); this.failedReauthentication.add(createMeter(metrics, metricGrpName, metricTags, "failed-reauthentication", "failed re-authentication of connections")); this.reauthenticationLatency = sensor("reauthentication-latency:" + tagsSuffix); MetricName reauthenticationLatencyMaxMetricName = metrics.metricName("reauthentication-latency-max", metricGrpName, "The max latency observed due to re-authentication", metricTags); this.reauthenticationLatency.add(reauthenticationLatencyMaxMetricName, new Max()); MetricName reauthenticationLatencyAvgMetricName = metrics.metricName("reauthentication-latency-avg", metricGrpName, "The average latency observed due to re-authentication", metricTags); this.reauthenticationLatency.add(reauthenticationLatencyAvgMetricName, new Avg()); this.bytesTransferred = sensor("bytes-sent-received:" + tagsSuffix); bytesTransferred.add(createMeter(metrics, metricGrpName, metricTags, new WindowedCount(), "network-io", "network operations (reads or writes) on all connections")); this.bytesSent = sensor("bytes-sent:" + tagsSuffix, bytesTransferred); this.bytesSent.add(createMeter(metrics, metricGrpName, metricTags, "outgoing-byte", "outgoing bytes sent to all servers")); this.requestsSent = sensor("requests-sent:" + tagsSuffix); this.requestsSent.add(createMeter(metrics, metricGrpName, metricTags, new WindowedCount(), "request", "requests sent")); MetricName metricName = metrics.metricName("request-size-avg", metricGrpName, "The average size of requests sent.", metricTags); this.requestsSent.add(metricName, new Avg()); metricName = metrics.metricName("request-size-max", metricGrpName, "The maximum size of any request sent.", metricTags); this.requestsSent.add(metricName, new Max()); this.bytesReceived = sensor("bytes-received:" + tagsSuffix, bytesTransferred); this.bytesReceived.add(createMeter(metrics, metricGrpName, metricTags, "incoming-byte", "bytes read off 
all sockets")); this.responsesReceived = sensor("responses-received:" + tagsSuffix); this.responsesReceived.add(createMeter(metrics, metricGrpName, metricTags, new WindowedCount(), "response", "responses received")); this.selectTime = sensor("select-time:" + tagsSuffix); this.selectTime.add(createMeter(metrics, metricGrpName, metricTags, new WindowedCount(), "select", "times the I/O layer checked for new I/O to perform")); metricName = metrics.metricName("io-wait-time-ns-avg", metricGrpName, "The average length of time the I/O thread spent waiting for a socket ready for reads or writes in nanoseconds.", metricTags); this.selectTime.add(metricName, new Avg()); this.selectTime.add(createIOThreadRatioMeterLegacy(metrics, metricGrpName, metricTags, "io-wait", "waiting")); this.selectTime.add(createIOThreadRatioMeter(metrics, metricGrpName, metricTags, "io-wait", "waiting")); this.ioTime = sensor("io-time:" + tagsSuffix); metricName = metrics.metricName("io-time-ns-avg", metricGrpName, "The average length of time for I/O per select call in nanoseconds.", metricTags); this.ioTime.add(metricName, new Avg()); this.ioTime.add(createIOThreadRatioMeterLegacy(metrics, metricGrpName, metricTags, "io", "doing I/O")); this.ioTime.add(createIOThreadRatioMeter(metrics, metricGrpName, metricTags, "io", "doing I/O")); this.connectionsByCipher = new IntGaugeSuite<>(log, "sslCiphers", metrics, cipherInformation -> { Map<String, String> tags = new LinkedHashMap<>(); tags.put("cipher", cipherInformation.cipher()); tags.put("protocol", cipherInformation.protocol()); tags.putAll(metricTags); return metrics.metricName("connections", metricGrpName, "The number of connections with this SSL cipher and protocol.", tags); }, 100); this.connectionsByClient = new IntGaugeSuite<>(log, "clients", metrics, clientInformation -> { Map<String, String> tags = new LinkedHashMap<>(); tags.put("clientSoftwareName", clientInformation.softwareName()); tags.put("clientSoftwareVersion", 
clientInformation.softwareVersion()); tags.putAll(metricTags); return metrics.metricName("connections", metricGrpName, "The number of connections with this client and version.", tags); }, 100); metricName = metrics.metricName("connection-count", metricGrpName, "The current number of active connections.", metricTags); topLevelMetricNames.add(metricName); this.metrics.addMetric(metricName, (config, now) -> channels.size()); } private Meter createMeter(Metrics metrics, String groupName, Map<String, String> metricTags, SampledStat stat, String baseName, String descriptiveName) { MetricName rateMetricName = metrics.metricName(baseName + "-rate", groupName, String.format("The number of %s per second", descriptiveName), metricTags); MetricName totalMetricName = metrics.metricName(baseName + "-total", groupName, String.format("The total number of %s", descriptiveName), metricTags); if (stat == null) return new Meter(rateMetricName, totalMetricName); else return new Meter(stat, rateMetricName, totalMetricName); } private Meter createMeter(Metrics metrics, String groupName, Map<String, String> metricTags, String baseName, String descriptiveName) { return createMeter(metrics, groupName, metricTags, null, baseName, descriptiveName); } /** * This method generates `time-total` metrics but has a couple of deficiencies: no `-ns` suffix and no dash between basename * and `time-toal` suffix. 
* @deprecated use {{@link #createIOThreadRatioMeter(Metrics, String, Map, String, String)}} for new metrics instead */ @Deprecated private Meter createIOThreadRatioMeterLegacy(Metrics metrics, String groupName, Map<String, String> metricTags, String baseName, String action) { MetricName rateMetricName = metrics.metricName(baseName + "-ratio", groupName, String.format("*Deprecated* The fraction of time the I/O thread spent %s", action), metricTags); MetricName totalMetricName = metrics.metricName(baseName + "time-total", groupName, String.format("*Deprecated* The total time the I/O thread spent %s", action), metricTags); return new Meter(TimeUnit.NANOSECONDS, rateMetricName, totalMetricName); } private Meter createIOThreadRatioMeter(Metrics metrics, String groupName, Map<String, String> metricTags, String baseName, String action) { MetricName rateMetricName = metrics.metricName(baseName + "-ratio", groupName, String.format("The fraction of time the I/O thread spent %s", action), metricTags); MetricName totalMetricName = metrics.metricName(baseName + "-time-ns-total", groupName, String.format("The total time the I/O thread spent %s", action), metricTags); return new Meter(TimeUnit.NANOSECONDS, rateMetricName, totalMetricName); } private Sensor sensor(String name, Sensor... 
parents) { Sensor sensor = metrics.sensor(name, parents); sensors.add(sensor); return sensor; } public void maybeRegisterConnectionMetrics(String connectionId) { if (!connectionId.isEmpty() && metricsPerConnection) { // if one sensor of the metrics has been registered for the connection, // then all other sensors should have been registered; and vice versa String nodeRequestName = "node-" + connectionId + ".requests-sent"; Sensor nodeRequest = this.metrics.getSensor(nodeRequestName); if (nodeRequest == null) { Map<String, String> tags = new LinkedHashMap<>(metricTags); tags.put("node-id", "node-" + connectionId); nodeRequest = sensor(nodeRequestName); nodeRequest.add(createMeter(metrics, perConnectionMetricGrpName, tags, new WindowedCount(), "request", "requests sent")); MetricName metricName = metrics.metricName("request-size-avg", perConnectionMetricGrpName, "The average size of requests sent.", tags); nodeRequest.add(metricName, new Avg()); metricName = metrics.metricName("request-size-max", perConnectionMetricGrpName, "The maximum size of any request sent.", tags); nodeRequest.add(metricName, new Max()); String bytesSentName = "node-" + connectionId + ".bytes-sent"; Sensor bytesSent = sensor(bytesSentName); bytesSent.add(createMeter(metrics, perConnectionMetricGrpName, tags, "outgoing-byte", "outgoing bytes")); String nodeResponseName = "node-" + connectionId + ".responses-received"; Sensor nodeResponse = sensor(nodeResponseName); nodeResponse.add(createMeter(metrics, perConnectionMetricGrpName, tags, new WindowedCount(), "response", "responses received")); String bytesReceivedName = "node-" + connectionId + ".bytes-received"; Sensor bytesReceive = sensor(bytesReceivedName); bytesReceive.add(createMeter(metrics, perConnectionMetricGrpName, tags, "incoming-byte", "incoming bytes")); String nodeTimeName = "node-" + connectionId + ".latency"; Sensor nodeRequestTime = sensor(nodeTimeName); metricName = metrics.metricName("request-latency-avg", 
perConnectionMetricGrpName, tags); nodeRequestTime.add(metricName, new Avg()); metricName = metrics.metricName("request-latency-max", perConnectionMetricGrpName, tags); nodeRequestTime.add(metricName, new Max()); } } } public void recordBytesSent(String connectionId, long bytes, long currentTimeMs) { this.bytesSent.record(bytes, currentTimeMs, false); if (!connectionId.isEmpty()) { String bytesSentName = "node-" + connectionId + ".bytes-sent"; Sensor bytesSent = this.metrics.getSensor(bytesSentName); if (bytesSent != null) bytesSent.record(bytes, currentTimeMs); } } public void recordCompletedSend(String connectionId, long totalBytes, long currentTimeMs) { requestsSent.record(totalBytes, currentTimeMs, false); if (!connectionId.isEmpty()) { String nodeRequestName = "node-" + connectionId + ".requests-sent"; Sensor nodeRequest = this.metrics.getSensor(nodeRequestName); if (nodeRequest != null) nodeRequest.record(totalBytes, currentTimeMs); } } public void recordBytesReceived(String connectionId, long bytes, long currentTimeMs) { this.bytesReceived.record(bytes, currentTimeMs, false); if (!connectionId.isEmpty()) { String bytesReceivedName = "node-" + connectionId + ".bytes-received"; Sensor bytesReceived = this.metrics.getSensor(bytesReceivedName); if (bytesReceived != null) bytesReceived.record(bytes, currentTimeMs); } } public void recordCompletedReceive(String connectionId, long totalBytes, long currentTimeMs) { responsesReceived.record(totalBytes, currentTimeMs, false); if (!connectionId.isEmpty()) { String nodeRequestName = "node-" + connectionId + ".responses-received"; Sensor nodeRequest = this.metrics.getSensor(nodeRequestName); if (nodeRequest != null) nodeRequest.record(totalBytes, currentTimeMs); } } public void close() { for (MetricName metricName : topLevelMetricNames) metrics.removeMetric(metricName); for (Sensor sensor : sensors) metrics.removeSensor(sensor.name()); connectionsByCipher.close(); connectionsByClient.close(); } } /** * Encapsulate a 
channel that must be closed after a specific delay has elapsed due to authentication failure. */ private class DelayedAuthenticationFailureClose { private final KafkaChannel channel; private final long endTimeNanos; private boolean closed; /** * @param channel The channel whose close is being delayed * @param delayMs The amount of time by which the operation should be delayed */ public DelayedAuthenticationFailureClose(KafkaChannel channel, int delayMs) { this.channel = channel; this.endTimeNanos = time.nanoseconds() + (delayMs * 1000L * 1000L); this.closed = false; } /** * Try to close this channel if the delay has expired. * @param currentTimeNanos The current time * @return True if the delay has expired and the channel was closed; false otherwise */ public final boolean tryClose(long currentTimeNanos) { if (endTimeNanos <= currentTimeNanos) closeNow(); return closed; } /** * Close the channel now, regardless of whether the delay has expired or not. */ public final void closeNow() { if (closed) throw new IllegalStateException("Attempt to close a channel that has already been closed"); handleCloseOnAuthenticationFailure(channel); closed = true; } } // helper class for tracking least recently used connections to enable idle connection closing private static class IdleExpiryManager { private final Map<String, Long> lruConnections; private final long connectionsMaxIdleNanos; private long nextIdleCloseCheckTime; public IdleExpiryManager(Time time, long connectionsMaxIdleMs) { this.connectionsMaxIdleNanos = connectionsMaxIdleMs * 1000 * 1000; // initial capacity and load factor are default, we set them explicitly because we want to set accessOrder = true this.lruConnections = new LinkedHashMap<>(16, .75F, true); this.nextIdleCloseCheckTime = time.nanoseconds() + this.connectionsMaxIdleNanos; } public void update(String connectionId, long currentTimeNanos) { lruConnections.put(connectionId, currentTimeNanos); } public Map.Entry<String, Long> pollExpiredConnection(long 
currentTimeNanos) { if (currentTimeNanos <= nextIdleCloseCheckTime) return null; if (lruConnections.isEmpty()) { nextIdleCloseCheckTime = currentTimeNanos + connectionsMaxIdleNanos; return null; } Map.Entry<String, Long> oldestConnectionEntry = lruConnections.entrySet().iterator().next(); Long connectionLastActiveTime = oldestConnectionEntry.getValue(); nextIdleCloseCheckTime = connectionLastActiveTime + connectionsMaxIdleNanos; if (currentTimeNanos > nextIdleCloseCheckTime) return oldestConnectionEntry; else return null; } public void remove(String connectionId) { lruConnections.remove(connectionId); } } //package-private for testing boolean isOutOfMemory() { return outOfMemory; } //package-private for testing boolean isMadeReadProgressLastPoll() { return madeReadProgressLastPoll; } // package-private for testing Map<?, ?> delayedClosingChannels() { return delayedClosingChannels; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/Send.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.network;

import java.io.IOException;

/**
 * This interface models the in-progress sending of data.
 */
public interface Send {

    /**
     * Is this send complete?
     *
     * @return true if all bytes of this send have been written
     */
    boolean completed();

    /**
     * Write some as-yet unwritten bytes from this send to the provided channel. It may take multiple calls for the send
     * to be completely written
     * @param channel The Channel to write to
     * @return The number of bytes written
     * @throws IOException If the write fails
     */
    long writeTo(TransferableChannel channel) throws IOException;

    /**
     * Size of the send
     *
     * @return the total number of bytes this send will transfer
     */
    long size();

}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/SslChannelBuilder.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.network;

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.config.SslConfigs;
import org.apache.kafka.common.config.internals.BrokerSecurityConfigs;
import org.apache.kafka.common.memory.MemoryPool;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.auth.KafkaPrincipalBuilder;
import org.apache.kafka.common.security.auth.KafkaPrincipalSerde;
import org.apache.kafka.common.security.auth.SslAuthenticationContext;
import org.apache.kafka.common.security.ssl.SslFactory;
import org.apache.kafka.common.security.ssl.SslPrincipalMapper;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;

import java.io.Closeable;
import java.io.IOException;
import java.net.InetAddress;
import java.nio.channels.SelectionKey;
import java.nio.channels.SocketChannel;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Supplier;

/**
 * {@link ChannelBuilder} that creates SSL-secured {@link KafkaChannel}s. Supports dynamic
 * reconfiguration of the reconfigurable SSL configs via {@link ListenerReconfigurable}.
 */
public class SslChannelBuilder implements ChannelBuilder, ListenerReconfigurable {
    private final ListenerName listenerName;
    private final boolean isInterBrokerListener;
    private SslFactory sslFactory;
    private Mode mode;
    private Map<String, ?> configs;
    private SslPrincipalMapper sslPrincipalMapper;
    private final Logger log;

    /**
     * Constructs an SSL channel builder. ListenerName is provided only
     * for server channel builder and will be null for client channel builder.
     */
    public SslChannelBuilder(Mode mode, ListenerName listenerName, boolean isInterBrokerListener, LogContext logContext) {
        this.mode = mode;
        this.listenerName = listenerName;
        this.isInterBrokerListener = isInterBrokerListener;
        this.log = logContext.logger(getClass());
    }

    /**
     * Configures this builder: parses the optional SSL principal mapping rules and then creates
     * and configures the {@link SslFactory}. Any non-Kafka exception is wrapped in a
     * {@link KafkaException}; a {@link KafkaException} is rethrown unchanged.
     */
    public void configure(Map<String, ?> configs) throws KafkaException {
        try {
            this.configs = configs;
            String sslPrincipalMappingRules = (String) configs.get(BrokerSecurityConfigs.SSL_PRINCIPAL_MAPPING_RULES_CONFIG);
            if (sslPrincipalMappingRules != null)
                sslPrincipalMapper = SslPrincipalMapper.fromRules(sslPrincipalMappingRules);
            this.sslFactory = new SslFactory(mode, null, isInterBrokerListener);
            this.sslFactory.configure(this.configs);
        } catch (KafkaException e) {
            throw e;
        } catch (Exception e) {
            throw new KafkaException(e);
        }
    }

    @Override
    public Set<String> reconfigurableConfigs() {
        return SslConfigs.RECONFIGURABLE_CONFIGS;
    }

    @Override
    public void validateReconfiguration(Map<String, ?> configs) {
        sslFactory.validateReconfiguration(configs);
    }

    @Override
    public void reconfigure(Map<String, ?> configs) {
        sslFactory.reconfigure(configs);
    }

    @Override
    public ListenerName listenerName() {
        return listenerName;
    }

    /**
     * Builds a {@link KafkaChannel} backed by an {@link SslTransportLayer} and an
     * {@link SslAuthenticator}. Falls back to {@link MemoryPool#NONE} when no pool is supplied.
     *
     * @throws KafkaException if the transport layer or channel cannot be created
     */
    @Override
    public KafkaChannel buildChannel(String id, SelectionKey key, int maxReceiveSize, MemoryPool memoryPool,
                                     ChannelMetadataRegistry metadataRegistry) throws KafkaException {
        try {
            SslTransportLayer transportLayer = buildTransportLayer(sslFactory, id, key, metadataRegistry);
            Supplier<Authenticator> authenticatorCreator = () -> new SslAuthenticator(configs, transportLayer, listenerName, sslPrincipalMapper);
            return new KafkaChannel(id, transportLayer, authenticatorCreator, maxReceiveSize,
                    memoryPool != null ? memoryPool : MemoryPool.NONE, metadataRegistry);
        } catch (Exception e) {
            throw new KafkaException(e);
        }
    }

    @Override
    public void close() {
        if (sslFactory != null) sslFactory.close();
    }

    /**
     * Creates the SSL transport layer for the socket channel attached to the given selection key.
     * Protected so tests can substitute a different transport layer.
     */
    protected SslTransportLayer buildTransportLayer(SslFactory sslFactory, String id, SelectionKey key,
                                                    ChannelMetadataRegistry metadataRegistry) throws IOException {
        SocketChannel socketChannel = (SocketChannel) key.channel();
        return SslTransportLayer.create(id, key,
            sslFactory.createSslEngine(socketChannel.socket()),
            metadataRegistry);
    }

    /**
     * Note that client SSL authentication is handled in {@link SslTransportLayer}. This class is only used
     * to transform the derived principal using a {@link KafkaPrincipalBuilder} configured by the user.
     */
    private static class SslAuthenticator implements Authenticator {
        private final SslTransportLayer transportLayer;
        private final KafkaPrincipalBuilder principalBuilder;
        private final ListenerName listenerName;

        private SslAuthenticator(Map<String, ?> configs, SslTransportLayer transportLayer, ListenerName listenerName, SslPrincipalMapper sslPrincipalMapper) {
            this.transportLayer = transportLayer;
            this.principalBuilder = ChannelBuilders.createPrincipalBuilder(configs, null, sslPrincipalMapper);
            this.listenerName = listenerName;
        }

        /**
         * No-op: authentication itself is performed by the SSL handshake in the transport layer,
         * so there is nothing for this authenticator to do here.
         */
        @Override
        public void authenticate() {}

        /**
         * Constructs Principal using configured principalBuilder.
         * @return the built principal
         */
        @Override
        public KafkaPrincipal principal() {
            InetAddress clientAddress = transportLayer.socketChannel().socket().getInetAddress();
            // listenerName should only be null in Client mode where principal() should not be called
            if (listenerName == null)
                throw new IllegalStateException("Unexpected call to principal() when listenerName is null");
            SslAuthenticationContext context = new SslAuthenticationContext(
                    transportLayer.sslSession(),
                    clientAddress,
                    listenerName.value());
            return principalBuilder.build(context);
        }

        @Override
        public Optional<KafkaPrincipalSerde> principalSerde() {
            return principalBuilder instanceof KafkaPrincipalSerde ? Optional.of((KafkaPrincipalSerde) principalBuilder) : Optional.empty();
        }

        @Override
        public void close() throws IOException {
            if (principalBuilder instanceof Closeable)
                Utils.closeQuietly((Closeable) principalBuilder, "principal builder");
        }

        /**
         * SslAuthenticator doesn't implement any additional authentication mechanism.
         * @return true
         */
        @Override
        public boolean complete() {
            return true;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/SslTransportLayer.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.network; import java.io.IOException; import java.io.EOFException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.channels.SocketChannel; import java.nio.channels.SelectionKey; import java.nio.channels.CancelledKeyException; import java.security.Principal; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLEngineResult; import javax.net.ssl.SSLEngineResult.HandshakeStatus; import javax.net.ssl.SSLEngineResult.Status; import javax.net.ssl.SSLException; import javax.net.ssl.SSLHandshakeException; import javax.net.ssl.SSLKeyException; import javax.net.ssl.SSLPeerUnverifiedException; import javax.net.ssl.SSLProtocolException; import javax.net.ssl.SSLSession; import org.apache.kafka.common.errors.SslAuthenticationException; import org.apache.kafka.common.security.auth.KafkaPrincipal; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.ByteBufferUnmapper; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; /* * Transport layer for SSL communication * * * TLS v1.3 notes: * https://tools.ietf.org/html/rfc8446#section-4.6 : Post-Handshake 
Messages * "TLS also allows other messages to be sent after the main handshake. * These messages use a handshake content type and are encrypted under * the appropriate application traffic key." */ public class SslTransportLayer implements TransportLayer { private enum State { // Initial state NOT_INITIALIZED, // SSLEngine is in handshake mode HANDSHAKE, // SSL handshake failed, connection will be terminated HANDSHAKE_FAILED, // SSLEngine has completed handshake, post-handshake messages may be pending for TLSv1.3 POST_HANDSHAKE, // SSLEngine has completed handshake, any post-handshake messages have been processed for TLSv1.3 // For TLSv1.3, we move the channel to READY state when incoming data is processed after handshake READY, // Channel is being closed CLOSING } private static final String TLS13 = "TLSv1.3"; private final String channelId; private final SSLEngine sslEngine; private final SelectionKey key; private final SocketChannel socketChannel; private final ChannelMetadataRegistry metadataRegistry; private final Logger log; private HandshakeStatus handshakeStatus; private SSLEngineResult handshakeResult; private State state; private SslAuthenticationException handshakeException; private ByteBuffer netReadBuffer; private ByteBuffer netWriteBuffer; private ByteBuffer appReadBuffer; private ByteBuffer fileChannelBuffer; private boolean hasBytesBuffered; public static SslTransportLayer create(String channelId, SelectionKey key, SSLEngine sslEngine, ChannelMetadataRegistry metadataRegistry) throws IOException { return new SslTransportLayer(channelId, key, sslEngine, metadataRegistry); } // Prefer `create`, only use this in tests SslTransportLayer(String channelId, SelectionKey key, SSLEngine sslEngine, ChannelMetadataRegistry metadataRegistry) { this.channelId = channelId; this.key = key; this.socketChannel = (SocketChannel) key.channel(); this.sslEngine = sslEngine; this.state = State.NOT_INITIALIZED; this.metadataRegistry = metadataRegistry; final LogContext 
logContext = new LogContext(String.format("[SslTransportLayer channelId=%s key=%s] ", channelId, key)); this.log = logContext.logger(getClass()); } // Visible for testing protected void startHandshake() throws IOException { if (state != State.NOT_INITIALIZED) throw new IllegalStateException("startHandshake() can only be called once, state " + state); this.netReadBuffer = ByteBuffer.allocate(netReadBufferSize()); this.netWriteBuffer = ByteBuffer.allocate(netWriteBufferSize()); this.appReadBuffer = ByteBuffer.allocate(applicationBufferSize()); netWriteBuffer.limit(0); netReadBuffer.limit(0); state = State.HANDSHAKE; //initiate handshake sslEngine.beginHandshake(); handshakeStatus = sslEngine.getHandshakeStatus(); } @Override public boolean ready() { return state == State.POST_HANDSHAKE || state == State.READY; } /** * does socketChannel.finishConnect() */ @Override public boolean finishConnect() throws IOException { boolean connected = socketChannel.finishConnect(); if (connected) key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT | SelectionKey.OP_READ); return connected; } /** * disconnects selectionKey. */ @Override public void disconnect() { key.cancel(); } @Override public SocketChannel socketChannel() { return socketChannel; } @Override public SelectionKey selectionKey() { return key; } @Override public boolean isOpen() { return socketChannel.isOpen(); } @Override public boolean isConnected() { return socketChannel.isConnected(); } /** * Sends an SSL close message and closes socketChannel. 
*/ @Override public void close() throws IOException { State prevState = state; if (state == State.CLOSING) return; state = State.CLOSING; sslEngine.closeOutbound(); try { if (prevState != State.NOT_INITIALIZED && isConnected()) { if (!flush(netWriteBuffer)) { throw new IOException("Remaining data in the network buffer, can't send SSL close message."); } //prep the buffer for the close message netWriteBuffer.clear(); //perform the close, since we called sslEngine.closeOutbound SSLEngineResult wrapResult = sslEngine.wrap(ByteUtils.EMPTY_BUF, netWriteBuffer); //we should be in a close state if (wrapResult.getStatus() != SSLEngineResult.Status.CLOSED) { throw new IOException("Unexpected status returned by SSLEngine.wrap, expected CLOSED, received " + wrapResult.getStatus() + ". Will not send close message to peer."); } netWriteBuffer.flip(); flush(netWriteBuffer); } } catch (IOException ie) { log.debug("Failed to send SSL Close message", ie); } finally { socketChannel.socket().close(); socketChannel.close(); netReadBuffer = null; netWriteBuffer = null; appReadBuffer = null; if (fileChannelBuffer != null) { ByteBufferUnmapper.unmap("fileChannelBuffer", fileChannelBuffer); fileChannelBuffer = null; } } } /** * returns true if there are any pending contents in netWriteBuffer */ @Override public boolean hasPendingWrites() { return netWriteBuffer.hasRemaining(); } /** * Reads available bytes from socket channel to `netReadBuffer`. * Visible for testing. * @return number of bytes read */ protected int readFromSocketChannel() throws IOException { return socketChannel.read(netReadBuffer); } /** * Flushes the buffer to the network, non blocking. * Visible for testing. 
* @param buf ByteBuffer * @return boolean true if the buffer has been emptied out, false otherwise * @throws IOException */ protected boolean flush(ByteBuffer buf) throws IOException { int remaining = buf.remaining(); if (remaining > 0) { int written = socketChannel.write(buf); return written >= remaining; } return true; } /** * Performs SSL handshake, non blocking. * Before application data (kafka protocols) can be sent client & kafka broker must * perform ssl handshake. * During the handshake SSLEngine generates encrypted data that will be transported over socketChannel. * Each SSLEngine operation generates SSLEngineResult , of which SSLEngineResult.handshakeStatus field is used to * determine what operation needs to occur to move handshake along. * A typical handshake might look like this. * +-------------+----------------------------------+-------------+ * | client | SSL/TLS message | HSStatus | * +-------------+----------------------------------+-------------+ * | wrap() | ClientHello | NEED_UNWRAP | * | unwrap() | ServerHello/Cert/ServerHelloDone | NEED_WRAP | * | wrap() | ClientKeyExchange | NEED_WRAP | * | wrap() | ChangeCipherSpec | NEED_WRAP | * | wrap() | Finished | NEED_UNWRAP | * | unwrap() | ChangeCipherSpec | NEED_UNWRAP | * | unwrap() | Finished | FINISHED | * +-------------+----------------------------------+-------------+ * * @throws IOException if read/write fails * @throws SslAuthenticationException if handshake fails with an {@link SSLException} */ @Override public void handshake() throws IOException { if (state == State.NOT_INITIALIZED) { try { startHandshake(); } catch (SSLException e) { maybeProcessHandshakeFailure(e, false, null); } } if (ready()) throw renegotiationException(); if (state == State.CLOSING) throw closingException(); int read = 0; boolean readable = key.isReadable(); try { // Read any available bytes before attempting any writes to ensure that handshake failures // reported by the peer are processed even if writes fail (since 
peer closes connection // if handshake fails) if (readable) read = readFromSocketChannel(); doHandshake(); if (ready()) updateBytesBuffered(true); } catch (SSLException e) { maybeProcessHandshakeFailure(e, true, null); } catch (IOException e) { maybeThrowSslAuthenticationException(); // This exception could be due to a write. If there is data available to unwrap in the buffer, or data available // in the socket channel to read and unwrap, process the data so that any SSL handshake exceptions are reported. try { do { log.trace("Process any available bytes from peer, netReadBuffer {} netWriterBuffer {} handshakeStatus {} readable? {}", netReadBuffer, netWriteBuffer, handshakeStatus, readable); handshakeWrapAfterFailure(false); handshakeUnwrap(false, true); } while (readable && readFromSocketChannel() > 0); } catch (SSLException e1) { maybeProcessHandshakeFailure(e1, false, e); } // If we get here, this is not a handshake failure, throw the original IOException throw e; } // Read from socket failed, so throw any pending handshake exception or EOF exception. 
if (read == -1) { maybeThrowSslAuthenticationException(); throw new EOFException("EOF during handshake, handshake status is " + handshakeStatus); } } @SuppressWarnings("fallthrough") private void doHandshake() throws IOException { boolean read = key.isReadable(); boolean write = key.isWritable(); handshakeStatus = sslEngine.getHandshakeStatus(); if (!flush(netWriteBuffer)) { key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); return; } // Throw any pending handshake exception since `netWriteBuffer` has been flushed maybeThrowSslAuthenticationException(); switch (handshakeStatus) { case NEED_TASK: log.trace("SSLHandshake NEED_TASK channelId {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {}", channelId, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position()); handshakeStatus = runDelegatedTasks(); break; case NEED_WRAP: log.trace("SSLHandshake NEED_WRAP channelId {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {}", channelId, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position()); handshakeResult = handshakeWrap(write); if (handshakeResult.getStatus() == Status.BUFFER_OVERFLOW) { int currentNetWriteBufferSize = netWriteBufferSize(); netWriteBuffer.compact(); netWriteBuffer = Utils.ensureCapacity(netWriteBuffer, currentNetWriteBufferSize); netWriteBuffer.flip(); if (netWriteBuffer.limit() >= currentNetWriteBufferSize) { throw new IllegalStateException("Buffer overflow when available data size (" + netWriteBuffer.limit() + ") >= network buffer size (" + currentNetWriteBufferSize + ")"); } } else if (handshakeResult.getStatus() == Status.BUFFER_UNDERFLOW) { throw new IllegalStateException("Should not have received BUFFER_UNDERFLOW during handshake WRAP."); } else if (handshakeResult.getStatus() == Status.CLOSED) { throw new EOFException(); } log.trace("SSLHandshake NEED_WRAP channelId {}, handshakeResult {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {}", 
channelId, handshakeResult, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position()); //if handshake status is not NEED_UNWRAP or unable to flush netWriteBuffer contents //we will break here otherwise we can do need_unwrap in the same call. if (handshakeStatus != HandshakeStatus.NEED_UNWRAP || !flush(netWriteBuffer)) { key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); break; } case NEED_UNWRAP: log.trace("SSLHandshake NEED_UNWRAP channelId {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {}", channelId, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position()); do { handshakeResult = handshakeUnwrap(read, false); if (handshakeResult.getStatus() == Status.BUFFER_OVERFLOW) { int currentAppBufferSize = applicationBufferSize(); appReadBuffer = Utils.ensureCapacity(appReadBuffer, currentAppBufferSize); if (appReadBuffer.position() > currentAppBufferSize) { throw new IllegalStateException("Buffer underflow when available data size (" + appReadBuffer.position() + ") > packet buffer size (" + currentAppBufferSize + ")"); } } } while (handshakeResult.getStatus() == Status.BUFFER_OVERFLOW); if (handshakeResult.getStatus() == Status.BUFFER_UNDERFLOW) { int currentNetReadBufferSize = netReadBufferSize(); netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentNetReadBufferSize); if (netReadBuffer.position() >= currentNetReadBufferSize) { throw new IllegalStateException("Buffer underflow when there is available data"); } } else if (handshakeResult.getStatus() == Status.CLOSED) { throw new EOFException("SSL handshake status CLOSED during handshake UNWRAP"); } log.trace("SSLHandshake NEED_UNWRAP channelId {}, handshakeResult {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {}", channelId, handshakeResult, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position()); //if handshakeStatus completed than fall-through to finished status. 
//after handshake is finished there is no data left to read/write in socketChannel. //so the selector won't invoke this channel if we don't go through the handshakeFinished here. if (handshakeStatus != HandshakeStatus.FINISHED) { if (handshakeStatus == HandshakeStatus.NEED_WRAP) { key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); } else if (handshakeStatus == HandshakeStatus.NEED_UNWRAP) { key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); } break; } case FINISHED: handshakeFinished(); break; case NOT_HANDSHAKING: handshakeFinished(); break; default: throw new IllegalStateException(String.format("Unexpected status [%s]", handshakeStatus)); } } private SSLHandshakeException renegotiationException() { return new SSLHandshakeException("Renegotiation is not supported"); } private IllegalStateException closingException() { throw new IllegalStateException("Channel is in closing state"); } /** * Executes the SSLEngine tasks needed. * @return HandshakeStatus */ private HandshakeStatus runDelegatedTasks() { for (;;) { Runnable task = delegatedTask(); if (task == null) { break; } task.run(); } return sslEngine.getHandshakeStatus(); } /** * Checks if the handshake status is finished * Sets the interestOps for the selectionKey. */ private void handshakeFinished() throws IOException { // SSLEngine.getHandshakeStatus is transient and it doesn't record FINISHED status properly. // It can move from FINISHED status to NOT_HANDSHAKING after the handshake is completed. // Hence we also need to check handshakeResult.getHandshakeStatus() if the handshake finished or not if (handshakeResult.getHandshakeStatus() == HandshakeStatus.FINISHED) { //we are complete if we have delivered the last packet //remove OP_WRITE if we are complete, otherwise we still have data to write if (netWriteBuffer.hasRemaining()) key.interestOps(key.interestOps() | SelectionKey.OP_WRITE); else { SSLSession session = sslEngine.getSession(); state = session.getProtocol().equals(TLS13) ? 
State.POST_HANDSHAKE : State.READY; key.interestOps(key.interestOps() & ~SelectionKey.OP_WRITE); log.debug("SSL handshake completed successfully with peerHost '{}' peerPort {} peerPrincipal '{}' protocol '{}' cipherSuite '{}'", session.getPeerHost(), session.getPeerPort(), peerPrincipal(), session.getProtocol(), session.getCipherSuite()); metadataRegistry.registerCipherInformation( new CipherInformation(session.getCipherSuite(), session.getProtocol())); } log.trace("SSLHandshake FINISHED channelId {}, appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {} ", channelId, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position()); } else { throw new IOException("NOT_HANDSHAKING during handshake"); } } /** * Performs the WRAP function * @param doWrite boolean * @return SSLEngineResult * @throws IOException */ private SSLEngineResult handshakeWrap(boolean doWrite) throws IOException { log.trace("SSLHandshake handshakeWrap {}", channelId); if (netWriteBuffer.hasRemaining()) throw new IllegalStateException("handshakeWrap called with netWriteBuffer not empty"); //this should never be called with a network buffer that contains data //so we can clear it here. 
netWriteBuffer.clear(); SSLEngineResult result; try { result = sslEngine.wrap(ByteUtils.EMPTY_BUF, netWriteBuffer); } finally { //prepare the results to be written netWriteBuffer.flip(); } handshakeStatus = result.getHandshakeStatus(); if (result.getStatus() == SSLEngineResult.Status.OK && result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) { handshakeStatus = runDelegatedTasks(); } if (doWrite) flush(netWriteBuffer); return result; } /** * Perform handshake unwrap * @param doRead boolean If true, read more from the socket channel * @param ignoreHandshakeStatus If true, continue to unwrap if data available regardless of handshake status * @return SSLEngineResult * @throws IOException */ private SSLEngineResult handshakeUnwrap(boolean doRead, boolean ignoreHandshakeStatus) throws IOException { log.trace("SSLHandshake handshakeUnwrap {}", channelId); SSLEngineResult result; int read = 0; if (doRead) read = readFromSocketChannel(); boolean cont; do { //prepare the buffer with the incoming data int position = netReadBuffer.position(); netReadBuffer.flip(); result = sslEngine.unwrap(netReadBuffer, appReadBuffer); netReadBuffer.compact(); handshakeStatus = result.getHandshakeStatus(); if (result.getStatus() == SSLEngineResult.Status.OK && result.getHandshakeStatus() == HandshakeStatus.NEED_TASK) { handshakeStatus = runDelegatedTasks(); } cont = (result.getStatus() == SSLEngineResult.Status.OK && handshakeStatus == HandshakeStatus.NEED_UNWRAP) || (ignoreHandshakeStatus && netReadBuffer.position() != position); log.trace("SSLHandshake handshakeUnwrap: handshakeStatus {} status {}", handshakeStatus, result.getStatus()); } while (netReadBuffer.position() != 0 && cont); // Throw EOF exception for failed read after processing already received data // so that handshake failures are reported correctly if (read == -1) throw new EOFException("EOF during handshake, handshake status is " + handshakeStatus); return result; } /** * Reads a sequence of bytes from this channel 
into the given buffer. Reads as much as possible * until either the dst buffer is full or there is no more data in the socket. * * @param dst The buffer into which bytes are to be transferred * @return The number of bytes read, possible zero or -1 if the channel has reached end-of-stream * and no more data is available * @throws IOException if some other I/O error occurs */ @Override public int read(ByteBuffer dst) throws IOException { if (state == State.CLOSING) return -1; else if (!ready()) return 0; //if we have unread decrypted data in appReadBuffer read that into dst buffer. int read = 0; if (appReadBuffer.position() > 0) { read = readFromAppBuffer(dst); } boolean readFromNetwork = false; boolean isClosed = false; // Each loop reads at most once from the socket. while (dst.remaining() > 0) { int netread = 0; netReadBuffer = Utils.ensureCapacity(netReadBuffer, netReadBufferSize()); if (netReadBuffer.remaining() > 0) { netread = readFromSocketChannel(); if (netread > 0) readFromNetwork = true; } while (netReadBuffer.position() > 0) { netReadBuffer.flip(); SSLEngineResult unwrapResult; try { unwrapResult = sslEngine.unwrap(netReadBuffer, appReadBuffer); if (state == State.POST_HANDSHAKE && appReadBuffer.position() != 0) { // For TLSv1.3, we have finished processing post-handshake messages since we are now processing data state = State.READY; } } catch (SSLException e) { // For TLSv1.3, handle SSL exceptions while processing post-handshake messages as authentication exceptions if (state == State.POST_HANDSHAKE) { state = State.HANDSHAKE_FAILED; throw new SslAuthenticationException("Failed to process post-handshake messages", e); } else throw e; } netReadBuffer.compact(); // reject renegotiation if TLS < 1.3, key updates for TLS 1.3 are allowed if (unwrapResult.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING && unwrapResult.getHandshakeStatus() != HandshakeStatus.FINISHED && unwrapResult.getStatus() == Status.OK && 
!sslEngine.getSession().getProtocol().equals(TLS13)) { log.error("Renegotiation requested, but it is not supported, channelId {}, " + "appReadBuffer pos {}, netReadBuffer pos {}, netWriteBuffer pos {} handshakeStatus {}", channelId, appReadBuffer.position(), netReadBuffer.position(), netWriteBuffer.position(), unwrapResult.getHandshakeStatus()); throw renegotiationException(); } if (unwrapResult.getStatus() == Status.OK) { read += readFromAppBuffer(dst); } else if (unwrapResult.getStatus() == Status.BUFFER_OVERFLOW) { int currentApplicationBufferSize = applicationBufferSize(); appReadBuffer = Utils.ensureCapacity(appReadBuffer, currentApplicationBufferSize); if (appReadBuffer.position() >= currentApplicationBufferSize) { throw new IllegalStateException("Buffer overflow when available data size (" + appReadBuffer.position() + ") >= application buffer size (" + currentApplicationBufferSize + ")"); } // appReadBuffer will extended upto currentApplicationBufferSize // we need to read the existing content into dst before we can do unwrap again. If there are no space in dst // we can break here. if (dst.hasRemaining()) read += readFromAppBuffer(dst); else break; } else if (unwrapResult.getStatus() == Status.BUFFER_UNDERFLOW) { int currentNetReadBufferSize = netReadBufferSize(); netReadBuffer = Utils.ensureCapacity(netReadBuffer, currentNetReadBufferSize); if (netReadBuffer.position() >= currentNetReadBufferSize) { throw new IllegalStateException("Buffer underflow when available data size (" + netReadBuffer.position() + ") > packet buffer size (" + currentNetReadBufferSize + ")"); } break; } else if (unwrapResult.getStatus() == Status.CLOSED) { // If data has been read and unwrapped, return the data. Close will be handled on the next poll. 
if (appReadBuffer.position() == 0 && read == 0) throw new EOFException(); else { isClosed = true; break; } } } if (read == 0 && netread < 0) throw new EOFException("EOF during read"); if (netread <= 0 || isClosed) break; } updateBytesBuffered(readFromNetwork || read > 0); // If data has been read and unwrapped, return the data even if end-of-stream, channel will be closed // on a subsequent poll. return read; } /** * Reads a sequence of bytes from this channel into the given buffers. * * @param dsts - The buffers into which bytes are to be transferred. * @return The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. * @throws IOException if some other I/O error occurs */ @Override public long read(ByteBuffer[] dsts) throws IOException { return read(dsts, 0, dsts.length); } /** * Reads a sequence of bytes from this channel into a subsequence of the given buffers. * @param dsts - The buffers into which bytes are to be transferred * @param offset - The offset within the buffer array of the first buffer into which bytes are to be transferred; must be non-negative and no larger than dsts.length. * @param length - The maximum number of buffers to be accessed; must be non-negative and no larger than dsts.length - offset * @return The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream. * @throws IOException if some other I/O error occurs */ @Override public long read(ByteBuffer[] dsts, int offset, int length) throws IOException { if ((offset < 0) || (length < 0) || (offset > dsts.length - length)) throw new IndexOutOfBoundsException(); int totalRead = 0; int i = offset; while (i < length) { if (dsts[i].hasRemaining()) { int read = read(dsts[i]); if (read > 0) totalRead += read; else break; } if (!dsts[i].hasRemaining()) { i++; } } return totalRead; } /** * Writes a sequence of bytes to this channel from the given buffer. 
* * @param src The buffer from which bytes are to be retrieved * @return The number of bytes read from src, possibly zero, or -1 if the channel has reached end-of-stream * @throws IOException If some other I/O error occurs */ @Override public int write(ByteBuffer src) throws IOException { if (state == State.CLOSING) throw closingException(); if (!ready()) return 0; int written = 0; while (flush(netWriteBuffer) && src.hasRemaining()) { netWriteBuffer.clear(); SSLEngineResult wrapResult = sslEngine.wrap(src, netWriteBuffer); netWriteBuffer.flip(); // reject renegotiation if TLS < 1.3, key updates for TLS 1.3 are allowed if (wrapResult.getHandshakeStatus() != HandshakeStatus.NOT_HANDSHAKING && wrapResult.getStatus() == Status.OK && !sslEngine.getSession().getProtocol().equals(TLS13)) { throw renegotiationException(); } if (wrapResult.getStatus() == Status.OK) { written += wrapResult.bytesConsumed(); } else if (wrapResult.getStatus() == Status.BUFFER_OVERFLOW) { // BUFFER_OVERFLOW means that the last `wrap` call had no effect, so we expand the buffer and try again netWriteBuffer = Utils.ensureCapacity(netWriteBuffer, netWriteBufferSize()); netWriteBuffer.position(netWriteBuffer.limit()); } else if (wrapResult.getStatus() == Status.BUFFER_UNDERFLOW) { throw new IllegalStateException("SSL BUFFER_UNDERFLOW during write"); } else if (wrapResult.getStatus() == Status.CLOSED) { throw new EOFException(); } } return written; } /** * Writes a sequence of bytes to this channel from the subsequence of the given buffers. * * @param srcs The buffers from which bytes are to be retrieved * @param offset The offset within the buffer array of the first buffer from which bytes are to be retrieved; must be non-negative and no larger than srcs.length. * @param length - The maximum number of buffers to be accessed; must be non-negative and no larger than srcs.length - offset. * @return returns no.of bytes written , possibly zero. 
* @throws IOException If some other I/O error occurs */ @Override public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { if ((offset < 0) || (length < 0) || (offset > srcs.length - length)) throw new IndexOutOfBoundsException(); int totalWritten = 0; int i = offset; while (i < length) { if (srcs[i].hasRemaining() || hasPendingWrites()) { int written = write(srcs[i]); if (written > 0) { totalWritten += written; } } if (!srcs[i].hasRemaining() && !hasPendingWrites()) { i++; } else { // if we are unable to write the current buffer to socketChannel we should break, // as we might have reached max socket send buffer size. break; } } return totalWritten; } /** * Writes a sequence of bytes to this channel from the given buffers. * * @param srcs The buffers from which bytes are to be retrieved * @return returns no.of bytes consumed by SSLEngine.wrap , possibly zero. * @throws IOException If some other I/O error occurs */ @Override public long write(ByteBuffer[] srcs) throws IOException { return write(srcs, 0, srcs.length); } /** * SSLSession's peerPrincipal for the remote host. 
* @return Principal */ public Principal peerPrincipal() { try { return sslEngine.getSession().getPeerPrincipal(); } catch (SSLPeerUnverifiedException se) { log.debug("SSL peer is not authenticated, returning ANONYMOUS instead"); return KafkaPrincipal.ANONYMOUS; } } /** * returns an SSL Session after the handshake is established * throws IllegalStateException if the handshake is not established */ public SSLSession sslSession() throws IllegalStateException { return sslEngine.getSession(); } /** * Adds interestOps to SelectionKey of the TransportLayer * @param ops SelectionKey interestOps */ @Override public void addInterestOps(int ops) { if (!key.isValid()) throw new CancelledKeyException(); else if (!ready()) throw new IllegalStateException("handshake is not completed"); key.interestOps(key.interestOps() | ops); } /** * removes interestOps to SelectionKey of the TransportLayer * @param ops SelectionKey interestOps */ @Override public void removeInterestOps(int ops) { if (!key.isValid()) throw new CancelledKeyException(); else if (!ready()) throw new IllegalStateException("handshake is not completed"); key.interestOps(key.interestOps() & ~ops); } /** * returns delegatedTask for the SSLEngine. 
*/ protected Runnable delegatedTask() { return sslEngine.getDelegatedTask(); } /** * transfers appReadBuffer contents (decrypted data) into dst bytebuffer * @param dst ByteBuffer */ private int readFromAppBuffer(ByteBuffer dst) { appReadBuffer.flip(); int remaining = Math.min(appReadBuffer.remaining(), dst.remaining()); if (remaining > 0) { int limit = appReadBuffer.limit(); appReadBuffer.limit(appReadBuffer.position() + remaining); dst.put(appReadBuffer); appReadBuffer.limit(limit); } appReadBuffer.compact(); return remaining; } protected int netReadBufferSize() { return sslEngine.getSession().getPacketBufferSize(); } protected int netWriteBufferSize() { return sslEngine.getSession().getPacketBufferSize(); } protected int applicationBufferSize() { return sslEngine.getSession().getApplicationBufferSize(); } protected ByteBuffer netReadBuffer() { return netReadBuffer; } // Visibility for testing protected ByteBuffer appReadBuffer() { return appReadBuffer; } /** * SSL exceptions are propagated as authentication failures so that clients can avoid * retries and report the failure. If `flush` is true, exceptions are propagated after * any pending outgoing bytes are flushed to ensure that the peer is notified of the failure. */ private void handshakeFailure(SSLException sslException, boolean flush) throws IOException { //Release all resources such as internal buffers that SSLEngine is managing log.debug("SSL Handshake failed", sslException); sslEngine.closeOutbound(); try { sslEngine.closeInbound(); } catch (SSLException e) { log.debug("SSLEngine.closeInBound() raised an exception.", e); } state = State.HANDSHAKE_FAILED; handshakeException = new SslAuthenticationException("SSL handshake failed", sslException); // Attempt to flush any outgoing bytes. If flush doesn't complete, delay exception handling until outgoing bytes // are flushed. 
If write fails because remote end has closed the channel, log the I/O exception and continue to // handle the handshake failure as an authentication exception. if (!flush || handshakeWrapAfterFailure(flush)) throw handshakeException; else log.debug("Delay propagation of handshake exception till {} bytes remaining are flushed", netWriteBuffer.remaining()); } // SSL handshake failures are typically thrown as SSLHandshakeException, SSLProtocolException, // SSLPeerUnverifiedException or SSLKeyException if the cause is known. These exceptions indicate // authentication failures (e.g. configuration errors) which should not be retried. But the SSL engine // may also throw exceptions using the base class SSLException in a few cases: // a) If there are no matching ciphers or TLS version or the private key is invalid, client will be // unable to process the server message and an SSLException is thrown: // javax.net.ssl.SSLException: Unrecognized SSL message, plaintext connection? // b) If server closes the connection gracefully during handshake, client may receive close_notify // and and an SSLException is thrown: // javax.net.ssl.SSLException: Received close_notify during handshake // We want to handle a) as a non-retriable SslAuthenticationException and b) as a retriable IOException. // To do this we need to rely on the exception string. Since it is safer to throw a retriable exception // when we are not sure, we will treat only the first exception string as a handshake exception. 
private void maybeProcessHandshakeFailure(SSLException sslException, boolean flush, IOException ioException) throws IOException { if (sslException instanceof SSLHandshakeException || sslException instanceof SSLProtocolException || sslException instanceof SSLPeerUnverifiedException || sslException instanceof SSLKeyException || sslException.getMessage().contains("Unrecognized SSL message") || sslException.getMessage().contains("Received fatal alert: ")) handshakeFailure(sslException, flush); else if (ioException == null) throw sslException; else { log.debug("SSLException while unwrapping data after IOException, original IOException will be propagated", sslException); throw ioException; } } // If handshake has already failed, throw the authentication exception. private void maybeThrowSslAuthenticationException() { if (handshakeException != null) throw handshakeException; } /** * Perform handshake wrap after an SSLException or any IOException. * * If `doWrite=false`, we are processing IOException after peer has disconnected, so we * cannot send any more data. We perform any pending wraps so that we can unwrap any * peer data that is already available. * * If `doWrite=true`, we are processing SSLException, we perform wrap and flush * any data to notify the peer of the handshake failure. * * Returns true if no more wrap is required and any data is flushed or discarded. 
*/ private boolean handshakeWrapAfterFailure(boolean doWrite) { try { log.trace("handshakeWrapAfterFailure status {} doWrite {}", handshakeStatus, doWrite); while (handshakeStatus == HandshakeStatus.NEED_WRAP && (!doWrite || flush(netWriteBuffer))) { if (!doWrite) clearWriteBuffer(); handshakeWrap(doWrite); } } catch (Exception e) { log.debug("Failed to wrap and flush all bytes before closing channel", e); clearWriteBuffer(); } if (!doWrite) clearWriteBuffer(); return !netWriteBuffer.hasRemaining(); } private void clearWriteBuffer() { if (netWriteBuffer.hasRemaining()) log.debug("Discarding write buffer {} since peer has disconnected", netWriteBuffer); netWriteBuffer.position(0); netWriteBuffer.limit(0); } @Override public boolean isMute() { return key.isValid() && (key.interestOps() & SelectionKey.OP_READ) == 0; } @Override public boolean hasBytesBuffered() { return hasBytesBuffered; } // Update `hasBytesBuffered` status. If any bytes were read from the network or // if data was returned from read, `hasBytesBuffered` is set to true if any buffered // data is still remaining. If not, `hasBytesBuffered` is set to false since no progress // can be made until more data is available to read from the network. 
// Recompute `hasBytesBuffered` after a read attempt: progress was made only if data moved,
// in which case any leftover bytes in the intermediate buffers remain processable without
// another network read; otherwise no progress is possible until more network data arrives.
private void updateBytesBuffered(boolean madeProgress) {
    if (madeProgress)
        hasBytesBuffered = netReadBuffer.position() != 0 || appReadBuffer.position() != 0;
    else
        hasBytesBuffered = false;
}

// Transfers up to `count` bytes from `fileChannel` starting at `position` through the SSL
// layer. Unlike plaintext transport, zero-copy transferTo cannot be used because every byte
// must pass through the SSLEngine; data is staged in a lazily-allocated direct buffer.
// Returns the number of file bytes handed to the network layer (possibly 0).
@Override
public long transferFrom(FileChannel fileChannel, long position, long count) throws IOException {
    if (state == State.CLOSING)
        throw closingException();
    if (state != State.READY)
        return 0;
    // Drain any previously-encrypted bytes before staging new plaintext.
    if (!flush(netWriteBuffer))
        return 0;
    long channelSize = fileChannel.size();
    if (position > channelSize)
        return 0;
    int totalBytesToWrite = (int) Math.min(Math.min(count, channelSize - position), Integer.MAX_VALUE);

    if (fileChannelBuffer == null) {
        // Pick a size that allows for reasonably efficient disk reads, keeps the memory overhead per connection
        // manageable and can typically be drained in a single `write` call. The `netWriteBuffer` is typically 16k
        // and the socket send buffer is 100k by default, so 32k is a good number given the mentioned trade-offs.
        int transferSize = 32768;
        // Allocate a direct buffer to avoid one heap to heap buffer copy. SSLEngine copies the source
        // buffer (fileChannelBuffer) to the destination buffer (netWriteBuffer) and then encrypts in-place.
        // FileChannel.read() to a heap buffer requires a copy from a direct buffer to a heap buffer, which is not
        // useful here.
        fileChannelBuffer = ByteBuffer.allocateDirect(transferSize);
        // The loop below drains any remaining bytes from the buffer before reading from disk, so we ensure there
        // are no remaining bytes in the empty buffer
        fileChannelBuffer.position(fileChannelBuffer.limit());
    }

    int totalBytesWritten = 0;
    long pos = position;
    try {
        while (totalBytesWritten < totalBytesToWrite) {
            if (!fileChannelBuffer.hasRemaining()) {
                fileChannelBuffer.clear();
                int bytesRemaining = totalBytesToWrite - totalBytesWritten;
                // Cap the read so we never stage more than the caller asked for.
                if (bytesRemaining < fileChannelBuffer.limit())
                    fileChannelBuffer.limit(bytesRemaining);
                int bytesRead = fileChannel.read(fileChannelBuffer, pos);
                if (bytesRead <= 0)
                    break;
                fileChannelBuffer.flip();
            }
            int networkBytesWritten = write(fileChannelBuffer);
            totalBytesWritten += networkBytesWritten;
            // In the case of a partial write we only return the written bytes to the caller. As a result, the
            // `position` passed in the next `transferFrom` call won't include the bytes remaining in
            // `fileChannelBuffer`. By draining `fileChannelBuffer` first, we ensure we update `pos` before
            // we invoke `fileChannel.read`.
            if (fileChannelBuffer.hasRemaining())
                break;
            pos += networkBytesWritten;
        }
        return totalBytesWritten;
    } catch (IOException e) {
        // Report partial progress to the caller; the exception will resurface on the next call.
        if (totalBytesWritten > 0)
            return totalBytesWritten;
        throw e;
    }
}
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/TransferableChannel.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.network; import java.io.IOException; import java.nio.channels.FileChannel; import java.nio.channels.GatheringByteChannel; /** * Extends GatheringByteChannel with the minimal set of methods required by the Send interface. Supporting TLS and * efficient zero copy transfers are the main reasons for the additional methods. * * @see SslTransportLayer */ public interface TransferableChannel extends GatheringByteChannel { /** * @return true if there are any pending writes. false if the implementation directly write all data to output. */ boolean hasPendingWrites(); /** * Transfers bytes from `fileChannel` to this `TransferableChannel`. * * This method will delegate to {@link FileChannel#transferTo(long, long, java.nio.channels.WritableByteChannel)}, * but it will unwrap the destination channel, if possible, in order to benefit from zero copy. This is required * because the fast path of `transferTo` is only executed if the destination buffer inherits from an internal JDK * class. 
* * @param fileChannel The source channel * @param position The position within the file at which the transfer is to begin; must be non-negative * @param count The maximum number of bytes to be transferred; must be non-negative * @return The number of bytes, possibly zero, that were actually transferred * @see FileChannel#transferTo(long, long, java.nio.channels.WritableByteChannel) */ long transferFrom(FileChannel fileChannel, long position, long count) throws IOException; }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/TransportLayer.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.network;

/*
 * Transport layer for underlying communication.
 * At very basic level it is wrapper around SocketChannel and can be used as substitute for SocketChannel
 * and other network Channel implementations.
 * As NetworkClient replaces BlockingChannel and other implementations we will be using KafkaChannel as
 * a network I/O channel.
 */

import org.apache.kafka.common.errors.AuthenticationException;

import java.io.IOException;
import java.nio.channels.ScatteringByteChannel;
import java.nio.channels.SelectionKey;
import java.nio.channels.SocketChannel;
import java.security.Principal;

public interface TransportLayer extends ScatteringByteChannel, TransferableChannel {

    /**
     * Returns true if the channel has handshake and authentication done.
     */
    boolean ready();

    /**
     * Finishes the process of connecting a socket channel.
     */
    boolean finishConnect() throws IOException;

    /**
     * disconnect socketChannel
     */
    void disconnect();

    /**
     * Tells whether or not this channel's network socket is connected.
     */
    boolean isConnected();

    /**
     * returns underlying socketChannel
     */
    SocketChannel socketChannel();

    /**
     * Get the underlying selection key
     */
    SelectionKey selectionKey();

    /**
     * This a no-op for the non-secure PLAINTEXT implementation. For SSL, this performs
     * SSL handshake. The SSL handshake includes client authentication if configured using
     * {@link org.apache.kafka.common.config.internals.BrokerSecurityConfigs#SSL_CLIENT_AUTH_CONFIG}.
     * @throws AuthenticationException if handshake fails due to an {@link javax.net.ssl.SSLException}.
     * @throws IOException if read or write fails with an I/O error.
     */
    void handshake() throws AuthenticationException, IOException;

    /**
     * Returns `SSLSession.getPeerPrincipal()` if this is an SslTransportLayer and there is an authenticated peer,
     * `KafkaPrincipal.ANONYMOUS` is returned otherwise.
     */
    Principal peerPrincipal() throws IOException;

    /**
     * Adds the given operations to this channel's selection-key interest set.
     */
    void addInterestOps(int ops);

    /**
     * Removes the given operations from this channel's selection-key interest set.
     */
    void removeInterestOps(int ops);

    /**
     * @return true if reads are currently disabled on this channel (OP_READ not in the interest set).
     */
    boolean isMute();

    /**
     * @return true if channel has bytes to be read in any intermediate buffers
     * which may be processed without reading additional data from the network.
     */
    boolean hasBytesBuffered();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides the network API used by the Kafka clients * <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong> */ package org.apache.kafka.common.network;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/ApiKeys.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol;

import org.apache.kafka.common.message.ApiMessageType;
import org.apache.kafka.common.message.ApiVersionsResponseData;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.record.RecordBatch;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumMap;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.stream.Collectors;

import static org.apache.kafka.common.protocol.types.Type.BYTES;
import static org.apache.kafka.common.protocol.types.Type.COMPACT_BYTES;
import static org.apache.kafka.common.protocol.types.Type.COMPACT_NULLABLE_BYTES;
import static org.apache.kafka.common.protocol.types.Type.NULLABLE_BYTES;
import static org.apache.kafka.common.protocol.types.Type.RECORDS;

/**
 * Identifiers for all the Kafka APIs.
 *
 * Constructor argument shapes (see the overloaded constructors below):
 * (messageType), (messageType, clusterAction), (messageType, clusterAction, forwardable),
 * and (messageType, clusterAction, minRequiredInterBrokerMagic, forwardable).
 */
public enum ApiKeys {
    PRODUCE(ApiMessageType.PRODUCE),
    FETCH(ApiMessageType.FETCH),
    LIST_OFFSETS(ApiMessageType.LIST_OFFSETS),
    METADATA(ApiMessageType.METADATA),
    LEADER_AND_ISR(ApiMessageType.LEADER_AND_ISR, true),
    STOP_REPLICA(ApiMessageType.STOP_REPLICA, true),
    UPDATE_METADATA(ApiMessageType.UPDATE_METADATA, true),
    CONTROLLED_SHUTDOWN(ApiMessageType.CONTROLLED_SHUTDOWN, true),
    OFFSET_COMMIT(ApiMessageType.OFFSET_COMMIT),
    OFFSET_FETCH(ApiMessageType.OFFSET_FETCH),
    FIND_COORDINATOR(ApiMessageType.FIND_COORDINATOR),
    JOIN_GROUP(ApiMessageType.JOIN_GROUP),
    HEARTBEAT(ApiMessageType.HEARTBEAT),
    LEAVE_GROUP(ApiMessageType.LEAVE_GROUP),
    SYNC_GROUP(ApiMessageType.SYNC_GROUP),
    DESCRIBE_GROUPS(ApiMessageType.DESCRIBE_GROUPS),
    LIST_GROUPS(ApiMessageType.LIST_GROUPS),
    SASL_HANDSHAKE(ApiMessageType.SASL_HANDSHAKE),
    API_VERSIONS(ApiMessageType.API_VERSIONS),
    CREATE_TOPICS(ApiMessageType.CREATE_TOPICS, false, true),
    DELETE_TOPICS(ApiMessageType.DELETE_TOPICS, false, true),
    DELETE_RECORDS(ApiMessageType.DELETE_RECORDS),
    INIT_PRODUCER_ID(ApiMessageType.INIT_PRODUCER_ID),
    OFFSET_FOR_LEADER_EPOCH(ApiMessageType.OFFSET_FOR_LEADER_EPOCH),
    ADD_PARTITIONS_TO_TXN(ApiMessageType.ADD_PARTITIONS_TO_TXN, false, RecordBatch.MAGIC_VALUE_V2, false),
    ADD_OFFSETS_TO_TXN(ApiMessageType.ADD_OFFSETS_TO_TXN, false, RecordBatch.MAGIC_VALUE_V2, false),
    END_TXN(ApiMessageType.END_TXN, false, RecordBatch.MAGIC_VALUE_V2, false),
    WRITE_TXN_MARKERS(ApiMessageType.WRITE_TXN_MARKERS, true, RecordBatch.MAGIC_VALUE_V2, false),
    TXN_OFFSET_COMMIT(ApiMessageType.TXN_OFFSET_COMMIT, false, RecordBatch.MAGIC_VALUE_V2, false),
    DESCRIBE_ACLS(ApiMessageType.DESCRIBE_ACLS),
    CREATE_ACLS(ApiMessageType.CREATE_ACLS, false, true),
    DELETE_ACLS(ApiMessageType.DELETE_ACLS, false, true),
    DESCRIBE_CONFIGS(ApiMessageType.DESCRIBE_CONFIGS),
    ALTER_CONFIGS(ApiMessageType.ALTER_CONFIGS, false, true),
    ALTER_REPLICA_LOG_DIRS(ApiMessageType.ALTER_REPLICA_LOG_DIRS),
    DESCRIBE_LOG_DIRS(ApiMessageType.DESCRIBE_LOG_DIRS),
    SASL_AUTHENTICATE(ApiMessageType.SASL_AUTHENTICATE),
    CREATE_PARTITIONS(ApiMessageType.CREATE_PARTITIONS, false, true),
    CREATE_DELEGATION_TOKEN(ApiMessageType.CREATE_DELEGATION_TOKEN, false, true),
    RENEW_DELEGATION_TOKEN(ApiMessageType.RENEW_DELEGATION_TOKEN, false, true),
    EXPIRE_DELEGATION_TOKEN(ApiMessageType.EXPIRE_DELEGATION_TOKEN, false, true),
    DESCRIBE_DELEGATION_TOKEN(ApiMessageType.DESCRIBE_DELEGATION_TOKEN),
    DELETE_GROUPS(ApiMessageType.DELETE_GROUPS),
    ELECT_LEADERS(ApiMessageType.ELECT_LEADERS, false, true),
    INCREMENTAL_ALTER_CONFIGS(ApiMessageType.INCREMENTAL_ALTER_CONFIGS, false, true),
    ALTER_PARTITION_REASSIGNMENTS(ApiMessageType.ALTER_PARTITION_REASSIGNMENTS, false, true),
    LIST_PARTITION_REASSIGNMENTS(ApiMessageType.LIST_PARTITION_REASSIGNMENTS, false, true),
    OFFSET_DELETE(ApiMessageType.OFFSET_DELETE),
    DESCRIBE_CLIENT_QUOTAS(ApiMessageType.DESCRIBE_CLIENT_QUOTAS),
    ALTER_CLIENT_QUOTAS(ApiMessageType.ALTER_CLIENT_QUOTAS, false, true),
    DESCRIBE_USER_SCRAM_CREDENTIALS(ApiMessageType.DESCRIBE_USER_SCRAM_CREDENTIALS),
    ALTER_USER_SCRAM_CREDENTIALS(ApiMessageType.ALTER_USER_SCRAM_CREDENTIALS, false, true),
    VOTE(ApiMessageType.VOTE, true, RecordBatch.MAGIC_VALUE_V0, false),
    BEGIN_QUORUM_EPOCH(ApiMessageType.BEGIN_QUORUM_EPOCH, true, RecordBatch.MAGIC_VALUE_V0, false),
    END_QUORUM_EPOCH(ApiMessageType.END_QUORUM_EPOCH, true, RecordBatch.MAGIC_VALUE_V0, false),
    DESCRIBE_QUORUM(ApiMessageType.DESCRIBE_QUORUM, true, RecordBatch.MAGIC_VALUE_V0, true),
    ALTER_PARTITION(ApiMessageType.ALTER_PARTITION, true),
    UPDATE_FEATURES(ApiMessageType.UPDATE_FEATURES, true, true),
    ENVELOPE(ApiMessageType.ENVELOPE, true, RecordBatch.MAGIC_VALUE_V0, false),
    FETCH_SNAPSHOT(ApiMessageType.FETCH_SNAPSHOT, false, RecordBatch.MAGIC_VALUE_V0, false),
    DESCRIBE_CLUSTER(ApiMessageType.DESCRIBE_CLUSTER),
    DESCRIBE_PRODUCERS(ApiMessageType.DESCRIBE_PRODUCERS),
    BROKER_REGISTRATION(ApiMessageType.BROKER_REGISTRATION, true, RecordBatch.MAGIC_VALUE_V0, false),
    BROKER_HEARTBEAT(ApiMessageType.BROKER_HEARTBEAT, true, RecordBatch.MAGIC_VALUE_V0, false),
    UNREGISTER_BROKER(ApiMessageType.UNREGISTER_BROKER, false, RecordBatch.MAGIC_VALUE_V0, true),
    DESCRIBE_TRANSACTIONS(ApiMessageType.DESCRIBE_TRANSACTIONS),
    LIST_TRANSACTIONS(ApiMessageType.LIST_TRANSACTIONS),
    ALLOCATE_PRODUCER_IDS(ApiMessageType.ALLOCATE_PRODUCER_IDS, true, true),
    CONSUMER_GROUP_HEARTBEAT(ApiMessageType.CONSUMER_GROUP_HEARTBEAT);

    // Pre-computed, per-listener view of the APIs; populated eagerly since the enum set is fixed.
    private static final Map<ApiMessageType.ListenerType, EnumSet<ApiKeys>> APIS_BY_LISTENER =
        new EnumMap<>(ApiMessageType.ListenerType.class);

    static {
        for (ApiMessageType.ListenerType listenerType : ApiMessageType.ListenerType.values()) {
            APIS_BY_LISTENER.put(listenerType, filterApisForListener(listenerType));
        }
    }

    // The generator ensures every `ApiMessageType` has a unique id
    private static final Map<Integer, ApiKeys> ID_TO_TYPE = Arrays.stream(ApiKeys.values())
        .collect(Collectors.toMap(key -> (int) key.id, Function.identity()));

    /** the permanent and immutable id of an API - this can't change ever */
    public final short id;

    /** An english description of the api - used for debugging and metric names, it can potentially be changed via a KIP */
    public final String name;

    /** indicates if this is a ClusterAction request used only by brokers */
    public final boolean clusterAction;

    /** indicates the minimum required inter broker magic required to support the API */
    public final byte minRequiredInterBrokerMagic;

    /** indicates whether the API is enabled for forwarding **/
    public final boolean forwardable;

    // true when the request buffer must be retained after parsing (forwardable requests,
    // or requests whose schema holds references into the original receive buffer)
    public final boolean requiresDelayedAllocation;

    public final ApiMessageType messageType;

    ApiKeys(ApiMessageType messageType) {
        this(messageType, false);
    }

    ApiKeys(ApiMessageType messageType, boolean clusterAction) {
        this(messageType, clusterAction, RecordBatch.MAGIC_VALUE_V0, false);
    }

    ApiKeys(ApiMessageType messageType, boolean clusterAction, boolean forwardable) {
        this(messageType, clusterAction, RecordBatch.MAGIC_VALUE_V0, forwardable);
    }

    ApiKeys(
        ApiMessageType messageType,
        boolean clusterAction,
        byte minRequiredInterBrokerMagic,
        boolean forwardable
    ) {
        this.messageType = messageType;
        this.id = messageType.apiKey();
        this.name = messageType.name;
        this.clusterAction = clusterAction;
        this.minRequiredInterBrokerMagic = minRequiredInterBrokerMagic;
        this.requiresDelayedAllocation = forwardable || shouldRetainsBufferReference(messageType.requestSchemas());
        this.forwardable = forwardable;
    }

    // Returns true if any request version's schema retains a reference to the underlying buffer.
    private static boolean shouldRetainsBufferReference(Schema[] requestSchemas) {
        boolean requestRetainsBufferReference = false;
        for (Schema requestVersionSchema : requestSchemas) {
            if (retainsBufferReference(requestVersionSchema)) {
                requestRetainsBufferReference = true;
                break;
            }
        }
        return requestRetainsBufferReference;
    }

    public static ApiKeys forId(int id) {
        ApiKeys apiKey = ID_TO_TYPE.get(id);
        if (apiKey == null) {
            throw new IllegalArgumentException("Unexpected api key: " + id);
        }
        return apiKey;
    }

    public static boolean hasId(int id) {
        return ID_TO_TYPE.containsKey(id);
    }

    public short latestVersion() {
        return messageType.highestSupportedVersion(true);
    }

    public short latestVersion(boolean enableUnstableLastVersion) {
        return messageType.highestSupportedVersion(enableUnstableLastVersion);
    }

    public short oldestVersion() {
        return messageType.lowestSupportedVersion();
    }

    public List<Short> allVersions() {
        List<Short> versions = new ArrayList<>(latestVersion() - oldestVersion() + 1);
        for (short version = oldestVersion(); version <= latestVersion(); version++) {
            versions.add(version);
        }
        return versions;
    }

    public boolean isVersionSupported(short apiVersion) {
        return apiVersion >= oldestVersion() && apiVersion <= latestVersion();
    }

    public boolean isVersionEnabled(short apiVersion, boolean enableUnstableLastVersion) {
        // ApiVersions API is a particular case. The client always sends the highest version
        // that it supports and the server falls back to version 0 if it does not know it.
        // Hence, we have to accept any versions here, even unsupported ones.
        if (this == ApiKeys.API_VERSIONS) return true;

        return apiVersion >= oldestVersion() && apiVersion <= latestVersion(enableUnstableLastVersion);
    }

    public Optional<ApiVersionsResponseData.ApiVersion> toApiVersion(boolean enableUnstableLastVersion) {
        short oldestVersion = oldestVersion();
        short latestVersion = latestVersion(enableUnstableLastVersion);

        // API is entirely disabled if latestStableVersion is smaller than oldestVersion.
        if (latestVersion >= oldestVersion) {
            return Optional.of(new ApiVersionsResponseData.ApiVersion()
                .setApiKey(messageType.apiKey())
                .setMinVersion(oldestVersion)
                .setMaxVersion(latestVersion));
        } else {
            return Optional.empty();
        }
    }

    public short requestHeaderVersion(short apiVersion) {
        return messageType.requestHeaderVersion(apiVersion);
    }

    public short responseHeaderVersion(short apiVersion) {
        return messageType.responseHeaderVersion(apiVersion);
    }

    public boolean inScope(ApiMessageType.ListenerType listener) {
        return messageType.listeners().contains(listener);
    }

    // Renders a protocol-documentation table of the client-facing API names and keys.
    private static String toHtml() {
        final StringBuilder b = new StringBuilder();
        b.append("<table class=\"data-table\"><tbody>\n");
        b.append("<tr>");
        b.append("<th>Name</th>\n");
        b.append("<th>Key</th>\n");
        b.append("</tr>");
        for (ApiKeys key : clientApis()) {
            b.append("<tr>\n");
            b.append("<td>");
            b.append("<a href=\"#The_Messages_" + key.name + "\">" + key.name + "</a>");
            b.append("</td>");
            b.append("<td>");
            b.append(key.id);
            b.append("</td>");
            b.append("</tr>\n");
        }
        b.append("</tbody></table>\n");
        return b.toString();
    }

    public static void main(String[] args) {
        System.out.println(toHtml());
    }

    // Walks the schema and reports whether any field type keeps a reference to the receive buffer.
    private static boolean retainsBufferReference(Schema schema) {
        final AtomicBoolean hasBuffer = new AtomicBoolean(false);
        Schema.Visitor detector = new Schema.Visitor() {
            @Override
            public void visit(Type field) {
                if (field == BYTES || field == NULLABLE_BYTES || field == RECORDS ||
                    field == COMPACT_BYTES || field == COMPACT_NULLABLE_BYTES)
                    hasBuffer.set(true);
            }
        };
        schema.walk(detector);
        return hasBuffer.get();
    }

    public static EnumSet<ApiKeys> zkBrokerApis() {
        return apisForListener(ApiMessageType.ListenerType.ZK_BROKER);
    }

    public static EnumSet<ApiKeys> controllerApis() {
        return apisForListener(ApiMessageType.ListenerType.CONTROLLER);
    }

    public static EnumSet<ApiKeys> clientApis() {
        List<ApiKeys> apis = Arrays.stream(ApiKeys.values())
            .filter(apiKey -> apiKey.inScope(ApiMessageType.ListenerType.ZK_BROKER) || apiKey.inScope(ApiMessageType.ListenerType.BROKER))
            .collect(Collectors.toList());
        return EnumSet.copyOf(apis);
    }

    public static EnumSet<ApiKeys> apisForListener(ApiMessageType.ListenerType listener) {
        return APIS_BY_LISTENER.get(listener);
    }

    private static EnumSet<ApiKeys> filterApisForListener(ApiMessageType.ListenerType listener) {
        List<ApiKeys> apis = Arrays.stream(ApiKeys.values())
            .filter(apiKey -> apiKey.inScope(listener))
            .collect(Collectors.toList());
        return EnumSet.copyOf(apis);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/ApiMessage.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol;

/**
 * A Message which is part of the top-level Kafka API.
 */
public interface ApiMessage extends Message {
    /**
     * Returns the API key of this message, or -1 if there is none.
     */
    short apiKey();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/ByteBufferAccessor.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol;

import org.apache.kafka.common.utils.ByteUtils;

import java.nio.ByteBuffer;

/**
 * A {@link Readable}/{@link Writable} adapter over a single {@link ByteBuffer}.
 * All reads and writes advance the wrapped buffer's position; the buffer itself
 * is shared, not copied.
 */
public class ByteBufferAccessor implements Readable, Writable {
    private final ByteBuffer buffer;

    public ByteBufferAccessor(ByteBuffer buf) {
        this.buffer = buf;
    }

    @Override
    public byte readByte() {
        return buffer.get();
    }

    @Override
    public short readShort() {
        return buffer.getShort();
    }

    @Override
    public int readInt() {
        return buffer.getInt();
    }

    @Override
    public long readLong() {
        return buffer.getLong();
    }

    @Override
    public double readDouble() {
        return ByteUtils.readDouble(buffer);
    }

    @Override
    public byte[] readArray(int size) {
        // Validate up front so the caller gets a clear message rather than a BufferUnderflowException.
        int available = buffer.remaining();
        if (size > available) {
            throw new RuntimeException("Error reading byte array of " + size + " byte(s): only " + available +
                " byte(s) available");
        }
        byte[] result = new byte[size];
        buffer.get(result);
        return result;
    }

    @Override
    public int readUnsignedVarint() {
        return ByteUtils.readUnsignedVarint(buffer);
    }

    @Override
    public ByteBuffer readByteBuffer(int length) {
        // Return a zero-copy view of the next `length` bytes and skip past them.
        ByteBuffer view = buffer.slice();
        view.limit(length);
        buffer.position(buffer.position() + length);
        return view;
    }

    @Override
    public void writeByte(byte val) {
        buffer.put(val);
    }

    @Override
    public void writeShort(short val) {
        buffer.putShort(val);
    }

    @Override
    public void writeInt(int val) {
        buffer.putInt(val);
    }

    @Override
    public void writeLong(long val) {
        buffer.putLong(val);
    }

    @Override
    public void writeDouble(double val) {
        ByteUtils.writeDouble(val, buffer);
    }

    @Override
    public void writeByteArray(byte[] arr) {
        buffer.put(arr);
    }

    @Override
    public void writeUnsignedVarint(int i) {
        ByteUtils.writeUnsignedVarint(i, buffer);
    }

    @Override
    public void writeByteBuffer(ByteBuffer src) {
        // Duplicate so the source buffer's position/limit are left untouched.
        buffer.put(src.duplicate());
    }

    @Override
    public void writeVarint(int i) {
        ByteUtils.writeVarint(i, buffer);
    }

    @Override
    public void writeVarlong(long i) {
        ByteUtils.writeVarlong(i, buffer);
    }

    @Override
    public int readVarint() {
        return ByteUtils.readVarint(buffer);
    }

    @Override
    public long readVarlong() {
        return ByteUtils.readVarlong(buffer);
    }

    @Override
    public int remaining() {
        return buffer.remaining();
    }

    /** Flips the wrapped buffer, switching it from writing to reading. */
    public void flip() {
        buffer.flip();
    }

    /** Exposes the wrapped buffer; mutations are visible to this accessor. */
    public ByteBuffer buffer() {
        return buffer;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/DataOutputStreamWritable.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol;

import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.Utils;

import java.io.Closeable;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.ByteBuffer;

/**
 * A {@link Writable} that forwards to a {@link DataOutputStream}.
 *
 * The {@link Writable} interface does not declare checked exceptions, so any
 * {@link IOException} from the underlying stream is rethrown as
 * {@link UncheckedIOException}. (UncheckedIOException extends RuntimeException,
 * so existing callers that catch RuntimeException are unaffected, while the
 * original IOException type is preserved for callers that care.)
 */
public class DataOutputStreamWritable implements Writable, Closeable {
    protected final DataOutputStream out;

    public DataOutputStreamWritable(DataOutputStream out) {
        this.out = out;
    }

    @Override
    public void writeByte(byte val) {
        try {
            out.writeByte(val);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    @Override
    public void writeShort(short val) {
        try {
            out.writeShort(val);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    @Override
    public void writeInt(int val) {
        try {
            out.writeInt(val);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    @Override
    public void writeLong(long val) {
        try {
            out.writeLong(val);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    @Override
    public void writeDouble(double val) {
        try {
            out.writeDouble(val);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    @Override
    public void writeByteArray(byte[] arr) {
        try {
            out.write(arr);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    @Override
    public void writeUnsignedVarint(int i) {
        try {
            ByteUtils.writeUnsignedVarint(i, out);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    @Override
    public void writeByteBuffer(ByteBuffer buf) {
        try {
            // Write directly from the backing array when available to avoid a copy.
            if (buf.hasArray()) {
                out.write(buf.array(), buf.arrayOffset() + buf.position(), buf.remaining());
            } else {
                byte[] bytes = Utils.toArray(buf);
                out.write(bytes);
            }
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    @Override
    public void writeVarint(int i) {
        try {
            ByteUtils.writeVarint(i, out);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    @Override
    public void writeVarlong(long i) {
        try {
            ByteUtils.writeVarlong(i, out);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    /** Flushes the underlying stream. */
    public void flush() {
        try {
            out.flush();
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    @Override
    public void close() {
        try {
            out.close();
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/Errors.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.protocol; import org.apache.kafka.common.InvalidRecordException; import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.errors.BrokerIdNotRegisteredException; import org.apache.kafka.common.errors.BrokerNotAvailableException; import org.apache.kafka.common.errors.ClusterAuthorizationException; import org.apache.kafka.common.errors.ConcurrentTransactionsException; import org.apache.kafka.common.errors.ControllerMovedException; import org.apache.kafka.common.errors.CoordinatorLoadInProgressException; import org.apache.kafka.common.errors.CoordinatorNotAvailableException; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.errors.DelegationTokenAuthorizationException; import org.apache.kafka.common.errors.DelegationTokenDisabledException; import org.apache.kafka.common.errors.DelegationTokenExpiredException; import org.apache.kafka.common.errors.DelegationTokenNotFoundException; import org.apache.kafka.common.errors.DelegationTokenOwnerMismatchException; import org.apache.kafka.common.errors.DuplicateBrokerRegistrationException; import org.apache.kafka.common.errors.DuplicateResourceException; import 
org.apache.kafka.common.errors.DuplicateSequenceException; import org.apache.kafka.common.errors.ElectionNotNeededException; import org.apache.kafka.common.errors.EligibleLeadersNotAvailableException; import org.apache.kafka.common.errors.FeatureUpdateFailedException; import org.apache.kafka.common.errors.FencedInstanceIdException; import org.apache.kafka.common.errors.FencedLeaderEpochException; import org.apache.kafka.common.errors.FencedMemberEpochException; import org.apache.kafka.common.errors.FetchSessionIdNotFoundException; import org.apache.kafka.common.errors.FetchSessionTopicIdException; import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.GroupIdNotFoundException; import org.apache.kafka.common.errors.GroupMaxSizeReachedException; import org.apache.kafka.common.errors.GroupNotEmptyException; import org.apache.kafka.common.errors.GroupSubscribedToTopicException; import org.apache.kafka.common.errors.IllegalGenerationException; import org.apache.kafka.common.errors.IllegalSaslStateException; import org.apache.kafka.common.errors.InconsistentGroupProtocolException; import org.apache.kafka.common.errors.InconsistentTopicIdException; import org.apache.kafka.common.errors.InconsistentVoterSetException; import org.apache.kafka.common.errors.InconsistentClusterIdException; import org.apache.kafka.common.errors.IneligibleReplicaException; import org.apache.kafka.common.errors.InvalidCommitOffsetSizeException; import org.apache.kafka.common.errors.InvalidConfigurationException; import org.apache.kafka.common.errors.InvalidFetchSessionEpochException; import org.apache.kafka.common.errors.InvalidFetchSizeException; import org.apache.kafka.common.errors.InvalidGroupIdException; import org.apache.kafka.common.errors.InvalidPartitionsException; import org.apache.kafka.common.errors.InvalidPidMappingException; import org.apache.kafka.common.errors.InvalidPrincipalTypeException; import 
org.apache.kafka.common.errors.InvalidProducerEpochException; import org.apache.kafka.common.errors.InvalidReplicaAssignmentException; import org.apache.kafka.common.errors.InvalidReplicationFactorException; import org.apache.kafka.common.errors.InvalidRequestException; import org.apache.kafka.common.errors.InvalidRequiredAcksException; import org.apache.kafka.common.errors.InvalidSessionTimeoutException; import org.apache.kafka.common.errors.InvalidTimestampException; import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.InvalidTxnStateException; import org.apache.kafka.common.errors.InvalidTxnTimeoutException; import org.apache.kafka.common.errors.InvalidUpdateVersionException; import org.apache.kafka.common.errors.KafkaStorageException; import org.apache.kafka.common.errors.LeaderNotAvailableException; import org.apache.kafka.common.errors.ListenerNotFoundException; import org.apache.kafka.common.errors.LogDirNotFoundException; import org.apache.kafka.common.errors.MemberIdRequiredException; import org.apache.kafka.common.errors.NetworkException; import org.apache.kafka.common.errors.NewLeaderElectedException; import org.apache.kafka.common.errors.NoReassignmentInProgressException; import org.apache.kafka.common.errors.NotControllerException; import org.apache.kafka.common.errors.NotCoordinatorException; import org.apache.kafka.common.errors.NotEnoughReplicasAfterAppendException; import org.apache.kafka.common.errors.NotEnoughReplicasException; import org.apache.kafka.common.errors.NotLeaderOrFollowerException; import org.apache.kafka.common.errors.OffsetMetadataTooLarge; import org.apache.kafka.common.errors.OffsetNotAvailableException; import org.apache.kafka.common.errors.OffsetOutOfRangeException; import org.apache.kafka.common.errors.OffsetMovedToTieredStorageException; import org.apache.kafka.common.errors.OperationNotAttemptedException; import org.apache.kafka.common.errors.OutOfOrderSequenceException; import 
org.apache.kafka.common.errors.PolicyViolationException; import org.apache.kafka.common.errors.PositionOutOfRangeException; import org.apache.kafka.common.errors.PreferredLeaderNotAvailableException; import org.apache.kafka.common.errors.PrincipalDeserializationException; import org.apache.kafka.common.errors.ProducerFencedException; import org.apache.kafka.common.errors.ReassignmentInProgressException; import org.apache.kafka.common.errors.RebalanceInProgressException; import org.apache.kafka.common.errors.RecordBatchTooLargeException; import org.apache.kafka.common.errors.RecordTooLargeException; import org.apache.kafka.common.errors.ReplicaNotAvailableException; import org.apache.kafka.common.errors.ResourceNotFoundException; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.SaslAuthenticationException; import org.apache.kafka.common.errors.SecurityDisabledException; import org.apache.kafka.common.errors.SnapshotNotFoundException; import org.apache.kafka.common.errors.StaleBrokerEpochException; import org.apache.kafka.common.errors.ThrottlingQuotaExceededException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.errors.TopicDeletionDisabledException; import org.apache.kafka.common.errors.TopicExistsException; import org.apache.kafka.common.errors.TransactionCoordinatorFencedException; import org.apache.kafka.common.errors.TransactionalIdAuthorizationException; import org.apache.kafka.common.errors.TransactionalIdNotFoundException; import org.apache.kafka.common.errors.UnacceptableCredentialException; import org.apache.kafka.common.errors.UnknownLeaderEpochException; import org.apache.kafka.common.errors.UnknownMemberIdException; import org.apache.kafka.common.errors.UnknownProducerIdException; import org.apache.kafka.common.errors.UnknownServerException; import 
org.apache.kafka.common.errors.UnknownTopicIdException; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import org.apache.kafka.common.errors.UnreleasedInstanceIdException; import org.apache.kafka.common.errors.UnstableOffsetCommitException; import org.apache.kafka.common.errors.UnsupportedAssignorException; import org.apache.kafka.common.errors.UnsupportedByAuthenticationException; import org.apache.kafka.common.errors.UnsupportedCompressionTypeException; import org.apache.kafka.common.errors.UnsupportedForMessageFormatException; import org.apache.kafka.common.errors.UnsupportedSaslMechanismException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.HashMap; import java.util.Map; import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutionException; import java.util.function.Function; /** * This class contains all the client-server errors--those errors that must be sent from the server to the client. These * are thus part of the protocol. The names can be changed but the error code cannot. * * Note that client library will convert an unknown error code to the non-retriable UnknownServerException if the client library * version is old and does not recognize the newly-added error code. Therefore when a new server-side error is added, * we may need extra logic to convert the new error code to another existing error code before sending the response back to * the client if the request version suggests that the client may not recognize the new error code. * * Do not add exceptions that occur only on the client or only on the server here. 
* * @see org.apache.kafka.common.network.SslTransportLayer */ public enum Errors { UNKNOWN_SERVER_ERROR(-1, "The server experienced an unexpected error when processing the request.", UnknownServerException::new), NONE(0, null, message -> null), OFFSET_OUT_OF_RANGE(1, "The requested offset is not within the range of offsets maintained by the server.", OffsetOutOfRangeException::new), CORRUPT_MESSAGE(2, "This message has failed its CRC checksum, exceeds the valid size, has a null key for a compacted topic, or is otherwise corrupt.", CorruptRecordException::new), UNKNOWN_TOPIC_OR_PARTITION(3, "This server does not host this topic-partition.", UnknownTopicOrPartitionException::new), INVALID_FETCH_SIZE(4, "The requested fetch size is invalid.", InvalidFetchSizeException::new), LEADER_NOT_AVAILABLE(5, "There is no leader for this topic-partition as we are in the middle of a leadership election.", LeaderNotAvailableException::new), NOT_LEADER_OR_FOLLOWER(6, "For requests intended only for the leader, this error indicates that the broker is not the current leader. " + "For requests intended for any replica, this error indicates that the broker is not a replica of the topic partition.", NotLeaderOrFollowerException::new), REQUEST_TIMED_OUT(7, "The request timed out.", TimeoutException::new), BROKER_NOT_AVAILABLE(8, "The broker is not available.", BrokerNotAvailableException::new), REPLICA_NOT_AVAILABLE(9, "The replica is not available for the requested topic-partition. 
Produce/Fetch requests and other requests " + "intended only for the leader or follower return NOT_LEADER_OR_FOLLOWER if the broker is not a replica of the topic-partition.", ReplicaNotAvailableException::new), MESSAGE_TOO_LARGE(10, "The request included a message larger than the max message size the server will accept.", RecordTooLargeException::new), STALE_CONTROLLER_EPOCH(11, "The controller moved to another broker.", ControllerMovedException::new), OFFSET_METADATA_TOO_LARGE(12, "The metadata field of the offset request was too large.", OffsetMetadataTooLarge::new), NETWORK_EXCEPTION(13, "The server disconnected before a response was received.", NetworkException::new), COORDINATOR_LOAD_IN_PROGRESS(14, "The coordinator is loading and hence can't process requests.", CoordinatorLoadInProgressException::new), COORDINATOR_NOT_AVAILABLE(15, "The coordinator is not available.", CoordinatorNotAvailableException::new), NOT_COORDINATOR(16, "This is not the correct coordinator.", NotCoordinatorException::new), INVALID_TOPIC_EXCEPTION(17, "The request attempted to perform an operation on an invalid topic.", InvalidTopicException::new), RECORD_LIST_TOO_LARGE(18, "The request included message batch larger than the configured segment size on the server.", RecordBatchTooLargeException::new), NOT_ENOUGH_REPLICAS(19, "Messages are rejected since there are fewer in-sync replicas than required.", NotEnoughReplicasException::new), NOT_ENOUGH_REPLICAS_AFTER_APPEND(20, "Messages are written to the log, but to fewer in-sync replicas than required.", NotEnoughReplicasAfterAppendException::new), INVALID_REQUIRED_ACKS(21, "Produce request specified an invalid value for required acks.", InvalidRequiredAcksException::new), ILLEGAL_GENERATION(22, "Specified group generation id is not valid.", IllegalGenerationException::new), INCONSISTENT_GROUP_PROTOCOL(23, "The group member's supported protocols are incompatible with those of existing members " + "or first group member tried to join with 
empty protocol type or empty protocol list.", InconsistentGroupProtocolException::new), INVALID_GROUP_ID(24, "The configured groupId is invalid.", InvalidGroupIdException::new), UNKNOWN_MEMBER_ID(25, "The coordinator is not aware of this member.", UnknownMemberIdException::new), INVALID_SESSION_TIMEOUT(26, "The session timeout is not within the range allowed by the broker " + "(as configured by group.min.session.timeout.ms and group.max.session.timeout.ms).", InvalidSessionTimeoutException::new), REBALANCE_IN_PROGRESS(27, "The group is rebalancing, so a rejoin is needed.", RebalanceInProgressException::new), INVALID_COMMIT_OFFSET_SIZE(28, "The committing offset data size is not valid.", InvalidCommitOffsetSizeException::new), TOPIC_AUTHORIZATION_FAILED(29, "Topic authorization failed.", TopicAuthorizationException::new), GROUP_AUTHORIZATION_FAILED(30, "Group authorization failed.", GroupAuthorizationException::new), CLUSTER_AUTHORIZATION_FAILED(31, "Cluster authorization failed.", ClusterAuthorizationException::new), INVALID_TIMESTAMP(32, "The timestamp of the message is out of acceptable range.", InvalidTimestampException::new), UNSUPPORTED_SASL_MECHANISM(33, "The broker does not support the requested SASL mechanism.", UnsupportedSaslMechanismException::new), ILLEGAL_SASL_STATE(34, "Request is not valid given the current SASL state.", IllegalSaslStateException::new), UNSUPPORTED_VERSION(35, "The version of API is not supported.", UnsupportedVersionException::new), TOPIC_ALREADY_EXISTS(36, "Topic with this name already exists.", TopicExistsException::new), INVALID_PARTITIONS(37, "Number of partitions is below 1.", InvalidPartitionsException::new), INVALID_REPLICATION_FACTOR(38, "Replication factor is below 1 or larger than the number of available brokers.", InvalidReplicationFactorException::new), INVALID_REPLICA_ASSIGNMENT(39, "Replica assignment is invalid.", InvalidReplicaAssignmentException::new), INVALID_CONFIG(40, "Configuration is invalid.", 
InvalidConfigurationException::new), NOT_CONTROLLER(41, "This is not the correct controller for this cluster.", NotControllerException::new), INVALID_REQUEST(42, "This most likely occurs because of a request being malformed by the " + "client library or the message was sent to an incompatible broker. See the broker logs " + "for more details.", InvalidRequestException::new), UNSUPPORTED_FOR_MESSAGE_FORMAT(43, "The message format version on the broker does not support the request.", UnsupportedForMessageFormatException::new), POLICY_VIOLATION(44, "Request parameters do not satisfy the configured policy.", PolicyViolationException::new), OUT_OF_ORDER_SEQUENCE_NUMBER(45, "The broker received an out of order sequence number.", OutOfOrderSequenceException::new), DUPLICATE_SEQUENCE_NUMBER(46, "The broker received a duplicate sequence number.", DuplicateSequenceException::new), INVALID_PRODUCER_EPOCH(47, "Producer attempted to produce with an old epoch.", InvalidProducerEpochException::new), INVALID_TXN_STATE(48, "The producer attempted a transactional operation in an invalid state.", InvalidTxnStateException::new), INVALID_PRODUCER_ID_MAPPING(49, "The producer attempted to use a producer id which is not currently assigned to " + "its transactional id.", InvalidPidMappingException::new), INVALID_TRANSACTION_TIMEOUT(50, "The transaction timeout is larger than the maximum value allowed by " + "the broker (as configured by transaction.max.timeout.ms).", InvalidTxnTimeoutException::new), CONCURRENT_TRANSACTIONS(51, "The producer attempted to update a transaction " + "while another concurrent operation on the same transaction was ongoing.", ConcurrentTransactionsException::new), TRANSACTION_COORDINATOR_FENCED(52, "Indicates that the transaction coordinator sending a WriteTxnMarker " + "is no longer the current coordinator for a given producer.", TransactionCoordinatorFencedException::new), TRANSACTIONAL_ID_AUTHORIZATION_FAILED(53, "Transactional Id authorization failed.", 
TransactionalIdAuthorizationException::new), SECURITY_DISABLED(54, "Security features are disabled.", SecurityDisabledException::new), OPERATION_NOT_ATTEMPTED(55, "The broker did not attempt to execute this operation. This may happen for " + "batched RPCs where some operations in the batch failed, causing the broker to respond without " + "trying the rest.", OperationNotAttemptedException::new), KAFKA_STORAGE_ERROR(56, "Disk error when trying to access log file on the disk.", KafkaStorageException::new), LOG_DIR_NOT_FOUND(57, "The user-specified log directory is not found in the broker config.", LogDirNotFoundException::new), SASL_AUTHENTICATION_FAILED(58, "SASL Authentication failed.", SaslAuthenticationException::new), UNKNOWN_PRODUCER_ID(59, "This exception is raised by the broker if it could not locate the producer metadata " + "associated with the producerId in question. This could happen if, for instance, the producer's records " + "were deleted because their retention time had elapsed. 
Once the last records of the producerId are " + "removed, the producer's metadata is removed from the broker, and future appends by the producer will " + "return this exception.", UnknownProducerIdException::new), REASSIGNMENT_IN_PROGRESS(60, "A partition reassignment is in progress.", ReassignmentInProgressException::new), DELEGATION_TOKEN_AUTH_DISABLED(61, "Delegation Token feature is not enabled.", DelegationTokenDisabledException::new), DELEGATION_TOKEN_NOT_FOUND(62, "Delegation Token is not found on server.", DelegationTokenNotFoundException::new), DELEGATION_TOKEN_OWNER_MISMATCH(63, "Specified Principal is not valid Owner/Renewer.", DelegationTokenOwnerMismatchException::new), DELEGATION_TOKEN_REQUEST_NOT_ALLOWED(64, "Delegation Token requests are not allowed on PLAINTEXT/1-way SSL " + "channels and on delegation token authenticated channels.", UnsupportedByAuthenticationException::new), DELEGATION_TOKEN_AUTHORIZATION_FAILED(65, "Delegation Token authorization failed.", DelegationTokenAuthorizationException::new), DELEGATION_TOKEN_EXPIRED(66, "Delegation Token is expired.", DelegationTokenExpiredException::new), INVALID_PRINCIPAL_TYPE(67, "Supplied principalType is not supported.", InvalidPrincipalTypeException::new), NON_EMPTY_GROUP(68, "The group is not empty.", GroupNotEmptyException::new), GROUP_ID_NOT_FOUND(69, "The group id does not exist.", GroupIdNotFoundException::new), FETCH_SESSION_ID_NOT_FOUND(70, "The fetch session ID was not found.", FetchSessionIdNotFoundException::new), INVALID_FETCH_SESSION_EPOCH(71, "The fetch session epoch is invalid.", InvalidFetchSessionEpochException::new), LISTENER_NOT_FOUND(72, "There is no listener on the leader broker that matches the listener on which " + "metadata request was processed.", ListenerNotFoundException::new), TOPIC_DELETION_DISABLED(73, "Topic deletion is disabled.", TopicDeletionDisabledException::new), FENCED_LEADER_EPOCH(74, "The leader epoch in the request is older than the epoch on the broker.", 
FencedLeaderEpochException::new), UNKNOWN_LEADER_EPOCH(75, "The leader epoch in the request is newer than the epoch on the broker.", UnknownLeaderEpochException::new), UNSUPPORTED_COMPRESSION_TYPE(76, "The requesting client does not support the compression type of given partition.", UnsupportedCompressionTypeException::new), STALE_BROKER_EPOCH(77, "Broker epoch has changed.", StaleBrokerEpochException::new), OFFSET_NOT_AVAILABLE(78, "The leader high watermark has not caught up from a recent leader " + "election so the offsets cannot be guaranteed to be monotonically increasing.", OffsetNotAvailableException::new), MEMBER_ID_REQUIRED(79, "The group member needs to have a valid member id before actually entering a consumer group.", MemberIdRequiredException::new), PREFERRED_LEADER_NOT_AVAILABLE(80, "The preferred leader was not available.", PreferredLeaderNotAvailableException::new), GROUP_MAX_SIZE_REACHED(81, "The consumer group has reached its max size.", GroupMaxSizeReachedException::new), FENCED_INSTANCE_ID(82, "The broker rejected this static consumer since " + "another consumer with the same group.instance.id has registered with a different member.id.", FencedInstanceIdException::new), ELIGIBLE_LEADERS_NOT_AVAILABLE(83, "Eligible topic partition leaders are not available.", EligibleLeadersNotAvailableException::new), ELECTION_NOT_NEEDED(84, "Leader election not needed for topic partition.", ElectionNotNeededException::new), NO_REASSIGNMENT_IN_PROGRESS(85, "No partition reassignment is in progress.", NoReassignmentInProgressException::new), GROUP_SUBSCRIBED_TO_TOPIC(86, "Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it.", GroupSubscribedToTopicException::new), INVALID_RECORD(87, "This record has failed the validation on broker and hence will be rejected.", InvalidRecordException::new), UNSTABLE_OFFSET_COMMIT(88, "There are unstable offsets that need to be cleared.", UnstableOffsetCommitException::new), 
THROTTLING_QUOTA_EXCEEDED(89, "The throttling quota has been exceeded.", ThrottlingQuotaExceededException::new), PRODUCER_FENCED(90, "There is a newer producer with the same transactionalId " + "which fences the current one.", ProducerFencedException::new), RESOURCE_NOT_FOUND(91, "A request illegally referred to a resource that does not exist.", ResourceNotFoundException::new), DUPLICATE_RESOURCE(92, "A request illegally referred to the same resource twice.", DuplicateResourceException::new), UNACCEPTABLE_CREDENTIAL(93, "Requested credential would not meet criteria for acceptability.", UnacceptableCredentialException::new), INCONSISTENT_VOTER_SET(94, "Indicates that the either the sender or recipient of a " + "voter-only request is not one of the expected voters", InconsistentVoterSetException::new), INVALID_UPDATE_VERSION(95, "The given update version was invalid.", InvalidUpdateVersionException::new), FEATURE_UPDATE_FAILED(96, "Unable to update finalized features due to an unexpected server error.", FeatureUpdateFailedException::new), PRINCIPAL_DESERIALIZATION_FAILURE(97, "Request principal deserialization failed during forwarding. 
" + "This indicates an internal error on the broker cluster security setup.", PrincipalDeserializationException::new), SNAPSHOT_NOT_FOUND(98, "Requested snapshot was not found", SnapshotNotFoundException::new), POSITION_OUT_OF_RANGE( 99, "Requested position is not greater than or equal to zero, and less than the size of the snapshot.", PositionOutOfRangeException::new), UNKNOWN_TOPIC_ID(100, "This server does not host this topic ID.", UnknownTopicIdException::new), DUPLICATE_BROKER_REGISTRATION(101, "This broker ID is already in use.", DuplicateBrokerRegistrationException::new), BROKER_ID_NOT_REGISTERED(102, "The given broker ID was not registered.", BrokerIdNotRegisteredException::new), INCONSISTENT_TOPIC_ID(103, "The log's topic ID did not match the topic ID in the request", InconsistentTopicIdException::new), INCONSISTENT_CLUSTER_ID(104, "The clusterId in the request does not match that found on the server", InconsistentClusterIdException::new), TRANSACTIONAL_ID_NOT_FOUND(105, "The transactionalId could not be found", TransactionalIdNotFoundException::new), FETCH_SESSION_TOPIC_ID_ERROR(106, "The fetch session encountered inconsistent topic ID usage", FetchSessionTopicIdException::new), INELIGIBLE_REPLICA(107, "The new ISR contains at least one ineligible replica.", IneligibleReplicaException::new), NEW_LEADER_ELECTED(108, "The AlterPartition request successfully updated the partition state but the leader has changed.", NewLeaderElectedException::new), OFFSET_MOVED_TO_TIERED_STORAGE(109, "The requested offset is moved to tiered storage.", OffsetMovedToTieredStorageException::new), FENCED_MEMBER_EPOCH(110, "The member epoch is fenced by the group coordinator. The member must abandon all its partitions and rejoin.", FencedMemberEpochException::new), UNRELEASED_INSTANCE_ID(111, "The instance ID is still used by another member in the consumer group. 
That member must leave first.", UnreleasedInstanceIdException::new), UNSUPPORTED_ASSIGNOR(112, "The assignor or its version range is not supported by the consumer group.", UnsupportedAssignorException::new); private static final Logger log = LoggerFactory.getLogger(Errors.class); private static Map<Class<?>, Errors> classToError = new HashMap<>(); private static Map<Short, Errors> codeToError = new HashMap<>(); static { for (Errors error : Errors.values()) { if (codeToError.put(error.code(), error) != null) throw new ExceptionInInitializerError("Code " + error.code() + " for error " + error + " has already been used"); if (error.exception != null) classToError.put(error.exception.getClass(), error); } } private final short code; private final Function<String, ApiException> builder; private final ApiException exception; Errors(int code, String defaultExceptionString, Function<String, ApiException> builder) { this.code = (short) code; this.builder = builder; this.exception = builder.apply(defaultExceptionString); } /** * An instance of the exception */ public ApiException exception() { return this.exception; } /** * Create an instance of the ApiException that contains the given error message. * * @param message The message string to set. * @return The exception. */ public ApiException exception(String message) { if (message == null) { // If no error message was specified, return an exception with the default error message. return exception; } // Return an exception with the given error message. return builder.apply(message); } /** * Returns the class name of the exception or null if this is {@code Errors.NONE}. */ public String exceptionName() { return exception == null ? 
null : exception.getClass().getName(); } /** * The error code for the exception */ public short code() { return this.code; } /** * Throw the exception corresponding to this error if there is one */ public void maybeThrow() { if (exception != null) { throw this.exception; } } /** * Get a friendly description of the error (if one is available). * @return the error message */ public String message() { if (exception != null) return exception.getMessage(); return toString(); } /** * Throw the exception if there is one */ public static Errors forCode(short code) { Errors error = codeToError.get(code); if (error != null) { return error; } else { log.warn("Unexpected error code: {}.", code); return UNKNOWN_SERVER_ERROR; } } /** * Return the error instance associated with this exception or any of its superclasses (or UNKNOWN if there is none). * If there are multiple matches in the class hierarchy, the first match starting from the bottom is used. */ public static Errors forException(Throwable t) { Throwable cause = maybeUnwrapException(t); Class<?> clazz = cause.getClass(); while (clazz != null) { Errors error = classToError.get(clazz); if (error != null) return error; clazz = clazz.getSuperclass(); } return UNKNOWN_SERVER_ERROR; } /** * Check if a Throwable is a commonly wrapped exception type (e.g. `CompletionException`) and return * the cause if so. This is useful to handle cases where exceptions may be raised from a future or a * completion stage (as might be the case for requests sent to the controller in `ControllerApis`). 
* * @param t The Throwable to check * @return The throwable itself or its cause if it is an instance of a commonly wrapped exception type */ public static Throwable maybeUnwrapException(Throwable t) { if (t instanceof CompletionException || t instanceof ExecutionException) { return t.getCause(); } else { return t; } } private static String toHtml() { final StringBuilder b = new StringBuilder(); b.append("<table class=\"data-table\"><tbody>\n"); b.append("<tr>"); b.append("<th>Error</th>\n"); b.append("<th>Code</th>\n"); b.append("<th>Retriable</th>\n"); b.append("<th>Description</th>\n"); b.append("</tr>\n"); for (Errors error : Errors.values()) { b.append("<tr>"); b.append("<td>"); b.append(error.name()); b.append("</td>"); b.append("<td>"); b.append(error.code()); b.append("</td>"); b.append("<td>"); b.append(error.exception() != null && error.exception() instanceof RetriableException ? "True" : "False"); b.append("</td>"); b.append("<td>"); b.append(error.exception() != null ? error.exception().getMessage() : ""); b.append("</td>"); b.append("</tr>\n"); } b.append("</tbody></table>\n"); return b.toString(); } public static void main(String[] args) { System.out.println(toHtml()); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/Message.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol;

import org.apache.kafka.common.protocol.types.RawTaggedField;

import java.util.List;

/**
 * An object that can serialize itself. The serialization protocol is versioned.
 * Messages also implement toString, equals, and hashCode.
 */
public interface Message {
    /**
     * Returns the lowest supported API key of this message, inclusive.
     */
    short lowestSupportedVersion();

    /**
     * Returns the highest supported API key of this message, inclusive.
     */
    short highestSupportedVersion();

    /**
     * Returns the number of bytes it would take to write out this message.
     *
     * @param cache The serialization size cache to populate.
     * @param version The version to use.
     *
     * @throws org.apache.kafka.common.errors.UnsupportedVersionException
     *         If the specified version is too new to be supported
     *         by this software.
     */
    default int size(ObjectSerializationCache cache, short version) {
        // Accumulate the size rather than computing it directly so that
        // implementations can share one addSize traversal.
        MessageSizeAccumulator size = new MessageSizeAccumulator();
        addSize(size, cache, version);
        return size.totalSize();
    }

    /**
     * Add the size of this message to an accumulator.
     *
     * @param size The size accumulator to add to
     * @param cache The serialization size cache to populate.
     * @param version The version to use.
     */
    void addSize(MessageSizeAccumulator size, ObjectSerializationCache cache, short version);

    /**
     * Writes out this message to the given Writable.
     *
     * @param writable The destination writable.
     * @param cache The object serialization cache to use.  You must have
     *              previously populated the size cache using
     *              {@link Message#size(ObjectSerializationCache, short)}.
     * @param version The version to use.
     *
     * @throws org.apache.kafka.common.errors.UnsupportedVersionException
     *         If the specified version is too new to be supported
     *         by this software.
     */
    void write(Writable writable, ObjectSerializationCache cache, short version);

    /**
     * Reads this message from the given Readable.  This will overwrite all
     * relevant fields with information from the byte buffer.
     *
     * @param readable The source readable.
     * @param version The version to use.
     *
     * @throws org.apache.kafka.common.errors.UnsupportedVersionException
     *         If the specified version is too new to be supported
     *         by this software.
     */
    void read(Readable readable, short version);

    /**
     * Returns a list of tagged fields which this software can't understand.
     *
     * @return The raw tagged fields.
     */
    List<RawTaggedField> unknownTaggedFields();

    /**
     * Make a deep copy of the message.
     *
     * @return A copy of the message which does not share any mutable fields.
     */
    Message duplicate();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/MessageSizeAccumulator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.protocol; /** * Helper class which facilitates zero-copy network transmission. See {@link SendBuilder}. */ public class MessageSizeAccumulator { private int totalSize = 0; private int zeroCopySize = 0; /** * Get the total size of the message. * * @return total size in bytes */ public int totalSize() { return totalSize; } /** * Size excluding zero copy fields as specified by {@link #zeroCopySize}. This is typically the size of the byte * buffer used to serialize messages. */ public int sizeExcludingZeroCopy() { return totalSize - zeroCopySize; } /** * Get the total "zero-copy" size of the message. This is the summed * total of all fields which have either have a type of 'bytes' with * 'zeroCopy' enabled, or a type of 'records' * * @return total size of zero-copy data in the message */ public int zeroCopySize() { return zeroCopySize; } public void addZeroCopyBytes(int size) { zeroCopySize += size; totalSize += size; } public void addBytes(int size) { totalSize += size; } public void add(MessageSizeAccumulator size) { this.totalSize += size.totalSize; this.zeroCopySize += size.zeroCopySize; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/MessageUtil.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol;

import com.fasterxml.jackson.databind.JsonNode;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.utils.Utils;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

/**
 * Utilities for converting between JSON nodes, byte buffers, and generated
 * protocol {@link Message} instances.
 */
public final class MessageUtil {

    /** Largest value representable by a 32-bit unsigned integer. */
    public static final long UNSIGNED_INT_MAX = 4294967295L;

    /** Largest value representable by a 16-bit unsigned integer. */
    public static final int UNSIGNED_SHORT_MAX = 65535;

    /**
     * Copy a byte buffer into an array. This will not affect the buffer's
     * position or mark.
     */
    public static byte[] byteBufferToArray(ByteBuffer buf) {
        byte[] arr = new byte[buf.remaining()];
        int prevPosition = buf.position();
        try {
            buf.get(arr);
        } finally {
            // Restore the position even if get() failed part-way through.
            buf.position(prevPosition);
        }
        return arr;
    }

    /**
     * Render an iterator's elements as a bracketed, comma-separated string.
     */
    public static String deepToString(Iterator<?> iter) {
        StringBuilder bld = new StringBuilder("[");
        String prefix = "";
        while (iter.hasNext()) {
            Object object = iter.next();
            bld.append(prefix);
            bld.append(object.toString());
            prefix = ", ";
        }
        bld.append("]");
        return bld.toString();
    }

    /**
     * Convert a JSON node to a signed byte, accepting unsigned notation
     * (128..255) as well.
     *
     * @throws RuntimeException if the value does not fit in 8 bits
     */
    public static byte jsonNodeToByte(JsonNode node, String about) {
        int value = jsonNodeToInt(node, about);
        if (value > Byte.MAX_VALUE) {
            if (value <= 256) {
                // It's more traditional to refer to bytes as unsigned,
                // so we support that here.
                // FIX: subtract 256 (not 128) so that e.g. 255 maps to -1;
                // subtracting 128 mapped the unsigned range onto wrong values.
                value -= 256;
            } else {
                throw new RuntimeException(about + ": value " + value +
                    " does not fit in an 8-bit signed integer.");
            }
        }
        if (value < Byte.MIN_VALUE) {
            throw new RuntimeException(about + ": value " + value +
                " does not fit in an 8-bit signed integer.");
        }
        return (byte) value;
    }

    /**
     * Convert a JSON node to a signed short.
     *
     * @throws RuntimeException if the value does not fit in 16 bits
     */
    public static short jsonNodeToShort(JsonNode node, String about) {
        int value = jsonNodeToInt(node, about);
        if ((value < Short.MIN_VALUE) || (value > Short.MAX_VALUE)) {
            throw new RuntimeException(about + ": value " + value +
                " does not fit in a 16-bit signed integer.");
        }
        return (short) value;
    }

    /**
     * Convert a JSON node to an unsigned 16-bit value (returned as int).
     */
    public static int jsonNodeToUnsignedShort(JsonNode node, String about) {
        int value = jsonNodeToInt(node, about);
        if (value < 0 || value > UNSIGNED_SHORT_MAX) {
            throw new RuntimeException(about + ": value " + value +
                " does not fit in a 16-bit unsigned integer.");
        }
        return value;
    }

    /**
     * Convert a JSON node to an unsigned 32-bit value (returned as long).
     */
    public static long jsonNodeToUnsignedInt(JsonNode node, String about) {
        long value = jsonNodeToLong(node, about);
        if (value < 0 || value > UNSIGNED_INT_MAX) {
            throw new RuntimeException(about + ": value " + value +
                " does not fit in a 32-bit unsigned integer.");
        }
        return value;
    }

    /**
     * Convert a JSON node to an int. Accepts either a JSON integer, or a JSON
     * string holding a decimal number or a "0x"-prefixed hexadecimal number.
     *
     * @throws NumberFormatException if the node is neither an int nor a string,
     *                               or the string cannot be parsed
     */
    public static int jsonNodeToInt(JsonNode node, String about) {
        if (node.isInt()) {
            return node.asInt();
        }
        // FIX: reject only nodes that are NOT textual; the original condition
        // was inverted, which contradicted the error message below and made
        // the decimal/hex string-parsing path unreachable.
        if (!node.isTextual()) {
            throw new NumberFormatException(about + ": expected an integer or " +
                "string type, but got " + node.getNodeType());
        }
        String text = node.asText();
        if (text.startsWith("0x")) {
            try {
                return Integer.parseInt(text.substring(2), 16);
            } catch (NumberFormatException e) {
                throw new NumberFormatException(about + ": failed to " +
                    "parse hexadecimal number: " + e.getMessage());
            }
        } else {
            try {
                return Integer.parseInt(text);
            } catch (NumberFormatException e) {
                throw new NumberFormatException(about + ": failed to " +
                    "parse number: " + e.getMessage());
            }
        }
    }

    /**
     * Convert a JSON node to a long. Accepts either a JSON long, or a JSON
     * string holding a decimal number or a "0x"-prefixed hexadecimal number.
     *
     * @throws NumberFormatException if the node is neither a long nor a string,
     *                               or the string cannot be parsed
     */
    public static long jsonNodeToLong(JsonNode node, String about) {
        if (node.isLong()) {
            return node.asLong();
        }
        // FIX: same inverted condition as jsonNodeToInt; only non-textual
        // nodes are rejected here.
        if (!node.isTextual()) {
            throw new NumberFormatException(about + ": expected an integer or " +
                "string type, but got " + node.getNodeType());
        }
        String text = node.asText();
        if (text.startsWith("0x")) {
            try {
                return Long.parseLong(text.substring(2), 16);
            } catch (NumberFormatException e) {
                throw new NumberFormatException(about + ": failed to " +
                    "parse hexadecimal number: " + e.getMessage());
            }
        } else {
            try {
                return Long.parseLong(text);
            } catch (NumberFormatException e) {
                throw new NumberFormatException(about + ": failed to " +
                    "parse number: " + e.getMessage());
            }
        }
    }

    /**
     * Convert a JSON node holding Base64 data to a byte array.
     *
     * @throws IllegalArgumentException if the node has no binary value
     * @throws UncheckedIOException if Jackson fails to decode the data
     */
    public static byte[] jsonNodeToBinary(JsonNode node, String about) {
        try {
            byte[] value = node.binaryValue();
            if (value == null) {
                throw new IllegalArgumentException(about + ": expected Base64-encoded binary data.");
            }
            return value;
        } catch (IOException e) {
            throw new UncheckedIOException(about + ": unable to retrieve Base64-encoded binary data", e);
        }
    }

    /**
     * Convert a JSON floating point node to a double.
     *
     * @throws NumberFormatException if the node is not a floating point number
     */
    public static double jsonNodeToDouble(JsonNode node, String about) {
        if (!node.isFloatingPointNumber()) {
            throw new NumberFormatException(about + ": expected a floating point " +
                "type, but got " + node.getNodeType());
        }
        return node.asDouble();
    }

    /**
     * Return a copy of the given array, or null if the input is null.
     */
    public static byte[] duplicate(byte[] array) {
        if (array == null)
            return null;
        return Arrays.copyOf(array, array.length);
    }

    /**
     * Compare two RawTaggedFields lists.
     * A null list is equivalent to an empty one in this context.
     */
    public static boolean compareRawTaggedFields(List<RawTaggedField> first,
                                                 List<RawTaggedField> second) {
        if (first == null) {
            return second == null || second.isEmpty();
        } else if (second == null) {
            return first.isEmpty();
        } else {
            return first.equals(second);
        }
    }

    /**
     * Serialize a message at the given version into a freshly allocated,
     * flipped byte buffer.
     */
    public static ByteBuffer toByteBuffer(final Message message, final short version) {
        ObjectSerializationCache cache = new ObjectSerializationCache();
        int messageSize = message.size(cache, version);
        ByteBufferAccessor bytes = new ByteBufferAccessor(ByteBuffer.allocate(messageSize));
        message.write(bytes, cache, version);
        bytes.flip();
        return bytes.buffer();
    }

    /**
     * Serialize a message preceded by its version as a 16-bit prefix.
     */
    public static ByteBuffer toVersionPrefixedByteBuffer(final short version, final Message message) {
        ObjectSerializationCache cache = new ObjectSerializationCache();
        int messageSize = message.size(cache, version);
        // +2 bytes for the version short written before the message body.
        ByteBufferAccessor bytes = new ByteBufferAccessor(ByteBuffer.allocate(messageSize + 2));
        bytes.writeShort(version);
        message.write(bytes, cache, version);
        bytes.flip();
        return bytes.buffer();
    }

    /**
     * Serialize a message preceded by its version, returning a byte array.
     */
    public static byte[] toVersionPrefixedBytes(final short version, final Message message) {
        ByteBuffer buffer = toVersionPrefixedByteBuffer(version, message);
        // take the inner array directly if it is full with data
        if (buffer.hasArray() &&
            buffer.arrayOffset() == 0 &&
            buffer.position() == 0 &&
            buffer.limit() == buffer.array().length)
            return buffer.array();
        else
            return Utils.toArray(buffer);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/ObjectSerializationCache.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol;

import java.util.IdentityHashMap;

/**
 * Stores sizes and values computed during the first serialization pass so
 * that the second pass can reuse them instead of recomputing them.
 *
 * Intended two-pass usage:
 *   ObjectSerializationCache cache = new ObjectSerializationCache();
 *   message.size(version, cache);
 *   message.write(version, cache);
 */
public final class ObjectSerializationCache {
    // Keyed by object identity rather than equals(), since the exact same
    // object instances are revisited on the second pass.
    private final IdentityHashMap<Object, Object> cache;

    public ObjectSerializationCache() {
        cache = new IdentityHashMap<>();
    }

    /** Record the serialized size in bytes computed for the given object. */
    public void setArraySizeInBytes(Object o, Integer size) {
        cache.put(o, size);
    }

    /** @return the previously recorded size, or null if none was stored */
    public Integer getArraySizeInBytes(Object o) {
        return (Integer) cache.get(o);
    }

    /** Record the serialized byte representation of the given object. */
    public void cacheSerializedValue(Object o, byte[] val) {
        cache.put(o, val);
    }

    /** @return the previously cached bytes, or null if none were stored */
    public byte[] getSerializedValue(Object o) {
        return (byte[]) cache.get(o);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/Protocol.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol;

import org.apache.kafka.common.message.RequestHeaderData;
import org.apache.kafka.common.message.ResponseHeaderData;
import org.apache.kafka.common.protocol.types.BoundField;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.TaggedFields;
import org.apache.kafka.common.protocol.types.Type;

import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;

/**
 * Generates HTML documentation for the Kafka wire protocol: BNF-style grammar
 * and field tables for every request/response schema version.
 */
public class Protocol {

    /** Build a string of {@code size} spaces, used to indent nested schemas. */
    private static String indentString(int size) {
        StringBuilder b = new StringBuilder(size);
        for (int i = 0; i < size; i++)
            b.append(" ");
        return b.toString();
    }

    /**
     * Append a BNF-like production for {@code schema} to {@code b}: the
     * top-level field names on one line, then each composite sub-type expanded
     * recursively with increasing indentation.
     */
    private static void schemaToBnfHtml(Schema schema, StringBuilder b, int indentSize) {
        final String indentStr = indentString(indentSize);
        // Insertion-ordered so sub-type expansions appear in field order.
        final Map<String, Type> subTypes = new LinkedHashMap<>();
        // Top level fields
        for (BoundField field: schema.fields()) {
            Type type = field.def.type;
            if (type.isArray()) {
                b.append("[");
                b.append(field.def.name);
                b.append("] ");
                if (!subTypes.containsKey(field.def.name)) {
                    subTypes.put(field.def.name, type.arrayElementType().get());
                }
            } else if (type instanceof TaggedFields) {
                // Tagged fields are rendered as an opaque TAG_BUFFER token.
                b.append("TAG_BUFFER ");
            } else {
                b.append(field.def.name);
                b.append(" ");
                if (!subTypes.containsKey(field.def.name))
                    subTypes.put(field.def.name, type);
            }
        }
        b.append("\n");
        // Sub Types/Schemas
        for (Map.Entry<String, Type> entry: subTypes.entrySet()) {
            if (entry.getValue() instanceof Schema) {
                // Complex Schema Type: recurse with two extra spaces of indent.
                b.append(indentStr);
                b.append(entry.getKey());
                b.append(" => ");
                schemaToBnfHtml((Schema) entry.getValue(), b, indentSize + 2);
            } else {
                // Standard Field Type
                b.append(indentStr);
                b.append(entry.getKey());
                b.append(" => ");
                b.append(entry.getValue());
                b.append("\n");
            }
        }
    }

    /**
     * Collect every field of {@code schema} (including fields of nested
     * schemas and array element schemas) into {@code fields}, depth-first.
     */
    private static void populateSchemaFields(Schema schema, Set<BoundField> fields) {
        for (BoundField field: schema.fields()) {
            fields.add(field);
            if (field.def.type.isArray()) {
                Type innerType = field.def.type.arrayElementType().get();
                if (innerType instanceof Schema)
                    populateSchemaFields((Schema) innerType, fields);
            } else if (field.def.type instanceof Schema)
                populateSchemaFields((Schema) field.def.type, fields);
        }
    }

    /**
     * Append an HTML table of field name / docstring rows for every field
     * reachable from {@code schema}.
     */
    private static void schemaToFieldTableHtml(Schema schema, StringBuilder b) {
        // LinkedHashSet keeps field order while de-duplicating shared fields.
        Set<BoundField> fields = new LinkedHashSet<>();
        populateSchemaFields(schema, fields);
        b.append("<table class=\"data-table\"><tbody>\n");
        b.append("<tr>");
        b.append("<th>Field</th>\n");
        b.append("<th>Description</th>\n");
        b.append("</tr>");
        for (BoundField field : fields) {
            b.append("<tr>\n");
            b.append("<td>");
            b.append(field.def.name);
            b.append("</td>");
            b.append("<td>");
            b.append(field.def.docString);
            b.append("</td>");
            b.append("</tr>\n");
        }
        b.append("</tbody></table>\n");
    }

    /**
     * Render the full protocol documentation: request/response header schemas
     * followed by every client-facing API's request and response schemas,
     * version by version. Null schema slots (unsupported versions) are skipped.
     */
    public static String toHtml() {
        final StringBuilder b = new StringBuilder();
        b.append("<h5>Headers:</h5>\n");
        for (int i = 0; i < RequestHeaderData.SCHEMAS.length; i++) {
            b.append("<pre>");
            b.append("Request Header v").append(i).append(" => ");
            schemaToBnfHtml(RequestHeaderData.SCHEMAS[i], b, 2);
            b.append("</pre>\n");
            schemaToFieldTableHtml(RequestHeaderData.SCHEMAS[i], b);
        }
        for (int i = 0; i < ResponseHeaderData.SCHEMAS.length; i++) {
            b.append("<pre>");
            b.append("Response Header v").append(i).append(" => ");
            schemaToBnfHtml(ResponseHeaderData.SCHEMAS[i], b, 2);
            b.append("</pre>\n");
            schemaToFieldTableHtml(ResponseHeaderData.SCHEMAS[i], b);
        }
        for (ApiKeys key : ApiKeys.clientApis()) {
            // Key
            b.append("<h5>");
            b.append("<a name=\"The_Messages_" + key.name + "\">");
            b.append(key.name);
            b.append(" API (Key: ");
            b.append(key.id);
            b.append("):</a></h5>\n\n");
            // Requests
            b.append("<b>Requests:</b><br>\n");
            Schema[] requests = key.messageType.requestSchemas();
            for (int i = 0; i < requests.length; i++) {
                Schema schema = requests[i];
                // Schema
                if (schema != null) {
                    b.append("<div>");
                    // Version header
                    b.append("<pre>");
                    b.append(key.name);
                    b.append(" Request (Version: ");
                    b.append(i);
                    b.append(") => ");
                    schemaToBnfHtml(requests[i], b, 2);
                    b.append("</pre>");
                    schemaToFieldTableHtml(requests[i], b);
                }
                // NOTE(review): the closing </div> is emitted even when the
                // schema slot is null (no opening <div>) — looks unbalanced;
                // confirm against upstream before changing output.
                b.append("</div>\n");
            }
            // Responses
            b.append("<b>Responses:</b><br>\n");
            Schema[] responses = key.messageType.responseSchemas();
            for (int i = 0; i < responses.length; i++) {
                Schema schema = responses[i];
                // Schema
                if (schema != null) {
                    b.append("<div>");
                    // Version header
                    b.append("<pre>");
                    b.append(key.name);
                    b.append(" Response (Version: ");
                    b.append(i);
                    b.append(") => ");
                    schemaToBnfHtml(responses[i], b, 2);
                    b.append("</pre>");
                    schemaToFieldTableHtml(responses[i], b);
                }
                b.append("</div>\n");
            }
        }
        return b.toString();
    }

    // Command-line entry point: print the generated documentation to stdout.
    public static void main(String[] args) {
        System.out.println(toHtml());
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/Readable.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.record.MemoryRecords;

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

/**
 * Abstraction for reading protocol primitives from a byte source. Generated
 * message classes deserialize themselves through this interface.
 */
public interface Readable {
    byte readByte();
    short readShort();
    int readInt();
    long readLong();
    double readDouble();
    byte[] readArray(int length);
    int readUnsignedVarint();
    ByteBuffer readByteBuffer(int length);
    int readVarint();
    long readVarlong();
    // Number of bytes still available to read.
    int remaining();

    /** Read {@code length} bytes and decode them as a UTF-8 string. */
    default String readString(int length) {
        byte[] arr = readArray(length);
        return new String(arr, StandardCharsets.UTF_8);
    }

    /**
     * Read an unrecognized tagged field's raw bytes and append it to
     * {@code unknowns}, lazily allocating the list on first use.
     *
     * @return the (possibly newly created) list containing the new field
     */
    default List<RawTaggedField> readUnknownTaggedField(List<RawTaggedField> unknowns, int tag, int size) {
        if (unknowns == null) {
            unknowns = new ArrayList<>();
        }
        byte[] data = readArray(size);
        unknowns.add(new RawTaggedField(tag, data));
        return unknowns;
    }

    /**
     * Read {@code length} bytes as a record set; a negative length means no
     * records are present and null is returned.
     */
    default MemoryRecords readRecords(int length) {
        if (length < 0) {
            // no records
            return null;
        } else {
            ByteBuffer recordsBuffer = readByteBuffer(length);
            return MemoryRecords.readableRecords(recordsBuffer);
        }
    }

    /**
     * Read a UUID with the most significant digits first.
     */
    default Uuid readUuid() {
        return new Uuid(readLong(), readLong());
    }

    /** Read a 16-bit unsigned value, widened to int. */
    default int readUnsignedShort() {
        return Short.toUnsignedInt(readShort());
    }

    /** Read a 32-bit unsigned value, widened to long. */
    default long readUnsignedInt() {
        return Integer.toUnsignedLong(readInt());
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/SendBuilder.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol;

import org.apache.kafka.common.network.ByteBufferSend;
import org.apache.kafka.common.network.Send;
import org.apache.kafka.common.record.BaseRecords;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MultiRecordsSend;
import org.apache.kafka.common.record.UnalignedMemoryRecords;
import org.apache.kafka.common.requests.RequestHeader;
import org.apache.kafka.common.requests.ResponseHeader;
import org.apache.kafka.common.utils.ByteUtils;

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.List;
import java.util.Queue;

/**
 * This class provides a way to build {@link Send} objects for network transmission
 * from generated {@link org.apache.kafka.common.protocol.ApiMessage} types without
 * allocating new space for "zero-copy" fields (see {@link #writeByteBuffer(ByteBuffer)}
 * and {@link #writeRecords(BaseRecords)}).
 *
 * See {@link org.apache.kafka.common.requests.EnvelopeRequest#toSend(RequestHeader)}
 * for example usage.
 */
public class SendBuilder implements Writable {
    // Scratch buffer for copied primitive fields. The mark tracks the start
    // of data that has not yet been flushed into `buffers`.
    private final ByteBuffer buffer;

    // Completed Send objects (e.g. from non-memory record sets), in order.
    private final Queue<Send> sends = new ArrayDeque<>(1);
    private long sizeOfSends = 0;

    // Pending byte buffers (scratch slices and zero-copy buffers) that have
    // not yet been wrapped in a Send, in transmission order.
    private final List<ByteBuffer> buffers = new ArrayList<>();
    private long sizeOfBuffers = 0;

    // `size` must cover all non-zero-copy bytes that will be written.
    SendBuilder(int size) {
        this.buffer = ByteBuffer.allocate(size);
        this.buffer.mark();
    }

    @Override
    public void writeByte(byte val) {
        buffer.put(val);
    }

    @Override
    public void writeShort(short val) {
        buffer.putShort(val);
    }

    @Override
    public void writeInt(int val) {
        buffer.putInt(val);
    }

    @Override
    public void writeLong(long val) {
        buffer.putLong(val);
    }

    @Override
    public void writeDouble(double val) {
        buffer.putDouble(val);
    }

    @Override
    public void writeByteArray(byte[] arr) {
        buffer.put(arr);
    }

    @Override
    public void writeUnsignedVarint(int i) {
        ByteUtils.writeUnsignedVarint(i, buffer);
    }

    /**
     * Write a byte buffer. The reference to the underlying buffer will
     * be retained in the result of {@link #build()}.
     *
     * @param buf the buffer to write
     */
    @Override
    public void writeByteBuffer(ByteBuffer buf) {
        flushPendingBuffer();
        addBuffer(buf.duplicate());
    }

    @Override
    public void writeVarint(int i) {
        ByteUtils.writeVarint(i, buffer);
    }

    @Override
    public void writeVarlong(long i) {
        ByteUtils.writeVarlong(i, buffer);
    }

    // Queue a buffer for transmission and track its contribution to the size.
    private void addBuffer(ByteBuffer buffer) {
        buffers.add(buffer);
        sizeOfBuffers += buffer.remaining();
    }

    // Queue a completed Send and track its contribution to the size.
    private void addSend(Send send) {
        sends.add(send);
        sizeOfSends += send.size();
    }

    private void clearBuffers() {
        buffers.clear();
        sizeOfBuffers = 0;
    }

    /**
     * Write a record set. The underlying record data will be retained
     * in the result of {@link #build()}. See {@link BaseRecords#toSend()}.
     *
     * @param records the records to write
     */
    @Override
    public void writeRecords(BaseRecords records) {
        if (records instanceof MemoryRecords) {
            // In-memory records: retain the backing buffer directly (zero-copy).
            flushPendingBuffer();
            addBuffer(((MemoryRecords) records).buffer());
        } else if (records instanceof UnalignedMemoryRecords) {
            flushPendingBuffer();
            addBuffer(((UnalignedMemoryRecords) records).buffer());
        } else {
            // Other record types (e.g. file-backed) supply their own Send.
            flushPendingSend();
            addSend(records.toSend());
        }
    }

    // Wrap all pending buffers (including any unflushed scratch data) into a
    // single ByteBufferSend and queue it.
    private void flushPendingSend() {
        flushPendingBuffer();
        if (!buffers.isEmpty()) {
            ByteBuffer[] byteBufferArray = buffers.toArray(new ByteBuffer[0]);
            addSend(new ByteBufferSend(byteBufferArray, sizeOfBuffers));
            clearBuffers();
        }
    }

    // Slice the scratch bytes written since the last mark into `buffers`,
    // then re-mark so future writes start a new slice.
    private void flushPendingBuffer() {
        int latestPosition = buffer.position();
        buffer.reset();
        if (latestPosition > buffer.position()) {
            buffer.limit(latestPosition);
            addBuffer(buffer.slice());
            buffer.position(latestPosition);
            buffer.limit(buffer.capacity());
            buffer.mark();
        }
    }

    /**
     * Finalize the builder: flush any pending data and return either the
     * single queued Send or a {@link MultiRecordsSend} combining them all.
     */
    public Send build() {
        flushPendingSend();
        if (sends.size() == 1) {
            return sends.poll();
        } else {
            return new MultiRecordsSend(sends, sizeOfSends);
        }
    }

    /** Build a size-prefixed Send for a request: header followed by body. */
    public static Send buildRequestSend(
        RequestHeader header,
        Message apiRequest
    ) {
        return buildSend(
            header.data(),
            header.headerVersion(),
            apiRequest,
            header.apiVersion()
        );
    }

    /** Build a size-prefixed Send for a response: header followed by body. */
    public static Send buildResponseSend(
        ResponseHeader header,
        Message apiResponse,
        short apiVersion
    ) {
        return buildSend(
            header.data(),
            header.headerVersion(),
            apiResponse,
            apiVersion
        );
    }

    // Shared request/response path: pre-compute sizes in one pass, then write
    // the 4-byte total-size prefix, the header, and the body. The scratch
    // buffer only needs the non-zero-copy bytes plus the 4-byte prefix.
    private static Send buildSend(
        Message header,
        short headerVersion,
        Message apiMessage,
        short apiVersion
    ) {
        ObjectSerializationCache serializationCache = new ObjectSerializationCache();
        MessageSizeAccumulator messageSize = new MessageSizeAccumulator();
        header.addSize(messageSize, serializationCache, headerVersion);
        apiMessage.addSize(messageSize, serializationCache, apiVersion);
        SendBuilder builder = new SendBuilder(messageSize.sizeExcludingZeroCopy() + 4);
        builder.writeInt(messageSize.totalSize());
        header.write(builder, serializationCache, headerVersion);
        apiMessage.write(builder, serializationCache, apiVersion);
        return builder.build();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/Writable.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.record.BaseRecords;
import org.apache.kafka.common.record.MemoryRecords;

import java.nio.ByteBuffer;

/**
 * Abstraction for writing protocol primitives to a byte sink. Generated
 * message classes serialize themselves through this interface; the dual of
 * {@link Readable}.
 */
public interface Writable {
    void writeByte(byte val);
    void writeShort(short val);
    void writeInt(int val);
    void writeLong(long val);
    void writeDouble(double val);
    void writeByteArray(byte[] arr);
    void writeUnsignedVarint(int i);
    void writeByteBuffer(ByteBuffer buf);
    void writeVarint(int i);
    void writeVarlong(long i);

    /**
     * Write a record set. The default implementation only supports in-memory
     * records (written via their backing buffer); implementations such as
     * SendBuilder override this to handle other record types.
     */
    default void writeRecords(BaseRecords records) {
        if (records instanceof MemoryRecords) {
            MemoryRecords memRecords = (MemoryRecords) records;
            writeByteBuffer(memRecords.buffer());
        } else {
            throw new UnsupportedOperationException("Unsupported record type " + records.getClass());
        }
    }

    /** Write a UUID as two longs, most significant bits first (mirrors Readable#readUuid). */
    default void writeUuid(Uuid uuid) {
        writeLong(uuid.getMostSignificantBits());
        writeLong(uuid.getLeastSignificantBits());
    }

    default void writeUnsignedShort(int i) {
        // The setter functions in the generated code prevent us from setting
        // ints outside the valid range of a short.
        writeShort((short) i);
    }

    /** Write a 32-bit unsigned value; the narrowing cast keeps the low 32 bits. */
    default void writeUnsignedInt(long i) {
        writeInt((int) i);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides a common implementation of the Kafka Wire Protocol. * <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong> */ package org.apache.kafka.common.protocol;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/types/ArrayOf.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol.types;

import org.apache.kafka.common.protocol.types.Type.DocumentedType;

import java.nio.ByteBuffer;
import java.util.Optional;

/**
 * Represents a type for an array of a particular type.
 *
 * Wire format: a 4-byte signed element count N (-1 for a null array when
 * nullable), followed by N serialized elements.
 */
public class ArrayOf extends DocumentedType {

    private static final String ARRAY_TYPE_NAME = "ARRAY";

    private final Type type;
    private final boolean nullable;

    public ArrayOf(Type type) {
        this(type, false);
    }

    /** Factory for an array type that permits null values on the wire. */
    public static ArrayOf nullable(Type type) {
        return new ArrayOf(type, true);
    }

    private ArrayOf(Type type, boolean nullable) {
        this.type = type;
        this.nullable = nullable;
    }

    @Override
    public boolean isNullable() {
        return nullable;
    }

    @Override
    public void write(ByteBuffer buffer, Object o) {
        if (o == null) {
            // Null arrays are encoded as length -1.
            buffer.putInt(-1);
            return;
        }
        Object[] objs = (Object[]) o;
        int size = objs.length;
        buffer.putInt(size);
        for (Object obj : objs)
            type.write(buffer, obj);
    }

    @Override
    public Object read(ByteBuffer buffer) {
        int size = buffer.getInt();
        if (size < 0 && isNullable())
            return null;
        else if (size < 0)
            throw new SchemaException("Array size " + size + " cannot be negative");
        // Sanity bound: each element needs at least one byte, so a size larger
        // than the remaining bytes cannot be valid.
        if (size > buffer.remaining())
            throw new SchemaException("Error reading array of size " + size + ", only " + buffer.remaining() + " bytes available");
        Object[] objs = new Object[size];
        for (int i = 0; i < size; i++)
            objs[i] = type.read(buffer);
        return objs;
    }

    @Override
    public int sizeOf(Object o) {
        // 4 bytes for the length prefix, even when the array is null.
        int size = 4;
        if (o == null)
            return size;
        Object[] objs = (Object[]) o;
        for (Object obj : objs)
            size += type.sizeOf(obj);
        return size;
    }

    @Override
    public Optional<Type> arrayElementType() {
        return Optional.of(type);
    }

    @Override
    public String toString() {
        return ARRAY_TYPE_NAME + "(" + type + ")";
    }

    @Override
    public Object[] validate(Object item) {
        try {
            if (isNullable() && item == null)
                return null;
            Object[] array = (Object[]) item;
            for (Object obj : array)
                type.validate(obj);
            return array;
        } catch (ClassCastException e) {
            throw new SchemaException("Not an Object[].");
        }
    }

    @Override
    public String typeName() {
        return ARRAY_TYPE_NAME;
    }

    @Override
    public String documentation() {
        return "Represents a sequence of objects of a given type T. " +
            "Type T can be either a primitive type (e.g. " + STRING + ") or a structure. " +
            "First, the length N is given as an " + INT32 + ". Then N instances of type T follow. " +
            "A null array is represented with a length of -1. " +
            "In protocol documentation an array of T instances is referred to as [T].";
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/types/BoundField.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol.types;

/**
 * A field definition bound to a particular schema.
 */
public class BoundField {
    // The underlying field definition (name, type, docs).
    public final Field def;
    // Position of this field within the owning schema.
    final int index;
    // The schema this field belongs to.
    final Schema schema;

    public BoundField(Field def, Schema schema, int index) {
        this.def = def;
        this.schema = schema;
        this.index = index;
    }

    @Override
    public String toString() {
        return def.name + ":" + def.type;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/types/CompactArrayOf.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol.types;

import org.apache.kafka.common.protocol.types.Type.DocumentedType;
import org.apache.kafka.common.utils.ByteUtils;

import java.nio.ByteBuffer;
import java.util.Optional;

/**
 * Represents a type for a compact array of a particular type.
 * A compact array represents its length with a varint rather than a
 * fixed-length field.
 *
 * Wire format: the element count is encoded as an unsigned varint holding
 * N + 1, so 0 denotes a null array and N+1 denotes N elements.
 */
public class CompactArrayOf extends DocumentedType {
    private static final String COMPACT_ARRAY_TYPE_NAME = "COMPACT_ARRAY";

    private final Type type;
    private final boolean nullable;

    public CompactArrayOf(Type type) {
        this(type, false);
    }

    /** Factory for a compact array type that permits null values on the wire. */
    public static CompactArrayOf nullable(Type type) {
        return new CompactArrayOf(type, true);
    }

    private CompactArrayOf(Type type, boolean nullable) {
        this.type = type;
        this.nullable = nullable;
    }

    @Override
    public boolean isNullable() {
        return nullable;
    }

    @Override
    public void write(ByteBuffer buffer, Object o) {
        if (o == null) {
            // Null arrays are encoded as varint 0.
            ByteUtils.writeUnsignedVarint(0, buffer);
            return;
        }
        Object[] objs = (Object[]) o;
        int size = objs.length;
        // Length is shifted by one so 0 remains reserved for null.
        ByteUtils.writeUnsignedVarint(size + 1, buffer);
        for (Object obj : objs)
            type.write(buffer, obj);
    }

    @Override
    public Object read(ByteBuffer buffer) {
        int n = ByteUtils.readUnsignedVarint(buffer);
        if (n == 0) {
            if (isNullable()) {
                return null;
            } else {
                throw new SchemaException("This array is not nullable.");
            }
        }
        int size = n - 1;
        // Sanity bound: each element needs at least one byte, so a size larger
        // than the remaining bytes cannot be valid.
        if (size > buffer.remaining())
            throw new SchemaException("Error reading array of size " + size + ", only " + buffer.remaining() + " bytes available");
        Object[] objs = new Object[size];
        for (int i = 0; i < size; i++)
            objs[i] = type.read(buffer);
        return objs;
    }

    @Override
    public int sizeOf(Object o) {
        if (o == null) {
            // Varint 0 occupies a single byte.
            return 1;
        }
        Object[] objs = (Object[]) o;
        int size = ByteUtils.sizeOfUnsignedVarint(objs.length + 1);
        for (Object obj : objs) {
            size += type.sizeOf(obj);
        }
        return size;
    }

    @Override
    public Optional<Type> arrayElementType() {
        return Optional.of(type);
    }

    @Override
    public String toString() {
        return COMPACT_ARRAY_TYPE_NAME + "(" + type + ")";
    }

    @Override
    public Object[] validate(Object item) {
        try {
            if (isNullable() && item == null)
                return null;
            Object[] array = (Object[]) item;
            for (Object obj : array)
                type.validate(obj);
            return array;
        } catch (ClassCastException e) {
            throw new SchemaException("Not an Object[]. Found class " + item.getClass().getSimpleName());
        }
    }

    @Override
    public String typeName() {
        return COMPACT_ARRAY_TYPE_NAME;
    }

    @Override
    public String documentation() {
        return "Represents a sequence of objects of a given type T. " +
            "Type T can be either a primitive type (e.g. " + STRING + ") or a structure. " +
            "First, the length N + 1 is given as an UNSIGNED_VARINT. Then N instances of type T follow. " +
            "A null array is represented with a length of 0. " +
            "In protocol documentation an array of T instances is referred to as [T].";
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/types/Field.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol.types;

/**
 * The definition of a single named, typed field within a schema, optionally
 * carrying a default value. The nested subclasses are typed conveniences that
 * bind a field name to a specific protocol {@link Type}.
 */
public class Field {
    // The field's name as it appears in the schema.
    public final String name;
    // Human-readable documentation for the field; may be null.
    public final String docString;
    // The wire type used to serialize/deserialize this field.
    public final Type type;
    // Whether defaultValue is meaningful (null is itself a legal default).
    public final boolean hasDefaultValue;
    // The default value; only consulted when hasDefaultValue is true.
    public final Object defaultValue;

    /**
     * Creates a field definition.
     *
     * @param name the field name
     * @param type the wire type of the field
     * @param docString documentation for the field, or null
     * @param hasDefaultValue whether {@code defaultValue} should be used when absent
     * @param defaultValue the default value; validated against {@code type} if
     *        {@code hasDefaultValue} is true, so an invalid default fails fast here
     */
    public Field(String name, Type type, String docString, boolean hasDefaultValue, Object defaultValue) {
        this.name = name;
        this.docString = docString;
        this.type = type;
        this.hasDefaultValue = hasDefaultValue;
        this.defaultValue = defaultValue;
        // Fail fast at construction time if the default cannot be serialized as `type`.
        if (hasDefaultValue)
            type.validate(defaultValue);
    }

    /** A field with no default value. */
    public Field(String name, Type type, String docString) {
        this(name, type, docString, false, null);
    }

    /** A field with the given default value. */
    public Field(String name, Type type, String docString, Object defaultValue) {
        this(name, type, docString, true, defaultValue);
    }

    /** A field with no documentation and no default value. */
    public Field(String name, Type type) {
        this(name, type, null, false, null);
    }

    /** An INT8 (signed byte) field. */
    public static class Int8 extends Field {
        public Int8(String name, String docString) {
            super(name, Type.INT8, docString, false, null);
        }

        public Int8(String name, String docString, byte defaultValue) {
            super(name, Type.INT8, docString, true, defaultValue);
        }
    }

    /** An INT32 field. */
    public static class Int32 extends Field {
        public Int32(String name, String docString) {
            super(name, Type.INT32, docString, false, null);
        }

        public Int32(String name, String docString, int defaultValue) {
            super(name, Type.INT32, docString, true, defaultValue);
        }
    }

    /** An INT64 field. */
    public static class Int64 extends Field {
        public Int64(String name, String docString) {
            super(name, Type.INT64, docString, false, null);
        }

        public Int64(String name, String docString, long defaultValue) {
            super(name, Type.INT64, docString, true, defaultValue);
        }
    }

    /** A UUID field. */
    public static class UUID extends Field {
        public UUID(String name, String docString) {
            super(name, Type.UUID, docString, false, null);
        }

        // NOTE(review): defaultValue is typed as this nested Field.UUID class, not
        // as a Uuid value; such a default would fail Type.UUID validation in the
        // Field constructor. Presumably this overload is unused — confirm before
        // relying on it.
        public UUID(String name, String docString, UUID defaultValue) {
            super(name, Type.UUID, docString, true, defaultValue);
        }
    }

    /** An INT16 field (no default supported). */
    public static class Int16 extends Field {
        public Int16(String name, String docString) {
            super(name, Type.INT16, docString, false, null);
        }
    }

    /** An unsigned 16-bit field, carried as an int. */
    public static class Uint16 extends Field {
        public Uint16(String name, String docString) {
            super(name, Type.UINT16, docString, false, null);
        }
    }

    /** An unsigned 32-bit field, carried as a long. */
    public static class Uint32 extends Field {
        public Uint32(String name, String docString) {
            super(name, Type.UNSIGNED_INT32, docString, false, null);
        }
    }

    /** A FLOAT64 field. */
    public static class Float64 extends Field {
        public Float64(String name, String docString) {
            super(name, Type.FLOAT64, docString, false, null);
        }

        public Float64(String name, String docString, double defaultValue) {
            super(name, Type.FLOAT64, docString, true, defaultValue);
        }
    }

    /** A non-nullable, length-prefixed string field. */
    public static class Str extends Field {
        public Str(String name, String docString) {
            super(name, Type.STRING, docString, false, null);
        }
    }

    /** A non-nullable string field with a compact (varint) length prefix. */
    public static class CompactStr extends Field {
        public CompactStr(String name, String docString) {
            super(name, Type.COMPACT_STRING, docString, false, null);
        }
    }

    /** A nullable, length-prefixed string field. */
    public static class NullableStr extends Field {
        public NullableStr(String name, String docString) {
            super(name, Type.NULLABLE_STRING, docString, false, null);
        }
    }

    /** A nullable string field with a compact (varint) length prefix. */
    public static class CompactNullableStr extends Field {
        public CompactNullableStr(String name, String docString) {
            super(name, Type.COMPACT_NULLABLE_STRING, docString, false, null);
        }
    }

    /** A BOOLEAN field. */
    public static class Bool extends Field {
        public Bool(String name, String docString) {
            super(name, Type.BOOLEAN, docString, false, null);
        }
    }

    /** An array field with a fixed-length count prefix. */
    public static class Array extends Field {
        public Array(String name, Type elementType, String docString) {
            super(name, new ArrayOf(elementType), docString, false, null);
        }
    }

    /** An array field with a compact (varint) count prefix. */
    public static class CompactArray extends Field {
        public CompactArray(String name, Type elementType, String docString) {
            super(name, new CompactArrayOf(elementType), docString, false, null);
        }
    }

    /** The well-known field holding a message's tagged-fields section. */
    public static class TaggedFieldsSection extends Field {
        private static final String NAME = "_tagged_fields";
        private static final String DOC_STRING = "The tagged fields";

        /**
         * Create a new TaggedFieldsSection with the given tags and fields.
         *
         * @param fields This is an array containing Integer tags followed
         *               by associated Field objects.
         * @return The new {@link TaggedFieldsSection}
         */
        public static TaggedFieldsSection of(Object... fields) {
            return new TaggedFieldsSection(TaggedFields.of(fields));
        }

        public TaggedFieldsSection(Type type) {
            super(NAME, type, DOC_STRING, false, null);
        }
    }

    /**
     * A builder-style helper for declaring an array-of-struct field: the name
     * and documentation are fixed first, and the element schema is supplied
     * later via one of the withFields variants.
     */
    public static class ComplexArray {
        public final String name;
        public final String docString;

        public ComplexArray(String name, String docString) {
            this.name = name;
            this.docString = docString;
        }

        /** Builds a non-nullable array field whose elements follow the given schema. */
        public Field withFields(Field... fields) {
            Schema elementType = new Schema(fields);
            return new Field(name, new ArrayOf(elementType), docString, false, null);
        }

        /** Builds a nullable array field whose elements follow the given schema. */
        public Field nullableWithFields(Field... fields) {
            Schema elementType = new Schema(fields);
            return new Field(name, ArrayOf.nullable(elementType), docString, false, null);
        }

        /** Builds a non-nullable array field, overriding the stored docString. */
        public Field withFields(String docStringOverride, Field... fields) {
            Schema elementType = new Schema(fields);
            return new Field(name, new ArrayOf(elementType), docStringOverride, false, null);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/types/RawTaggedField.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.protocol.types; import java.util.Arrays; public class RawTaggedField { private final int tag; private final byte[] data; public RawTaggedField(int tag, byte[] data) { this.tag = tag; this.data = data; } public int tag() { return tag; } public byte[] data() { return data; } public int size() { return data.length; } @Override public boolean equals(Object o) { if ((o == null) || (!o.getClass().equals(getClass()))) { return false; } RawTaggedField other = (RawTaggedField) o; return tag == other.tag && Arrays.equals(data, other.data); } @Override public int hashCode() { return tag ^ Arrays.hashCode(data); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/types/RawTaggedFieldWriter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.protocol.types; import org.apache.kafka.common.protocol.Writable; import java.util.ArrayList; import java.util.List; import java.util.ListIterator; /** * The RawTaggedFieldWriter is used by Message subclasses to serialize their * lists of raw tags. */ public class RawTaggedFieldWriter { private static final RawTaggedFieldWriter EMPTY_WRITER = new RawTaggedFieldWriter(new ArrayList<>(0)); private final List<RawTaggedField> fields; private final ListIterator<RawTaggedField> iter; private int prevTag; public static RawTaggedFieldWriter forFields(List<RawTaggedField> fields) { if (fields == null) { return EMPTY_WRITER; } return new RawTaggedFieldWriter(fields); } private RawTaggedFieldWriter(List<RawTaggedField> fields) { this.fields = fields; this.iter = this.fields.listIterator(); this.prevTag = -1; } public int numFields() { return fields.size(); } public void writeRawTags(Writable writable, int nextDefinedTag) { while (iter.hasNext()) { RawTaggedField field = iter.next(); int tag = field.tag(); if (tag >= nextDefinedTag) { if (tag == nextDefinedTag) { // We must not have a raw tag field that duplicates the tag of another field. 
throw new RuntimeException("Attempted to use tag " + tag + " as an " + "undefined tag."); } iter.previous(); return; } if (tag <= prevTag) { // The raw tag field list must be sorted by tag, and there must not be // any duplicate tags. throw new RuntimeException("Invalid raw tag field list: tag " + tag + " comes after tag " + prevTag + ", but is not higher than it."); } writable.writeUnsignedVarint(field.tag()); writable.writeUnsignedVarint(field.data().length); writable.writeByteArray(field.data()); prevTag = tag; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/types/Schema.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol.types;

import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

/**
 * The schema for a compound record definition: an ordered list of named,
 * typed fields that together serialize to and from a {@link Struct}.
 */
public class Schema extends Type {

    // Shared empty value array used for the cached struct of field-less schemas.
    private final static Object[] NO_VALUES = new Object[0];

    // Fields in declaration order; a field's array index equals its BoundField index.
    private final BoundField[] fields;
    // Name -> field lookup for the slower by-name accessors.
    private final Map<String, BoundField> fieldsByName;
    // Whether read() substitutes defaults for fields missing at the end of the buffer.
    private final boolean tolerateMissingFieldsWithDefaults;
    // Non-null only for field-less schemas, where every read() yields the same struct.
    private final Struct cachedStruct;

    /**
     * Construct the schema with a given list of its field values
     *
     * @param fs the fields of this schema
     *
     * @throws SchemaException If the given list have duplicate fields
     */
    public Schema(Field... fs) {
        this(false, fs);
    }

    /**
     * Construct the schema with a given list of its field values and the ability to tolerate
     * missing optional fields with defaults at the end of the schema definition.
     *
     * @param tolerateMissingFieldsWithDefaults whether to accept records with missing optional
     *                                          fields the end of the schema
     * @param fs the fields of this schema
     *
     * @throws SchemaException If the given list have duplicate fields
     */
    public Schema(boolean tolerateMissingFieldsWithDefaults, Field... fs) {
        this.fields = new BoundField[fs.length];
        this.fieldsByName = new HashMap<>();
        this.tolerateMissingFieldsWithDefaults = tolerateMissingFieldsWithDefaults;
        for (int i = 0; i < this.fields.length; i++) {
            Field def = fs[i];
            if (fieldsByName.containsKey(def.name))
                throw new SchemaException("Schema contains a duplicate field: " + def.name);
            this.fields[i] = new BoundField(def, this, i);
            this.fieldsByName.put(def.name, this.fields[i]);
        }
        // 6 schemas have no fields at the time of this writing (3 versions each of
        // list_groups and api_versions); for such schemas there's no point in even
        // creating a unique Struct object when deserializing, so one is cached here.
        this.cachedStruct = this.fields.length > 0 ? null : new Struct(this, NO_VALUES);
    }

    /**
     * Write a struct to the buffer. Each field value is validated against its
     * type before being written.
     *
     * @throws SchemaException if validation or writing of any field fails; the
     *         message names the offending field
     */
    @Override
    public void write(ByteBuffer buffer, Object o) {
        Struct r = (Struct) o;
        for (BoundField field : fields) {
            try {
                Object value = field.def.type.validate(r.get(field));
                field.def.type.write(buffer, value);
            } catch (Exception e) {
                // Fall back to the exception class name when there is no message,
                // so the error always identifies the field and a cause.
                throw new SchemaException("Error writing field '" + field.def.name +
                        "': " + (e.getMessage() == null ? e.getClass().getName() : e.getMessage()));
            }
        }
    }

    /**
     * Read a struct from the buffer. If this schema is configured to tolerate missing
     * optional fields at the end of the buffer, these fields are replaced with their default
     * values; otherwise, if the schema does not tolerate missing fields, or if missing fields
     * don't have a default value, a {@code SchemaException} is thrown to signify that mandatory
     * fields are missing.
     */
    @Override
    public Struct read(ByteBuffer buffer) {
        // Field-less schemas always deserialize to the same shared struct.
        if (cachedStruct != null) {
            return cachedStruct;
        }
        Object[] objects = new Object[fields.length];
        for (int i = 0; i < fields.length; i++) {
            try {
                if (tolerateMissingFieldsWithDefaults) {
                    if (buffer.hasRemaining()) {
                        objects[i] = fields[i].def.type.read(buffer);
                    } else if (fields[i].def.hasDefaultValue) {
                        // Buffer exhausted: substitute the field's default.
                        objects[i] = fields[i].def.defaultValue;
                    } else {
                        throw new SchemaException("Missing value for field '" + fields[i].def.name +
                                "' which has no default value.");
                    }
                } else {
                    objects[i] = fields[i].def.type.read(buffer);
                }
            } catch (Exception e) {
                throw new SchemaException("Error reading field '" + fields[i].def.name +
                        "': " + (e.getMessage() == null ? e.getClass().getName() : e.getMessage()));
            }
        }
        return new Struct(this, objects);
    }

    /**
     * The size of the given record
     *
     * @throws SchemaException if the size of any field cannot be computed; the
     *         message names the offending field
     */
    @Override
    public int sizeOf(Object o) {
        int size = 0;
        Struct r = (Struct) o;
        for (BoundField field : fields) {
            try {
                size += field.def.type.sizeOf(r.get(field));
            } catch (Exception e) {
                throw new SchemaException("Error computing size for field '" + field.def.name +
                        "': " + (e.getMessage() == null ? e.getClass().getName() : e.getMessage()));
            }
        }
        return size;
    }

    /**
     * The number of fields in this schema
     */
    public int numFields() {
        return this.fields.length;
    }

    /**
     * Get a field by its slot in the record array
     *
     * @param slot The slot at which this field sits
     * @return The field
     */
    public BoundField get(int slot) {
        return this.fields[slot];
    }

    /**
     * Get a field by its name
     *
     * @param name The name of the field
     * @return The field, or null if no field by that name exists
     */
    public BoundField get(String name) {
        return this.fieldsByName.get(name);
    }

    /**
     * Get all the fields in this schema
     */
    public BoundField[] fields() {
        return this.fields;
    }

    /**
     * Display a string representation of the schema, e.g. {field1,field2}
     */
    @Override
    public String toString() {
        StringBuilder b = new StringBuilder();
        b.append('{');
        for (int i = 0; i < this.fields.length; i++) {
            b.append(this.fields[i].toString());
            if (i < this.fields.length - 1)
                b.append(',');
        }
        b.append("}");
        return b.toString();
    }

    /**
     * Validates that the item is a Struct and that each of its field values
     * validates against the corresponding field type.
     *
     * @throws SchemaException if the item is not a Struct or any field is invalid
     */
    @Override
    public Struct validate(Object item) {
        try {
            Struct struct = (Struct) item;
            for (BoundField field : fields) {
                try {
                    field.def.type.validate(struct.get(field));
                } catch (SchemaException e) {
                    // Re-wrap so the error names the offending field.
                    throw new SchemaException("Invalid value for field '" + field.def.name +
                            "': " + e.getMessage());
                }
            }
            return struct;
        } catch (ClassCastException e) {
            throw new SchemaException("Not a Struct.");
        }
    }

    /**
     * Recursively visits this schema, its nested schemas, and array types,
     * invoking the visitor on every node encountered.
     */
    public void walk(Visitor visitor) {
        Objects.requireNonNull(visitor, "visitor must be non-null");
        handleNode(this, visitor);
    }

    // Depth-first traversal helper for walk(): schemas recurse into their fields,
    // array types recurse into their element type, leaves are visited directly.
    private static void handleNode(Type node, Visitor visitor) {
        if (node instanceof Schema) {
            Schema schema = (Schema) node;
            visitor.visit(schema);
            for (BoundField f : schema.fields())
                handleNode(f.def.type, visitor);
        } else if (node.isArray()) {
            visitor.visit(node);
            handleNode(node.arrayElementType().get(), visitor);
        } else {
            visitor.visit(node);
        }
    }

    /**
     * Override one or more of the visit methods with the desired logic.
     */
    public static abstract class Visitor {
        public void visit(Schema schema) {}

        public void visit(Type field) {}
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/types/SchemaException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol.types;

import org.apache.kafka.common.KafkaException;

/**
 * Thrown if the protocol schema validation fails while parsing request or response.
 * As a KafkaException subtype this is an unchecked exception.
 */
public class SchemaException extends KafkaException {

    private static final long serialVersionUID = 1L;

    /** @param message a description of the schema violation */
    public SchemaException(String message) {
        super(message);
    }

    /**
     * @param message a description of the schema violation
     * @param cause the underlying failure, preserved for diagnostics
     */
    public SchemaException(String message, Throwable cause) {
        super(message, cause);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/types/Struct.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.protocol.types; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.record.BaseRecords; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Objects; import static org.apache.kafka.common.protocol.MessageUtil.UNSIGNED_INT_MAX; import static org.apache.kafka.common.protocol.MessageUtil.UNSIGNED_SHORT_MAX; /** * A record that can be serialized and deserialized according to a pre-defined schema */ public class Struct { private final Schema schema; private final Object[] values; Struct(Schema schema, Object[] values) { this.schema = schema; this.values = values; } public Struct(Schema schema) { this.schema = schema; this.values = new Object[this.schema.numFields()]; } /** * The schema for this struct. */ public Schema schema() { return this.schema; } /** * Return the value of the given pre-validated field, or if the value is missing return the default value. * * @param field The field for which to get the default value * @throws SchemaException if the field has no value and has no default. 
*/ private Object getFieldOrDefault(BoundField field) { Object value = this.values[field.index]; if (value != null) return value; else if (field.def.hasDefaultValue) return field.def.defaultValue; else if (field.def.type.isNullable()) return null; else throw new SchemaException("Missing value for field '" + field.def.name + "' which has no default value."); } /** * Get the value for the field directly by the field index with no lookup needed (faster!) * * @param field The field to look up * @return The value for that field. * @throws SchemaException if the field has no value and has no default. */ public Object get(BoundField field) { validateField(field); return getFieldOrDefault(field); } public Byte get(Field.Int8 field) { return getByte(field.name); } public Integer get(Field.Int32 field) { return getInt(field.name); } public Long get(Field.Int64 field) { return getLong(field.name); } public Uuid get(Field.UUID field) { return getUuid(field.name); } public Integer get(Field.Uint16 field) { return getInt(field.name); } public Long get(Field.Uint32 field) { return getLong(field.name); } public Short get(Field.Int16 field) { return getShort(field.name); } public Double get(Field.Float64 field) { return getDouble(field.name); } public String get(Field.Str field) { return getString(field.name); } public String get(Field.NullableStr field) { return getString(field.name); } public Boolean get(Field.Bool field) { return getBoolean(field.name); } public Object[] get(Field.Array field) { return getArray(field.name); } public Object[] get(Field.ComplexArray field) { return getArray(field.name); } public Long getOrElse(Field.Int64 field, long alternative) { if (hasField(field.name)) return getLong(field.name); return alternative; } public Uuid getOrElse(Field.UUID field, Uuid alternative) { if (hasField(field.name)) return getUuid(field.name); return alternative; } public Short getOrElse(Field.Int16 field, short alternative) { if (hasField(field.name)) return 
getShort(field.name); return alternative; } public Byte getOrElse(Field.Int8 field, byte alternative) { if (hasField(field.name)) return getByte(field.name); return alternative; } public Integer getOrElse(Field.Int32 field, int alternative) { if (hasField(field.name)) return getInt(field.name); return alternative; } public Double getOrElse(Field.Float64 field, double alternative) { if (hasField(field.name)) return getDouble(field.name); return alternative; } public String getOrElse(Field.NullableStr field, String alternative) { if (hasField(field.name)) return getString(field.name); return alternative; } public String getOrElse(Field.Str field, String alternative) { if (hasField(field.name)) return getString(field.name); return alternative; } public boolean getOrElse(Field.Bool field, boolean alternative) { if (hasField(field.name)) return getBoolean(field.name); return alternative; } public Object[] getOrEmpty(Field.Array field) { if (hasField(field.name)) return getArray(field.name); return new Object[0]; } public Object[] getOrEmpty(Field.ComplexArray field) { if (hasField(field.name)) return getArray(field.name); return new Object[0]; } /** * Get the record value for the field with the given name by doing a hash table lookup (slower!) * * @param name The name of the field * @return The value in the record * @throws SchemaException If no such field exists */ public Object get(String name) { BoundField field = schema.get(name); if (field == null) throw new SchemaException("No such field: " + name); return getFieldOrDefault(field); } /** * Check if the struct contains a field. * @param name * @return Whether a field exists. 
*/
    public boolean hasField(String name) { return schema.get(name) != null; }

    public boolean hasField(Field def) { return schema.get(def.name) != null; }

    public boolean hasField(Field.ComplexArray def) { return schema.get(def.name) != null; }

    // Typed accessors: each one simply casts the stored value to the requested
    // type; a ClassCastException results if the field holds a different type.
    public Struct getStruct(BoundField field) { return (Struct) get(field); }
    public Struct getStruct(String name) { return (Struct) get(name); }
    public Byte getByte(BoundField field) { return (Byte) get(field); }
    // NOTE(review): returns the primitive byte, so a null stored value triggers
    // a NullPointerException via auto-unboxing — confirm callers never pass a
    // nullable field here.
    public byte getByte(String name) { return (Byte) get(name); }
    public BaseRecords getRecords(String name) { return (BaseRecords) get(name); }
    public Short getShort(BoundField field) { return (Short) get(field); }
    public Short getShort(String name) { return (Short) get(name); }
    public Integer getUnsignedShort(BoundField field) { return (Integer) get(field); }
    public Integer getUnsignedShort(String name) { return (Integer) get(name); }
    public Integer getInt(BoundField field) { return (Integer) get(field); }
    public Integer getInt(String name) { return (Integer) get(name); }
    public Long getUnsignedInt(String name) { return (Long) get(name); }
    public Long getUnsignedInt(BoundField field) { return (Long) get(field); }
    public Long getLong(BoundField field) { return (Long) get(field); }
    public Long getLong(String name) { return (Long) get(name); }
    public Uuid getUuid(BoundField field) { return (Uuid) get(field); }
    public Uuid getUuid(String name) { return (Uuid) get(name); }
    public Double getDouble(BoundField field) { return (Double) get(field); }
    public Double getDouble(String name) { return (Double) get(name); }
    public Object[] getArray(BoundField field) { return (Object[]) get(field); }
    public Object[] getArray(String name) { return (Object[]) get(name); }
    public String getString(BoundField field) { return (String) get(field); }
    public String getString(String name) { return (String) get(name); }
    public Boolean getBoolean(BoundField field) { return (Boolean) get(field); }
    public Boolean getBoolean(String name) { return (Boolean) get(name); }

    // Returns the field as a ByteBuffer, wrapping raw byte[] values on the fly.
    public ByteBuffer getBytes(BoundField field) {
        Object result = get(field);
        if (result instanceof byte[])
            return ByteBuffer.wrap((byte[]) result);
        return (ByteBuffer) result;
    }

    public ByteBuffer getBytes(String name) {
        Object result = get(name);
        if (result instanceof byte[])
            return ByteBuffer.wrap((byte[]) result);
        return (ByteBuffer) result;
    }

    // Returns the field as a byte[], copying out of a ByteBuffer if necessary.
    public byte[] getByteArray(String name) {
        Object result = get(name);
        if (result instanceof byte[])
            return (byte[]) result;
        ByteBuffer buf = (ByteBuffer) result;
        byte[] arr = new byte[buf.remaining()];
        buf.get(arr);
        // NOTE(review): flip() sets position to 0 and limit to the old
        // position; this only restores the buffer's original state when its
        // position was 0 before the read — confirm that assumption holds for
        // all callers.
        buf.flip();
        return arr;
    }

    /**
     * Set the given field to the specified value
     *
     * @param field The field
     * @param value The value
     * @return This struct, to allow chained calls
     * @throws SchemaException If the validation of the field failed
     */
    public Struct set(BoundField field, Object value) {
        validateField(field);
        this.values[field.index] = value;
        return this;
    }

    /**
     * Set the field specified by the given name to the value
     *
     * @param name The name of the field
     * @param value The value to set
     * @return This struct, to allow chained calls
     * @throws SchemaException If the field is not known
     */
    public Struct set(String name, Object value) {
        BoundField field = this.schema.get(name);
        if (field == null)
            throw new SchemaException("Unknown field: " + name);
        this.values[field.index] = value;
        return this;
    }

    // Typed convenience setters; each forwards to set(String, Object) using the
    // field definition's name.
    public Struct set(Field.Str def, String value) { return set(def.name, value); }
    public Struct set(Field.NullableStr def, String value) { return set(def.name, value); }
    public Struct set(Field.Int8 def, byte value) { return set(def.name, value); }
    public Struct set(Field.Int32 def, int value) { return set(def.name, value); }
    public Struct set(Field.Int64 def, long value) { return set(def.name, value); }
    public Struct set(Field.UUID def, Uuid value) { return set(def.name, value); }
    public Struct set(Field.Int16 def, short value) { return set(def.name, value); }

    // Range-checks the value against the unsigned 16-bit maximum before storing.
    public Struct set(Field.Uint16 def, int value) {
        if (value < 0 || value > UNSIGNED_SHORT_MAX) {
            throw new RuntimeException("Invalid value for unsigned short for " + def.name + ": " + value);
        }
        return set(def.name, value);
    }

    // Range-checks the value against the unsigned 32-bit maximum before storing.
    public Struct set(Field.Uint32 def, long value) {
        if (value < 0 || value > UNSIGNED_INT_MAX) {
            throw new RuntimeException("Invalid value for unsigned int for " + def.name + ": " + value);
        }
        return set(def.name, value);
    }

    public Struct set(Field.Float64 def, double value) { return set(def.name, value); }
    public Struct set(Field.Bool def, boolean value) { return set(def.name, value); }
    public Struct set(Field.Array def, Object[] value) { return set(def.name, value); }
    public Struct set(Field.ComplexArray def, Object[] value) { return set(def.name, value); }

    // Stores the byte array wrapped in a ByteBuffer (null stays null).
    public Struct setByteArray(String name, byte[] value) {
        ByteBuffer buf = value == null ? null : ByteBuffer.wrap(value);
        return set(name, buf);
    }

    public Struct setIfExists(Field.Array def, Object[] value) { return setIfExists(def.name, value); }
    public Struct setIfExists(Field.ComplexArray def, Object[] value) { return setIfExists(def.name, value); }
    public Struct setIfExists(Field def, Object value) { return setIfExists(def.name, value); }

    // Sets the value only when the schema actually declares the field; unknown
    // field names are silently ignored (unlike set(String, Object), which throws).
    public Struct setIfExists(String fieldName, Object value) {
        BoundField field = this.schema.get(fieldName);
        if (field != null)
            this.values[field.index] = value;
        return this;
    }

    /**
     * Create a struct for the schema of a container type (struct or array). Note that for array type, this method
     * assumes that the type is an array of schema and creates a struct of that schema. Arrays of other types can't be
     * instantiated with this method.
     *
     * @param field The field to create an instance of
     * @return The struct
     * @throws SchemaException If the given field is not a container type
     */
    public Struct instance(BoundField field) {
        validateField(field);
        if (field.def.type instanceof Schema) {
            return new Struct((Schema) field.def.type);
        } else if (field.def.type.isArray()) {
            return new Struct((Schema) field.def.type.arrayElementType().get());
        } else {
            throw new SchemaException("Field '" + field.def.name + "' is not a container type, it is of type " + field.def.type);
        }
    }

    /**
     * Create a struct instance for the given field which must be a container type (struct or array)
     *
     * @param field The name of the field to create (field must be a schema type)
     * @return The struct
     * @throws SchemaException If the given field is not a container type
     */
    public Struct instance(String field) {
        return instance(schema.get(field));
    }

    public Struct instance(Field field) {
        return instance(schema.get(field.name));
    }

    public Struct instance(Field.ComplexArray field) {
        return instance(schema.get(field.name));
    }

    /**
     * Empty all the values from this record
     */
    public void clear() {
        Arrays.fill(this.values, null);
    }

    /**
     * Get the serialized size of this object
     */
    public int sizeOf() {
        return this.schema.sizeOf(this);
    }

    /**
     * Write this struct to a buffer
     */
    public void writeTo(ByteBuffer buffer) {
        this.schema.write(buffer, this);
    }

    /**
     * Ensure the user doesn't try to access fields from the wrong schema
     *
     * @throws SchemaException If validation fails
     */
    private void validateField(BoundField field) {
        Objects.requireNonNull(field, "`field` must be non-null");
        if (this.schema != field.schema)
            throw new SchemaException("Attempt to access field '" + field.def.name + "' from a different schema instance.");
        // NOTE(review): this guard uses '>' rather than '>='; an index equal to
        // values.length slips through and later fails with an
        // ArrayIndexOutOfBoundsException instead of a SchemaException. Harmless
        // if BoundField indices always come from this schema — confirm.
        if (field.index > values.length)
            throw new SchemaException("Invalid field index: " + field.index);
    }

    /**
     * Validate the contents of this struct against its schema
     *
     * @throws SchemaException If validation fails
     */
    public void validate() {
        this.schema.validate(this);
    }

    // Renders the struct as {name=value,...}; array-typed fields are shown as
    // [v0,v1,...].
    @Override
    public String toString() {
        StringBuilder b = new StringBuilder();
        b.append('{');
        for (int i = 0; i < this.values.length; i++) {
            BoundField f = this.schema.get(i);
            b.append(f.def.name);
            b.append('=');
            if (f.def.type.isArray() && this.values[i] != null) {
                Object[] arrayValue = (Object[]) this.values[i];
                b.append('[');
                for (int j = 0; j < arrayValue.length; j++) {
                    b.append(arrayValue[j]);
                    if (j < arrayValue.length - 1)
                        b.append(',');
                }
                b.append(']');
            } else
                b.append(this.values[i]);
            if (i < this.values.length - 1)
                b.append(',');
        }
        b.append('}');
        return b.toString();
    }

    // Hash is accumulated over non-null field values (and the elements of
    // array-typed fields), consistent with equals() below.
    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        for (int i = 0; i < this.values.length; i++) {
            BoundField f = this.schema.get(i);
            if (f.def.type.isArray()) {
                if (this.get(f) != null) {
                    Object[] arrayObject = (Object[]) this.get(f);
                    for (Object arrayItem: arrayObject)
                        result = prime * result + arrayItem.hashCode();
                }
            } else {
                Object field = this.get(f);
                if (field != null) {
                    result = prime * result + field.hashCode();
                }
            }
        }
        return result;
    }

    // Two structs are equal when they share the same schema instance (identity
    // comparison) and every field value compares equal (element-wise for arrays).
    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null)
            return false;
        if (getClass() != obj.getClass())
            return false;
        Struct other = (Struct) obj;
        if (schema != other.schema)
            return false;
        for (int i = 0; i < this.values.length; i++) {
            BoundField f = this.schema.get(i);
            boolean result;
            if (f.def.type.isArray()) {
                result = Arrays.equals((Object[]) this.get(f), (Object[]) other.get(f));
            } else {
                Object thisField = this.get(f);
                Object otherField = other.get(f);
                result = Objects.equals(thisField, otherField);
            }
            if (!result)
                return false;
        }
        return true;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/types/TaggedFields.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.protocol.types;

import org.apache.kafka.common.protocol.types.Type.DocumentedType;
import org.apache.kafka.common.utils.ByteUtils;

import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;

/**
 * Represents a tagged fields section.
 *
 * <p>On the wire a tagged fields section is a count followed by
 * (tag, size, payload) triples with strictly increasing tags. Tags that this
 * schema does not know about are preserved as {@link RawTaggedField} instances
 * so their bytes round-trip through read and write unchanged.
 */
public class TaggedFields extends DocumentedType {
    private static final String TAGGED_FIELDS_TYPE_NAME = "TAGGED_FIELDS";

    // Maps tag number to the schema field, for the tags this schema knows about.
    private final Map<Integer, Field> fields;

    /**
     * Create a new TaggedFields object with the given tags and fields.
     *
     * @param fields This is an array containing Integer tags followed
     *               by associated Field objects.
     * @return The new {@link TaggedFields}
     * @throws RuntimeException If an odd number of arguments is supplied
     */
    public static TaggedFields of(Object... fields) {
        // Note: the former @SuppressWarnings("unchecked") was removed; this
        // method performs no unchecked operations.
        if (fields.length % 2 != 0) {
            throw new RuntimeException("TaggedFields#of takes an even " +
                "number of parameters.");
        }
        TreeMap<Integer, Field> newFields = new TreeMap<>();
        for (int i = 0; i < fields.length; i += 2) {
            Integer tag = (Integer) fields[i];
            Field field = (Field) fields[i + 1];
            newFields.put(tag, field);
        }
        return new TaggedFields(newFields);
    }

    public TaggedFields(Map<Integer, Field> fields) {
        this.fields = fields;
    }

    @Override
    public boolean isNullable() {
        return false;
    }

    /**
     * Serialize a NavigableMap of tag -> value. Iterating a NavigableMap
     * yields ascending tags, which the wire format requires.
     */
    @SuppressWarnings("unchecked")
    @Override
    public void write(ByteBuffer buffer, Object o) {
        NavigableMap<Integer, Object> objects = (NavigableMap<Integer, Object>) o;
        ByteUtils.writeUnsignedVarint(objects.size(), buffer);
        for (Map.Entry<Integer, Object> entry : objects.entrySet()) {
            Integer tag = entry.getKey();
            Field field = fields.get(tag);
            ByteUtils.writeUnsignedVarint(tag, buffer);
            if (field == null) {
                // Unknown tag: replay the raw bytes captured when it was read.
                RawTaggedField value = (RawTaggedField) entry.getValue();
                ByteUtils.writeUnsignedVarint(value.data().length, buffer);
                buffer.put(value.data());
            } else {
                ByteUtils.writeUnsignedVarint(field.type.sizeOf(entry.getValue()), buffer);
                field.type.write(buffer, entry.getValue());
            }
        }
    }

    /**
     * Deserialize a tagged fields section into a NavigableMap of tag -> value.
     * Unknown tags are stored as {@link RawTaggedField}; known tags are decoded
     * with their field's type.
     *
     * @throws RuntimeException If tags are duplicated or out of order
     * @throws SchemaException If a field size is invalid for the buffer
     */
    @SuppressWarnings("unchecked")
    @Override
    public NavigableMap<Integer, Object> read(ByteBuffer buffer) {
        int numTaggedFields = ByteUtils.readUnsignedVarint(buffer);
        if (numTaggedFields == 0) {
            return Collections.emptyNavigableMap();
        }
        NavigableMap<Integer, Object> objects = new TreeMap<>();
        int prevTag = -1;
        for (int i = 0; i < numTaggedFields; i++) {
            int tag = ByteUtils.readUnsignedVarint(buffer);
            if (tag <= prevTag) {
                throw new RuntimeException("Invalid or out-of-order tag " + tag);
            }
            prevTag = tag;
            int size = ByteUtils.readUnsignedVarint(buffer);
            // Validate the size before allocating anything for the payload.
            if (size < 0)
                throw new SchemaException("field size " + size + " cannot be negative");
            if (size > buffer.remaining())
                throw new SchemaException("Error reading field of size " + size +
                    ", only " + buffer.remaining() + " bytes available");
            Field field = fields.get(tag);
            if (field == null) {
                byte[] bytes = new byte[size];
                buffer.get(bytes);
                objects.put(tag, new RawTaggedField(tag, bytes));
            } else {
                objects.put(tag, field.type.read(buffer));
            }
        }
        return objects;
    }

    /**
     * Compute the serialized size of a NavigableMap of tag -> value, mirroring
     * the encoding performed by {@link #write(ByteBuffer, Object)}.
     */
    @SuppressWarnings("unchecked")
    @Override
    public int sizeOf(Object o) {
        int size = 0;
        NavigableMap<Integer, Object> objects = (NavigableMap<Integer, Object>) o;
        size += ByteUtils.sizeOfUnsignedVarint(objects.size());
        for (Map.Entry<Integer, Object> entry : objects.entrySet()) {
            Integer tag = entry.getKey();
            size += ByteUtils.sizeOfUnsignedVarint(tag);
            Field field = fields.get(tag);
            if (field == null) {
                RawTaggedField value = (RawTaggedField) entry.getValue();
                size += value.data().length + ByteUtils.sizeOfUnsignedVarint(value.data().length);
            } else {
                int valueSize = field.type.sizeOf(entry.getValue());
                size += valueSize + ByteUtils.sizeOfUnsignedVarint(valueSize);
            }
        }
        return size;
    }

    @Override
    public String toString() {
        // Bug fix: the previous code passed the literal string
        // "TAGGED_FIELDS_TYPE_NAME(" to the StringBuilder, printing the
        // constant's *name* instead of its value. Use the constant itself so
        // the output reads "TAGGED_FIELDS(...)".
        StringBuilder bld = new StringBuilder(TAGGED_FIELDS_TYPE_NAME + "(");
        String prefix = "";
        for (Map.Entry<Integer, Field> field : fields.entrySet()) {
            bld.append(prefix);
            prefix = ", ";
            bld.append(field.getKey()).append(" -> ").append(field.getValue().toString());
        }
        bld.append(")");
        return bld.toString();
    }

    /**
     * Validate that the item is a NavigableMap of tag -> value whose unknown
     * tags carry {@link RawTaggedField} values and whose known tags satisfy
     * their field type's own validation.
     *
     * @throws SchemaException If the item is not a valid tagged fields map
     */
    @SuppressWarnings("unchecked")
    @Override
    public Map<Integer, Object> validate(Object item) {
        try {
            NavigableMap<Integer, Object> objects = (NavigableMap<Integer, Object>) item;
            for (Map.Entry<Integer, Object> entry : objects.entrySet()) {
                Integer tag = entry.getKey();
                Field field = fields.get(tag);
                if (field == null) {
                    if (!(entry.getValue() instanceof RawTaggedField)) {
                        throw new SchemaException("The value associated with tag " + tag +
                            " must be a RawTaggedField in this version of the software.");
                    }
                } else {
                    field.type.validate(entry.getValue());
                }
            }
            return objects;
        } catch (ClassCastException e) {
            throw new SchemaException("Not a NavigableMap. Found class " +
                item.getClass().getSimpleName());
        }
    }

    @Override
    public String typeName() {
        return TAGGED_FIELDS_TYPE_NAME;
    }

    @Override
    public String documentation() {
        return "Represents a series of tagged fields.";
    }

    /**
     * The number of tagged fields
     */
    public int numFields() {
        return this.fields.size();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/types/Type.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.protocol.types; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.record.BaseRecords; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.Utils; import java.nio.ByteBuffer; import java.util.Optional; /** * A serializable type */ public abstract class Type { /** * Write the typed object to the buffer * * @throws SchemaException If the object is not valid for its type */ public abstract void write(ByteBuffer buffer, Object o); /** * Read the typed object from the buffer * Please remember to do size validation before creating the container (ex: array) for the following data * * @throws SchemaException If the object is not valid for its type */ public abstract Object read(ByteBuffer buffer); /** * Validate the object. If succeeded return its typed object. 
* * @throws SchemaException If validation failed */ public abstract Object validate(Object o); /** * Return the size of the object in bytes */ public abstract int sizeOf(Object o); /** * Check if the type supports null values * @return whether or not null is a valid value for the type implementation */ public boolean isNullable() { return false; } /** * If the type is an array, return the type of the array elements. Otherwise, return empty. */ public Optional<Type> arrayElementType() { return Optional.empty(); } /** * Returns true if the type is an array. */ public final boolean isArray() { return arrayElementType().isPresent(); } /** * A Type that can return its description for documentation purposes. */ public static abstract class DocumentedType extends Type { /** * Short name of the type to identify it in documentation; * @return the name of the type */ public abstract String typeName(); /** * Documentation of the Type. * * @return details about valid values, representation */ public abstract String documentation(); @Override public String toString() { return typeName(); } } /** * The Boolean type represents a boolean value in a byte by using * the value of 0 to represent false, and 1 to represent true. * * If for some reason a value that is not 0 or 1 is read, * then any non-zero value will return true. */ public static final DocumentedType BOOLEAN = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { if ((Boolean) o) buffer.put((byte) 1); else buffer.put((byte) 0); } @Override public Object read(ByteBuffer buffer) { byte value = buffer.get(); return value != 0; } @Override public int sizeOf(Object o) { return 1; } @Override public String typeName() { return "BOOLEAN"; } @Override public Boolean validate(Object item) { if (item instanceof Boolean) return (Boolean) item; else throw new SchemaException(item + " is not a Boolean."); } @Override public String documentation() { return "Represents a boolean value in a byte. 
" + "Values 0 and 1 are used to represent false and true respectively. " + "When reading a boolean value, any non-zero value is considered true."; } }; public static final DocumentedType INT8 = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { buffer.put((Byte) o); } @Override public Object read(ByteBuffer buffer) { return buffer.get(); } @Override public int sizeOf(Object o) { return 1; } @Override public String typeName() { return "INT8"; } @Override public Byte validate(Object item) { if (item instanceof Byte) return (Byte) item; else throw new SchemaException(item + " is not a Byte."); } @Override public String documentation() { return "Represents an integer between -2<sup>7</sup> and 2<sup>7</sup>-1 inclusive."; } }; public static final DocumentedType INT16 = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { buffer.putShort((Short) o); } @Override public Object read(ByteBuffer buffer) { return buffer.getShort(); } @Override public int sizeOf(Object o) { return 2; } @Override public String typeName() { return "INT16"; } @Override public Short validate(Object item) { if (item instanceof Short) return (Short) item; else throw new SchemaException(item + " is not a Short."); } @Override public String documentation() { return "Represents an integer between -2<sup>15</sup> and 2<sup>15</sup>-1 inclusive. 
" + "The values are encoded using two bytes in network byte order (big-endian)."; } }; public static final DocumentedType UINT16 = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { Integer value = (Integer) o; buffer.putShort((short) value.intValue()); } @Override public Object read(ByteBuffer buffer) { short value = buffer.getShort(); return Integer.valueOf(Short.toUnsignedInt(value)); } @Override public int sizeOf(Object o) { return 2; } @Override public String typeName() { return "UINT16"; } @Override public Integer validate(Object item) { if (item instanceof Integer) return (Integer) item; else throw new SchemaException(item + " is not an a Integer (encoding an unsigned short)"); } @Override public String documentation() { return "Represents an integer between 0 and 65535 inclusive. " + "The values are encoded using two bytes in network byte order (big-endian)."; } }; public static final DocumentedType INT32 = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { buffer.putInt((Integer) o); } @Override public Object read(ByteBuffer buffer) { return buffer.getInt(); } @Override public int sizeOf(Object o) { return 4; } @Override public String typeName() { return "INT32"; } @Override public Integer validate(Object item) { if (item instanceof Integer) return (Integer) item; else throw new SchemaException(item + " is not an Integer."); } @Override public String documentation() { return "Represents an integer between -2<sup>31</sup> and 2<sup>31</sup>-1 inclusive. 
" + "The values are encoded using four bytes in network byte order (big-endian)."; } }; public static final DocumentedType UNSIGNED_INT32 = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { ByteUtils.writeUnsignedInt(buffer, (long) o); } @Override public Object read(ByteBuffer buffer) { return ByteUtils.readUnsignedInt(buffer); } @Override public int sizeOf(Object o) { return 4; } @Override public String typeName() { return "UINT32"; } @Override public Long validate(Object item) { if (item instanceof Long) return (Long) item; else throw new SchemaException(item + " is not an a Long (encoding an unsigned integer)."); } @Override public String documentation() { return "Represents an integer between 0 and 2<sup>32</sup>-1 inclusive. " + "The values are encoded using four bytes in network byte order (big-endian)."; } }; public static final DocumentedType INT64 = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { buffer.putLong((Long) o); } @Override public Object read(ByteBuffer buffer) { return buffer.getLong(); } @Override public int sizeOf(Object o) { return 8; } @Override public String typeName() { return "INT64"; } @Override public Long validate(Object item) { if (item instanceof Long) return (Long) item; else throw new SchemaException(item + " is not a Long."); } @Override public String documentation() { return "Represents an integer between -2<sup>63</sup> and 2<sup>63</sup>-1 inclusive. 
" + "The values are encoded using eight bytes in network byte order (big-endian)."; } }; public static final DocumentedType UUID = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { final Uuid uuid = (Uuid) o; buffer.putLong(uuid.getMostSignificantBits()); buffer.putLong(uuid.getLeastSignificantBits()); } @Override public Object read(ByteBuffer buffer) { return new Uuid(buffer.getLong(), buffer.getLong()); } @Override public int sizeOf(Object o) { return 16; } @Override public String typeName() { return "UUID"; } @Override public Uuid validate(Object item) { if (item instanceof Uuid) return (Uuid) item; else throw new SchemaException(item + " is not a Uuid."); } @Override public String documentation() { return "Represents a type 4 immutable universally unique identifier (Uuid). " + "The values are encoded using sixteen bytes in network byte order (big-endian)."; } }; public static final DocumentedType FLOAT64 = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { ByteUtils.writeDouble((Double) o, buffer); } @Override public Object read(ByteBuffer buffer) { return ByteUtils.readDouble(buffer); } @Override public int sizeOf(Object o) { return 8; } @Override public String typeName() { return "FLOAT64"; } @Override public Double validate(Object item) { if (item instanceof Double) return (Double) item; else throw new SchemaException(item + " is not a Double."); } @Override public String documentation() { return "Represents a double-precision 64-bit format IEEE 754 value. 
" + "The values are encoded using eight bytes in network byte order (big-endian)."; } }; public static final DocumentedType STRING = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { byte[] bytes = Utils.utf8((String) o); if (bytes.length > Short.MAX_VALUE) throw new SchemaException("String length " + bytes.length + " is larger than the maximum string length."); buffer.putShort((short) bytes.length); buffer.put(bytes); } @Override public String read(ByteBuffer buffer) { short length = buffer.getShort(); if (length < 0) throw new SchemaException("String length " + length + " cannot be negative"); if (length > buffer.remaining()) throw new SchemaException("Error reading string of length " + length + ", only " + buffer.remaining() + " bytes available"); String result = Utils.utf8(buffer, length); buffer.position(buffer.position() + length); return result; } @Override public int sizeOf(Object o) { return 2 + Utils.utf8Length((String) o); } @Override public String typeName() { return "STRING"; } @Override public String validate(Object item) { if (item instanceof String) return (String) item; else throw new SchemaException(item + " is not a String."); } @Override public String documentation() { return "Represents a sequence of characters. First the length N is given as an " + INT16 + ". Then N bytes follow which are the UTF-8 encoding of the character sequence. 
" + "Length must not be negative."; } }; public static final DocumentedType COMPACT_STRING = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { byte[] bytes = Utils.utf8((String) o); if (bytes.length > Short.MAX_VALUE) throw new SchemaException("String length " + bytes.length + " is larger than the maximum string length."); ByteUtils.writeUnsignedVarint(bytes.length + 1, buffer); buffer.put(bytes); } @Override public String read(ByteBuffer buffer) { int length = ByteUtils.readUnsignedVarint(buffer) - 1; if (length < 0) throw new SchemaException("String length " + length + " cannot be negative"); if (length > Short.MAX_VALUE) throw new SchemaException("String length " + length + " is larger than the maximum string length."); if (length > buffer.remaining()) throw new SchemaException("Error reading string of length " + length + ", only " + buffer.remaining() + " bytes available"); String result = Utils.utf8(buffer, length); buffer.position(buffer.position() + length); return result; } @Override public int sizeOf(Object o) { int length = Utils.utf8Length((String) o); return ByteUtils.sizeOfUnsignedVarint(length + 1) + length; } @Override public String typeName() { return "COMPACT_STRING"; } @Override public String validate(Object item) { if (item instanceof String) return (String) item; else throw new SchemaException(item + " is not a String."); } @Override public String documentation() { return "Represents a sequence of characters. First the length N + 1 is given as an UNSIGNED_VARINT " + ". 
Then N bytes follow which are the UTF-8 encoding of the character sequence."; } }; public static final DocumentedType NULLABLE_STRING = new DocumentedType() { @Override public boolean isNullable() { return true; } @Override public void write(ByteBuffer buffer, Object o) { if (o == null) { buffer.putShort((short) -1); return; } byte[] bytes = Utils.utf8((String) o); if (bytes.length > Short.MAX_VALUE) throw new SchemaException("String length " + bytes.length + " is larger than the maximum string length."); buffer.putShort((short) bytes.length); buffer.put(bytes); } @Override public String read(ByteBuffer buffer) { short length = buffer.getShort(); if (length < 0) return null; if (length > buffer.remaining()) throw new SchemaException("Error reading string of length " + length + ", only " + buffer.remaining() + " bytes available"); String result = Utils.utf8(buffer, length); buffer.position(buffer.position() + length); return result; } @Override public int sizeOf(Object o) { if (o == null) return 2; return 2 + Utils.utf8Length((String) o); } @Override public String typeName() { return "NULLABLE_STRING"; } @Override public String validate(Object item) { if (item == null) return null; if (item instanceof String) return (String) item; else throw new SchemaException(item + " is not a String."); } @Override public String documentation() { return "Represents a sequence of characters or null. For non-null strings, first the length N is given as an " + INT16 + ". Then N bytes follow which are the UTF-8 encoding of the character sequence. 
" + "A null value is encoded with length of -1 and there are no following bytes."; } }; public static final DocumentedType COMPACT_NULLABLE_STRING = new DocumentedType() { @Override public boolean isNullable() { return true; } @Override public void write(ByteBuffer buffer, Object o) { if (o == null) { ByteUtils.writeUnsignedVarint(0, buffer); } else { byte[] bytes = Utils.utf8((String) o); if (bytes.length > Short.MAX_VALUE) throw new SchemaException("String length " + bytes.length + " is larger than the maximum string length."); ByteUtils.writeUnsignedVarint(bytes.length + 1, buffer); buffer.put(bytes); } } @Override public String read(ByteBuffer buffer) { int length = ByteUtils.readUnsignedVarint(buffer) - 1; if (length < 0) { return null; } else if (length > Short.MAX_VALUE) { throw new SchemaException("String length " + length + " is larger than the maximum string length."); } else if (length > buffer.remaining()) { throw new SchemaException("Error reading string of length " + length + ", only " + buffer.remaining() + " bytes available"); } else { String result = Utils.utf8(buffer, length); buffer.position(buffer.position() + length); return result; } } @Override public int sizeOf(Object o) { if (o == null) { return 1; } int length = Utils.utf8Length((String) o); return ByteUtils.sizeOfUnsignedVarint(length + 1) + length; } @Override public String typeName() { return "COMPACT_NULLABLE_STRING"; } @Override public String validate(Object item) { if (item == null) { return null; } else if (item instanceof String) { return (String) item; } else { throw new SchemaException(item + " is not a String."); } } @Override public String documentation() { return "Represents a sequence of characters. First the length N + 1 is given as an UNSIGNED_VARINT " + ". Then N bytes follow which are the UTF-8 encoding of the character sequence. 
" + "A null string is represented with a length of 0."; } }; public static final DocumentedType BYTES = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { ByteBuffer arg = (ByteBuffer) o; int pos = arg.position(); buffer.putInt(arg.remaining()); buffer.put(arg); arg.position(pos); } @Override public Object read(ByteBuffer buffer) { int size = buffer.getInt(); if (size < 0) throw new SchemaException("Bytes size " + size + " cannot be negative"); if (size > buffer.remaining()) throw new SchemaException("Error reading bytes of size " + size + ", only " + buffer.remaining() + " bytes available"); int limit = buffer.limit(); int newPosition = buffer.position() + size; buffer.limit(newPosition); ByteBuffer val = buffer.slice(); buffer.limit(limit); buffer.position(newPosition); return val; } @Override public int sizeOf(Object o) { ByteBuffer buffer = (ByteBuffer) o; return 4 + buffer.remaining(); } @Override public String typeName() { return "BYTES"; } @Override public ByteBuffer validate(Object item) { if (item instanceof ByteBuffer) return (ByteBuffer) item; else throw new SchemaException(item + " is not a java.nio.ByteBuffer."); } @Override public String documentation() { return "Represents a raw sequence of bytes. First the length N is given as an " + INT32 + ". 
Then N bytes follow."; } }; public static final DocumentedType COMPACT_BYTES = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { ByteBuffer arg = (ByteBuffer) o; int pos = arg.position(); ByteUtils.writeUnsignedVarint(arg.remaining() + 1, buffer); buffer.put(arg); arg.position(pos); } @Override public Object read(ByteBuffer buffer) { int size = ByteUtils.readUnsignedVarint(buffer) - 1; if (size < 0) throw new SchemaException("Bytes size " + size + " cannot be negative"); if (size > buffer.remaining()) throw new SchemaException("Error reading bytes of size " + size + ", only " + buffer.remaining() + " bytes available"); int limit = buffer.limit(); int newPosition = buffer.position() + size; buffer.limit(newPosition); ByteBuffer val = buffer.slice(); buffer.limit(limit); buffer.position(newPosition); return val; } @Override public int sizeOf(Object o) { ByteBuffer buffer = (ByteBuffer) o; int remaining = buffer.remaining(); return ByteUtils.sizeOfUnsignedVarint(remaining + 1) + remaining; } @Override public String typeName() { return "COMPACT_BYTES"; } @Override public ByteBuffer validate(Object item) { if (item instanceof ByteBuffer) return (ByteBuffer) item; else throw new SchemaException(item + " is not a java.nio.ByteBuffer."); } @Override public String documentation() { return "Represents a raw sequence of bytes. First the length N+1 is given as an UNSIGNED_VARINT." 
+ "Then N bytes follow."; } }; public static final DocumentedType NULLABLE_BYTES = new DocumentedType() { @Override public boolean isNullable() { return true; } @Override public void write(ByteBuffer buffer, Object o) { if (o == null) { buffer.putInt(-1); return; } ByteBuffer arg = (ByteBuffer) o; int pos = arg.position(); buffer.putInt(arg.remaining()); buffer.put(arg); arg.position(pos); } @Override public Object read(ByteBuffer buffer) { int size = buffer.getInt(); if (size < 0) return null; if (size > buffer.remaining()) throw new SchemaException("Error reading bytes of size " + size + ", only " + buffer.remaining() + " bytes available"); int limit = buffer.limit(); int newPosition = buffer.position() + size; buffer.limit(newPosition); ByteBuffer val = buffer.slice(); buffer.limit(limit); buffer.position(newPosition); return val; } @Override public int sizeOf(Object o) { if (o == null) return 4; ByteBuffer buffer = (ByteBuffer) o; return 4 + buffer.remaining(); } @Override public String typeName() { return "NULLABLE_BYTES"; } @Override public ByteBuffer validate(Object item) { if (item == null) return null; if (item instanceof ByteBuffer) return (ByteBuffer) item; throw new SchemaException(item + " is not a java.nio.ByteBuffer."); } @Override public String documentation() { return "Represents a raw sequence of bytes or null. For non-null values, first the length N is given as an " + INT32 + ". Then N bytes follow. 
A null value is encoded with length of -1 and there are no following bytes."; } }; public static final DocumentedType COMPACT_NULLABLE_BYTES = new DocumentedType() { @Override public boolean isNullable() { return true; } @Override public void write(ByteBuffer buffer, Object o) { if (o == null) { ByteUtils.writeUnsignedVarint(0, buffer); } else { ByteBuffer arg = (ByteBuffer) o; int pos = arg.position(); ByteUtils.writeUnsignedVarint(arg.remaining() + 1, buffer); buffer.put(arg); arg.position(pos); } } @Override public Object read(ByteBuffer buffer) { int size = ByteUtils.readUnsignedVarint(buffer) - 1; if (size < 0) return null; if (size > buffer.remaining()) throw new SchemaException("Error reading bytes of size " + size + ", only " + buffer.remaining() + " bytes available"); int limit = buffer.limit(); int newPosition = buffer.position() + size; buffer.limit(newPosition); ByteBuffer val = buffer.slice(); buffer.limit(limit); buffer.position(newPosition); return val; } @Override public int sizeOf(Object o) { if (o == null) { return 1; } ByteBuffer buffer = (ByteBuffer) o; int remaining = buffer.remaining(); return ByteUtils.sizeOfUnsignedVarint(remaining + 1) + remaining; } @Override public String typeName() { return "COMPACT_NULLABLE_BYTES"; } @Override public ByteBuffer validate(Object item) { if (item == null) return null; if (item instanceof ByteBuffer) return (ByteBuffer) item; throw new SchemaException(item + " is not a java.nio.ByteBuffer."); } @Override public String documentation() { return "Represents a raw sequence of bytes. First the length N+1 is given as an UNSIGNED_VARINT." + "Then N bytes follow. 
A null object is represented with a length of 0."; } }; public static final DocumentedType COMPACT_RECORDS = new DocumentedType() { @Override public boolean isNullable() { return true; } @Override public void write(ByteBuffer buffer, Object o) { if (o == null) { COMPACT_NULLABLE_BYTES.write(buffer, null); } else if (o instanceof MemoryRecords) { MemoryRecords records = (MemoryRecords) o; COMPACT_NULLABLE_BYTES.write(buffer, records.buffer().duplicate()); } else { throw new IllegalArgumentException("Unexpected record type: " + o.getClass()); } } @Override public MemoryRecords read(ByteBuffer buffer) { ByteBuffer recordsBuffer = (ByteBuffer) COMPACT_NULLABLE_BYTES.read(buffer); if (recordsBuffer == null) { return null; } else { return MemoryRecords.readableRecords(recordsBuffer); } } @Override public int sizeOf(Object o) { if (o == null) { return 1; } BaseRecords records = (BaseRecords) o; int recordsSize = records.sizeInBytes(); return ByteUtils.sizeOfUnsignedVarint(recordsSize + 1) + recordsSize; } @Override public String typeName() { return "COMPACT_RECORDS"; } @Override public BaseRecords validate(Object item) { if (item == null) return null; if (item instanceof BaseRecords) return (BaseRecords) item; throw new SchemaException(item + " is not an instance of " + BaseRecords.class.getName()); } @Override public String documentation() { return "Represents a sequence of Kafka records as " + COMPACT_NULLABLE_BYTES + ". 
" + "For a detailed description of records see " + "<a href=\"/documentation/#messageformat\">Message Sets</a>."; } }; public static final DocumentedType RECORDS = new DocumentedType() { @Override public boolean isNullable() { return true; } @Override public void write(ByteBuffer buffer, Object o) { if (o == null) { NULLABLE_BYTES.write(buffer, null); } else if (o instanceof MemoryRecords) { MemoryRecords records = (MemoryRecords) o; NULLABLE_BYTES.write(buffer, records.buffer().duplicate()); } else { throw new IllegalArgumentException("Unexpected record type: " + o.getClass()); } } @Override public MemoryRecords read(ByteBuffer buffer) { ByteBuffer recordsBuffer = (ByteBuffer) NULLABLE_BYTES.read(buffer); if (recordsBuffer == null) { return null; } else { return MemoryRecords.readableRecords(recordsBuffer); } } @Override public int sizeOf(Object o) { if (o == null) return 4; BaseRecords records = (BaseRecords) o; return 4 + records.sizeInBytes(); } @Override public String typeName() { return "RECORDS"; } @Override public BaseRecords validate(Object item) { if (item == null) return null; if (item instanceof BaseRecords) return (BaseRecords) item; throw new SchemaException(item + " is not an instance of " + BaseRecords.class.getName()); } @Override public String documentation() { return "Represents a sequence of Kafka records as " + NULLABLE_BYTES + ". 
" + "For a detailed description of records see " + "<a href=\"/documentation/#messageformat\">Message Sets</a>."; } }; public static final DocumentedType VARINT = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { ByteUtils.writeVarint((Integer) o, buffer); } @Override public Integer read(ByteBuffer buffer) { return ByteUtils.readVarint(buffer); } @Override public Integer validate(Object item) { if (item instanceof Integer) return (Integer) item; throw new SchemaException(item + " is not an integer"); } public String typeName() { return "VARINT"; } @Override public int sizeOf(Object o) { return ByteUtils.sizeOfVarint((Integer) o); } @Override public String documentation() { return "Represents an integer between -2<sup>31</sup> and 2<sup>31</sup>-1 inclusive. " + "Encoding follows the variable-length zig-zag encoding from " + " <a href=\"http://code.google.com/apis/protocolbuffers/docs/encoding.html\"> Google Protocol Buffers</a>."; } }; public static final DocumentedType VARLONG = new DocumentedType() { @Override public void write(ByteBuffer buffer, Object o) { ByteUtils.writeVarlong((Long) o, buffer); } @Override public Long read(ByteBuffer buffer) { return ByteUtils.readVarlong(buffer); } @Override public Long validate(Object item) { if (item instanceof Long) return (Long) item; throw new SchemaException(item + " is not a long"); } public String typeName() { return "VARLONG"; } @Override public int sizeOf(Object o) { return ByteUtils.sizeOfVarlong((Long) o); } @Override public String documentation() { return "Represents an integer between -2<sup>63</sup> and 2<sup>63</sup>-1 inclusive. 
" + "Encoding follows the variable-length zig-zag encoding from " + " <a href=\"http://code.google.com/apis/protocolbuffers/docs/encoding.html\"> Google Protocol Buffers</a>."; } }; private static String toHtml() { DocumentedType[] types = { BOOLEAN, INT8, INT16, INT32, INT64, UNSIGNED_INT32, VARINT, VARLONG, UUID, FLOAT64, STRING, COMPACT_STRING, NULLABLE_STRING, COMPACT_NULLABLE_STRING, BYTES, COMPACT_BYTES, NULLABLE_BYTES, COMPACT_NULLABLE_BYTES, RECORDS, new ArrayOf(STRING), new CompactArrayOf(COMPACT_STRING)}; final StringBuilder b = new StringBuilder(); b.append("<table class=\"data-table\"><tbody>\n"); b.append("<tr>"); b.append("<th>Type</th>\n"); b.append("<th>Description</th>\n"); b.append("</tr>\n"); for (DocumentedType type : types) { b.append("<tr>"); b.append("<td>"); b.append(type.typeName()); b.append("</td>"); b.append("<td>"); b.append(type.documentation()); b.append("</td>"); b.append("</tr>\n"); } b.append("</tbody></table>\n"); return b.toString(); } public static void main(String[] args) { System.out.println(toHtml()); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/protocol/types/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides types of data which can be serialized in the Kafka Wire Protocol. * <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong> */ package org.apache.kafka.common.protocol.types;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/quota/ClientQuotaAlteration.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.quota;

import java.util.Collection;
import java.util.Objects;

/**
 * Describes a configuration alteration to be made to a client quota entity.
 */
public class ClientQuotaAlteration {

    /**
     * A single quota alteration operation: updates or clears one quota key.
     */
    public static class Op {
        private final String key;
        private final Double value;

        /**
         * @param key the quota type to alter
         * @param value if set then the existing value is updated,
         *              otherwise if null, the existing value is cleared
         */
        public Op(String key, Double value) {
            this.key = key;
            this.value = value;
        }

        /**
         * @return the quota type to alter
         */
        public String key() {
            return key;
        }

        /**
         * @return if set then the existing value is updated,
         *         otherwise if null, the existing value is cleared
         */
        public Double value() {
            return value;
        }

        @Override
        public boolean equals(Object o) {
            if (o == this)
                return true;
            if (o == null || o.getClass() != getClass())
                return false;
            Op other = (Op) o;
            return Objects.equals(key, other.key)
                && Objects.equals(value, other.value);
        }

        @Override
        public int hashCode() {
            return Objects.hash(key, value);
        }

        @Override
        public String toString() {
            return "ClientQuotaAlteration.Op(key=" + key + ", value=" + value + ")";
        }
    }

    private final ClientQuotaEntity entity;
    private final Collection<Op> ops;

    /**
     * @param entity the entity whose config will be modified
     * @param ops the alteration to perform
     */
    public ClientQuotaAlteration(ClientQuotaEntity entity, Collection<Op> ops) {
        this.entity = entity;
        this.ops = ops;
    }

    /**
     * @return the entity whose config will be modified
     */
    public ClientQuotaEntity entity() {
        return entity;
    }

    /**
     * @return the alteration to perform
     */
    public Collection<Op> ops() {
        return ops;
    }

    @Override
    public String toString() {
        return "ClientQuotaAlteration(entity=" + entity + ", ops=" + ops + ")";
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/quota/ClientQuotaEntity.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.quota; import java.util.Map; import java.util.Objects; /** * Describes a client quota entity, which is a mapping of entity types to their names. */ public class ClientQuotaEntity { private final Map<String, String> entries; /** * The type of an entity entry. */ public static final String USER = "user"; public static final String CLIENT_ID = "client-id"; public static final String IP = "ip"; public static boolean isValidEntityType(String entityType) { return Objects.equals(entityType, USER) || Objects.equals(entityType, CLIENT_ID) || Objects.equals(entityType, IP); } /** * Constructs a quota entity for the given types and names. If a name is null, * then it is mapped to the built-in default entity name. 
* * @param entries maps entity type to its name */ public ClientQuotaEntity(Map<String, String> entries) { this.entries = entries; } /** * @return map of entity type to its name */ public Map<String, String> entries() { return this.entries; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ClientQuotaEntity that = (ClientQuotaEntity) o; return Objects.equals(entries, that.entries); } @Override public int hashCode() { return Objects.hash(entries); } @Override public String toString() { return "ClientQuotaEntity(entries=" + entries + ")"; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/quota/ClientQuotaFilter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.quota;

import java.util.Collection;
import java.util.Collections;
import java.util.Objects;

/**
 * Describes a client quota entity filter.
 */
public class ClientQuotaFilter {

    private final Collection<ClientQuotaFilterComponent> components;
    private final boolean strict;

    /**
     * A filter to be applied to matching client quotas.
     *
     * @param components the components to filter on
     * @param strict whether the filter only includes specified components
     */
    private ClientQuotaFilter(Collection<ClientQuotaFilterComponent> components, boolean strict) {
        this.components = components;
        this.strict = strict;
    }

    /**
     * Constructs and returns a quota filter that matches all provided components. Matching entities
     * with entity types that are not specified by a component will also be included in the result.
     *
     * @param components the components for the filter
     */
    public static ClientQuotaFilter contains(Collection<ClientQuotaFilterComponent> components) {
        return new ClientQuotaFilter(components, false);
    }

    /**
     * Constructs and returns a quota filter that matches all provided components. Matching entities
     * with entity types that are not specified by a component will *not* be included in the result.
     *
     * @param components the components for the filter
     */
    public static ClientQuotaFilter containsOnly(Collection<ClientQuotaFilterComponent> components) {
        return new ClientQuotaFilter(components, true);
    }

    /**
     * Constructs and returns a quota filter that matches all configured entities.
     */
    public static ClientQuotaFilter all() {
        return new ClientQuotaFilter(Collections.emptyList(), false);
    }

    /**
     * @return the filter's components
     */
    public Collection<ClientQuotaFilterComponent> components() {
        return this.components;
    }

    /**
     * @return whether the filter is strict, i.e. only includes specified components
     */
    public boolean strict() {
        return this.strict;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        ClientQuotaFilter that = (ClientQuotaFilter) o;
        // Compare the primitive boolean directly instead of Objects.equals(strict, that.strict),
        // which needlessly autoboxes both operands to Boolean.
        return Objects.equals(components, that.components) && strict == that.strict;
    }

    @Override
    public int hashCode() {
        return Objects.hash(components, strict);
    }

    @Override
    public String toString() {
        return "ClientQuotaFilter(components=" + components + ", strict=" + strict + ")";
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/quota/ClientQuotaFilterComponent.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.quota; import java.util.Objects; import java.util.Optional; /** * Describes a component for applying a client quota filter. */ public class ClientQuotaFilterComponent { private final String entityType; private final Optional<String> match; /** * A filter to be applied. * * @param entityType the entity type the filter component applies to * @param match if present, the name that's matched exactly * if empty, matches the default name * if null, matches any specified name */ private ClientQuotaFilterComponent(String entityType, Optional<String> match) { this.entityType = Objects.requireNonNull(entityType); this.match = match; } /** * Constructs and returns a filter component that exactly matches the provided entity * name for the entity type. 
* * @param entityType the entity type the filter component applies to * @param entityName the entity name that's matched exactly */ public static ClientQuotaFilterComponent ofEntity(String entityType, String entityName) { return new ClientQuotaFilterComponent(entityType, Optional.of(Objects.requireNonNull(entityName))); } /** * Constructs and returns a filter component that matches the built-in default entity name * for the entity type. * * @param entityType the entity type the filter component applies to */ public static ClientQuotaFilterComponent ofDefaultEntity(String entityType) { return new ClientQuotaFilterComponent(entityType, Optional.empty()); } /** * Constructs and returns a filter component that matches any specified name for the * entity type. * * @param entityType the entity type the filter component applies to */ public static ClientQuotaFilterComponent ofEntityType(String entityType) { return new ClientQuotaFilterComponent(entityType, null); } /** * @return the component's entity type */ public String entityType() { return this.entityType; } /** * @return the optional match string, where: * if present, the name that's matched exactly * if empty, matches the default name * if null, matches any specified name */ public Optional<String> match() { return this.match; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; ClientQuotaFilterComponent that = (ClientQuotaFilterComponent) o; return Objects.equals(that.entityType, entityType) && Objects.equals(that.match, match); } @Override public int hashCode() { return Objects.hash(entityType, match); } @Override public String toString() { return "ClientQuotaFilterComponent(entityType=" + entityType + ", match=" + match + ")"; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/quota/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides mechanisms for enforcing resource quotas. */ package org.apache.kafka.common.quota;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/AbstractLegacyRecordBatch.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.InvalidRecordException;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.CorruptRecordException;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.utils.AbstractIterator;
import org.apache.kafka.common.utils.BufferSupplier;
import org.apache.kafka.common.utils.ByteBufferOutputStream;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.CloseableIterator;
import org.apache.kafka.common.utils.Utils;

import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.Objects;
import java.util.OptionalLong;

import static org.apache.kafka.common.record.Records.LOG_OVERHEAD;
import static org.apache.kafka.common.record.Records.OFFSET_OFFSET;

/**
 * This {@link RecordBatch} implementation is for magic versions 0 and 1. In addition to implementing
 * {@link RecordBatch}, it also implements {@link Record}, which exposes the duality of the old message
 * format in its handling of compressed messages. The wrapper record is considered the record batch in this
 * interface, while the inner records are considered the log records (though they both share the same schema).
 *
 * In general, this class should not be used directly. Instances of {@link Records} provides access to this
 * class indirectly through the {@link RecordBatch} interface.
 */
public abstract class AbstractLegacyRecordBatch extends AbstractRecordBatch implements Record {

    // The single wrapper record backing this batch (for compressed sets, its value holds the inner records).
    public abstract LegacyRecord outerRecord();

    // In the legacy format the batch's offset is the wrapper record's offset, which is also its last offset.
    @Override
    public long lastOffset() {
        return offset();
    }

    @Override
    public boolean isValid() {
        return outerRecord().isValid();
    }

    @Override
    public void ensureValid() {
        outerRecord().ensureValid();
    }

    // Key/value accessors all delegate to the wrapper record.
    @Override
    public int keySize() {
        return outerRecord().keySize();
    }

    @Override
    public boolean hasKey() {
        return outerRecord().hasKey();
    }

    @Override
    public ByteBuffer key() {
        return outerRecord().key();
    }

    @Override
    public int valueSize() {
        return outerRecord().valueSize();
    }

    @Override
    public boolean hasValue() {
        return !outerRecord().hasNullValue();
    }

    @Override
    public ByteBuffer value() {
        return outerRecord().value();
    }

    // Magic v0/v1 messages have no record headers.
    @Override
    public Header[] headers() {
        return Record.EMPTY_HEADERS;
    }

    @Override
    public boolean hasMagic(byte magic) {
        return magic == outerRecord().magic();
    }

    @Override
    public boolean hasTimestampType(TimestampType timestampType) {
        return outerRecord().timestampType() == timestampType;
    }

    @Override
    public long checksum() {
        return outerRecord().checksum();
    }

    @Override
    public long maxTimestamp() {
        return timestamp();
    }

    @Override
    public long timestamp() {
        return outerRecord().timestamp();
    }

    @Override
    public TimestampType timestampType() {
        return outerRecord().timestampType();
    }

    // The base offset is the offset of the first (possibly inner) record; for compressed
    // batches this forces a deep iteration to resolve the absolute offset.
    @Override
    public long baseOffset() {
        return iterator().next().offset();
    }

    @Override
    public byte magic() {
        return outerRecord().magic();
    }

    @Override
    public CompressionType compressionType() {
        return outerRecord().compressionType();
    }

    @Override
    public int sizeInBytes() {
        return outerRecord().sizeInBytes() + LOG_OVERHEAD;
    }

    // The legacy format does not store a record count in the batch header.
    @Override
    public Integer countOrNull() {
        return null;
    }

    @Override
    public String toString() {
        return "LegacyRecordBatch(offset=" + offset() + ", " + outerRecord() + ")";
    }

    @Override
    public void writeTo(ByteBuffer buffer) {
        writeHeader(buffer, offset(), outerRecord().sizeInBytes());
        buffer.put(outerRecord().buffer().duplicate());
    }

    // None of the idempotent/transactional producer fields exist before magic v2,
    // so all of the following return the corresponding sentinel values.
    @Override
    public long producerId() {
        return RecordBatch.NO_PRODUCER_ID;
    }

    @Override
    public short producerEpoch() {
        return RecordBatch.NO_PRODUCER_EPOCH;
    }

    @Override
    public boolean hasProducerId() {
        return false;
    }

    @Override
    public int sequence() {
        return RecordBatch.NO_SEQUENCE;
    }

    @Override
    public int baseSequence() {
        return RecordBatch.NO_SEQUENCE;
    }

    @Override
    public int lastSequence() {
        return RecordBatch.NO_SEQUENCE;
    }

    @Override
    public boolean isTransactional() {
        return false;
    }

    @Override
    public int partitionLeaderEpoch() {
        return RecordBatch.NO_PARTITION_LEADER_EPOCH;
    }

    @Override
    public boolean isControlBatch() {
        return false;
    }

    @Override
    public OptionalLong deleteHorizonMs() {
        return OptionalLong.empty();
    }

    /**
     * Get an iterator for the nested entries contained within this batch. Note that
     * if the batch is not compressed, then this method will return an iterator over the
     * shallow record only (i.e. this object).
     * @return An iterator over the records contained within this batch
     */
    @Override
    public Iterator<Record> iterator() {
        return iterator(BufferSupplier.NO_CACHING);
    }

    CloseableIterator<Record> iterator(BufferSupplier bufferSupplier) {
        if (isCompressed())
            return new DeepRecordsIterator(this, false, Integer.MAX_VALUE, bufferSupplier);

        // Uncompressed: a single-element iterator over this shallow record.
        return new CloseableIterator<Record>() {
            private boolean hasNext = true;

            @Override
            public void close() {}

            @Override
            public boolean hasNext() {
                return hasNext;
            }

            @Override
            public Record next() {
                if (!hasNext)
                    throw new NoSuchElementException();
                hasNext = false;
                return AbstractLegacyRecordBatch.this;
            }

            @Override
            public void remove() {
                throw new UnsupportedOperationException();
            }
        };
    }

    @Override
    public CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier) {
        // the older message format versions do not support streaming, so we return the normal iterator
        return iterator(bufferSupplier);
    }

    // Write the legacy log-entry header (offset + size) in front of a record.
    static void writeHeader(ByteBuffer buffer, long offset, int size) {
        buffer.putLong(offset);
        buffer.putInt(size);
    }

    static void writeHeader(DataOutputStream out, long offset, int size) throws IOException {
        out.writeLong(offset);
        out.writeInt(size);
    }

    /**
     * A {@link LogInputStream} that reads legacy record batches from a decompressed byte stream.
     */
    private static final class DataLogInputStream implements LogInputStream<AbstractLegacyRecordBatch> {
        private final InputStream stream;
        protected final int maxMessageSize;
        // Reused scratch buffer for the fixed-size offset+size header.
        private final ByteBuffer offsetAndSizeBuffer;

        DataLogInputStream(InputStream stream, int maxMessageSize) {
            this.stream = stream;
            this.maxMessageSize = maxMessageSize;
            this.offsetAndSizeBuffer = ByteBuffer.allocate(Records.LOG_OVERHEAD);
        }

        /**
         * Read the next batch from the stream, or return null on a clean (or truncated) end of stream.
         * @throws CorruptRecordException if the encoded size is below the minimum overhead or above maxMessageSize
         */
        public AbstractLegacyRecordBatch nextBatch() throws IOException {
            offsetAndSizeBuffer.clear();
            Utils.readFully(stream, offsetAndSizeBuffer);
            if (offsetAndSizeBuffer.hasRemaining())
                return null;

            long offset = offsetAndSizeBuffer.getLong(Records.OFFSET_OFFSET);
            int size = offsetAndSizeBuffer.getInt(Records.SIZE_OFFSET);
            if (size < LegacyRecord.RECORD_OVERHEAD_V0)
                throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0));
            if (size > maxMessageSize)
                throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize));

            ByteBuffer batchBuffer = ByteBuffer.allocate(size);
            Utils.readFully(stream, batchBuffer);
            if (batchBuffer.hasRemaining())
                return null;
            batchBuffer.flip();

            return new BasicLegacyRecordBatch(offset, new LegacyRecord(batchBuffer));
        }
    }

    /**
     * Iterator over the inner records of a compressed legacy wrapper message. The whole wrapper
     * is decompressed eagerly in the constructor so that relative (v1) offsets can be converted
     * to absolute offsets.
     */
    private static class DeepRecordsIterator extends AbstractIterator<Record> implements CloseableIterator<Record> {
        private final ArrayDeque<AbstractLegacyRecordBatch> innerEntries;
        // Base to add to each inner record's relative offset (v1); -1 when offsets are already absolute (v0).
        private final long absoluteBaseOffset;
        private final byte wrapperMagic;

        private DeepRecordsIterator(AbstractLegacyRecordBatch wrapperEntry,
                                    boolean ensureMatchingMagic,
                                    int maxMessageSize,
                                    BufferSupplier bufferSupplier) {
            LegacyRecord wrapperRecord = wrapperEntry.outerRecord();
            this.wrapperMagic = wrapperRecord.magic();
            if (wrapperMagic != RecordBatch.MAGIC_VALUE_V0 && wrapperMagic != RecordBatch.MAGIC_VALUE_V1)
                throw new InvalidRecordException("Invalid wrapper magic found in legacy deep record iterator " + wrapperMagic);

            CompressionType compressionType = wrapperRecord.compressionType();
            // ZSTD was only introduced with magic v2, so it is never valid here.
            if (compressionType == CompressionType.ZSTD)
                throw new InvalidRecordException("Invalid wrapper compressionType found in legacy deep record iterator " + wrapperMagic);

            ByteBuffer wrapperValue = wrapperRecord.value();
            if (wrapperValue == null)
                throw new InvalidRecordException("Found invalid compressed record set with null value (magic = " +
                        wrapperMagic + ")");

            InputStream stream = compressionType.wrapForInput(wrapperValue, wrapperRecord.magic(), bufferSupplier);
            LogInputStream<AbstractLegacyRecordBatch> logStream = new DataLogInputStream(stream, maxMessageSize);

            long lastOffsetFromWrapper = wrapperEntry.lastOffset();
            long timestampFromWrapper = wrapperRecord.timestamp();
            this.innerEntries = new ArrayDeque<>();

            // If relative offset is used, we need to decompress the entire message first to compute
            // the absolute offset. For simplicity and because it's a format that is on its way out, we
            // do the same for message format version 0
            try {
                while (true) {
                    AbstractLegacyRecordBatch innerEntry = logStream.nextBatch();
                    if (innerEntry == null)
                        break;

                    LegacyRecord record = innerEntry.outerRecord();
                    byte magic = record.magic();

                    if (ensureMatchingMagic && magic != wrapperMagic)
                        throw new InvalidRecordException("Compressed message magic " + magic +
                                " does not match wrapper magic " + wrapperMagic);

                    if (magic == RecordBatch.MAGIC_VALUE_V1) {
                        // v1 inner records inherit the wrapper's timestamp when LOG_APPEND_TIME is used.
                        LegacyRecord recordWithTimestamp = new LegacyRecord(
                                record.buffer(),
                                timestampFromWrapper,
                                wrapperRecord.timestampType());
                        innerEntry = new BasicLegacyRecordBatch(innerEntry.lastOffset(), recordWithTimestamp);
                    }

                    innerEntries.addLast(innerEntry);
                }

                if (innerEntries.isEmpty())
                    throw new InvalidRecordException("Found invalid compressed record set with no inner records");

                if (wrapperMagic == RecordBatch.MAGIC_VALUE_V1) {
                    if (lastOffsetFromWrapper == 0) {
                        // The outer offset may be 0 if this is produce data from certain versions of librdkafka.
                        this.absoluteBaseOffset = 0;
                    } else {
                        long lastInnerOffset = innerEntries.getLast().offset();
                        if (lastOffsetFromWrapper < lastInnerOffset)
                            throw new InvalidRecordException("Found invalid wrapper offset in compressed v1 message set, " +
                                    "wrapper offset '" + lastOffsetFromWrapper + "' is less than the last inner message " +
                                    "offset '" + lastInnerOffset + "' and it is not zero.");
                        this.absoluteBaseOffset = lastOffsetFromWrapper - lastInnerOffset;
                    }
                } else {
                    this.absoluteBaseOffset = -1;
                }
            } catch (IOException e) {
                throw new KafkaException(e);
            } finally {
                Utils.closeQuietly(stream, "records iterator stream");
            }
        }

        @Override
        protected Record makeNext() {
            if (innerEntries.isEmpty())
                return allDone();

            AbstractLegacyRecordBatch entry = innerEntries.remove();

            // Convert offset to absolute offset if needed.
            if (wrapperMagic == RecordBatch.MAGIC_VALUE_V1) {
                long absoluteOffset = absoluteBaseOffset + entry.offset();
                entry = new BasicLegacyRecordBatch(absoluteOffset, entry.outerRecord());
            }

            if (entry.isCompressed())
                throw new InvalidRecordException("Inner messages must not be compressed");

            return entry;
        }

        @Override
        public void close() {}
    }

    /**
     * A simple in-memory legacy batch holding a single record and its offset.
     */
    private static class BasicLegacyRecordBatch extends AbstractLegacyRecordBatch {
        private final LegacyRecord record;
        private final long offset;

        private BasicLegacyRecordBatch(long offset, LegacyRecord record) {
            this.offset = offset;
            this.record = record;
        }

        @Override
        public long offset() {
            return offset;
        }

        @Override
        public LegacyRecord outerRecord() {
            return record;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o)
                return true;
            if (o == null || getClass() != o.getClass())
                return false;

            BasicLegacyRecordBatch that = (BasicLegacyRecordBatch) o;

            return offset == that.offset &&
                    Objects.equals(record, that.record);
        }

        @Override
        public int hashCode() {
            int result = record != null ? record.hashCode() : 0;
            result = 31 * result + Long.hashCode(offset);
            return result;
        }
    }

    /**
     * A mutable legacy batch backed directly by a {@link ByteBuffer}, allowing in-place
     * updates of the offset and (for v1) the timestamp with CRC recomputation.
     */
    static class ByteBufferLegacyRecordBatch extends AbstractLegacyRecordBatch implements MutableRecordBatch {
        private final ByteBuffer buffer;
        private final LegacyRecord record;

        ByteBufferLegacyRecordBatch(ByteBuffer buffer) {
            this.buffer = buffer;
            // The record payload starts right after the offset+size log overhead.
            buffer.position(LOG_OVERHEAD);
            this.record = new LegacyRecord(buffer.slice());
            buffer.position(OFFSET_OFFSET);
        }

        @Override
        public long offset() {
            return buffer.getLong(OFFSET_OFFSET);
        }

        @Override
        public OptionalLong deleteHorizonMs() {
            return OptionalLong.empty();
        }

        @Override
        public LegacyRecord outerRecord() {
            return record;
        }

        @Override
        public void setLastOffset(long offset) {
            buffer.putLong(OFFSET_OFFSET, offset);
        }

        @Override
        public void setMaxTimestamp(TimestampType timestampType, long timestamp) {
            if (record.magic() == RecordBatch.MAGIC_VALUE_V0)
                throw new UnsupportedOperationException("Cannot set timestamp for a record with magic = 0");

            long currentTimestamp = record.timestamp();
            // We don't need to recompute crc if the timestamp is not updated.
            if (record.timestampType() == timestampType && currentTimestamp == timestamp)
                return;

            setTimestampAndUpdateCrc(timestampType, timestamp);
        }

        @Override
        public void setPartitionLeaderEpoch(int epoch) {
            throw new UnsupportedOperationException("Magic versions prior to 2 do not support partition leader epoch");
        }

        // Rewrite the attributes and timestamp fields in place, then refresh the CRC.
        private void setTimestampAndUpdateCrc(TimestampType timestampType, long timestamp) {
            byte attributes = LegacyRecord.computeAttributes(magic(), compressionType(), timestampType);
            buffer.put(LOG_OVERHEAD + LegacyRecord.ATTRIBUTES_OFFSET, attributes);
            buffer.putLong(LOG_OVERHEAD + LegacyRecord.TIMESTAMP_OFFSET, timestamp);
            long crc = record.computeChecksum();
            ByteUtils.writeUnsignedInt(buffer, LOG_OVERHEAD + LegacyRecord.CRC_OFFSET, crc);
        }

        /**
         * LegacyRecordBatch does not implement this iterator and would hence fallback to the normal iterator.
         *
         * @return An iterator over the records contained within this batch
         */
        @Override
        public CloseableIterator<Record> skipKeyValueIterator(BufferSupplier bufferSupplier) {
            return CloseableIterator.wrap(iterator(bufferSupplier));
        }

        @Override
        public void writeTo(ByteBufferOutputStream outputStream) {
            outputStream.write(buffer.duplicate());
        }

        @Override
        public boolean equals(Object o) {
            if (this == o)
                return true;
            if (o == null || getClass() != o.getClass())
                return false;

            ByteBufferLegacyRecordBatch that = (ByteBufferLegacyRecordBatch) o;

            return Objects.equals(buffer, that.buffer);
        }

        @Override
        public int hashCode() {
            return buffer != null ? buffer.hashCode() : 0;
        }
    }

    /**
     * A file-backed legacy batch. Most header fields that only exist in magic v2 return
     * their sentinel values without loading the full batch from disk.
     */
    static class LegacyFileChannelRecordBatch extends FileLogInputStream.FileChannelRecordBatch {

        LegacyFileChannelRecordBatch(long offset,
                                     byte magic,
                                     FileRecords fileRecords,
                                     int position,
                                     int batchSize) {
            super(offset, magic, fileRecords, position, batchSize);
        }

        @Override
        protected RecordBatch toMemoryRecordBatch(ByteBuffer buffer) {
            return new ByteBufferLegacyRecordBatch(buffer);
        }

        // Requires loading (and possibly decompressing) the batch to resolve the first inner offset.
        @Override
        public long baseOffset() {
            return loadFullBatch().baseOffset();
        }

        @Override
        public OptionalLong deleteHorizonMs() {
            return OptionalLong.empty();
        }

        @Override
        public long lastOffset() {
            return offset;
        }

        @Override
        public long producerId() {
            return RecordBatch.NO_PRODUCER_ID;
        }

        @Override
        public short producerEpoch() {
            return RecordBatch.NO_PRODUCER_EPOCH;
        }

        @Override
        public int baseSequence() {
            return RecordBatch.NO_SEQUENCE;
        }

        @Override
        public int lastSequence() {
            return RecordBatch.NO_SEQUENCE;
        }

        @Override
        public Integer countOrNull() {
            return null;
        }

        @Override
        public boolean isTransactional() {
            return false;
        }

        @Override
        public boolean isControlBatch() {
            return false;
        }

        @Override
        public int partitionLeaderEpoch() {
            return RecordBatch.NO_PARTITION_LEADER_EPOCH;
        }

        @Override
        protected int headerSize() {
            return LOG_OVERHEAD + LegacyRecord.headerSize(magic);
        }

    }

}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/AbstractRecordBatch.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

/**
 * Shared default behavior for {@link RecordBatch} implementations across all magic versions.
 */
abstract class AbstractRecordBatch implements RecordBatch {

    /**
     * A batch carries a producer id exactly when the id is a real (non-sentinel) value,
     * i.e. strictly greater than {@link RecordBatch#NO_PRODUCER_ID}.
     */
    @Override
    public boolean hasProducerId() {
        return producerId() > RecordBatch.NO_PRODUCER_ID;
    }

    /**
     * The offset of the record that would follow this batch in the log.
     */
    @Override
    public long nextOffset() {
        return 1 + lastOffset();
    }

    /**
     * Whether the batch payload is compressed with any codec other than NONE.
     */
    @Override
    public boolean isCompressed() {
        return CompressionType.NONE != compressionType();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/AbstractRecords.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.utils.AbstractIterator;
import org.apache.kafka.common.utils.Utils;

import java.nio.ByteBuffer;
import java.util.Iterator;

/**
 * Common base for {@link Records} implementations: deep record iteration and
 * size-estimation helpers shared by the in-memory and file-backed variants.
 */
public abstract class AbstractRecords implements Records {

    // Cached Iterable view so records() does not allocate on every call.
    private final Iterable<Record> records = this::recordsIterator;

    @Override
    public boolean hasMatchingMagic(byte magic) {
        for (RecordBatch batch : batches())
            if (batch.magic() != magic)
                return false;
        return true;
    }

    // Returns the first batch, or null when there are no batches at all.
    public RecordBatch firstBatch() {
        Iterator<? extends RecordBatch> iterator = batches().iterator();

        if (!iterator.hasNext())
            return null;

        return iterator.next();
    }

    /**
     * Get an iterator over the deep records.
     * @return An iterator over the records
     */
    @Override
    public Iterable<Record> records() {
        return records;
    }

    @Override
    public DefaultRecordsSend<Records> toSend() {
        return new DefaultRecordsSend<>(this);
    }

    // Flattens the batch iterator into a record iterator, advancing to the next
    // batch lazily whenever the current batch's records are exhausted.
    private Iterator<Record> recordsIterator() {
        return new AbstractIterator<Record>() {
            private final Iterator<? extends RecordBatch> batches = batches().iterator();
            private Iterator<Record> records;

            @Override
            protected Record makeNext() {
                if (records != null && records.hasNext())
                    return records.next();

                if (batches.hasNext()) {
                    records = batches.next().iterator();
                    return makeNext();
                }

                return allDone();
            }
        };
    }

    // Estimate the serialized size of the given records for the given magic version,
    // then discount for the expected effect of compression.
    public static int estimateSizeInBytes(byte magic,
                                          long baseOffset,
                                          CompressionType compressionType,
                                          Iterable<Record> records) {
        int size = 0;
        if (magic <= RecordBatch.MAGIC_VALUE_V1) {
            for (Record record : records)
                size += Records.LOG_OVERHEAD + LegacyRecord.recordSize(magic, record.key(), record.value());
        } else {
            size = DefaultRecordBatch.sizeInBytes(baseOffset, records);
        }
        return estimateCompressedSizeInBytes(size, compressionType);
    }

    public static int estimateSizeInBytes(byte magic,
                                          CompressionType compressionType,
                                          Iterable<SimpleRecord> records) {
        int size = 0;
        if (magic <= RecordBatch.MAGIC_VALUE_V1) {
            for (SimpleRecord record : records)
                size += Records.LOG_OVERHEAD + LegacyRecord.recordSize(magic, record.key(), record.value());
        } else {
            size = DefaultRecordBatch.sizeInBytes(records);
        }
        return estimateCompressedSizeInBytes(size, compressionType);
    }

    // Heuristic: assume roughly 2x compression, but never estimate below 1 KB or above 64 KB.
    private static int estimateCompressedSizeInBytes(int size, CompressionType compressionType) {
        return compressionType == CompressionType.NONE ? size : Math.min(Math.max(size / 2, 1024), 1 << 16);
    }

    /**
     * Get an upper bound estimate on the batch size needed to hold a record with the given fields. This is only
     * an estimate because it does not take into account overhead from the compression algorithm.
     */
    public static int estimateSizeInBytesUpperBound(byte magic, CompressionType compressionType, byte[] key, byte[] value, Header[] headers) {
        return estimateSizeInBytesUpperBound(magic, compressionType, Utils.wrapNullable(key), Utils.wrapNullable(value), headers);
    }

    /**
     * Get an upper bound estimate on the batch size needed to hold a record with the given fields. This is only
     * an estimate because it does not take into account overhead from the compression algorithm.
     */
    public static int estimateSizeInBytesUpperBound(byte magic, CompressionType compressionType, ByteBuffer key,
                                                    ByteBuffer value, Header[] headers) {
        if (magic >= RecordBatch.MAGIC_VALUE_V2)
            return DefaultRecordBatch.estimateBatchSizeUpperBound(key, value, headers);
        else if (compressionType != CompressionType.NONE)
            // Compressed legacy sets also need overhead for the wrapper record.
            return Records.LOG_OVERHEAD + LegacyRecord.recordOverhead(magic) + LegacyRecord.recordSize(magic, key, value);
        else
            return Records.LOG_OVERHEAD + LegacyRecord.recordSize(magic, key, value);
    }

    /**
     * Return the size of the record batch header.
     *
     * For V0 and V1 with no compression, it's unclear if Records.LOG_OVERHEAD or 0 should be chosen. There is no header
     * per batch, but a sequence of batches is preceded by the offset and size. This method returns `0` as it's what
     * `MemoryRecordsBuilder` requires.
     */
    public static int recordBatchHeaderSizeInBytes(byte magic, CompressionType compressionType) {
        if (magic > RecordBatch.MAGIC_VALUE_V1) {
            return DefaultRecordBatch.RECORD_BATCH_OVERHEAD;
        } else if (compressionType != CompressionType.NONE) {
            return Records.LOG_OVERHEAD + LegacyRecord.recordOverhead(magic);
        } else {
            return 0;
        }
    }

}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/BaseRecords.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

/**
 * Base interface for accessing records which could be contained in the log, or an in-memory materialization of log records.
 */
public interface BaseRecords {
    /**
     * The size of these records in bytes.
     * @return The size in bytes of the records
     */
    int sizeInBytes();

    /**
     * Encapsulate this {@link BaseRecords} object into {@link RecordsSend}
     * @return Initialized {@link RecordsSend} object
     */
    RecordsSend<? extends BaseRecords> toSend();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/ByteBufferLogInputStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.errors.CorruptRecordException;

import java.nio.ByteBuffer;

import static org.apache.kafka.common.record.Records.HEADER_SIZE_UP_TO_MAGIC;
import static org.apache.kafka.common.record.Records.LOG_OVERHEAD;
import static org.apache.kafka.common.record.Records.MAGIC_OFFSET;
import static org.apache.kafka.common.record.Records.SIZE_OFFSET;

/**
 * A byte buffer backed log input stream. This class avoids the need to copy records by returning
 * slices from the underlying byte buffer.
 */
class ByteBufferLogInputStream implements LogInputStream<MutableRecordBatch> {
    private final ByteBuffer buffer;
    private final int maxMessageSize;

    ByteBufferLogInputStream(ByteBuffer buffer, int maxMessageSize) {
        this.buffer = buffer;
        this.maxMessageSize = maxMessageSize;
    }

    /**
     * Slice out the next complete batch from the buffer and advance past it,
     * or return null when the buffer does not hold a full batch.
     */
    public MutableRecordBatch nextBatch() {
        Integer totalBatchSize = nextBatchSize();
        if (totalBatchSize == null || buffer.remaining() < totalBatchSize)
            return null;

        int start = buffer.position();
        byte magic = buffer.get(start + MAGIC_OFFSET);

        // Zero-copy view over just this batch; consume it from the backing buffer.
        ByteBuffer batchView = buffer.slice();
        batchView.limit(totalBatchSize);
        buffer.position(start + totalBatchSize);

        return magic > RecordBatch.MAGIC_VALUE_V1
                ? new DefaultRecordBatch(batchView)
                : new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchView);
    }

    /**
     * Validates the header of the next batch and returns batch size.
     * @return next batch size including LOG_OVERHEAD if buffer contains header up to
     *         magic byte, null otherwise
     * @throws CorruptRecordException if record size or magic is invalid
     */
    Integer nextBatchSize() throws CorruptRecordException {
        int available = buffer.remaining();
        if (available < LOG_OVERHEAD)
            return null;

        int start = buffer.position();
        int recordSize = buffer.getInt(start + SIZE_OFFSET);

        // V0 has the smallest overhead, stricter checking is done later
        if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0)
            throw new CorruptRecordException(String.format("Record size %d is less than the minimum record overhead (%d)",
                    recordSize, LegacyRecord.RECORD_OVERHEAD_V0));
        if (recordSize > maxMessageSize)
            throw new CorruptRecordException(String.format("Record size %d exceeds the largest allowable message size (%d).",
                    recordSize, maxMessageSize));

        if (available < HEADER_SIZE_UP_TO_MAGIC)
            return null;

        byte magic = buffer.get(start + MAGIC_OFFSET);
        if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE)
            throw new CorruptRecordException("Invalid magic found in record: " + magic);

        return recordSize + LOG_OVERHEAD;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/CompressionRatioEstimator.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/**
 * This class help estimate the compression ratio for each topic and compression type combination.
 *
 * Thread-safety: the per-topic float[] (indexed by compression type id) is used as its own lock;
 * all read-modify-write sequences on it happen inside a synchronized block on that array.
 */
public class CompressionRatioEstimator {
    // The constant speed to increase compression ratio when a batch compresses better than expected.
    public static final float COMPRESSION_RATIO_IMPROVING_STEP = 0.005f;
    // The minimum speed to decrease compression ratio when a batch compresses worse than expected.
    public static final float COMPRESSION_RATIO_DETERIORATE_STEP = 0.05f;
    private static final ConcurrentMap<String, float[]> COMPRESSION_RATIO = new ConcurrentHashMap<>();

    /**
     * Update the compression ratio estimation for a topic and compression type.
     *
     * @param topic         the topic to update compression ratio estimation.
     * @param type          the compression type.
     * @param observedRatio the observed compression ratio.
     * @return the compression ratio estimation after the update.
     */
    public static float updateEstimation(String topic, CompressionType type, float observedRatio) {
        float[] compressionRatioForTopic = getAndCreateEstimationIfAbsent(topic);
        // Read, update, and return under the same lock so concurrent callers never base
        // their adjustment on a stale estimation or return a torn intermediate value.
        // (Previously the current estimation was read outside the synchronized block.)
        synchronized (compressionRatioForTopic) {
            float currentEstimation = compressionRatioForTopic[type.id];
            if (observedRatio > currentEstimation)
                // Deteriorate quickly: jump at least one DETERIORATE_STEP, or directly to the observation.
                compressionRatioForTopic[type.id] = Math.max(currentEstimation + COMPRESSION_RATIO_DETERIORATE_STEP, observedRatio);
            else if (observedRatio < currentEstimation)
                // Improve slowly: move down by at most one IMPROVING_STEP, never below the observation.
                compressionRatioForTopic[type.id] = Math.max(currentEstimation - COMPRESSION_RATIO_IMPROVING_STEP, observedRatio);
            return compressionRatioForTopic[type.id];
        }
    }

    /**
     * Get the compression ratio estimation for a topic and compression type.
     */
    public static float estimation(String topic, CompressionType type) {
        float[] compressionRatioForTopic = getAndCreateEstimationIfAbsent(topic);
        return compressionRatioForTopic[type.id];
    }

    /**
     * Reset the compression ratio estimation to the initial values for a topic.
     */
    public static void resetEstimation(String topic) {
        float[] compressionRatioForTopic = getAndCreateEstimationIfAbsent(topic);
        synchronized (compressionRatioForTopic) {
            for (CompressionType type : CompressionType.values()) {
                compressionRatioForTopic[type.id] = type.rate;
            }
        }
    }

    /**
     * Remove the compression ratio estimation for a topic.
     */
    public static void removeEstimation(String topic) {
        COMPRESSION_RATIO.remove(topic);
    }

    /**
     * Set the compression estimation for a topic compression type combination. This method is for unit test purpose.
     */
    public static void setEstimation(String topic, CompressionType type, float ratio) {
        float[] compressionRatioForTopic = getAndCreateEstimationIfAbsent(topic);
        synchronized (compressionRatioForTopic) {
            compressionRatioForTopic[type.id] = ratio;
        }
    }

    // Look up the per-topic ratio array, creating and publishing it atomically on first use.
    private static float[] getAndCreateEstimationIfAbsent(String topic) {
        float[] compressionRatioForTopic = COMPRESSION_RATIO.get(topic);
        if (compressionRatioForTopic == null) {
            compressionRatioForTopic = initialCompressionRatio();
            float[] existingCompressionRatio = COMPRESSION_RATIO.putIfAbsent(topic, compressionRatioForTopic);
            // Someone created the compression ratio array before us, use it.
            if (existingCompressionRatio != null)
                return existingCompressionRatio;
        }
        return compressionRatioForTopic;
    }

    // Seed each compression type's estimation with its declared default rate.
    private static float[] initialCompressionRatio() {
        float[] compressionRatio = new float[CompressionType.values().length];
        for (CompressionType type : CompressionType.values()) {
            compressionRatio[type.id] = type.rate;
        }
        return compressionRatio;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/CompressionType.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.compress.KafkaLZ4BlockInputStream;
import org.apache.kafka.common.compress.KafkaLZ4BlockOutputStream;
import org.apache.kafka.common.compress.SnappyFactory;
import org.apache.kafka.common.compress.ZstdFactory;
import org.apache.kafka.common.utils.BufferSupplier;
import org.apache.kafka.common.utils.ByteBufferInputStream;
import org.apache.kafka.common.utils.ByteBufferOutputStream;

import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

/**
 * The compression type to use
 */
public enum CompressionType {
    // NONE is a pass-through: no wrapping stream is allocated for output.
    NONE(0, "none", 1.0f) {
        @Override
        public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) {
            return buffer;
        }

        @Override
        public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) {
            return new ByteBufferInputStream(buffer);
        }
    },

    // Shipped with the JDK
    GZIP(1, "gzip", 1.0f) {
        @Override
        public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) {
            try {
                // Set input buffer (uncompressed) to 16 KB (none by default) and output buffer (compressed) to
                // 8 KB (0.5 KB by default) to ensure reasonable performance in cases where the caller passes a small
                // number of bytes to write (potentially a single byte)
                return new BufferedOutputStream(new GZIPOutputStream(buffer, 8 * 1024), 16 * 1024);
            } catch (Exception e) {
                throw new KafkaException(e);
            }
        }

        @Override
        public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) {
            try {
                // Set output buffer (uncompressed) to 16 KB (none by default) and input buffer (compressed) to
                // 8 KB (0.5 KB by default) to ensure reasonable performance in cases where the caller reads a small
                // number of bytes (potentially a single byte)
                return new BufferedInputStream(new GZIPInputStream(new ByteBufferInputStream(buffer), 8 * 1024),
                        16 * 1024);
            } catch (Exception e) {
                throw new KafkaException(e);
            }
        }
    },

    // We should only load classes from a given compression library when we actually use said compression library. This
    // is because compression libraries include native code for a set of platforms and we want to avoid errors
    // in case the platform is not supported and the compression library is not actually used.
    // To ensure this, we only reference compression library code from classes that are only invoked when actual usage
    // happens.

    SNAPPY(2, "snappy", 1.0f) {
        @Override
        public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) {
            return SnappyFactory.wrapForOutput(buffer);
        }

        @Override
        public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) {
            return SnappyFactory.wrapForInput(buffer);
        }
    },

    LZ4(3, "lz4", 1.0f) {
        @Override
        public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) {
            try {
                // Magic v0 used a broken LZ4 framing checksum; the flag preserves wire compatibility.
                return new KafkaLZ4BlockOutputStream(buffer, messageVersion == RecordBatch.MAGIC_VALUE_V0);
            } catch (Throwable e) {
                throw new KafkaException(e);
            }
        }

        @Override
        public InputStream wrapForInput(ByteBuffer inputBuffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) {
            try {
                return new KafkaLZ4BlockInputStream(inputBuffer, decompressionBufferSupplier,
                        messageVersion == RecordBatch.MAGIC_VALUE_V0);
            } catch (Throwable e) {
                throw new KafkaException(e);
            }
        }
    },

    ZSTD(4, "zstd", 1.0f) {
        @Override
        public OutputStream wrapForOutput(ByteBufferOutputStream buffer, byte messageVersion) {
            return ZstdFactory.wrapForOutput(buffer);
        }

        @Override
        public InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier) {
            return ZstdFactory.wrapForInput(buffer, messageVersion, decompressionBufferSupplier);
        }
    };

    // Wire id persisted in the record attributes.
    public final int id;
    // Human-readable name used in configuration.
    public final String name;
    // Default estimated compression ratio used to seed CompressionRatioEstimator.
    public final float rate;

    CompressionType(int id, String name, float rate) {
        this.id = id;
        this.name = name;
        this.rate = rate;
    }

    /**
     * Wrap bufferStream with an OutputStream that will compress data with this CompressionType.
     *
     * Note: Unlike {@link #wrapForInput}, {@link #wrapForOutput} cannot take {@link ByteBuffer}s directly.
     * Currently, {@link MemoryRecordsBuilder#writeDefaultBatchHeader()} and {@link MemoryRecordsBuilder#writeLegacyCompressedWrapperHeader()}
     * write to the underlying buffer in the given {@link ByteBufferOutputStream} after the compressed data has been written.
     * In the event that the buffer needs to be expanded while writing the data, access to the underlying buffer needs to be preserved.
     */
    public abstract OutputStream wrapForOutput(ByteBufferOutputStream bufferStream, byte messageVersion);

    /**
     * Wrap buffer with an InputStream that will decompress data with this CompressionType.
     *
     * @param decompressionBufferSupplier The supplier of ByteBuffer(s) used for decompression if supported.
     *                                    For small record batches, allocating a potentially large buffer (64 KB for LZ4)
     *                                    will dominate the cost of decompressing and iterating over the records in the
     *                                    batch. As such, a supplier that reuses buffers will have a significant
     *                                    performance impact.
     */
    public abstract InputStream wrapForInput(ByteBuffer buffer, byte messageVersion, BufferSupplier decompressionBufferSupplier);

    // Resolve a compression type from its wire id (as stored in the record attributes).
    public static CompressionType forId(int id) {
        switch (id) {
            case 0:
                return NONE;
            case 1:
                return GZIP;
            case 2:
                return SNAPPY;
            case 3:
                return LZ4;
            case 4:
                return ZSTD;
            default:
                throw new IllegalArgumentException("Unknown compression type id: " + id);
        }
    }

    // Resolve a compression type from its configuration name.
    public static CompressionType forName(String name) {
        if (NONE.name.equals(name))
            return NONE;
        else if (GZIP.name.equals(name))
            return GZIP;
        else if (SNAPPY.name.equals(name))
            return SNAPPY;
        else if (LZ4.name.equals(name))
            return LZ4;
        else if (ZSTD.name.equals(name))
            return ZSTD;
        else
            throw new IllegalArgumentException("Unknown compression name: " + name);
    }

    @Override
    public String toString() {
        return name;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/ControlRecordType.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.InvalidRecordException;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Struct;
import org.apache.kafka.common.protocol.types.Type;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.nio.ByteBuffer;

/**
 * Control records specify a schema for the record key which includes a version and type:
 *
 * Key => Version Type
 *   Version => Int16
 *   Type => Int16
 *
 * In the future, the version can be bumped to indicate a new schema, but it must be backwards compatible
 * with the current schema. In general, this means we can add new fields, but we cannot remove old ones.
 *
 * Note that control records are not considered for compaction by the log cleaner.
 *
 * The schema for the value field is left to the control record type to specify.
 */
public enum ControlRecordType {
    ABORT((short) 0),
    COMMIT((short) 1),

    // Raft quorum related control messages.
    LEADER_CHANGE((short) 2),
    SNAPSHOT_HEADER((short) 3),
    SNAPSHOT_FOOTER((short) 4),

    // UNKNOWN is used to indicate a control type which the client is not aware of and should be ignored
    UNKNOWN((short) -1);

    private static final Logger log = LoggerFactory.getLogger(ControlRecordType.class);

    static final short CURRENT_CONTROL_RECORD_KEY_VERSION = 0;
    static final int CURRENT_CONTROL_RECORD_KEY_SIZE = 4;
    private static final Schema CONTROL_RECORD_KEY_SCHEMA_VERSION_V0 = new Schema(
            new Field("version", Type.INT16),
            new Field("type", Type.INT16));

    // Wire-level type id carried in the control record key.
    private final short type;

    ControlRecordType(short type) {
        this.type = type;
    }

    public short type() {
        return type;
    }

    /**
     * Build the serializable key struct (version + type) for this control record type.
     *
     * @throws IllegalArgumentException if invoked on {@link #UNKNOWN}, which has no wire representation
     */
    public Struct recordKey() {
        if (this == UNKNOWN)
            throw new IllegalArgumentException("Cannot serialize UNKNOWN control record type");

        Struct key = new Struct(CONTROL_RECORD_KEY_SCHEMA_VERSION_V0);
        key.set("version", CURRENT_CONTROL_RECORD_KEY_VERSION);
        key.set("type", type);
        return key;
    }

    /**
     * Read the raw type id out of a control record key buffer, validating size and version first.
     * The buffer's position is not advanced (absolute reads only).
     */
    public static short parseTypeId(ByteBuffer key) {
        int remaining = key.remaining();
        if (remaining < CURRENT_CONTROL_RECORD_KEY_SIZE)
            throw new InvalidRecordException("Invalid value size found for end control record key. Must have at least "
                    + CURRENT_CONTROL_RECORD_KEY_SIZE + " bytes, but found only " + remaining);

        short version = key.getShort(0);
        if (version < 0)
            throw new InvalidRecordException("Invalid version found for control record: " + version
                    + ". May indicate data corruption");

        // Unknown (newer) versions are parsed as the current version since the schema must stay
        // backwards compatible; only log at debug level.
        if (version != CURRENT_CONTROL_RECORD_KEY_VERSION)
            log.debug("Received unknown control record key version {}. Parsing as version {}",
                    version, CURRENT_CONTROL_RECORD_KEY_VERSION);
        return key.getShort(2);
    }

    /**
     * Map a wire type id to the corresponding enum constant, or {@link #UNKNOWN} when unrecognized.
     */
    public static ControlRecordType fromTypeId(short typeId) {
        for (ControlRecordType candidate : values()) {
            if (candidate != UNKNOWN && candidate.type == typeId)
                return candidate;
        }
        return UNKNOWN;
    }

    public static ControlRecordType parse(ByteBuffer key) {
        return fromTypeId(parseTypeId(key));
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/ControlRecordUtils.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.message.LeaderChangeMessage;
import org.apache.kafka.common.message.SnapshotHeaderRecord;
import org.apache.kafka.common.message.SnapshotFooterRecord;
import org.apache.kafka.common.protocol.ByteBufferAccessor;

import java.nio.ByteBuffer;

/**
 * Utility class for easy interaction with control records.
 */
public class ControlRecordUtils {

    public static final short LEADER_CHANGE_CURRENT_VERSION = 0;
    public static final short SNAPSHOT_HEADER_CURRENT_VERSION = 0;
    public static final short SNAPSHOT_FOOTER_CURRENT_VERSION = 0;

    /**
     * Deserialize the value of a {@code LEADER_CHANGE} control record after validating its key.
     */
    public static LeaderChangeMessage deserializeLeaderChangeMessage(Record record) {
        checkRecordType(ControlRecordType.LEADER_CHANGE, ControlRecordType.parse(record.key()));
        return deserializeLeaderChangeMessage(record.value());
    }

    public static LeaderChangeMessage deserializeLeaderChangeMessage(ByteBuffer data) {
        return new LeaderChangeMessage(new ByteBufferAccessor(data.slice()), LEADER_CHANGE_CURRENT_VERSION);
    }

    /**
     * Deserialize the value of a {@code SNAPSHOT_HEADER} control record after validating its key.
     */
    public static SnapshotHeaderRecord deserializeSnapshotHeaderRecord(Record record) {
        checkRecordType(ControlRecordType.SNAPSHOT_HEADER, ControlRecordType.parse(record.key()));
        return deserializeSnapshotHeaderRecord(record.value());
    }

    public static SnapshotHeaderRecord deserializeSnapshotHeaderRecord(ByteBuffer data) {
        return new SnapshotHeaderRecord(new ByteBufferAccessor(data.slice()), SNAPSHOT_HEADER_CURRENT_VERSION);
    }

    /**
     * Deserialize the value of a {@code SNAPSHOT_FOOTER} control record after validating its key.
     */
    public static SnapshotFooterRecord deserializeSnapshotFooterRecord(Record record) {
        checkRecordType(ControlRecordType.SNAPSHOT_FOOTER, ControlRecordType.parse(record.key()));
        return deserializeSnapshotFooterRecord(record.value());
    }

    public static SnapshotFooterRecord deserializeSnapshotFooterRecord(ByteBuffer data) {
        return new SnapshotFooterRecord(new ByteBufferAccessor(data.slice()), SNAPSHOT_FOOTER_CURRENT_VERSION);
    }

    // Fails fast when the control record key does not carry the expected type; the message
    // intentionally matches the historical per-type error text, e.g.
    // "Expected LEADER_CHANGE control record type(2), but found ABORT".
    private static void checkRecordType(ControlRecordType expected, ControlRecordType actual) {
        if (actual != expected) {
            throw new IllegalArgumentException(
                "Expected " + expected + " control record type(" + expected.type() + "), but found " + actual);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/ConvertedRecords.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; public class ConvertedRecords<T extends Records> { private final T records; private final RecordConversionStats recordConversionStats; public ConvertedRecords(T records, RecordConversionStats recordConversionStats) { this.records = records; this.recordConversionStats = recordConversionStats; } public T records() { return records; } public RecordConversionStats recordConversionStats() { return recordConversionStats; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/DefaultRecord.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.InvalidRecordException;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.PrimitiveRef;
import org.apache.kafka.common.utils.PrimitiveRef.IntRef;
import org.apache.kafka.common.utils.Utils;

import java.io.DataInput;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Objects;

import static org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V2;

/**
 * This class implements the inner record format for magic 2 and above. The schema is as follows:
 *
 *
 * Record =>
 *   Length => Varint
 *   Attributes => Int8
 *   TimestampDelta => Varlong
 *   OffsetDelta => Varint
 *   Key => Bytes
 *   Value => Bytes
 *   Headers => [HeaderKey HeaderValue]
 *     HeaderKey => String
 *     HeaderValue => Bytes
 *
 * Note that in this schema, the Bytes and String types use a variable length integer to represent
 * the length of the field. The array type used for the headers also uses a Varint for the number of
 * headers.
 *
 * The current record attributes are depicted below:
 *
 *  ----------------
 *  | Unused (0-7) |
 *  ----------------
 *
 * The offset and timestamp deltas compute the difference relative to the base offset and
 * base timestamp of the batch that this record is contained in.
 */
public class DefaultRecord implements Record {

    // excluding key, value and headers: 5 bytes length + 10 bytes timestamp + 5 bytes offset + 1 byte attributes
    public static final int MAX_RECORD_OVERHEAD = 21;

    // Size of the varint encoding of -1, used to encode null key/value/header-value fields.
    private static final int NULL_VARINT_SIZE_BYTES = ByteUtils.sizeOfVarint(-1);

    private final int sizeInBytes;       // total serialized size including the leading length varint
    private final byte attributes;       // currently unused on the wire, retained for forward compatibility
    private final long offset;           // absolute offset (base offset + offset delta)
    private final long timestamp;        // absolute timestamp (base timestamp + delta, or log append time)
    private final int sequence;          // absolute producer sequence, or RecordBatch.NO_SEQUENCE
    private final ByteBuffer key;        // may be null
    private final ByteBuffer value;      // may be null
    private final Header[] headers;      // never null; Record.EMPTY_HEADERS when absent

    DefaultRecord(int sizeInBytes,
                  byte attributes,
                  long offset,
                  long timestamp,
                  int sequence,
                  ByteBuffer key,
                  ByteBuffer value,
                  Header[] headers) {
        this.sizeInBytes = sizeInBytes;
        this.attributes = attributes;
        this.offset = offset;
        this.timestamp = timestamp;
        this.sequence = sequence;
        this.key = key;
        this.value = value;
        this.headers = headers;
    }

    @Override
    public long offset() {
        return offset;
    }

    @Override
    public int sequence() {
        return sequence;
    }

    @Override
    public int sizeInBytes() {
        return sizeInBytes;
    }

    @Override
    public long timestamp() {
        return timestamp;
    }

    public byte attributes() {
        return attributes;
    }

    // Validation is performed eagerly while parsing (see readFrom), so there is nothing to re-check here.
    @Override
    public void ensureValid() {}

    @Override
    public int keySize() {
        // -1 signals a null key, matching the varint encoding on the wire
        return key == null ? -1 : key.remaining();
    }

    @Override
    public int valueSize() {
        // -1 signals a null value, matching the varint encoding on the wire
        return value == null ? -1 : value.remaining();
    }

    @Override
    public boolean hasKey() {
        return key != null;
    }

    @Override
    public ByteBuffer key() {
        // duplicate() so callers can move position/limit without affecting this record
        return key == null ? null : key.duplicate();
    }

    @Override
    public boolean hasValue() {
        return value != null;
    }

    @Override
    public ByteBuffer value() {
        // duplicate() so callers can move position/limit without affecting this record
        return value == null ? null : value.duplicate();
    }

    @Override
    public Header[] headers() {
        return headers;
    }

    /**
     * Write the record to `out` and return its size.
     */
    public static int writeTo(DataOutputStream out,
                              int offsetDelta,
                              long timestampDelta,
                              ByteBuffer key,
                              ByteBuffer value,
                              Header[] headers) throws IOException {
        int sizeInBytes = sizeOfBodyInBytes(offsetDelta, timestampDelta, key, value, headers);
        ByteUtils.writeVarint(sizeInBytes, out);

        byte attributes = 0; // there are no used record attributes at the moment
        out.write(attributes);

        ByteUtils.writeVarlong(timestampDelta, out);
        ByteUtils.writeVarint(offsetDelta, out);

        // Null key is encoded as a length of -1
        if (key == null) {
            ByteUtils.writeVarint(-1, out);
        } else {
            int keySize = key.remaining();
            ByteUtils.writeVarint(keySize, out);
            Utils.writeTo(out, key, keySize);
        }

        // Null value is encoded as a length of -1
        if (value == null) {
            ByteUtils.writeVarint(-1, out);
        } else {
            int valueSize = value.remaining();
            ByteUtils.writeVarint(valueSize, out);
            Utils.writeTo(out, value, valueSize);
        }

        if (headers == null)
            throw new IllegalArgumentException("Headers cannot be null");

        ByteUtils.writeVarint(headers.length, out);

        for (Header header : headers) {
            String headerKey = header.key();
            if (headerKey == null)
                throw new IllegalArgumentException("Invalid null header key found in headers");

            byte[] utf8Bytes = Utils.utf8(headerKey);
            ByteUtils.writeVarint(utf8Bytes.length, out);
            out.write(utf8Bytes);

            // A header value may be null; encoded like a null key/value
            byte[] headerValue = header.value();
            if (headerValue == null) {
                ByteUtils.writeVarint(-1, out);
            } else {
                ByteUtils.writeVarint(headerValue.length, out);
                out.write(headerValue);
            }
        }

        // Total bytes written: length varint plus the body it describes
        return ByteUtils.sizeOfVarint(sizeInBytes) + sizeInBytes;
    }

    @Override
    public boolean hasMagic(byte magic) {
        // This record format is shared by magic v2 and any later versions
        return magic >= MAGIC_VALUE_V2;
    }

    @Override
    public boolean isCompressed() {
        // Compression is applied at the batch level for magic v2, never per-record
        return false;
    }

    @Override
    public boolean hasTimestampType(TimestampType timestampType) {
        // The timestamp type is a batch-level attribute for magic v2, never per-record
        return false;
    }

    @Override
    public String toString() {
        return String.format("DefaultRecord(offset=%d, timestamp=%d, key=%d bytes, value=%d bytes)",
                offset,
                timestamp,
                key == null ? 0 : key.limit(),
                value == null ? 0 : value.limit());
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;

        DefaultRecord that = (DefaultRecord) o;
        return sizeInBytes == that.sizeInBytes &&
                attributes == that.attributes &&
                offset == that.offset &&
                timestamp == that.timestamp &&
                sequence == that.sequence &&
                Objects.equals(key, that.key) &&
                Objects.equals(value, that.value) &&
                Arrays.equals(headers, that.headers);
    }

    @Override
    public int hashCode() {
        int result = sizeInBytes;
        result = 31 * result + (int) attributes;
        result = 31 * result + Long.hashCode(offset);
        result = 31 * result + Long.hashCode(timestamp);
        result = 31 * result + sequence;
        result = 31 * result + (key != null ? key.hashCode() : 0);
        result = 31 * result + (value != null ? value.hashCode() : 0);
        result = 31 * result + Arrays.hashCode(headers);
        return result;
    }

    /**
     * Read a record from a DataInput by copying its body into a freshly allocated buffer.
     *
     * @param logAppendTime when non-null, overrides the record's own timestamp (LOG_APPEND_TIME batches)
     */
    public static DefaultRecord readFrom(DataInput input,
                                         long baseOffset,
                                         long baseTimestamp,
                                         int baseSequence,
                                         Long logAppendTime) throws IOException {
        int sizeOfBodyInBytes = ByteUtils.readVarint(input);
        // NOTE(review): the allocation size comes straight off the wire; callers are presumably
        // expected to have bounded the batch size upstream — confirm before reusing elsewhere.
        ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes);
        input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes);
        int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes;
        return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp,
                baseSequence, logAppendTime);
    }

    /**
     * Read a record in place from a ByteBuffer; key/value/header buffers are slices of the input.
     *
     * @param logAppendTime when non-null, overrides the record's own timestamp (LOG_APPEND_TIME batches)
     */
    public static DefaultRecord readFrom(ByteBuffer buffer,
                                         long baseOffset,
                                         long baseTimestamp,
                                         int baseSequence,
                                         Long logAppendTime) {
        int sizeOfBodyInBytes = ByteUtils.readVarint(buffer);
        if (buffer.remaining() < sizeOfBodyInBytes)
            throw new InvalidRecordException("Invalid record size: expected " + sizeOfBodyInBytes +
                    " bytes in record payload, but instead the buffer has only " + buffer.remaining() +
                    " remaining bytes.");
        int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes;
        return readFrom(buffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp,
                baseSequence, logAppendTime);
    }

    // Parses the record body in place; key/value/header buffers are zero-copy slices of `buffer`.
    // Structural errors surface as InvalidRecordException.
    private static DefaultRecord readFrom(ByteBuffer buffer,
                                          int sizeInBytes,
                                          int sizeOfBodyInBytes,
                                          long baseOffset,
                                          long baseTimestamp,
                                          int baseSequence,
                                          Long logAppendTime) {
        try {
            int recordStart = buffer.position();
            byte attributes = buffer.get();
            long timestampDelta = ByteUtils.readVarlong(buffer);
            long timestamp = baseTimestamp + timestampDelta;
            // For LOG_APPEND_TIME batches the broker timestamp replaces the producer's
            if (logAppendTime != null)
                timestamp = logAppendTime;

            int offsetDelta = ByteUtils.readVarint(buffer);
            long offset = baseOffset + offsetDelta;
            // Sequence wraps at Integer.MAX_VALUE, hence the dedicated increment helper
            int sequence = baseSequence >= 0 ?
                    DefaultRecordBatch.incrementSequence(baseSequence, offsetDelta) :
                    RecordBatch.NO_SEQUENCE;

            // Key: length varint of -1 means null
            ByteBuffer key = null;
            int keySize = ByteUtils.readVarint(buffer);
            if (keySize >= 0) {
                key = buffer.slice();
                key.limit(keySize);
                buffer.position(buffer.position() + keySize);
            }

            // Value: length varint of -1 means null
            ByteBuffer value = null;
            int valueSize = ByteUtils.readVarint(buffer);
            if (valueSize >= 0) {
                value = buffer.slice();
                value.limit(valueSize);
                buffer.position(buffer.position() + valueSize);
            }

            int numHeaders = ByteUtils.readVarint(buffer);
            if (numHeaders < 0)
                throw new InvalidRecordException("Found invalid number of record headers " + numHeaders);
            // Each header occupies at least one byte, so the count can never exceed the remaining bytes
            if (numHeaders > buffer.remaining())
                throw new InvalidRecordException("Found invalid number of record headers. " + numHeaders +
                        " is larger than the remaining size of the buffer");

            final Header[] headers;
            if (numHeaders == 0)
                headers = Record.EMPTY_HEADERS;
            else
                headers = readHeaders(buffer, numHeaders);

            // validate whether we have read all header bytes in the current record
            if (buffer.position() - recordStart != sizeOfBodyInBytes)
                throw new InvalidRecordException("Invalid record size: expected to read " + sizeOfBodyInBytes +
                        " bytes in record payload, but instead read " + (buffer.position() - recordStart));

            return new DefaultRecord(sizeInBytes, attributes, offset, timestamp, sequence, key, value, headers);
        } catch (BufferUnderflowException | IllegalArgumentException e) {
            throw new InvalidRecordException("Found invalid record structure", e);
        }
    }

    /**
     * Read only the fixed fields and sizes of a record, skipping over the key/value/header bytes.
     * Useful when iterating records purely for validation, as it avoids materializing payloads.
     *
     * @param skipArray scratch array reused as the streaming window over the record body
     */
    public static PartialDefaultRecord readPartiallyFrom(DataInput input,
                                                         byte[] skipArray,
                                                         long baseOffset,
                                                         long baseTimestamp,
                                                         int baseSequence,
                                                         Long logAppendTime) throws IOException {
        int sizeOfBodyInBytes = ByteUtils.readVarint(input);
        int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes;

        return readPartiallyFrom(input, skipArray, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp,
                baseSequence, logAppendTime);
    }

    // Streams the record body through `skipArray`, reading fixed fields and skipping variable ones.
    // `skipBuffer` is a sliding window over the input: readMore() refills it as fields are consumed.
    private static PartialDefaultRecord readPartiallyFrom(DataInput input,
                                                          byte[] skipArray,
                                                          int sizeInBytes,
                                                          int sizeOfBodyInBytes,
                                                          long baseOffset,
                                                          long baseTimestamp,
                                                          int baseSequence,
                                                          Long logAppendTime) throws IOException {
        ByteBuffer skipBuffer = ByteBuffer.wrap(skipArray);
        // set its limit to 0 to indicate no bytes readable yet
        skipBuffer.limit(0);

        try {
            // reading the attributes / timestamp / offset and key-size does not require
            // any byte array allocation and therefore we can just read them straight-forwardly
            IntRef bytesRemaining = PrimitiveRef.ofInt(sizeOfBodyInBytes);

            byte attributes = readByte(skipBuffer, input, bytesRemaining);
            long timestampDelta = readVarLong(skipBuffer, input, bytesRemaining);
            long timestamp = baseTimestamp + timestampDelta;
            if (logAppendTime != null)
                timestamp = logAppendTime;

            int offsetDelta = readVarInt(skipBuffer, input, bytesRemaining);
            long offset = baseOffset + offsetDelta;
            int sequence = baseSequence >= 0 ?
                    DefaultRecordBatch.incrementSequence(baseSequence, offsetDelta) :
                    RecordBatch.NO_SEQUENCE;

            // first skip key
            int keySize = skipLengthDelimitedField(skipBuffer, input, bytesRemaining);

            // then skip value
            int valueSize = skipLengthDelimitedField(skipBuffer, input, bytesRemaining);

            // then skip header
            int numHeaders = readVarInt(skipBuffer, input, bytesRemaining);
            if (numHeaders < 0)
                throw new InvalidRecordException("Found invalid number of record headers " + numHeaders);
            for (int i = 0; i < numHeaders; i++) {
                int headerKeySize = skipLengthDelimitedField(skipBuffer, input, bytesRemaining);
                if (headerKeySize < 0)
                    throw new InvalidRecordException("Invalid negative header key size " + headerKeySize);

                // headerValueSize
                skipLengthDelimitedField(skipBuffer, input, bytesRemaining);
            }

            // The whole body must be consumed exactly — leftovers in either direction are corruption
            if (bytesRemaining.value > 0 || skipBuffer.remaining() > 0)
                throw new InvalidRecordException("Invalid record size: expected to read " + sizeOfBodyInBytes +
                        " bytes in record payload, but there are still bytes remaining");

            return new PartialDefaultRecord(sizeInBytes, attributes, offset, timestamp, sequence, keySize, valueSize);
        } catch (BufferUnderflowException | IllegalArgumentException e) {
            throw new InvalidRecordException("Found invalid record structure", e);
        }
    }

    // Reads one byte from the sliding window, refilling it first if it is empty.
    private static byte readByte(ByteBuffer buffer, DataInput input, IntRef bytesRemaining) throws IOException {
        if (buffer.remaining() < 1 && bytesRemaining.value > 0) {
            readMore(buffer, input, bytesRemaining);
        }

        return buffer.get();
    }

    // Reads a varlong; 10 bytes is the maximum encoded size, so refill when fewer are buffered.
    private static long readVarLong(ByteBuffer buffer, DataInput input, IntRef bytesRemaining) throws IOException {
        if (buffer.remaining() < 10 && bytesRemaining.value > 0) {
            readMore(buffer, input, bytesRemaining);
        }

        return ByteUtils.readVarlong(buffer);
    }

    // Reads a varint; 5 bytes is the maximum encoded size, so refill when fewer are buffered.
    private static int readVarInt(ByteBuffer buffer, DataInput input, IntRef bytesRemaining) throws IOException {
        if (buffer.remaining() < 5 && bytesRemaining.value > 0) {
            readMore(buffer, input, bytesRemaining);
        }

        return ByteUtils.readVarint(buffer);
    }

    // Reads a length varint and then skips that many bytes, refilling the sliding window as
    // needed. Returns the decoded length (negative means a null field was encoded).
    private static int skipLengthDelimitedField(ByteBuffer buffer, DataInput input, IntRef bytesRemaining) throws IOException {
        boolean needMore = false;
        int sizeInBytes = -1;
        int bytesToSkip = -1;

        while (true) {
            if (needMore) {
                readMore(buffer, input, bytesRemaining);
                needMore = false;
            }

            if (bytesToSkip < 0) {
                // still decoding the length prefix; a varint needs at most 5 buffered bytes
                if (buffer.remaining() < 5 && bytesRemaining.value > 0) {
                    needMore = true;
                } else {
                    sizeInBytes = ByteUtils.readVarint(buffer);
                    if (sizeInBytes <= 0)
                        return sizeInBytes;
                    else
                        bytesToSkip = sizeInBytes;
                }
            } else {
                // skipping the field body, possibly across multiple window refills
                if (bytesToSkip > buffer.remaining()) {
                    bytesToSkip -= buffer.remaining();
                    buffer.position(buffer.limit());
                    needMore = true;
                } else {
                    buffer.position(buffer.position() + bytesToSkip);
                    return sizeInBytes;
                }
            }
        }
    }

    // Refills the sliding window: shifts any unread bytes to the front of the backing array,
    // then reads as many new bytes from `input` as fit (bounded by bytesRemaining).
    private static void readMore(ByteBuffer buffer, DataInput input, IntRef bytesRemaining) throws IOException {
        if (bytesRemaining.value > 0) {
            byte[] array = buffer.array();

            // first copy the remaining bytes to the beginning of the array;
            // at most 4 bytes would be shifted here
            int stepsToLeftShift = buffer.position();
            int bytesToLeftShift = buffer.remaining();
            for (int i = 0; i < bytesToLeftShift; i++) {
                array[i] = array[i + stepsToLeftShift];
            }

            // then try to read more bytes to the remaining of the array
            int bytesRead = Math.min(bytesRemaining.value, array.length - bytesToLeftShift);
            input.readFully(array, bytesToLeftShift, bytesRead);
            buffer.rewind();
            // only those many bytes are readable
            buffer.limit(bytesToLeftShift + bytesRead);

            bytesRemaining.value -= bytesRead;
        } else {
            throw new InvalidRecordException("Invalid record size: expected to read more bytes in record payload");
        }
    }

    // Parses `numHeaders` headers as zero-copy slices of `buffer`; a negative value length means null.
    private static Header[] readHeaders(ByteBuffer buffer, int numHeaders) {
        Header[] headers = new Header[numHeaders];
        for (int i = 0; i < numHeaders; i++) {
            int headerKeySize = ByteUtils.readVarint(buffer);
            if (headerKeySize < 0)
                throw new InvalidRecordException("Invalid negative header key size " + headerKeySize);

            ByteBuffer headerKeyBuffer = buffer.slice();
            headerKeyBuffer.limit(headerKeySize);
            buffer.position(buffer.position() + headerKeySize);

            ByteBuffer headerValue = null;
            int headerValueSize = ByteUtils.readVarint(buffer);
            if (headerValueSize >= 0) {
                headerValue = buffer.slice();
                headerValue.limit(headerValueSize);
                buffer.position(buffer.position() + headerValueSize);
            }

            headers[i] = new RecordHeader(headerKeyBuffer, headerValue);
        }

        return headers;
    }

    /**
     * Total serialized size (length prefix included) for the given fields.
     */
    public static int sizeInBytes(int offsetDelta,
                                  long timestampDelta,
                                  ByteBuffer key,
                                  ByteBuffer value,
                                  Header[] headers) {
        int bodySize = sizeOfBodyInBytes(offsetDelta, timestampDelta, key, value, headers);
        return bodySize + ByteUtils.sizeOfVarint(bodySize);
    }

    /**
     * Total serialized size (length prefix included) when only key/value sizes are known.
     * Negative sizes denote null fields.
     */
    public static int sizeInBytes(int offsetDelta,
                                  long timestampDelta,
                                  int keySize,
                                  int valueSize,
                                  Header[] headers) {
        int bodySize = sizeOfBodyInBytes(offsetDelta, timestampDelta, keySize, valueSize, headers);
        return bodySize + ByteUtils.sizeOfVarint(bodySize);
    }

    private static int sizeOfBodyInBytes(int offsetDelta,
                                         long timestampDelta,
                                         ByteBuffer key,
                                         ByteBuffer value,
                                         Header[] headers) {
        int keySize = key == null ? -1 : key.remaining();
        int valueSize = value == null ? -1 : value.remaining();
        return sizeOfBodyInBytes(offsetDelta, timestampDelta, keySize, valueSize, headers);
    }

    /**
     * Serialized size of the record body, i.e. everything after the leading length varint.
     */
    public static int sizeOfBodyInBytes(int offsetDelta,
                                        long timestampDelta,
                                        int keySize,
                                        int valueSize,
                                        Header[] headers) {
        int size = 1; // always one byte for attributes
        size += ByteUtils.sizeOfVarint(offsetDelta);
        size += ByteUtils.sizeOfVarlong(timestampDelta);
        size += sizeOf(keySize, valueSize, headers);
        return size;
    }

    // Encoded size of key + value + headers; negative key/value sizes denote null fields.
    private static int sizeOf(int keySize, int valueSize, Header[] headers) {
        int size = 0;
        if (keySize < 0)
            size += NULL_VARINT_SIZE_BYTES;
        else
            size += ByteUtils.sizeOfVarint(keySize) + keySize;

        if (valueSize < 0)
            size += NULL_VARINT_SIZE_BYTES;
        else
            size += ByteUtils.sizeOfVarint(valueSize) + valueSize;

        if (headers == null)
            throw new IllegalArgumentException("Headers cannot be null");

        size += ByteUtils.sizeOfVarint(headers.length);
        for (Header header : headers) {
            String headerKey = header.key();
            if (headerKey == null)
                throw new IllegalArgumentException("Invalid null header key found in headers");

            int headerKeySize = Utils.utf8Length(headerKey);
            size += ByteUtils.sizeOfVarint(headerKeySize) + headerKeySize;

            byte[] headerValue = header.value();
            if (headerValue == null) {
                size += NULL_VARINT_SIZE_BYTES;
            } else {
                size += ByteUtils.sizeOfVarint(headerValue.length) + headerValue.length;
            }
        }
        return size;
    }

    // Upper bound on a record's serialized size, using the fixed worst-case overhead constant.
    static int recordSizeUpperBound(ByteBuffer key, ByteBuffer value, Header[] headers) {
        int keySize = key == null ? -1 : key.remaining();
        int valueSize = value == null ? -1 : value.remaining();
        return MAX_RECORD_OVERHEAD + sizeOf(keySize, valueSize, headers);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/DefaultRecordBatch.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; import org.apache.kafka.common.InvalidRecordException; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.utils.BufferSupplier; import org.apache.kafka.common.utils.ByteBufferOutputStream; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.CloseableIterator; import org.apache.kafka.common.utils.Crc32C; import java.io.DataInputStream; import java.io.EOFException; import java.io.IOException; import java.nio.BufferUnderflowException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.NoSuchElementException; import java.util.Objects; import java.util.OptionalLong; import static org.apache.kafka.common.record.Records.LOG_OVERHEAD; /** * RecordBatch implementation for magic 2 and above. 
The schema is given below: * * RecordBatch => * BaseOffset => Int64 * Length => Int32 * PartitionLeaderEpoch => Int32 * Magic => Int8 * CRC => Uint32 * Attributes => Int16 * LastOffsetDelta => Int32 // also serves as LastSequenceDelta * BaseTimestamp => Int64 * MaxTimestamp => Int64 * ProducerId => Int64 * ProducerEpoch => Int16 * BaseSequence => Int32 * Records => [Record] * * Note that when compression is enabled (see attributes below), the compressed record data is serialized * directly following the count of the number of records. * * The CRC covers the data from the attributes to the end of the batch (i.e. all the bytes that follow the CRC). It is * located after the magic byte, which means that clients must parse the magic byte before deciding how to interpret * the bytes between the batch length and the magic byte. The partition leader epoch field is not included in the CRC * computation to avoid the need to recompute the CRC when this field is assigned for every batch that is received by * the broker. The CRC-32C (Castagnoli) polynomial is used for the computation. * * On Compaction: Unlike the older message formats, magic v2 and above preserves the first and last offset/sequence * numbers from the original batch when the log is cleaned. This is required in order to be able to restore the * producer's state when the log is reloaded. If we did not retain the last sequence number, then following * a partition leader failure, once the new leader has rebuilt the producer state from the log, the next sequence * expected number would no longer be in sync with what was written by the client. This would cause an * unexpected OutOfOrderSequence error, which is typically fatal. The base sequence number must be preserved for * duplicate checking: the broker checks incoming Produce requests for duplicates by verifying that the first and * last sequence numbers of the incoming batch match the last from that producer. 
* * Note that if all of the records in a batch are removed during compaction, the broker may still retain an empty * batch header in order to preserve the producer sequence information as described above. These empty batches * are retained only until either a new sequence number is written by the corresponding producer or the producerId * is expired from lack of activity. * * There is no similar need to preserve the timestamp from the original batch after compaction. The BaseTimestamp * field therefore reflects the timestamp of the first record in the batch in most cases. If the batch is empty, the * BaseTimestamp will be set to -1 (NO_TIMESTAMP). If the delete horizon flag is set to 1, the BaseTimestamp * will be set to the time at which tombstone records and aborted transaction markers in the batch should be removed. * * Similarly, the MaxTimestamp field reflects the maximum timestamp of the current records if the timestamp type * is CREATE_TIME. For LOG_APPEND_TIME, on the other hand, the MaxTimestamp field reflects the timestamp set * by the broker and is preserved after compaction. Additionally, the MaxTimestamp of an empty batch always retains * the previous value prior to becoming empty. 
 *
 * The current attributes are given below:
 *
 * ---------------------------------------------------------------------------------------------------------------------------
 * | Unused (7-15) | Delete Horizon Flag (6) | Control (5) | Transactional (4) | Timestamp Type (3) | Compression Type (0-2) |
 * ---------------------------------------------------------------------------------------------------------------------------
 */
public class DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch {
    // Byte offsets/lengths of each fixed header field within the batch buffer.
    // Each OFFSET constant is derived from the previous field's offset + length,
    // so the constants mirror the wire layout documented in the class javadoc.
    static final int BASE_OFFSET_OFFSET = 0;
    static final int BASE_OFFSET_LENGTH = 8;
    static final int LENGTH_OFFSET = BASE_OFFSET_OFFSET + BASE_OFFSET_LENGTH;
    static final int LENGTH_LENGTH = 4;
    static final int PARTITION_LEADER_EPOCH_OFFSET = LENGTH_OFFSET + LENGTH_LENGTH;
    static final int PARTITION_LEADER_EPOCH_LENGTH = 4;
    static final int MAGIC_OFFSET = PARTITION_LEADER_EPOCH_OFFSET + PARTITION_LEADER_EPOCH_LENGTH;
    static final int MAGIC_LENGTH = 1;
    public static final int CRC_OFFSET = MAGIC_OFFSET + MAGIC_LENGTH;
    static final int CRC_LENGTH = 4;
    static final int ATTRIBUTES_OFFSET = CRC_OFFSET + CRC_LENGTH;
    static final int ATTRIBUTE_LENGTH = 2;
    public static final int LAST_OFFSET_DELTA_OFFSET = ATTRIBUTES_OFFSET + ATTRIBUTE_LENGTH;
    static final int LAST_OFFSET_DELTA_LENGTH = 4;
    static final int BASE_TIMESTAMP_OFFSET = LAST_OFFSET_DELTA_OFFSET + LAST_OFFSET_DELTA_LENGTH;
    static final int BASE_TIMESTAMP_LENGTH = 8;
    static final int MAX_TIMESTAMP_OFFSET = BASE_TIMESTAMP_OFFSET + BASE_TIMESTAMP_LENGTH;
    static final int MAX_TIMESTAMP_LENGTH = 8;
    static final int PRODUCER_ID_OFFSET = MAX_TIMESTAMP_OFFSET + MAX_TIMESTAMP_LENGTH;
    static final int PRODUCER_ID_LENGTH = 8;
    static final int PRODUCER_EPOCH_OFFSET = PRODUCER_ID_OFFSET + PRODUCER_ID_LENGTH;
    static final int PRODUCER_EPOCH_LENGTH = 2;
    static final int BASE_SEQUENCE_OFFSET = PRODUCER_EPOCH_OFFSET + PRODUCER_EPOCH_LENGTH;
    static final int BASE_SEQUENCE_LENGTH = 4;
    public static final int RECORDS_COUNT_OFFSET = BASE_SEQUENCE_OFFSET + BASE_SEQUENCE_LENGTH;
    static final int RECORDS_COUNT_LENGTH = 4;
    static final int RECORDS_OFFSET = RECORDS_COUNT_OFFSET + RECORDS_COUNT_LENGTH;
    public static final int RECORD_BATCH_OVERHEAD = RECORDS_OFFSET;

    // Bit masks for the first byte of the two-byte attributes field
    // (see the attributes table in the class javadoc above).
    private static final byte COMPRESSION_CODEC_MASK = 0x07;
    private static final byte TRANSACTIONAL_FLAG_MASK = 0x10;
    private static final int CONTROL_FLAG_MASK = 0x20;
    private static final byte DELETE_HORIZON_FLAG_MASK = 0x40;
    private static final byte TIMESTAMP_TYPE_MASK = 0x08;

    // Size of the scratch array used by the skip-key-value iterator to skim over
    // length-delimited fields without materializing them.
    private static final int MAX_SKIP_BUFFER_SIZE = 2048;

    // Backing buffer holding the complete serialized batch (header + records).
    private final ByteBuffer buffer;

    DefaultRecordBatch(ByteBuffer buffer) {
        this.buffer = buffer;
    }

    @Override
    public byte magic() {
        return buffer.get(MAGIC_OFFSET);
    }

    @Override
    public void ensureValid() {
        // First make sure the declared size can even hold a header, then verify the CRC.
        if (sizeInBytes() < RECORD_BATCH_OVERHEAD)
            throw new CorruptRecordException("Record batch is corrupt (the size " + sizeInBytes() +
                    " is smaller than the minimum allowed overhead " + RECORD_BATCH_OVERHEAD + ")");

        if (!isValid())
            throw new CorruptRecordException("Record is corrupt (stored crc = " + checksum()
                    + ", computed crc = " + computeChecksum() + ")");
    }

    /**
     * Gets the base timestamp of the batch which is used to calculate the record timestamps from the deltas.
     *
     * @return The base timestamp
     */
    public long baseTimestamp() {
        return buffer.getLong(BASE_TIMESTAMP_OFFSET);
    }

    @Override
    public long maxTimestamp() {
        return buffer.getLong(MAX_TIMESTAMP_OFFSET);
    }

    @Override
    public TimestampType timestampType() {
        return (attributes() & TIMESTAMP_TYPE_MASK) == 0 ? TimestampType.CREATE_TIME : TimestampType.LOG_APPEND_TIME;
    }

    @Override
    public long baseOffset() {
        return buffer.getLong(BASE_OFFSET_OFFSET);
    }

    @Override
    public long lastOffset() {
        return baseOffset() + lastOffsetDelta();
    }

    @Override
    public long producerId() {
        return buffer.getLong(PRODUCER_ID_OFFSET);
    }

    @Override
    public short producerEpoch() {
        return buffer.getShort(PRODUCER_EPOCH_OFFSET);
    }

    @Override
    public int baseSequence() {
        return buffer.getInt(BASE_SEQUENCE_OFFSET);
    }

    private int lastOffsetDelta() {
        return buffer.getInt(LAST_OFFSET_DELTA_OFFSET);
    }

    @Override
    public int lastSequence() {
        // The last sequence is derived from the base sequence and the offset delta;
        // incrementSequence handles Integer.MAX_VALUE wrap-around.
        int baseSequence = baseSequence();
        if (baseSequence == RecordBatch.NO_SEQUENCE)
            return RecordBatch.NO_SEQUENCE;
        return incrementSequence(baseSequence, lastOffsetDelta());
    }

    @Override
    public CompressionType compressionType() {
        return CompressionType.forId(attributes() & COMPRESSION_CODEC_MASK);
    }

    @Override
    public int sizeInBytes() {
        // LENGTH field counts bytes following it; add the offset+length prefix back in.
        return LOG_OVERHEAD + buffer.getInt(LENGTH_OFFSET);
    }

    private int count() {
        return buffer.getInt(RECORDS_COUNT_OFFSET);
    }

    @Override
    public Integer countOrNull() {
        return count();
    }

    @Override
    public void writeTo(ByteBuffer buffer) {
        // duplicate() so our own position/limit are untouched by the copy
        buffer.put(this.buffer.duplicate());
    }

    @Override
    public void writeTo(ByteBufferOutputStream outputStream) {
        outputStream.write(this.buffer.duplicate());
    }

    @Override
    public boolean isTransactional() {
        return (attributes() & TRANSACTIONAL_FLAG_MASK) > 0;
    }

    private boolean hasDeleteHorizonMs() {
        return (attributes() & DELETE_HORIZON_FLAG_MASK) > 0;
    }

    @Override
    public OptionalLong deleteHorizonMs() {
        // When the delete-horizon flag is set, the BaseTimestamp field is repurposed
        // to hold the delete horizon (see the class javadoc).
        if (hasDeleteHorizonMs())
            return OptionalLong.of(buffer.getLong(BASE_TIMESTAMP_OFFSET));
        else
            return OptionalLong.empty();
    }

    @Override
    public boolean isControlBatch() {
        return (attributes() & CONTROL_FLAG_MASK) > 0;
    }

    @Override
    public int partitionLeaderEpoch() {
        return buffer.getInt(PARTITION_LEADER_EPOCH_OFFSET);
    }

    /**
     * Returns an input stream over the (possibly compressed) records section of the batch,
     * positioned just past the header.
     */
    public DataInputStream recordInputStream(BufferSupplier bufferSupplier) {
        final ByteBuffer buffer = this.buffer.duplicate();
        buffer.position(RECORDS_OFFSET);
        return new DataInputStream(compressionType().wrapForInput(buffer, magic(), bufferSupplier));
    }

    // Iterator over a compressed records section. When skipKeyValue is true, key/value/header
    // bytes are skimmed over via a reusable scratch array instead of being materialized.
    private CloseableIterator<Record> compressedIterator(BufferSupplier bufferSupplier, boolean skipKeyValue) {
        final DataInputStream inputStream = recordInputStream(bufferSupplier);

        if (skipKeyValue) {
            // this buffer is used to skip length delimited fields like key, value, headers
            byte[] skipArray = new byte[MAX_SKIP_BUFFER_SIZE];

            return new StreamRecordIterator(inputStream) {
                @Override
                protected Record doReadRecord(long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException {
                    return DefaultRecord.readPartiallyFrom(inputStream, skipArray, baseOffset, baseTimestamp, baseSequence, logAppendTime);
                }
            };
        } else {
            return new StreamRecordIterator(inputStream) {
                @Override
                protected Record doReadRecord(long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException {
                    return DefaultRecord.readFrom(inputStream, baseOffset, baseTimestamp, baseSequence, logAppendTime);
                }
            };
        }
    }

    // Iterator over an uncompressed records section; reads records directly from the buffer.
    private CloseableIterator<Record> uncompressedIterator() {
        final ByteBuffer buffer = this.buffer.duplicate();
        buffer.position(RECORDS_OFFSET);
        return new RecordIterator() {
            @Override
            protected Record readNext(long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) {
                try {
                    return DefaultRecord.readFrom(buffer, baseOffset, baseTimestamp, baseSequence, logAppendTime);
                } catch (BufferUnderflowException e) {
                    throw new InvalidRecordException("Incorrect declared batch size, premature EOF reached");
                }
            }
            @Override
            protected boolean ensureNoneRemaining() {
                return !buffer.hasRemaining();
            }
            @Override
            public void close() {}
        };
    }

    @Override
    public Iterator<Record> iterator() {
        if (count() == 0)
            return Collections.emptyIterator();

        if (!isCompressed())
            return uncompressedIterator();

        // for a normal iterator, we cannot ensure that the underlying compression stream is closed,
        // so we decompress the full record set here. Use cases which call for a lower memory footprint
        // can use `streamingIterator` at the cost of additional complexity
        try (CloseableIterator<Record> iterator = compressedIterator(BufferSupplier.NO_CACHING, false)) {
            List<Record> records = new ArrayList<>(count());
            while (iterator.hasNext())
                records.add(iterator.next());
            return records.iterator();
        }
    }

    @Override
    public CloseableIterator<Record> skipKeyValueIterator(BufferSupplier bufferSupplier) {
        if (count() == 0) {
            return CloseableIterator.wrap(Collections.emptyIterator());
        }

        /*
         * For uncompressed iterator, it is actually not worth skipping key / value / headers at all since
         * its ByteBufferInputStream's skip() function is less efficient compared with just reading it actually
         * as it will allocate new byte array.
         */
        if (!isCompressed())
            return uncompressedIterator();

        // we define this to be a closable iterator so that caller (i.e. the log validator) needs to close it
        // while we can save memory footprint of not decompressing the full record set ahead of time
        return compressedIterator(bufferSupplier, true);
    }

    @Override
    public CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier) {
        if (isCompressed())
            return compressedIterator(bufferSupplier, false);
        else
            return uncompressedIterator();
    }

    @Override
    public void setLastOffset(long offset) {
        // Only the base offset is stored; shift it so that base + delta == offset.
        // The base offset is not covered by the CRC, so no checksum update is needed.
        buffer.putLong(BASE_OFFSET_OFFSET, offset - lastOffsetDelta());
    }

    @Override
    public void setMaxTimestamp(TimestampType timestampType, long maxTimestamp) {
        long currentMaxTimestamp = maxTimestamp();
        // We don't need to recompute crc if the timestamp is not updated.
        if (timestampType() == timestampType && currentMaxTimestamp == maxTimestamp)
            return;

        byte attributes = computeAttributes(compressionType(), timestampType, isTransactional(), isControlBatch(), hasDeleteHorizonMs());
        buffer.putShort(ATTRIBUTES_OFFSET, attributes);
        buffer.putLong(MAX_TIMESTAMP_OFFSET, maxTimestamp);
        // Attributes and timestamp are covered by the CRC, so it must be recomputed.
        long crc = computeChecksum();
        ByteUtils.writeUnsignedInt(buffer, CRC_OFFSET, crc);
    }

    @Override
    public void setPartitionLeaderEpoch(int epoch) {
        // The partition leader epoch is deliberately excluded from the CRC (see class javadoc),
        // so it may be rewritten without recomputing the checksum.
        buffer.putInt(PARTITION_LEADER_EPOCH_OFFSET, epoch);
    }

    @Override
    public long checksum() {
        return ByteUtils.readUnsignedInt(buffer, CRC_OFFSET);
    }

    public boolean isValid() {
        return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum();
    }

    private long computeChecksum() {
        // CRC-32C over everything that follows the CRC field (i.e. from attributes to the end).
        return Crc32C.compute(buffer, ATTRIBUTES_OFFSET, buffer.limit() - ATTRIBUTES_OFFSET);
    }

    private byte attributes() {
        // note we're not using the second byte of attributes
        return (byte) buffer.getShort(ATTRIBUTES_OFFSET);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        DefaultRecordBatch that = (DefaultRecordBatch) o;
        return Objects.equals(buffer, that.buffer);
    }

    @Override
    public int hashCode() {
        return buffer != null ? buffer.hashCode() : 0;
    }

    // Pack the batch flags into the first attributes byte. Throws if no timestamp type is given,
    // since v2 batches must always declare one.
    private static byte computeAttributes(CompressionType type, TimestampType timestampType,
                                          boolean isTransactional, boolean isControl, boolean isDeleteHorizonSet) {
        if (timestampType == TimestampType.NO_TIMESTAMP_TYPE)
            throw new IllegalArgumentException("Timestamp type must be provided to compute attributes for message " +
                    "format v2 and above");

        byte attributes = isTransactional ? TRANSACTIONAL_FLAG_MASK : 0;
        if (isControl)
            attributes |= CONTROL_FLAG_MASK;
        if (type.id > 0)
            attributes |= COMPRESSION_CODEC_MASK & type.id;
        if (timestampType == TimestampType.LOG_APPEND_TIME)
            attributes |= TIMESTAMP_TYPE_MASK;
        if (isDeleteHorizonSet)
            attributes |= DELETE_HORIZON_FLAG_MASK;
        return attributes;
    }

    /**
     * Write a v2 batch header with no records (record count 0), e.g. to retain producer
     * state after compaction removes all records of a batch.
     */
    public static void writeEmptyHeader(ByteBuffer buffer,
                                        byte magic,
                                        long producerId,
                                        short producerEpoch,
                                        int baseSequence,
                                        long baseOffset,
                                        long lastOffset,
                                        int partitionLeaderEpoch,
                                        TimestampType timestampType,
                                        long timestamp,
                                        boolean isTransactional,
                                        boolean isControlRecord) {
        int offsetDelta = (int) (lastOffset - baseOffset);
        writeHeader(buffer, baseOffset, offsetDelta, DefaultRecordBatch.RECORD_BATCH_OVERHEAD, magic,
                CompressionType.NONE, timestampType, RecordBatch.NO_TIMESTAMP, timestamp, producerId,
                producerEpoch, baseSequence, isTransactional, isControlRecord, false, partitionLeaderEpoch, 0);
    }

    /**
     * Serialize a complete v2 batch header into the buffer at its current position, computing
     * the CRC over the already-written record data, and advance the position past the header.
     */
    public static void writeHeader(ByteBuffer buffer,
                                   long baseOffset,
                                   int lastOffsetDelta,
                                   int sizeInBytes,
                                   byte magic,
                                   CompressionType compressionType,
                                   TimestampType timestampType,
                                   long baseTimestamp,
                                   long maxTimestamp,
                                   long producerId,
                                   short epoch,
                                   int sequence,
                                   boolean isTransactional,
                                   boolean isControlBatch,
                                   boolean isDeleteHorizonSet,
                                   int partitionLeaderEpoch,
                                   int numRecords) {
        if (magic < RecordBatch.CURRENT_MAGIC_VALUE)
            throw new IllegalArgumentException("Invalid magic value " + magic);
        if (baseTimestamp < 0 && baseTimestamp != NO_TIMESTAMP)
            throw new IllegalArgumentException("Invalid message timestamp " + baseTimestamp);

        short attributes = computeAttributes(compressionType, timestampType, isTransactional, isControlBatch, isDeleteHorizonSet);

        int position = buffer.position();
        buffer.putLong(position + BASE_OFFSET_OFFSET, baseOffset);
        buffer.putInt(position + LENGTH_OFFSET, sizeInBytes - LOG_OVERHEAD);
        buffer.putInt(position + PARTITION_LEADER_EPOCH_OFFSET, partitionLeaderEpoch);
        buffer.put(position + MAGIC_OFFSET, magic);
        buffer.putShort(position + ATTRIBUTES_OFFSET, attributes);
        buffer.putLong(position + BASE_TIMESTAMP_OFFSET, baseTimestamp);
        buffer.putLong(position + MAX_TIMESTAMP_OFFSET, maxTimestamp);
        buffer.putInt(position + LAST_OFFSET_DELTA_OFFSET, lastOffsetDelta);
        buffer.putLong(position + PRODUCER_ID_OFFSET, producerId);
        buffer.putShort(position + PRODUCER_EPOCH_OFFSET, epoch);
        buffer.putInt(position + BASE_SEQUENCE_OFFSET, sequence);
        buffer.putInt(position + RECORDS_COUNT_OFFSET, numRecords);
        // CRC is computed last since it covers all the fields written above it (from attributes on).
        long crc = Crc32C.compute(buffer, ATTRIBUTES_OFFSET, sizeInBytes - ATTRIBUTES_OFFSET);
        buffer.putInt(position + CRC_OFFSET, (int) crc);
        buffer.position(position + RECORD_BATCH_OVERHEAD);
    }

    @Override
    public String toString() {
        return "RecordBatch(magic=" + magic() + ", offsets=[" + baseOffset() + ", " + lastOffset() + "], " +
                "sequence=[" + baseSequence() + ", " + lastSequence() + "], " +
                "isTransactional=" + isTransactional() + ", isControlBatch=" + isControlBatch() + ", " +
                "compression=" + compressionType() + ", timestampType=" + timestampType() + ", crc=" + checksum() + ")";
    }

    /**
     * Compute the serialized batch size for records whose offsets are already assigned,
     * using the first record's timestamp as the base timestamp. Returns 0 for no records.
     */
    public static int sizeInBytes(long baseOffset, Iterable<Record> records) {
        Iterator<Record> iterator = records.iterator();
        if (!iterator.hasNext())
            return 0;

        int size = RECORD_BATCH_OVERHEAD;
        Long baseTimestamp = null;
        while (iterator.hasNext()) {
            Record record = iterator.next();
            int offsetDelta = (int) (record.offset() - baseOffset);
            if (baseTimestamp == null)
                baseTimestamp = record.timestamp();
            long timestampDelta = record.timestamp() - baseTimestamp;
            size += DefaultRecord.sizeInBytes(offsetDelta, timestampDelta, record.key(), record.value(),
                    record.headers());
        }
        return size;
    }

    /**
     * Compute the serialized batch size for records that have no offsets yet; consecutive
     * offset deltas starting from 0 are assumed. Returns 0 for no records.
     */
    public static int sizeInBytes(Iterable<SimpleRecord> records) {
        Iterator<SimpleRecord> iterator = records.iterator();
        if (!iterator.hasNext())
            return 0;

        int size = RECORD_BATCH_OVERHEAD;
        int offsetDelta = 0;
        Long baseTimestamp = null;
        while (iterator.hasNext()) {
            SimpleRecord record = iterator.next();
            if (baseTimestamp == null)
                baseTimestamp = record.timestamp();
            long timestampDelta = record.timestamp() - baseTimestamp;
            size += DefaultRecord.sizeInBytes(offsetDelta++, timestampDelta, record.key(), record.value(),
                    record.headers());
        }
        return size;
    }

    /**
     * Get an upper bound on the size of a batch with only a single record using a given key and value. This
     * is only an estimate because it does not take into account additional overhead from the compression
     * algorithm used.
     */
    static int estimateBatchSizeUpperBound(ByteBuffer key, ByteBuffer value, Header[] headers) {
        return RECORD_BATCH_OVERHEAD + DefaultRecord.recordSizeUpperBound(key, value, headers);
    }

    // Sequence numbers wrap around at Integer.MAX_VALUE back to 0; these helpers implement
    // that modular arithmetic for advancing/rewinding a sequence.
    public static int incrementSequence(int sequence, int increment) {
        if (sequence > Integer.MAX_VALUE - increment)
            return increment - (Integer.MAX_VALUE - sequence) - 1;
        return sequence + increment;
    }

    public static int decrementSequence(int sequence, int decrement) {
        if (sequence < decrement)
            return Integer.MAX_VALUE - (decrement - sequence) + 1;
        return sequence - decrement;
    }

    // Base iterator over the records of this batch: captures the header fields once, enforces
    // the declared record count, and verifies no bytes are left after the last record.
    private abstract class RecordIterator implements CloseableIterator<Record> {
        private final Long logAppendTime;
        private final long baseOffset;
        private final long baseTimestamp;
        private final int baseSequence;
        private final int numRecords;
        private int readRecords = 0;

        RecordIterator() {
            // For LOG_APPEND_TIME, all records carry the broker-assigned max timestamp.
            this.logAppendTime = timestampType() == TimestampType.LOG_APPEND_TIME ? maxTimestamp() : null;
            this.baseOffset = baseOffset();
            this.baseTimestamp = baseTimestamp();
            this.baseSequence = baseSequence();
            int numRecords = count();
            if (numRecords < 0)
                throw new InvalidRecordException("Found invalid record count " + numRecords + " in magic v" +
                        magic() + " batch");
            this.numRecords = numRecords;
        }

        @Override
        public boolean hasNext() {
            return readRecords < numRecords;
        }

        @Override
        public Record next() {
            if (readRecords >= numRecords)
                throw new NoSuchElementException();

            readRecords++;
            Record rec = readNext(baseOffset, baseTimestamp, baseSequence, logAppendTime);
            if (readRecords == numRecords) {
                // Validate that the actual size of the batch is equal to declared size
                // by checking that after reading declared number of items, there are no items left
                // (overflow case, i.e. reading past buffer end is checked elsewhere).
                if (!ensureNoneRemaining())
                    throw new InvalidRecordException("Incorrect declared batch size, records still remaining in file");
            }
            return rec;
        }

        protected abstract Record readNext(long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime);

        protected abstract boolean ensureNoneRemaining();

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    }

    // RecordIterator over a decompression stream; translates stream exceptions into
    // the corresponding record/Kafka exceptions and closes the stream on close().
    private abstract class StreamRecordIterator extends RecordIterator {
        private final DataInputStream inputStream;

        StreamRecordIterator(DataInputStream inputStream) {
            super();
            this.inputStream = inputStream;
        }

        abstract Record doReadRecord(long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException;

        @Override
        protected Record readNext(long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) {
            try {
                return doReadRecord(baseOffset, baseTimestamp, baseSequence, logAppendTime);
            } catch (EOFException e) {
                throw new InvalidRecordException("Incorrect declared batch size, premature EOF reached");
            } catch (IOException e) {
                throw new KafkaException("Failed to decompress record stream", e);
            }
        }

        @Override
        protected boolean ensureNoneRemaining() {
            try {
                // A single read returning -1 proves the stream is exhausted.
                return inputStream.read() == -1;
            } catch (IOException e) {
                throw new KafkaException("Error checking for remaining bytes after reading batch", e);
            }
        }

        @Override
        public void close() {
            try {
                inputStream.close();
            } catch (IOException e) {
                throw new KafkaException("Failed to close record stream", e);
            }
        }
    }

    // File-backed view of a v2 batch: header fields that live in the batch header are
    // served from the lazily-loaded header; everything else delegates to the full batch.
    static class DefaultFileChannelRecordBatch extends FileLogInputStream.FileChannelRecordBatch {

        DefaultFileChannelRecordBatch(long offset,
                                      byte magic,
                                      FileRecords fileRecords,
                                      int position,
                                      int batchSize) {
            super(offset, magic, fileRecords, position, batchSize);
        }

        @Override
        protected RecordBatch toMemoryRecordBatch(ByteBuffer buffer) {
            return new DefaultRecordBatch(buffer);
        }

        @Override
        public long baseOffset() {
            return offset;
        }

        @Override
        public long lastOffset() {
            return loadBatchHeader().lastOffset();
        }

        @Override
        public long producerId() {
            return loadBatchHeader().producerId();
        }

        @Override
        public short producerEpoch() {
            return loadBatchHeader().producerEpoch();
        }

        @Override
        public int baseSequence() {
            return loadBatchHeader().baseSequence();
        }

        @Override
        public int lastSequence() {
            return loadBatchHeader().lastSequence();
        }

        @Override
        public long checksum() {
            return loadBatchHeader().checksum();
        }

        @Override
        public Integer countOrNull() {
            return loadBatchHeader().countOrNull();
        }

        @Override
        public boolean isTransactional() {
            return loadBatchHeader().isTransactional();
        }

        @Override
        public OptionalLong deleteHorizonMs() {
            return loadBatchHeader().deleteHorizonMs();
        }

        @Override
        public boolean isControlBatch() {
            return loadBatchHeader().isControlBatch();
        }

        @Override
        public int partitionLeaderEpoch() {
            return loadBatchHeader().partitionLeaderEpoch();
        }

        @Override
        protected int headerSize() {
            return RECORD_BATCH_OVERHEAD;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/DefaultRecordsSend.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; import org.apache.kafka.common.network.TransferableChannel; import java.io.IOException; public class DefaultRecordsSend<T extends TransferableRecords> extends RecordsSend<T> { public DefaultRecordsSend(T records) { this(records, records.sizeInBytes()); } public DefaultRecordsSend(T records, int maxBytesToWrite) { super(records, maxBytesToWrite); } @Override protected long writeTo(TransferableChannel channel, long previouslyWritten, int remaining) throws IOException { return records().writeTo(channel, previouslyWritten, remaining); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/EndTransactionMarker.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; import org.apache.kafka.common.InvalidRecordException; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Struct; import org.apache.kafka.common.protocol.types.Type; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.nio.ByteBuffer; /** * This class represents the control record which is written to the log to indicate the completion * of a transaction. The record key specifies the {@link ControlRecordType control type} and the * value embeds information useful for write validation (for now, just the coordinator epoch). 
*/
public class EndTransactionMarker {
    private static final Logger log = LoggerFactory.getLogger(EndTransactionMarker.class);

    private static final short CURRENT_END_TXN_MARKER_VERSION = 0;
    private static final Schema END_TXN_MARKER_SCHEMA_VERSION_V0 = new Schema(
            new Field("version", Type.INT16),
            new Field("coordinator_epoch", Type.INT32));

    // Serialized value size: version (int16) + coordinator_epoch (int32)
    static final int CURRENT_END_TXN_MARKER_VALUE_SIZE = 6;
    static final int CURRENT_END_TXN_SCHEMA_RECORD_SIZE = DefaultRecord.sizeInBytes(0, 0L,
            ControlRecordType.CURRENT_CONTROL_RECORD_KEY_SIZE,
            EndTransactionMarker.CURRENT_END_TXN_MARKER_VALUE_SIZE,
            Record.EMPTY_HEADERS);

    private final ControlRecordType type;
    private final int coordinatorEpoch;

    /**
     * @param type the control record type; must be {@link ControlRecordType#COMMIT} or
     *             {@link ControlRecordType#ABORT}
     * @param coordinatorEpoch the epoch of the transaction coordinator that wrote the marker,
     *                         used for write validation
     * @throws IllegalArgumentException if {@code type} is not COMMIT or ABORT
     */
    public EndTransactionMarker(ControlRecordType type, int coordinatorEpoch) {
        ensureTransactionMarkerControlType(type);
        this.type = type;
        this.coordinatorEpoch = coordinatorEpoch;
    }

    public int coordinatorEpoch() {
        return coordinatorEpoch;
    }

    public ControlRecordType controlType() {
        return type;
    }

    private Struct buildRecordValue() {
        Struct struct = new Struct(END_TXN_MARKER_SCHEMA_VERSION_V0);
        struct.set("version", CURRENT_END_TXN_MARKER_VERSION);
        struct.set("coordinator_epoch", coordinatorEpoch);
        return struct;
    }

    /**
     * Serialize this marker's value (version + coordinator epoch) into a buffer ready for reading.
     */
    public ByteBuffer serializeValue() {
        Struct valueStruct = buildRecordValue();
        ByteBuffer value = ByteBuffer.allocate(valueStruct.sizeOf());
        valueStruct.writeTo(value);
        value.flip();
        return value;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        EndTransactionMarker that = (EndTransactionMarker) o;
        return coordinatorEpoch == that.coordinatorEpoch && type == that.type;
    }

    @Override
    public int hashCode() {
        int result = type != null ? type.hashCode() : 0;
        result = 31 * result + coordinatorEpoch;
        return result;
    }

    private static void ensureTransactionMarkerControlType(ControlRecordType type) {
        if (type != ControlRecordType.COMMIT && type != ControlRecordType.ABORT)
            // BUGFIX: the original message concatenated the type directly onto the text
            // ("...markerCOMMIT"); add a separator so the message is readable.
            throw new IllegalArgumentException("Invalid control record type for end transaction marker: " + type);
    }

    /**
     * Parse an end-transaction marker from a control record (key identifies the control type,
     * value carries the coordinator epoch).
     */
    public static EndTransactionMarker deserialize(Record record) {
        ControlRecordType type = ControlRecordType.parse(record.key());
        return deserializeValue(type, record.value());
    }

    /**
     * Parse the marker value. Newer (unknown) versions are parsed as the current version,
     * which allows forward compatibility as long as new fields are appended.
     *
     * @throws InvalidRecordException if the value is truncated or carries a negative version
     */
    static EndTransactionMarker deserializeValue(ControlRecordType type, ByteBuffer value) {
        ensureTransactionMarkerControlType(type);

        if (value.remaining() < CURRENT_END_TXN_MARKER_VALUE_SIZE)
            throw new InvalidRecordException("Invalid value size found for end transaction marker. Must have " +
                    "at least " + CURRENT_END_TXN_MARKER_VALUE_SIZE + " bytes, but found only " + value.remaining());

        short version = value.getShort(0);
        if (version < 0)
            throw new InvalidRecordException("Invalid version found for end transaction marker: " + version +
                    ". May indicate data corruption");

        if (version > CURRENT_END_TXN_MARKER_VERSION)
            log.debug("Received end transaction marker value version {}. Parsing as version {}", version,
                    CURRENT_END_TXN_MARKER_VERSION);

        int coordinatorEpoch = value.getInt(2);
        return new EndTransactionMarker(type, coordinatorEpoch);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/FileLogInputStream.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.record.AbstractLegacyRecordBatch.LegacyFileChannelRecordBatch; import org.apache.kafka.common.record.DefaultRecordBatch.DefaultFileChannelRecordBatch; import org.apache.kafka.common.utils.BufferSupplier; import org.apache.kafka.common.utils.CloseableIterator; import org.apache.kafka.common.utils.Utils; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.Iterator; import java.util.Objects; import static org.apache.kafka.common.record.Records.LOG_OVERHEAD; import static org.apache.kafka.common.record.Records.HEADER_SIZE_UP_TO_MAGIC; import static org.apache.kafka.common.record.Records.MAGIC_OFFSET; import static org.apache.kafka.common.record.Records.OFFSET_OFFSET; import static org.apache.kafka.common.record.Records.SIZE_OFFSET; /** * A log input stream which is backed by a {@link FileChannel}. 
*/
public class FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> {
    // Current read position within the channel; advances past each returned batch.
    private int position;
    private final int end;
    private final FileRecords fileRecords;
    // Reused scratch buffer for reading the fixed-size prefix (offset, size, magic) of each batch.
    private final ByteBuffer logHeaderBuffer = ByteBuffer.allocate(HEADER_SIZE_UP_TO_MAGIC);

    /**
     * Create a new log input stream over the FileChannel
     * @param records Underlying FileRecords instance
     * @param start Position in the file channel to start from
     * @param end Position in the file channel not to read past
     */
    FileLogInputStream(FileRecords records,
                       int start,
                       int end) {
        this.fileRecords = records;
        this.position = start;
        this.end = end;
    }

    @Override
    public FileChannelRecordBatch nextBatch() throws IOException {
        FileChannel channel = fileRecords.channel();
        // Not enough bytes left for even the offset/size/magic prefix: no more batches.
        if (position >= end - HEADER_SIZE_UP_TO_MAGIC)
            return null;

        logHeaderBuffer.rewind();
        Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header");

        logHeaderBuffer.rewind();
        long offset = logHeaderBuffer.getLong(OFFSET_OFFSET);
        int size = logHeaderBuffer.getInt(SIZE_OFFSET);

        // V0 has the smallest overhead, stricter checking is done later
        if (size < LegacyRecord.RECORD_OVERHEAD_V0)
            throw new CorruptRecordException(String.format("Found record size %d smaller than minimum record " +
                            "overhead (%d) in file %s.", size, LegacyRecord.RECORD_OVERHEAD_V0, fileRecords.file()));

        // Declared batch extends past the readable range: treat as end of stream (likely a partial write).
        if (position > end - LOG_OVERHEAD - size)
            return null;

        byte magic = logHeaderBuffer.get(MAGIC_OFFSET);
        final FileChannelRecordBatch batch;

        // Magic determines the batch format: legacy (v0/v1) vs. the v2 default format.
        if (magic < RecordBatch.MAGIC_VALUE_V2)
            batch = new LegacyFileChannelRecordBatch(offset, magic, fileRecords, position, size);
        else
            batch = new DefaultFileChannelRecordBatch(offset, magic, fileRecords, position, size);

        position += batch.sizeInBytes();
        return batch;
    }

    /**
     * Log entry backed by an underlying FileChannel. This allows iteration over the record batches
     * without needing to read the record data into memory until it is needed. The downside
     * is that entries will generally no longer be readable when the underlying channel is closed.
     */
    public abstract static class FileChannelRecordBatch extends AbstractRecordBatch {
        protected final long offset;
        protected final byte magic;
        protected final FileRecords fileRecords;
        protected final int position;
        protected final int batchSize;

        // Lazily loaded views of the batch: the full batch (header + records) and a
        // header-only batch. At most one is populated at a time; see loadFullBatch/loadBatchHeader.
        private RecordBatch fullBatch;
        private RecordBatch batchHeader;

        FileChannelRecordBatch(long offset,
                               byte magic,
                               FileRecords fileRecords,
                               int position,
                               int batchSize) {
            this.offset = offset;
            this.magic = magic;
            this.fileRecords = fileRecords;
            this.position = position;
            this.batchSize = batchSize;
        }

        @Override
        public CompressionType compressionType() {
            return loadBatchHeader().compressionType();
        }

        @Override
        public TimestampType timestampType() {
            return loadBatchHeader().timestampType();
        }

        @Override
        public long checksum() {
            return loadBatchHeader().checksum();
        }

        @Override
        public long maxTimestamp() {
            return loadBatchHeader().maxTimestamp();
        }

        public int position() {
            return position;
        }

        @Override
        public byte magic() {
            return magic;
        }

        @Override
        public Iterator<Record> iterator() {
            return loadFullBatch().iterator();
        }

        @Override
        public CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier) {
            return loadFullBatch().streamingIterator(bufferSupplier);
        }

        @Override
        public boolean isValid() {
            return loadFullBatch().isValid();
        }

        @Override
        public void ensureValid() {
            loadFullBatch().ensureValid();
        }

        @Override
        public int sizeInBytes() {
            return LOG_OVERHEAD + batchSize;
        }

        @Override
        public void writeTo(ByteBuffer buffer) {
            FileChannel channel = fileRecords.channel();
            try {
                // Temporarily narrow the limit so readFully copies exactly this batch's bytes,
                // then restore the caller's limit.
                int limit = buffer.limit();
                buffer.limit(buffer.position() + sizeInBytes());
                Utils.readFully(channel, buffer, position);
                buffer.limit(limit);
            } catch (IOException e) {
                throw new KafkaException("Failed to read record batch at position " + position + " from " + fileRecords, e);
            }
        }

        // Wrap an in-memory buffer (full batch or header-only) in the format-specific batch type.
        protected abstract RecordBatch toMemoryRecordBatch(ByteBuffer buffer);

        // Size in bytes of the header for this batch format.
        protected abstract int headerSize();

        protected RecordBatch loadFullBatch() {
            if (fullBatch == null) {
                // The full batch supersedes the header-only view; drop it so it can be GC'd.
                batchHeader = null;
                fullBatch = loadBatchWithSize(sizeInBytes(), "full record batch");
            }
            return fullBatch;
        }

        protected RecordBatch loadBatchHeader() {
            // If the full batch is already in memory, serve header fields from it directly.
            if (fullBatch != null)
                return fullBatch;

            if (batchHeader == null)
                batchHeader = loadBatchWithSize(headerSize(), "record batch header");

            return batchHeader;
        }

        private RecordBatch loadBatchWithSize(int size, String description) {
            FileChannel channel = fileRecords.channel();
            try {
                ByteBuffer buffer = ByteBuffer.allocate(size);
                Utils.readFullyOrFail(channel, buffer, position, description);
                buffer.rewind();
                return toMemoryRecordBatch(buffer);
            } catch (IOException e) {
                throw new KafkaException("Failed to load record batch at position " + position + " from " + fileRecords, e);
            }
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;

            FileChannelRecordBatch that = (FileChannelRecordBatch) o;

            FileChannel channel = fileRecords == null ? null : fileRecords.channel();
            FileChannel thatChannel = that.fileRecords == null ? null : that.fileRecords.channel();

            return offset == that.offset &&
                    position == that.position &&
                    batchSize == that.batchSize &&
                    Objects.equals(channel, thatChannel);
        }

        @Override
        public int hashCode() {
            FileChannel channel = fileRecords == null ? null : fileRecords.channel();

            int result = Long.hashCode(offset);
            result = 31 * result + (channel != null ? channel.hashCode() : 0);
            result = 31 * result + position;
            result = 31 * result + batchSize;
            return result;
        }

        @Override
        public String toString() {
            return "FileChannelRecordBatch(magic: " + magic +
                    ", offset: " + offset +
                    ", size: " + batchSize + ")";
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/FileRecords.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.network.TransferableChannel; import org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch; import org.apache.kafka.common.utils.AbstractIterator; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import java.io.Closeable; import java.io.File; import java.io.IOException; import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.file.Files; import java.nio.file.StandardOpenOption; import java.util.Objects; import java.util.Optional; import java.util.concurrent.atomic.AtomicInteger; /** * A {@link Records} implementation backed by a file. An optional start and end position can be applied to this * instance to enable slicing a range of the log records. 
 */
public class FileRecords extends AbstractRecords implements Closeable {
    // True when this instance is a bounded view over a shared channel rather than the whole file.
    private final boolean isSlice;
    private final int start;
    private final int end;

    private final Iterable<FileLogInputStream.FileChannelRecordBatch> batches;

    // mutable state
    // Logical size of the readable region; updated on append/truncate. Readers snapshot it once
    // per operation so a concurrent append cannot change the bound mid-operation.
    private final AtomicInteger size;
    private final FileChannel channel;
    private volatile File file;

    /**
     * The {@code FileRecords.open} methods should be used instead of this constructor whenever possible.
     * The constructor is visible for tests.
     */
    FileRecords(File file,
                FileChannel channel,
                int start,
                int end,
                boolean isSlice) throws IOException {
        this.file = file;
        this.channel = channel;
        this.start = start;
        this.end = end;
        this.isSlice = isSlice;
        this.size = new AtomicInteger();

        if (isSlice) {
            // don't check the file size if this is just a slice view
            size.set(end - start);
        } else {
            if (channel.size() > Integer.MAX_VALUE)
                throw new KafkaException("The size of segment " + file + " (" + channel.size() +
                        ") is larger than the maximum allowed segment size of " + Integer.MAX_VALUE);

            int limit = Math.min((int) channel.size(), end);
            size.set(limit - start);

            // if this is not a slice, update the file pointer to the end of the file
            // set the file position to the last byte in the file
            channel.position(limit);
        }

        batches = batchesFrom(start);
    }

    @Override
    public int sizeInBytes() {
        return size.get();
    }

    /**
     * Get the underlying file.
     * @return The file
     */
    public File file() {
        return file;
    }

    /**
     * Get the underlying file channel.
     * @return The file channel
     */
    public FileChannel channel() {
        return channel;
    }

    /**
     * Read log batches into the given buffer until there are no bytes remaining in the buffer or the end of the file
     * is reached.
     *
     * @param buffer The buffer to write the batches to
     * @param position Position in the buffer to read from
     * @throws IOException If an I/O error occurs, see {@link FileChannel#read(ByteBuffer, long)} for details on the
     * possible exceptions
     */
    public void readInto(ByteBuffer buffer, int position) throws IOException {
        // position is relative to this view, so offset it by the slice start.
        Utils.readFully(channel, buffer, position + this.start);
        buffer.flip();
    }

    /**
     * Return a slice of records from this instance, which is a view into this set starting from the given position
     * and with the given size limit.
     *
     * If the size is beyond the end of the file, the end will be based on the size of the file at the time of the read.
     *
     * If this message set is already sliced, the position will be taken relative to that slicing.
     *
     * @param position The start position to begin the read from
     * @param size The number of bytes after the start position to include
     * @return A sliced wrapper on this message set limited based on the given position and size
     */
    public FileRecords slice(int position, int size) throws IOException {
        int availableBytes = availableBytes(position, size);
        int startPosition = this.start + position;
        return new FileRecords(file, channel, startPosition, startPosition + availableBytes, true);
    }

    /**
     * Return a slice of records from this instance, the difference with {@link FileRecords#slice(int, int)} is
     * that the position is not necessarily on an offset boundary.
     *
     * This method is reserved for cases where offset alignment is not necessary, such as in the replication of raft
     * snapshots.
     *
     * @param position The start position to begin the read from
     * @param size The number of bytes after the start position to include
     * @return A unaligned slice of records on this message set limited based on the given position and size
     */
    public UnalignedFileRecords sliceUnaligned(int position, int size) {
        int availableBytes = availableBytes(position, size);
        return new UnalignedFileRecords(channel, this.start + position, availableBytes);
    }

    /**
     * Validate a requested (position, size) window against the current logical size and return the
     * number of bytes actually available, clamped to the end of this view.
     */
    private int availableBytes(int position, int size) {
        // Cache current size in case concurrent write changes it
        int currentSizeInBytes = sizeInBytes();

        if (position < 0)
            throw new IllegalArgumentException("Invalid position: " + position + " in read from " + this);
        if (position > currentSizeInBytes - start)
            throw new IllegalArgumentException("Slice from position " + position + " exceeds end position of " + this);
        if (size < 0)
            throw new IllegalArgumentException("Invalid size: " + size + " in read from " + this);

        int end = this.start + position + size;
        // Handle integer overflow or if end is beyond the end of the file
        if (end < 0 || end > start + currentSizeInBytes)
            end = this.start + currentSizeInBytes;
        return end - (this.start + position);
    }

    /**
     * Append a set of records to the file. This method is not thread-safe and must be
     * protected with a lock.
     *
     * @param records The records to append
     * @return the number of bytes written to the underlying file
     */
    public int append(MemoryRecords records) throws IOException {
        // Guard against the int-based size counter overflowing past Integer.MAX_VALUE.
        if (records.sizeInBytes() > Integer.MAX_VALUE - size.get())
            throw new IllegalArgumentException("Append of size " + records.sizeInBytes() +
                    " bytes is too large for segment with current file position at " + size.get());

        int written = records.writeFullyTo(channel);
        size.getAndAdd(written);
        return written;
    }

    /**
     * Commit all written data to the physical disk
     */
    public void flush() throws IOException {
        channel.force(true);
    }

    /**
     * Close this record set
     */
    public void close() throws IOException {
        flush();
        trim();
        channel.close();
    }

    /**
     * Close file handlers used by the FileChannel but don't write to disk. This is used when the disk may have failed
     */
    public void closeHandlers() throws IOException {
        channel.close();
    }

    /**
     * Delete this message set from the filesystem
     * @throws IOException if deletion fails due to an I/O error
     * @return {@code true} if the file was deleted by this method; {@code false} if the file could not be deleted
     *         because it did not exist
     */
    public boolean deleteIfExists() throws IOException {
        Utils.closeQuietly(channel, "FileChannel");
        return Files.deleteIfExists(file.toPath());
    }

    /**
     * Trim file when close or roll to next file
     */
    public void trim() throws IOException {
        truncateTo(sizeInBytes());
    }

    /**
     * Update the parent directory (to be used with caution since this does not reopen the file channel)
     * @param parentDir The new parent directory
     */
    public void updateParentDir(File parentDir) {
        this.file = new File(parentDir, file.getName());
    }

    /**
     * Rename the file that backs this message set
     * @throws IOException if rename fails.
     */
    public void renameTo(File f) throws IOException {
        try {
            Utils.atomicMoveWithFallback(file.toPath(), f.toPath(), false);
        } finally {
            // Update the reference even on failure so subsequent operations target the new path.
            this.file = f;
        }
    }

    /**
     * Truncate this file message set to the given size in bytes. Note that this API does no checking that the
     * given size falls on a valid message boundary.
     * In some versions of the JDK truncating to the same size as the file message set will cause an
     * update of the files mtime, so truncate is only performed if the targetSize is smaller than the
     * size of the underlying FileChannel.
     * It is expected that no other threads will do writes to the log when this function is called.
     * @param targetSize The size to truncate to. Must be between 0 and sizeInBytes.
     * @return The number of bytes truncated off
     */
    public int truncateTo(int targetSize) throws IOException {
        int originalSize = sizeInBytes();
        if (targetSize > originalSize || targetSize < 0)
            throw new KafkaException("Attempt to truncate log segment " + file + " to " + targetSize + " bytes failed, " +
                    " size of this log segment is " + originalSize + " bytes.");
        if (targetSize < (int) channel.size()) {
            channel.truncate(targetSize);
            size.set(targetSize);
        }
        return originalSize - targetSize;
    }

    @Override
    public ConvertedRecords<? extends Records> downConvert(byte toMagic, long firstOffset, Time time) {
        ConvertedRecords<MemoryRecords> convertedRecords = RecordsUtil.downConvert(batches, toMagic, firstOffset, time);
        if (convertedRecords.recordConversionStats().numRecordsConverted() == 0) {
            // This indicates that the message is too large, which means that the buffer is not large
            // enough to hold a full record batch. We just return all the bytes in this instance.
            // Even though the record batch does not have the right format version, we expect old clients
            // to raise an error to the user after reading the record batch size and seeing that there
            // are not enough available bytes in the response to read it fully. Note that this is
            // only possible prior to KIP-74, after which the broker was changed to always return at least
            // one full record batch, even if it requires exceeding the max fetch size requested by the client.
            return new ConvertedRecords<>(this, RecordConversionStats.EMPTY);
        } else {
            return convertedRecords;
        }
    }

    @Override
    public long writeTo(TransferableChannel destChannel, long offset, int length) throws IOException {
        // Detect concurrent truncation: the readable region must not have shrunk below the
        // size snapshot taken for this write.
        long newSize = Math.min(channel.size(), end) - start;
        int oldSize = sizeInBytes();
        if (newSize < oldSize)
            throw new KafkaException(String.format(
                    "Size of FileRecords %s has been truncated during write: old size %d, new size %d",
                    file.getAbsolutePath(), oldSize, newSize));

        long position = start + offset;
        long count = Math.min(length, oldSize - offset);
        return destChannel.transferFrom(channel, position, count);
    }

    /**
     * Search forward for the file position of the last offset that is greater than or equal to the target offset
     * and return its physical position and the size of the message (including log overhead) at the returned offset. If
     * no such offsets are found, return null.
     *
     * @param targetOffset The offset to search for.
     * @param startingPosition The starting position in the file to begin searching from.
     */
    public LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition) {
        for (FileChannelRecordBatch batch : batchesFrom(startingPosition)) {
            long offset = batch.lastOffset();
            if (offset >= targetOffset)
                return new LogOffsetPosition(offset, batch.position(), batch.sizeInBytes());
        }
        return null;
    }

    /**
     * Search forward for the first message that meets the following requirements:
     * - Message's timestamp is greater than or equals to the targetTimestamp.
     * - Message's position in the log file is greater than or equals to the startingPosition.
     * - Message's offset is greater than or equals to the startingOffset.
     *
     * @param targetTimestamp The timestamp to search for.
     * @param startingPosition The starting position to search.
     * @param startingOffset The starting offset to search.
     * @return The timestamp and offset of the message found. Null if no message is found.
     */
    public TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset) {
        for (RecordBatch batch : batchesFrom(startingPosition)) {
            // Only scan records of batches whose max timestamp can possibly satisfy the target.
            if (batch.maxTimestamp() >= targetTimestamp) {
                // We found a message
                for (Record record : batch) {
                    long timestamp = record.timestamp();
                    if (timestamp >= targetTimestamp && record.offset() >= startingOffset)
                        return new TimestampAndOffset(timestamp, record.offset(),
                                maybeLeaderEpoch(batch.partitionLeaderEpoch()));
                }
            }
        }
        return null;
    }

    /**
     * Return the largest timestamp of the messages after a given position in this file message set.
     * @param startingPosition The starting position.
     * @return The largest timestamp of the messages after the given position.
     */
    public TimestampAndOffset largestTimestampAfter(int startingPosition) {
        long maxTimestamp = RecordBatch.NO_TIMESTAMP;
        long offsetOfMaxTimestamp = -1L;
        int leaderEpochOfMaxTimestamp = RecordBatch.NO_PARTITION_LEADER_EPOCH;

        for (RecordBatch batch : batchesFrom(startingPosition)) {
            long timestamp = batch.maxTimestamp();
            if (timestamp > maxTimestamp) {
                maxTimestamp = timestamp;
                offsetOfMaxTimestamp = batch.lastOffset();
                leaderEpochOfMaxTimestamp = batch.partitionLeaderEpoch();
            }
        }
        return new TimestampAndOffset(maxTimestamp, offsetOfMaxTimestamp,
                maybeLeaderEpoch(leaderEpochOfMaxTimestamp));
    }

    // Wrap a leader epoch in Optional, mapping the sentinel "no epoch" value to empty.
    private Optional<Integer> maybeLeaderEpoch(int leaderEpoch) {
        return leaderEpoch == RecordBatch.NO_PARTITION_LEADER_EPOCH ?
                Optional.empty() : Optional.of(leaderEpoch);
    }

    /**
     * Get an iterator over the record batches in the file. Note that the batches are
     * backed by the open file channel. When the channel is closed (i.e. when this instance
     * is closed), the batches will generally no longer be readable.
     * @return An iterator over the batches
     */
    @Override
    public Iterable<FileChannelRecordBatch> batches() {
        return batches;
    }

    @Override
    public String toString() {
        return "FileRecords(size=" + sizeInBytes() +
                ", file=" + file +
                ", start=" + start +
                ", end=" + end +
                ")";
    }

    /**
     * Get an iterator over the record batches in the file, starting at a specific position. This is similar to
     * {@link #batches()} except that callers specify a particular position to start reading the batches from. This
     * method must be used with caution: the start position passed in must be a known start of a batch.
     * @param start The position to start record iteration from; must be a known position for start of a batch
     * @return An iterator over batches starting from {@code start}
     */
    public Iterable<FileChannelRecordBatch> batchesFrom(final int start) {
        return () -> batchIterator(start);
    }

    @Override
    public AbstractIterator<FileChannelRecordBatch> batchIterator() {
        return batchIterator(start);
    }

    private AbstractIterator<FileChannelRecordBatch> batchIterator(int start) {
        final int end;
        // A slice has a fixed bound; a full file reads up to the current logical size.
        if (isSlice)
            end = this.end;
        else
            end = this.sizeInBytes();
        FileLogInputStream inputStream = new FileLogInputStream(this, start, end);
        return new RecordBatchIterator<>(inputStream);
    }

    public static FileRecords open(File file,
                                   boolean mutable,
                                   boolean fileAlreadyExists,
                                   int initFileSize,
                                   boolean preallocate) throws IOException {
        FileChannel channel = openChannel(file, mutable, fileAlreadyExists, initFileSize, preallocate);
        // A freshly preallocated file is logically empty despite its physical length.
        int end = (!fileAlreadyExists && preallocate) ? 0 : Integer.MAX_VALUE;
        return new FileRecords(file, channel, 0, end, false);
    }

    public static FileRecords open(File file,
                                   boolean fileAlreadyExists,
                                   int initFileSize,
                                   boolean preallocate) throws IOException {
        return open(file, true, fileAlreadyExists, initFileSize, preallocate);
    }

    public static FileRecords open(File file, boolean mutable) throws IOException {
        return open(file, mutable, false, 0, false);
    }

    public static FileRecords open(File file) throws IOException {
        return open(file, true);
    }

    /**
     * Open a channel for the given file
     * For windows NTFS and some old LINUX file system, set preallocate to true and initFileSize
     * with one value (for example 512 * 1025 *1024 ) can improve the kafka produce performance.
     * @param file File path
     * @param mutable mutable
     * @param fileAlreadyExists File already exists or not
     * @param initFileSize The size used for pre allocate file, for example 512 * 1025 *1024
     * @param preallocate Pre-allocate file or not, gotten from configuration.
     */
    private static FileChannel openChannel(File file,
                                           boolean mutable,
                                           boolean fileAlreadyExists,
                                           int initFileSize,
                                           boolean preallocate) throws IOException {
        if (mutable) {
            if (fileAlreadyExists || !preallocate) {
                return FileChannel.open(file.toPath(), StandardOpenOption.CREATE, StandardOpenOption.READ,
                        StandardOpenOption.WRITE);
            } else {
                // Preallocation path: extend the file to initFileSize up front.
                RandomAccessFile randomAccessFile = new RandomAccessFile(file, "rw");
                randomAccessFile.setLength(initFileSize);
                return randomAccessFile.getChannel();
            }
        } else {
            return FileChannel.open(file.toPath());
        }
    }

    /**
     * Value object pairing a logical offset with its physical file position and batch size.
     */
    public static class LogOffsetPosition {
        public final long offset;
        public final int position;
        public final int size;

        public LogOffsetPosition(long offset, int position, int size) {
            this.offset = offset;
            this.position = position;
            this.size = size;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o)
                return true;
            if (o == null || getClass() != o.getClass())
                return false;

            LogOffsetPosition that = (LogOffsetPosition) o;

            return offset == that.offset &&
                    position == that.position &&
                    size == that.size;
        }

        @Override
        public int hashCode() {
            int result = Long.hashCode(offset);
            result = 31 * result + position;
            result = 31 * result + size;
            return result;
        }

        @Override
        public String toString() {
            return "LogOffsetPosition(" +
                    "offset=" + offset +
                    ", position=" + position +
                    ", size=" + size +
                    ')';
        }
    }

    /**
     * Value object pairing a record timestamp with its offset and (optional) leader epoch.
     */
    public static class TimestampAndOffset {
        public final long timestamp;
        public final long offset;
        public final Optional<Integer> leaderEpoch;

        public TimestampAndOffset(long timestamp, long offset, Optional<Integer> leaderEpoch) {
            this.timestamp = timestamp;
            this.offset = offset;
            this.leaderEpoch = leaderEpoch;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o)
                return true;
            if (o == null || getClass() != o.getClass())
                return false;
            TimestampAndOffset that = (TimestampAndOffset) o;
            return timestamp == that.timestamp &&
                    offset == that.offset &&
                    Objects.equals(leaderEpoch, that.leaderEpoch);
        }

        @Override
        public int hashCode() {
            return Objects.hash(timestamp, offset, leaderEpoch);
        }

        @Override
        public String toString() {
            return "TimestampAndOffset(" +
                    "timestamp=" + timestamp +
                    ", offset=" + offset +
                    ", leaderEpoch=" + leaderEpoch +
                    ')';
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/LazyDownConversionRecords.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.AbstractIterator;
import org.apache.kafka.common.utils.Time;

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

/**
 * Encapsulation for holding records that require down-conversion in a lazy, chunked manner (KIP-283). See
 * {@link LazyDownConversionRecordsSend} for the actual chunked send implementation.
 */
public class LazyDownConversionRecords implements BaseRecords {
    private final TopicPartition topicPartition;
    private final Records records;
    private final byte toMagic;
    private final long firstOffset;
    // Eagerly-converted first batch, cached so sizeInBytes can account for it; handed off to the
    // first iterator created via iterator(long) and then nulled out to allow GC.
    private ConvertedRecords firstConvertedBatch;
    private final int sizeInBytes;
    private final Time time;

    /**
     * @param topicPartition The topic-partition to which records belong
     * @param records Records to lazily down-convert
     * @param toMagic Magic version to down-convert to
     * @param firstOffset The starting offset for down-converted records. This only impacts some cases. See
     *                    {@link RecordsUtil#downConvert(Iterable, byte, long, Time)} for an explanation.
     * @param time The time instance to use
     *
     * @throws org.apache.kafka.common.errors.UnsupportedCompressionTypeException If the first batch to down-convert
     *         has a compression type which we do not support down-conversion for.
     */
    public LazyDownConversionRecords(TopicPartition topicPartition, Records records, byte toMagic, long firstOffset, Time time) {
        this.topicPartition = Objects.requireNonNull(topicPartition);
        this.records = Objects.requireNonNull(records);
        this.toMagic = toMagic;
        this.firstOffset = firstOffset;
        this.time = Objects.requireNonNull(time);

        // To make progress, kafka consumers require at least one full record batch per partition, i.e. we need to
        // ensure we can accommodate one full batch of down-converted messages. We achieve this by having `sizeInBytes`
        // factor in the size of the first down-converted batch and we return at least that many bytes.
        java.util.Iterator<ConvertedRecords<?>> it = iterator(0);
        if (it.hasNext()) {
            firstConvertedBatch = it.next();
            sizeInBytes = Math.max(records.sizeInBytes(), firstConvertedBatch.records().sizeInBytes());
        } else {
            // If there are messages before down-conversion and no messages after down-conversion,
            // make sure we are able to send at least an overflow message to the consumer so that it can throw
            // a RecordTooLargeException. Typically, the consumer would need to increase the fetch size in such cases.
            // If there are no messages before down-conversion, we return an empty record batch.
            firstConvertedBatch = null;
            sizeInBytes = records.batches().iterator().hasNext() ? LazyDownConversionRecordsSend.MIN_OVERFLOW_MESSAGE_LENGTH : 0;
        }
    }

    @Override
    public int sizeInBytes() {
        return sizeInBytes;
    }

    @Override
    public LazyDownConversionRecordsSend toSend() {
        return new LazyDownConversionRecordsSend(this);
    }

    public TopicPartition topicPartition() {
        return topicPartition;
    }

    @Override
    public boolean equals(Object o) {
        if (o instanceof LazyDownConversionRecords) {
            LazyDownConversionRecords that = (LazyDownConversionRecords) o;
            return toMagic == that.toMagic &&
                    firstOffset == that.firstOffset &&
                    topicPartition.equals(that.topicPartition) &&
                    records.equals(that.records);
        }
        return false;
    }

    @Override
    public int hashCode() {
        int result = toMagic;
        result = 31 * result + Long.hashCode(firstOffset);
        result = 31 * result + topicPartition.hashCode();
        result = 31 * result + records.hashCode();
        return result;
    }

    @Override
    public String toString() {
        return "LazyDownConversionRecords(size=" + sizeInBytes +
                ", underlying=" + records +
                ", toMagic=" + toMagic +
                ", firstOffset=" + firstOffset +
                ")";
    }

    public java.util.Iterator<ConvertedRecords<?>> iterator(long maximumReadSize) {
        // We typically expect only one iterator instance to be created, so null out the first converted batch after
        // first use to make it available for GC.
        ConvertedRecords firstBatch = firstConvertedBatch;
        firstConvertedBatch = null;
        return new Iterator(records, maximumReadSize, firstBatch);
    }

    /**
     * Implementation for being able to iterate over down-converted records. Goal of this implementation is to keep
     * it as memory-efficient as possible by not having to maintain all down-converted records in-memory. Maintains
     * a view into batches of down-converted records.
     */
    private class Iterator extends AbstractIterator<ConvertedRecords<?>> {
        private final AbstractIterator<? extends RecordBatch> batchIterator;
        private final long maximumReadSize;
        // Pre-converted first batch handed in by the enclosing instance; returned once, then nulled.
        private ConvertedRecords firstConvertedBatch;

        /**
         * @param recordsToDownConvert Records that require down-conversion
         * @param maximumReadSize Maximum possible size of underlying records that will be down-converted in each call to
         *                        {@link #makeNext()}. This is a soft limit as {@link #makeNext()} will always convert
         *                        and return at least one full message batch.
         */
        private Iterator(Records recordsToDownConvert, long maximumReadSize, ConvertedRecords<?> firstConvertedBatch) {
            this.batchIterator = recordsToDownConvert.batchIterator();
            this.maximumReadSize = maximumReadSize;
            this.firstConvertedBatch = firstConvertedBatch;
            // If we already have the first down-converted batch, advance the underlying records iterator to next batch
            if (firstConvertedBatch != null)
                this.batchIterator.next();
        }

        /**
         * Make next set of down-converted records
         * @return Down-converted records
         */
        @Override
        protected ConvertedRecords makeNext() {
            // If we have cached the first down-converted batch, return that now
            if (firstConvertedBatch != null) {
                ConvertedRecords convertedBatch = firstConvertedBatch;
                firstConvertedBatch = null;
                return convertedBatch;
            }

            while (batchIterator.hasNext()) {
                final List<RecordBatch> batches = new ArrayList<>();
                boolean isFirstBatch = true;
                long sizeSoFar = 0;

                // Figure out batches we should down-convert based on the size constraints
                while (batchIterator.hasNext() &&
                        (isFirstBatch || (batchIterator.peek().sizeInBytes() + sizeSoFar) <= maximumReadSize)) {
                    RecordBatch currentBatch = batchIterator.next();
                    batches.add(currentBatch);
                    sizeSoFar += currentBatch.sizeInBytes();
                    isFirstBatch = false;
                }

                ConvertedRecords convertedRecords = RecordsUtil.downConvert(batches, toMagic, firstOffset, time);
                // During conversion, it is possible that we drop certain batches because they do not have an equivalent
                // representation in the message format we want to convert to. For example, V0 and V1 message formats
                // have no notion of transaction markers which were introduced in V2 so they get dropped during conversion.
                // We return converted records only when we have at least one valid batch of messages after conversion.
                if (convertedRecords.records().sizeInBytes() > 0)
                    return convertedRecords;
            }
            return allDone();
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/LazyDownConversionRecordsSend.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.UnsupportedCompressionTypeException; import org.apache.kafka.common.network.TransferableChannel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Iterator; /** * Encapsulation for {@link RecordsSend} for {@link LazyDownConversionRecords}. Records are down-converted in batches and * on-demand when {@link #writeTo} method is called. 
 */
public final class LazyDownConversionRecordsSend extends RecordsSend<LazyDownConversionRecords> {
    private static final Logger log = LoggerFactory.getLogger(LazyDownConversionRecordsSend.class);
    // Soft cap on the amount of underlying data down-converted per chunk.
    private static final int MAX_READ_SIZE = 128 * 1024;
    static final int MIN_OVERFLOW_MESSAGE_LENGTH = Records.LOG_OVERHEAD;

    private RecordConversionStats recordConversionStats;
    // Writer for the chunk currently being sent; replaced when it completes.
    private RecordsSend convertedRecordsWriter;
    private Iterator<ConvertedRecords<?>> convertedRecordsIterator;

    public LazyDownConversionRecordsSend(LazyDownConversionRecords records) {
        super(records, records.sizeInBytes());
        convertedRecordsWriter = null;
        recordConversionStats = new RecordConversionStats();
        convertedRecordsIterator = records().iterator(MAX_READ_SIZE);
    }

    /**
     * Build a fake batch header used to pad out the remaining bytes when down-conversion
     * produced nothing more to send.
     */
    private MemoryRecords buildOverflowBatch(int remaining) {
        // We do not have any records left to down-convert. Construct an overflow message for the length remaining.
        // This message will be ignored by the consumer because its length will be past the length of maximum
        // possible response size.
        // DefaultRecordBatch =>
        //   BaseOffset => Int64
        //   Length => Int32
        //   ...
        ByteBuffer overflowMessageBatch = ByteBuffer.allocate(
                Math.max(MIN_OVERFLOW_MESSAGE_LENGTH, Math.min(remaining + 1, MAX_READ_SIZE)));
        overflowMessageBatch.putLong(-1L);

        // Fill in the length of the overflow batch. A valid batch must be at least as long as the minimum batch
        // overhead.
        overflowMessageBatch.putInt(Math.max(remaining + 1, DefaultRecordBatch.RECORD_BATCH_OVERHEAD));
        log.debug("Constructed overflow message batch for partition {} with length={}", topicPartition(), remaining);
        return MemoryRecords.readableRecords(overflowMessageBatch);
    }

    /**
     * Write the next portion of down-converted data to the channel. When the current chunk's writer
     * completes, the next chunk is down-converted on demand (or an overflow batch is fabricated).
     */
    @Override
    public long writeTo(TransferableChannel channel, long previouslyWritten, int remaining) throws IOException {
        if (convertedRecordsWriter == null || convertedRecordsWriter.completed()) {
            MemoryRecords convertedRecords;

            try {
                // Check if we have more chunks left to down-convert
                if (convertedRecordsIterator.hasNext()) {
                    // Get next chunk of down-converted messages
                    ConvertedRecords<?> recordsAndStats = convertedRecordsIterator.next();
                    convertedRecords = (MemoryRecords) recordsAndStats.records();
                    recordConversionStats.add(recordsAndStats.recordConversionStats());
                    log.debug("Down-converted records for partition {} with length={}", topicPartition(), convertedRecords.sizeInBytes());
                } else {
                    convertedRecords = buildOverflowBatch(remaining);
                }
            } catch (UnsupportedCompressionTypeException e) {
                // We have encountered a compression type which does not support down-conversion (e.g. zstd).
                // Since we have already sent at least one batch and we have committed to the fetch size, we
                // send an overflow batch. The consumer will read the first few records and then fetch from the
                // offset of the batch which has the unsupported compression type. At that time, we will
                // send back the UNSUPPORTED_COMPRESSION_TYPE error which will allow the consumer to fail gracefully.
                convertedRecords = buildOverflowBatch(remaining);
            }

            convertedRecordsWriter = new DefaultRecordsSend<>(convertedRecords, Math.min(convertedRecords.sizeInBytes(), remaining));
        }
        return convertedRecordsWriter.writeTo(channel);
    }

    public RecordConversionStats recordConversionStats() {
        return recordConversionStats;
    }

    public TopicPartition topicPartition() {
        return records().topicPartition();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/LegacyRecord.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; import java.util.zip.CRC32; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.utils.ByteBufferOutputStream; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.Checksums; import org.apache.kafka.common.utils.Utils; import java.io.DataOutputStream; import java.io.IOException; import java.nio.ByteBuffer; import static org.apache.kafka.common.utils.Utils.wrapNullable; /** * This class represents the serialized key and value along with the associated CRC and other fields * of message format versions 0 and 1. Note that it is uncommon to need to access this class directly. * Usually it should be accessed indirectly through the {@link Record} interface which is exposed * through the {@link Records} object. 
 */
public final class LegacyRecord {

    /**
     * The current offset and size for all the fixed-length fields
     */
    public static final int CRC_OFFSET = 0;
    public static final int CRC_LENGTH = 4;
    public static final int MAGIC_OFFSET = CRC_OFFSET + CRC_LENGTH;
    public static final int MAGIC_LENGTH = 1;
    public static final int ATTRIBUTES_OFFSET = MAGIC_OFFSET + MAGIC_LENGTH;
    public static final int ATTRIBUTES_LENGTH = 1;
    public static final int TIMESTAMP_OFFSET = ATTRIBUTES_OFFSET + ATTRIBUTES_LENGTH;
    public static final int TIMESTAMP_LENGTH = 8;
    // v0 has no timestamp field, so the key size immediately follows the attributes byte;
    // v1 places the key size after the 8-byte timestamp.
    public static final int KEY_SIZE_OFFSET_V0 = ATTRIBUTES_OFFSET + ATTRIBUTES_LENGTH;
    public static final int KEY_SIZE_OFFSET_V1 = TIMESTAMP_OFFSET + TIMESTAMP_LENGTH;
    public static final int KEY_SIZE_LENGTH = 4;
    public static final int KEY_OFFSET_V0 = KEY_SIZE_OFFSET_V0 + KEY_SIZE_LENGTH;
    public static final int KEY_OFFSET_V1 = KEY_SIZE_OFFSET_V1 + KEY_SIZE_LENGTH;
    public static final int VALUE_SIZE_LENGTH = 4;

    /**
     * The size for the record header
     */
    public static final int HEADER_SIZE_V0 = CRC_LENGTH + MAGIC_LENGTH + ATTRIBUTES_LENGTH;
    public static final int HEADER_SIZE_V1 = CRC_LENGTH + MAGIC_LENGTH + ATTRIBUTES_LENGTH + TIMESTAMP_LENGTH;

    /**
     * The amount of overhead bytes in a record
     */
    public static final int RECORD_OVERHEAD_V0 = HEADER_SIZE_V0 + KEY_SIZE_LENGTH + VALUE_SIZE_LENGTH;

    /**
     * The amount of overhead bytes in a record
     */
    public static final int RECORD_OVERHEAD_V1 = HEADER_SIZE_V1 + KEY_SIZE_LENGTH + VALUE_SIZE_LENGTH;

    /**
     * Specifies the mask for the compression code. 3 bits to hold the compression codec. 0 is reserved to indicate no
     * compression
     */
    private static final int COMPRESSION_CODEC_MASK = 0x07;

    /**
     * Specify the mask of timestamp type: 0 for CreateTime, 1 for LogAppendTime.
     */
    private static final byte TIMESTAMP_TYPE_MASK = 0x08;

    /**
     * Timestamp value for records without a timestamp
     */
    public static final long NO_TIMESTAMP = -1L;

    // The serialized record bytes (crc, magic, attributes, optional timestamp, key, value).
    private final ByteBuffer buffer;
    // Timestamp/type inherited from the wrapper record when this record is nested inside a
    // compressed message set; both are null for top-level records.
    private final Long wrapperRecordTimestamp;
    private final TimestampType wrapperRecordTimestampType;

    public LegacyRecord(ByteBuffer buffer) {
        this(buffer, null, null);
    }

    public LegacyRecord(ByteBuffer buffer, Long wrapperRecordTimestamp, TimestampType wrapperRecordTimestampType) {
        this.buffer = buffer;
        this.wrapperRecordTimestamp = wrapperRecordTimestamp;
        this.wrapperRecordTimestampType = wrapperRecordTimestampType;
    }

    /**
     * Compute the checksum of the record from the record contents
     */
    public long computeChecksum() {
        // The CRC covers everything after the stored CRC field, i.e. from the magic byte onwards.
        return crc32(buffer, MAGIC_OFFSET, buffer.limit() - MAGIC_OFFSET);
    }

    /**
     * Retrieve the previously computed CRC for this record
     */
    public long checksum() {
        return ByteUtils.readUnsignedInt(buffer, CRC_OFFSET);
    }

    /**
     * Returns true if the crc stored with the record matches the crc computed off the record contents
     */
    public boolean isValid() {
        return sizeInBytes() >= RECORD_OVERHEAD_V0 && checksum() == computeChecksum();
    }

    public Long wrapperRecordTimestamp() {
        return wrapperRecordTimestamp;
    }

    public TimestampType wrapperRecordTimestampType() {
        return wrapperRecordTimestampType;
    }

    /**
     * Throw an InvalidRecordException if isValid is false for this record
     */
    public void ensureValid() {
        // A record shorter than the fixed v0 overhead cannot even contain a CRC field.
        if (sizeInBytes() < RECORD_OVERHEAD_V0)
            throw new CorruptRecordException("Record is corrupt (crc could not be retrieved as the record is too "
                    + "small, size = " + sizeInBytes() + ")");

        if (!isValid())
            throw new CorruptRecordException("Record is corrupt (stored crc = " + checksum()
                    + ", computed crc = " + computeChecksum() + ")");
    }

    /**
     * The complete serialized size of this record in bytes (including crc, header attributes, etc), but
     * excluding the log overhead (offset and record size).
     * @return the size in bytes
     */
    public int sizeInBytes() {
        return buffer.limit();
    }

    /**
     * The length of the key in bytes
     * @return the size in bytes of the key (0 if the key is null)
     */
    public int keySize() {
        if (magic() == RecordBatch.MAGIC_VALUE_V0)
            return buffer.getInt(KEY_SIZE_OFFSET_V0);
        else
            return buffer.getInt(KEY_SIZE_OFFSET_V1);
    }

    /**
     * Does the record have a key?
     * @return true if so, false otherwise
     */
    public boolean hasKey() {
        // A null key is serialized with size -1, so any non-negative size means a key is present.
        return keySize() >= 0;
    }

    /**
     * The position where the value size is stored
     */
    private int valueSizeOffset() {
        // A null key contributes 0 bytes of payload (its serialized size is -1), hence the max(0, ...).
        if (magic() == RecordBatch.MAGIC_VALUE_V0)
            return KEY_OFFSET_V0 + Math.max(0, keySize());
        else
            return KEY_OFFSET_V1 + Math.max(0, keySize());
    }

    /**
     * The length of the value in bytes
     * @return the size in bytes of the value (0 if the value is null)
     */
    public int valueSize() {
        return buffer.getInt(valueSizeOffset());
    }

    /**
     * Check whether the value field of this record is null.
     * @return true if the value is null, false otherwise
     */
    public boolean hasNullValue() {
        return valueSize() < 0;
    }

    /**
     * The magic value (i.e. message format version) of this record
     * @return the magic value
     */
    public byte magic() {
        return buffer.get(MAGIC_OFFSET);
    }

    /**
     * The attributes stored with this record
     * @return the attributes
     */
    public byte attributes() {
        return buffer.get(ATTRIBUTES_OFFSET);
    }

    /**
     * When magic value is greater than 0, the timestamp of a record is determined in the following way:
     * 1. wrapperRecordTimestampType = null and wrapperRecordTimestamp is null - Uncompressed message, timestamp is in the message.
     * 2. wrapperRecordTimestampType = LOG_APPEND_TIME and WrapperRecordTimestamp is not null - Compressed message using LOG_APPEND_TIME
     * 3. wrapperRecordTimestampType = CREATE_TIME and wrapperRecordTimestamp is not null - Compressed message using CREATE_TIME
     *
     * @return the timestamp as determined above
     */
    public long timestamp() {
        if (magic() == RecordBatch.MAGIC_VALUE_V0)
            return RecordBatch.NO_TIMESTAMP;
        else {
            // case 2
            if (wrapperRecordTimestampType == TimestampType.LOG_APPEND_TIME && wrapperRecordTimestamp != null)
                return wrapperRecordTimestamp;
            // Case 1, 3
            else
                return buffer.getLong(TIMESTAMP_OFFSET);
        }
    }

    /**
     * Get the timestamp type of the record.
     *
     * @return The timestamp type or {@link TimestampType#NO_TIMESTAMP_TYPE} if the magic is 0.
     */
    public TimestampType timestampType() {
        return timestampType(magic(), wrapperRecordTimestampType, attributes());
    }

    /**
     * The compression type used with this record
     */
    public CompressionType compressionType() {
        // The codec id occupies the low 3 bits of the attributes byte.
        return CompressionType.forId(buffer.get(ATTRIBUTES_OFFSET) & COMPRESSION_CODEC_MASK);
    }

    /**
     * A ByteBuffer containing the value of this record
     * @return the value or null if the value for this record is null
     */
    public ByteBuffer value() {
        return Utils.sizeDelimited(buffer, valueSizeOffset());
    }

    /**
     * A ByteBuffer containing the message key
     * @return the buffer or null if the key for this record is null
     */
    public ByteBuffer key() {
        if (magic() == RecordBatch.MAGIC_VALUE_V0)
            return Utils.sizeDelimited(buffer, KEY_SIZE_OFFSET_V0);
        else
            return Utils.sizeDelimited(buffer, KEY_SIZE_OFFSET_V1);
    }

    /**
     * Get the underlying buffer backing this record instance.
     *
     * @return the buffer
     */
    public ByteBuffer buffer() {
        return this.buffer;
    }

    public String toString() {
        // Magic > 0 additionally includes the timestamp type and timestamp.
        if (magic() > 0)
            return String.format("Record(magic=%d, attributes=%d, compression=%s, crc=%d, %s=%d, key=%d bytes, value=%d bytes)",
                                 magic(),
                                 attributes(),
                                 compressionType(),
                                 checksum(),
                                 timestampType(),
                                 timestamp(),
                                 key() == null ? 0 : key().limit(),
                                 value() == null ? 0 : value().limit());
        else
            return String.format("Record(magic=%d, attributes=%d, compression=%s, crc=%d, key=%d bytes, value=%d bytes)",
                                 magic(),
                                 attributes(),
                                 compressionType(),
                                 checksum(),
                                 key() == null ? 0 : key().limit(),
                                 value() == null ? 0 : value().limit());
    }

    public boolean equals(Object other) {
        if (this == other)
            return true;
        if (other == null)
            return false;
        if (!other.getClass().equals(LegacyRecord.class))
            return false;
        LegacyRecord record = (LegacyRecord) other;
        return this.buffer.equals(record.buffer);
    }

    public int hashCode() {
        return buffer.hashCode();
    }

    /**
     * Create a new record instance. If the record's compression type is not none, then
     * its value payload should be already compressed with the specified type; the constructor
     * would always write the value payload as is and will not do the compression itself.
     *
     * @param magic The magic value to use
     * @param timestamp The timestamp of the record
     * @param key The key of the record (null, if none)
     * @param value The record value
     * @param compressionType The compression type used on the contents of the record (if any)
     * @param timestampType The timestamp type to be used for this record
     */
    public static LegacyRecord create(byte magic, long timestamp, byte[] key, byte[] value,
                                      CompressionType compressionType, TimestampType timestampType) {
        int keySize = key == null ? 0 : key.length;
        int valueSize = value == null ? 0 : value.length;
        ByteBuffer buffer = ByteBuffer.allocate(recordSize(magic, keySize, valueSize));
        write(buffer, magic, timestamp, wrapNullable(key), wrapNullable(value), compressionType, timestampType);
        buffer.rewind();
        return new LegacyRecord(buffer);
    }

    public static LegacyRecord create(byte magic, long timestamp, byte[] key, byte[] value) {
        return create(magic, timestamp, key, value, CompressionType.NONE, TimestampType.CREATE_TIME);
    }

    /**
     * Write the header for a compressed record set in-place (i.e. assuming the compressed record data has already
     * been written at the value offset in a wrapped record). This lets you dynamically create a compressed message
     * set, and then go back later and fill in its size and CRC, which saves the need for copying to another buffer.
     *
     * @param buffer The buffer containing the compressed record data positioned at the first offset of the
     * @param magic The magic value of the record set
     * @param recordSize The size of the record (including record overhead)
     * @param timestamp The timestamp of the wrapper record
     * @param compressionType The compression type used
     * @param timestampType The timestamp type of the wrapper record
     */
    public static void writeCompressedRecordHeader(ByteBuffer buffer, byte magic, int recordSize, long timestamp,
                                                   CompressionType compressionType, TimestampType timestampType) {
        int recordPosition = buffer.position();
        int valueSize = recordSize - recordOverhead(magic);

        // write the record header with a null value (the key is always null for the wrapper)
        write(buffer, magic, timestamp, null, null, compressionType, timestampType);
        buffer.position(recordPosition);

        // now fill in the value size
        buffer.putInt(recordPosition + keyOffset(magic), valueSize);

        // compute and fill the crc from the beginning of the message
        long crc = crc32(buffer, MAGIC_OFFSET, recordSize - MAGIC_OFFSET);
        ByteUtils.writeUnsignedInt(buffer, recordPosition + CRC_OFFSET, crc);
    }

    private static void write(ByteBuffer buffer, byte magic, long timestamp, ByteBuffer key, ByteBuffer value,
                              CompressionType compressionType, TimestampType timestampType) {
        try {
            DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer));
            write(out, magic, timestamp, key, value, compressionType, timestampType);
        } catch (IOException e) {
            // Writing to an in-memory buffer should never fail; surface any IOException as unchecked.
            throw new KafkaException(e);
        }
    }

    /**
     * Write the record data with the given compression type and return the computed crc.
     *
     * @param out The output stream to write to
     * @param magic The magic value to be used
     * @param timestamp The timestamp of the record
     * @param key The record key
     * @param value The record value
     * @param compressionType The compression type
     * @param timestampType The timestamp type
     * @return the computed CRC for this record.
     * @throws IOException for any IO errors writing to the output stream.
     */
    public static long write(DataOutputStream out, byte magic, long timestamp, byte[] key, byte[] value,
                             CompressionType compressionType, TimestampType timestampType) throws IOException {
        return write(out, magic, timestamp, wrapNullable(key), wrapNullable(value), compressionType, timestampType);
    }

    public static long write(DataOutputStream out, byte magic, long timestamp, ByteBuffer key, ByteBuffer value,
                             CompressionType compressionType, TimestampType timestampType) throws IOException {
        byte attributes = computeAttributes(magic, compressionType, timestampType);
        long crc = computeChecksum(magic, attributes, timestamp, key, value);
        write(out, magic, crc, attributes, timestamp, key, value);
        return crc;
    }

    /**
     * Write a record using raw fields (without validation). This should only be used in testing.
     */
    public static void write(DataOutputStream out, byte magic, long crc, byte attributes, long timestamp,
                             byte[] key, byte[] value) throws IOException {
        write(out, magic, crc, attributes, timestamp, wrapNullable(key), wrapNullable(value));
    }

    // Write a record to the buffer, if the record's compression type is none, then
    // its value payload should be already compressed with the specified type
    private static void write(DataOutputStream out, byte magic, long crc, byte attributes, long timestamp,
                              ByteBuffer key, ByteBuffer value) throws IOException {
        if (magic != RecordBatch.MAGIC_VALUE_V0 && magic != RecordBatch.MAGIC_VALUE_V1)
            throw new IllegalArgumentException("Invalid magic value " + magic);
        if (timestamp < 0 && timestamp != RecordBatch.NO_TIMESTAMP)
            throw new IllegalArgumentException("Invalid message timestamp " + timestamp);

        // write crc
        out.writeInt((int) (crc & 0xffffffffL));
        // write magic value
        out.writeByte(magic);
        // write attributes
        out.writeByte(attributes);

        // maybe write timestamp (v1+ only; v0 has no timestamp field)
        if (magic > RecordBatch.MAGIC_VALUE_V0)
            out.writeLong(timestamp);

        // write the key (size -1 encodes a null key)
        if (key == null) {
            out.writeInt(-1);
        } else {
            int size = key.remaining();
            out.writeInt(size);
            Utils.writeTo(out, key, size);
        }
        // write the value (size -1 encodes a null value)
        if (value == null) {
            out.writeInt(-1);
        } else {
            int size = value.remaining();
            out.writeInt(size);
            Utils.writeTo(out, value, size);
        }
    }

    static int recordSize(byte magic, ByteBuffer key, ByteBuffer value) {
        return recordSize(magic, key == null ? 0 : key.limit(), value == null ? 0 : value.limit());
    }

    public static int recordSize(byte magic, int keySize, int valueSize) {
        return recordOverhead(magic) + keySize + valueSize;
    }

    // visible only for testing
    public static byte computeAttributes(byte magic, CompressionType type, TimestampType timestampType) {
        byte attributes = 0;
        if (type.id > 0)
            attributes |= COMPRESSION_CODEC_MASK & type.id;
        if (magic > RecordBatch.MAGIC_VALUE_V0) {
            if (timestampType == TimestampType.NO_TIMESTAMP_TYPE)
                throw new IllegalArgumentException("Timestamp type must be provided to compute attributes for "
                        + "message format v1");
            if (timestampType == TimestampType.LOG_APPEND_TIME)
                attributes |= TIMESTAMP_TYPE_MASK;
        }
        return attributes;
    }

    // visible only for testing
    public static long computeChecksum(byte magic, byte attributes, long timestamp, byte[] key, byte[] value) {
        return computeChecksum(magic, attributes, timestamp, wrapNullable(key), wrapNullable(value));
    }

    private static long crc32(ByteBuffer buffer, int offset, int size) {
        CRC32 crc = new CRC32();
        Checksums.update(crc, buffer, offset, size);
        return crc.getValue();
    }

    /**
     * Compute the checksum of the record from the attributes, key and value payloads
     */
    private static long computeChecksum(byte magic, byte attributes, long timestamp, ByteBuffer key, ByteBuffer value) {
        // The CRC is accumulated field-by-field in the exact on-the-wire order written by
        // write(DataOutputStream, ...): magic, attributes, (timestamp for v1+), key, value.
        CRC32 crc = new CRC32();
        crc.update(magic);
        crc.update(attributes);
        if (magic > RecordBatch.MAGIC_VALUE_V0)
            Checksums.updateLong(crc, timestamp);
        // update for the key (the -1 null-size marker is part of the checksummed bytes)
        if (key == null) {
            Checksums.updateInt(crc, -1);
        } else {
            int size = key.remaining();
            Checksums.updateInt(crc, size);
            Checksums.update(crc, key, size);
        }
        // update for the value
        if (value == null) {
            Checksums.updateInt(crc, -1);
        } else {
            int size = value.remaining();
            Checksums.updateInt(crc, size);
            Checksums.update(crc, value, size);
        }
        return crc.getValue();
    }

    static int recordOverhead(byte magic) {
        if (magic == 0)
            return RECORD_OVERHEAD_V0;
        else if (magic == 1)
            return RECORD_OVERHEAD_V1;
        throw new IllegalArgumentException("Invalid magic used in LegacyRecord: " + magic);
    }

    static int headerSize(byte magic) {
        if (magic == 0)
            return HEADER_SIZE_V0;
        else if (magic == 1)
            return HEADER_SIZE_V1;
        throw new IllegalArgumentException("Invalid magic used in LegacyRecord: " + magic);
    }

    private static int keyOffset(byte magic) {
        if (magic == 0)
            return KEY_OFFSET_V0;
        else if (magic == 1)
            return KEY_OFFSET_V1;
        throw new IllegalArgumentException("Invalid magic used in LegacyRecord: " + magic);
    }

    public static TimestampType timestampType(byte magic, TimestampType wrapperRecordTimestampType, byte attributes) {
        if (magic == 0)
            return TimestampType.NO_TIMESTAMP_TYPE;
        else if (wrapperRecordTimestampType != null)
            return wrapperRecordTimestampType;
        else
            return (attributes & TIMESTAMP_TYPE_MASK) == 0 ? TimestampType.CREATE_TIME : TimestampType.LOG_APPEND_TIME;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/LogInputStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import java.io.IOException;

/**
 * An abstraction between an underlying input stream and record iterators, a {@link LogInputStream} only returns
 * the batches at one level. For magic values 0 and 1, this means that it can either handle iteration
 * at the top level of the log or deep iteration within the payload of a single message, but it does not attempt
 * to handle both. For magic value 2, this is only used for iterating over the top-level record batches (inner
 * records do not follow the {@link RecordBatch} interface).
 *
 * The generic typing allows for implementations which present only a view of the log entries, which enables more
 * efficient iteration when the record data is not actually needed. See for example
 * {@link FileLogInputStream.FileChannelRecordBatch} in which the record is not brought into memory until needed.
 *
 * @param <T> Type parameter of the log entry
 */
interface LogInputStream<T extends RecordBatch> {

    /**
     * Get the next record batch from the underlying input stream.
     *
     * @return The next record batch or null if there is none
     * @throws IOException for any IO errors
     */
    T nextBatch() throws IOException;
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/MemoryRecords.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.message.LeaderChangeMessage; import org.apache.kafka.common.message.SnapshotHeaderRecord; import org.apache.kafka.common.message.SnapshotFooterRecord; import org.apache.kafka.common.network.TransferableChannel; import org.apache.kafka.common.record.MemoryRecords.RecordFilter.BatchRetention; import org.apache.kafka.common.record.MemoryRecords.RecordFilter.BatchRetentionResult; import org.apache.kafka.common.utils.AbstractIterator; import org.apache.kafka.common.utils.BufferSupplier; import org.apache.kafka.common.utils.ByteBufferOutputStream; import org.apache.kafka.common.utils.CloseableIterator; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.GatheringByteChannel; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Objects; /** * A {@link Records} implementation backed by a ByteBuffer. 
This is used only for reading or * modifying in-place an existing buffer of record batches. To create a new buffer see {@link MemoryRecordsBuilder}, * or one of the {@link #builder(ByteBuffer, byte, CompressionType, TimestampType, long)} variants. */ public class MemoryRecords extends AbstractRecords { private static final Logger log = LoggerFactory.getLogger(MemoryRecords.class); public static final MemoryRecords EMPTY = MemoryRecords.readableRecords(ByteBuffer.allocate(0)); private final ByteBuffer buffer; private final Iterable<MutableRecordBatch> batches = this::batchIterator; private int validBytes = -1; // Construct a writable memory records private MemoryRecords(ByteBuffer buffer) { Objects.requireNonNull(buffer, "buffer should not be null"); this.buffer = buffer; } @Override public int sizeInBytes() { return buffer.limit(); } @Override public long writeTo(TransferableChannel channel, long position, int length) throws IOException { if (position > Integer.MAX_VALUE) throw new IllegalArgumentException("position should not be greater than Integer.MAX_VALUE: " + position); if (position + length > buffer.limit()) throw new IllegalArgumentException("position+length should not be greater than buffer.limit(), position: " + position + ", length: " + length + ", buffer.limit(): " + buffer.limit()); return Utils.tryWriteTo(channel, (int) position, length, buffer); } /** * Write all records to the given channel (including partial records). * @param channel The channel to write to * @return The number of bytes written * @throws IOException For any IO errors writing to the channel */ public int writeFullyTo(GatheringByteChannel channel) throws IOException { buffer.mark(); int written = 0; while (written < sizeInBytes()) written += channel.write(buffer); buffer.reset(); return written; } /** * The total number of bytes in this message set not including any partial, trailing messages. This * may be smaller than what is returned by {@link #sizeInBytes()}. 
* @return The number of valid bytes */ public int validBytes() { if (validBytes >= 0) return validBytes; int bytes = 0; for (RecordBatch batch : batches()) bytes += batch.sizeInBytes(); this.validBytes = bytes; return bytes; } @Override public ConvertedRecords<MemoryRecords> downConvert(byte toMagic, long firstOffset, Time time) { return RecordsUtil.downConvert(batches(), toMagic, firstOffset, time); } @Override public AbstractIterator<MutableRecordBatch> batchIterator() { return new RecordBatchIterator<>(new ByteBufferLogInputStream(buffer.duplicate(), Integer.MAX_VALUE)); } /** * Validates the header of the first batch and returns batch size. * @return first batch size including LOG_OVERHEAD if buffer contains header up to * magic byte, null otherwise * @throws CorruptRecordException if record size or magic is invalid */ public Integer firstBatchSize() { if (buffer.remaining() < HEADER_SIZE_UP_TO_MAGIC) return null; return new ByteBufferLogInputStream(buffer, Integer.MAX_VALUE).nextBatchSize(); } /** * Filter the records into the provided ByteBuffer. * * @param partition The partition that is filtered (used only for logging) * @param filter The filter function * @param destinationBuffer The byte buffer to write the filtered records to * @param maxRecordBatchSize The maximum record batch size. Note this is not a hard limit: if a batch * exceeds this after filtering, we log a warning, but the batch will still be * created. * @param decompressionBufferSupplier The supplier of ByteBuffer(s) used for decompression if supported. For small * record batches, allocating a potentially large buffer (64 KB for LZ4) will * dominate the cost of decompressing and iterating over the records in the * batch. As such, a supplier that reuses buffers will have a significant * performance impact. 
* @return A FilterResult with a summary of the output (for metrics) and potentially an overflow buffer */ public FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer, int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier) { return filterTo(partition, batches(), filter, destinationBuffer, maxRecordBatchSize, decompressionBufferSupplier); } /** * Note: This method is also used to convert the first timestamp of the batch (which is usually the timestamp of the first record) * to the delete horizon of the tombstones or txn markers which are present in the batch. */ private static FilterResult filterTo(TopicPartition partition, Iterable<MutableRecordBatch> batches, RecordFilter filter, ByteBuffer destinationBuffer, int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier) { FilterResult filterResult = new FilterResult(destinationBuffer); ByteBufferOutputStream bufferOutputStream = new ByteBufferOutputStream(destinationBuffer); for (MutableRecordBatch batch : batches) { final BatchRetentionResult batchRetentionResult = filter.checkBatchRetention(batch); final boolean containsMarkerForEmptyTxn = batchRetentionResult.containsMarkerForEmptyTxn; final BatchRetention batchRetention = batchRetentionResult.batchRetention; filterResult.bytesRead += batch.sizeInBytes(); if (batchRetention == BatchRetention.DELETE) continue; // We use the absolute offset to decide whether to retain the message or not. Due to KAFKA-4298, we have to // allow for the possibility that a previous version corrupted the log by writing a compressed record batch // with a magic value not matching the magic of the records (magic < 2). This will be fixed as we // recopy the messages to the destination buffer. 
byte batchMagic = batch.magic(); List<Record> retainedRecords = new ArrayList<>(); final BatchFilterResult iterationResult = filterBatch(batch, decompressionBufferSupplier, filterResult, filter, batchMagic, true, retainedRecords); boolean containsTombstones = iterationResult.containsTombstones; boolean writeOriginalBatch = iterationResult.writeOriginalBatch; long maxOffset = iterationResult.maxOffset; if (!retainedRecords.isEmpty()) { // we check if the delete horizon should be set to a new value // in which case, we need to reset the base timestamp and overwrite the timestamp deltas // if the batch does not contain tombstones, then we don't need to overwrite batch boolean needToSetDeleteHorizon = batch.magic() >= RecordBatch.MAGIC_VALUE_V2 && (containsTombstones || containsMarkerForEmptyTxn) && !batch.deleteHorizonMs().isPresent(); if (writeOriginalBatch && !needToSetDeleteHorizon) { batch.writeTo(bufferOutputStream); filterResult.updateRetainedBatchMetadata(batch, retainedRecords.size(), false); } else { final MemoryRecordsBuilder builder; long deleteHorizonMs; if (needToSetDeleteHorizon) deleteHorizonMs = filter.currentTime + filter.deleteRetentionMs; else deleteHorizonMs = batch.deleteHorizonMs().orElse(RecordBatch.NO_TIMESTAMP); builder = buildRetainedRecordsInto(batch, retainedRecords, bufferOutputStream, deleteHorizonMs); MemoryRecords records = builder.build(); int filteredBatchSize = records.sizeInBytes(); if (filteredBatchSize > batch.sizeInBytes() && filteredBatchSize > maxRecordBatchSize) log.warn("Record batch from {} with last offset {} exceeded max record batch size {} after cleaning " + "(new size is {}). 
Consumers with version earlier than 0.10.1.0 may need to " + "increase their fetch sizes.", partition, batch.lastOffset(), maxRecordBatchSize, filteredBatchSize); MemoryRecordsBuilder.RecordsInfo info = builder.info(); filterResult.updateRetainedBatchMetadata(info.maxTimestamp, info.shallowOffsetOfMaxTimestamp, maxOffset, retainedRecords.size(), filteredBatchSize); } } else if (batchRetention == BatchRetention.RETAIN_EMPTY) { if (batchMagic < RecordBatch.MAGIC_VALUE_V2) throw new IllegalStateException("Empty batches are only supported for magic v2 and above"); bufferOutputStream.ensureRemaining(DefaultRecordBatch.RECORD_BATCH_OVERHEAD); DefaultRecordBatch.writeEmptyHeader(bufferOutputStream.buffer(), batchMagic, batch.producerId(), batch.producerEpoch(), batch.baseSequence(), batch.baseOffset(), batch.lastOffset(), batch.partitionLeaderEpoch(), batch.timestampType(), batch.maxTimestamp(), batch.isTransactional(), batch.isControlBatch()); filterResult.updateRetainedBatchMetadata(batch, 0, true); } // If we had to allocate a new buffer to fit the filtered buffer (see KAFKA-5316), return early to // avoid the need for additional allocations. ByteBuffer outputBuffer = bufferOutputStream.buffer(); if (outputBuffer != destinationBuffer) { filterResult.outputBuffer = outputBuffer; return filterResult; } } return filterResult; } private static BatchFilterResult filterBatch(RecordBatch batch, BufferSupplier decompressionBufferSupplier, FilterResult filterResult, RecordFilter filter, byte batchMagic, boolean writeOriginalBatch, List<Record> retainedRecords) { long maxOffset = -1; boolean containsTombstones = false; try (final CloseableIterator<Record> iterator = batch.streamingIterator(decompressionBufferSupplier)) { while (iterator.hasNext()) { Record record = iterator.next(); filterResult.messagesRead += 1; if (filter.shouldRetainRecord(batch, record)) { // Check for log corruption due to KAFKA-4298. 
If we find it, make sure that we overwrite // the corrupted batch with correct data. if (!record.hasMagic(batchMagic)) writeOriginalBatch = false; if (record.offset() > maxOffset) maxOffset = record.offset(); retainedRecords.add(record); if (!record.hasValue()) { containsTombstones = true; } } else { writeOriginalBatch = false; } } return new BatchFilterResult(writeOriginalBatch, containsTombstones, maxOffset); } } private static class BatchFilterResult { private final boolean writeOriginalBatch; private final boolean containsTombstones; private final long maxOffset; private BatchFilterResult(final boolean writeOriginalBatch, final boolean containsTombstones, final long maxOffset) { this.writeOriginalBatch = writeOriginalBatch; this.containsTombstones = containsTombstones; this.maxOffset = maxOffset; } } private static MemoryRecordsBuilder buildRetainedRecordsInto(RecordBatch originalBatch, List<Record> retainedRecords, ByteBufferOutputStream bufferOutputStream, final long deleteHorizonMs) { byte magic = originalBatch.magic(); TimestampType timestampType = originalBatch.timestampType(); long logAppendTime = timestampType == TimestampType.LOG_APPEND_TIME ? originalBatch.maxTimestamp() : RecordBatch.NO_TIMESTAMP; long baseOffset = magic >= RecordBatch.MAGIC_VALUE_V2 ? 
originalBatch.baseOffset() : retainedRecords.get(0).offset();

        MemoryRecordsBuilder builder = new MemoryRecordsBuilder(bufferOutputStream, magic,
                originalBatch.compressionType(), timestampType, baseOffset, logAppendTime,
                originalBatch.producerId(), originalBatch.producerEpoch(), originalBatch.baseSequence(),
                originalBatch.isTransactional(), originalBatch.isControlBatch(),
                originalBatch.partitionLeaderEpoch(), bufferOutputStream.limit(), deleteHorizonMs);

        for (Record record : retainedRecords)
            builder.append(record);

        if (magic >= RecordBatch.MAGIC_VALUE_V2)
            // we must preserve the last offset from the initial batch in order to ensure that the
            // last sequence number from the batch remains even after compaction. Otherwise, the producer
            // could incorrectly see an out of sequence error.
            builder.overrideLastOffset(originalBatch.lastOffset());

        return builder;
    }

    /**
     * Get the byte buffer that backs this instance for reading.
     */
    public ByteBuffer buffer() {
        // duplicate() shares the content but gives the caller an independent position/limit.
        return buffer.duplicate();
    }

    @Override
    public Iterable<MutableRecordBatch> batches() {
        return batches;
    }

    @Override
    public String toString() {
        return "MemoryRecords(size=" + sizeInBytes() +
                ", buffer=" + buffer +
                ")";
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        MemoryRecords that = (MemoryRecords) o;

        // Equality (and hashCode below) is defined purely by the backing buffer contents.
        return buffer.equals(that.buffer);
    }

    @Override
    public int hashCode() {
        return buffer.hashCode();
    }

    /**
     * Filter callback used by {@code filterTo}: batch-level retention is decided first via
     * {@code checkBatchRetention}, then per-record retention via {@code shouldRetainRecord}
     * for batches that survive.
     */
    public static abstract class RecordFilter {
        public final long currentTime;
        public final long deleteRetentionMs;

        public RecordFilter(final long currentTime, final long deleteRetentionMs) {
            this.currentTime = currentTime;
            this.deleteRetentionMs = deleteRetentionMs;
        }

        /**
         * Batch-level retention decision plus whether the batch carries a control marker for an
         * otherwise empty transaction.
         */
        public static class BatchRetentionResult {
            public final BatchRetention batchRetention;
            public final boolean containsMarkerForEmptyTxn;

            public BatchRetentionResult(final BatchRetention batchRetention,
                                        final boolean containsMarkerForEmptyTxn) {
                this.batchRetention = batchRetention;
                this.containsMarkerForEmptyTxn = containsMarkerForEmptyTxn;
            }
        }

        public enum BatchRetention {
            DELETE, // Delete the batch without inspecting records
            RETAIN_EMPTY, // Retain the batch even if it is empty
            DELETE_EMPTY // Delete the batch if it is empty
        }

        /**
         * Check whether the full batch can be discarded (i.e. whether we even need to
         * check the records individually).
         */
        protected abstract BatchRetentionResult checkBatchRetention(RecordBatch batch);

        /**
         * Check whether a record should be retained in the log. Note that {@link #checkBatchRetention(RecordBatch)}
         * is used prior to checking individual record retention. Only records from batches which were not
         * explicitly discarded with {@link BatchRetention#DELETE} will be considered.
         */
        protected abstract boolean shouldRetainRecord(RecordBatch recordBatch, Record record);
    }

    /**
     * Accumulates statistics about a filtering pass: messages/bytes read and retained, the max
     * retained offset, and the (timestamp, shallow offset) pair of the largest retained timestamp.
     */
    public static class FilterResult {
        private ByteBuffer outputBuffer;
        private int messagesRead = 0;
        // Note that `bytesRead` should contain only bytes from batches that have been processed, i.e. bytes from
        // `messagesRead` and any discarded batches.
        private int bytesRead = 0;
        private int messagesRetained = 0;
        private int bytesRetained = 0;
        private long maxOffset = -1L;
        private long maxTimestamp = RecordBatch.NO_TIMESTAMP;
        private long shallowOffsetOfMaxTimestamp = -1L;

        private FilterResult(ByteBuffer outputBuffer) {
            this.outputBuffer = outputBuffer;
        }

        /**
         * Record a retained batch. When {@code headerOnly}, only the batch header was written
         * (its records were discarded), so just the header overhead counts toward retained bytes.
         */
        private void updateRetainedBatchMetadata(MutableRecordBatch retainedBatch, int numMessagesInBatch,
                                                 boolean headerOnly) {
            int bytesRetained = headerOnly ?
DefaultRecordBatch.RECORD_BATCH_OVERHEAD : retainedBatch.sizeInBytes();
            // For a whole retained batch, the batch's last offset stands in for both the shallow
            // offset of the max timestamp and the max offset.
            updateRetainedBatchMetadata(retainedBatch.maxTimestamp(), retainedBatch.lastOffset(),
                    retainedBatch.lastOffset(), numMessagesInBatch, bytesRetained);
        }

        private void updateRetainedBatchMetadata(long maxTimestamp, long shallowOffsetOfMaxTimestamp, long maxOffset,
                                                 int messagesRetained, int bytesRetained) {
            validateBatchMetadata(maxTimestamp, shallowOffsetOfMaxTimestamp, maxOffset);
            // Track the (timestamp, shallow offset) pair of the largest timestamp seen so far.
            if (maxTimestamp > this.maxTimestamp) {
                this.maxTimestamp = maxTimestamp;
                this.shallowOffsetOfMaxTimestamp = shallowOffsetOfMaxTimestamp;
            }
            this.maxOffset = Math.max(maxOffset, this.maxOffset);
            this.messagesRetained += messagesRetained;
            this.bytesRetained += bytesRetained;
        }

        // Sanity-check invariants before folding batch metadata into the running totals.
        private void validateBatchMetadata(long maxTimestamp, long shallowOffsetOfMaxTimestamp, long maxOffset) {
            if (maxTimestamp != RecordBatch.NO_TIMESTAMP && shallowOffsetOfMaxTimestamp < 0)
                throw new IllegalArgumentException("shallowOffset undefined for maximum timestamp " + maxTimestamp);
            if (maxOffset < 0)
                throw new IllegalArgumentException("maxOffset undefined");
        }

        public ByteBuffer outputBuffer() {
            return outputBuffer;
        }

        public int messagesRead() {
            return messagesRead;
        }

        public int bytesRead() {
            return bytesRead;
        }

        public int messagesRetained() {
            return messagesRetained;
        }

        public int bytesRetained() {
            return bytesRetained;
        }

        public long maxOffset() {
            return maxOffset;
        }

        public long maxTimestamp() {
            return maxTimestamp;
        }

        public long shallowOffsetOfMaxTimestamp() {
            return shallowOffsetOfMaxTimestamp;
        }
    }

    /**
     * Wrap an existing buffer (positioned at the start of a record set) as read-only records.
     */
    public static MemoryRecords readableRecords(ByteBuffer buffer) {
        return new MemoryRecords(buffer);
    }

    // Builder with the current magic value; write limit defaults to the buffer capacity.
    public static MemoryRecordsBuilder builder(ByteBuffer buffer,
                                               CompressionType compressionType,
                                               TimestampType timestampType,
                                               long baseOffset) {
        return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset);
    }

    // Builder with an explicit maximum size; log append time is captured now if applicable.
    public static MemoryRecordsBuilder builder(ByteBuffer buffer,
                                               CompressionType compressionType,
                                               TimestampType timestampType,
                                               long baseOffset,
                                               int maxSize) {
        long logAppendTime = RecordBatch.NO_TIMESTAMP;
        if (timestampType == TimestampType.LOG_APPEND_TIME)
            logAppendTime = System.currentTimeMillis();
        return new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType,
                baseOffset, logAppendTime, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH,
                RecordBatch.NO_SEQUENCE, false, false, RecordBatch.NO_PARTITION_LEADER_EPOCH, maxSize);
    }

    // Builder pre-populated with idempotent-producer metadata (producer id/epoch and base sequence).
    public static MemoryRecordsBuilder idempotentBuilder(ByteBuffer buffer,
                                                         CompressionType compressionType,
                                                         long baseOffset,
                                                         long producerId,
                                                         short producerEpoch,
                                                         int baseSequence) {
        return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, TimestampType.CREATE_TIME,
                baseOffset, System.currentTimeMillis(), producerId, producerEpoch, baseSequence);
    }

    public static MemoryRecordsBuilder builder(ByteBuffer buffer,
                                               byte magic,
                                               CompressionType compressionType,
                                               TimestampType timestampType,
                                               long baseOffset,
                                               long logAppendTime) {
        return builder(buffer, magic, compressionType, timestampType, baseOffset, logAppendTime,
                RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false,
                RecordBatch.NO_PARTITION_LEADER_EPOCH);
    }

    public static MemoryRecordsBuilder builder(ByteBuffer buffer,
                                               byte magic,
                                               CompressionType compressionType,
                                               TimestampType timestampType,
                                               long baseOffset) {
        long logAppendTime = RecordBatch.NO_TIMESTAMP;
        if (timestampType == TimestampType.LOG_APPEND_TIME)
            logAppendTime = System.currentTimeMillis();
        return builder(buffer, magic, compressionType, timestampType, baseOffset, logAppendTime,
                RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false,
                RecordBatch.NO_PARTITION_LEADER_EPOCH);
    }

    public static MemoryRecordsBuilder builder(ByteBuffer buffer,
                                               byte magic,
                                               CompressionType compressionType,
                                               TimestampType timestampType,
                                               long baseOffset,
                                               long logAppendTime,
                                               int partitionLeaderEpoch) {
        return builder(buffer, magic, compressionType, timestampType, baseOffset, logAppendTime,
                RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false,
                partitionLeaderEpoch);
    }

    // Transactional builder; timestamps are assigned per record (CREATE_TIME).
    public static MemoryRecordsBuilder builder(ByteBuffer buffer,
                                               CompressionType compressionType,
                                               long baseOffset,
                                               long producerId,
                                               short producerEpoch,
                                               int baseSequence,
                                               boolean isTransactional) {
        return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, TimestampType.CREATE_TIME,
                baseOffset, RecordBatch.NO_TIMESTAMP, producerId, producerEpoch, baseSequence, isTransactional,
                RecordBatch.NO_PARTITION_LEADER_EPOCH);
    }

    public static MemoryRecordsBuilder builder(ByteBuffer buffer,
                                               byte magic,
                                               CompressionType compressionType,
                                               TimestampType timestampType,
                                               long baseOffset,
                                               long logAppendTime,
                                               long producerId,
                                               short producerEpoch,
                                               int baseSequence) {
        return builder(buffer, magic, compressionType, timestampType, baseOffset, logAppendTime,
                producerId, producerEpoch, baseSequence, false, RecordBatch.NO_PARTITION_LEADER_EPOCH);
    }

    public static MemoryRecordsBuilder builder(ByteBuffer buffer,
                                               byte magic,
                                               CompressionType compressionType,
                                               TimestampType timestampType,
                                               long baseOffset,
                                               long logAppendTime,
                                               long producerId,
                                               short producerEpoch,
                                               int baseSequence,
                                               boolean isTransactional,
                                               int partitionLeaderEpoch) {
        return builder(buffer, magic, compressionType, timestampType, baseOffset, logAppendTime,
                producerId, producerEpoch, baseSequence, isTransactional, false, partitionLeaderEpoch);
    }

    // Fully-specified builder overload; all other builder(...) variants delegate here.
    public static MemoryRecordsBuilder builder(ByteBuffer buffer,
                                               byte magic,
                                               CompressionType compressionType,
                                               TimestampType timestampType,
                                               long baseOffset,
                                               long logAppendTime,
                                               long producerId,
                                               short producerEpoch,
                                               int baseSequence,
                                               boolean isTransactional,
                                               boolean isControlBatch,
                                               int partitionLeaderEpoch) {
        return new MemoryRecordsBuilder(buffer, magic, compressionType, timestampType, baseOffset,
                logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, isControlBatch,
                partitionLeaderEpoch,
buffer.remaining());
    }

    // ----- Convenience factories: build a MemoryRecords directly from SimpleRecords. -----

    public static MemoryRecords withRecords(CompressionType compressionType, SimpleRecord... records) {
        return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, compressionType, records);
    }

    public static MemoryRecords withRecords(CompressionType compressionType, int partitionLeaderEpoch, SimpleRecord... records) {
        return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compressionType, TimestampType.CREATE_TIME,
                RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE,
                partitionLeaderEpoch, false, records);
    }

    public static MemoryRecords withRecords(byte magic, CompressionType compressionType, SimpleRecord... records) {
        return withRecords(magic, 0L, compressionType, TimestampType.CREATE_TIME, records);
    }

    public static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, SimpleRecord... records) {
        return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compressionType, TimestampType.CREATE_TIME,
                records);
    }

    public static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
                                            SimpleRecord... records) {
        return withRecords(magic, initialOffset, compressionType, TimestampType.CREATE_TIME, records);
    }

    public static MemoryRecords withRecords(long initialOffset, CompressionType compressionType,
                                            Integer partitionLeaderEpoch, SimpleRecord... records) {
        return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compressionType, TimestampType.CREATE_TIME,
                RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE,
                partitionLeaderEpoch, false, records);
    }

    // Idempotent variants: carry producer id/epoch and base sequence, but are not transactional.

    public static MemoryRecords withIdempotentRecords(CompressionType compressionType, long producerId,
                                                      short producerEpoch, int baseSequence, SimpleRecord... records) {
        return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compressionType, TimestampType.CREATE_TIME,
                producerId, producerEpoch, baseSequence, RecordBatch.NO_PARTITION_LEADER_EPOCH, false, records);
    }

    public static MemoryRecords withIdempotentRecords(byte magic, long initialOffset, CompressionType compressionType,
                                                      long producerId, short producerEpoch, int baseSequence,
                                                      int partitionLeaderEpoch, SimpleRecord... records) {
        return withRecords(magic, initialOffset, compressionType, TimestampType.CREATE_TIME, producerId, producerEpoch,
                baseSequence, partitionLeaderEpoch, false, records);
    }

    public static MemoryRecords withIdempotentRecords(long initialOffset, CompressionType compressionType,
                                                      long producerId, short producerEpoch, int baseSequence,
                                                      int partitionLeaderEpoch, SimpleRecord... records) {
        return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compressionType, TimestampType.CREATE_TIME,
                producerId, producerEpoch, baseSequence, partitionLeaderEpoch, false, records);
    }

    // Transactional variants: same as idempotent, but with the transactional flag set.

    public static MemoryRecords withTransactionalRecords(CompressionType compressionType, long producerId,
                                                         short producerEpoch, int baseSequence,
                                                         SimpleRecord... records) {
        return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compressionType, TimestampType.CREATE_TIME,
                producerId, producerEpoch, baseSequence, RecordBatch.NO_PARTITION_LEADER_EPOCH, true, records);
    }

    public static MemoryRecords withTransactionalRecords(byte magic, long initialOffset,
                                                         CompressionType compressionType, long producerId,
                                                         short producerEpoch, int baseSequence,
                                                         int partitionLeaderEpoch, SimpleRecord... records) {
        return withRecords(magic, initialOffset, compressionType, TimestampType.CREATE_TIME, producerId, producerEpoch,
                baseSequence, partitionLeaderEpoch, true, records);
    }

    public static MemoryRecords withTransactionalRecords(long initialOffset, CompressionType compressionType,
                                                         long producerId, short producerEpoch, int baseSequence,
                                                         int partitionLeaderEpoch, SimpleRecord... records) {
        return withTransactionalRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compressionType,
                producerId, producerEpoch, baseSequence, partitionLeaderEpoch, records);
    }

    public static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
                                            TimestampType timestampType, SimpleRecord... records) {
        return withRecords(magic, initialOffset, compressionType, timestampType, RecordBatch.NO_PRODUCER_ID,
                RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, RecordBatch.NO_PARTITION_LEADER_EPOCH, false,
                records);
    }

    // Fully-specified factory; all other withRecords/withIdempotentRecords/withTransactionalRecords
    // overloads delegate here.
    public static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
                                            TimestampType timestampType, long producerId, short producerEpoch,
                                            int baseSequence, int partitionLeaderEpoch, boolean isTransactional,
                                            SimpleRecord... records) {
        if (records.length == 0)
            return MemoryRecords.EMPTY;
        // Pre-size the output buffer from a per-record estimate; the builder grows it if needed.
        int sizeEstimate = AbstractRecords.estimateSizeInBytes(magic, compressionType, Arrays.asList(records));
        ByteBufferOutputStream bufferStream = new ByteBufferOutputStream(sizeEstimate);
        long logAppendTime = RecordBatch.NO_TIMESTAMP;
        if (timestampType == TimestampType.LOG_APPEND_TIME)
            logAppendTime = System.currentTimeMillis();
        MemoryRecordsBuilder builder = new MemoryRecordsBuilder(bufferStream, magic, compressionType, timestampType,
                initialOffset, logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, false,
                partitionLeaderEpoch, sizeEstimate);

        for (SimpleRecord record : records)
            builder.append(record);

        return builder.build();
    }

    public static MemoryRecords withEndTransactionMarker(long producerId, short producerEpoch,
                                                         EndTransactionMarker marker) {
        return withEndTransactionMarker(0L, System.currentTimeMillis(), RecordBatch.NO_PARTITION_LEADER_EPOCH,
                producerId, producerEpoch, marker);
    }

    public static MemoryRecords withEndTransactionMarker(long timestamp, long producerId, short producerEpoch,
                                                         EndTransactionMarker marker) {
        return withEndTransactionMarker(0L, timestamp,
RecordBatch.NO_PARTITION_LEADER_EPOCH,
                producerId, producerEpoch, marker);
    }

    /**
     * Build a records instance containing only a transaction end marker (commit/abort) control batch.
     */
    public static MemoryRecords withEndTransactionMarker(long initialOffset, long timestamp,
                                                         int partitionLeaderEpoch, long producerId,
                                                         short producerEpoch, EndTransactionMarker marker) {
        // Allocate exactly the header plus the fixed-size end-transaction record.
        int endTxnMarkerBatchSize = DefaultRecordBatch.RECORD_BATCH_OVERHEAD +
                EndTransactionMarker.CURRENT_END_TXN_SCHEMA_RECORD_SIZE;
        ByteBuffer buffer = ByteBuffer.allocate(endTxnMarkerBatchSize);
        writeEndTransactionalMarker(buffer, initialOffset, timestamp, partitionLeaderEpoch, producerId,
                producerEpoch, marker);
        buffer.flip();
        return MemoryRecords.readableRecords(buffer);
    }

    public static void writeEndTransactionalMarker(ByteBuffer buffer, long initialOffset, long timestamp,
                                                   int partitionLeaderEpoch, long producerId, short producerEpoch,
                                                   EndTransactionMarker marker) {
        // End-transaction markers are always part of a transaction, hence the flag is fixed.
        boolean isTransactional = true;
        try (MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE,
                CompressionType.NONE, TimestampType.CREATE_TIME, initialOffset, timestamp,
                producerId, producerEpoch, RecordBatch.NO_SEQUENCE, isTransactional, true,
                partitionLeaderEpoch, buffer.capacity())
        ) {
            builder.appendEndTxnMarker(timestamp, marker);
        }
    }

    /**
     * Build a records instance containing a single leader-change control record, written into the
     * supplied buffer.
     */
    public static MemoryRecords withLeaderChangeMessage(
        long initialOffset,
        long timestamp,
        int leaderEpoch,
        ByteBuffer buffer,
        LeaderChangeMessage leaderChangeMessage
    ) {
        writeLeaderChangeMessage(buffer, initialOffset, timestamp, leaderEpoch, leaderChangeMessage);
        buffer.flip();
        return MemoryRecords.readableRecords(buffer);
    }

    private static void writeLeaderChangeMessage(ByteBuffer buffer,
                                                 long initialOffset,
                                                 long timestamp,
                                                 int leaderEpoch,
                                                 LeaderChangeMessage leaderChangeMessage) {
        try (MemoryRecordsBuilder builder = new MemoryRecordsBuilder(
                buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
                TimestampType.CREATE_TIME, initialOffset, timestamp,
                RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE,
                false, true, leaderEpoch, buffer.capacity())
        ) {
            builder.appendLeaderChangeMessage(timestamp, leaderChangeMessage);
        }
    }

    /**
     * Build a records instance containing a single snapshot-header control record.
     */
    public static MemoryRecords withSnapshotHeaderRecord(
        long initialOffset,
        long timestamp,
        int leaderEpoch,
        ByteBuffer buffer,
        SnapshotHeaderRecord snapshotHeaderRecord
    ) {
        writeSnapshotHeaderRecord(buffer, initialOffset, timestamp, leaderEpoch, snapshotHeaderRecord);
        buffer.flip();
        return MemoryRecords.readableRecords(buffer);
    }

    private static void writeSnapshotHeaderRecord(ByteBuffer buffer,
                                                  long initialOffset,
                                                  long timestamp,
                                                  int leaderEpoch,
                                                  SnapshotHeaderRecord snapshotHeaderRecord
    ) {
        try (MemoryRecordsBuilder builder = new MemoryRecordsBuilder(
                buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
                TimestampType.CREATE_TIME, initialOffset, timestamp,
                RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE,
                false, true, leaderEpoch, buffer.capacity())
        ) {
            builder.appendSnapshotHeaderMessage(timestamp, snapshotHeaderRecord);
        }
    }

    /**
     * Build a records instance containing a single snapshot-footer control record.
     */
    public static MemoryRecords withSnapshotFooterRecord(
        long initialOffset,
        long timestamp,
        int leaderEpoch,
        ByteBuffer buffer,
        SnapshotFooterRecord snapshotFooterRecord
    ) {
        writeSnapshotFooterRecord(buffer, initialOffset, timestamp, leaderEpoch, snapshotFooterRecord);
        buffer.flip();
        return MemoryRecords.readableRecords(buffer);
    }

    private static void writeSnapshotFooterRecord(ByteBuffer buffer,
                                                  long initialOffset,
                                                  long timestamp,
                                                  int leaderEpoch,
                                                  SnapshotFooterRecord snapshotFooterRecord
    ) {
        try (MemoryRecordsBuilder builder = new MemoryRecordsBuilder(
                buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE,
                TimestampType.CREATE_TIME, initialOffset, timestamp,
                RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE,
                false, true, leaderEpoch, buffer.capacity())
        ) {
            builder.appendSnapshotFooterMessage(timestamp, snapshotFooterRecord);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/MemoryRecordsBuilder.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.message.LeaderChangeMessage; import org.apache.kafka.common.message.SnapshotHeaderRecord; import org.apache.kafka.common.message.SnapshotFooterRecord; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.types.Struct; import org.apache.kafka.common.utils.ByteBufferOutputStream; import org.apache.kafka.common.utils.Utils; import java.io.DataOutputStream; import java.io.IOException; import java.io.OutputStream; import java.nio.ByteBuffer; import static org.apache.kafka.common.utils.Utils.wrapNullable; /** * This class is used to write new log data in memory, i.e. this is the write path for {@link MemoryRecords}. * It transparently handles compression and exposes methods for appending new records, possibly with message * format conversion. * * In cases where keeping memory retention low is important and there's a gap between the time that record appends stop * and the builder is closed (e.g. the Producer), it's important to call `closeForRecordAppends` when the former happens. 
* This will release resources like compression buffers that can be relatively large (64 KB for LZ4). */ public class MemoryRecordsBuilder implements AutoCloseable { private static final float COMPRESSION_RATE_ESTIMATION_FACTOR = 1.05f; private static final DataOutputStream CLOSED_STREAM = new DataOutputStream(new OutputStream() { @Override public void write(int b) { throw new IllegalStateException("MemoryRecordsBuilder is closed for record appends"); } }); private final TimestampType timestampType; private final CompressionType compressionType; // Used to hold a reference to the underlying ByteBuffer so that we can write the record batch header and access // the written bytes. ByteBufferOutputStream allocates a new ByteBuffer if the existing one is not large enough, // so it's not safe to hold a direct reference to the underlying ByteBuffer. private final ByteBufferOutputStream bufferStream; private final byte magic; private final int initialPosition; private final long baseOffset; private final long logAppendTime; private final boolean isControlBatch; private final int partitionLeaderEpoch; private final int writeLimit; private final int batchHeaderSizeInBytes; // Use a conservative estimate of the compression ratio. The producer overrides this using statistics // from previous batches before appending any records. 
private float estimatedCompressionRatio = 1.0F; // Used to append records, may compress data on the fly private DataOutputStream appendStream; private boolean isTransactional; private long producerId; private short producerEpoch; private int baseSequence; private int uncompressedRecordsSizeInBytes = 0; // Number of bytes (excluding the header) written before compression private int numRecords = 0; private float actualCompressionRatio = 1; private long maxTimestamp = RecordBatch.NO_TIMESTAMP; private long deleteHorizonMs; private long offsetOfMaxTimestamp = -1; private Long lastOffset = null; private Long baseTimestamp = null; private MemoryRecords builtRecords; private boolean aborted = false; public MemoryRecordsBuilder(ByteBufferOutputStream bufferStream, byte magic, CompressionType compressionType, TimestampType timestampType, long baseOffset, long logAppendTime, long producerId, short producerEpoch, int baseSequence, boolean isTransactional, boolean isControlBatch, int partitionLeaderEpoch, int writeLimit, long deleteHorizonMs) { if (magic > RecordBatch.MAGIC_VALUE_V0 && timestampType == TimestampType.NO_TIMESTAMP_TYPE) throw new IllegalArgumentException("TimestampType must be set for magic >= 0"); if (magic < RecordBatch.MAGIC_VALUE_V2) { if (isTransactional) throw new IllegalArgumentException("Transactional records are not supported for magic " + magic); if (isControlBatch) throw new IllegalArgumentException("Control records are not supported for magic " + magic); if (compressionType == CompressionType.ZSTD) throw new IllegalArgumentException("ZStandard compression is not supported for magic " + magic); if (deleteHorizonMs != RecordBatch.NO_TIMESTAMP) throw new IllegalArgumentException("Delete horizon timestamp is not supported for magic " + magic); } this.magic = magic; this.timestampType = timestampType; this.compressionType = compressionType; this.baseOffset = baseOffset; this.logAppendTime = logAppendTime; this.numRecords = 0; 
this.uncompressedRecordsSizeInBytes = 0; this.actualCompressionRatio = 1; this.maxTimestamp = RecordBatch.NO_TIMESTAMP; this.producerId = producerId; this.producerEpoch = producerEpoch; this.baseSequence = baseSequence; this.isTransactional = isTransactional; this.isControlBatch = isControlBatch; this.deleteHorizonMs = deleteHorizonMs; this.partitionLeaderEpoch = partitionLeaderEpoch; this.writeLimit = writeLimit; this.initialPosition = bufferStream.position(); this.batchHeaderSizeInBytes = AbstractRecords.recordBatchHeaderSizeInBytes(magic, compressionType); bufferStream.position(initialPosition + batchHeaderSizeInBytes); this.bufferStream = bufferStream; this.appendStream = new DataOutputStream(compressionType.wrapForOutput(this.bufferStream, magic)); if (hasDeleteHorizonMs()) { this.baseTimestamp = deleteHorizonMs; } } public MemoryRecordsBuilder(ByteBufferOutputStream bufferStream, byte magic, CompressionType compressionType, TimestampType timestampType, long baseOffset, long logAppendTime, long producerId, short producerEpoch, int baseSequence, boolean isTransactional, boolean isControlBatch, int partitionLeaderEpoch, int writeLimit) { this(bufferStream, magic, compressionType, timestampType, baseOffset, logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, isControlBatch, partitionLeaderEpoch, writeLimit, RecordBatch.NO_TIMESTAMP); } /** * Construct a new builder. * * @param buffer The underlying buffer to use (note that this class will allocate a new buffer if necessary * to fit the records appended) * @param magic The magic value to use * @param compressionType The compression codec to use * @param timestampType The desired timestamp type. For magic > 0, this cannot be {@link TimestampType#NO_TIMESTAMP_TYPE}. * @param baseOffset The initial offset to use for * @param logAppendTime The log append time of this record set. Can be set to NO_TIMESTAMP if CREATE_TIME is used. 
* @param producerId The producer ID associated with the producer writing this record set * @param producerEpoch The epoch of the producer * @param baseSequence The sequence number of the first record in this set * @param isTransactional Whether or not the records are part of a transaction * @param isControlBatch Whether or not this is a control batch (e.g. for transaction markers) * @param partitionLeaderEpoch The epoch of the partition leader appending the record set to the log * @param writeLimit The desired limit on the total bytes for this record set (note that this can be exceeded * when compression is used since size estimates are rough, and in the case that the first * record added exceeds the size). */ public MemoryRecordsBuilder(ByteBuffer buffer, byte magic, CompressionType compressionType, TimestampType timestampType, long baseOffset, long logAppendTime, long producerId, short producerEpoch, int baseSequence, boolean isTransactional, boolean isControlBatch, int partitionLeaderEpoch, int writeLimit) { this(new ByteBufferOutputStream(buffer), magic, compressionType, timestampType, baseOffset, logAppendTime, producerId, producerEpoch, baseSequence, isTransactional, isControlBatch, partitionLeaderEpoch, writeLimit); } public ByteBuffer buffer() { return bufferStream.buffer(); } public int initialCapacity() { return bufferStream.initialCapacity(); } public double compressionRatio() { return actualCompressionRatio; } public CompressionType compressionType() { return compressionType; } public boolean isControlBatch() { return isControlBatch; } public boolean isTransactional() { return isTransactional; } public boolean hasDeleteHorizonMs() { return magic >= RecordBatch.MAGIC_VALUE_V2 && deleteHorizonMs >= 0L; } /** * Close this builder and return the resulting buffer. 
* @return The built log buffer */ public MemoryRecords build() { if (aborted) { throw new IllegalStateException("Attempting to build an aborted record batch"); } close(); return builtRecords; } /** * Get the max timestamp and its offset. The details of the offset returned are a bit subtle. * * If the log append time is used, the offset will be the last offset unless no compression is used and * the message format version is 0 or 1, in which case, it will be the first offset. * * If create time is used, the offset will be the last offset unless no compression is used and the message * format version is 0 or 1, in which case, it will be the offset of the record with the max timestamp. * * @return The max timestamp and its offset */ public RecordsInfo info() { if (timestampType == TimestampType.LOG_APPEND_TIME) { long shallowOffsetOfMaxTimestamp; // Use the last offset when dealing with record batches if (compressionType != CompressionType.NONE || magic >= RecordBatch.MAGIC_VALUE_V2) shallowOffsetOfMaxTimestamp = lastOffset; else shallowOffsetOfMaxTimestamp = baseOffset; return new RecordsInfo(logAppendTime, shallowOffsetOfMaxTimestamp); } else if (maxTimestamp == RecordBatch.NO_TIMESTAMP) { return new RecordsInfo(RecordBatch.NO_TIMESTAMP, lastOffset); } else { long shallowOffsetOfMaxTimestamp; // Use the last offset when dealing with record batches if (compressionType != CompressionType.NONE || magic >= RecordBatch.MAGIC_VALUE_V2) shallowOffsetOfMaxTimestamp = lastOffset; else shallowOffsetOfMaxTimestamp = offsetOfMaxTimestamp; return new RecordsInfo(maxTimestamp, shallowOffsetOfMaxTimestamp); } } public int numRecords() { return numRecords; } /** * Return the sum of the size of the batch header (always uncompressed) and the records (before compression). 
*/ public int uncompressedBytesWritten() { return uncompressedRecordsSizeInBytes + batchHeaderSizeInBytes; } public void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional) { if (isClosed()) { // Sequence numbers are assigned when the batch is closed while the accumulator is being drained. // If the resulting ProduceRequest to the partition leader failed for a retriable error, the batch will // be re queued. In this case, we should not attempt to set the state again, since changing the producerId and sequence // once a batch has been sent to the broker risks introducing duplicates. throw new IllegalStateException("Trying to set producer state of an already closed batch. This indicates a bug on the client."); } this.producerId = producerId; this.producerEpoch = producerEpoch; this.baseSequence = baseSequence; this.isTransactional = isTransactional; } public void overrideLastOffset(long lastOffset) { if (builtRecords != null) throw new IllegalStateException("Cannot override the last offset after the records have been built"); this.lastOffset = lastOffset; } /** * Release resources required for record appends (e.g. compression buffers). Once this method is called, it's only * possible to update the RecordBatch header. 
*/ public void closeForRecordAppends() { if (appendStream != CLOSED_STREAM) { try { appendStream.close(); } catch (IOException e) { throw new KafkaException(e); } finally { appendStream = CLOSED_STREAM; } } } public void abort() { closeForRecordAppends(); buffer().position(initialPosition); aborted = true; } public void reopenAndRewriteProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional) { if (aborted) throw new IllegalStateException("Should not reopen a batch which is already aborted."); builtRecords = null; this.producerId = producerId; this.producerEpoch = producerEpoch; this.baseSequence = baseSequence; this.isTransactional = isTransactional; } public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.uncompressedRecordsSizeInBytes; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.uncompressedRecordsSizeInBytes; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } } private void validateProducerState() { if (isTransactional && producerId == RecordBatch.NO_PRODUCER_ID) throw new IllegalArgumentException("Cannot write transactional messages without a valid producer ID"); if (producerId != RecordBatch.NO_PRODUCER_ID) { if (producerEpoch == RecordBatch.NO_PRODUCER_EPOCH) throw new IllegalArgumentException("Invalid negative producer epoch"); if (baseSequence < 0 && !isControlBatch) throw new IllegalArgumentException("Invalid negative sequence number used"); if (magic < 
RecordBatch.MAGIC_VALUE_V2) throw new IllegalArgumentException("Idempotent messages are not supported for magic " + magic); } } /** * Write the header to the default batch. * @return the written compressed bytes. */ private int writeDefaultBatchHeader() { ensureOpenForRecordBatchWrite(); ByteBuffer buffer = bufferStream.buffer(); int pos = buffer.position(); buffer.position(initialPosition); int size = pos - initialPosition; int writtenCompressed = size - DefaultRecordBatch.RECORD_BATCH_OVERHEAD; int offsetDelta = (int) (lastOffset - baseOffset); final long maxTimestamp; if (timestampType == TimestampType.LOG_APPEND_TIME) maxTimestamp = logAppendTime; else maxTimestamp = this.maxTimestamp; DefaultRecordBatch.writeHeader(buffer, baseOffset, offsetDelta, size, magic, compressionType, timestampType, baseTimestamp, maxTimestamp, producerId, producerEpoch, baseSequence, isTransactional, isControlBatch, hasDeleteHorizonMs(), partitionLeaderEpoch, numRecords); buffer.position(pos); return writtenCompressed; } /** * Write the header to the legacy batch. * @return the written compressed bytes. */ private int writeLegacyCompressedWrapperHeader() { ensureOpenForRecordBatchWrite(); ByteBuffer buffer = bufferStream.buffer(); int pos = buffer.position(); buffer.position(initialPosition); int wrapperSize = pos - initialPosition - Records.LOG_OVERHEAD; int writtenCompressed = wrapperSize - LegacyRecord.recordOverhead(magic); AbstractLegacyRecordBatch.writeHeader(buffer, lastOffset, wrapperSize); long timestamp = timestampType == TimestampType.LOG_APPEND_TIME ? logAppendTime : maxTimestamp; LegacyRecord.writeCompressedRecordHeader(buffer, magic, wrapperSize, timestamp, compressionType, timestampType); buffer.position(pos); return writtenCompressed; } /** * Append a new record at the given offset. 
*/ private void appendWithOffset(long offset, boolean isControlRecord, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) { try { if (isControlRecord != isControlBatch) throw new IllegalArgumentException("Control records can only be appended to control batches"); if (lastOffset != null && offset <= lastOffset) throw new IllegalArgumentException(String.format("Illegal offset %s following previous offset %s " + "(Offsets must increase monotonically).", offset, lastOffset)); if (timestamp < 0 && timestamp != RecordBatch.NO_TIMESTAMP) throw new IllegalArgumentException("Invalid negative timestamp " + timestamp); if (magic < RecordBatch.MAGIC_VALUE_V2 && headers != null && headers.length > 0) throw new IllegalArgumentException("Magic v" + magic + " does not support record headers"); if (baseTimestamp == null) baseTimestamp = timestamp; if (magic > RecordBatch.MAGIC_VALUE_V1) { appendDefaultRecord(offset, timestamp, key, value, headers); } else { appendLegacyRecord(offset, timestamp, key, value, magic); } } catch (IOException e) { throw new KafkaException("I/O exception when writing to the append stream, closing", e); } } /** * Append a new record at the given offset. * @param offset The absolute offset of the record in the log buffer * @param timestamp The record timestamp * @param key The record key * @param value The record value * @param headers The record headers if there are any */ public void appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers) { appendWithOffset(offset, false, timestamp, wrapNullable(key), wrapNullable(value), headers); } /** * Append a new record at the given offset. 
* @param offset The absolute offset of the record in the log buffer * @param timestamp The record timestamp * @param key The record key * @param value The record value * @param headers The record headers if there are any */ public void appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) { appendWithOffset(offset, false, timestamp, key, value, headers); } /** * Append a new record at the given offset. * @param offset The absolute offset of the record in the log buffer * @param timestamp The record timestamp * @param key The record key * @param value The record value */ public void appendWithOffset(long offset, long timestamp, byte[] key, byte[] value) { appendWithOffset(offset, timestamp, wrapNullable(key), wrapNullable(value), Record.EMPTY_HEADERS); } /** * Append a new record at the given offset. * @param offset The absolute offset of the record in the log buffer * @param timestamp The record timestamp * @param key The record key * @param value The record value */ public void appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value) { appendWithOffset(offset, timestamp, key, value, Record.EMPTY_HEADERS); } /** * Append a new record at the given offset. * @param offset The absolute offset of the record in the log buffer * @param record The record to append */ public void appendWithOffset(long offset, SimpleRecord record) { appendWithOffset(offset, record.timestamp(), record.key(), record.value(), record.headers()); } /** * Append a control record at the given offset. The control record type must be known or * this method will raise an error. 
* * @param offset The absolute offset of the record in the log buffer * @param record The record to append */ public void appendControlRecordWithOffset(long offset, SimpleRecord record) { short typeId = ControlRecordType.parseTypeId(record.key()); ControlRecordType type = ControlRecordType.fromTypeId(typeId); if (type == ControlRecordType.UNKNOWN) throw new IllegalArgumentException("Cannot append record with unknown control record type " + typeId); appendWithOffset(offset, true, record.timestamp(), record.key(), record.value(), record.headers()); } /** * Append a new record at the next sequential offset. * @param timestamp The record timestamp * @param key The record key * @param value The record value */ public void append(long timestamp, ByteBuffer key, ByteBuffer value) { append(timestamp, key, value, Record.EMPTY_HEADERS); } /** * Append a new record at the next sequential offset. * @param timestamp The record timestamp * @param key The record key * @param value The record value * @param headers The record headers if there are any */ public void append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) { appendWithOffset(nextSequentialOffset(), timestamp, key, value, headers); } /** * Append a new record at the next sequential offset. * @param timestamp The record timestamp * @param key The record key * @param value The record value */ public void append(long timestamp, byte[] key, byte[] value) { append(timestamp, wrapNullable(key), wrapNullable(value), Record.EMPTY_HEADERS); } /** * Append a new record at the next sequential offset. * @param timestamp The record timestamp * @param key The record key * @param value The record value * @param headers The record headers if there are any */ public void append(long timestamp, byte[] key, byte[] value, Header[] headers) { append(timestamp, wrapNullable(key), wrapNullable(value), headers); } /** * Append a new record at the next sequential offset. 
* @param record The record to append */ public void append(SimpleRecord record) { appendWithOffset(nextSequentialOffset(), record); } /** * Append a control record at the next sequential offset. * @param timestamp The record timestamp * @param type The control record type (cannot be UNKNOWN) * @param value The control record value */ private void appendControlRecord(long timestamp, ControlRecordType type, ByteBuffer value) { Struct keyStruct = type.recordKey(); ByteBuffer key = ByteBuffer.allocate(keyStruct.sizeOf()); keyStruct.writeTo(key); key.flip(); appendWithOffset(nextSequentialOffset(), true, timestamp, key, value, Record.EMPTY_HEADERS); } public void appendEndTxnMarker(long timestamp, EndTransactionMarker marker) { if (producerId == RecordBatch.NO_PRODUCER_ID) throw new IllegalArgumentException("End transaction marker requires a valid producerId"); if (!isTransactional) throw new IllegalArgumentException("End transaction marker depends on batch transactional flag being enabled"); ByteBuffer value = marker.serializeValue(); appendControlRecord(timestamp, marker.controlType(), value); } public void appendLeaderChangeMessage(long timestamp, LeaderChangeMessage leaderChangeMessage) { if (partitionLeaderEpoch == RecordBatch.NO_PARTITION_LEADER_EPOCH) { throw new IllegalArgumentException("Partition leader epoch must be valid, but get " + partitionLeaderEpoch); } appendControlRecord( timestamp, ControlRecordType.LEADER_CHANGE, MessageUtil.toByteBuffer(leaderChangeMessage, ControlRecordUtils.LEADER_CHANGE_CURRENT_VERSION) ); } public void appendSnapshotHeaderMessage(long timestamp, SnapshotHeaderRecord snapshotHeaderRecord) { appendControlRecord( timestamp, ControlRecordType.SNAPSHOT_HEADER, MessageUtil.toByteBuffer(snapshotHeaderRecord, ControlRecordUtils.SNAPSHOT_HEADER_CURRENT_VERSION) ); } public void appendSnapshotFooterMessage(long timestamp, SnapshotFooterRecord snapshotHeaderRecord) { appendControlRecord( timestamp, ControlRecordType.SNAPSHOT_FOOTER, 
MessageUtil.toByteBuffer(snapshotHeaderRecord, ControlRecordUtils.SNAPSHOT_FOOTER_CURRENT_VERSION) ); } /** * Add a legacy record without doing offset/magic validation (this should only be used in testing). * @param offset The offset of the record * @param record The record to add */ public void appendUncheckedWithOffset(long offset, LegacyRecord record) { ensureOpenForRecordAppend(); try { int size = record.sizeInBytes(); AbstractLegacyRecordBatch.writeHeader(appendStream, toInnerOffset(offset), size); ByteBuffer buffer = record.buffer().duplicate(); appendStream.write(buffer.array(), buffer.arrayOffset(), buffer.limit()); recordWritten(offset, record.timestamp(), size + Records.LOG_OVERHEAD); } catch (IOException e) { throw new KafkaException("I/O exception when writing to the append stream, closing", e); } } /** * Append a record without doing offset/magic validation (this should only be used in testing). * * @param offset The offset of the record * @param record The record to add */ public void appendUncheckedWithOffset(long offset, SimpleRecord record) throws IOException { if (magic >= RecordBatch.MAGIC_VALUE_V2) { int offsetDelta = (int) (offset - baseOffset); long timestamp = record.timestamp(); if (baseTimestamp == null) baseTimestamp = timestamp; int sizeInBytes = DefaultRecord.writeTo(appendStream, offsetDelta, timestamp - baseTimestamp, record.key(), record.value(), record.headers()); recordWritten(offset, timestamp, sizeInBytes); } else { LegacyRecord legacyRecord = LegacyRecord.create(magic, record.timestamp(), Utils.toNullableArray(record.key()), Utils.toNullableArray(record.value())); appendUncheckedWithOffset(offset, legacyRecord); } } /** * Append a record at the next sequential offset. 
* @param record the record to add */ public void append(Record record) { appendWithOffset(record.offset(), isControlBatch, record.timestamp(), record.key(), record.value(), record.headers()); } /** * Append a log record using a different offset * @param offset The offset of the record * @param record The record to add */ public void appendWithOffset(long offset, Record record) { appendWithOffset(offset, record.timestamp(), record.key(), record.value(), record.headers()); } /** * Add a record with a given offset. The record must have a magic which matches the magic use to * construct this builder and the offset must be greater than the last appended record. * @param offset The offset of the record * @param record The record to add */ public void appendWithOffset(long offset, LegacyRecord record) { appendWithOffset(offset, record.timestamp(), record.key(), record.value()); } /** * Append the record at the next consecutive offset. If no records have been appended yet, use the base * offset of this builder. 
* @param record The record to add */ public void append(LegacyRecord record) { appendWithOffset(nextSequentialOffset(), record); } private void appendDefaultRecord(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) throws IOException { ensureOpenForRecordAppend(); int offsetDelta = (int) (offset - baseOffset); long timestampDelta = timestamp - baseTimestamp; int sizeInBytes = DefaultRecord.writeTo(appendStream, offsetDelta, timestampDelta, key, value, headers); recordWritten(offset, timestamp, sizeInBytes); } private long appendLegacyRecord(long offset, long timestamp, ByteBuffer key, ByteBuffer value, byte magic) throws IOException { ensureOpenForRecordAppend(); if (compressionType == CompressionType.NONE && timestampType == TimestampType.LOG_APPEND_TIME) timestamp = logAppendTime; int size = LegacyRecord.recordSize(magic, key, value); AbstractLegacyRecordBatch.writeHeader(appendStream, toInnerOffset(offset), size); if (timestampType == TimestampType.LOG_APPEND_TIME) timestamp = logAppendTime; long crc = LegacyRecord.write(appendStream, magic, timestamp, key, value, CompressionType.NONE, timestampType); recordWritten(offset, timestamp, size + Records.LOG_OVERHEAD); return crc; } private long toInnerOffset(long offset) { // use relative offsets for compressed messages with magic v1 if (magic > 0 && compressionType != CompressionType.NONE) return offset - baseOffset; return offset; } private void recordWritten(long offset, long timestamp, int size) { if (numRecords == Integer.MAX_VALUE) throw new IllegalArgumentException("Maximum number of records per batch exceeded, max records: " + Integer.MAX_VALUE); if (offset - baseOffset > Integer.MAX_VALUE) throw new IllegalArgumentException("Maximum offset delta exceeded, base offset: " + baseOffset + ", last offset: " + offset); numRecords += 1; uncompressedRecordsSizeInBytes += size; lastOffset = offset; if (magic > RecordBatch.MAGIC_VALUE_V0 && timestamp > maxTimestamp) { maxTimestamp = 
timestamp; offsetOfMaxTimestamp = offset; } } private void ensureOpenForRecordAppend() { if (appendStream == CLOSED_STREAM) throw new IllegalStateException("Tried to append a record, but MemoryRecordsBuilder is closed for record appends"); } private void ensureOpenForRecordBatchWrite() { if (isClosed()) throw new IllegalStateException("Tried to write record batch header, but MemoryRecordsBuilder is closed"); if (aborted) throw new IllegalStateException("Tried to write record batch header, but MemoryRecordsBuilder is aborted"); } /** * Get an estimate of the number of bytes written (based on the estimation factor hard-coded in {@link CompressionType}. * @return The estimated number of bytes written */ private int estimatedBytesWritten() { if (compressionType == CompressionType.NONE) { return batchHeaderSizeInBytes + uncompressedRecordsSizeInBytes; } else { // estimate the written bytes to the underlying byte buffer based on uncompressed written bytes return batchHeaderSizeInBytes + (int) (uncompressedRecordsSizeInBytes * estimatedCompressionRatio * COMPRESSION_RATE_ESTIMATION_FACTOR); } } /** * Set the estimated compression ratio for the memory records builder. */ public void setEstimatedCompressionRatio(float estimatedCompressionRatio) { this.estimatedCompressionRatio = estimatedCompressionRatio; } /** * Check if we have room for a new record containing the given key/value pair. If no records have been * appended, then this returns true. */ public boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers) { return hasRoomFor(timestamp, wrapNullable(key), wrapNullable(value), headers); } /** * Check if we have room for a new record containing the given key/value pair. If no records have been * appended, then this returns true. * * Note that the return value is based on the estimate of the bytes written to the compressor, which may not be * accurate if compression is used. 
When this happens, the following append may cause dynamic buffer * re-allocation in the underlying byte buffer stream. */ public boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) { if (isFull()) return false; // We always allow at least one record to be appended (the ByteBufferOutputStream will grow as needed) if (numRecords == 0) return true; final int recordSize; if (magic < RecordBatch.MAGIC_VALUE_V2) { recordSize = Records.LOG_OVERHEAD + LegacyRecord.recordSize(magic, key, value); } else { int nextOffsetDelta = lastOffset == null ? 0 : (int) (lastOffset - baseOffset + 1); long timestampDelta = baseTimestamp == null ? 0 : timestamp - baseTimestamp; recordSize = DefaultRecord.sizeInBytes(nextOffsetDelta, timestampDelta, key, value, headers); } // Be conservative and not take compression of the new record into consideration. return this.writeLimit >= estimatedBytesWritten() + recordSize; } public boolean isClosed() { return builtRecords != null; } public boolean isFull() { // note that the write limit is respected only after the first record is added which ensures we can always // create non-empty batches (this is used to disable batching when the producer's batch size is set to 0). return appendStream == CLOSED_STREAM || (this.numRecords > 0 && this.writeLimit <= estimatedBytesWritten()); } /** * Get an estimate of the number of bytes written to the underlying buffer. The returned value * is exactly correct if the record set is not compressed or if the builder has been closed. */ public int estimatedSizeInBytes() { return builtRecords != null ? builtRecords.sizeInBytes() : estimatedBytesWritten(); } public byte magic() { return magic; } private long nextSequentialOffset() { return lastOffset == null ? 
baseOffset : lastOffset + 1; } public static class RecordsInfo { public final long maxTimestamp; public final long shallowOffsetOfMaxTimestamp; public RecordsInfo(long maxTimestamp, long shallowOffsetOfMaxTimestamp) { this.maxTimestamp = maxTimestamp; this.shallowOffsetOfMaxTimestamp = shallowOffsetOfMaxTimestamp; } } /** * Return the producer id of the RecordBatches created by this builder. */ public long producerId() { return this.producerId; } public short producerEpoch() { return this.producerEpoch; } public int baseSequence() { return this.baseSequence; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/MultiRecordsSend.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.network.Send;
import org.apache.kafka.common.network.TransferableChannel;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.Queue;

/**
 * A set of composite sends with nested {@link RecordsSend}, sent one after another
 */
public class MultiRecordsSend implements Send {
    private static final Logger log = LoggerFactory.getLogger(MultiRecordsSend.class);

    private final Queue<Send> sendQueue;
    private final long size;
    private Map<TopicPartition, RecordConversionStats> recordConversionStats;

    private long totalWritten = 0;
    private Send current;

    /**
     * Construct a MultiRecordsSend from a queue of Send objects. The queue will be consumed as the MultiRecordsSend
     * progresses (on completion, it will be empty).
     */
    public MultiRecordsSend(Queue<Send> sends) {
        this.sendQueue = sends;

        long size = 0;
        for (Send send : sends)
            size += send.size();
        this.size = size;

        this.current = sendQueue.poll();
    }

    public MultiRecordsSend(Queue<Send> sends, long size) {
        this.sendQueue = sends;
        this.size = size;
        this.current = sendQueue.poll();
    }

    @Override
    public long size() {
        return size;
    }

    @Override
    public boolean completed() {
        return current == null;
    }

    // Visible for testing
    int numResidentSends() {
        int count = 0;
        if (current != null)
            count += 1;
        count += sendQueue.size();
        return count;
    }

    @Override
    public long writeTo(TransferableChannel channel) throws IOException {
        if (completed())
            throw new KafkaException("This operation cannot be invoked on a complete request.");

        // Use a long accumulator: `+=` on an int would implicitly narrow the long returned by
        // Send.writeTo and could silently truncate for very large sends (> 2 GiB).
        long totalWrittenPerCall = 0;
        boolean sendComplete;
        do {
            long written = current.writeTo(channel);
            totalWrittenPerCall += written;
            sendComplete = current.completed();
            if (sendComplete) {
                updateRecordConversionStats(current);
                current = sendQueue.poll();
            }
        } while (!completed() && sendComplete);

        totalWritten += totalWrittenPerCall;

        if (completed() && totalWritten != size)
            log.error("mismatch in sending bytes over socket; expected: {} actual: {}", size, totalWritten);

        log.trace("Bytes written as part of multi-send call: {}, total bytes written so far: {}, expected bytes to write: {}",
            totalWrittenPerCall, totalWritten, size);

        return totalWrittenPerCall;
    }

    /**
     * Get any statistics that were recorded as part of executing this {@link MultiRecordsSend}.
     * @return Records processing statistics (could be null if no statistics were collected)
     */
    public Map<TopicPartition, RecordConversionStats> recordConversionStats() {
        return recordConversionStats;
    }

    @Override
    public String toString() {
        return "MultiRecordsSend(" +
            "size=" + size +
            ", totalWritten=" + totalWritten +
            ')';
    }

    private void updateRecordConversionStats(Send completedSend) {
        // The underlying send might have accumulated statistics that need to be recorded. For example,
        // LazyDownConversionRecordsSend accumulates statistics related to the number of bytes down-converted, the amount
        // of temporary memory used for down-conversion, etc. Pull out any such statistics from the underlying send
        // and fold it up appropriately.
        if (completedSend instanceof LazyDownConversionRecordsSend) {
            if (recordConversionStats == null)
                recordConversionStats = new HashMap<>();
            LazyDownConversionRecordsSend lazyRecordsSend = (LazyDownConversionRecordsSend) completedSend;
            recordConversionStats.put(lazyRecordsSend.topicPartition(), lazyRecordsSend.recordConversionStats());
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/MutableRecordBatch.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.utils.BufferSupplier;
import org.apache.kafka.common.utils.ByteBufferOutputStream;
import org.apache.kafka.common.utils.CloseableIterator;

/**
 * A mutable record batch is one that can be modified in place (without copying). This is used by the broker
 * to override certain fields in the batch before appending it to the log.
 */
public interface MutableRecordBatch extends RecordBatch {

    /**
     * Set the last offset of this batch.
     * @param offset The last offset to use
     */
    void setLastOffset(long offset);

    /**
     * Set the max timestamp for this batch. When using log append time, this effectively overrides the individual
     * timestamps of all the records contained in the batch. To avoid recompression, the record fields are not updated
     * by this method, but clients ignore them if the timestamp time is log append time. Note that baseTimestamp is not
     * updated by this method.
     *
     * This typically requires re-computation of the batch's CRC.
     *
     * @param timestampType The timestamp type
     * @param maxTimestamp The maximum timestamp
     */
    void setMaxTimestamp(TimestampType timestampType, long maxTimestamp);

    /**
     * Set the partition leader epoch for this batch of records.
     * @param epoch The partition leader epoch to use
     */
    void setPartitionLeaderEpoch(int epoch);

    /**
     * Write this record batch into an output stream.
     * @param outputStream The buffer to write the batch to
     */
    void writeTo(ByteBufferOutputStream outputStream);

    /**
     * Return an iterator which skips parsing key, value and headers from the record stream, and therefore the resulted
     * {@code org.apache.kafka.common.record.Record}'s key and value fields would be empty. This iterator is used
     * when the read record's key and value are not needed and hence can save some byte buffer allocating / GC overhead.
     *
     * @param bufferSupplier The supplier of ByteBuffer(s) used during iteration (e.g. for decompression)
     * @return The closeable iterator; callers are responsible for closing it
     */
    CloseableIterator<Record> skipKeyValueIterator(BufferSupplier bufferSupplier);

}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/PartialDefaultRecord.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; import org.apache.kafka.common.header.Header; import java.nio.ByteBuffer; public class PartialDefaultRecord extends DefaultRecord { private final int keySize; private final int valueSize; PartialDefaultRecord(int sizeInBytes, byte attributes, long offset, long timestamp, int sequence, int keySize, int valueSize) { super(sizeInBytes, attributes, offset, timestamp, sequence, null, null, null); this.keySize = keySize; this.valueSize = valueSize; } @Override public boolean equals(Object o) { return super.equals(o) && this.keySize == ((PartialDefaultRecord) o).keySize && this.valueSize == ((PartialDefaultRecord) o).valueSize; } @Override public int hashCode() { int result = super.hashCode(); result = 31 * result + keySize; result = 31 * result + valueSize; return result; } @Override public String toString() { return String.format("PartialDefaultRecord(offset=%d, timestamp=%d, key=%d bytes, value=%d bytes)", offset(), timestamp(), keySize, valueSize); } @Override public int keySize() { return keySize; } @Override public boolean hasKey() { return keySize >= 0; } @Override public ByteBuffer key() { throw new UnsupportedOperationException("key is skipped in 
PartialDefaultRecord"); } @Override public int valueSize() { return valueSize; } @Override public boolean hasValue() { return valueSize >= 0; } @Override public ByteBuffer value() { throw new UnsupportedOperationException("value is skipped in PartialDefaultRecord"); } @Override public Header[] headers() { throw new UnsupportedOperationException("headers is skipped in PartialDefaultRecord"); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/Record.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import java.nio.ByteBuffer;

import org.apache.kafka.common.header.Header;

/**
 * A log record is a tuple consisting of a unique offset in the log, a sequence number assigned by
 * the producer, a timestamp, a key and a value.
 */
public interface Record {

    // Shared empty array used for records with no headers (magic versions 1 and below).
    Header[] EMPTY_HEADERS = new Header[0];

    /**
     * The offset of this record in the log
     * @return the offset
     */
    long offset();

    /**
     * Get the sequence number assigned by the producer.
     * @return the sequence number
     */
    int sequence();

    /**
     * Get the size in bytes of this record.
     * @return the size of the record in bytes
     */
    int sizeInBytes();

    /**
     * Get the record's timestamp.
     * @return the record's timestamp
     */
    long timestamp();

    /**
     * Raise a {@link org.apache.kafka.common.errors.CorruptRecordException} if the record does not have a valid checksum.
     */
    void ensureValid();

    /**
     * Get the size in bytes of the key.
     * @return the size of the key, or -1 if there is no key
     */
    int keySize();

    /**
     * Check whether this record has a key
     * @return true if there is a key, false otherwise
     */
    boolean hasKey();

    /**
     * Get the record's key.
     * @return the key or null if there is none
     */
    ByteBuffer key();

    /**
     * Get the size in bytes of the value.
     * @return the size of the value, or -1 if the value is null
     */
    int valueSize();

    /**
     * Check whether a value is present (i.e. if the value is not null)
     * @return true if so, false otherwise
     */
    boolean hasValue();

    /**
     * Get the record's value
     * @return the (nullable) value
     */
    ByteBuffer value();

    /**
     * Check whether the record has a particular magic. For versions prior to 2, the record contains its own magic,
     * so this function can be used to check whether it matches a particular value. For version 2 and above, this
     * method returns true if the passed magic is greater than or equal to 2.
     *
     * @param magic the magic value to check
     * @return true if the record has a magic field (versions prior to 2) and the value matches
     */
    boolean hasMagic(byte magic);

    /**
     * For versions prior to 2, check whether the record is compressed (and therefore
     * has nested record content). For versions 2 and above, this always returns false.
     * @return true if the magic is lower than 2 and the record is compressed
     */
    boolean isCompressed();

    /**
     * For versions prior to 2, the record contained a timestamp type attribute. This method can be
     * used to check whether the value of that attribute matches a particular timestamp type. For versions
     * 2 and above, this will always be false.
     *
     * @param timestampType the timestamp type to compare
     * @return true if the version is lower than 2 and the timestamp type matches
     */
    boolean hasTimestampType(TimestampType timestampType);

    /**
     * Get the headers. For magic versions 1 and below, this always returns an empty array.
     *
     * @return the array of headers
     */
    Header[] headers();

}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/RecordBatch.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.utils.BufferSupplier;
import org.apache.kafka.common.utils.CloseableIterator;

import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.OptionalLong;

/**
 * A record batch is a container for records. In old versions of the record format (versions 0 and 1),
 * a batch consisted always of a single record if no compression was enabled, but could contain
 * many records otherwise. Newer versions (magic versions 2 and above) will generally contain many records
 * regardless of compression.
 */
public interface RecordBatch extends Iterable<Record> {

    /**
     * The "magic" values
     */
    byte MAGIC_VALUE_V0 = 0;
    byte MAGIC_VALUE_V1 = 1;
    byte MAGIC_VALUE_V2 = 2;

    /**
     * The current "magic" value
     */
    byte CURRENT_MAGIC_VALUE = MAGIC_VALUE_V2;

    /**
     * Timestamp value for records without a timestamp
     */
    long NO_TIMESTAMP = -1L;

    /**
     * Values used in the v2 record format by non-idempotent/non-transactional producers or when
     * up-converting from an older format.
     */
    long NO_PRODUCER_ID = -1L;
    short NO_PRODUCER_EPOCH = -1;
    int NO_SEQUENCE = -1;

    /**
     * Used to indicate an unknown leader epoch, which will be the case when the record set is
     * first created by the producer.
     */
    int NO_PARTITION_LEADER_EPOCH = -1;

    /**
     * Check whether the checksum of this batch is correct.
     *
     * @return true If so, false otherwise
     */
    boolean isValid();

    /**
     * Raise an exception if the checksum is not valid.
     */
    void ensureValid();

    /**
     * Get the checksum of this record batch, which covers the batch header as well as all of the records.
     *
     * @return The 4-byte unsigned checksum represented as a long
     */
    long checksum();

    /**
     * Get the max timestamp or log append time of this record batch.
     *
     * If the timestamp type is create time, this is the max timestamp among all records contained in this batch and
     * the value is updated during compaction.
     *
     * @return The max timestamp
     */
    long maxTimestamp();

    /**
     * Get the timestamp type of this record batch. This will be {@link TimestampType#NO_TIMESTAMP_TYPE}
     * if the batch has magic 0.
     *
     * @return The timestamp type
     */
    TimestampType timestampType();

    /**
     * Get the base offset contained in this record batch. For magic version prior to 2, the base offset will
     * always be the offset of the first message in the batch. This generally requires deep iteration and will
     * return the offset of the first record in the record batch. For magic version 2 and above, this will return
     * the first offset of the original record batch (i.e. prior to compaction). For non-compacted topics, the
     * behavior is equivalent.
     *
     * Because this requires deep iteration for older magic versions, this method should be used with
     * caution. Generally {@link #lastOffset()} is safer since access is efficient for all magic versions.
     *
     * @return The base offset of this record batch (which may or may not be the offset of the first record
     *         as described above).
     */
    long baseOffset();

    /**
     * Get the last offset in this record batch (inclusive). Just like {@link #baseOffset()}, the last offset
     * always reflects the offset of the last record in the original batch, even if it is removed during log
     * compaction.
     *
     * @return The offset of the last record in this batch
     */
    long lastOffset();

    /**
     * Get the offset following this record batch (i.e. the last offset contained in this batch plus one).
     *
     * @return the next consecutive offset following this batch
     */
    long nextOffset();

    /**
     * Get the record format version of this record batch (i.e its magic value).
     *
     * @return the magic byte
     */
    byte magic();

    /**
     * Get the producer id for this log record batch. For older magic versions, this will return -1.
     *
     * @return The producer id or -1 if there is none
     */
    long producerId();

    /**
     * Get the producer epoch for this log record batch.
     *
     * @return The producer epoch, or -1 if there is none
     */
    short producerEpoch();

    /**
     * Does the batch have a valid producer id set.
     */
    boolean hasProducerId();

    /**
     * Get the base sequence number of this record batch. Like {@link #baseOffset()}, this value is not
     * affected by compaction: it always retains the base sequence number from the original batch.
     *
     * @return The first sequence number or -1 if there is none
     */
    int baseSequence();

    /**
     * Get the last sequence number of this record batch. Like {@link #lastOffset()}, the last sequence number
     * always reflects the sequence number of the last record in the original batch, even if it is removed during log
     * compaction.
     *
     * @return The last sequence number or -1 if there is none
     */
    int lastSequence();

    /**
     * Get the compression type of this record batch.
     *
     * @return The compression type
     */
    CompressionType compressionType();

    /**
     * Get the size in bytes of this batch, including the size of the record and the batch overhead.
     * @return The size in bytes of this batch
     */
    int sizeInBytes();

    /**
     * Get the count if it is efficiently supported by the record format (which is only the case
     * for magic 2 and higher).
     *
     * @return The number of records in the batch or null for magic versions 0 and 1.
     */
    Integer countOrNull();

    /**
     * Check whether this record batch is compressed.
     * @return true if so, false otherwise
     */
    boolean isCompressed();

    /**
     * Write this record batch into a buffer.
     * @param buffer The buffer to write the batch to
     */
    void writeTo(ByteBuffer buffer);

    /**
     * Whether or not this record batch is part of a transaction.
     * @return true if it is, false otherwise
     */
    boolean isTransactional();

    /**
     * Get the delete horizon, returns OptionalLong.EMPTY if the first timestamp is not the delete horizon
     * @return timestamp of the delete horizon
     */
    OptionalLong deleteHorizonMs();

    /**
     * Get the partition leader epoch of this record batch.
     * @return The leader epoch or -1 if it is unknown
     */
    int partitionLeaderEpoch();

    /**
     * Return a streaming iterator which basically delays decompression of the record stream until the records
     * are actually asked for using {@link Iterator#next()}. If the message format does not support streaming
     * iteration, then the normal iterator is returned. Either way, callers should ensure that the iterator is closed.
     *
     * @param decompressionBufferSupplier The supplier of ByteBuffer(s) used for decompression if supported.
     *                                    For small record batches, allocating a potentially large buffer (64 KB for LZ4)
     *                                    will dominate the cost of decompressing and iterating over the records in the
     *                                    batch. As such, a supplier that reuses buffers will have a significant
     *                                    performance impact.
     * @return The closeable iterator
     */
    CloseableIterator<Record> streamingIterator(BufferSupplier decompressionBufferSupplier);

    /**
     * Check whether this is a control batch (i.e. whether the control bit is set in the batch attributes).
     * For magic versions prior to 2, this is always false.
     *
     * @return Whether this is a batch containing control records
     */
    boolean isControlBatch();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/RecordBatchIterator.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.errors.CorruptRecordException; import org.apache.kafka.common.utils.AbstractIterator; import java.io.EOFException; import java.io.IOException; class RecordBatchIterator<T extends RecordBatch> extends AbstractIterator<T> { private final LogInputStream<T> logInputStream; RecordBatchIterator(LogInputStream<T> logInputStream) { this.logInputStream = logInputStream; } @Override protected T makeNext() { try { T batch = logInputStream.nextBatch(); if (batch == null) return allDone(); return batch; } catch (EOFException e) { throw new CorruptRecordException("Unexpected EOF while attempting to read the next batch", e); } catch (IOException e) { throw new KafkaException(e); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/RecordConversionStats.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; public class RecordConversionStats { public static final RecordConversionStats EMPTY = new RecordConversionStats(); private long temporaryMemoryBytes; private int numRecordsConverted; private long conversionTimeNanos; public RecordConversionStats(long temporaryMemoryBytes, int numRecordsConverted, long conversionTimeNanos) { this.temporaryMemoryBytes = temporaryMemoryBytes; this.numRecordsConverted = numRecordsConverted; this.conversionTimeNanos = conversionTimeNanos; } public RecordConversionStats() { this(0, 0, 0); } public void add(RecordConversionStats stats) { temporaryMemoryBytes += stats.temporaryMemoryBytes; numRecordsConverted += stats.numRecordsConverted; conversionTimeNanos += stats.conversionTimeNanos; } /** * Returns the number of temporary memory bytes allocated to process the records. 
* This size depends on whether the records need decompression and/or conversion: * <ul> * <li>Non compressed, no conversion: zero</li> * <li>Non compressed, with conversion: size of the converted buffer</li> * <li>Compressed, no conversion: size of the original buffer after decompression</li> * <li>Compressed, with conversion: size of the original buffer after decompression + size of the converted buffer uncompressed</li> * </ul> */ public long temporaryMemoryBytes() { return temporaryMemoryBytes; } public int numRecordsConverted() { return numRecordsConverted; } public long conversionTimeNanos() { return conversionTimeNanos; } @Override public String toString() { return String.format("RecordConversionStats(temporaryMemoryBytes=%d, numRecordsConverted=%d, conversionTimeNanos=%d)", temporaryMemoryBytes, numRecordsConverted, conversionTimeNanos); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/RecordVersion.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.record; /** * Defines the record format versions supported by Kafka. * * For historical reasons, the record format version is also known as `magic` and `message format version`. Note that * the version actually applies to the {@link RecordBatch} (instead of the {@link Record}). Finally, the * `message.format.version` topic config confusingly expects an ApiVersion instead of a RecordVersion. */ public enum RecordVersion { V0(0), V1(1), V2(2); private static final RecordVersion[] VALUES = values(); public final byte value; RecordVersion(int value) { this.value = (byte) value; } /** * Check whether this version precedes another version. * * @return true only if the magic value is less than the other's */ public boolean precedes(RecordVersion other) { return this.value < other.value; } public static RecordVersion lookup(byte value) { if (value < 0 || value >= VALUES.length) throw new IllegalArgumentException("Unknown record version: " + value); return VALUES[value]; } public static RecordVersion current() { return V2; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/Records.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.utils.AbstractIterator;
import org.apache.kafka.common.utils.Time;

import java.util.Iterator;

/**
 * Interface for accessing the records contained in a log. The log itself is represented as a sequence of record
 * batches (see {@link RecordBatch}).
 *
 * For magic versions 1 and below, each batch consists of an 8 byte offset, a 4 byte record size, and a "shallow"
 * {@link Record record}. If the batch is not compressed, then each batch will have only the shallow record contained
 * inside it. If it is compressed, the batch contains "deep" records, which are packed into the value field of the
 * shallow record. To iterate over the shallow batches, use {@link Records#batches()}; for the deep records, use
 * {@link Records#records()}. Note that the deep iterator handles both compressed and non-compressed batches:
 * if the batch is not compressed, the shallow record is returned; otherwise, the shallow batch is decompressed and the
 * deep records are returned.
 *
 * For magic version 2, every batch contains 1 or more log record, regardless of compression. You can iterate
 * over the batches directly using {@link Records#batches()}. Records can be iterated either directly from an individual
 * batch or through {@link Records#records()}. Just as in previous versions, iterating over the records typically involves
 * decompression and should therefore be used with caution.
 *
 * See {@link MemoryRecords} for the in-memory representation and {@link FileRecords} for the on-disk representation.
 */
public interface Records extends TransferableRecords {
    // Byte layout of the legacy (magic < 2) log entry header: [8-byte offset][4-byte size][record].
    int OFFSET_OFFSET = 0;
    int OFFSET_LENGTH = 8;
    int SIZE_OFFSET = OFFSET_OFFSET + OFFSET_LENGTH;
    int SIZE_LENGTH = 4;
    int LOG_OVERHEAD = SIZE_OFFSET + SIZE_LENGTH;

    // The magic offset is at the same offset for all current message formats, but the 4 bytes
    // between the size and the magic is dependent on the version.
    int MAGIC_OFFSET = LOG_OVERHEAD + 4;
    int MAGIC_LENGTH = 1;
    int HEADER_SIZE_UP_TO_MAGIC = MAGIC_OFFSET + MAGIC_LENGTH;

    /**
     * Get the record batches. Note that the signature allows subclasses
     * to return a more specific batch type. This enables optimizations such as in-place offset
     * assignment (see for example {@link DefaultRecordBatch}), and partial reading of
     * record data (see {@link FileLogInputStream.FileChannelRecordBatch#magic()}.
     * @return An iterator over the record batches of the log
     */
    Iterable<? extends RecordBatch> batches();

    /**
     * Get an iterator over the record batches. This is similar to {@link #batches()} but returns an {@link AbstractIterator}
     * instead of {@link Iterator}, so that clients can use methods like {@link AbstractIterator#peek() peek}.
     * @return An iterator over the record batches of the log
     */
    AbstractIterator<? extends RecordBatch> batchIterator();

    /**
     * Check whether all batches in this buffer have a certain magic value.
     * @param magic The magic value to check
     * @return true if all record batches have a matching magic value, false otherwise
     */
    boolean hasMatchingMagic(byte magic);

    /**
     * Convert all batches in this buffer to the format passed as a parameter. Note that this requires
     * deep iteration since all of the deep records must also be converted to the desired format.
     * @param toMagic The magic value to convert to
     * @param firstOffset The starting offset for returned records. This only impacts some cases. See
     *                    {@link RecordsUtil#downConvert(Iterable, byte, long, Time)} for an explanation.
     * @param time instance used for reporting stats
     * @return A ConvertedRecords instance which may or may not contain the same instance in its records field.
     */
    ConvertedRecords<? extends Records> downConvert(byte toMagic, long firstOffset, Time time);

    /**
     * Get an iterator over the records in this log. Note that this generally requires decompression,
     * and should therefore be used with care.
     * @return The record iterator
     */
    Iterable<Record> records();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/RecordsSend.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.network.Send;
import org.apache.kafka.common.network.TransferableChannel;

import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;

/**
 * Base {@link Send} implementation for writing a {@link BaseRecords} payload of a known maximum
 * size to a {@link TransferableChannel}. Subclasses supply the actual write logic via the
 * stateful three-argument {@link #writeTo(TransferableChannel, long, int)}; this class tracks
 * how many bytes remain and whether the channel still holds pending (buffered) writes.
 */
public abstract class RecordsSend<T extends BaseRecords> implements Send {
    private static final ByteBuffer EMPTY_BYTE_BUFFER = ByteBuffer.allocate(0);

    private final T records;
    private final int maxBytesToWrite;
    // Bytes of the payload not yet handed to the channel; counts down from maxBytesToWrite.
    private int remaining;
    // True while the channel reports buffered-but-unflushed data after our last write.
    private boolean pending = false;

    /**
     * @param records the records payload backing this send
     * @param maxBytesToWrite the total number of bytes this send will write
     */
    protected RecordsSend(T records, int maxBytesToWrite) {
        this.records = records;
        this.maxBytesToWrite = maxBytesToWrite;
        this.remaining = maxBytesToWrite;
    }

    @Override
    public boolean completed() {
        // Done only when all payload bytes are written AND the channel has flushed them.
        return remaining <= 0 && !pending;
    }

    @Override
    public final long writeTo(TransferableChannel channel) throws IOException {
        long written = 0;

        if (remaining > 0) {
            written = writeTo(channel, size() - remaining, remaining);
            if (written < 0)
                throw new EOFException("Wrote negative bytes to channel. This shouldn't happen.");
            remaining -= written;
        }

        pending = channel.hasPendingWrites();
        // NOTE(review): once the payload is fully written, a zero-length write appears to
        // give the channel a chance to drain any bytes it is still buffering internally
        // (e.g. a transport layer that frames/encrypts data) — confirm against
        // TransferableChannel implementations.
        if (remaining <= 0 && pending)
            channel.write(EMPTY_BYTE_BUFFER);

        return written;
    }

    @Override
    public long size() {
        return maxBytesToWrite;
    }

    /** @return the records payload backing this send */
    protected T records() {
        return records;
    }

    /**
     * Write records up to `remaining` bytes to `channel`. The implementation is allowed to be stateful. The contract
     * from the caller is that the first invocation will be with `previouslyWritten` equal to 0, and `remaining` equal to
     * the to maximum bytes we want to write the to `channel`. `previouslyWritten` and `remaining` will be adjusted
     * appropriately for every subsequent invocation. See {@link #writeTo} for example expected usage.
     * @param channel The channel to write to
     * @param previouslyWritten Bytes written in previous calls to {@link #writeTo(TransferableChannel, long, int)}; 0 if being called for the first time
     * @param remaining Number of bytes remaining to be written
     * @return The number of bytes actually written
     * @throws IOException For any IO errors
     */
    protected abstract long writeTo(TransferableChannel channel, long previouslyWritten, int remaining) throws IOException;
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/RecordsUtil.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.errors.UnsupportedCompressionTypeException;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

public class RecordsUtil {

    /**
     * Down convert batches to the provided message format version. The first offset parameter is only relevant in the
     * conversion from uncompressed v2 or higher to v1 or lower. The reason is that uncompressed records in v0 and v1
     * are not batched (put another way, each batch always has 1 record).
     *
     * If a client requests records in v1 format starting from the middle of an uncompressed batch in v2 format, we
     * need to drop records from the batch during the conversion. Some versions of librdkafka rely on this for
     * correctness.
     *
     * The temporaryMemoryBytes computation assumes that the batches are not loaded into the heap
     * (via classes like FileChannelRecordBatch) before this method is called. This is the case in the broker (we
     * only load records into the heap when down converting), but it's not for the producer. However, down converting
     * in the producer is very uncommon and the extra complexity to handle that case is not worth it.
     *
     * @param batches     the batches to convert
     * @param toMagic     the target magic (message format) version
     * @param firstOffset first offset the caller is interested in; only used for uncompressed v2+ to v1- conversion
     * @param time        clock used to measure conversion time for the returned stats
     * @return the converted records together with conversion statistics
     */
    protected static ConvertedRecords<MemoryRecords> downConvert(Iterable<? extends RecordBatch> batches, byte toMagic,
                                                                 long firstOffset, Time time) {
        // maintain the batch along with the decompressed records to avoid the need to decompress again
        List<RecordBatchAndRecords> recordBatchAndRecordsList = new ArrayList<>();
        int totalSizeEstimate = 0;
        long startNanos = time.nanoseconds();

        // First pass: decide per batch whether it can be retained as-is or must be converted,
        // and build a size estimate so the output buffer rarely needs to grow.
        for (RecordBatch batch : batches) {
            if (toMagic < RecordBatch.MAGIC_VALUE_V2) {
                // Control batches and zstd compression do not exist below v2.
                if (batch.isControlBatch())
                    continue;

                if (batch.compressionType() == CompressionType.ZSTD)
                    throw new UnsupportedCompressionTypeException("Down-conversion of zstandard-compressed batches " +
                            "is not supported");
            }

            if (batch.magic() <= toMagic) {
                // Already at (or below) the target version; copy the batch through unchanged.
                totalSizeEstimate += batch.sizeInBytes();
                recordBatchAndRecordsList.add(new RecordBatchAndRecords(batch, null, null));
            } else {
                List<Record> records = new ArrayList<>();
                for (Record record : batch) {
                    // See the method javadoc for an explanation
                    if (toMagic > RecordBatch.MAGIC_VALUE_V1 || batch.isCompressed() || record.offset() >= firstOffset)
                        records.add(record);
                }
                if (records.isEmpty())
                    continue;
                final long baseOffset;
                if (batch.magic() >= RecordBatch.MAGIC_VALUE_V2 && toMagic >= RecordBatch.MAGIC_VALUE_V2)
                    baseOffset = batch.baseOffset();
                else
                    baseOffset = records.get(0).offset();
                totalSizeEstimate += AbstractRecords.estimateSizeInBytes(toMagic, baseOffset, batch.compressionType(), records);
                recordBatchAndRecordsList.add(new RecordBatchAndRecords(batch, records, baseOffset));
            }
        }

        ByteBuffer buffer = ByteBuffer.allocate(totalSizeEstimate);
        long temporaryMemoryBytes = 0;
        int numRecordsConverted = 0;

        // Second pass: write batches into the output buffer, converting where required.
        for (RecordBatchAndRecords recordBatchAndRecords : recordBatchAndRecordsList) {
            temporaryMemoryBytes += recordBatchAndRecords.batch.sizeInBytes();
            if (recordBatchAndRecords.batch.magic() <= toMagic) {
                // The estimate may be exceeded once batches start being converted, so grow on demand.
                buffer = Utils.ensureCapacity(buffer, buffer.position() + recordBatchAndRecords.batch.sizeInBytes());
                recordBatchAndRecords.batch.writeTo(buffer);
            } else {
                MemoryRecordsBuilder builder = convertRecordBatch(toMagic, buffer, recordBatchAndRecords);
                buffer = builder.buffer();
                temporaryMemoryBytes += builder.uncompressedBytesWritten();
                numRecordsConverted += builder.numRecords();
            }
        }

        buffer.flip();
        RecordConversionStats stats = new RecordConversionStats(temporaryMemoryBytes, numRecordsConverted,
                time.nanoseconds() - startNanos);
        return new ConvertedRecords<>(MemoryRecords.readableRecords(buffer), stats);
    }

    /**
     * Return a buffer containing the converted record batches. The returned buffer may not be the same as the received
     * one (e.g. it may require expansion).
     */
    private static MemoryRecordsBuilder convertRecordBatch(byte magic, ByteBuffer buffer,
                                                           RecordBatchAndRecords recordBatchAndRecords) {
        RecordBatch batch = recordBatchAndRecords.batch;
        final TimestampType timestampType = batch.timestampType();
        // LOG_APPEND_TIME batches carry a single batch-level timestamp; preserve it across the conversion.
        long logAppendTime = timestampType == TimestampType.LOG_APPEND_TIME ? batch.maxTimestamp() : RecordBatch.NO_TIMESTAMP;

        MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic, batch.compressionType(),
                timestampType, recordBatchAndRecords.baseOffset, logAppendTime);
        for (Record record : recordBatchAndRecords.records) {
            // Down-convert this record. Ignore headers when down-converting to V0 and V1 since they are not supported
            if (magic > RecordBatch.MAGIC_VALUE_V1)
                builder.append(record);
            else
                builder.appendWithOffset(record.offset(), record.timestamp(), record.key(), record.value());
        }

        builder.close();
        return builder;
    }

    /**
     * Pairs a batch with its (possibly null) decompressed records and base offset so the records
     * need not be decompressed a second time during the write pass.
     */
    private static class RecordBatchAndRecords {
        private final RecordBatch batch;
        private final List<Record> records;
        private final Long baseOffset;

        private RecordBatchAndRecords(RecordBatch batch, List<Record> records, Long baseOffset) {
            this.batch = batch;
            this.records = records;
            this.baseOffset = baseOffset;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/RemoteLogInputStream.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.errors.CorruptRecordException;
import org.apache.kafka.common.utils.Utils;

import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;

import static org.apache.kafka.common.record.Records.HEADER_SIZE_UP_TO_MAGIC;
import static org.apache.kafka.common.record.Records.LOG_OVERHEAD;
import static org.apache.kafka.common.record.Records.MAGIC_OFFSET;
import static org.apache.kafka.common.record.Records.SIZE_OFFSET;

/**
 * A {@link LogInputStream} that reads record batches sequentially from a plain
 * {@link InputStream}, e.g. log segment data fetched from remote storage.
 */
public class RemoteLogInputStream implements LogInputStream<RecordBatch> {
    private final InputStream inputStream;

    // Scratch buffer reused for the fixed-size batch header up to (and including) the magic byte.
    private final ByteBuffer logHeaderBuffer = ByteBuffer.allocate(HEADER_SIZE_UP_TO_MAGIC);

    public RemoteLogInputStream(InputStream inputStream) {
        this.inputStream = inputStream;
    }

    @Override
    public RecordBatch nextBatch() throws IOException {
        // Read the header prefix first; a short read means the stream is exhausted.
        logHeaderBuffer.clear();
        Utils.readFully(inputStream, logHeaderBuffer);
        if (logHeaderBuffer.position() < HEADER_SIZE_UP_TO_MAGIC)
            return null;

        logHeaderBuffer.rewind();
        int size = logHeaderBuffer.getInt(SIZE_OFFSET);

        // V0 has the smallest overhead, stricter checking is done later
        if (size < LegacyRecord.RECORD_OVERHEAD_V0)
            throw new CorruptRecordException(String.format(
                    "Found record size %d smaller than minimum record overhead (%d).",
                    size, LegacyRecord.RECORD_OVERHEAD_V0));

        // Full batch size is the log overhead plus the size recorded in the header.
        int totalSize = LOG_OVERHEAD + size;
        ByteBuffer completeBuffer = ByteBuffer.allocate(totalSize);

        // Copy the already-read header prefix, then read the remainder of the batch.
        completeBuffer.put(logHeaderBuffer);
        Utils.readFully(inputStream, completeBuffer);
        if (completeBuffer.position() != totalSize)
            return null; // truncated batch at end of stream
        completeBuffer.rewind();

        byte magic = logHeaderBuffer.get(MAGIC_OFFSET);
        MutableRecordBatch batch = magic > RecordBatch.MAGIC_VALUE_V1
                ? new DefaultRecordBatch(completeBuffer)
                : new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(completeBuffer);
        return batch;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/SimpleRecord.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.utils.Utils;

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Objects;

/**
 * High-level representation of a kafka record. This is useful when building record sets to
 * avoid depending on a specific magic version.
 */
public class SimpleRecord {
    private final ByteBuffer key;
    private final ByteBuffer value;
    private final long timestamp;
    private final Header[] headers;

    /**
     * Primary constructor. Key and value may be null; headers must not be.
     */
    public SimpleRecord(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) {
        Objects.requireNonNull(headers, "Headers must be non-null");
        this.key = key;
        this.value = value;
        this.timestamp = timestamp;
        this.headers = headers;
    }

    public SimpleRecord(long timestamp, byte[] key, byte[] value, Header[] headers) {
        this(timestamp, Utils.wrapNullable(key), Utils.wrapNullable(value), headers);
    }

    public SimpleRecord(long timestamp, ByteBuffer key, ByteBuffer value) {
        this(timestamp, key, value, Record.EMPTY_HEADERS);
    }

    public SimpleRecord(long timestamp, byte[] key, byte[] value) {
        this(timestamp, Utils.wrapNullable(key), Utils.wrapNullable(value));
    }

    public SimpleRecord(long timestamp, byte[] value) {
        this(timestamp, null, value);
    }

    public SimpleRecord(byte[] value) {
        this(RecordBatch.NO_TIMESTAMP, null, value);
    }

    public SimpleRecord(ByteBuffer value) {
        this(RecordBatch.NO_TIMESTAMP, null, value);
    }

    public SimpleRecord(byte[] key, byte[] value) {
        this(RecordBatch.NO_TIMESTAMP, key, value);
    }

    /**
     * Copy constructor from an existing low-level {@link Record}.
     */
    public SimpleRecord(Record record) {
        this(record.timestamp(), record.key(), record.value(), record.headers());
    }

    public ByteBuffer key() {
        return key;
    }

    public ByteBuffer value() {
        return value;
    }

    public long timestamp() {
        return timestamp;
    }

    public Header[] headers() {
        return headers;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;

        SimpleRecord other = (SimpleRecord) o;
        return timestamp == other.timestamp &&
                Objects.equals(key, other.key) &&
                Objects.equals(value, other.value) &&
                Arrays.equals(headers, other.headers);
    }

    @Override
    public int hashCode() {
        // Objects.hashCode(x) is x == null ? 0 : x.hashCode(), matching the historical computation.
        int result = Objects.hashCode(key);
        result = 31 * result + Objects.hashCode(value);
        result = 31 * result + Long.hashCode(timestamp);
        result = 31 * result + Arrays.hashCode(headers);
        return result;
    }

    @Override
    public String toString() {
        return String.format("SimpleRecord(timestamp=%d, key=%d bytes, value=%d bytes)",
                timestamp(),
                key == null ? 0 : key.limit(),
                value == null ? 0 : value.limit());
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/TimestampType.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import java.util.NoSuchElementException;

/**
 * The timestamp type of the records.
 */
public enum TimestampType {
    NO_TIMESTAMP_TYPE(-1, "NoTimestampType"),
    CREATE_TIME(0, "CreateTime"),
    LOG_APPEND_TIME(1, "LogAppendTime");

    // Wire-protocol id of this timestamp type.
    public final int id;
    // Human-readable name as used in configuration values.
    public final String name;

    TimestampType(int id, String name) {
        this.id = id;
        this.name = name;
    }

    /**
     * Look up a timestamp type by its configuration name (e.g. "CreateTime").
     *
     * @throws NoSuchElementException if no timestamp type has the given name
     */
    public static TimestampType forName(String name) {
        for (TimestampType type : values()) {
            if (type.name.equals(name))
                return type;
        }
        throw new NoSuchElementException("Invalid timestamp type " + name);
    }

    @Override
    public String toString() {
        return name;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/TransferableRecords.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.network.TransferableChannel;

import java.io.IOException;

/**
 * Represents a record set which can be transferred to a channel
 * @see Records
 * @see UnalignedRecords
 */
public interface TransferableRecords extends BaseRecords {

    /**
     * Attempts to write the contents of this buffer to a channel. Implementations may write
     * fewer bytes than requested; callers are expected to retry from the updated position.
     *
     * @param channel The channel to write to
     * @param position The position in the buffer to write from
     * @param length The number of bytes to write
     * @return The number of bytes actually written
     * @throws IOException For any IO errors
     */
    long writeTo(TransferableChannel channel, long position, int length) throws IOException;
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/UnalignedFileRecords.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.network.TransferableChannel;

import java.io.IOException;
import java.nio.channels.FileChannel;

/**
 * Represents a file record set which is not necessarily offset-aligned
 */
public class UnalignedFileRecords implements UnalignedRecords {

    private final FileChannel channel;
    // Absolute start position of this slice within the backing file.
    private final long position;
    private final int size;

    public UnalignedFileRecords(FileChannel channel, long position, int size) {
        this.channel = channel;
        this.position = position;
        this.size = size;
    }

    @Override
    public int sizeInBytes() {
        return size;
    }

    @Override
    public long writeTo(TransferableChannel destChannel, long previouslyWritten, int remaining) throws IOException {
        // Translate the slice-relative offset into an absolute file position,
        // and never transfer past the end of the slice.
        long absolutePosition = this.position + previouslyWritten;
        long bytesToTransfer = Math.min(remaining, sizeInBytes() - previouslyWritten);
        return destChannel.transferFrom(channel, absolutePosition, bytesToTransfer);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/UnalignedMemoryRecords.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

import org.apache.kafka.common.network.TransferableChannel;
import org.apache.kafka.common.utils.Utils;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Objects;

/**
 * Represents a memory record set which is not necessarily offset-aligned
 */
public class UnalignedMemoryRecords implements UnalignedRecords {

    private final ByteBuffer buffer;

    public UnalignedMemoryRecords(ByteBuffer buffer) {
        this.buffer = Objects.requireNonNull(buffer);
    }

    /**
     * A duplicate view of the underlying buffer; position/limit changes on the
     * returned buffer do not affect this record set.
     */
    public ByteBuffer buffer() {
        return buffer.duplicate();
    }

    @Override
    public int sizeInBytes() {
        return buffer.remaining();
    }

    @Override
    public long writeTo(TransferableChannel channel, long position, int length) throws IOException {
        // The position is narrowed to int below, so reject anything that would overflow.
        if (position > Integer.MAX_VALUE) {
            throw new IllegalArgumentException("position should not be greater than Integer.MAX_VALUE: " + position);
        }
        // The requested range must lie entirely within the buffer.
        if (position + length > buffer.limit()) {
            throw new IllegalArgumentException("position+length should not be greater than buffer.limit(), position: "
                    + position + ", length: " + length + ", buffer.limit(): " + buffer.limit());
        }
        return Utils.tryWriteTo(channel, (int) position, length, buffer);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/UnalignedRecords.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.record;

/**
 * Represents a record set which is not necessarily offset-aligned, and is
 * only used when fetching raft snapshot
 */
public interface UnalignedRecords extends TransferableRecords {

    // Default send implementation simply transfers all bytes of this record set.
    @Override
    default RecordsSend<? extends BaseRecords> toSend() {
        return new DefaultRecordsSend<>(this, sizeInBytes());
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/record/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides the low-level representation of records and record batches used by clients and servers. * <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong> */ package org.apache.kafka.common.record;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/replica/ClientMetadata.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.replica;

import org.apache.kafka.common.security.auth.KafkaPrincipal;

import java.net.InetAddress;
import java.util.Objects;

/**
 * Holder for all the client metadata required to determine a preferred replica.
 */
public interface ClientMetadata {

    /**
     * Rack ID sent by the client
     */
    String rackId();

    /**
     * Client ID sent by the client
     */
    String clientId();

    /**
     * Incoming address of the client
     */
    InetAddress clientAddress();

    /**
     * Security principal of the client
     */
    KafkaPrincipal principal();

    /**
     * Listener name for the client
     */
    String listenerName();

    /**
     * Simple immutable value-object implementation of {@link ClientMetadata}.
     */
    class DefaultClientMetadata implements ClientMetadata {
        private final String rackId;
        private final String clientId;
        private final InetAddress clientAddress;
        private final KafkaPrincipal principal;
        private final String listenerName;

        public DefaultClientMetadata(String rackId, String clientId, InetAddress clientAddress,
                                     KafkaPrincipal principal, String listenerName) {
            this.rackId = rackId;
            this.clientId = clientId;
            this.clientAddress = clientAddress;
            this.principal = principal;
            this.listenerName = listenerName;
        }

        @Override
        public String rackId() {
            return rackId;
        }

        @Override
        public String clientId() {
            return clientId;
        }

        @Override
        public InetAddress clientAddress() {
            return clientAddress;
        }

        @Override
        public KafkaPrincipal principal() {
            return principal;
        }

        @Override
        public String listenerName() {
            return listenerName;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o)
                return true;
            if (o == null || getClass() != o.getClass())
                return false;
            DefaultClientMetadata other = (DefaultClientMetadata) o;
            return Objects.equals(rackId, other.rackId)
                    && Objects.equals(clientId, other.clientId)
                    && Objects.equals(clientAddress, other.clientAddress)
                    && Objects.equals(principal, other.principal)
                    && Objects.equals(listenerName, other.listenerName);
        }

        @Override
        public int hashCode() {
            return Objects.hash(rackId, clientId, clientAddress, principal, listenerName);
        }

        @Override
        public String toString() {
            return "DefaultClientMetadata{" +
                    "rackId='" + rackId + '\'' +
                    ", clientId='" + clientId + '\'' +
                    ", clientAddress=" + clientAddress +
                    ", principal=" + principal +
                    ", listenerName='" + listenerName + '\'' +
                    '}';
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/replica/PartitionView.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.replica;

import java.util.Collections;
import java.util.Objects;
import java.util.Set;

/**
 * View of a partition used by {@link ReplicaSelector} to determine a preferred replica.
 */
public interface PartitionView {

    /** All replicas of the partition visible to the selector. */
    Set<ReplicaView> replicas();

    /** The current leader replica. */
    ReplicaView leader();

    /**
     * Straightforward immutable implementation of {@link PartitionView}.
     */
    class DefaultPartitionView implements PartitionView {
        private final Set<ReplicaView> replicas;
        private final ReplicaView leader;

        public DefaultPartitionView(Set<ReplicaView> replicas, ReplicaView leader) {
            // Wrap to guard against modification through the exposed accessor.
            this.replicas = Collections.unmodifiableSet(replicas);
            this.leader = leader;
        }

        @Override
        public Set<ReplicaView> replicas() {
            return replicas;
        }

        @Override
        public ReplicaView leader() {
            return leader;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o)
                return true;
            if (o == null || getClass() != o.getClass())
                return false;
            DefaultPartitionView other = (DefaultPartitionView) o;
            return Objects.equals(replicas, other.replicas)
                    && Objects.equals(leader, other.leader);
        }

        @Override
        public int hashCode() {
            return Objects.hash(replicas, leader);
        }

        @Override
        public String toString() {
            return "DefaultPartitionView{" +
                    "replicas=" + replicas +
                    ", leader=" + leader +
                    '}';
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/replica/RackAwareReplicaSelector.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.replica;

import org.apache.kafka.common.TopicPartition;

import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Returns a replica whose rack id is equal to the rack id specified in the client request metadata. If no such replica
 * is found, returns the leader.
 */
public class RackAwareReplicaSelector implements ReplicaSelector {

    @Override
    public Optional<ReplicaView> select(TopicPartition topicPartition,
                                        ClientMetadata clientMetadata,
                                        PartitionView partitionView) {
        String rackId = clientMetadata.rackId();
        // Without a client rack there is nothing to match against; fall back to the leader.
        if (rackId == null || rackId.isEmpty())
            return Optional.of(partitionView.leader());

        Set<ReplicaView> sameRackReplicas = partitionView.replicas().stream()
                .filter(replica -> rackId.equals(replica.endpoint().rack()))
                .collect(Collectors.toSet());

        // Prefer the leader whenever it is an acceptable choice: either no replica shares
        // the client's rack, or the leader itself is in that rack.
        if (sameRackReplicas.isEmpty() || sameRackReplicas.contains(partitionView.leader()))
            return Optional.of(partitionView.leader());

        // Otherwise pick the most caught-up replica in the client's rack.
        return sameRackReplicas.stream().max(ReplicaView.comparator());
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/replica/ReplicaSelector.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.replica;

import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.TopicPartition;

import java.io.Closeable;
import java.io.IOException;
import java.util.Map;
import java.util.Optional;

/**
 * Plug-able interface for selecting a preferred read replica given the current set of replicas for a partition
 * and metadata from the client.
 */
public interface ReplicaSelector extends Configurable, Closeable {

    /**
     * Select the preferred replica a client should use for fetching. If no replica is available, this will return an
     * empty optional.
     *
     * @param topicPartition the partition being fetched
     * @param clientMetadata metadata of the fetching client (rack, address, principal, ...)
     * @param partitionView  current replica/leader state of the partition
     */
    Optional<ReplicaView> select(TopicPartition topicPartition,
                                 ClientMetadata clientMetadata,
                                 PartitionView partitionView);

    @Override
    default void close() throws IOException {
        // No-op by default
    }

    @Override
    default void configure(Map<String, ?> configs) {
        // No-op by default
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/replica/ReplicaView.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.replica;

import org.apache.kafka.common.Node;

import java.util.Comparator;
import java.util.Objects;

/**
 * View of a replica used by {@link ReplicaSelector} to determine a preferred replica.
 */
public interface ReplicaView {

    /**
     * The endpoint information for this replica (hostname, port, rack, etc)
     */
    Node endpoint();

    /**
     * The log end offset for this replica
     */
    long logEndOffset();

    /**
     * The number of milliseconds (if any) since the last time this replica was caught up to the high watermark.
     * For a leader replica, this is always zero.
     */
    long timeSinceLastCaughtUpMs();

    /**
     * Comparator for ReplicaView that returns in the order of "most caught up". This is used for deterministic
     * selection of a replica when there is a tie from a selector.
     */
    static Comparator<ReplicaView> comparator() {
        return Comparator.comparingLong(ReplicaView::logEndOffset)
                .thenComparing(Comparator.comparingLong(ReplicaView::timeSinceLastCaughtUpMs).reversed())
                .thenComparing(replicaInfo -> replicaInfo.endpoint().id());
    }

    /**
     * Simple immutable implementation of {@link ReplicaView}.
     */
    class DefaultReplicaView implements ReplicaView {
        private final Node endpoint;
        private final long logEndOffset;
        private final long timeSinceLastCaughtUpMs;

        public DefaultReplicaView(Node endpoint, long logEndOffset, long timeSinceLastCaughtUpMs) {
            this.endpoint = endpoint;
            this.logEndOffset = logEndOffset;
            this.timeSinceLastCaughtUpMs = timeSinceLastCaughtUpMs;
        }

        @Override
        public Node endpoint() {
            return endpoint;
        }

        @Override
        public long logEndOffset() {
            return logEndOffset;
        }

        @Override
        public long timeSinceLastCaughtUpMs() {
            return timeSinceLastCaughtUpMs;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o)
                return true;
            if (o == null || getClass() != o.getClass())
                return false;
            DefaultReplicaView that = (DefaultReplicaView) o;
            // Fix: compare the primitive longs directly instead of Objects.equals,
            // which autoboxed both operands on every call. Semantics are unchanged.
            return logEndOffset == that.logEndOffset
                    && timeSinceLastCaughtUpMs == that.timeSinceLastCaughtUpMs
                    && Objects.equals(endpoint, that.endpoint);
        }

        @Override
        public int hashCode() {
            return Objects.hash(endpoint, logEndOffset, timeSinceLastCaughtUpMs);
        }

        @Override
        public String toString() {
            return "DefaultReplicaView{" +
                    "endpoint=" + endpoint +
                    ", logEndOffset=" + logEndOffset +
                    ", timeSinceLastCaughtUpMs=" + timeSinceLastCaughtUpMs +
                    '}';
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/replica/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides mechanism for determining placement of Kafka log replicas. * <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong> */ package org.apache.kafka.common.replica;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AbstractControlRequest.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.requests; import org.apache.kafka.common.protocol.ApiKeys; // Abstract class for all control requests including UpdateMetadataRequest, LeaderAndIsrRequest and StopReplicaRequest public abstract class AbstractControlRequest extends AbstractRequest { public static final long UNKNOWN_BROKER_EPOCH = -1L; public static abstract class Builder<T extends AbstractRequest> extends AbstractRequest.Builder<T> { protected final int controllerId; protected final int controllerEpoch; protected final long brokerEpoch; protected final boolean kraftController; protected Builder(ApiKeys api, short version, int controllerId, int controllerEpoch, long brokerEpoch) { this(api, version, controllerId, controllerEpoch, brokerEpoch, false); } protected Builder(ApiKeys api, short version, int controllerId, int controllerEpoch, long brokerEpoch, boolean kraftController) { super(api, version); this.controllerId = controllerId; this.controllerEpoch = controllerEpoch; this.brokerEpoch = brokerEpoch; this.kraftController = kraftController; } public int controllerId() { return controllerId; } public int controllerEpoch() { return controllerEpoch; } public long brokerEpoch() { return brokerEpoch; } } 
protected AbstractControlRequest(ApiKeys api, short version) { super(api, version); } public abstract int controllerId(); public abstract boolean isKRaftController(); public abstract int controllerEpoch(); public abstract long brokerEpoch(); }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AbstractRequest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.requests;

import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.network.Send;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.SendBuilder;

import java.nio.ByteBuffer;
import java.util.Map;

/**
 * Base class for all Kafka protocol requests. Pairs an {@link ApiKeys API key} with a
 * concrete protocol version and provides serialization, error-response construction,
 * and a factory ({@link #parseRequest}) that dispatches raw bytes to the matching
 * request class.
 */
public abstract class AbstractRequest implements AbstractRequestResponse {

    /**
     * Builder for a request, constrained to an inclusive range of allowed protocol versions.
     */
    public static abstract class Builder<T extends AbstractRequest> {
        private final ApiKeys apiKey;
        private final short oldestAllowedVersion;
        private final short latestAllowedVersion;

        /**
         * Construct a new builder which allows any supported version
         */
        public Builder(ApiKeys apiKey) {
            this(apiKey, apiKey.oldestVersion(), apiKey.latestVersion());
        }

        /**
         * Construct a new builder which allows only a specific version
         */
        public Builder(ApiKeys apiKey, short allowedVersion) {
            this(apiKey, allowedVersion, allowedVersion);
        }

        /**
         * Construct a new builder which allows an inclusive range of versions
         */
        public Builder(ApiKeys apiKey, short oldestAllowedVersion, short latestAllowedVersion) {
            this.apiKey = apiKey;
            this.oldestAllowedVersion = oldestAllowedVersion;
            this.latestAllowedVersion = latestAllowedVersion;
        }

        public ApiKeys apiKey() {
            return apiKey;
        }

        public short oldestAllowedVersion() {
            return oldestAllowedVersion;
        }

        public short latestAllowedVersion() {
            return latestAllowedVersion;
        }

        /**
         * Build using the newest version this builder allows.
         */
        public T build() {
            return build(latestAllowedVersion());
        }

        public abstract T build(short version);
    }

    private final short version;
    private final ApiKeys apiKey;

    /**
     * @throws UnsupportedVersionException if {@code version} is outside the range
     *         supported by {@code apiKey}
     */
    public AbstractRequest(ApiKeys apiKey, short version) {
        if (!apiKey.isVersionSupported(version))
            throw new UnsupportedVersionException("The " + apiKey + " protocol does not support version " + version);
        this.version = version;
        this.apiKey = apiKey;
    }

    /**
     * Get the version of this AbstractRequest object.
     */
    public short version() {
        return version;
    }

    public ApiKeys apiKey() {
        return apiKey;
    }

    /**
     * Build a size-prefixed {@link Send} containing the header followed by the request body.
     */
    public final Send toSend(RequestHeader header) {
        return SendBuilder.buildRequestSend(header, data());
    }

    /**
     * Serializes header and body without prefixing with size (unlike `toSend`, which does include a size prefix).
     *
     * @throws IllegalArgumentException if the header's api key or version does not match this request
     */
    public final ByteBuffer serializeWithHeader(RequestHeader header) {
        if (header.apiKey() != apiKey) {
            throw new IllegalArgumentException("Could not build request " + apiKey + " with header api key " + header.apiKey());
        }
        if (header.apiVersion() != version) {
            throw new IllegalArgumentException("Could not build request version " + version + " with header version " + header.apiVersion());
        }
        return RequestUtils.serialize(header.data(), header.headerVersion(), data(), version);
    }

    // Visible for testing
    public final ByteBuffer serialize() {
        return MessageUtil.toByteBuffer(data(), version);
    }

    // Visible for testing
    final int sizeInBytes() {
        return data().size(new ObjectSerializationCache(), version);
    }

    // NOTE(review): the verbose flag is currently ignored; both paths render data().toString().
    public String toString(boolean verbose) {
        return data().toString();
    }

    @Override
    public final String toString() {
        return toString(true);
    }

    /**
     * Get an error response for a request
     */
    public AbstractResponse getErrorResponse(Throwable e) {
        return getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, e);
    }

    /**
     * Get an error response for a request with specified throttle time in the response if applicable
     */
    public abstract AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e);

    /**
     * Get the error counts corresponding to an error response. This is overridden for requests
     * where response may be null (e.g produce with acks=0).
     *
     * @throws IllegalStateException if the generated error response is null
     */
    public Map<Errors, Integer> errorCounts(Throwable e) {
        AbstractResponse response = getErrorResponse(0, e);
        if (response == null)
            throw new IllegalStateException("Error counts could not be obtained for request " + this);
        else
            return response.errorCounts();
    }

    /**
     * Factory method for getting a request object based on ApiKey ID and a version
     */
    public static RequestAndSize parseRequest(ApiKeys apiKey, short apiVersion, ByteBuffer buffer) {
        // Capture the size before parsing consumes the buffer.
        int bufferSize = buffer.remaining();
        return new RequestAndSize(doParseRequest(apiKey, apiVersion, buffer), bufferSize);
    }

    // Exhaustive dispatch from ApiKeys to the concrete request parser; the default branch
    // asserts so that a newly added ApiKey cannot be silently left unhandled.
    private static AbstractRequest doParseRequest(ApiKeys apiKey, short apiVersion, ByteBuffer buffer) {
        switch (apiKey) {
            case PRODUCE:
                return ProduceRequest.parse(buffer, apiVersion);
            case FETCH:
                return FetchRequest.parse(buffer, apiVersion);
            case LIST_OFFSETS:
                return ListOffsetsRequest.parse(buffer, apiVersion);
            case METADATA:
                return MetadataRequest.parse(buffer, apiVersion);
            case OFFSET_COMMIT:
                return OffsetCommitRequest.parse(buffer, apiVersion);
            case OFFSET_FETCH:
                return OffsetFetchRequest.parse(buffer, apiVersion);
            case FIND_COORDINATOR:
                return FindCoordinatorRequest.parse(buffer, apiVersion);
            case JOIN_GROUP:
                return JoinGroupRequest.parse(buffer, apiVersion);
            case HEARTBEAT:
                return HeartbeatRequest.parse(buffer, apiVersion);
            case LEAVE_GROUP:
                return LeaveGroupRequest.parse(buffer, apiVersion);
            case SYNC_GROUP:
                return SyncGroupRequest.parse(buffer, apiVersion);
            case STOP_REPLICA:
                return StopReplicaRequest.parse(buffer, apiVersion);
            case CONTROLLED_SHUTDOWN:
                return ControlledShutdownRequest.parse(buffer, apiVersion);
            case UPDATE_METADATA:
                return UpdateMetadataRequest.parse(buffer, apiVersion);
            case LEADER_AND_ISR:
                return LeaderAndIsrRequest.parse(buffer, apiVersion);
            case DESCRIBE_GROUPS:
                return DescribeGroupsRequest.parse(buffer, apiVersion);
            case LIST_GROUPS:
                return ListGroupsRequest.parse(buffer, apiVersion);
            case SASL_HANDSHAKE:
                return SaslHandshakeRequest.parse(buffer, apiVersion);
            case API_VERSIONS:
                return ApiVersionsRequest.parse(buffer, apiVersion);
            case CREATE_TOPICS:
                return CreateTopicsRequest.parse(buffer, apiVersion);
            case DELETE_TOPICS:
                return DeleteTopicsRequest.parse(buffer, apiVersion);
            case DELETE_RECORDS:
                return DeleteRecordsRequest.parse(buffer, apiVersion);
            case INIT_PRODUCER_ID:
                return InitProducerIdRequest.parse(buffer, apiVersion);
            case OFFSET_FOR_LEADER_EPOCH:
                return OffsetsForLeaderEpochRequest.parse(buffer, apiVersion);
            case ADD_PARTITIONS_TO_TXN:
                return AddPartitionsToTxnRequest.parse(buffer, apiVersion);
            case ADD_OFFSETS_TO_TXN:
                return AddOffsetsToTxnRequest.parse(buffer, apiVersion);
            case END_TXN:
                return EndTxnRequest.parse(buffer, apiVersion);
            case WRITE_TXN_MARKERS:
                return WriteTxnMarkersRequest.parse(buffer, apiVersion);
            case TXN_OFFSET_COMMIT:
                return TxnOffsetCommitRequest.parse(buffer, apiVersion);
            case DESCRIBE_ACLS:
                return DescribeAclsRequest.parse(buffer, apiVersion);
            case CREATE_ACLS:
                return CreateAclsRequest.parse(buffer, apiVersion);
            case DELETE_ACLS:
                return DeleteAclsRequest.parse(buffer, apiVersion);
            case DESCRIBE_CONFIGS:
                return DescribeConfigsRequest.parse(buffer, apiVersion);
            case ALTER_CONFIGS:
                return AlterConfigsRequest.parse(buffer, apiVersion);
            case ALTER_REPLICA_LOG_DIRS:
                return AlterReplicaLogDirsRequest.parse(buffer, apiVersion);
            case DESCRIBE_LOG_DIRS:
                return DescribeLogDirsRequest.parse(buffer, apiVersion);
            case SASL_AUTHENTICATE:
                return SaslAuthenticateRequest.parse(buffer, apiVersion);
            case CREATE_PARTITIONS:
                return CreatePartitionsRequest.parse(buffer, apiVersion);
            case CREATE_DELEGATION_TOKEN:
                return CreateDelegationTokenRequest.parse(buffer, apiVersion);
            case RENEW_DELEGATION_TOKEN:
                return RenewDelegationTokenRequest.parse(buffer, apiVersion);
            case EXPIRE_DELEGATION_TOKEN:
                return ExpireDelegationTokenRequest.parse(buffer, apiVersion);
            case DESCRIBE_DELEGATION_TOKEN:
                return DescribeDelegationTokenRequest.parse(buffer, apiVersion);
            case DELETE_GROUPS:
                return DeleteGroupsRequest.parse(buffer, apiVersion);
            case ELECT_LEADERS:
                return ElectLeadersRequest.parse(buffer, apiVersion);
            case INCREMENTAL_ALTER_CONFIGS:
                return IncrementalAlterConfigsRequest.parse(buffer, apiVersion);
            case ALTER_PARTITION_REASSIGNMENTS:
                return AlterPartitionReassignmentsRequest.parse(buffer, apiVersion);
            case LIST_PARTITION_REASSIGNMENTS:
                return ListPartitionReassignmentsRequest.parse(buffer, apiVersion);
            case OFFSET_DELETE:
                return OffsetDeleteRequest.parse(buffer, apiVersion);
            case DESCRIBE_CLIENT_QUOTAS:
                return DescribeClientQuotasRequest.parse(buffer, apiVersion);
            case ALTER_CLIENT_QUOTAS:
                return AlterClientQuotasRequest.parse(buffer, apiVersion);
            case DESCRIBE_USER_SCRAM_CREDENTIALS:
                return DescribeUserScramCredentialsRequest.parse(buffer, apiVersion);
            case ALTER_USER_SCRAM_CREDENTIALS:
                return AlterUserScramCredentialsRequest.parse(buffer, apiVersion);
            case VOTE:
                return VoteRequest.parse(buffer, apiVersion);
            case BEGIN_QUORUM_EPOCH:
                return BeginQuorumEpochRequest.parse(buffer, apiVersion);
            case END_QUORUM_EPOCH:
                return EndQuorumEpochRequest.parse(buffer, apiVersion);
            case DESCRIBE_QUORUM:
                return DescribeQuorumRequest.parse(buffer, apiVersion);
            case ALTER_PARTITION:
                return AlterPartitionRequest.parse(buffer, apiVersion);
            case UPDATE_FEATURES:
                return UpdateFeaturesRequest.parse(buffer, apiVersion);
            case ENVELOPE:
                return EnvelopeRequest.parse(buffer, apiVersion);
            case FETCH_SNAPSHOT:
                return FetchSnapshotRequest.parse(buffer, apiVersion);
            case DESCRIBE_CLUSTER:
                return DescribeClusterRequest.parse(buffer, apiVersion);
            case DESCRIBE_PRODUCERS:
                return DescribeProducersRequest.parse(buffer, apiVersion);
            case BROKER_REGISTRATION:
                return BrokerRegistrationRequest.parse(buffer, apiVersion);
            case BROKER_HEARTBEAT:
                return BrokerHeartbeatRequest.parse(buffer, apiVersion);
            case UNREGISTER_BROKER:
                return UnregisterBrokerRequest.parse(buffer, apiVersion);
            case DESCRIBE_TRANSACTIONS:
                return DescribeTransactionsRequest.parse(buffer, apiVersion);
            case LIST_TRANSACTIONS:
                return ListTransactionsRequest.parse(buffer, apiVersion);
            case ALLOCATE_PRODUCER_IDS:
                return AllocateProducerIdsRequest.parse(buffer, apiVersion);
            case CONSUMER_GROUP_HEARTBEAT:
                return ConsumerGroupHeartbeatRequest.parse(buffer, apiVersion);
            default:
                throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseRequest`, the " +
                        "code should be updated to do so.", apiKey));
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AbstractRequestResponse.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.requests;

import org.apache.kafka.common.protocol.ApiMessage;

/**
 * Common contract of protocol requests and responses: each exposes the generated
 * message object that backs its wire representation.
 */
public interface AbstractRequestResponse {
    /**
     * @return the generated {@link ApiMessage} carrying this request/response's fields
     */
    ApiMessage data();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AbstractResponse.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.requests;

import org.apache.kafka.common.network.Send;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.SendBuilder;

import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Base class for all Kafka protocol responses. Provides serialization helpers,
 * error-count aggregation utilities, and a factory ({@link #parseResponse}) that
 * dispatches raw bytes to the matching response class.
 */
public abstract class AbstractResponse implements AbstractRequestResponse {
    /** Default throttle time (no throttling) used when building error responses. */
    public static final int DEFAULT_THROTTLE_TIME = 0;

    private final ApiKeys apiKey;

    protected AbstractResponse(ApiKeys apiKey) {
        this.apiKey = apiKey;
    }

    /**
     * Build a size-prefixed {@link Send} containing the header followed by the response body.
     */
    public final Send toSend(ResponseHeader header, short version) {
        return SendBuilder.buildResponseSend(header, data(), version);
    }

    /**
     * Serializes header and body without prefixing with size (unlike `toSend`, which does include a size prefix).
     */
    final ByteBuffer serializeWithHeader(ResponseHeader header, short version) {
        return RequestUtils.serialize(header.data(), header.headerVersion(), data(), version);
    }

    // Visible for testing
    final ByteBuffer serialize(short version) {
        return MessageUtil.toByteBuffer(data(), version);
    }

    /**
     * The number of each type of error in the response, including {@link Errors#NONE} and top-level errors as well as
     * more specifically scoped errors (such as topic or partition-level errors).
     * @return A count of errors.
     */
    public abstract Map<Errors, Integer> errorCounts();

    // Convenience: a single error counted once.
    protected Map<Errors, Integer> errorCounts(Errors error) {
        return Collections.singletonMap(error, 1);
    }

    // Convenience: tally a stream of errors into counts.
    protected Map<Errors, Integer> errorCounts(Stream<Errors> errors) {
        return errors.collect(Collectors.groupingBy(e -> e, Collectors.summingInt(e -> 1)));
    }

    // Convenience: tally a collection of errors into counts.
    protected Map<Errors, Integer> errorCounts(Collection<Errors> errors) {
        Map<Errors, Integer> errorCounts = new HashMap<>();
        for (Errors error : errors)
            updateErrorCounts(errorCounts, error);
        return errorCounts;
    }

    // Convenience: tally the Errors inside a map of ApiError values.
    protected Map<Errors, Integer> apiErrorCounts(Map<?, ApiError> errors) {
        Map<Errors, Integer> errorCounts = new HashMap<>();
        for (ApiError apiError : errors.values())
            updateErrorCounts(errorCounts, apiError.error());
        return errorCounts;
    }

    // Increment the count for the given error in the mutable accumulator map.
    protected void updateErrorCounts(Map<Errors, Integer> errorCounts, Errors error) {
        Integer count = errorCounts.getOrDefault(error, 0);
        errorCounts.put(error, count + 1);
    }

    /**
     * Parse a response from the provided buffer. The buffer is expected to hold both
     * the {@link ResponseHeader} as well as the response payload.
     *
     * @throws CorrelationIdMismatchException if the response's correlation id does not
     *         match the request's
     */
    public static AbstractResponse parseResponse(ByteBuffer buffer, RequestHeader requestHeader) {
        ApiKeys apiKey = requestHeader.apiKey();
        short apiVersion = requestHeader.apiVersion();

        ResponseHeader responseHeader = ResponseHeader.parse(buffer, apiKey.responseHeaderVersion(apiVersion));

        if (requestHeader.correlationId() != responseHeader.correlationId()) {
            throw new CorrelationIdMismatchException("Correlation id for response ("
                + responseHeader.correlationId() + ") does not match request ("
                + requestHeader.correlationId() + "), request header: " + requestHeader,
                requestHeader.correlationId(), responseHeader.correlationId());
        }

        return AbstractResponse.parseResponse(apiKey, buffer, apiVersion);
    }

    // Exhaustive dispatch from ApiKeys to the concrete response parser; the default branch
    // asserts so that a newly added ApiKey cannot be silently left unhandled.
    public static AbstractResponse parseResponse(ApiKeys apiKey, ByteBuffer responseBuffer, short version) {
        switch (apiKey) {
            case PRODUCE:
                return ProduceResponse.parse(responseBuffer, version);
            case FETCH:
                return FetchResponse.parse(responseBuffer, version);
            case LIST_OFFSETS:
                return ListOffsetsResponse.parse(responseBuffer, version);
            case METADATA:
                return MetadataResponse.parse(responseBuffer, version);
            case OFFSET_COMMIT:
                return OffsetCommitResponse.parse(responseBuffer, version);
            case OFFSET_FETCH:
                return OffsetFetchResponse.parse(responseBuffer, version);
            case FIND_COORDINATOR:
                return FindCoordinatorResponse.parse(responseBuffer, version);
            case JOIN_GROUP:
                return JoinGroupResponse.parse(responseBuffer, version);
            case HEARTBEAT:
                return HeartbeatResponse.parse(responseBuffer, version);
            case LEAVE_GROUP:
                return LeaveGroupResponse.parse(responseBuffer, version);
            case SYNC_GROUP:
                return SyncGroupResponse.parse(responseBuffer, version);
            case STOP_REPLICA:
                return StopReplicaResponse.parse(responseBuffer, version);
            case CONTROLLED_SHUTDOWN:
                return ControlledShutdownResponse.parse(responseBuffer, version);
            case UPDATE_METADATA:
                return UpdateMetadataResponse.parse(responseBuffer, version);
            case LEADER_AND_ISR:
                return LeaderAndIsrResponse.parse(responseBuffer, version);
            case DESCRIBE_GROUPS:
                return DescribeGroupsResponse.parse(responseBuffer, version);
            case LIST_GROUPS:
                return ListGroupsResponse.parse(responseBuffer, version);
            case SASL_HANDSHAKE:
                return SaslHandshakeResponse.parse(responseBuffer, version);
            case API_VERSIONS:
                return ApiVersionsResponse.parse(responseBuffer, version);
            case CREATE_TOPICS:
                return CreateTopicsResponse.parse(responseBuffer, version);
            case DELETE_TOPICS:
                return DeleteTopicsResponse.parse(responseBuffer, version);
            case DELETE_RECORDS:
                return DeleteRecordsResponse.parse(responseBuffer, version);
            case INIT_PRODUCER_ID:
                return InitProducerIdResponse.parse(responseBuffer, version);
            case OFFSET_FOR_LEADER_EPOCH:
                return OffsetsForLeaderEpochResponse.parse(responseBuffer, version);
            case ADD_PARTITIONS_TO_TXN:
                return AddPartitionsToTxnResponse.parse(responseBuffer, version);
            case ADD_OFFSETS_TO_TXN:
                return AddOffsetsToTxnResponse.parse(responseBuffer, version);
            case END_TXN:
                return EndTxnResponse.parse(responseBuffer, version);
            case WRITE_TXN_MARKERS:
                return WriteTxnMarkersResponse.parse(responseBuffer, version);
            case TXN_OFFSET_COMMIT:
                return TxnOffsetCommitResponse.parse(responseBuffer, version);
            case DESCRIBE_ACLS:
                return DescribeAclsResponse.parse(responseBuffer, version);
            case CREATE_ACLS:
                return CreateAclsResponse.parse(responseBuffer, version);
            case DELETE_ACLS:
                return DeleteAclsResponse.parse(responseBuffer, version);
            case DESCRIBE_CONFIGS:
                return DescribeConfigsResponse.parse(responseBuffer, version);
            case ALTER_CONFIGS:
                return AlterConfigsResponse.parse(responseBuffer, version);
            case ALTER_REPLICA_LOG_DIRS:
                return AlterReplicaLogDirsResponse.parse(responseBuffer, version);
            case DESCRIBE_LOG_DIRS:
                return DescribeLogDirsResponse.parse(responseBuffer, version);
            case SASL_AUTHENTICATE:
                return SaslAuthenticateResponse.parse(responseBuffer, version);
            case CREATE_PARTITIONS:
                return CreatePartitionsResponse.parse(responseBuffer, version);
            case CREATE_DELEGATION_TOKEN:
                return CreateDelegationTokenResponse.parse(responseBuffer, version);
            case RENEW_DELEGATION_TOKEN:
                return RenewDelegationTokenResponse.parse(responseBuffer, version);
            case EXPIRE_DELEGATION_TOKEN:
                return ExpireDelegationTokenResponse.parse(responseBuffer, version);
            case DESCRIBE_DELEGATION_TOKEN:
                return DescribeDelegationTokenResponse.parse(responseBuffer, version);
            case DELETE_GROUPS:
                return DeleteGroupsResponse.parse(responseBuffer, version);
            case ELECT_LEADERS:
                return ElectLeadersResponse.parse(responseBuffer, version);
            case INCREMENTAL_ALTER_CONFIGS:
                return IncrementalAlterConfigsResponse.parse(responseBuffer, version);
            case ALTER_PARTITION_REASSIGNMENTS:
                return AlterPartitionReassignmentsResponse.parse(responseBuffer, version);
            case LIST_PARTITION_REASSIGNMENTS:
                return ListPartitionReassignmentsResponse.parse(responseBuffer, version);
            case OFFSET_DELETE:
                return OffsetDeleteResponse.parse(responseBuffer, version);
            case DESCRIBE_CLIENT_QUOTAS:
                return DescribeClientQuotasResponse.parse(responseBuffer, version);
            case ALTER_CLIENT_QUOTAS:
                return AlterClientQuotasResponse.parse(responseBuffer, version);
            case DESCRIBE_USER_SCRAM_CREDENTIALS:
                return DescribeUserScramCredentialsResponse.parse(responseBuffer, version);
            case ALTER_USER_SCRAM_CREDENTIALS:
                return AlterUserScramCredentialsResponse.parse(responseBuffer, version);
            case VOTE:
                return VoteResponse.parse(responseBuffer, version);
            case BEGIN_QUORUM_EPOCH:
                return BeginQuorumEpochResponse.parse(responseBuffer, version);
            case END_QUORUM_EPOCH:
                return EndQuorumEpochResponse.parse(responseBuffer, version);
            case DESCRIBE_QUORUM:
                return DescribeQuorumResponse.parse(responseBuffer, version);
            case ALTER_PARTITION:
                return AlterPartitionResponse.parse(responseBuffer, version);
            case UPDATE_FEATURES:
                return UpdateFeaturesResponse.parse(responseBuffer, version);
            case ENVELOPE:
                return EnvelopeResponse.parse(responseBuffer, version);
            case FETCH_SNAPSHOT:
                return FetchSnapshotResponse.parse(responseBuffer, version);
            case DESCRIBE_CLUSTER:
                return DescribeClusterResponse.parse(responseBuffer, version);
            case DESCRIBE_PRODUCERS:
                return DescribeProducersResponse.parse(responseBuffer, version);
            case BROKER_REGISTRATION:
                return BrokerRegistrationResponse.parse(responseBuffer, version);
            case BROKER_HEARTBEAT:
                return BrokerHeartbeatResponse.parse(responseBuffer, version);
            case UNREGISTER_BROKER:
                return UnregisterBrokerResponse.parse(responseBuffer, version);
            case DESCRIBE_TRANSACTIONS:
                return DescribeTransactionsResponse.parse(responseBuffer, version);
            case LIST_TRANSACTIONS:
                return ListTransactionsResponse.parse(responseBuffer, version);
            case ALLOCATE_PRODUCER_IDS:
                return AllocateProducerIdsResponse.parse(responseBuffer, version);
            case CONSUMER_GROUP_HEARTBEAT:
                return ConsumerGroupHeartbeatResponse.parse(responseBuffer, version);
            default:
                throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseResponse`, the " +
                        "code should be updated to do so.", apiKey));
        }
    }

    /**
     * Returns whether or not client should throttle upon receiving a response of the specified version with a non-zero
     * throttle time. Client-side throttling is needed when communicating with a newer version of broker which, on
     * quota violation, sends out responses before throttling.
     */
    public boolean shouldClientThrottle(short version) {
        return false;
    }

    public ApiKeys apiKey() {
        return apiKey;
    }

    /**
     * Get the throttle time in milliseconds. If the response schema does not
     * support this field, then 0 will be returned.
     */
    public abstract int throttleTimeMs();

    /**
     * Set the throttle time in the response if the schema supports it. Otherwise,
     * this is a no-op.
     *
     * @param throttleTimeMs The throttle time in milliseconds
     */
    public abstract void maybeSetThrottleTimeMs(int throttleTimeMs);

    public String toString() {
        return data().toString();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AddOffsetsToTxnRequest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.requests;

import org.apache.kafka.common.message.AddOffsetsToTxnRequestData;
import org.apache.kafka.common.message.AddOffsetsToTxnResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;

import java.nio.ByteBuffer;

/**
 * Request to add a consumer-group offsets entry to an ongoing transaction.
 */
public class AddOffsetsToTxnRequest extends AbstractRequest {

    private final AddOffsetsToTxnRequestData data;

    public static class Builder extends AbstractRequest.Builder<AddOffsetsToTxnRequest> {
        // Fix: field was mutable public; made final for immutability and consistency with
        // the sibling AddPartitionsToTxnRequest.Builder, whose data field is public final.
        public final AddOffsetsToTxnRequestData data;

        public Builder(AddOffsetsToTxnRequestData data) {
            super(ApiKeys.ADD_OFFSETS_TO_TXN);
            this.data = data;
        }

        @Override
        public AddOffsetsToTxnRequest build(short version) {
            return new AddOffsetsToTxnRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    public AddOffsetsToTxnRequest(AddOffsetsToTxnRequestData data, short version) {
        super(ApiKeys.ADD_OFFSETS_TO_TXN, version);
        this.data = data;
    }

    @Override
    public AddOffsetsToTxnRequestData data() {
        return data;
    }

    /**
     * Build an error response carrying the error mapped from {@code e} and the given throttle time.
     */
    @Override
    public AddOffsetsToTxnResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        return new AddOffsetsToTxnResponse(new AddOffsetsToTxnResponseData()
                                               .setErrorCode(Errors.forException(e).code())
                                               .setThrottleTimeMs(throttleTimeMs));
    }

    /**
     * Parse a request body of the given version from the buffer.
     */
    public static AddOffsetsToTxnRequest parse(ByteBuffer buffer, short version) {
        return new AddOffsetsToTxnRequest(new AddOffsetsToTxnRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AddOffsetsToTxnResponse.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.requests;

import org.apache.kafka.common.message.AddOffsetsToTxnResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;

import java.nio.ByteBuffer;
import java.util.Map;

/**
 * Response for {@link AddOffsetsToTxnRequest}.
 *
 * Possible error codes:
 *
 * - {@link Errors#NOT_COORDINATOR}
 * - {@link Errors#COORDINATOR_NOT_AVAILABLE}
 * - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS}
 * - {@link Errors#INVALID_PRODUCER_ID_MAPPING}
 * - {@link Errors#INVALID_PRODUCER_EPOCH} // for version <=1
 * - {@link Errors#PRODUCER_FENCED}
 * - {@link Errors#INVALID_TXN_STATE}
 * - {@link Errors#GROUP_AUTHORIZATION_FAILED}
 * - {@link Errors#TRANSACTIONAL_ID_AUTHORIZATION_FAILED}
 */
public class AddOffsetsToTxnResponse extends AbstractResponse {

    private final AddOffsetsToTxnResponseData data;

    public AddOffsetsToTxnResponse(AddOffsetsToTxnResponseData data) {
        super(ApiKeys.ADD_OFFSETS_TO_TXN);
        this.data = data;
    }

    /**
     * Parse a response body of the given version from the buffer.
     */
    public static AddOffsetsToTxnResponse parse(ByteBuffer buffer, short version) {
        AddOffsetsToTxnResponseData parsed =
            new AddOffsetsToTxnResponseData(new ByteBufferAccessor(buffer), version);
        return new AddOffsetsToTxnResponse(parsed);
    }

    @Override
    public AddOffsetsToTxnResponseData data() {
        return data;
    }

    // The response carries exactly one top-level error code.
    @Override
    public Map<Errors, Integer> errorCounts() {
        Errors topLevelError = Errors.forCode(data.errorCode());
        return errorCounts(topLevelError);
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    // Brokers throttle before responding only up to version 0; from version 1 on,
    // the client is expected to apply the throttle itself.
    @Override
    public boolean shouldClientThrottle(short version) {
        return version >= 1;
    }

    @Override
    public String toString() {
        return data.toString();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AddPartitionsToTxnRequest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.requests;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.AddPartitionsToTxnRequestData;
import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.AddPartitionsToTxnTopic;
import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.AddPartitionsToTxnTransaction;
import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.AddPartitionsToTxnTransactionCollection;
import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.AddPartitionsToTxnTopicCollection;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnPartitionResult;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnPartitionResultCollection;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnResult;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnTopicResult;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnTopicResultCollection;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * The AddPartitionsToTxn request. Versions 3 and below carry a single transaction's
 * data in top-level ("V3AndBelow") fields; version 4+ carries a collection of
 * transactions, which is the form used for broker-to-broker requests.
 */
public class AddPartitionsToTxnRequest extends AbstractRequest {

    private final AddPartitionsToTxnRequestData data;

    public static class Builder extends AbstractRequest.Builder<AddPartitionsToTxnRequest> {
        public final AddPartitionsToTxnRequestData data;

        /**
         * Builds a client-originated request (versions 0-3), which describes exactly one
         * transaction via the top-level V3AndBelow fields.
         *
         * @param transactionalId the transactional id of the single transaction
         * @param producerId      the producer id of the single transaction
         * @param producerEpoch   the producer epoch of the single transaction
         * @param partitions      the partitions to add to the transaction
         */
        public static Builder forClient(String transactionalId,
                                        long producerId,
                                        short producerEpoch,
                                        List<TopicPartition> partitions) {
            AddPartitionsToTxnTopicCollection topics = buildTxnTopicCollection(partitions);
            // Client requests are capped at version 3; version 4+ is broker-to-broker only.
            return new Builder(ApiKeys.ADD_PARTITIONS_TO_TXN.oldestVersion(),
                (short) 3,
                new AddPartitionsToTxnRequestData()
                    .setV3AndBelowTransactionalId(transactionalId)
                    .setV3AndBelowProducerId(producerId)
                    .setV3AndBelowProducerEpoch(producerEpoch)
                    .setV3AndBelowTopics(topics));
        }

        /**
         * Builds a broker-originated request (version 4+), which may carry multiple
         * transactions in a single request.
         *
         * @param transactions the transactions (and their partitions) to add
         */
        public static Builder forBroker(AddPartitionsToTxnTransactionCollection transactions) {
            return new Builder((short) 4,
                ApiKeys.ADD_PARTITIONS_TO_TXN.latestVersion(),
                new AddPartitionsToTxnRequestData()
                    .setTransactions(transactions));
        }

        private Builder(short minVersion, short maxVersion, AddPartitionsToTxnRequestData data) {
            super(ApiKeys.ADD_PARTITIONS_TO_TXN, minVersion, maxVersion);
            this.data = data;
        }

        /**
         * Groups the given partitions by topic name into the wire-format topic collection.
         */
        private static AddPartitionsToTxnTopicCollection buildTxnTopicCollection(final List<TopicPartition> partitions) {
            Map<String, List<Integer>> partitionMap = new HashMap<>();
            for (TopicPartition topicPartition : partitions) {
                // computeIfAbsent is the idiomatic replacement for the null-checking compute lambda.
                partitionMap.computeIfAbsent(topicPartition.topic(), topic -> new ArrayList<>())
                    .add(topicPartition.partition());
            }

            AddPartitionsToTxnTopicCollection topics = new AddPartitionsToTxnTopicCollection();
            for (Map.Entry<String, List<Integer>> partitionEntry : partitionMap.entrySet()) {
                topics.add(new AddPartitionsToTxnTopic()
                    .setName(partitionEntry.getKey())
                    .setPartitions(partitionEntry.getValue()));
            }
            return topics;
        }

        @Override
        public AddPartitionsToTxnRequest build(short version) {
            return new AddPartitionsToTxnRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    public AddPartitionsToTxnRequest(final AddPartitionsToTxnRequestData data, short version) {
        super(ApiKeys.ADD_PARTITIONS_TO_TXN, version);
        this.data = data;
    }

    @Override
    public AddPartitionsToTxnRequestData data() {
        return data;
    }

    /**
     * Builds an error response for this request. For versions 3 and below the error is
     * replicated per-partition; for version 4+ only the top-level error code is set.
     */
    @Override
    public AddPartitionsToTxnResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        Errors error = Errors.forException(e);
        AddPartitionsToTxnResponseData response = new AddPartitionsToTxnResponseData();
        if (version() < 4) {
            response.setResultsByTopicV3AndBelow(errorResponseForTopics(data.v3AndBelowTopics(), error));
        } else {
            response.setErrorCode(error.code());
        }
        response.setThrottleTimeMs(throttleTimeMs);
        return new AddPartitionsToTxnResponse(response);
    }

    /**
     * Flattens a topic collection into a list of {@link TopicPartition}s.
     */
    public static List<TopicPartition> getPartitions(AddPartitionsToTxnTopicCollection topics) {
        List<TopicPartition> partitions = new ArrayList<>();
        for (AddPartitionsToTxnTopic topicCollection : topics) {
            for (Integer partition : topicCollection.partitions()) {
                partitions.add(new TopicPartition(topicCollection.name(), partition));
            }
        }
        return partitions;
    }

    /**
     * @return the partitions of each transaction in this (version 4+) request, keyed by
     *         transactional id.
     */
    public Map<String, List<TopicPartition>> partitionsByTransaction() {
        Map<String, List<TopicPartition>> partitionsByTransaction = new HashMap<>();
        for (AddPartitionsToTxnTransaction transaction : data.transactions()) {
            List<TopicPartition> partitions = getPartitions(transaction.topics());
            partitionsByTransaction.put(transaction.transactionalId(), partitions);
        }
        return partitionsByTransaction;
    }

    // Takes a version 3 or below request and returns a v4+ singleton (one transaction ID) request.
    public AddPartitionsToTxnRequest normalizeRequest() {
        return new AddPartitionsToTxnRequest(
            new AddPartitionsToTxnRequestData().setTransactions(singletonTransaction()), version());
    }

    /**
     * Wraps the top-level V3AndBelow fields into a single-element transaction collection.
     */
    private AddPartitionsToTxnTransactionCollection singletonTransaction() {
        AddPartitionsToTxnTransactionCollection singleTxn = new AddPartitionsToTxnTransactionCollection();
        singleTxn.add(new AddPartitionsToTxnTransaction()
            .setTransactionalId(data.v3AndBelowTransactionalId())
            .setProducerId(data.v3AndBelowProducerId())
            .setProducerEpoch(data.v3AndBelowProducerEpoch())
            .setTopics(data.v3AndBelowTopics()));
        return singleTxn;
    }

    /**
     * Builds a per-transaction error result replicating {@code e} on every partition of
     * the named transaction.
     * NOTE(review): {@code data.transactions().find(transactionalId)} can return null
     * for an unknown id; callers appear to only pass ids present in this request — confirm.
     */
    public AddPartitionsToTxnResult errorResponseForTransaction(String transactionalId, Errors e) {
        AddPartitionsToTxnResult txnResult = new AddPartitionsToTxnResult().setTransactionalId(transactionalId);
        AddPartitionsToTxnTopicResultCollection topicResults =
            errorResponseForTopics(data.transactions().find(transactionalId).topics(), e);
        txnResult.setTopicResults(topicResults);
        return txnResult;
    }

    /**
     * Replicates the given error across every partition of every topic in {@code topics}.
     */
    private AddPartitionsToTxnTopicResultCollection errorResponseForTopics(AddPartitionsToTxnTopicCollection topics, Errors e) {
        AddPartitionsToTxnTopicResultCollection topicResults = new AddPartitionsToTxnTopicResultCollection();
        for (AddPartitionsToTxnTopic topic : topics) {
            AddPartitionsToTxnTopicResult topicResult = new AddPartitionsToTxnTopicResult().setName(topic.name());
            AddPartitionsToTxnPartitionResultCollection partitionResult = new AddPartitionsToTxnPartitionResultCollection();
            for (Integer partition : topic.partitions()) {
                partitionResult.add(new AddPartitionsToTxnPartitionResult()
                    .setPartitionIndex(partition)
                    .setPartitionErrorCode(e.code()));
            }
            topicResult.setResultsByPartition(partitionResult);
            topicResults.add(topicResult);
        }
        return topicResults;
    }

    public static AddPartitionsToTxnRequest parse(ByteBuffer buffer, short version) {
        return new AddPartitionsToTxnRequest(
            new AddPartitionsToTxnRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AddPartitionsToTxnResponse.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.requests;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnResult;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnPartitionResult;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnPartitionResultCollection;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnTopicResult;
import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnTopicResultCollection;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Possible error codes:
 *
 *   - {@link Errors#NOT_COORDINATOR}
 *   - {@link Errors#COORDINATOR_NOT_AVAILABLE}
 *   - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS}
 *   - {@link Errors#INVALID_TXN_STATE}
 *   - {@link Errors#INVALID_PRODUCER_ID_MAPPING}
 *   - {@link Errors#INVALID_PRODUCER_EPOCH} // for version <=1
 *   - {@link Errors#PRODUCER_FENCED}
 *   - {@link Errors#TOPIC_AUTHORIZATION_FAILED}
 *   - {@link Errors#TRANSACTIONAL_ID_AUTHORIZATION_FAILED}
 *   - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION}
 */
public class AddPartitionsToTxnResponse extends AbstractResponse {

    private final AddPartitionsToTxnResponseData data;

    // Placeholder transactional id used to key the single implicit transaction of a
    // version <= 3 response in maps returned by errors().
    public static final String V3_AND_BELOW_TXN_ID = "";

    public AddPartitionsToTxnResponse(AddPartitionsToTxnResponseData data) {
        super(ApiKeys.ADD_PARTITIONS_TO_TXN);
        this.data = data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /**
     * Per-partition errors for every transaction in this response, keyed by
     * transactional id. A version <= 3 response (which carries no transactional id on
     * the wire) is keyed under {@link #V3_AND_BELOW_TXN_ID}.
     */
    public Map<String, Map<TopicPartition, Errors>> errors() {
        Map<String, Map<TopicPartition, Errors>> errorsMap = new HashMap<>();

        if (!this.data.resultsByTopicV3AndBelow().isEmpty()) {
            errorsMap.put(V3_AND_BELOW_TXN_ID, errorsForTransaction(this.data.resultsByTopicV3AndBelow()));
        }

        for (AddPartitionsToTxnResult result : this.data.resultsByTransaction()) {
            errorsMap.put(result.transactionalId(), errorsForTransaction(result.topicResults()));
        }
        return errorsMap;
    }

    /**
     * Converts a per-partition error map into the wire-format topic result collection
     * (the inverse of {@link #errorsForTransaction}).
     */
    private static AddPartitionsToTxnTopicResultCollection topicCollectionForErrors(Map<TopicPartition, Errors> errors) {
        Map<String, AddPartitionsToTxnPartitionResultCollection> resultMap = new HashMap<>();

        for (Map.Entry<TopicPartition, Errors> entry : errors.entrySet()) {
            TopicPartition topicPartition = entry.getKey();
            String topicName = topicPartition.topic();

            AddPartitionsToTxnPartitionResult partitionResult = new AddPartitionsToTxnPartitionResult()
                .setPartitionErrorCode(entry.getValue().code())
                .setPartitionIndex(topicPartition.partition());

            // Accumulate partition results per topic; getOrDefault creates the
            // collection on first sight of a topic, and put stores it back.
            AddPartitionsToTxnPartitionResultCollection partitionResultCollection = resultMap.getOrDefault(
                topicName, new AddPartitionsToTxnPartitionResultCollection()
            );

            partitionResultCollection.add(partitionResult);
            resultMap.put(topicName, partitionResultCollection);
        }

        AddPartitionsToTxnTopicResultCollection topicCollection = new AddPartitionsToTxnTopicResultCollection();
        for (Map.Entry<String, AddPartitionsToTxnPartitionResultCollection> entry : resultMap.entrySet()) {
            topicCollection.add(new AddPartitionsToTxnTopicResult()
                .setName(entry.getKey())
                .setResultsByPartition(entry.getValue()));
        }
        return topicCollection;
    }

    /**
     * Builds a single transaction's wire-format result from a per-partition error map.
     */
    public static AddPartitionsToTxnResult resultForTransaction(String transactionalId, Map<TopicPartition, Errors> errors) {
        return new AddPartitionsToTxnResult().setTransactionalId(transactionalId).setTopicResults(topicCollectionForErrors(errors));
    }

    /**
     * Topic results for the given transactional id.
     * NOTE(review): find(...) may return null if the id is absent from this response —
     * callers appear to only query ids they sent; confirm.
     */
    public AddPartitionsToTxnTopicResultCollection getTransactionTopicResults(String transactionalId) {
        return data.resultsByTransaction().find(transactionalId).topicResults();
    }

    /**
     * Flattens a topic result collection into a map of partition to error.
     */
    public static Map<TopicPartition, Errors> errorsForTransaction(AddPartitionsToTxnTopicResultCollection topicCollection) {
        Map<TopicPartition, Errors> topicResults = new HashMap<>();
        for (AddPartitionsToTxnTopicResult topicResult : topicCollection) {
            for (AddPartitionsToTxnPartitionResult partitionResult : topicResult.resultsByPartition()) {
                topicResults.put(
                    new TopicPartition(topicResult.name(), partitionResult.partitionIndex()),
                    Errors.forCode(partitionResult.partitionErrorCode()));
            }
        }
        return topicResults;
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        List<Errors> allErrors = new ArrayList<>();

        // If we are not using this field, we have request 4 or later
        if (this.data.resultsByTopicV3AndBelow().isEmpty()) {
            allErrors.add(Errors.forCode(data.errorCode()));
        }

        errors().forEach((txnId, errors) ->
            allErrors.addAll(errors.values())
        );
        return errorCounts(allErrors);
    }

    @Override
    public AddPartitionsToTxnResponseData data() {
        return data;
    }

    public static AddPartitionsToTxnResponse parse(ByteBuffer buffer, short version) {
        return new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData(new ByteBufferAccessor(buffer), version));
    }

    @Override
    public String toString() {
        return data.toString();
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        return version >= 1;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AllocateProducerIdsRequest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.requests;

import org.apache.kafka.common.message.AllocateProducerIdsRequestData;
import org.apache.kafka.common.message.AllocateProducerIdsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;

import java.nio.ByteBuffer;

/**
 * The AllocateProducerIds request, sent by a broker to reserve a block of producer ids.
 */
public class AllocateProducerIdsRequest extends AbstractRequest {

    /** Builder that wraps a pre-populated request data message. */
    public static class Builder extends AbstractRequest.Builder<AllocateProducerIdsRequest> {
        private final AllocateProducerIdsRequestData data;

        public Builder(AllocateProducerIdsRequestData data) {
            super(ApiKeys.ALLOCATE_PRODUCER_IDS);
            this.data = data;
        }

        @Override
        public AllocateProducerIdsRequest build(short version) {
            return new AllocateProducerIdsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final AllocateProducerIdsRequestData data;

    public AllocateProducerIdsRequest(AllocateProducerIdsRequestData data, short version) {
        super(ApiKeys.ALLOCATE_PRODUCER_IDS, version);
        this.data = data;
    }

    @Override
    public AllocateProducerIdsRequestData data() {
        return data;
    }

    /**
     * Builds an error response carrying the throttle time and the error code derived
     * from the given exception.
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        Errors error = Errors.forException(e);
        AllocateProducerIdsResponseData responseData = new AllocateProducerIdsResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setErrorCode(error.code());
        return new AllocateProducerIdsResponse(responseData);
    }

    /** Deserializes a request of the given version from the buffer. */
    public static AllocateProducerIdsRequest parse(ByteBuffer buffer, short version) {
        AllocateProducerIdsRequestData requestData =
            new AllocateProducerIdsRequestData(new ByteBufferAccessor(buffer), version);
        return new AllocateProducerIdsRequest(requestData, version);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AllocateProducerIdsResponse.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.requests;

import org.apache.kafka.common.message.AllocateProducerIdsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;

import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Map;

/**
 * The AllocateProducerIds response, carrying a single top-level error code.
 */
public class AllocateProducerIdsResponse extends AbstractResponse {

    private final AllocateProducerIdsResponseData data;

    public AllocateProducerIdsResponse(AllocateProducerIdsResponseData data) {
        super(ApiKeys.ALLOCATE_PRODUCER_IDS);
        this.data = data;
    }

    @Override
    public AllocateProducerIdsResponseData data() {
        return data;
    }

    /** The top-level error of this response. */
    public Errors error() {
        return Errors.forCode(data.errorCode());
    }

    /**
     * The number of each type of error in the response, including {@link Errors#NONE} and top-level errors as well as
     * more specifically scoped errors (such as topic or partition-level errors).
     *
     * @return A count of errors.
     */
    @Override
    public Map<Errors, Integer> errorCounts() {
        // Only one top-level error code exists on this response.
        return Collections.singletonMap(error(), 1);
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /** Deserializes a response of the given version from the buffer. */
    public static AllocateProducerIdsResponse parse(ByteBuffer buffer, short version) {
        AllocateProducerIdsResponseData responseData =
            new AllocateProducerIdsResponseData(new ByteBufferAccessor(buffer), version);
        return new AllocateProducerIdsResponse(responseData);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AlterClientQuotasRequest.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.requests;

import org.apache.kafka.common.message.AlterClientQuotasRequestData;
import org.apache.kafka.common.message.AlterClientQuotasRequestData.EntityData;
import org.apache.kafka.common.message.AlterClientQuotasRequestData.EntryData;
import org.apache.kafka.common.message.AlterClientQuotasRequestData.OpData;
import org.apache.kafka.common.message.AlterClientQuotasResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.quota.ClientQuotaAlteration;
import org.apache.kafka.common.quota.ClientQuotaEntity;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * The AlterClientQuotas request. Translates between the public
 * {@link ClientQuotaAlteration}/{@link ClientQuotaEntity} types and the generated
 * wire-format message types.
 */
public class AlterClientQuotasRequest extends AbstractRequest {

    public static class Builder extends AbstractRequest.Builder<AlterClientQuotasRequest> {
        private final AlterClientQuotasRequestData data;

        /**
         * Converts the public alteration objects into the wire-format request data.
         *
         * @param entries      the quota alterations to apply
         * @param validateOnly if true, the request only validates and does not apply changes
         */
        public Builder(Collection<ClientQuotaAlteration> entries, boolean validateOnly) {
            super(ApiKeys.ALTER_CLIENT_QUOTAS);

            List<EntryData> entryData = new ArrayList<>(entries.size());
            for (ClientQuotaAlteration entry : entries) {
                // Translate the entity's type -> name map into a list of EntityData.
                List<EntityData> entityData = new ArrayList<>(entry.entity().entries().size());
                for (Map.Entry<String, String> entityEntries : entry.entity().entries().entrySet()) {
                    entityData.add(new EntityData()
                        .setEntityType(entityEntries.getKey())
                        .setEntityName(entityEntries.getValue()));
                }

                // A null op value means "remove this quota key"; the wire format encodes
                // that as remove=true with a 0.0 placeholder value.
                List<OpData> opData = new ArrayList<>(entry.ops().size());
                for (ClientQuotaAlteration.Op op : entry.ops()) {
                    opData.add(new OpData()
                        .setKey(op.key())
                        .setValue(op.value() == null ? 0.0 : op.value())
                        .setRemove(op.value() == null));
                }

                entryData.add(new EntryData()
                    .setEntity(entityData)
                    .setOps(opData));
            }

            this.data = new AlterClientQuotasRequestData()
                .setEntries(entryData)
                .setValidateOnly(validateOnly);
        }

        @Override
        public AlterClientQuotasRequest build(short version) {
            return new AlterClientQuotasRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final AlterClientQuotasRequestData data;

    public AlterClientQuotasRequest(AlterClientQuotasRequestData data, short version) {
        super(ApiKeys.ALTER_CLIENT_QUOTAS, version);
        this.data = data;
    }

    /**
     * Converts the wire-format request data back into public alteration objects
     * (the inverse of the {@link Builder} constructor).
     */
    public List<ClientQuotaAlteration> entries() {
        List<ClientQuotaAlteration> entries = new ArrayList<>(data.entries().size());
        for (EntryData entryData : data.entries()) {
            Map<String, String> entity = new HashMap<>(entryData.entity().size());
            for (EntityData entityData : entryData.entity()) {
                entity.put(entityData.entityType(), entityData.entityName());
            }

            List<ClientQuotaAlteration.Op> ops = new ArrayList<>(entryData.ops().size());
            for (OpData opData : entryData.ops()) {
                // remove=true maps back to a null value (see Builder constructor).
                Double value = opData.remove() ? null : opData.value();
                ops.add(new ClientQuotaAlteration.Op(opData.key(), value));
            }

            entries.add(new ClientQuotaAlteration(new ClientQuotaEntity(entity), ops));
        }
        return entries;
    }

    public boolean validateOnly() {
        return data.validateOnly();
    }

    @Override
    public AlterClientQuotasRequestData data() {
        return data;
    }

    /**
     * Builds an error response replicating the error (code and message) onto every
     * entry of this request.
     */
    @Override
    public AlterClientQuotasResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        Errors error = Errors.forException(e);

        List<AlterClientQuotasResponseData.EntryData> responseEntries = new ArrayList<>();
        for (EntryData entryData : data.entries()) {
            List<AlterClientQuotasResponseData.EntityData> responseEntities = new ArrayList<>();
            for (EntityData entityData : entryData.entity()) {
                responseEntities.add(new AlterClientQuotasResponseData.EntityData()
                    .setEntityType(entityData.entityType())
                    .setEntityName(entityData.entityName()));
            }
            responseEntries.add(new AlterClientQuotasResponseData.EntryData()
                .setEntity(responseEntities)
                .setErrorCode(error.code())
                .setErrorMessage(error.message()));
        }

        AlterClientQuotasResponseData responseData = new AlterClientQuotasResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setEntries(responseEntries);

        return new AlterClientQuotasResponse(responseData);
    }

    public static AlterClientQuotasRequest parse(ByteBuffer buffer, short version) {
        return new AlterClientQuotasRequest(new AlterClientQuotasRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}