index int64 | repo_id string | file_path string | content string |
|---|---|---|---|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SaslAuthenticateRequestDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.BinaryNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.Arrays;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.SaslAuthenticateRequestData.*;
/**
 * JSON converter for {@link SaslAuthenticateRequestData}: translates between the
 * message object and its Jackson JSON-node representation.  The request has a
 * single mandatory field, {@code authBytes}, in every supported version.
 */
public class SaslAuthenticateRequestDataJsonConverter {
    /**
     * Builds a {@link SaslAuthenticateRequestData} from a JSON node.
     *
     * @param _node    the JSON object to read from.
     * @param _version the message version being deserialized.
     * @throws RuntimeException if the mandatory {@code authBytes} field is absent.
     */
    public static SaslAuthenticateRequestData read(JsonNode _node, short _version) {
        SaslAuthenticateRequestData _object = new SaslAuthenticateRequestData();
        JsonNode authBytesNode = _node.get("authBytes");
        // authBytes is mandatory in all versions of this request.
        if (authBytesNode == null) {
            throw new RuntimeException("SaslAuthenticateRequestData: unable to locate field 'authBytes', which is mandatory in version " + _version);
        }
        _object.authBytes = MessageUtil.jsonNodeToBinary(authBytesNode, "SaslAuthenticateRequestData");
        return _object;
    }

    /**
     * Serializes the message to a JSON node.  A defensive copy of the auth
     * bytes is taken so later mutation of the message cannot affect the node.
     */
    public static JsonNode write(SaslAuthenticateRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        byte[] authBytesCopy = Arrays.copyOf(_object.authBytes, _object.authBytes.length);
        _node.set("authBytes", new BinaryNode(authBytesCopy));
        return _node;
    }

    /** Convenience overload that always serializes records. */
    public static JsonNode write(SaslAuthenticateRequestData _object, short _version) {
        return write(_object, _version, true);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SaslAuthenticateResponseData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.Bytes;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
/**
 * Auto-generated data class for the SaslAuthenticate response (api key 36,
 * versions 0-2).  Version 1 added {@code sessionLifetimeMs}; version 2 is the
 * first "flexible" version, using compact (varint-length) strings/bytes and
 * supporting tagged fields.
 *
 * Fix over previous revision: the schema descriptions for
 * {@code session_lifetime_ms} in SCHEMA_1 and SCHEMA_2 were copy-pasted from
 * {@code auth_bytes}; they now describe the session lifetime.  Wire format is
 * unchanged (descriptions are documentation only).
 */
public class SaslAuthenticateResponseData implements ApiMessage {
    // The error code, or 0 if there was no error.
    short errorCode;
    // The error message, or null if there was no error.
    String errorMessage;
    // The SASL authentication bytes from the server, as defined by the SASL mechanism.
    byte[] authBytes;
    // Session lifetime in milliseconds; only serialized in versions 1+.
    long sessionLifetimeMs;
    // Tagged fields whose tags this code does not recognize (flexible versions only).
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("error_message", Type.NULLABLE_STRING, "The error message, or null if there was no error."),
            new Field("auth_bytes", Type.BYTES, "The SASL authentication bytes from the server, as defined by the SASL mechanism.")
        );

    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("error_message", Type.NULLABLE_STRING, "The error message, or null if there was no error."),
            new Field("auth_bytes", Type.BYTES, "The SASL authentication bytes from the server, as defined by the SASL mechanism."),
            new Field("session_lifetime_ms", Type.INT64, "Number of milliseconds after which only re-authentication over the existing connection to continue to communicate with the broker.")
        );

    // Version 2: compact (varint) encodings and a tagged-fields section.
    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The error message, or null if there was no error."),
            new Field("auth_bytes", Type.COMPACT_BYTES, "The SASL authentication bytes from the server, as defined by the SASL mechanism."),
            new Field("session_lifetime_ms", Type.INT64, "Number of milliseconds after which only re-authentication over the existing connection to continue to communicate with the broker."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 2;

    /** Deserializing constructor: populates the fields from {@code _readable}. */
    public SaslAuthenticateResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Creates an instance with default field values. */
    public SaslAuthenticateResponseData() {
        this.errorCode = (short) 0;
        this.errorMessage = "";
        this.authBytes = Bytes.EMPTY;
        this.sessionLifetimeMs = 0L;
    }

    @Override
    public short apiKey() {
        return 36;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 2;
    }

    /**
     * Reads the message from {@code _readable} using version {@code _version}'s
     * wire format.  Compact fields (v2+) use an unsigned varint length of
     * (actual length + 1), where 0 encodes null.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.errorCode = _readable.readShort();
        {
            int length;
            if (_version >= 2) {
                // Compact nullable string: varint length + 1 (0 means null).
                length = _readable.readUnsignedVarint() - 1;
            } else {
                // Non-compact nullable string: INT16 length (-1 means null).
                length = _readable.readShort();
            }
            if (length < 0) {
                this.errorMessage = null;
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field errorMessage had invalid length " + length);
            } else {
                this.errorMessage = _readable.readString(length);
            }
        }
        {
            int length;
            if (_version >= 2) {
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readInt();
            }
            if (length < 0) {
                throw new RuntimeException("non-nullable field authBytes was serialized as null");
            } else {
                byte[] newBytes = _readable.readArray(length);
                this.authBytes = newBytes;
            }
        }
        if (_version >= 1) {
            this.sessionLifetimeMs = _readable.readLong();
        } else {
            // Field absent on the wire before v1; fall back to the default.
            this.sessionLifetimeMs = 0L;
        }
        this._unknownTaggedFields = null;
        if (_version >= 2) {
            // Flexible versions carry a tagged-fields section; none are known here,
            // so everything is preserved as raw unknown tagged fields.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Writes the message to {@code _writable} using version {@code _version}'s
     * wire format.  {@code addSize} must have been called first with the same
     * cache so serialized string bytes are available.
     *
     * @throws UnsupportedVersionException if tagged fields are set for a
     *         pre-flexible (&lt; 2) version.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeShort(errorCode);
        if (errorMessage == null) {
            if (_version >= 2) {
                _writable.writeUnsignedVarint(0);
            } else {
                _writable.writeShort((short) -1);
            }
        } else {
            byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
            if (_version >= 2) {
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
            } else {
                _writable.writeShort((short) _stringBytes.length);
            }
            _writable.writeByteArray(_stringBytes);
        }
        if (_version >= 2) {
            _writable.writeUnsignedVarint(authBytes.length + 1);
        } else {
            _writable.writeInt(authBytes.length);
        }
        _writable.writeByteArray(authBytes);
        if (_version >= 1) {
            _writable.writeLong(sessionLifetimeMs);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 2) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this message into {@code _size},
     * caching serialized strings in {@code _cache} for {@link #write}.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(2);
        if (errorMessage == null) {
            if (_version >= 2) {
                _size.addBytes(1);
            } else {
                _size.addBytes(2);
            }
        } else {
            byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'errorMessage' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(errorMessage, _stringBytes);
            if (_version >= 2) {
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            } else {
                _size.addBytes(_stringBytes.length + 2);
            }
        }
        {
            _size.addBytes(authBytes.length);
            if (_version >= 2) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(authBytes.length + 1));
            } else {
                _size.addBytes(4);
            }
        }
        if (_version >= 1) {
            _size.addBytes(8);
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 2) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof SaslAuthenticateResponseData)) return false;
        SaslAuthenticateResponseData other = (SaslAuthenticateResponseData) obj;
        if (errorCode != other.errorCode) return false;
        if (this.errorMessage == null) {
            if (other.errorMessage != null) return false;
        } else {
            if (!this.errorMessage.equals(other.errorMessage)) return false;
        }
        if (!Arrays.equals(this.authBytes, other.authBytes)) return false;
        if (sessionLifetimeMs != other.sessionLifetimeMs) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + errorCode;
        hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode());
        hashCode = 31 * hashCode + Arrays.hashCode(authBytes);
        hashCode = 31 * hashCode + ((int) (sessionLifetimeMs >> 32) ^ (int) sessionLifetimeMs);
        return hashCode;
    }

    /** Deep copy; {@code authBytes} is duplicated so the copies do not share the array. */
    @Override
    public SaslAuthenticateResponseData duplicate() {
        SaslAuthenticateResponseData _duplicate = new SaslAuthenticateResponseData();
        _duplicate.errorCode = errorCode;
        if (errorMessage == null) {
            _duplicate.errorMessage = null;
        } else {
            _duplicate.errorMessage = errorMessage;
        }
        _duplicate.authBytes = MessageUtil.duplicate(authBytes);
        _duplicate.sessionLifetimeMs = sessionLifetimeMs;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "SaslAuthenticateResponseData("
            + "errorCode=" + errorCode
            + ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'")
            + ", authBytes=" + Arrays.toString(authBytes)
            + ", sessionLifetimeMs=" + sessionLifetimeMs
            + ")";
    }

    public short errorCode() {
        return this.errorCode;
    }

    public String errorMessage() {
        return this.errorMessage;
    }

    public byte[] authBytes() {
        return this.authBytes;
    }

    public long sessionLifetimeMs() {
        return this.sessionLifetimeMs;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public SaslAuthenticateResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }

    public SaslAuthenticateResponseData setErrorMessage(String v) {
        this.errorMessage = v;
        return this;
    }

    public SaslAuthenticateResponseData setAuthBytes(byte[] v) {
        this.authBytes = v;
        return this;
    }

    public SaslAuthenticateResponseData setSessionLifetimeMs(long v) {
        this.sessionLifetimeMs = v;
        return this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SaslAuthenticateResponseDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.BinaryNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.Arrays;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.SaslAuthenticateResponseData.*;
/**
 * JSON converter for {@link SaslAuthenticateResponseData}: translates between
 * the message object and its Jackson JSON-node representation.
 * {@code sessionLifetimeMs} is only present in versions 1+.
 */
public class SaslAuthenticateResponseDataJsonConverter {
    /**
     * Builds a {@link SaslAuthenticateResponseData} from a JSON node.
     *
     * @throws RuntimeException if a field mandatory in {@code _version} is
     *         missing or has the wrong JSON type.
     */
    public static SaslAuthenticateResponseData read(JsonNode _node, short _version) {
        SaslAuthenticateResponseData _object = new SaslAuthenticateResponseData();

        JsonNode errorCodeNode = _node.get("errorCode");
        if (errorCodeNode == null) {
            throw new RuntimeException("SaslAuthenticateResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        }
        _object.errorCode = MessageUtil.jsonNodeToShort(errorCodeNode, "SaslAuthenticateResponseData");

        JsonNode errorMessageNode = _node.get("errorMessage");
        if (errorMessageNode == null) {
            throw new RuntimeException("SaslAuthenticateResponseData: unable to locate field 'errorMessage', which is mandatory in version " + _version);
        }
        // errorMessage is nullable: an explicit JSON null maps to a null string.
        if (errorMessageNode.isNull()) {
            _object.errorMessage = null;
        } else if (!errorMessageNode.isTextual()) {
            throw new RuntimeException("SaslAuthenticateResponseData expected a string type, but got " + _node.getNodeType());
        } else {
            _object.errorMessage = errorMessageNode.asText();
        }

        JsonNode authBytesNode = _node.get("authBytes");
        if (authBytesNode == null) {
            throw new RuntimeException("SaslAuthenticateResponseData: unable to locate field 'authBytes', which is mandatory in version " + _version);
        }
        _object.authBytes = MessageUtil.jsonNodeToBinary(authBytesNode, "SaslAuthenticateResponseData");

        JsonNode sessionLifetimeMsNode = _node.get("sessionLifetimeMs");
        if (sessionLifetimeMsNode != null) {
            _object.sessionLifetimeMs = MessageUtil.jsonNodeToLong(sessionLifetimeMsNode, "SaslAuthenticateResponseData");
        } else if (_version >= 1) {
            // Mandatory from version 1 onward.
            throw new RuntimeException("SaslAuthenticateResponseData: unable to locate field 'sessionLifetimeMs', which is mandatory in version " + _version);
        } else {
            _object.sessionLifetimeMs = 0L;
        }
        return _object;
    }

    /**
     * Serializes the message to a JSON node; version-gated fields are omitted
     * when {@code _version} does not include them.
     */
    public static JsonNode write(SaslAuthenticateResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("errorCode", new ShortNode(_object.errorCode));
        if (_object.errorMessage == null) {
            _node.set("errorMessage", NullNode.instance);
        } else {
            _node.set("errorMessage", new TextNode(_object.errorMessage));
        }
        // Defensive copy so later mutation of the message cannot affect the node.
        byte[] authBytesCopy = Arrays.copyOf(_object.authBytes, _object.authBytes.length);
        _node.set("authBytes", new BinaryNode(authBytesCopy));
        if (_version >= 1) {
            _node.set("sessionLifetimeMs", new LongNode(_object.sessionLifetimeMs));
        }
        return _node;
    }

    /** Convenience overload that always serializes records. */
    public static JsonNode write(SaslAuthenticateResponseData _object, short _version) {
        return write(_object, _version, true);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SaslHandshakeRequestData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
/**
 * Auto-generated data class for the SaslHandshake request (api key 17,
 * versions 0-1).  Carries the single SASL mechanism name chosen by the
 * client.  Neither supported version is "flexible", so tagged fields can
 * never be serialized and any attempt to do so fails.
 */
public class SaslHandshakeRequestData implements ApiMessage {
    // The SASL mechanism chosen by the client.
    String mechanism;
    // Tagged fields whose tags this code does not recognize; versions 0-1 reject them.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("mechanism", Type.STRING, "The SASL mechanism chosen by the client.")
        );

    // Version 1 uses the identical wire schema.
    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 1;

    /** Deserializing constructor: populates the fields from {@code _readable}. */
    public SaslHandshakeRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Creates an instance with default field values (empty mechanism). */
    public SaslHandshakeRequestData() {
        this.mechanism = "";
    }

    @Override
    public short apiKey() {
        return 17;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    /**
     * Reads the message from {@code _readable}.  The mechanism is a
     * non-compact, non-nullable string: INT16 length prefix followed by the
     * UTF-8 bytes; a negative length (the null encoding) is rejected.
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            length = _readable.readShort();
            if (length < 0) {
                throw new RuntimeException("non-nullable field mechanism was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field mechanism had invalid length " + length);
            } else {
                this.mechanism = _readable.readString(length);
            }
        }
        // A fresh read discards any previously captured unknown tagged fields.
        this._unknownTaggedFields = null;
    }

    /**
     * Writes the message to {@code _writable}.  {@link #addSize} must have
     * been called first with the same cache so the serialized mechanism bytes
     * are available.
     *
     * @throws UnsupportedVersionException if any tagged fields are set (no
     *         supported version is flexible).
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = _cache.getSerializedValue(mechanism);
            _writable.writeShort((short) _stringBytes.length);
            _writable.writeByteArray(_stringBytes);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }

    /**
     * Accumulates the serialized size into {@code _size}, caching the UTF-8
     * mechanism bytes in {@code _cache} for the subsequent {@link #write}.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = mechanism.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'mechanism' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(mechanism, _stringBytes);
            // INT16 length prefix plus the string payload.
            _size.addBytes(_stringBytes.length + 2);
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof SaslHandshakeRequestData)) return false;
        SaslHandshakeRequestData other = (SaslHandshakeRequestData) obj;
        if (this.mechanism == null) {
            if (other.mechanism != null) return false;
        } else {
            if (!this.mechanism.equals(other.mechanism)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (mechanism == null ? 0 : mechanism.hashCode());
        return hashCode;
    }

    /** Copy; the immutable mechanism String is shared, not duplicated. */
    @Override
    public SaslHandshakeRequestData duplicate() {
        SaslHandshakeRequestData _duplicate = new SaslHandshakeRequestData();
        _duplicate.mechanism = mechanism;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "SaslHandshakeRequestData("
            + "mechanism=" + ((mechanism == null) ? "null" : "'" + mechanism.toString() + "'")
            + ")";
    }

    public String mechanism() {
        return this.mechanism;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public SaslHandshakeRequestData setMechanism(String v) {
        this.mechanism = v;
        return this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SaslHandshakeRequestDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import static org.apache.kafka.common.message.SaslHandshakeRequestData.*;
/**
 * JSON converter for {@link SaslHandshakeRequestData}: translates between the
 * message object and its Jackson JSON-node representation.  The request has a
 * single mandatory string field, {@code mechanism}.
 */
public class SaslHandshakeRequestDataJsonConverter {
    /**
     * Builds a {@link SaslHandshakeRequestData} from a JSON node.
     *
     * @throws RuntimeException if {@code mechanism} is absent or not a string.
     */
    public static SaslHandshakeRequestData read(JsonNode _node, short _version) {
        SaslHandshakeRequestData _object = new SaslHandshakeRequestData();
        JsonNode mechanismNode = _node.get("mechanism");
        if (mechanismNode == null) {
            throw new RuntimeException("SaslHandshakeRequestData: unable to locate field 'mechanism', which is mandatory in version " + _version);
        }
        if (!mechanismNode.isTextual()) {
            throw new RuntimeException("SaslHandshakeRequestData expected a string type, but got " + _node.getNodeType());
        }
        _object.mechanism = mechanismNode.asText();
        return _object;
    }

    /** Serializes the message to a JSON node. */
    public static JsonNode write(SaslHandshakeRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("mechanism", new TextNode(_object.mechanism));
        return _node;
    }

    /** Convenience overload that always serializes records. */
    public static JsonNode write(SaslHandshakeRequestData _object, short _version) {
        return write(_object, _version, true);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SaslHandshakeResponseData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
public class SaslHandshakeResponseData implements ApiMessage {
short errorCode;
List<String> mechanisms;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
new Field("mechanisms", new ArrayOf(Type.STRING), "The mechanisms enabled in the server.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 1;
public SaslHandshakeResponseData(Readable _readable, short _version) {
read(_readable, _version);
}
public SaslHandshakeResponseData() {
this.errorCode = (short) 0;
this.mechanisms = new ArrayList<String>(0);
}
@Override
public short apiKey() {
return 17;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
@Override
public void read(Readable _readable, short _version) {
this.errorCode = _readable.readShort();
{
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field mechanisms was serialized as null");
} else {
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<String> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field mechanisms element was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field mechanisms element had invalid length " + length);
} else {
newCollection.add(_readable.readString(length));
}
}
this.mechanisms = newCollection;
}
}
this._unknownTaggedFields = null;
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeShort(errorCode);
_writable.writeInt(mechanisms.size());
for (String mechanismsElement : mechanisms) {
{
byte[] _stringBytes = _cache.getSerializedValue(mechanismsElement);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_size.addBytes(2);
{
_size.addBytes(4);
for (String mechanismsElement : mechanisms) {
byte[] _stringBytes = mechanismsElement.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'mechanismsElement' field is too long to be serialized");
}
_cache.cacheSerializedValue(mechanismsElement, _stringBytes);
_size.addBytes(_stringBytes.length + 2);
}
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof SaslHandshakeResponseData)) return false;
SaslHandshakeResponseData other = (SaslHandshakeResponseData) obj;
if (errorCode != other.errorCode) return false;
if (this.mechanisms == null) {
if (other.mechanisms != null) return false;
} else {
if (!this.mechanisms.equals(other.mechanisms)) return false;
}
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + errorCode;
hashCode = 31 * hashCode + (mechanisms == null ? 0 : mechanisms.hashCode());
return hashCode;
}
@Override
public SaslHandshakeResponseData duplicate() {
SaslHandshakeResponseData _duplicate = new SaslHandshakeResponseData();
_duplicate.errorCode = errorCode;
ArrayList<String> newMechanisms = new ArrayList<String>(mechanisms.size());
for (String _element : mechanisms) {
newMechanisms.add(_element);
}
_duplicate.mechanisms = newMechanisms;
return _duplicate;
}
@Override
public String toString() {
return "SaslHandshakeResponseData("
+ "errorCode=" + errorCode
+ ", mechanisms=" + MessageUtil.deepToString(mechanisms.iterator())
+ ")";
}
    public short errorCode() {
        return this.errorCode;
    }
    public List<String> mechanisms() {
        return this.mechanisms;
    }
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        // Lazily allocated so messages without unknown tagged fields pay no cost.
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    // Fluent setter; returns this for chaining.
    public SaslHandshakeResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }
    // Fluent setter; returns this for chaining.
    public SaslHandshakeResponseData setMechanisms(List<String> v) {
        this.mechanisms = v;
        return this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SaslHandshakeResponseDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.SaslHandshakeResponseData.*;
public class SaslHandshakeResponseDataJsonConverter {
    /**
     * Reads a SaslHandshakeResponseData from its JSON representation.
     *
     * @param _node    the JSON object holding the serialized message
     * @param _version the message version being parsed (only used in error messages)
     * @return the populated message
     * @throws RuntimeException if a mandatory field is missing or has the wrong JSON type
     */
    public static SaslHandshakeResponseData read(JsonNode _node, short _version) {
        SaslHandshakeResponseData _object = new SaslHandshakeResponseData();
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("SaslHandshakeResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "SaslHandshakeResponseData");
        }
        JsonNode _mechanismsNode = _node.get("mechanisms");
        if (_mechanismsNode == null) {
            throw new RuntimeException("SaslHandshakeResponseData: unable to locate field 'mechanisms', which is mandatory in version " + _version);
        } else {
            if (!_mechanismsNode.isArray()) {
                // Fix: report the type of the field node itself; the previous message used
                // _node.getNodeType(), which always prints OBJECT and hides the real problem.
                throw new RuntimeException("SaslHandshakeResponseData expected a JSON array, but got " + _mechanismsNode.getNodeType());
            }
            ArrayList<String> _collection = new ArrayList<String>(_mechanismsNode.size());
            _object.mechanisms = _collection;
            for (JsonNode _element : _mechanismsNode) {
                if (!_element.isTextual()) {
                    // Fix: report the type of the offending array element, not the enclosing object.
                    throw new RuntimeException("SaslHandshakeResponseData element expected a string type, but got " + _element.getNodeType());
                }
                _collection.add(_element.asText());
            }
        }
        return _object;
    }
    /**
     * Writes a SaslHandshakeResponseData to JSON.
     *
     * @param _serializeRecords unused for this message type (it contains no record data)
     * @return a JSON object with "errorCode" and "mechanisms" fields
     */
    public static JsonNode write(SaslHandshakeResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("errorCode", new ShortNode(_object.errorCode));
        ArrayNode _mechanismsArray = new ArrayNode(JsonNodeFactory.instance);
        for (String _element : _object.mechanisms) {
            _mechanismsArray.add(new TextNode(_element));
        }
        _node.set("mechanisms", _mechanismsArray);
        return _node;
    }
    /**
     * Convenience overload that always serializes records.
     */
    public static JsonNode write(SaslHandshakeResponseData _object, short _version) {
        return write(_object, _version, true);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SnapshotFooterRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
public class SnapshotFooterRecord implements ApiMessage {
    // The version of the snapshot footer record (see SCHEMA_0 field docs).
    short version;
    // Tagged fields with tags this code does not recognize; preserved for round-tripping.
    private List<RawTaggedField> _unknownTaggedFields;
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("version", Type.INT16, "The version of the snapshot footer record"),
            TaggedFieldsSection.of(
            )
        );
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };
    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;
    // Deserializing constructor: populates this record from the readable stream.
    public SnapshotFooterRecord(Readable _readable, short _version) {
        read(_readable, _version);
    }
    // Default constructor: fields take their schema defaults.
    public SnapshotFooterRecord() {
        this.version = (short) 0;
    }
    @Override
    public short apiKey() {
        // -1 sentinel: no request API key is associated with this record type.
        return -1;
    }
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 0;
    }
    @Override
    public void read(Readable _readable, short _version) {
        this.version = _readable.readShort();
        this._unknownTaggedFields = null;
        // This schema declares no tagged fields, so any that appear are kept raw.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeShort(version);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        // Flexible encoding: varint field count followed by the raw tagged fields.
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        // Must mirror write() byte for byte.
        int _numTaggedFields = 0;
        _size.addBytes(2);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                // varint tag + varint size + payload bytes per raw tagged field.
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof SnapshotFooterRecord)) return false;
        SnapshotFooterRecord other = (SnapshotFooterRecord) obj;
        if (version != other.version) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }
    @Override
    public int hashCode() {
        // Unknown tagged fields are not folded into the hash (equals does compare them).
        int hashCode = 0;
        hashCode = 31 * hashCode + version;
        return hashCode;
    }
    @Override
    public SnapshotFooterRecord duplicate() {
        SnapshotFooterRecord _duplicate = new SnapshotFooterRecord();
        _duplicate.version = version;
        return _duplicate;
    }
    @Override
    public String toString() {
        return "SnapshotFooterRecord("
            + "version=" + version
            + ")";
    }
    public short version() {
        return this.version;
    }
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        // Lazily allocated so records without tagged fields pay no cost.
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    // Fluent setter; returns this for chaining.
    public SnapshotFooterRecord setVersion(short v) {
        this.version = v;
        return this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SnapshotFooterRecordJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.SnapshotFooterRecord.*;
public class SnapshotFooterRecordJsonConverter {
    /**
     * Deserializes a SnapshotFooterRecord from JSON.
     *
     * @throws RuntimeException if the mandatory "version" field is absent
     */
    public static SnapshotFooterRecord read(JsonNode _node, short _version) {
        // Guard clause instead of if/else: fail fast when the field is missing.
        JsonNode _versionNode = _node.get("version");
        if (_versionNode == null) {
            throw new RuntimeException("SnapshotFooterRecord: unable to locate field 'version', which is mandatory in version " + _version);
        }
        SnapshotFooterRecord _object = new SnapshotFooterRecord();
        _object.version = MessageUtil.jsonNodeToShort(_versionNode, "SnapshotFooterRecord");
        return _object;
    }
    /**
     * Serializes a SnapshotFooterRecord to JSON. The _serializeRecords flag is
     * unused for this record type.
     */
    public static JsonNode write(SnapshotFooterRecord _object, short _version, boolean _serializeRecords) {
        ObjectNode _result = new ObjectNode(JsonNodeFactory.instance);
        _result.set("version", new ShortNode(_object.version));
        return _result;
    }
    /**
     * Convenience overload that always serializes records.
     */
    public static JsonNode write(SnapshotFooterRecord _object, short _version) {
        return write(_object, _version, true);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SnapshotHeaderRecord.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
public class SnapshotHeaderRecord implements ApiMessage {
    // The version of the snapshot header record (see SCHEMA_0 field docs).
    short version;
    // The append time of the last record from the log contained in this snapshot.
    long lastContainedLogTimestamp;
    // Tagged fields with tags this code does not recognize; preserved for round-tripping.
    private List<RawTaggedField> _unknownTaggedFields;
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("version", Type.INT16, "The version of the snapshot header record"),
            new Field("last_contained_log_timestamp", Type.INT64, "The append time of the last record from the log contained in this snapshot"),
            TaggedFieldsSection.of(
            )
        );
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };
    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;
    // Deserializing constructor: populates this record from the readable stream.
    public SnapshotHeaderRecord(Readable _readable, short _version) {
        read(_readable, _version);
    }
    // Default constructor: fields take their schema defaults.
    public SnapshotHeaderRecord() {
        this.version = (short) 0;
        this.lastContainedLogTimestamp = 0L;
    }
    @Override
    public short apiKey() {
        // -1 sentinel: no request API key is associated with this record type.
        return -1;
    }
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 0;
    }
    @Override
    public void read(Readable _readable, short _version) {
        this.version = _readable.readShort();
        this.lastContainedLogTimestamp = _readable.readLong();
        this._unknownTaggedFields = null;
        // This schema declares no tagged fields, so any that appear are kept raw.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeShort(version);
        _writable.writeLong(lastContainedLogTimestamp);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        // Flexible encoding: varint field count followed by the raw tagged fields.
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        // Must mirror write() byte for byte: INT16 version + INT64 timestamp + tagged fields.
        int _numTaggedFields = 0;
        _size.addBytes(2);
        _size.addBytes(8);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                // varint tag + varint size + payload bytes per raw tagged field.
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof SnapshotHeaderRecord)) return false;
        SnapshotHeaderRecord other = (SnapshotHeaderRecord) obj;
        if (version != other.version) return false;
        if (lastContainedLogTimestamp != other.lastContainedLogTimestamp) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }
    @Override
    public int hashCode() {
        // Long is folded into the hash by XOR-ing its high and low 32 bits.
        int hashCode = 0;
        hashCode = 31 * hashCode + version;
        hashCode = 31 * hashCode + ((int) (lastContainedLogTimestamp >> 32) ^ (int) lastContainedLogTimestamp);
        return hashCode;
    }
    @Override
    public SnapshotHeaderRecord duplicate() {
        SnapshotHeaderRecord _duplicate = new SnapshotHeaderRecord();
        _duplicate.version = version;
        _duplicate.lastContainedLogTimestamp = lastContainedLogTimestamp;
        return _duplicate;
    }
    @Override
    public String toString() {
        return "SnapshotHeaderRecord("
            + "version=" + version
            + ", lastContainedLogTimestamp=" + lastContainedLogTimestamp
            + ")";
    }
    public short version() {
        return this.version;
    }
    public long lastContainedLogTimestamp() {
        return this.lastContainedLogTimestamp;
    }
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        // Lazily allocated so records without tagged fields pay no cost.
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    // Fluent setter; returns this for chaining.
    public SnapshotHeaderRecord setVersion(short v) {
        this.version = v;
        return this;
    }
    // Fluent setter; returns this for chaining.
    public SnapshotHeaderRecord setLastContainedLogTimestamp(long v) {
        this.lastContainedLogTimestamp = v;
        return this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SnapshotHeaderRecordJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.SnapshotHeaderRecord.*;
public class SnapshotHeaderRecordJsonConverter {
    /**
     * Deserializes a SnapshotHeaderRecord from JSON.
     *
     * @throws RuntimeException if a mandatory field is absent
     */
    public static SnapshotHeaderRecord read(JsonNode _node, short _version) {
        // Guard clauses instead of if/else chains: fail fast on missing fields.
        JsonNode _versionNode = _node.get("version");
        if (_versionNode == null) {
            throw new RuntimeException("SnapshotHeaderRecord: unable to locate field 'version', which is mandatory in version " + _version);
        }
        JsonNode _lastContainedLogTimestampNode = _node.get("lastContainedLogTimestamp");
        if (_lastContainedLogTimestampNode == null) {
            throw new RuntimeException("SnapshotHeaderRecord: unable to locate field 'lastContainedLogTimestamp', which is mandatory in version " + _version);
        }
        SnapshotHeaderRecord _object = new SnapshotHeaderRecord();
        _object.version = MessageUtil.jsonNodeToShort(_versionNode, "SnapshotHeaderRecord");
        _object.lastContainedLogTimestamp = MessageUtil.jsonNodeToLong(_lastContainedLogTimestampNode, "SnapshotHeaderRecord");
        return _object;
    }
    /**
     * Serializes a SnapshotHeaderRecord to JSON. The _serializeRecords flag is
     * unused for this record type.
     */
    public static JsonNode write(SnapshotHeaderRecord _object, short _version, boolean _serializeRecords) {
        ObjectNode _result = new ObjectNode(JsonNodeFactory.instance);
        _result.set("version", new ShortNode(_object.version));
        _result.set("lastContainedLogTimestamp", new LongNode(_object.lastContainedLogTimestamp));
        return _result;
    }
    /**
     * Convenience overload that always serializes records.
     */
    public static JsonNode write(SnapshotHeaderRecord _object, short _version) {
        return write(_object, _version, true);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/StopReplicaRequestData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
public class StopReplicaRequestData implements ApiMessage {
    int controllerId;
    // v4+ only (see SCHEMA_4 / KIP-866 note below).
    boolean isKRaftController;
    int controllerEpoch;
    // v1+ only; defaults to -1 below that.
    long brokerEpoch;
    // v0-v2 only; v3+ carries per-topic delete flags inside topicStates instead.
    boolean deletePartitions;
    // v0 only: partitions listed without topic grouping.
    List<StopReplicaPartitionV0> ungroupedPartitions;
    // v1-v2 only: partitions grouped by topic.
    List<StopReplicaTopicV1> topics;
    // v3+ only: per-topic state replacing the topics list.
    List<StopReplicaTopicState> topicStates;
    // Tagged fields with tags this code does not recognize; preserved for round-tripping.
    private List<RawTaggedField> _unknownTaggedFields;
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("controller_id", Type.INT32, "The controller id."),
            new Field("controller_epoch", Type.INT32, "The controller epoch."),
            new Field("delete_partitions", Type.BOOLEAN, "Whether these partitions should be deleted."),
            new Field("ungrouped_partitions", new ArrayOf(StopReplicaPartitionV0.SCHEMA_0), "The partitions to stop.")
        );
    // v1 adds broker_epoch and groups partitions by topic.
    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("controller_id", Type.INT32, "The controller id."),
            new Field("controller_epoch", Type.INT32, "The controller epoch."),
            new Field("broker_epoch", Type.INT64, "The broker epoch."),
            new Field("delete_partitions", Type.BOOLEAN, "Whether these partitions should be deleted."),
            new Field("topics", new ArrayOf(StopReplicaTopicV1.SCHEMA_1), "The topics to stop.")
        );
    // v2 switches to the compact (flexible) encoding with tagged-field support.
    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("controller_id", Type.INT32, "The controller id."),
            new Field("controller_epoch", Type.INT32, "The controller epoch."),
            new Field("broker_epoch", Type.INT64, "The broker epoch."),
            new Field("delete_partitions", Type.BOOLEAN, "Whether these partitions should be deleted."),
            new Field("topics", new CompactArrayOf(StopReplicaTopicV1.SCHEMA_2), "The topics to stop."),
            TaggedFieldsSection.of(
            )
        );
    // v3 replaces delete_partitions + topics with per-topic topic_states.
    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("controller_id", Type.INT32, "The controller id."),
            new Field("controller_epoch", Type.INT32, "The controller epoch."),
            new Field("broker_epoch", Type.INT64, "The broker epoch."),
            new Field("topic_states", new CompactArrayOf(StopReplicaTopicState.SCHEMA_3), "Each topic."),
            TaggedFieldsSection.of(
            )
        );
    // v4 adds the is_kraft_controller flag for the ZK-to-KRaft migration (KIP-866).
    public static final Schema SCHEMA_4 =
        new Schema(
            new Field("controller_id", Type.INT32, "The controller id."),
            new Field("is_kraft_controller", Type.BOOLEAN, "If KRaft controller id is used during migration. See KIP-866"),
            new Field("controller_epoch", Type.INT32, "The controller epoch."),
            new Field("broker_epoch", Type.INT64, "The broker epoch."),
            new Field("topic_states", new CompactArrayOf(StopReplicaTopicState.SCHEMA_3), "Each topic."),
            TaggedFieldsSection.of(
            )
        );
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3,
        SCHEMA_4
    };
    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 4;
    // Deserializing constructor: populates this message from the readable stream.
    public StopReplicaRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }
    // Default constructor: all fields take their schema defaults.
    public StopReplicaRequestData() {
        this.controllerId = 0;
        this.isKRaftController = false;
        this.controllerEpoch = 0;
        // -1 is the default broker epoch for versions that do not carry one.
        this.brokerEpoch = -1L;
        this.deletePartitions = false;
        this.ungroupedPartitions = new ArrayList<StopReplicaPartitionV0>(0);
        this.topics = new ArrayList<StopReplicaTopicV1>(0);
        this.topicStates = new ArrayList<StopReplicaTopicState>(0);
    }
    @Override
    public short apiKey() {
        // 5 is the StopReplica API key.
        return 5;
    }
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 4;
    }
    @Override
    public void read(Readable _readable, short _version) {
        // Decodes the wire format for the given version. Each version-gated field
        // that is absent on the wire is reset to its schema default.
        this.controllerId = _readable.readInt();
        if (_version >= 4) {
            this.isKRaftController = _readable.readByte() != 0;
        } else {
            this.isKRaftController = false;
        }
        this.controllerEpoch = _readable.readInt();
        if (_version >= 1) {
            this.brokerEpoch = _readable.readLong();
        } else {
            this.brokerEpoch = -1L;
        }
        if (_version <= 2) {
            this.deletePartitions = _readable.readByte() != 0;
        } else {
            this.deletePartitions = false;
        }
        // v0 only: flat INT32-prefixed array of ungrouped partitions.
        if (_version <= 0) {
            int arrayLength;
            arrayLength = _readable.readInt();
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field ungroupedPartitions was serialized as null");
            } else {
                // Guard against corrupt/hostile length prefixes causing huge allocations.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<StopReplicaPartitionV0> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new StopReplicaPartitionV0(_readable, _version));
                }
                this.ungroupedPartitions = newCollection;
            }
        } else {
            this.ungroupedPartitions = new ArrayList<StopReplicaPartitionV0>(0);
        }
        // v1-v2: topics array; v2 uses the compact (varint length + 1) encoding.
        if ((_version >= 1) && (_version <= 2)) {
            if (_version >= 2) {
                int arrayLength;
                // Compact arrays encode length + 1 as an unsigned varint (0 means null).
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field topics was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<StopReplicaTopicV1> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new StopReplicaTopicV1(_readable, _version));
                    }
                    this.topics = newCollection;
                }
            } else {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field topics was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<StopReplicaTopicV1> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new StopReplicaTopicV1(_readable, _version));
                    }
                    this.topics = newCollection;
                }
            }
        } else {
            this.topics = new ArrayList<StopReplicaTopicV1>(0);
        }
        // v3+: compact array of per-topic states.
        if (_version >= 3) {
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field topicStates was serialized as null");
            } else {
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<StopReplicaTopicState> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new StopReplicaTopicState(_readable, _version));
                }
                this.topicStates = newCollection;
            }
        } else {
            this.topicStates = new ArrayList<StopReplicaTopicState>(0);
        }
        this._unknownTaggedFields = null;
        // Only flexible versions (v2+) carry a trailing tagged-field section.
        if (_version >= 2) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        // Encodes the wire format for the given version. Fields that the target
        // version cannot carry must still hold their defaults, or we throw rather
        // than silently drop data.
        int _numTaggedFields = 0;
        _writable.writeInt(controllerId);
        if (_version >= 4) {
            _writable.writeByte(isKRaftController ? (byte) 1 : (byte) 0);
        } else {
            if (this.isKRaftController) {
                throw new UnsupportedVersionException("Attempted to write a non-default isKRaftController at version " + _version);
            }
        }
        _writable.writeInt(controllerEpoch);
        if (_version >= 1) {
            _writable.writeLong(brokerEpoch);
        }
        if (_version <= 2) {
            _writable.writeByte(deletePartitions ? (byte) 1 : (byte) 0);
        } else {
            if (this.deletePartitions) {
                throw new UnsupportedVersionException("Attempted to write a non-default deletePartitions at version " + _version);
            }
        }
        // v0 only: flat INT32-prefixed array of ungrouped partitions.
        if (_version <= 0) {
            _writable.writeInt(ungroupedPartitions.size());
            for (StopReplicaPartitionV0 ungroupedPartitionsElement : ungroupedPartitions) {
                ungroupedPartitionsElement.write(_writable, _cache, _version);
            }
        } else {
            if (!this.ungroupedPartitions.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default ungroupedPartitions at version " + _version);
            }
        }
        // v1-v2: topics array; v2 uses the compact (varint length + 1) encoding.
        if ((_version >= 1) && (_version <= 2)) {
            if (_version >= 2) {
                _writable.writeUnsignedVarint(topics.size() + 1);
                for (StopReplicaTopicV1 topicsElement : topics) {
                    topicsElement.write(_writable, _cache, _version);
                }
            } else {
                _writable.writeInt(topics.size());
                for (StopReplicaTopicV1 topicsElement : topics) {
                    topicsElement.write(_writable, _cache, _version);
                }
            }
        } else {
            if (!this.topics.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default topics at version " + _version);
            }
        }
        // v3+: compact array of per-topic states.
        if (_version >= 3) {
            _writable.writeUnsignedVarint(topicStates.size() + 1);
            for (StopReplicaTopicState topicStatesElement : topicStates) {
                topicStatesElement.write(_writable, _cache, _version);
            }
        } else {
            if (!this.topicStates.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default topicStates at version " + _version);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        // Only flexible versions (v2+) can carry a tagged-field section.
        if (_version >= 2) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        // Accumulates the serialized size for the given version; the byte counts
        // and version gates here must mirror write() exactly.
        int _numTaggedFields = 0;
        _size.addBytes(4);
        if (_version >= 4) {
            _size.addBytes(1);
        }
        _size.addBytes(4);
        if (_version >= 1) {
            _size.addBytes(8);
        }
        if (_version <= 2) {
            _size.addBytes(1);
        }
        if (_version <= 0) {
            {
                // INT32 length prefix plus each element's own size.
                _size.addBytes(4);
                for (StopReplicaPartitionV0 ungroupedPartitionsElement : ungroupedPartitions) {
                    ungroupedPartitionsElement.addSize(_size, _cache, _version);
                }
            }
        }
        if ((_version >= 1) && (_version <= 2)) {
            {
                if (_version >= 2) {
                    // Compact arrays prefix with varint(length + 1).
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1));
                } else {
                    _size.addBytes(4);
                }
                for (StopReplicaTopicV1 topicsElement : topics) {
                    topicsElement.addSize(_size, _cache, _version);
                }
            }
        }
        if (_version >= 3) {
            {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topicStates.size() + 1));
                for (StopReplicaTopicState topicStatesElement : topicStates) {
                    topicStatesElement.addSize(_size, _cache, _version);
                }
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                // varint tag + varint size + payload bytes per raw tagged field.
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 2) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
@Override
public boolean equals(Object obj) {
    // Two instances are equal when every field, including any unknown raw
    // tagged fields, compares equal.
    if (!(obj instanceof StopReplicaRequestData)) {
        return false;
    }
    StopReplicaRequestData that = (StopReplicaRequestData) obj;
    if (controllerId != that.controllerId
            || isKRaftController != that.isKRaftController
            || controllerEpoch != that.controllerEpoch
            || brokerEpoch != that.brokerEpoch
            || deletePartitions != that.deletePartitions) {
        return false;
    }
    if (this.ungroupedPartitions == null
            ? that.ungroupedPartitions != null
            : !this.ungroupedPartitions.equals(that.ungroupedPartitions)) {
        return false;
    }
    if (this.topics == null
            ? that.topics != null
            : !this.topics.equals(that.topics)) {
        return false;
    }
    if (this.topicStates == null
            ? that.topicStates != null
            : !this.topicStates.equals(that.topicStates)) {
        return false;
    }
    return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, that._unknownTaggedFields);
}
@Override
public int hashCode() {
    // Standard 31-based rolling hash over the non-tagged fields. Tagged
    // fields are excluded, which is still consistent with equals(): equal
    // objects always share all of the fields hashed here.
    int result = controllerId;
    result = 31 * result + (isKRaftController ? 1231 : 1237);
    result = 31 * result + controllerEpoch;
    // Long.hashCode(v) == (int) (v >> 32) ^ (int) v for the low 32 bits.
    result = 31 * result + Long.hashCode(brokerEpoch);
    result = 31 * result + (deletePartitions ? 1231 : 1237);
    result = 31 * result + (ungroupedPartitions == null ? 0 : ungroupedPartitions.hashCode());
    result = 31 * result + (topics == null ? 0 : topics.hashCode());
    result = 31 * result + (topicStates == null ? 0 : topicStates.hashCode());
    return result;
}
@Override
public StopReplicaRequestData duplicate() {
    // Deep copy: scalar fields are copied directly, while the nested message
    // lists are rebuilt by duplicating each element.
    StopReplicaRequestData copy = new StopReplicaRequestData();
    copy.controllerId = controllerId;
    copy.isKRaftController = isKRaftController;
    copy.controllerEpoch = controllerEpoch;
    copy.brokerEpoch = brokerEpoch;
    copy.deletePartitions = deletePartitions;
    ArrayList<StopReplicaPartitionV0> ungroupedCopy = new ArrayList<>(ungroupedPartitions.size());
    for (StopReplicaPartitionV0 partition : ungroupedPartitions) {
        ungroupedCopy.add(partition.duplicate());
    }
    copy.ungroupedPartitions = ungroupedCopy;
    ArrayList<StopReplicaTopicV1> topicsCopy = new ArrayList<>(topics.size());
    for (StopReplicaTopicV1 topic : topics) {
        topicsCopy.add(topic.duplicate());
    }
    copy.topics = topicsCopy;
    ArrayList<StopReplicaTopicState> topicStatesCopy = new ArrayList<>(topicStates.size());
    for (StopReplicaTopicState topicState : topicStates) {
        topicStatesCopy.add(topicState.duplicate());
    }
    copy.topicStates = topicStatesCopy;
    return copy;
}
@Override
public String toString() {
    // Renders the same "Name(field=value, ...)" form as the generated
    // concatenation, built with a StringBuilder instead.
    StringBuilder bld = new StringBuilder("StopReplicaRequestData(");
    bld.append("controllerId=").append(controllerId);
    bld.append(", isKRaftController=").append(isKRaftController ? "true" : "false");
    bld.append(", controllerEpoch=").append(controllerEpoch);
    bld.append(", brokerEpoch=").append(brokerEpoch);
    bld.append(", deletePartitions=").append(deletePartitions ? "true" : "false");
    bld.append(", ungroupedPartitions=").append(MessageUtil.deepToString(ungroupedPartitions.iterator()));
    bld.append(", topics=").append(MessageUtil.deepToString(topics.iterator()));
    bld.append(", topicStates=").append(MessageUtil.deepToString(topicStates.iterator()));
    return bld.append(")").toString();
}
/** @return the controller id. */
public int controllerId() {
    return this.controllerId;
}
/** @return true if the controller is a KRaft controller (v4+ field). */
public boolean isKRaftController() {
    return this.isKRaftController;
}
/** @return the controller epoch. */
public int controllerEpoch() {
    return this.controllerEpoch;
}
/** @return the broker epoch, or -1 if unknown (v1+ field). */
public long brokerEpoch() {
    return this.brokerEpoch;
}
/** @return whether the partitions should be deleted (v0-v2 field). */
public boolean deletePartitions() {
    return this.deletePartitions;
}
/** @return the v0-only ungrouped partition list; live reference, not a copy. */
public List<StopReplicaPartitionV0> ungroupedPartitions() {
    return this.ungroupedPartitions;
}
/** @return the v1-v2 topic list; live reference, not a copy. */
public List<StopReplicaTopicV1> topics() {
    return this.topics;
}
/** @return the v3+ topic state list; live reference, not a copy. */
public List<StopReplicaTopicState> topicStates() {
    return this.topicStates;
}
/**
 * Returns the unknown tagged fields carried by this message, lazily
 * creating an empty, mutable list on first access.
 */
@Override
public List<RawTaggedField> unknownTaggedFields() {
    if (_unknownTaggedFields == null) {
        _unknownTaggedFields = new ArrayList<>(0);
    }
    return _unknownTaggedFields;
}
// Fluent setters: each stores the value as-is (lists are kept by reference,
// not copied) and returns this for chaining.
public StopReplicaRequestData setControllerId(int v) {
    this.controllerId = v;
    return this;
}
public StopReplicaRequestData setIsKRaftController(boolean v) {
    this.isKRaftController = v;
    return this;
}
public StopReplicaRequestData setControllerEpoch(int v) {
    this.controllerEpoch = v;
    return this;
}
public StopReplicaRequestData setBrokerEpoch(long v) {
    this.brokerEpoch = v;
    return this;
}
public StopReplicaRequestData setDeletePartitions(boolean v) {
    this.deletePartitions = v;
    return this;
}
public StopReplicaRequestData setUngroupedPartitions(List<StopReplicaPartitionV0> v) {
    this.ungroupedPartitions = v;
    return this;
}
public StopReplicaRequestData setTopics(List<StopReplicaTopicV1> v) {
    this.topics = v;
    return this;
}
public StopReplicaRequestData setTopicStates(List<StopReplicaTopicState> v) {
    this.topicStates = v;
    return this;
}
/**
 * A single (topic, partition) pair as serialized in version 0 of
 * StopReplicaRequest, before partitions were grouped by topic (v1+).
 */
public static class StopReplicaPartitionV0 implements Message {
    String topicName;
    int partitionIndex;
    private List<RawTaggedField> _unknownTaggedFields;
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("topic_name", Type.STRING, "The topic name."),
            new Field("partition_index", Type.INT32, "The partition index.")
        );
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };
    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;
    /** Deserializing constructor: reads the struct from the given readable. */
    public StopReplicaPartitionV0(Readable _readable, short _version) {
        read(_readable, _version);
    }
    /** Default constructor: initializes every field to its schema default. */
    public StopReplicaPartitionV0() {
        this.topicName = "";
        this.partitionIndex = 0;
    }
    // NOTE(review): lowest/highestSupportedVersion appear to reflect the
    // enclosing request's version range (0-4) rather than this struct's
    // SCHEMAS range (0-0); this is how the generator emitted them.
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 4;
    }
    /**
     * Reads this struct from the wire: a length-prefixed (int16) string
     * followed by an int32 partition index. v0 is not a flexible version,
     * so there is no tagged-field section.
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            length = _readable.readShort();
            if (length < 0) {
                throw new RuntimeException("non-nullable field topicName was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field topicName had invalid length " + length);
            } else {
                this.topicName = _readable.readString(length);
            }
        }
        this.partitionIndex = _readable.readInt();
        this._unknownTaggedFields = null;
    }
    /**
     * Writes this struct to the wire in v0 layout. Relies on addSize()
     * having cached the UTF-8 bytes of topicName in _cache beforehand.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        if (_version > 0) {
            throw new UnsupportedVersionException("Can't write version " + _version + " of StopReplicaPartitionV0");
        }
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = _cache.getSerializedValue(topicName);
            _writable.writeShort((short) _stringBytes.length);
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeInt(partitionIndex);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }
    /**
     * Computes the serialized size and caches the topicName UTF-8 bytes
     * so write() does not need to re-encode them.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'topicName' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(topicName, _stringBytes);
            // +2 for the int16 length prefix.
            _size.addBytes(_stringBytes.length + 2);
        }
        // partitionIndex: int32.
        _size.addBytes(4);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof StopReplicaPartitionV0)) return false;
        StopReplicaPartitionV0 other = (StopReplicaPartitionV0) obj;
        if (this.topicName == null) {
            if (other.topicName != null) return false;
        } else {
            if (!this.topicName.equals(other.topicName)) return false;
        }
        if (partitionIndex != other.partitionIndex) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode());
        hashCode = 31 * hashCode + partitionIndex;
        return hashCode;
    }
    /** Returns a copy; topicName is shared (String is immutable). */
    @Override
    public StopReplicaPartitionV0 duplicate() {
        StopReplicaPartitionV0 _duplicate = new StopReplicaPartitionV0();
        _duplicate.topicName = topicName;
        _duplicate.partitionIndex = partitionIndex;
        return _duplicate;
    }
    @Override
    public String toString() {
        return "StopReplicaPartitionV0("
            + "topicName=" + ((topicName == null) ? "null" : "'" + topicName.toString() + "'")
            + ", partitionIndex=" + partitionIndex
            + ")";
    }
    /** @return the topic name. */
    public String topicName() {
        return this.topicName;
    }
    /** @return the partition index. */
    public int partitionIndex() {
        return this.partitionIndex;
    }
    /** Lazily creates and returns the unknown tagged field list. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    /** Fluent setter for the topic name. */
    public StopReplicaPartitionV0 setTopicName(String v) {
        this.topicName = v;
        return this;
    }
    /** Fluent setter for the partition index. */
    public StopReplicaPartitionV0 setPartitionIndex(int v) {
        this.partitionIndex = v;
        return this;
    }
}
/**
 * A topic with its partition indexes, used in versions 1-2 of
 * StopReplicaRequest. Version 2 is a flexible version: strings and arrays
 * use compact (varint) length prefixes and a tagged-field section follows.
 */
public static class StopReplicaTopicV1 implements Message {
    String name;
    List<Integer> partitionIndexes;
    private List<RawTaggedField> _unknownTaggedFields;
    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("name", Type.STRING, "The topic name."),
            new Field("partition_indexes", new ArrayOf(Type.INT32), "The partition indexes.")
        );
    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("name", Type.COMPACT_STRING, "The topic name."),
            new Field("partition_indexes", new CompactArrayOf(Type.INT32), "The partition indexes."),
            TaggedFieldsSection.of(
            )
        );
    public static final Schema[] SCHEMAS = new Schema[] {
        null,
        SCHEMA_1,
        SCHEMA_2
    };
    public static final short LOWEST_SUPPORTED_VERSION = 1;
    public static final short HIGHEST_SUPPORTED_VERSION = 2;
    /** Deserializing constructor: reads the struct from the given readable. */
    public StopReplicaTopicV1(Readable _readable, short _version) {
        read(_readable, _version);
    }
    /** Default constructor: initializes every field to its schema default. */
    public StopReplicaTopicV1() {
        this.name = "";
        this.partitionIndexes = new ArrayList<Integer>(0);
    }
    // NOTE(review): lowest/highestSupportedVersion appear to reflect the
    // enclosing request's version range (0-4) rather than this struct's
    // SCHEMAS range (1-2); this is how the generator emitted them.
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 4;
    }
    /**
     * Reads this struct from the wire. v2+ uses compact encodings
     * (unsigned varint of length + 1) and ends with a tagged-field section;
     * v1 uses an int16 string prefix and an int32 array prefix.
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            if (_version >= 2) {
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readShort();
            }
            if (length < 0) {
                throw new RuntimeException("non-nullable field name was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field name had invalid length " + length);
            } else {
                this.name = _readable.readString(length);
            }
        }
        {
            int arrayLength;
            if (_version >= 2) {
                arrayLength = _readable.readUnsignedVarint() - 1;
            } else {
                arrayLength = _readable.readInt();
            }
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field partitionIndexes was serialized as null");
            } else {
                // Guard against a corrupt length causing a huge allocation.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<Integer> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(_readable.readInt());
                }
                this.partitionIndexes = newCollection;
            }
        }
        this._unknownTaggedFields = null;
        if (_version >= 2) {
            // Flexible versions: preserve any tagged fields we don't know.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }
    /**
     * Writes this struct to the wire, mirroring read(). Relies on addSize()
     * having cached the UTF-8 bytes of name in _cache beforehand.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        if ((_version < 1) || (_version > 2)) {
            throw new UnsupportedVersionException("Can't write version " + _version + " of StopReplicaTopicV1");
        }
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = _cache.getSerializedValue(name);
            if (_version >= 2) {
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
            } else {
                _writable.writeShort((short) _stringBytes.length);
            }
            _writable.writeByteArray(_stringBytes);
        }
        if (_version >= 2) {
            _writable.writeUnsignedVarint(partitionIndexes.size() + 1);
        } else {
            _writable.writeInt(partitionIndexes.size());
        }
        for (Integer partitionIndexesElement : partitionIndexes) {
            _writable.writeInt(partitionIndexesElement);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 2) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    /**
     * Computes the serialized size for the given version and caches the
     * UTF-8 bytes of name so write() does not re-encode them.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'name' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(name, _stringBytes);
            if (_version >= 2) {
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            } else {
                _size.addBytes(_stringBytes.length + 2);
            }
        }
        {
            if (_version >= 2) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitionIndexes.size() + 1));
            } else {
                _size.addBytes(4);
            }
            // 4 bytes per int32 element.
            _size.addBytes(partitionIndexes.size() * 4);
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 2) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof StopReplicaTopicV1)) return false;
        StopReplicaTopicV1 other = (StopReplicaTopicV1) obj;
        if (this.name == null) {
            if (other.name != null) return false;
        } else {
            if (!this.name.equals(other.name)) return false;
        }
        if (this.partitionIndexes == null) {
            if (other.partitionIndexes != null) return false;
        } else {
            if (!this.partitionIndexes.equals(other.partitionIndexes)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
        hashCode = 31 * hashCode + (partitionIndexes == null ? 0 : partitionIndexes.hashCode());
        return hashCode;
    }
    /** Returns a copy; the partition index list is duplicated element-wise. */
    @Override
    public StopReplicaTopicV1 duplicate() {
        StopReplicaTopicV1 _duplicate = new StopReplicaTopicV1();
        _duplicate.name = name;
        ArrayList<Integer> newPartitionIndexes = new ArrayList<Integer>(partitionIndexes.size());
        for (Integer _element : partitionIndexes) {
            newPartitionIndexes.add(_element);
        }
        _duplicate.partitionIndexes = newPartitionIndexes;
        return _duplicate;
    }
    @Override
    public String toString() {
        return "StopReplicaTopicV1("
            + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
            + ", partitionIndexes=" + MessageUtil.deepToString(partitionIndexes.iterator())
            + ")";
    }
    /** @return the topic name. */
    public String name() {
        return this.name;
    }
    /** @return the partition indexes; live reference, not a copy. */
    public List<Integer> partitionIndexes() {
        return this.partitionIndexes;
    }
    /** Lazily creates and returns the unknown tagged field list. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    /** Fluent setter for the topic name. */
    public StopReplicaTopicV1 setName(String v) {
        this.name = v;
        return this;
    }
    /** Fluent setter for the partition indexes. */
    public StopReplicaTopicV1 setPartitionIndexes(List<Integer> v) {
        this.partitionIndexes = v;
        return this;
    }
}
/**
 * Per-topic replica state used in versions 3+ of StopReplicaRequest.
 * All supported versions (3-4) are flexible, so strings and arrays are
 * always compact-encoded and a tagged-field section always follows.
 */
public static class StopReplicaTopicState implements Message {
    String topicName;
    List<StopReplicaPartitionState> partitionStates;
    private List<RawTaggedField> _unknownTaggedFields;
    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("topic_name", Type.COMPACT_STRING, "The topic name."),
            new Field("partition_states", new CompactArrayOf(StopReplicaPartitionState.SCHEMA_3), "The state of each partition"),
            TaggedFieldsSection.of(
            )
        );
    public static final Schema SCHEMA_4 = SCHEMA_3;
    public static final Schema[] SCHEMAS = new Schema[] {
        null,
        null,
        null,
        SCHEMA_3,
        SCHEMA_4
    };
    public static final short LOWEST_SUPPORTED_VERSION = 3;
    public static final short HIGHEST_SUPPORTED_VERSION = 4;
    /** Deserializing constructor: reads the struct from the given readable. */
    public StopReplicaTopicState(Readable _readable, short _version) {
        read(_readable, _version);
    }
    /** Default constructor: initializes every field to its schema default. */
    public StopReplicaTopicState() {
        this.topicName = "";
        this.partitionStates = new ArrayList<StopReplicaPartitionState>(0);
    }
    // NOTE(review): lowestSupportedVersion appears to reflect the enclosing
    // request's range (0) rather than this struct's SCHEMAS range (3);
    // this is how the generator emitted it.
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 4;
    }
    /**
     * Reads this struct from the wire: compact string, compact array of
     * partition states, then the tagged-field section.
     */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version > 4) {
            throw new UnsupportedVersionException("Can't read version " + _version + " of StopReplicaTopicState");
        }
        {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                throw new RuntimeException("non-nullable field topicName was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field topicName had invalid length " + length);
            } else {
                this.topicName = _readable.readString(length);
            }
        }
        {
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field partitionStates was serialized as null");
            } else {
                // Guard against a corrupt length causing a huge allocation.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<StopReplicaPartitionState> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new StopReplicaPartitionState(_readable, _version));
                }
                this.partitionStates = newCollection;
            }
        }
        this._unknownTaggedFields = null;
        // Preserve any tagged fields we don't recognize.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }
    /**
     * Writes this struct to the wire, mirroring read(). Relies on addSize()
     * having cached the UTF-8 bytes of topicName in _cache beforehand.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        if (_version < 3) {
            throw new UnsupportedVersionException("Can't write version " + _version + " of StopReplicaTopicState");
        }
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = _cache.getSerializedValue(topicName);
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeUnsignedVarint(partitionStates.size() + 1);
        for (StopReplicaPartitionState partitionStatesElement : partitionStates) {
            partitionStatesElement.write(_writable, _cache, _version);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }
    /**
     * Computes the serialized size and caches the topicName UTF-8 bytes so
     * write() does not re-encode them.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version > 4) {
            throw new UnsupportedVersionException("Can't size version " + _version + " of StopReplicaTopicState");
        }
        {
            byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'topicName' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(topicName, _stringBytes);
            _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
        }
        {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitionStates.size() + 1));
            for (StopReplicaPartitionState partitionStatesElement : partitionStates) {
                partitionStatesElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof StopReplicaTopicState)) return false;
        StopReplicaTopicState other = (StopReplicaTopicState) obj;
        if (this.topicName == null) {
            if (other.topicName != null) return false;
        } else {
            if (!this.topicName.equals(other.topicName)) return false;
        }
        if (this.partitionStates == null) {
            if (other.partitionStates != null) return false;
        } else {
            if (!this.partitionStates.equals(other.partitionStates)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode());
        hashCode = 31 * hashCode + (partitionStates == null ? 0 : partitionStates.hashCode());
        return hashCode;
    }
    /** Returns a deep copy; each partition state is duplicated. */
    @Override
    public StopReplicaTopicState duplicate() {
        StopReplicaTopicState _duplicate = new StopReplicaTopicState();
        _duplicate.topicName = topicName;
        ArrayList<StopReplicaPartitionState> newPartitionStates = new ArrayList<StopReplicaPartitionState>(partitionStates.size());
        for (StopReplicaPartitionState _element : partitionStates) {
            newPartitionStates.add(_element.duplicate());
        }
        _duplicate.partitionStates = newPartitionStates;
        return _duplicate;
    }
    @Override
    public String toString() {
        return "StopReplicaTopicState("
            + "topicName=" + ((topicName == null) ? "null" : "'" + topicName.toString() + "'")
            + ", partitionStates=" + MessageUtil.deepToString(partitionStates.iterator())
            + ")";
    }
    /** @return the topic name. */
    public String topicName() {
        return this.topicName;
    }
    /** @return the per-partition states; live reference, not a copy. */
    public List<StopReplicaPartitionState> partitionStates() {
        return this.partitionStates;
    }
    /** Lazily creates and returns the unknown tagged field list. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    /** Fluent setter for the topic name. */
    public StopReplicaTopicState setTopicName(String v) {
        this.topicName = v;
        return this;
    }
    /** Fluent setter for the per-partition states. */
    public StopReplicaTopicState setPartitionStates(List<StopReplicaPartitionState> v) {
        this.partitionStates = v;
        return this;
    }
}
/**
 * Per-partition replica state (index, leader epoch, delete flag) used in
 * versions 3+ of StopReplicaRequest. All supported versions are flexible,
 * so a tagged-field section always follows the fixed fields.
 */
public static class StopReplicaPartitionState implements Message {
    int partitionIndex;
    int leaderEpoch;
    boolean deletePartition;
    private List<RawTaggedField> _unknownTaggedFields;
    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("partition_index", Type.INT32, "The partition index."),
            new Field("leader_epoch", Type.INT32, "The leader epoch."),
            new Field("delete_partition", Type.BOOLEAN, "Whether this partition should be deleted."),
            TaggedFieldsSection.of(
            )
        );
    public static final Schema SCHEMA_4 = SCHEMA_3;
    public static final Schema[] SCHEMAS = new Schema[] {
        null,
        null,
        null,
        SCHEMA_3,
        SCHEMA_4
    };
    public static final short LOWEST_SUPPORTED_VERSION = 3;
    public static final short HIGHEST_SUPPORTED_VERSION = 4;
    /** Deserializing constructor: reads the struct from the given readable. */
    public StopReplicaPartitionState(Readable _readable, short _version) {
        read(_readable, _version);
    }
    /** Default constructor: leaderEpoch defaults to -1 (unknown epoch). */
    public StopReplicaPartitionState() {
        this.partitionIndex = 0;
        this.leaderEpoch = -1;
        this.deletePartition = false;
    }
    @Override
    public short lowestSupportedVersion() {
        return 3;
    }
    @Override
    public short highestSupportedVersion() {
        return 4;
    }
    /**
     * Reads this struct from the wire: two int32s, a boolean byte, then the
     * tagged-field section. Unknown tagged fields are preserved.
     */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version > 4) {
            throw new UnsupportedVersionException("Can't read version " + _version + " of StopReplicaPartitionState");
        }
        this.partitionIndex = _readable.readInt();
        this.leaderEpoch = _readable.readInt();
        // Any non-zero byte counts as true.
        this.deletePartition = _readable.readByte() != 0;
        this._unknownTaggedFields = null;
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }
    /** Writes this struct to the wire, mirroring read(). */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(partitionIndex);
        _writable.writeInt(leaderEpoch);
        _writable.writeByte(deletePartition ? (byte) 1 : (byte) 0);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }
    /** Computes the serialized size: 4 + 4 + 1 bytes plus tagged fields. */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version > 4) {
            throw new UnsupportedVersionException("Can't size version " + _version + " of StopReplicaPartitionState");
        }
        _size.addBytes(4);
        _size.addBytes(4);
        _size.addBytes(1);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof StopReplicaPartitionState)) return false;
        StopReplicaPartitionState other = (StopReplicaPartitionState) obj;
        if (partitionIndex != other.partitionIndex) return false;
        if (leaderEpoch != other.leaderEpoch) return false;
        if (deletePartition != other.deletePartition) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + partitionIndex;
        hashCode = 31 * hashCode + leaderEpoch;
        hashCode = 31 * hashCode + (deletePartition ? 1231 : 1237);
        return hashCode;
    }
    /** Returns a field-for-field copy (all fields are primitives). */
    @Override
    public StopReplicaPartitionState duplicate() {
        StopReplicaPartitionState _duplicate = new StopReplicaPartitionState();
        _duplicate.partitionIndex = partitionIndex;
        _duplicate.leaderEpoch = leaderEpoch;
        _duplicate.deletePartition = deletePartition;
        return _duplicate;
    }
    @Override
    public String toString() {
        return "StopReplicaPartitionState("
            + "partitionIndex=" + partitionIndex
            + ", leaderEpoch=" + leaderEpoch
            + ", deletePartition=" + (deletePartition ? "true" : "false")
            + ")";
    }
    /** @return the partition index. */
    public int partitionIndex() {
        return this.partitionIndex;
    }
    /** @return the leader epoch, or -1 if unknown. */
    public int leaderEpoch() {
        return this.leaderEpoch;
    }
    /** @return whether this partition should be deleted. */
    public boolean deletePartition() {
        return this.deletePartition;
    }
    /** Lazily creates and returns the unknown tagged field list. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    /** Fluent setter for the partition index. */
    public StopReplicaPartitionState setPartitionIndex(int v) {
        this.partitionIndex = v;
        return this;
    }
    /** Fluent setter for the leader epoch. */
    public StopReplicaPartitionState setLeaderEpoch(int v) {
        this.leaderEpoch = v;
        return this;
    }
    /** Fluent setter for the delete flag. */
    public StopReplicaPartitionState setDeletePartition(boolean v) {
        this.deletePartition = v;
        return this;
    }
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/StopReplicaRequestDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.StopReplicaRequestData.*;
/**
 * JSON converter for {@link StopReplicaRequestData}. Translates between the
 * generated message object and a Jackson {@link JsonNode} tree, enforcing the
 * per-version field-presence and default-value rules encoded in the schema:
 * ungroupedPartitions exists only in v0, topics in v1-2, topicStates in v3+,
 * brokerEpoch in v1+, isKRaftController in v4+, deletePartitions in v0-2.
 */
public class StopReplicaRequestDataJsonConverter {
    /**
     * Deserializes a StopReplicaRequestData from JSON. A field that is
     * mandatory in {@code _version} raises a RuntimeException when absent;
     * a field not present in {@code _version} falls back to its schema default.
     */
    public static StopReplicaRequestData read(JsonNode _node, short _version) {
        StopReplicaRequestData _object = new StopReplicaRequestData();
        JsonNode _controllerIdNode = _node.get("controllerId");
        if (_controllerIdNode == null) {
            throw new RuntimeException("StopReplicaRequestData: unable to locate field 'controllerId', which is mandatory in version " + _version);
        } else {
            _object.controllerId = MessageUtil.jsonNodeToInt(_controllerIdNode, "StopReplicaRequestData");
        }
        // isKRaftController was added in version 4; older versions default to false.
        JsonNode _isKRaftControllerNode = _node.get("isKRaftController");
        if (_isKRaftControllerNode == null) {
            if (_version >= 4) {
                throw new RuntimeException("StopReplicaRequestData: unable to locate field 'isKRaftController', which is mandatory in version " + _version);
            } else {
                _object.isKRaftController = false;
            }
        } else {
            if (!_isKRaftControllerNode.isBoolean()) {
                throw new RuntimeException("StopReplicaRequestData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.isKRaftController = _isKRaftControllerNode.asBoolean();
        }
        JsonNode _controllerEpochNode = _node.get("controllerEpoch");
        if (_controllerEpochNode == null) {
            throw new RuntimeException("StopReplicaRequestData: unable to locate field 'controllerEpoch', which is mandatory in version " + _version);
        } else {
            _object.controllerEpoch = MessageUtil.jsonNodeToInt(_controllerEpochNode, "StopReplicaRequestData");
        }
        // brokerEpoch was added in version 1; older versions default to -1.
        JsonNode _brokerEpochNode = _node.get("brokerEpoch");
        if (_brokerEpochNode == null) {
            if (_version >= 1) {
                throw new RuntimeException("StopReplicaRequestData: unable to locate field 'brokerEpoch', which is mandatory in version " + _version);
            } else {
                _object.brokerEpoch = -1L;
            }
        } else {
            _object.brokerEpoch = MessageUtil.jsonNodeToLong(_brokerEpochNode, "StopReplicaRequestData");
        }
        // deletePartitions exists only in versions 0-2; v3+ encodes deletion per partition.
        JsonNode _deletePartitionsNode = _node.get("deletePartitions");
        if (_deletePartitionsNode == null) {
            if (_version <= 2) {
                throw new RuntimeException("StopReplicaRequestData: unable to locate field 'deletePartitions', which is mandatory in version " + _version);
            } else {
                _object.deletePartitions = false;
            }
        } else {
            if (!_deletePartitionsNode.isBoolean()) {
                throw new RuntimeException("StopReplicaRequestData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.deletePartitions = _deletePartitionsNode.asBoolean();
        }
        // ungroupedPartitions is the v0-only flat partition list.
        JsonNode _ungroupedPartitionsNode = _node.get("ungroupedPartitions");
        if (_ungroupedPartitionsNode == null) {
            if (_version <= 0) {
                throw new RuntimeException("StopReplicaRequestData: unable to locate field 'ungroupedPartitions', which is mandatory in version " + _version);
            } else {
                _object.ungroupedPartitions = new ArrayList<StopReplicaPartitionV0>(0);
            }
        } else {
            if (!_ungroupedPartitionsNode.isArray()) {
                throw new RuntimeException("StopReplicaRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<StopReplicaPartitionV0> _collection = new ArrayList<StopReplicaPartitionV0>(_ungroupedPartitionsNode.size());
            _object.ungroupedPartitions = _collection;
            for (JsonNode _element : _ungroupedPartitionsNode) {
                _collection.add(StopReplicaPartitionV0JsonConverter.read(_element, _version));
            }
        }
        // topics is the v1-2 grouped representation (topic name + partition indexes).
        JsonNode _topicsNode = _node.get("topics");
        if (_topicsNode == null) {
            if ((_version >= 1) && (_version <= 2)) {
                throw new RuntimeException("StopReplicaRequestData: unable to locate field 'topics', which is mandatory in version " + _version);
            } else {
                _object.topics = new ArrayList<StopReplicaTopicV1>(0);
            }
        } else {
            if (!_topicsNode.isArray()) {
                throw new RuntimeException("StopReplicaRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<StopReplicaTopicV1> _collection = new ArrayList<StopReplicaTopicV1>(_topicsNode.size());
            _object.topics = _collection;
            for (JsonNode _element : _topicsNode) {
                _collection.add(StopReplicaTopicV1JsonConverter.read(_element, _version));
            }
        }
        // topicStates is the v3+ representation carrying per-partition leader epoch and delete flag.
        JsonNode _topicStatesNode = _node.get("topicStates");
        if (_topicStatesNode == null) {
            if (_version >= 3) {
                throw new RuntimeException("StopReplicaRequestData: unable to locate field 'topicStates', which is mandatory in version " + _version);
            } else {
                _object.topicStates = new ArrayList<StopReplicaTopicState>(0);
            }
        } else {
            if (!_topicStatesNode.isArray()) {
                throw new RuntimeException("StopReplicaRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<StopReplicaTopicState> _collection = new ArrayList<StopReplicaTopicState>(_topicStatesNode.size());
            _object.topicStates = _collection;
            for (JsonNode _element : _topicStatesNode) {
                _collection.add(StopReplicaTopicStateJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }
    /**
     * Serializes a StopReplicaRequestData to JSON. Fields outside
     * {@code _version}'s schema are omitted, and writing a non-default value
     * for such a field throws UnsupportedVersionException.
     * {@code _serializeRecords} is forwarded to nested converters.
     */
    public static JsonNode write(StopReplicaRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("controllerId", new IntNode(_object.controllerId));
        if (_version >= 4) {
            _node.set("isKRaftController", BooleanNode.valueOf(_object.isKRaftController));
        } else {
            if (_object.isKRaftController) {
                throw new UnsupportedVersionException("Attempted to write a non-default isKRaftController at version " + _version);
            }
        }
        _node.set("controllerEpoch", new IntNode(_object.controllerEpoch));
        if (_version >= 1) {
            _node.set("brokerEpoch", new LongNode(_object.brokerEpoch));
        }
        if (_version <= 2) {
            _node.set("deletePartitions", BooleanNode.valueOf(_object.deletePartitions));
        } else {
            if (_object.deletePartitions) {
                throw new UnsupportedVersionException("Attempted to write a non-default deletePartitions at version " + _version);
            }
        }
        if (_version <= 0) {
            ArrayNode _ungroupedPartitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (StopReplicaPartitionV0 _element : _object.ungroupedPartitions) {
                _ungroupedPartitionsArray.add(StopReplicaPartitionV0JsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("ungroupedPartitions", _ungroupedPartitionsArray);
        } else {
            if (!_object.ungroupedPartitions.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default ungroupedPartitions at version " + _version);
            }
        }
        if ((_version >= 1) && (_version <= 2)) {
            ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
            for (StopReplicaTopicV1 _element : _object.topics) {
                _topicsArray.add(StopReplicaTopicV1JsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("topics", _topicsArray);
        } else {
            if (!_object.topics.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default topics at version " + _version);
            }
        }
        if (_version >= 3) {
            ArrayNode _topicStatesArray = new ArrayNode(JsonNodeFactory.instance);
            for (StopReplicaTopicState _element : _object.topicStates) {
                _topicStatesArray.add(StopReplicaTopicStateJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("topicStates", _topicStatesArray);
        } else {
            if (!_object.topicStates.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default topicStates at version " + _version);
            }
        }
        return _node;
    }
    /** Convenience overload: serializes records by default. */
    public static JsonNode write(StopReplicaRequestData _object, short _version) {
        return write(_object, _version, true);
    }
    /** JSON converter for the v3+ per-partition state (index, leader epoch, delete flag). */
    public static class StopReplicaPartitionStateJsonConverter {
        public static StopReplicaPartitionState read(JsonNode _node, short _version) {
            StopReplicaPartitionState _object = new StopReplicaPartitionState();
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("StopReplicaPartitionState: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "StopReplicaPartitionState");
            }
            JsonNode _leaderEpochNode = _node.get("leaderEpoch");
            if (_leaderEpochNode == null) {
                throw new RuntimeException("StopReplicaPartitionState: unable to locate field 'leaderEpoch', which is mandatory in version " + _version);
            } else {
                _object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "StopReplicaPartitionState");
            }
            JsonNode _deletePartitionNode = _node.get("deletePartition");
            if (_deletePartitionNode == null) {
                throw new RuntimeException("StopReplicaPartitionState: unable to locate field 'deletePartition', which is mandatory in version " + _version);
            } else {
                if (!_deletePartitionNode.isBoolean()) {
                    throw new RuntimeException("StopReplicaPartitionState expected Boolean type, but got " + _node.getNodeType());
                }
                _object.deletePartition = _deletePartitionNode.asBoolean();
            }
            return _object;
        }
        public static JsonNode write(StopReplicaPartitionState _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            _node.set("leaderEpoch", new IntNode(_object.leaderEpoch));
            _node.set("deletePartition", BooleanNode.valueOf(_object.deletePartition));
            return _node;
        }
        public static JsonNode write(StopReplicaPartitionState _object, short _version) {
            return write(_object, _version, true);
        }
    }
    /** JSON converter for the v0-only flat partition entry (topic name + partition index). */
    public static class StopReplicaPartitionV0JsonConverter {
        public static StopReplicaPartitionV0 read(JsonNode _node, short _version) {
            StopReplicaPartitionV0 _object = new StopReplicaPartitionV0();
            // This struct only exists in version 0.
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of StopReplicaPartitionV0");
            }
            JsonNode _topicNameNode = _node.get("topicName");
            if (_topicNameNode == null) {
                throw new RuntimeException("StopReplicaPartitionV0: unable to locate field 'topicName', which is mandatory in version " + _version);
            } else {
                if (!_topicNameNode.isTextual()) {
                    throw new RuntimeException("StopReplicaPartitionV0 expected a string type, but got " + _node.getNodeType());
                }
                _object.topicName = _topicNameNode.asText();
            }
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("StopReplicaPartitionV0: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "StopReplicaPartitionV0");
            }
            return _object;
        }
        public static JsonNode write(StopReplicaPartitionV0 _object, short _version, boolean _serializeRecords) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of StopReplicaPartitionV0");
            }
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("topicName", new TextNode(_object.topicName));
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            return _node;
        }
        public static JsonNode write(StopReplicaPartitionV0 _object, short _version) {
            return write(_object, _version, true);
        }
    }
    /** JSON converter for the v3+ per-topic state (topic name + partition states). */
    public static class StopReplicaTopicStateJsonConverter {
        public static StopReplicaTopicState read(JsonNode _node, short _version) {
            StopReplicaTopicState _object = new StopReplicaTopicState();
            // This struct only exists in versions 3 and above.
            if (_version < 3) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of StopReplicaTopicState");
            }
            JsonNode _topicNameNode = _node.get("topicName");
            if (_topicNameNode == null) {
                throw new RuntimeException("StopReplicaTopicState: unable to locate field 'topicName', which is mandatory in version " + _version);
            } else {
                if (!_topicNameNode.isTextual()) {
                    throw new RuntimeException("StopReplicaTopicState expected a string type, but got " + _node.getNodeType());
                }
                _object.topicName = _topicNameNode.asText();
            }
            JsonNode _partitionStatesNode = _node.get("partitionStates");
            if (_partitionStatesNode == null) {
                throw new RuntimeException("StopReplicaTopicState: unable to locate field 'partitionStates', which is mandatory in version " + _version);
            } else {
                if (!_partitionStatesNode.isArray()) {
                    throw new RuntimeException("StopReplicaTopicState expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<StopReplicaPartitionState> _collection = new ArrayList<StopReplicaPartitionState>(_partitionStatesNode.size());
                _object.partitionStates = _collection;
                for (JsonNode _element : _partitionStatesNode) {
                    _collection.add(StopReplicaPartitionStateJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }
        public static JsonNode write(StopReplicaTopicState _object, short _version, boolean _serializeRecords) {
            if (_version < 3) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of StopReplicaTopicState");
            }
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("topicName", new TextNode(_object.topicName));
            ArrayNode _partitionStatesArray = new ArrayNode(JsonNodeFactory.instance);
            for (StopReplicaPartitionState _element : _object.partitionStates) {
                _partitionStatesArray.add(StopReplicaPartitionStateJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitionStates", _partitionStatesArray);
            return _node;
        }
        public static JsonNode write(StopReplicaTopicState _object, short _version) {
            return write(_object, _version, true);
        }
    }
    /** JSON converter for the v1-2 grouped topic entry (name + partition indexes). */
    public static class StopReplicaTopicV1JsonConverter {
        public static StopReplicaTopicV1 read(JsonNode _node, short _version) {
            StopReplicaTopicV1 _object = new StopReplicaTopicV1();
            // This struct only exists in versions 1 and 2.
            if ((_version < 1) || (_version > 2)) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of StopReplicaTopicV1");
            }
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("StopReplicaTopicV1: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("StopReplicaTopicV1 expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _partitionIndexesNode = _node.get("partitionIndexes");
            if (_partitionIndexesNode == null) {
                throw new RuntimeException("StopReplicaTopicV1: unable to locate field 'partitionIndexes', which is mandatory in version " + _version);
            } else {
                if (!_partitionIndexesNode.isArray()) {
                    throw new RuntimeException("StopReplicaTopicV1 expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_partitionIndexesNode.size());
                _object.partitionIndexes = _collection;
                for (JsonNode _element : _partitionIndexesNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "StopReplicaTopicV1 element"));
                }
            }
            return _object;
        }
        public static JsonNode write(StopReplicaTopicV1 _object, short _version, boolean _serializeRecords) {
            if ((_version < 1) || (_version > 2)) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of StopReplicaTopicV1");
            }
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            ArrayNode _partitionIndexesArray = new ArrayNode(JsonNodeFactory.instance);
            for (Integer _element : _object.partitionIndexes) {
                _partitionIndexesArray.add(new IntNode(_element));
            }
            _node.set("partitionIndexes", _partitionIndexesArray);
            return _node;
        }
        public static JsonNode write(StopReplicaTopicV1 _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/StopReplicaResponseData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
/**
 * Generated message class for the StopReplica response (api key 5,
 * versions 0-4). Holds a top-level error code plus a per-partition error
 * list, and implements the Kafka wire protocol read/write/size logic.
 * Versions 2+ use the flexible format: compact (varint length+1) arrays
 * and strings, and a tagged-fields section.
 */
public class StopReplicaResponseData implements ApiMessage {
    // The top-level error code, or 0 if there was no top-level error.
    short errorCode;
    // The responses for each partition.
    List<StopReplicaPartitionError> partitionErrors;
    // Tagged fields received on the wire that this version does not recognize.
    private List<RawTaggedField> _unknownTaggedFields;
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("error_code", Type.INT16, "The top-level error code, or 0 if there was no top-level error."),
            new Field("partition_errors", new ArrayOf(StopReplicaPartitionError.SCHEMA_0), "The responses for each partition.")
        );
    public static final Schema SCHEMA_1 = SCHEMA_0;
    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("error_code", Type.INT16, "The top-level error code, or 0 if there was no top-level error."),
            new Field("partition_errors", new CompactArrayOf(StopReplicaPartitionError.SCHEMA_2), "The responses for each partition."),
            TaggedFieldsSection.of(
            )
        );
    public static final Schema SCHEMA_3 = SCHEMA_2;
    public static final Schema SCHEMA_4 = SCHEMA_3;
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3,
        SCHEMA_4
    };
    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 4;
    /** Deserializing constructor: reads the message from {@code _readable} at {@code _version}. */
    public StopReplicaResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }
    /** Creates an instance with default field values (error code 0, empty partition errors). */
    public StopReplicaResponseData() {
        this.errorCode = (short) 0;
        this.partitionErrors = new ArrayList<StopReplicaPartitionError>(0);
    }
    @Override
    public short apiKey() {
        return 5;
    }
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 4;
    }
    /**
     * Reads this message from the wire. For versions 2+ the array length is a
     * compact unsigned varint (encoded as length + 1); older versions use a
     * fixed 4-byte int. Unknown tagged fields (v2+) are retained verbatim.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.errorCode = _readable.readShort();
        {
            if (_version >= 2) {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitionErrors was serialized as null");
                } else {
                    // Guard against allocating huge collections from a corrupt length prefix.
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<StopReplicaPartitionError> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new StopReplicaPartitionError(_readable, _version));
                    }
                    this.partitionErrors = newCollection;
                }
            } else {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitionErrors was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<StopReplicaPartitionError> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new StopReplicaPartitionError(_readable, _version));
                    }
                    this.partitionErrors = newCollection;
                }
            }
        }
        this._unknownTaggedFields = null;
        if (_version >= 2) {
            // Flexible versions carry a trailing tagged-fields section.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }
    /**
     * Writes this message to the wire, mirroring {@link #read}. Tagged fields
     * are only supported from version 2 onward; writing them at an older
     * version throws UnsupportedVersionException.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeShort(errorCode);
        if (_version >= 2) {
            // Compact array encoding: varint of (size + 1).
            _writable.writeUnsignedVarint(partitionErrors.size() + 1);
            for (StopReplicaPartitionError partitionErrorsElement : partitionErrors) {
                partitionErrorsElement.write(_writable, _cache, _version);
            }
        } else {
            _writable.writeInt(partitionErrors.size());
            for (StopReplicaPartitionError partitionErrorsElement : partitionErrors) {
                partitionErrorsElement.write(_writable, _cache, _version);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 2) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    /**
     * Accumulates the serialized size of this message at {@code _version};
     * must stay in lock-step with {@link #write}.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(2);
        {
            if (_version >= 2) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitionErrors.size() + 1));
            } else {
                _size.addBytes(4);
            }
            for (StopReplicaPartitionError partitionErrorsElement : partitionErrors) {
                partitionErrorsElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 2) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof StopReplicaResponseData)) return false;
        StopReplicaResponseData other = (StopReplicaResponseData) obj;
        if (errorCode != other.errorCode) return false;
        if (this.partitionErrors == null) {
            if (other.partitionErrors != null) return false;
        } else {
            if (!this.partitionErrors.equals(other.partitionErrors)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + errorCode;
        hashCode = 31 * hashCode + (partitionErrors == null ? 0 : partitionErrors.hashCode());
        return hashCode;
    }
    /** Returns a deep copy of this message (partition errors are duplicated element-wise). */
    @Override
    public StopReplicaResponseData duplicate() {
        StopReplicaResponseData _duplicate = new StopReplicaResponseData();
        _duplicate.errorCode = errorCode;
        ArrayList<StopReplicaPartitionError> newPartitionErrors = new ArrayList<StopReplicaPartitionError>(partitionErrors.size());
        for (StopReplicaPartitionError _element : partitionErrors) {
            newPartitionErrors.add(_element.duplicate());
        }
        _duplicate.partitionErrors = newPartitionErrors;
        return _duplicate;
    }
    @Override
    public String toString() {
        return "StopReplicaResponseData("
            + "errorCode=" + errorCode
            + ", partitionErrors=" + MessageUtil.deepToString(partitionErrors.iterator())
            + ")";
    }
    public short errorCode() {
        return this.errorCode;
    }
    public List<StopReplicaPartitionError> partitionErrors() {
        return this.partitionErrors;
    }
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        // Lazily initialized so messages without unknown tags pay no allocation cost.
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    public StopReplicaResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }
    public StopReplicaResponseData setPartitionErrors(List<StopReplicaPartitionError> v) {
        this.partitionErrors = v;
        return this;
    }
    /**
     * Per-partition error entry: topic name, partition index, and error code.
     * Versions 2+ use compact strings and carry a tagged-fields section.
     */
    public static class StopReplicaPartitionError implements Message {
        // The topic name.
        String topicName;
        // The partition index.
        int partitionIndex;
        // The partition error code, or 0 if there was no partition error.
        short errorCode;
        private List<RawTaggedField> _unknownTaggedFields;
        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("topic_name", Type.STRING, "The topic name."),
                new Field("partition_index", Type.INT32, "The partition index."),
                new Field("error_code", Type.INT16, "The partition error code, or 0 if there was no partition error.")
            );
        public static final Schema SCHEMA_1 = SCHEMA_0;
        public static final Schema SCHEMA_2 =
            new Schema(
                new Field("topic_name", Type.COMPACT_STRING, "The topic name."),
                new Field("partition_index", Type.INT32, "The partition index."),
                new Field("error_code", Type.INT16, "The partition error code, or 0 if there was no partition error."),
                TaggedFieldsSection.of(
                )
            );
        public static final Schema SCHEMA_3 = SCHEMA_2;
        public static final Schema SCHEMA_4 = SCHEMA_3;
        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1,
            SCHEMA_2,
            SCHEMA_3,
            SCHEMA_4
        };
        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 4;
        public StopReplicaPartitionError(Readable _readable, short _version) {
            read(_readable, _version);
        }
        public StopReplicaPartitionError() {
            this.topicName = "";
            this.partitionIndex = 0;
            this.errorCode = (short) 0;
        }
        @Override
        public short lowestSupportedVersion() {
            return 0;
        }
        @Override
        public short highestSupportedVersion() {
            return 4;
        }
        /**
         * Reads this struct from the wire. The topic name is a compact string
         * (varint length + 1) in versions 2+ and a short-prefixed string before.
         */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 4) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of StopReplicaPartitionError");
            }
            {
                int length;
                if (_version >= 2) {
                    length = _readable.readUnsignedVarint() - 1;
                } else {
                    length = _readable.readShort();
                }
                if (length < 0) {
                    throw new RuntimeException("non-nullable field topicName was serialized as null");
                } else if (length > 0x7fff) {
                    // Protocol strings are capped at Short.MAX_VALUE bytes.
                    throw new RuntimeException("string field topicName had invalid length " + length);
                } else {
                    this.topicName = _readable.readString(length);
                }
            }
            this.partitionIndex = _readable.readInt();
            this.errorCode = _readable.readShort();
            this._unknownTaggedFields = null;
            if (_version >= 2) {
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }
        /** Writes this struct to the wire, mirroring {@link #read}. */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                // The UTF-8 bytes were cached by a prior addSize() pass.
                byte[] _stringBytes = _cache.getSerializedValue(topicName);
                if (_version >= 2) {
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                } else {
                    _writable.writeShort((short) _stringBytes.length);
                }
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeInt(partitionIndex);
            _writable.writeShort(errorCode);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 2) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }
        /**
         * Accumulates the serialized size; also caches the topic name's UTF-8
         * bytes for the subsequent {@link #write} pass.
         */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 4) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of StopReplicaPartitionError");
            }
            {
                byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'topicName' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(topicName, _stringBytes);
                if (_version >= 2) {
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                } else {
                    _size.addBytes(_stringBytes.length + 2);
                }
            }
            _size.addBytes(4);
            _size.addBytes(2);
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_version >= 2) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }
        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof StopReplicaPartitionError)) return false;
            StopReplicaPartitionError other = (StopReplicaPartitionError) obj;
            if (this.topicName == null) {
                if (other.topicName != null) return false;
            } else {
                if (!this.topicName.equals(other.topicName)) return false;
            }
            if (partitionIndex != other.partitionIndex) return false;
            if (errorCode != other.errorCode) return false;
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }
        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode());
            hashCode = 31 * hashCode + partitionIndex;
            hashCode = 31 * hashCode + errorCode;
            return hashCode;
        }
        @Override
        public StopReplicaPartitionError duplicate() {
            StopReplicaPartitionError _duplicate = new StopReplicaPartitionError();
            _duplicate.topicName = topicName;
            _duplicate.partitionIndex = partitionIndex;
            _duplicate.errorCode = errorCode;
            return _duplicate;
        }
        @Override
        public String toString() {
            return "StopReplicaPartitionError("
                + "topicName=" + ((topicName == null) ? "null" : "'" + topicName.toString() + "'")
                + ", partitionIndex=" + partitionIndex
                + ", errorCode=" + errorCode
                + ")";
        }
        public String topicName() {
            return this.topicName;
        }
        public int partitionIndex() {
            return this.partitionIndex;
        }
        public short errorCode() {
            return this.errorCode;
        }
        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }
        public StopReplicaPartitionError setTopicName(String v) {
            this.topicName = v;
            return this;
        }
        public StopReplicaPartitionError setPartitionIndex(int v) {
            this.partitionIndex = v;
            return this;
        }
        public StopReplicaPartitionError setErrorCode(short v) {
            this.errorCode = v;
            return this;
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/StopReplicaResponseDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.StopReplicaResponseData.*;
/**
 * Converts {@link StopReplicaResponseData} to and from Jackson JSON trees.
 * Field names in the JSON form are the camelCase Java field names.
 *
 * Fix over the generated original: type-mismatch error messages now report
 * the node type of the offending FIELD node rather than the enclosing object
 * node (which is always OBJECT and made the message useless for debugging).
 */
public class StopReplicaResponseDataJsonConverter {
    /**
     * Reads a StopReplicaResponseData from a JSON node.
     *
     * @throws RuntimeException if a mandatory field is missing or has the wrong JSON type.
     */
    public static StopReplicaResponseData read(JsonNode _node, short _version) {
        StopReplicaResponseData _object = new StopReplicaResponseData();
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("StopReplicaResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "StopReplicaResponseData");
        }
        JsonNode _partitionErrorsNode = _node.get("partitionErrors");
        if (_partitionErrorsNode == null) {
            throw new RuntimeException("StopReplicaResponseData: unable to locate field 'partitionErrors', which is mandatory in version " + _version);
        } else {
            if (!_partitionErrorsNode.isArray()) {
                // Report the field node's type, not the parent object's.
                throw new RuntimeException("StopReplicaResponseData expected a JSON array, but got " + _partitionErrorsNode.getNodeType());
            }
            ArrayList<StopReplicaPartitionError> _collection = new ArrayList<StopReplicaPartitionError>(_partitionErrorsNode.size());
            _object.partitionErrors = _collection;
            for (JsonNode _element : _partitionErrorsNode) {
                _collection.add(StopReplicaPartitionErrorJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }
    /**
     * Writes a StopReplicaResponseData as a JSON object node.
     *
     * @param _serializeRecords unused here; kept for generator-wide signature uniformity.
     */
    public static JsonNode write(StopReplicaResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("errorCode", new ShortNode(_object.errorCode));
        ArrayNode _partitionErrorsArray = new ArrayNode(JsonNodeFactory.instance);
        for (StopReplicaPartitionError _element : _object.partitionErrors) {
            _partitionErrorsArray.add(StopReplicaPartitionErrorJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("partitionErrors", _partitionErrorsArray);
        return _node;
    }
    /** Convenience overload that serializes records (no records exist in this message). */
    public static JsonNode write(StopReplicaResponseData _object, short _version) {
        return write(_object, _version, true);
    }
    /** JSON converter for the nested per-partition error element. */
    public static class StopReplicaPartitionErrorJsonConverter {
        /** Reads one StopReplicaPartitionError element from a JSON node. */
        public static StopReplicaPartitionError read(JsonNode _node, short _version) {
            StopReplicaPartitionError _object = new StopReplicaPartitionError();
            JsonNode _topicNameNode = _node.get("topicName");
            if (_topicNameNode == null) {
                throw new RuntimeException("StopReplicaPartitionError: unable to locate field 'topicName', which is mandatory in version " + _version);
            } else {
                if (!_topicNameNode.isTextual()) {
                    // Report the field node's type, not the parent object's.
                    throw new RuntimeException("StopReplicaPartitionError expected a string type, but got " + _topicNameNode.getNodeType());
                }
                _object.topicName = _topicNameNode.asText();
            }
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("StopReplicaPartitionError: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "StopReplicaPartitionError");
            }
            JsonNode _errorCodeNode = _node.get("errorCode");
            if (_errorCodeNode == null) {
                throw new RuntimeException("StopReplicaPartitionError: unable to locate field 'errorCode', which is mandatory in version " + _version);
            } else {
                _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "StopReplicaPartitionError");
            }
            return _object;
        }
        /** Writes one StopReplicaPartitionError element as a JSON object node. */
        public static JsonNode write(StopReplicaPartitionError _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("topicName", new TextNode(_object.topicName));
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            _node.set("errorCode", new ShortNode(_object.errorCode));
            return _node;
        }
        /** Convenience overload matching the outer converter. */
        public static JsonNode write(StopReplicaPartitionError _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SyncGroupRequestData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.Bytes;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
/**
 * Request data for the SyncGroup API (apiKey 14), wire versions 0 through 5.
 *
 * Versions 0-3 use fixed-width encodings (int16 string lengths, int32 array
 * lengths); versions 4+ use the flexible/compact encoding (unsigned-varint
 * length-plus-one prefixes) and carry a trailing tagged-field section.
 * groupInstanceId exists from v3, protocolType/protocolName from v5.
 *
 * Auto-generated serialization code: statement order mirrors the wire layout
 * exactly, so read(), write() and addSize() must stay in lockstep.
 */
public class SyncGroupRequestData implements ApiMessage {
    // The unique group identifier; non-nullable on the wire.
    String groupId;
    // The generation of the group.
    int generationId;
    // The member ID assigned by the group; non-nullable on the wire.
    String memberId;
    // The consumer instance id provided by the end user; nullable, v3+ only.
    String groupInstanceId;
    // The group protocol type; nullable, v5+ only.
    String protocolType;
    // The group protocol name; nullable, v5+ only.
    String protocolName;
    // Per-member assignments.
    List<SyncGroupRequestAssignment> assignments;
    // Tagged fields read from the wire that this code does not recognize (v4+).
    private List<RawTaggedField> _unknownTaggedFields;
    // Wire schemas, one per supported version.
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("group_id", Type.STRING, "The unique group identifier."),
            new Field("generation_id", Type.INT32, "The generation of the group."),
            new Field("member_id", Type.STRING, "The member ID assigned by the group."),
            new Field("assignments", new ArrayOf(SyncGroupRequestAssignment.SCHEMA_0), "Each assignment.")
        );
    public static final Schema SCHEMA_1 = SCHEMA_0;
    public static final Schema SCHEMA_2 = SCHEMA_1;
    // v3 adds the nullable group_instance_id.
    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("group_id", Type.STRING, "The unique group identifier."),
            new Field("generation_id", Type.INT32, "The generation of the group."),
            new Field("member_id", Type.STRING, "The member ID assigned by the group."),
            new Field("group_instance_id", Type.NULLABLE_STRING, "The unique identifier of the consumer instance provided by end user."),
            new Field("assignments", new ArrayOf(SyncGroupRequestAssignment.SCHEMA_0), "Each assignment.")
        );
    // v4 switches to compact (flexible) types and adds the tagged-fields section.
    public static final Schema SCHEMA_4 =
        new Schema(
            new Field("group_id", Type.COMPACT_STRING, "The unique group identifier."),
            new Field("generation_id", Type.INT32, "The generation of the group."),
            new Field("member_id", Type.COMPACT_STRING, "The member ID assigned by the group."),
            new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, "The unique identifier of the consumer instance provided by end user."),
            new Field("assignments", new CompactArrayOf(SyncGroupRequestAssignment.SCHEMA_4), "Each assignment."),
            TaggedFieldsSection.of(
            )
        );
    // v5 adds protocol_type and protocol_name.
    public static final Schema SCHEMA_5 =
        new Schema(
            new Field("group_id", Type.COMPACT_STRING, "The unique group identifier."),
            new Field("generation_id", Type.INT32, "The generation of the group."),
            new Field("member_id", Type.COMPACT_STRING, "The member ID assigned by the group."),
            new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, "The unique identifier of the consumer instance provided by end user."),
            new Field("protocol_type", Type.COMPACT_NULLABLE_STRING, "The group protocol type."),
            new Field("protocol_name", Type.COMPACT_NULLABLE_STRING, "The group protocol name."),
            new Field("assignments", new CompactArrayOf(SyncGroupRequestAssignment.SCHEMA_4), "Each assignment."),
            TaggedFieldsSection.of(
            )
        );
    // Indexed by version number.
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3,
        SCHEMA_4,
        SCHEMA_5
    };
    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 5;
    /** Deserializing constructor: populates this object from the given readable. */
    public SyncGroupRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }
    /** Default constructor: initializes every field to its default value. */
    public SyncGroupRequestData() {
        this.groupId = "";
        this.generationId = 0;
        this.memberId = "";
        this.groupInstanceId = null;
        this.protocolType = null;
        this.protocolName = null;
        this.assignments = new ArrayList<SyncGroupRequestAssignment>(0);
    }
    @Override
    public short apiKey() {
        return 14;
    }
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 5;
    }
    /**
     * Deserializes this message from {@code _readable} using the layout of
     * {@code _version}. Fields absent from older versions are reset to their
     * defaults so a reused instance does not leak stale state.
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            if (_version >= 4) {
                // Compact encoding: varint stores length + 1 (0 would mean null).
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readShort();
            }
            if (length < 0) {
                throw new RuntimeException("non-nullable field groupId was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field groupId had invalid length " + length);
            } else {
                this.groupId = _readable.readString(length);
            }
        }
        this.generationId = _readable.readInt();
        {
            int length;
            if (_version >= 4) {
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readShort();
            }
            if (length < 0) {
                throw new RuntimeException("non-nullable field memberId was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field memberId had invalid length " + length);
            } else {
                this.memberId = _readable.readString(length);
            }
        }
        if (_version >= 3) {
            int length;
            if (_version >= 4) {
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readShort();
            }
            if (length < 0) {
                // Nullable field: negative length means null.
                this.groupInstanceId = null;
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field groupInstanceId had invalid length " + length);
            } else {
                this.groupInstanceId = _readable.readString(length);
            }
        } else {
            this.groupInstanceId = null;
        }
        if (_version >= 5) {
            // v5+ is always flexible, so only the compact encoding is handled here.
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                this.protocolType = null;
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field protocolType had invalid length " + length);
            } else {
                this.protocolType = _readable.readString(length);
            }
        } else {
            this.protocolType = null;
        }
        if (_version >= 5) {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                this.protocolName = null;
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field protocolName had invalid length " + length);
            } else {
                this.protocolName = _readable.readString(length);
            }
        } else {
            this.protocolName = null;
        }
        {
            if (_version >= 4) {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field assignments was serialized as null");
                } else {
                    // Guard against hostile lengths before allocating.
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<SyncGroupRequestAssignment> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new SyncGroupRequestAssignment(_readable, _version));
                    }
                    this.assignments = newCollection;
                }
            } else {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field assignments was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<SyncGroupRequestAssignment> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new SyncGroupRequestAssignment(_readable, _version));
                    }
                    this.assignments = newCollection;
                }
            }
        }
        this._unknownTaggedFields = null;
        if (_version >= 4) {
            // Flexible versions end with a tagged-field section; preserve unknown tags.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }
    /**
     * Serializes this message to {@code _writable} in the layout of
     * {@code _version}. Relies on {@code _cache} holding the UTF-8 bytes
     * produced by a prior addSize() pass over the same object.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = _cache.getSerializedValue(groupId);
            if (_version >= 4) {
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
            } else {
                _writable.writeShort((short) _stringBytes.length);
            }
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeInt(generationId);
        {
            byte[] _stringBytes = _cache.getSerializedValue(memberId);
            if (_version >= 4) {
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
            } else {
                _writable.writeShort((short) _stringBytes.length);
            }
            _writable.writeByteArray(_stringBytes);
        }
        if (_version >= 3) {
            if (groupInstanceId == null) {
                if (_version >= 4) {
                    // Compact null marker.
                    _writable.writeUnsignedVarint(0);
                } else {
                    _writable.writeShort((short) -1);
                }
            } else {
                byte[] _stringBytes = _cache.getSerializedValue(groupInstanceId);
                if (_version >= 4) {
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                } else {
                    _writable.writeShort((short) _stringBytes.length);
                }
                _writable.writeByteArray(_stringBytes);
            }
        } else {
            // Field does not exist below v3; a non-default value cannot be encoded.
            if (this.groupInstanceId != null) {
                throw new UnsupportedVersionException("Attempted to write a non-default groupInstanceId at version " + _version);
            }
        }
        if (_version >= 5) {
            if (protocolType == null) {
                _writable.writeUnsignedVarint(0);
            } else {
                byte[] _stringBytes = _cache.getSerializedValue(protocolType);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
        }
        if (_version >= 5) {
            if (protocolName == null) {
                _writable.writeUnsignedVarint(0);
            } else {
                byte[] _stringBytes = _cache.getSerializedValue(protocolName);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
        }
        if (_version >= 4) {
            _writable.writeUnsignedVarint(assignments.size() + 1);
            for (SyncGroupRequestAssignment assignmentsElement : assignments) {
                assignmentsElement.write(_writable, _cache, _version);
            }
        } else {
            _writable.writeInt(assignments.size());
            for (SyncGroupRequestAssignment assignmentsElement : assignments) {
                assignmentsElement.write(_writable, _cache, _version);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 4) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    /**
     * Accumulates the serialized size of this message for {@code _version} into
     * {@code _size}, caching each field's UTF-8 bytes in {@code _cache} so that
     * write() can reuse them without re-encoding.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'groupId' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(groupId, _stringBytes);
            if (_version >= 4) {
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            } else {
                // 2-byte int16 length prefix.
                _size.addBytes(_stringBytes.length + 2);
            }
        }
        // generationId: fixed 4-byte int32.
        _size.addBytes(4);
        {
            byte[] _stringBytes = memberId.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'memberId' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(memberId, _stringBytes);
            if (_version >= 4) {
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            } else {
                _size.addBytes(_stringBytes.length + 2);
            }
        }
        if (_version >= 3) {
            if (groupInstanceId == null) {
                if (_version >= 4) {
                    // One varint byte for the compact null marker.
                    _size.addBytes(1);
                } else {
                    _size.addBytes(2);
                }
            } else {
                byte[] _stringBytes = groupInstanceId.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'groupInstanceId' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(groupInstanceId, _stringBytes);
                if (_version >= 4) {
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                } else {
                    _size.addBytes(_stringBytes.length + 2);
                }
            }
        }
        if (_version >= 5) {
            if (protocolType == null) {
                _size.addBytes(1);
            } else {
                byte[] _stringBytes = protocolType.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'protocolType' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(protocolType, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
        }
        if (_version >= 5) {
            if (protocolName == null) {
                _size.addBytes(1);
            } else {
                byte[] _stringBytes = protocolName.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'protocolName' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(protocolName, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
        }
        {
            if (_version >= 4) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(assignments.size() + 1));
            } else {
                _size.addBytes(4);
            }
            for (SyncGroupRequestAssignment assignmentsElement : assignments) {
                assignmentsElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 4) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    /** Field-by-field equality, including unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof SyncGroupRequestData)) return false;
        SyncGroupRequestData other = (SyncGroupRequestData) obj;
        if (this.groupId == null) {
            if (other.groupId != null) return false;
        } else {
            if (!this.groupId.equals(other.groupId)) return false;
        }
        if (generationId != other.generationId) return false;
        if (this.memberId == null) {
            if (other.memberId != null) return false;
        } else {
            if (!this.memberId.equals(other.memberId)) return false;
        }
        if (this.groupInstanceId == null) {
            if (other.groupInstanceId != null) return false;
        } else {
            if (!this.groupInstanceId.equals(other.groupInstanceId)) return false;
        }
        if (this.protocolType == null) {
            if (other.protocolType != null) return false;
        } else {
            if (!this.protocolType.equals(other.protocolType)) return false;
        }
        if (this.protocolName == null) {
            if (other.protocolName != null) return false;
        } else {
            if (!this.protocolName.equals(other.protocolName)) return false;
        }
        if (this.assignments == null) {
            if (other.assignments != null) return false;
        } else {
            if (!this.assignments.equals(other.assignments)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }
    // NOTE: unknown tagged fields are compared in equals() but intentionally
    // excluded here, which still satisfies the equals/hashCode contract.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode());
        hashCode = 31 * hashCode + generationId;
        hashCode = 31 * hashCode + (memberId == null ? 0 : memberId.hashCode());
        hashCode = 31 * hashCode + (groupInstanceId == null ? 0 : groupInstanceId.hashCode());
        hashCode = 31 * hashCode + (protocolType == null ? 0 : protocolType.hashCode());
        hashCode = 31 * hashCode + (protocolName == null ? 0 : protocolName.hashCode());
        hashCode = 31 * hashCode + (assignments == null ? 0 : assignments.hashCode());
        return hashCode;
    }
    /** Deep copy: assignments elements are duplicated; strings are shared (immutable). */
    @Override
    public SyncGroupRequestData duplicate() {
        SyncGroupRequestData _duplicate = new SyncGroupRequestData();
        _duplicate.groupId = groupId;
        _duplicate.generationId = generationId;
        _duplicate.memberId = memberId;
        if (groupInstanceId == null) {
            _duplicate.groupInstanceId = null;
        } else {
            _duplicate.groupInstanceId = groupInstanceId;
        }
        if (protocolType == null) {
            _duplicate.protocolType = null;
        } else {
            _duplicate.protocolType = protocolType;
        }
        if (protocolName == null) {
            _duplicate.protocolName = null;
        } else {
            _duplicate.protocolName = protocolName;
        }
        ArrayList<SyncGroupRequestAssignment> newAssignments = new ArrayList<SyncGroupRequestAssignment>(assignments.size());
        for (SyncGroupRequestAssignment _element : assignments) {
            newAssignments.add(_element.duplicate());
        }
        _duplicate.assignments = newAssignments;
        return _duplicate;
    }
    @Override
    public String toString() {
        return "SyncGroupRequestData("
            + "groupId=" + ((groupId == null) ? "null" : "'" + groupId.toString() + "'")
            + ", generationId=" + generationId
            + ", memberId=" + ((memberId == null) ? "null" : "'" + memberId.toString() + "'")
            + ", groupInstanceId=" + ((groupInstanceId == null) ? "null" : "'" + groupInstanceId.toString() + "'")
            + ", protocolType=" + ((protocolType == null) ? "null" : "'" + protocolType.toString() + "'")
            + ", protocolName=" + ((protocolName == null) ? "null" : "'" + protocolName.toString() + "'")
            + ", assignments=" + MessageUtil.deepToString(assignments.iterator())
            + ")";
    }
    public String groupId() {
        return this.groupId;
    }
    public int generationId() {
        return this.generationId;
    }
    public String memberId() {
        return this.memberId;
    }
    public String groupInstanceId() {
        return this.groupInstanceId;
    }
    public String protocolType() {
        return this.protocolType;
    }
    public String protocolName() {
        return this.protocolName;
    }
    public List<SyncGroupRequestAssignment> assignments() {
        return this.assignments;
    }
    /** Returns unrecognized tagged fields from the wire, lazily initialized. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    // Fluent setters: each returns this for chaining.
    public SyncGroupRequestData setGroupId(String v) {
        this.groupId = v;
        return this;
    }
    public SyncGroupRequestData setGenerationId(int v) {
        this.generationId = v;
        return this;
    }
    public SyncGroupRequestData setMemberId(String v) {
        this.memberId = v;
        return this;
    }
    public SyncGroupRequestData setGroupInstanceId(String v) {
        this.groupInstanceId = v;
        return this;
    }
    public SyncGroupRequestData setProtocolType(String v) {
        this.protocolType = v;
        return this;
    }
    public SyncGroupRequestData setProtocolName(String v) {
        this.protocolName = v;
        return this;
    }
    public SyncGroupRequestData setAssignments(List<SyncGroupRequestAssignment> v) {
        this.assignments = v;
        return this;
    }
    /**
     * One member's assignment: the target member id plus an opaque byte-array
     * assignment blob. Shares the enclosing message's version range (0-5) and
     * flexible-encoding cutover at v4.
     */
    public static class SyncGroupRequestAssignment implements Message {
        // The ID of the member to assign; non-nullable on the wire.
        String memberId;
        // The member assignment as opaque bytes; non-nullable on the wire.
        byte[] assignment;
        // Unrecognized tagged fields from the wire (v4+).
        private List<RawTaggedField> _unknownTaggedFields;
        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("member_id", Type.STRING, "The ID of the member to assign."),
                new Field("assignment", Type.BYTES, "The member assignment.")
            );
        public static final Schema SCHEMA_1 = SCHEMA_0;
        public static final Schema SCHEMA_2 = SCHEMA_1;
        public static final Schema SCHEMA_3 = SCHEMA_2;
        public static final Schema SCHEMA_4 =
            new Schema(
                new Field("member_id", Type.COMPACT_STRING, "The ID of the member to assign."),
                new Field("assignment", Type.COMPACT_BYTES, "The member assignment."),
                TaggedFieldsSection.of(
                )
            );
        public static final Schema SCHEMA_5 = SCHEMA_4;
        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1,
            SCHEMA_2,
            SCHEMA_3,
            SCHEMA_4,
            SCHEMA_5
        };
        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 5;
        /** Deserializing constructor. */
        public SyncGroupRequestAssignment(Readable _readable, short _version) {
            read(_readable, _version);
        }
        /** Default constructor: empty member id and empty assignment bytes. */
        public SyncGroupRequestAssignment() {
            this.memberId = "";
            this.assignment = Bytes.EMPTY;
        }
        @Override
        public short lowestSupportedVersion() {
            return 0;
        }
        @Override
        public short highestSupportedVersion() {
            return 5;
        }
        /** Deserializes one assignment element; layout depends on {@code _version}. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 5) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of SyncGroupRequestAssignment");
            }
            {
                int length;
                if (_version >= 4) {
                    length = _readable.readUnsignedVarint() - 1;
                } else {
                    length = _readable.readShort();
                }
                if (length < 0) {
                    throw new RuntimeException("non-nullable field memberId was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field memberId had invalid length " + length);
                } else {
                    this.memberId = _readable.readString(length);
                }
            }
            {
                int length;
                if (_version >= 4) {
                    length = _readable.readUnsignedVarint() - 1;
                } else {
                    // Non-compact bytes use an int32 length prefix.
                    length = _readable.readInt();
                }
                if (length < 0) {
                    throw new RuntimeException("non-nullable field assignment was serialized as null");
                } else {
                    byte[] newBytes = _readable.readArray(length);
                    this.assignment = newBytes;
                }
            }
            this._unknownTaggedFields = null;
            if (_version >= 4) {
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }
        /** Serializes one assignment element; must mirror read() exactly. */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(memberId);
                if (_version >= 4) {
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                } else {
                    _writable.writeShort((short) _stringBytes.length);
                }
                _writable.writeByteArray(_stringBytes);
            }
            if (_version >= 4) {
                _writable.writeUnsignedVarint(assignment.length + 1);
            } else {
                _writable.writeInt(assignment.length);
            }
            _writable.writeByteArray(assignment);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 4) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }
        /** Accumulates this element's serialized size, caching memberId's UTF-8 bytes. */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 5) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of SyncGroupRequestAssignment");
            }
            {
                byte[] _stringBytes = memberId.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'memberId' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(memberId, _stringBytes);
                if (_version >= 4) {
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                } else {
                    _size.addBytes(_stringBytes.length + 2);
                }
            }
            {
                _size.addBytes(assignment.length);
                if (_version >= 4) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(assignment.length + 1));
                } else {
                    _size.addBytes(4);
                }
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_version >= 4) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }
        /** Field-by-field equality; assignment bytes compared by content. */
        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof SyncGroupRequestAssignment)) return false;
            SyncGroupRequestAssignment other = (SyncGroupRequestAssignment) obj;
            if (this.memberId == null) {
                if (other.memberId != null) return false;
            } else {
                if (!this.memberId.equals(other.memberId)) return false;
            }
            if (!Arrays.equals(this.assignment, other.assignment)) return false;
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }
        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (memberId == null ? 0 : memberId.hashCode());
            hashCode = 31 * hashCode + Arrays.hashCode(assignment);
            return hashCode;
        }
        /** Deep copy: the assignment byte array is duplicated. */
        @Override
        public SyncGroupRequestAssignment duplicate() {
            SyncGroupRequestAssignment _duplicate = new SyncGroupRequestAssignment();
            _duplicate.memberId = memberId;
            _duplicate.assignment = MessageUtil.duplicate(assignment);
            return _duplicate;
        }
        @Override
        public String toString() {
            return "SyncGroupRequestAssignment("
                + "memberId=" + ((memberId == null) ? "null" : "'" + memberId.toString() + "'")
                + ", assignment=" + Arrays.toString(assignment)
                + ")";
        }
        public String memberId() {
            return this.memberId;
        }
        public byte[] assignment() {
            return this.assignment;
        }
        /** Returns unrecognized tagged fields from the wire, lazily initialized. */
        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }
        // Fluent setters: each returns this for chaining.
        public SyncGroupRequestAssignment setMemberId(String v) {
            this.memberId = v;
            return this;
        }
        public SyncGroupRequestAssignment setAssignment(byte[] v) {
            this.assignment = v;
            return this;
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SyncGroupRequestDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BinaryNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import java.util.Arrays;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.SyncGroupRequestData.*;
/**
 * Converts {@link SyncGroupRequestData} to and from a Jackson JSON tree.
 * Generated serde: which fields are mandatory or nullable depends on the
 * request schema version, as encoded in the version checks below
 * (groupInstanceId is mandatory from v3, protocolType/protocolName from v5).
 */
public class SyncGroupRequestDataJsonConverter {
    /**
     * Reads a SyncGroupRequestData out of a JSON object node.
     *
     * @param _node    the JSON object holding the serialized request
     * @param _version the request schema version being parsed
     * @return the populated SyncGroupRequestData
     * @throws RuntimeException if a mandatory field is absent or a field has the wrong JSON type
     */
    public static SyncGroupRequestData read(JsonNode _node, short _version) {
        SyncGroupRequestData _object = new SyncGroupRequestData();
        // groupId: mandatory string in every version.
        JsonNode _groupIdNode = _node.get("groupId");
        if (_groupIdNode == null) {
            throw new RuntimeException("SyncGroupRequestData: unable to locate field 'groupId', which is mandatory in version " + _version);
        } else {
            if (!_groupIdNode.isTextual()) {
                // NOTE(review): the message reports _node's type (the enclosing object),
                // not the offending field's — this mirrors the upstream generator's output.
                throw new RuntimeException("SyncGroupRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.groupId = _groupIdNode.asText();
        }
        // generationId: mandatory int32 in every version.
        JsonNode _generationIdNode = _node.get("generationId");
        if (_generationIdNode == null) {
            throw new RuntimeException("SyncGroupRequestData: unable to locate field 'generationId', which is mandatory in version " + _version);
        } else {
            _object.generationId = MessageUtil.jsonNodeToInt(_generationIdNode, "SyncGroupRequestData");
        }
        // memberId: mandatory string in every version.
        JsonNode _memberIdNode = _node.get("memberId");
        if (_memberIdNode == null) {
            throw new RuntimeException("SyncGroupRequestData: unable to locate field 'memberId', which is mandatory in version " + _version);
        } else {
            if (!_memberIdNode.isTextual()) {
                throw new RuntimeException("SyncGroupRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.memberId = _memberIdNode.asText();
        }
        // groupInstanceId: nullable string; must be present (possibly as JSON null) from v3,
        // and defaults to null when absent in earlier versions.
        JsonNode _groupInstanceIdNode = _node.get("groupInstanceId");
        if (_groupInstanceIdNode == null) {
            if (_version >= 3) {
                throw new RuntimeException("SyncGroupRequestData: unable to locate field 'groupInstanceId', which is mandatory in version " + _version);
            } else {
                _object.groupInstanceId = null;
            }
        } else {
            if (_groupInstanceIdNode.isNull()) {
                _object.groupInstanceId = null;
            } else {
                if (!_groupInstanceIdNode.isTextual()) {
                    throw new RuntimeException("SyncGroupRequestData expected a string type, but got " + _node.getNodeType());
                }
                _object.groupInstanceId = _groupInstanceIdNode.asText();
            }
        }
        // protocolType: nullable string; mandatory from v5, null default before that.
        JsonNode _protocolTypeNode = _node.get("protocolType");
        if (_protocolTypeNode == null) {
            if (_version >= 5) {
                throw new RuntimeException("SyncGroupRequestData: unable to locate field 'protocolType', which is mandatory in version " + _version);
            } else {
                _object.protocolType = null;
            }
        } else {
            if (_protocolTypeNode.isNull()) {
                _object.protocolType = null;
            } else {
                if (!_protocolTypeNode.isTextual()) {
                    throw new RuntimeException("SyncGroupRequestData expected a string type, but got " + _node.getNodeType());
                }
                _object.protocolType = _protocolTypeNode.asText();
            }
        }
        // protocolName: nullable string; mandatory from v5, null default before that.
        JsonNode _protocolNameNode = _node.get("protocolName");
        if (_protocolNameNode == null) {
            if (_version >= 5) {
                throw new RuntimeException("SyncGroupRequestData: unable to locate field 'protocolName', which is mandatory in version " + _version);
            } else {
                _object.protocolName = null;
            }
        } else {
            if (_protocolNameNode.isNull()) {
                _object.protocolName = null;
            } else {
                if (!_protocolNameNode.isTextual()) {
                    throw new RuntimeException("SyncGroupRequestData expected a string type, but got " + _node.getNodeType());
                }
                _object.protocolName = _protocolNameNode.asText();
            }
        }
        // assignments: mandatory JSON array; each element is delegated to the
        // nested per-member assignment converter.
        JsonNode _assignmentsNode = _node.get("assignments");
        if (_assignmentsNode == null) {
            throw new RuntimeException("SyncGroupRequestData: unable to locate field 'assignments', which is mandatory in version " + _version);
        } else {
            if (!_assignmentsNode.isArray()) {
                throw new RuntimeException("SyncGroupRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<SyncGroupRequestAssignment> _collection = new ArrayList<SyncGroupRequestAssignment>(_assignmentsNode.size());
            _object.assignments = _collection;
            for (JsonNode _element : _assignmentsNode) {
                _collection.add(SyncGroupRequestAssignmentJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }
    /**
     * Serializes a SyncGroupRequestData into a JSON object node.
     * Fields not valid in {@code _version} are either omitted or, for
     * groupInstanceId below v3, rejected when set to a non-default value.
     *
     * @param _object           the request to serialize
     * @param _version          the target schema version
     * @param _serializeRecords passed through to nested converters; not consulted directly here
     * @return the JSON representation of the request
     */
    public static JsonNode write(SyncGroupRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("groupId", new TextNode(_object.groupId));
        _node.set("generationId", new IntNode(_object.generationId));
        _node.set("memberId", new TextNode(_object.memberId));
        if (_version >= 3) {
            if (_object.groupInstanceId == null) {
                _node.set("groupInstanceId", NullNode.instance);
            } else {
                _node.set("groupInstanceId", new TextNode(_object.groupInstanceId));
            }
        } else {
            // Older versions cannot carry this field, so a non-null value is an error.
            if (_object.groupInstanceId != null) {
                throw new UnsupportedVersionException("Attempted to write a non-default groupInstanceId at version " + _version);
            }
        }
        // protocolType/protocolName are silently dropped below v5 (no version exception).
        if (_version >= 5) {
            if (_object.protocolType == null) {
                _node.set("protocolType", NullNode.instance);
            } else {
                _node.set("protocolType", new TextNode(_object.protocolType));
            }
        }
        if (_version >= 5) {
            if (_object.protocolName == null) {
                _node.set("protocolName", NullNode.instance);
            } else {
                _node.set("protocolName", new TextNode(_object.protocolName));
            }
        }
        ArrayNode _assignmentsArray = new ArrayNode(JsonNodeFactory.instance);
        for (SyncGroupRequestAssignment _element : _object.assignments) {
            _assignmentsArray.add(SyncGroupRequestAssignmentJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("assignments", _assignmentsArray);
        return _node;
    }
    // Convenience overload: serialize with records included.
    public static JsonNode write(SyncGroupRequestData _object, short _version) {
        return write(_object, _version, true);
    }
    /**
     * JSON serde for the nested per-member assignment entry
     * (memberId string + raw assignment bytes).
     */
    public static class SyncGroupRequestAssignmentJsonConverter {
        /**
         * Reads one assignment entry; both fields are mandatory in every version.
         */
        public static SyncGroupRequestAssignment read(JsonNode _node, short _version) {
            SyncGroupRequestAssignment _object = new SyncGroupRequestAssignment();
            JsonNode _memberIdNode = _node.get("memberId");
            if (_memberIdNode == null) {
                throw new RuntimeException("SyncGroupRequestAssignment: unable to locate field 'memberId', which is mandatory in version " + _version);
            } else {
                if (!_memberIdNode.isTextual()) {
                    throw new RuntimeException("SyncGroupRequestAssignment expected a string type, but got " + _node.getNodeType());
                }
                _object.memberId = _memberIdNode.asText();
            }
            JsonNode _assignmentNode = _node.get("assignment");
            if (_assignmentNode == null) {
                throw new RuntimeException("SyncGroupRequestAssignment: unable to locate field 'assignment', which is mandatory in version " + _version);
            } else {
                // jsonNodeToBinary handles base64 decoding of the byte payload.
                _object.assignment = MessageUtil.jsonNodeToBinary(_assignmentNode, "SyncGroupRequestAssignment");
            }
            return _object;
        }
        /**
         * Writes one assignment entry. The byte array is defensively copied so
         * the resulting BinaryNode does not alias the object's buffer.
         */
        public static JsonNode write(SyncGroupRequestAssignment _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("memberId", new TextNode(_object.memberId));
            _node.set("assignment", new BinaryNode(Arrays.copyOf(_object.assignment, _object.assignment.length)));
            return _node;
        }
        // Convenience overload: serialize with records included.
        public static JsonNode write(SyncGroupRequestAssignment _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SyncGroupResponseData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.Bytes;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
/**
 * Auto-generated message class for the SyncGroup response (api key 14),
 * supporting schema versions 0 through 5. From v4 onward the message uses the
 * flexible wire format: compact (varint-length) strings/bytes and a trailing
 * tagged-fields section.
 */
public class SyncGroupResponseData implements ApiMessage {
    // The duration in milliseconds for which the request was throttled due to a
    // quota violation, or zero if the request did not violate any quota (v1+).
    int throttleTimeMs;
    // The error code, or 0 if there was no error.
    short errorCode;
    // The group protocol type; nullable, only present in v5+.
    String protocolType;
    // The group protocol name; nullable, only present in v5+.
    String protocolName;
    // The member assignment (opaque bytes).
    byte[] assignment;
    // Tagged fields whose tags this code does not recognize, preserved for round-tripping.
    private List<RawTaggedField> _unknownTaggedFields;
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("assignment", Type.BYTES, "The member assignment.")
        );
    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("assignment", Type.BYTES, "The member assignment.")
        );
    public static final Schema SCHEMA_2 = SCHEMA_1;
    public static final Schema SCHEMA_3 = SCHEMA_2;
    // v4 switches to the flexible format: compact bytes plus tagged fields.
    public static final Schema SCHEMA_4 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("assignment", Type.COMPACT_BYTES, "The member assignment."),
            TaggedFieldsSection.of(
            )
        );
    // v5 adds the nullable protocol_type / protocol_name fields.
    public static final Schema SCHEMA_5 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("protocol_type", Type.COMPACT_NULLABLE_STRING, "The group protocol type."),
            new Field("protocol_name", Type.COMPACT_NULLABLE_STRING, "The group protocol name."),
            new Field("assignment", Type.COMPACT_BYTES, "The member assignment."),
            TaggedFieldsSection.of(
            )
        );
    // Indexed by version number.
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3,
        SCHEMA_4,
        SCHEMA_5
    };
    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 5;
    // Deserializing constructor: populates this instance from the wire format.
    public SyncGroupResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }
    // Default constructor: all fields take their schema-default values.
    public SyncGroupResponseData() {
        this.throttleTimeMs = 0;
        this.errorCode = (short) 0;
        this.protocolType = null;
        this.protocolName = null;
        this.assignment = Bytes.EMPTY;
    }
    @Override
    public short apiKey() {
        return 14;
    }
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 5;
    }
    /**
     * Deserializes this message from {@code _readable} using the layout of
     * {@code _version}. Fields absent in the given version are reset to their
     * defaults. Read order must exactly match {@link #write}.
     */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version >= 1) {
            this.throttleTimeMs = _readable.readInt();
        } else {
            this.throttleTimeMs = 0;
        }
        this.errorCode = _readable.readShort();
        if (_version >= 5) {
            int length;
            // Compact nullable string: varint holds length + 1, so 0 encodes null.
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                this.protocolType = null;
            } else if (length > 0x7fff) {
                // Strings are capped at Short.MAX_VALUE bytes.
                throw new RuntimeException("string field protocolType had invalid length " + length);
            } else {
                this.protocolType = _readable.readString(length);
            }
        } else {
            this.protocolType = null;
        }
        if (_version >= 5) {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                this.protocolName = null;
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field protocolName had invalid length " + length);
            } else {
                this.protocolName = _readable.readString(length);
            }
        } else {
            this.protocolName = null;
        }
        {
            int length;
            // assignment bytes: compact (varint length+1) from v4, int32 length before.
            if (_version >= 4) {
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readInt();
            }
            if (length < 0) {
                throw new RuntimeException("non-nullable field assignment was serialized as null");
            } else {
                byte[] newBytes = _readable.readArray(length);
                this.assignment = newBytes;
            }
        }
        this._unknownTaggedFields = null;
        // Flexible versions carry a trailing tagged-field section; this schema
        // declares no known tags, so everything lands in _unknownTaggedFields.
        if (_version >= 4) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }
    /**
     * Serializes this message to {@code _writable} using the layout of
     * {@code _version}. Relies on string byte arrays previously cached in
     * {@code _cache} by {@link #addSize}. Write order must exactly match
     * {@link #read}.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 1) {
            _writable.writeInt(throttleTimeMs);
        }
        _writable.writeShort(errorCode);
        if (_version >= 5) {
            if (protocolType == null) {
                // Varint 0 encodes a null compact string.
                _writable.writeUnsignedVarint(0);
            } else {
                byte[] _stringBytes = _cache.getSerializedValue(protocolType);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
        }
        if (_version >= 5) {
            if (protocolName == null) {
                _writable.writeUnsignedVarint(0);
            } else {
                byte[] _stringBytes = _cache.getSerializedValue(protocolName);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
        }
        if (_version >= 4) {
            _writable.writeUnsignedVarint(assignment.length + 1);
        } else {
            _writable.writeInt(assignment.length);
        }
        _writable.writeByteArray(assignment);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 4) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            // Pre-flexible versions have nowhere to put tagged fields.
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    /**
     * Accumulates the serialized size of this message for {@code _version} and
     * caches the UTF-8 bytes of string fields so {@link #write} can reuse them.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 1) {
            _size.addBytes(4);
        }
        _size.addBytes(2);
        if (_version >= 5) {
            if (protocolType == null) {
                // One varint byte for the null marker.
                _size.addBytes(1);
            } else {
                byte[] _stringBytes = protocolType.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'protocolType' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(protocolType, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
        }
        if (_version >= 5) {
            if (protocolName == null) {
                _size.addBytes(1);
            } else {
                byte[] _stringBytes = protocolName.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'protocolName' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(protocolName, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
        }
        {
            _size.addBytes(assignment.length);
            if (_version >= 4) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(assignment.length + 1));
            } else {
                _size.addBytes(4);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 4) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    // Field-by-field equality, including unknown tagged fields.
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof SyncGroupResponseData)) return false;
        SyncGroupResponseData other = (SyncGroupResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (errorCode != other.errorCode) return false;
        if (this.protocolType == null) {
            if (other.protocolType != null) return false;
        } else {
            if (!this.protocolType.equals(other.protocolType)) return false;
        }
        if (this.protocolName == null) {
            if (other.protocolName != null) return false;
        } else {
            if (!this.protocolName.equals(other.protocolName)) return false;
        }
        if (!Arrays.equals(this.assignment, other.assignment)) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }
    // NOTE: hashCode intentionally excludes _unknownTaggedFields; equals compares them.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + errorCode;
        hashCode = 31 * hashCode + (protocolType == null ? 0 : protocolType.hashCode());
        hashCode = 31 * hashCode + (protocolName == null ? 0 : protocolName.hashCode());
        hashCode = 31 * hashCode + Arrays.hashCode(assignment);
        return hashCode;
    }
    /**
     * Returns a copy of this message. Strings are shared (immutable); the
     * assignment byte array is deep-copied. Unknown tagged fields are not copied.
     */
    @Override
    public SyncGroupResponseData duplicate() {
        SyncGroupResponseData _duplicate = new SyncGroupResponseData();
        _duplicate.throttleTimeMs = throttleTimeMs;
        _duplicate.errorCode = errorCode;
        if (protocolType == null) {
            _duplicate.protocolType = null;
        } else {
            _duplicate.protocolType = protocolType;
        }
        if (protocolName == null) {
            _duplicate.protocolName = null;
        } else {
            _duplicate.protocolName = protocolName;
        }
        _duplicate.assignment = MessageUtil.duplicate(assignment);
        return _duplicate;
    }
    @Override
    public String toString() {
        return "SyncGroupResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", errorCode=" + errorCode
            + ", protocolType=" + ((protocolType == null) ? "null" : "'" + protocolType.toString() + "'")
            + ", protocolName=" + ((protocolName == null) ? "null" : "'" + protocolName.toString() + "'")
            + ", assignment=" + Arrays.toString(assignment)
            + ")";
    }
    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }
    public short errorCode() {
        return this.errorCode;
    }
    public String protocolType() {
        return this.protocolType;
    }
    public String protocolName() {
        return this.protocolName;
    }
    // Returns the internal array without copying; callers must not mutate it.
    public byte[] assignment() {
        return this.assignment;
    }
    // Lazily materializes the unknown-tagged-fields list so callers can append.
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    // Fluent setters returning this for chaining.
    public SyncGroupResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }
    public SyncGroupResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }
    public SyncGroupResponseData setProtocolType(String v) {
        this.protocolType = v;
        return this;
    }
    public SyncGroupResponseData setProtocolName(String v) {
        this.protocolName = v;
        return this;
    }
    public SyncGroupResponseData setAssignment(byte[] v) {
        this.assignment = v;
        return this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SyncGroupResponseDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.BinaryNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.Arrays;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.SyncGroupResponseData.*;
/**
 * Converts {@link SyncGroupResponseData} to and from a Jackson JSON tree.
 * Generated serde: field presence requirements track the response schema
 * versions (throttleTimeMs mandatory from v1, protocolType/protocolName
 * from v5).
 */
public class SyncGroupResponseDataJsonConverter {
    /**
     * Reads a SyncGroupResponseData out of a JSON object node.
     *
     * @param _node    the JSON object holding the serialized response
     * @param _version the response schema version being parsed
     * @return the populated SyncGroupResponseData
     * @throws RuntimeException if a mandatory field is absent or a field has the wrong JSON type
     */
    public static SyncGroupResponseData read(JsonNode _node, short _version) {
        SyncGroupResponseData _object = new SyncGroupResponseData();
        // throttleTimeMs: mandatory from v1; defaults to 0 when absent in v0.
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            if (_version >= 1) {
                throw new RuntimeException("SyncGroupResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
            } else {
                _object.throttleTimeMs = 0;
            }
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "SyncGroupResponseData");
        }
        // errorCode: mandatory int16 in every version.
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("SyncGroupResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "SyncGroupResponseData");
        }
        // protocolType: nullable string; must be present (possibly JSON null) from v5.
        JsonNode _protocolTypeNode = _node.get("protocolType");
        if (_protocolTypeNode == null) {
            if (_version >= 5) {
                throw new RuntimeException("SyncGroupResponseData: unable to locate field 'protocolType', which is mandatory in version " + _version);
            } else {
                _object.protocolType = null;
            }
        } else {
            if (_protocolTypeNode.isNull()) {
                _object.protocolType = null;
            } else {
                if (!_protocolTypeNode.isTextual()) {
                    // NOTE(review): the message reports _node's type (the enclosing object),
                    // not the offending field's — this mirrors the upstream generator's output.
                    throw new RuntimeException("SyncGroupResponseData expected a string type, but got " + _node.getNodeType());
                }
                _object.protocolType = _protocolTypeNode.asText();
            }
        }
        // protocolName: same rules as protocolType.
        JsonNode _protocolNameNode = _node.get("protocolName");
        if (_protocolNameNode == null) {
            if (_version >= 5) {
                throw new RuntimeException("SyncGroupResponseData: unable to locate field 'protocolName', which is mandatory in version " + _version);
            } else {
                _object.protocolName = null;
            }
        } else {
            if (_protocolNameNode.isNull()) {
                _object.protocolName = null;
            } else {
                if (!_protocolNameNode.isTextual()) {
                    throw new RuntimeException("SyncGroupResponseData expected a string type, but got " + _node.getNodeType());
                }
                _object.protocolName = _protocolNameNode.asText();
            }
        }
        // assignment: mandatory byte payload, decoded by jsonNodeToBinary.
        JsonNode _assignmentNode = _node.get("assignment");
        if (_assignmentNode == null) {
            throw new RuntimeException("SyncGroupResponseData: unable to locate field 'assignment', which is mandatory in version " + _version);
        } else {
            _object.assignment = MessageUtil.jsonNodeToBinary(_assignmentNode, "SyncGroupResponseData");
        }
        return _object;
    }
    /**
     * Serializes a SyncGroupResponseData into a JSON object node. Fields not
     * valid in {@code _version} are omitted from the output.
     *
     * @param _object           the response to serialize
     * @param _version          the target schema version
     * @param _serializeRecords unused here; kept for generated-converter signature parity
     * @return the JSON representation of the response
     */
    public static JsonNode write(SyncGroupResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        if (_version >= 1) {
            _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        }
        _node.set("errorCode", new ShortNode(_object.errorCode));
        if (_version >= 5) {
            if (_object.protocolType == null) {
                _node.set("protocolType", NullNode.instance);
            } else {
                _node.set("protocolType", new TextNode(_object.protocolType));
            }
        }
        if (_version >= 5) {
            if (_object.protocolName == null) {
                _node.set("protocolName", NullNode.instance);
            } else {
                _node.set("protocolName", new TextNode(_object.protocolName));
            }
        }
        // Defensive copy so the BinaryNode does not alias the object's buffer.
        _node.set("assignment", new BinaryNode(Arrays.copyOf(_object.assignment, _object.assignment.length)));
        return _node;
    }
    // Convenience overload: serialize with records included.
    public static JsonNode write(SyncGroupResponseData _object, short _version) {
        return write(_object, _version, true);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/TxnOffsetCommitRequestData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
public class TxnOffsetCommitRequestData implements ApiMessage {
String transactionalId;
String groupId;
long producerId;
short producerEpoch;
int generationId;
String memberId;
String groupInstanceId;
List<TxnOffsetCommitRequestTopic> topics;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("transactional_id", Type.STRING, "The ID of the transaction."),
new Field("group_id", Type.STRING, "The ID of the group."),
new Field("producer_id", Type.INT64, "The current producer ID in use by the transactional ID."),
new Field("producer_epoch", Type.INT16, "The current epoch associated with the producer ID."),
new Field("topics", new ArrayOf(TxnOffsetCommitRequestTopic.SCHEMA_0), "Each topic that we want to commit offsets for.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema SCHEMA_2 =
new Schema(
new Field("transactional_id", Type.STRING, "The ID of the transaction."),
new Field("group_id", Type.STRING, "The ID of the group."),
new Field("producer_id", Type.INT64, "The current producer ID in use by the transactional ID."),
new Field("producer_epoch", Type.INT16, "The current epoch associated with the producer ID."),
new Field("topics", new ArrayOf(TxnOffsetCommitRequestTopic.SCHEMA_2), "Each topic that we want to commit offsets for.")
);
public static final Schema SCHEMA_3 =
new Schema(
new Field("transactional_id", Type.COMPACT_STRING, "The ID of the transaction."),
new Field("group_id", Type.COMPACT_STRING, "The ID of the group."),
new Field("producer_id", Type.INT64, "The current producer ID in use by the transactional ID."),
new Field("producer_epoch", Type.INT16, "The current epoch associated with the producer ID."),
new Field("generation_id", Type.INT32, "The generation of the consumer."),
new Field("member_id", Type.COMPACT_STRING, "The member ID assigned by the group coordinator."),
new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, "The unique identifier of the consumer instance provided by end user."),
new Field("topics", new CompactArrayOf(TxnOffsetCommitRequestTopic.SCHEMA_3), "Each topic that we want to commit offsets for."),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1,
SCHEMA_2,
SCHEMA_3
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 3;
public TxnOffsetCommitRequestData(Readable _readable, short _version) {
read(_readable, _version);
}
public TxnOffsetCommitRequestData() {
this.transactionalId = "";
this.groupId = "";
this.producerId = 0L;
this.producerEpoch = (short) 0;
this.generationId = -1;
this.memberId = "";
this.groupInstanceId = null;
this.topics = new ArrayList<TxnOffsetCommitRequestTopic>(0);
}
@Override
public short apiKey() {
return 28;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 3;
}
@Override
public void read(Readable _readable, short _version) {
{
int length;
if (_version >= 3) {
length = _readable.readUnsignedVarint() - 1;
} else {
length = _readable.readShort();
}
if (length < 0) {
throw new RuntimeException("non-nullable field transactionalId was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field transactionalId had invalid length " + length);
} else {
this.transactionalId = _readable.readString(length);
}
}
{
int length;
if (_version >= 3) {
length = _readable.readUnsignedVarint() - 1;
} else {
length = _readable.readShort();
}
if (length < 0) {
throw new RuntimeException("non-nullable field groupId was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field groupId had invalid length " + length);
} else {
this.groupId = _readable.readString(length);
}
}
this.producerId = _readable.readLong();
this.producerEpoch = _readable.readShort();
if (_version >= 3) {
this.generationId = _readable.readInt();
} else {
this.generationId = -1;
}
if (_version >= 3) {
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
throw new RuntimeException("non-nullable field memberId was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field memberId had invalid length " + length);
} else {
this.memberId = _readable.readString(length);
}
} else {
this.memberId = "";
}
if (_version >= 3) {
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
this.groupInstanceId = null;
} else if (length > 0x7fff) {
throw new RuntimeException("string field groupInstanceId had invalid length " + length);
} else {
this.groupInstanceId = _readable.readString(length);
}
} else {
this.groupInstanceId = null;
}
{
if (_version >= 3) {
int arrayLength;
arrayLength = _readable.readUnsignedVarint() - 1;
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field topics was serialized as null");
} else {
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<TxnOffsetCommitRequestTopic> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new TxnOffsetCommitRequestTopic(_readable, _version));
}
this.topics = newCollection;
}
} else {
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field topics was serialized as null");
} else {
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<TxnOffsetCommitRequestTopic> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new TxnOffsetCommitRequestTopic(_readable, _version));
}
this.topics = newCollection;
}
}
}
this._unknownTaggedFields = null;
if (_version >= 3) {
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
    // Serializes this request in the Kafka wire format for the given protocol
    // version.  Versions >= 3 use the "flexible" encoding: compact strings and
    // arrays (unsigned varint of length + 1) plus a trailing tagged-field
    // section.  Older versions use INT16 string lengths and INT32 array sizes.
    // NOTE(review): string byte payloads are fetched from _cache, so callers
    // presumably must run addSize() first to populate it — confirm with the
    // Message framework's write path.
    int _numTaggedFields = 0;
    {
        // transactionalId: non-nullable string, present in all versions.
        byte[] _stringBytes = _cache.getSerializedValue(transactionalId);
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
        } else {
            _writable.writeShort((short) _stringBytes.length);
        }
        _writable.writeByteArray(_stringBytes);
    }
    {
        // groupId: non-nullable string, present in all versions.
        byte[] _stringBytes = _cache.getSerializedValue(groupId);
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
        } else {
            _writable.writeShort((short) _stringBytes.length);
        }
        _writable.writeByteArray(_stringBytes);
    }
    _writable.writeLong(producerId);
    _writable.writeShort(producerEpoch);
    if (_version >= 3) {
        _writable.writeInt(generationId);
    } else {
        // generationId only exists on the wire from v3; refuse to silently
        // drop a non-default value when targeting an older version.
        if (this.generationId != -1) {
            throw new UnsupportedVersionException("Attempted to write a non-default generationId at version " + _version);
        }
    }
    if (_version >= 3) {
        {
            // memberId: compact non-nullable string (v3+ only).
            byte[] _stringBytes = _cache.getSerializedValue(memberId);
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
            _writable.writeByteArray(_stringBytes);
        }
    } else {
        if (!this.memberId.equals("")) {
            throw new UnsupportedVersionException("Attempted to write a non-default memberId at version " + _version);
        }
    }
    if (_version >= 3) {
        // groupInstanceId: compact NULLABLE string; varint 0 encodes null.
        if (groupInstanceId == null) {
            _writable.writeUnsignedVarint(0);
        } else {
            byte[] _stringBytes = _cache.getSerializedValue(groupInstanceId);
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
            _writable.writeByteArray(_stringBytes);
        }
    } else {
        if (this.groupInstanceId != null) {
            throw new UnsupportedVersionException("Attempted to write a non-default groupInstanceId at version " + _version);
        }
    }
    if (_version >= 3) {
        // Compact array: element count is encoded as count + 1.
        _writable.writeUnsignedVarint(topics.size() + 1);
        for (TxnOffsetCommitRequestTopic topicsElement : topics) {
            topicsElement.write(_writable, _cache, _version);
        }
    } else {
        _writable.writeInt(topics.size());
        for (TxnOffsetCommitRequestTopic topicsElement : topics) {
            topicsElement.write(_writable, _cache, _version);
        }
    }
    // Tagged fields: only flexible versions (v3+) carry them; older versions
    // must reject any unknown tagged fields rather than drop them.
    RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
    _numTaggedFields += _rawWriter.numFields();
    if (_version >= 3) {
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    } else {
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }
}
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
    // Accumulates the exact serialized size of this message for _version and
    // caches each string's UTF-8 bytes so write() can reuse them.  The byte
    // accounting here must stay in lock-step with write(); a mismatch would
    // corrupt the framing of the request on the wire.
    int _numTaggedFields = 0;
    {
        byte[] _stringBytes = transactionalId.getBytes(StandardCharsets.UTF_8);
        // 0x7fff is the maximum length expressible in the INT16 legacy encoding.
        if (_stringBytes.length > 0x7fff) {
            throw new RuntimeException("'transactionalId' field is too long to be serialized");
        }
        _cache.cacheSerializedValue(transactionalId, _stringBytes);
        if (_version >= 3) {
            // Compact string: payload + varint(length + 1).
            _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
        } else {
            // Legacy string: payload + 2-byte length prefix.
            _size.addBytes(_stringBytes.length + 2);
        }
    }
    {
        byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8);
        if (_stringBytes.length > 0x7fff) {
            throw new RuntimeException("'groupId' field is too long to be serialized");
        }
        _cache.cacheSerializedValue(groupId, _stringBytes);
        if (_version >= 3) {
            _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
        } else {
            _size.addBytes(_stringBytes.length + 2);
        }
    }
    _size.addBytes(8);   // producerId: INT64
    _size.addBytes(2);   // producerEpoch: INT16
    if (_version >= 3) {
        _size.addBytes(4);   // generationId: INT32, v3+ only
    }
    if (_version >= 3) {
        {
            byte[] _stringBytes = memberId.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'memberId' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(memberId, _stringBytes);
            _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
        }
    }
    if (_version >= 3) {
        if (groupInstanceId == null) {
            _size.addBytes(1);   // compact nullable string: null is a single varint 0
        } else {
            byte[] _stringBytes = groupInstanceId.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'groupInstanceId' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(groupInstanceId, _stringBytes);
            _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
        }
    }
    {
        if (_version >= 3) {
            // Compact array header: varint(count + 1).
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1));
        } else {
            _size.addBytes(4);   // legacy array header: INT32 count
        }
        for (TxnOffsetCommitRequestTopic topicsElement : topics) {
            topicsElement.addSize(_size, _cache, _version);
        }
    }
    if (_unknownTaggedFields != null) {
        // Each raw tagged field costs varint(tag) + varint(size) + payload.
        _numTaggedFields += _unknownTaggedFields.size();
        for (RawTaggedField _field : _unknownTaggedFields) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
            _size.addBytes(_field.size());
        }
    }
    if (_version >= 3) {
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    } else {
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }
}
@Override
public boolean equals(Object obj) {
    // Structural equality over every field, including unknown tagged fields.
    if (!(obj instanceof TxnOffsetCommitRequestData)) return false;
    TxnOffsetCommitRequestData other = (TxnOffsetCommitRequestData) obj;
    // Primitive fields first: cheapest comparisons.
    if (producerId != other.producerId) return false;
    if (producerEpoch != other.producerEpoch) return false;
    if (generationId != other.generationId) return false;
    // Null-safe reference-field comparisons.
    if (this.transactionalId == null ? other.transactionalId != null
            : !this.transactionalId.equals(other.transactionalId)) return false;
    if (this.groupId == null ? other.groupId != null
            : !this.groupId.equals(other.groupId)) return false;
    if (this.memberId == null ? other.memberId != null
            : !this.memberId.equals(other.memberId)) return false;
    if (this.groupInstanceId == null ? other.groupInstanceId != null
            : !this.groupInstanceId.equals(other.groupInstanceId)) return false;
    if (this.topics == null ? other.topics != null
            : !this.topics.equals(other.topics)) return false;
    return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
    // Standard 31-multiplier accumulation; field order matches equals().
    // Unknown tagged fields are deliberately excluded, mirroring the original.
    int result = 0;
    result = 31 * result + (transactionalId == null ? 0 : transactionalId.hashCode());
    result = 31 * result + (groupId == null ? 0 : groupId.hashCode());
    result = 31 * result + ((int) (producerId >> 32) ^ (int) producerId);
    result = 31 * result + producerEpoch;
    result = 31 * result + generationId;
    result = 31 * result + (memberId == null ? 0 : memberId.hashCode());
    result = 31 * result + (groupInstanceId == null ? 0 : groupInstanceId.hashCode());
    result = 31 * result + (topics == null ? 0 : topics.hashCode());
    return result;
}
@Override
public TxnOffsetCommitRequestData duplicate() {
    // Deep copy: scalars and immutable Strings are shared by reference,
    // while the topics list and its elements are recursively duplicated.
    TxnOffsetCommitRequestData copy = new TxnOffsetCommitRequestData();
    copy.transactionalId = transactionalId;
    copy.groupId = groupId;
    copy.producerId = producerId;
    copy.producerEpoch = producerEpoch;
    copy.generationId = generationId;
    copy.memberId = memberId;
    // String is immutable, so sharing the reference (null or not) is safe.
    copy.groupInstanceId = groupInstanceId;
    ArrayList<TxnOffsetCommitRequestTopic> copiedTopics =
        new ArrayList<TxnOffsetCommitRequestTopic>(topics.size());
    for (TxnOffsetCommitRequestTopic topic : topics) {
        copiedTopics.add(topic.duplicate());
    }
    copy.topics = copiedTopics;
    return copy;
}
@Override
public String toString() {
    // Human-readable dump of every field; output is byte-identical to the
    // generated concatenation form.
    StringBuilder sb = new StringBuilder("TxnOffsetCommitRequestData(");
    sb.append("transactionalId=");
    sb.append(transactionalId == null ? "null" : "'" + transactionalId + "'");
    sb.append(", groupId=");
    sb.append(groupId == null ? "null" : "'" + groupId + "'");
    sb.append(", producerId=").append(producerId);
    sb.append(", producerEpoch=").append(producerEpoch);
    sb.append(", generationId=").append(generationId);
    sb.append(", memberId=");
    sb.append(memberId == null ? "null" : "'" + memberId + "'");
    sb.append(", groupInstanceId=");
    sb.append(groupInstanceId == null ? "null" : "'" + groupInstanceId + "'");
    sb.append(", topics=").append(MessageUtil.deepToString(topics.iterator()));
    sb.append(")");
    return sb.toString();
}
// ---- Accessors ----------------------------------------------------------
// These return internal references directly (no defensive copies); mutating
// the returned topics list mutates this message.
public String transactionalId() {
    return this.transactionalId;
}
public String groupId() {
    return this.groupId;
}
public long producerId() {
    return this.producerId;
}
public short producerEpoch() {
    return this.producerEpoch;
}
public int generationId() {
    return this.generationId;
}
public String memberId() {
    return this.memberId;
}
// May return null: groupInstanceId is a nullable field.
public String groupInstanceId() {
    return this.groupInstanceId;
}
public List<TxnOffsetCommitRequestTopic> topics() {
    return this.topics;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
    // Lazily materialize the list so callers can always append to the result.
    if (_unknownTaggedFields == null) {
        _unknownTaggedFields = new ArrayList<>(0);
    }
    return _unknownTaggedFields;
}
// ---- Chainable setters --------------------------------------------------
// Each setter stores the value as-is (no validation or copying) and returns
// this instance for fluent call chains.
public TxnOffsetCommitRequestData setTransactionalId(String v) {
    this.transactionalId = v;
    return this;
}
public TxnOffsetCommitRequestData setGroupId(String v) {
    this.groupId = v;
    return this;
}
public TxnOffsetCommitRequestData setProducerId(long v) {
    this.producerId = v;
    return this;
}
public TxnOffsetCommitRequestData setProducerEpoch(short v) {
    this.producerEpoch = v;
    return this;
}
public TxnOffsetCommitRequestData setGenerationId(int v) {
    this.generationId = v;
    return this;
}
public TxnOffsetCommitRequestData setMemberId(String v) {
    this.memberId = v;
    return this;
}
// Accepts null: groupInstanceId is a nullable field.
public TxnOffsetCommitRequestData setGroupInstanceId(String v) {
    this.groupInstanceId = v;
    return this;
}
public TxnOffsetCommitRequestData setTopics(List<TxnOffsetCommitRequestTopic> v) {
    this.topics = v;
    return this;
}
/**
 * One topic plus the partition offsets to commit within it, as carried by
 * {@code TxnOffsetCommitRequestData.topics}.  Generated message class:
 * versions 0-3 share the field layout; v3 switches to the compact (flexible)
 * encoding with tagged-field support.
 */
public static class TxnOffsetCommitRequestTopic implements Message {
    String name;                                         // topic name; non-nullable on the wire
    List<TxnOffsetCommitRequestPartition> partitions;    // per-partition commit entries
    private List<RawTaggedField> _unknownTaggedFields;   // lazily created; null until needed
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("name", Type.STRING, "The topic name."),
            new Field("partitions", new ArrayOf(TxnOffsetCommitRequestPartition.SCHEMA_0), "The partitions inside the topic that we want to committ offsets for.")
        );
    public static final Schema SCHEMA_1 = SCHEMA_0;
    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("name", Type.STRING, "The topic name."),
            new Field("partitions", new ArrayOf(TxnOffsetCommitRequestPartition.SCHEMA_2), "The partitions inside the topic that we want to committ offsets for.")
        );
    // v3: compact types and a (currently empty) tagged-fields section.
    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("name", Type.COMPACT_STRING, "The topic name."),
            new Field("partitions", new CompactArrayOf(TxnOffsetCommitRequestPartition.SCHEMA_3), "The partitions inside the topic that we want to committ offsets for."),
            TaggedFieldsSection.of(
            )
        );
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3
    };
    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 3;
    // Deserializing constructor: reads the struct directly from the wire.
    public TxnOffsetCommitRequestTopic(Readable _readable, short _version) {
        read(_readable, _version);
    }
    // Default constructor: empty name and an empty partition list.
    public TxnOffsetCommitRequestTopic() {
        this.name = "";
        this.partitions = new ArrayList<TxnOffsetCommitRequestPartition>(0);
    }
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 3;
    }
    /**
     * Populates this struct from the wire format for {@code _version}.
     * v3+ uses compact strings/arrays and reads a trailing tagged-field
     * section; older versions use INT16/INT32 length prefixes.
     */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version > 3) {
            throw new UnsupportedVersionException("Can't read version " + _version + " of TxnOffsetCommitRequestTopic");
        }
        {
            int length;
            if (_version >= 3) {
                // Compact string: varint length is encoded as (length + 1).
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readShort();
            }
            if (length < 0) {
                throw new RuntimeException("non-nullable field name was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field name had invalid length " + length);
            } else {
                this.name = _readable.readString(length);
            }
        }
        {
            if (_version >= 3) {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitions was serialized as null");
                } else {
                    // Guard against corrupt/hostile counts before allocating.
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<TxnOffsetCommitRequestPartition> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new TxnOffsetCommitRequestPartition(_readable, _version));
                    }
                    this.partitions = newCollection;
                }
            } else {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitions was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<TxnOffsetCommitRequestPartition> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new TxnOffsetCommitRequestPartition(_readable, _version));
                    }
                    this.partitions = newCollection;
                }
            }
        }
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            // No tagged fields are defined for this struct yet, so everything
            // read here is preserved as an unknown tagged field.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }
    /**
     * Serializes this struct; must mirror addSize() exactly.  String bytes
     * must already be cached in {@code _cache}.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = _cache.getSerializedValue(name);
            if (_version >= 3) {
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
            } else {
                _writable.writeShort((short) _stringBytes.length);
            }
            _writable.writeByteArray(_stringBytes);
        }
        if (_version >= 3) {
            // Compact array: count is encoded as (count + 1).
            _writable.writeUnsignedVarint(partitions.size() + 1);
            for (TxnOffsetCommitRequestPartition partitionsElement : partitions) {
                partitionsElement.write(_writable, _cache, _version);
            }
        } else {
            _writable.writeInt(partitions.size());
            for (TxnOffsetCommitRequestPartition partitionsElement : partitions) {
                partitionsElement.write(_writable, _cache, _version);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    /**
     * Accumulates this struct's serialized size and caches string bytes for
     * the subsequent write() call.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version > 3) {
            throw new UnsupportedVersionException("Can't size version " + _version + " of TxnOffsetCommitRequestTopic");
        }
        {
            byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'name' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(name, _stringBytes);
            if (_version >= 3) {
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            } else {
                _size.addBytes(_stringBytes.length + 2);
            }
        }
        {
            if (_version >= 3) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1));
            } else {
                _size.addBytes(4);
            }
            for (TxnOffsetCommitRequestPartition partitionsElement : partitions) {
                partitionsElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 3) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    @Override
    public boolean equals(Object obj) {
        // Structural equality over name, partitions, and unknown tagged fields.
        if (!(obj instanceof TxnOffsetCommitRequestTopic)) return false;
        TxnOffsetCommitRequestTopic other = (TxnOffsetCommitRequestTopic) obj;
        if (this.name == null) {
            if (other.name != null) return false;
        } else {
            if (!this.name.equals(other.name)) return false;
        }
        if (this.partitions == null) {
            if (other.partitions != null) return false;
        } else {
            if (!this.partitions.equals(other.partitions)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }
    @Override
    public int hashCode() {
        // Unknown tagged fields are excluded, matching the generated contract.
        int hashCode = 0;
        hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
        hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
        return hashCode;
    }
    @Override
    public TxnOffsetCommitRequestTopic duplicate() {
        // Deep copy: the partitions list and its elements are duplicated.
        TxnOffsetCommitRequestTopic _duplicate = new TxnOffsetCommitRequestTopic();
        _duplicate.name = name;
        ArrayList<TxnOffsetCommitRequestPartition> newPartitions = new ArrayList<TxnOffsetCommitRequestPartition>(partitions.size());
        for (TxnOffsetCommitRequestPartition _element : partitions) {
            newPartitions.add(_element.duplicate());
        }
        _duplicate.partitions = newPartitions;
        return _duplicate;
    }
    @Override
    public String toString() {
        return "TxnOffsetCommitRequestTopic("
            + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
            + ", partitions=" + MessageUtil.deepToString(partitions.iterator())
            + ")";
    }
    public String name() {
        return this.name;
    }
    // Returns the internal list; mutations affect this struct.
    public List<TxnOffsetCommitRequestPartition> partitions() {
        return this.partitions;
    }
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        // Lazily materialize so callers can always append to the result.
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    // Chainable setters: store as-is, return this.
    public TxnOffsetCommitRequestTopic setName(String v) {
        this.name = v;
        return this;
    }
    public TxnOffsetCommitRequestTopic setPartitions(List<TxnOffsetCommitRequestPartition> v) {
        this.partitions = v;
        return this;
    }
}
/**
 * A single partition's offset-commit entry inside a
 * {@code TxnOffsetCommitRequestTopic}.  Generated message class:
 * v2 adds committedLeaderEpoch; v3 switches to compact (flexible) encoding
 * with tagged-field support.
 */
public static class TxnOffsetCommitRequestPartition implements Message {
    int partitionIndex;          // partition number within the topic
    long committedOffset;        // offset to commit
    int committedLeaderEpoch;    // leader epoch of last consumed record; -1 when absent (pre-v2)
    String committedMetadata;    // nullable client metadata
    private List<RawTaggedField> _unknownTaggedFields;   // lazily created; null until needed
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("partition_index", Type.INT32, "The index of the partition within the topic."),
            new Field("committed_offset", Type.INT64, "The message offset to be committed."),
            new Field("committed_metadata", Type.NULLABLE_STRING, "Any associated metadata the client wants to keep.")
        );
    public static final Schema SCHEMA_1 = SCHEMA_0;
    // v2 introduces committed_leader_epoch.
    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("partition_index", Type.INT32, "The index of the partition within the topic."),
            new Field("committed_offset", Type.INT64, "The message offset to be committed."),
            new Field("committed_leader_epoch", Type.INT32, "The leader epoch of the last consumed record."),
            new Field("committed_metadata", Type.NULLABLE_STRING, "Any associated metadata the client wants to keep.")
        );
    // v3: compact types and a (currently empty) tagged-fields section.
    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("partition_index", Type.INT32, "The index of the partition within the topic."),
            new Field("committed_offset", Type.INT64, "The message offset to be committed."),
            new Field("committed_leader_epoch", Type.INT32, "The leader epoch of the last consumed record."),
            new Field("committed_metadata", Type.COMPACT_NULLABLE_STRING, "Any associated metadata the client wants to keep."),
            TaggedFieldsSection.of(
            )
        );
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3
    };
    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 3;
    // Deserializing constructor: reads the struct directly from the wire.
    public TxnOffsetCommitRequestPartition(Readable _readable, short _version) {
        read(_readable, _version);
    }
    // Default constructor with the schema's default values.
    public TxnOffsetCommitRequestPartition() {
        this.partitionIndex = 0;
        this.committedOffset = 0L;
        this.committedLeaderEpoch = -1;
        this.committedMetadata = "";
    }
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 3;
    }
    /**
     * Populates this struct from the wire format for {@code _version}.
     * Fields absent in older versions fall back to their defaults.
     */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version > 3) {
            throw new UnsupportedVersionException("Can't read version " + _version + " of TxnOffsetCommitRequestPartition");
        }
        this.partitionIndex = _readable.readInt();
        this.committedOffset = _readable.readLong();
        if (_version >= 2) {
            this.committedLeaderEpoch = _readable.readInt();
        } else {
            this.committedLeaderEpoch = -1;
        }
        {
            int length;
            if (_version >= 3) {
                // Compact nullable string: varint 0 encodes null, so length
                // becomes -1 after the (value - 1) decode below.
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readShort();
            }
            if (length < 0) {
                this.committedMetadata = null;
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field committedMetadata had invalid length " + length);
            } else {
                this.committedMetadata = _readable.readString(length);
            }
        }
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            // No tagged fields are defined for this struct yet; preserve any
            // received tags as unknown tagged fields.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }
    /**
     * Serializes this struct; must mirror addSize() exactly.  The metadata
     * string's bytes must already be cached in {@code _cache}.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(partitionIndex);
        _writable.writeLong(committedOffset);
        if (_version >= 2) {
            _writable.writeInt(committedLeaderEpoch);
        }
        if (committedMetadata == null) {
            if (_version >= 3) {
                _writable.writeUnsignedVarint(0);       // compact null marker
            } else {
                _writable.writeShort((short) -1);       // legacy null marker
            }
        } else {
            byte[] _stringBytes = _cache.getSerializedValue(committedMetadata);
            if (_version >= 3) {
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
            } else {
                _writable.writeShort((short) _stringBytes.length);
            }
            _writable.writeByteArray(_stringBytes);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    /**
     * Accumulates this struct's serialized size and caches the metadata
     * string bytes for the subsequent write() call.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version > 3) {
            throw new UnsupportedVersionException("Can't size version " + _version + " of TxnOffsetCommitRequestPartition");
        }
        _size.addBytes(4);   // partitionIndex: INT32
        _size.addBytes(8);   // committedOffset: INT64
        if (_version >= 2) {
            _size.addBytes(4);   // committedLeaderEpoch: INT32, v2+ only
        }
        if (committedMetadata == null) {
            if (_version >= 3) {
                _size.addBytes(1);   // compact null: single varint 0
            } else {
                _size.addBytes(2);   // legacy null: INT16 -1
            }
        } else {
            byte[] _stringBytes = committedMetadata.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'committedMetadata' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(committedMetadata, _stringBytes);
            if (_version >= 3) {
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            } else {
                _size.addBytes(_stringBytes.length + 2);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 3) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    @Override
    public boolean equals(Object obj) {
        // Structural equality over all fields plus unknown tagged fields.
        if (!(obj instanceof TxnOffsetCommitRequestPartition)) return false;
        TxnOffsetCommitRequestPartition other = (TxnOffsetCommitRequestPartition) obj;
        if (partitionIndex != other.partitionIndex) return false;
        if (committedOffset != other.committedOffset) return false;
        if (committedLeaderEpoch != other.committedLeaderEpoch) return false;
        if (this.committedMetadata == null) {
            if (other.committedMetadata != null) return false;
        } else {
            if (!this.committedMetadata.equals(other.committedMetadata)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }
    @Override
    public int hashCode() {
        // Unknown tagged fields are excluded, matching the generated contract.
        int hashCode = 0;
        hashCode = 31 * hashCode + partitionIndex;
        hashCode = 31 * hashCode + ((int) (committedOffset >> 32) ^ (int) committedOffset);
        hashCode = 31 * hashCode + committedLeaderEpoch;
        hashCode = 31 * hashCode + (committedMetadata == null ? 0 : committedMetadata.hashCode());
        return hashCode;
    }
    @Override
    public TxnOffsetCommitRequestPartition duplicate() {
        // All fields are primitives or immutable Strings, so a shallow copy
        // is a complete copy.
        TxnOffsetCommitRequestPartition _duplicate = new TxnOffsetCommitRequestPartition();
        _duplicate.partitionIndex = partitionIndex;
        _duplicate.committedOffset = committedOffset;
        _duplicate.committedLeaderEpoch = committedLeaderEpoch;
        if (committedMetadata == null) {
            _duplicate.committedMetadata = null;
        } else {
            _duplicate.committedMetadata = committedMetadata;
        }
        return _duplicate;
    }
    @Override
    public String toString() {
        return "TxnOffsetCommitRequestPartition("
            + "partitionIndex=" + partitionIndex
            + ", committedOffset=" + committedOffset
            + ", committedLeaderEpoch=" + committedLeaderEpoch
            + ", committedMetadata=" + ((committedMetadata == null) ? "null" : "'" + committedMetadata.toString() + "'")
            + ")";
    }
    // ---- Accessors ------------------------------------------------------
    public int partitionIndex() {
        return this.partitionIndex;
    }
    public long committedOffset() {
        return this.committedOffset;
    }
    public int committedLeaderEpoch() {
        return this.committedLeaderEpoch;
    }
    // May return null: committedMetadata is a nullable field.
    public String committedMetadata() {
        return this.committedMetadata;
    }
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        // Lazily materialize so callers can always append to the result.
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    // Chainable setters: store as-is, return this.
    public TxnOffsetCommitRequestPartition setPartitionIndex(int v) {
        this.partitionIndex = v;
        return this;
    }
    public TxnOffsetCommitRequestPartition setCommittedOffset(long v) {
        this.committedOffset = v;
        return this;
    }
    public TxnOffsetCommitRequestPartition setCommittedLeaderEpoch(int v) {
        this.committedLeaderEpoch = v;
        return this;
    }
    public TxnOffsetCommitRequestPartition setCommittedMetadata(String v) {
        this.committedMetadata = v;
        return this;
    }
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/TxnOffsetCommitRequestDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.TxnOffsetCommitRequestData.*;
public class TxnOffsetCommitRequestDataJsonConverter {
/**
 * Builds a {@code TxnOffsetCommitRequestData} from its JSON representation.
 * Fields introduced in v3 (generationId, memberId, groupInstanceId) are
 * mandatory at v3+ and fall back to their defaults in older versions.
 *
 * NOTE(review): the "expected a string type" errors report
 * {@code _node.getNodeType()} (the enclosing object's type, always OBJECT)
 * rather than the offending field node's type; this matches the upstream
 * code generator, so it is left as-is.
 */
public static TxnOffsetCommitRequestData read(JsonNode _node, short _version) {
    TxnOffsetCommitRequestData _object = new TxnOffsetCommitRequestData();
    JsonNode _transactionalIdNode = _node.get("transactionalId");
    if (_transactionalIdNode == null) {
        throw new RuntimeException("TxnOffsetCommitRequestData: unable to locate field 'transactionalId', which is mandatory in version " + _version);
    } else {
        if (!_transactionalIdNode.isTextual()) {
            throw new RuntimeException("TxnOffsetCommitRequestData expected a string type, but got " + _node.getNodeType());
        }
        _object.transactionalId = _transactionalIdNode.asText();
    }
    JsonNode _groupIdNode = _node.get("groupId");
    if (_groupIdNode == null) {
        throw new RuntimeException("TxnOffsetCommitRequestData: unable to locate field 'groupId', which is mandatory in version " + _version);
    } else {
        if (!_groupIdNode.isTextual()) {
            throw new RuntimeException("TxnOffsetCommitRequestData expected a string type, but got " + _node.getNodeType());
        }
        _object.groupId = _groupIdNode.asText();
    }
    JsonNode _producerIdNode = _node.get("producerId");
    if (_producerIdNode == null) {
        throw new RuntimeException("TxnOffsetCommitRequestData: unable to locate field 'producerId', which is mandatory in version " + _version);
    } else {
        // MessageUtil helpers validate the numeric node type and range.
        _object.producerId = MessageUtil.jsonNodeToLong(_producerIdNode, "TxnOffsetCommitRequestData");
    }
    JsonNode _producerEpochNode = _node.get("producerEpoch");
    if (_producerEpochNode == null) {
        throw new RuntimeException("TxnOffsetCommitRequestData: unable to locate field 'producerEpoch', which is mandatory in version " + _version);
    } else {
        _object.producerEpoch = MessageUtil.jsonNodeToShort(_producerEpochNode, "TxnOffsetCommitRequestData");
    }
    // generationId: mandatory at v3+, defaults to -1 before that.
    JsonNode _generationIdNode = _node.get("generationId");
    if (_generationIdNode == null) {
        if (_version >= 3) {
            throw new RuntimeException("TxnOffsetCommitRequestData: unable to locate field 'generationId', which is mandatory in version " + _version);
        } else {
            _object.generationId = -1;
        }
    } else {
        _object.generationId = MessageUtil.jsonNodeToInt(_generationIdNode, "TxnOffsetCommitRequestData");
    }
    // memberId: mandatory at v3+, defaults to "" before that.
    JsonNode _memberIdNode = _node.get("memberId");
    if (_memberIdNode == null) {
        if (_version >= 3) {
            throw new RuntimeException("TxnOffsetCommitRequestData: unable to locate field 'memberId', which is mandatory in version " + _version);
        } else {
            _object.memberId = "";
        }
    } else {
        if (!_memberIdNode.isTextual()) {
            throw new RuntimeException("TxnOffsetCommitRequestData expected a string type, but got " + _node.getNodeType());
        }
        _object.memberId = _memberIdNode.asText();
    }
    // groupInstanceId: nullable; mandatory (possibly as JSON null) at v3+.
    JsonNode _groupInstanceIdNode = _node.get("groupInstanceId");
    if (_groupInstanceIdNode == null) {
        if (_version >= 3) {
            throw new RuntimeException("TxnOffsetCommitRequestData: unable to locate field 'groupInstanceId', which is mandatory in version " + _version);
        } else {
            _object.groupInstanceId = null;
        }
    } else {
        if (_groupInstanceIdNode.isNull()) {
            _object.groupInstanceId = null;
        } else {
            if (!_groupInstanceIdNode.isTextual()) {
                throw new RuntimeException("TxnOffsetCommitRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.groupInstanceId = _groupInstanceIdNode.asText();
        }
    }
    JsonNode _topicsNode = _node.get("topics");
    if (_topicsNode == null) {
        throw new RuntimeException("TxnOffsetCommitRequestData: unable to locate field 'topics', which is mandatory in version " + _version);
    } else {
        if (!_topicsNode.isArray()) {
            throw new RuntimeException("TxnOffsetCommitRequestData expected a JSON array, but got " + _node.getNodeType());
        }
        ArrayList<TxnOffsetCommitRequestTopic> _collection = new ArrayList<TxnOffsetCommitRequestTopic>(_topicsNode.size());
        _object.topics = _collection;
        for (JsonNode _element : _topicsNode) {
            _collection.add(TxnOffsetCommitRequestTopicJsonConverter.read(_element, _version));
        }
    }
    return _object;
}
public static JsonNode write(TxnOffsetCommitRequestData _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("transactionalId", new TextNode(_object.transactionalId));
_node.set("groupId", new TextNode(_object.groupId));
_node.set("producerId", new LongNode(_object.producerId));
_node.set("producerEpoch", new ShortNode(_object.producerEpoch));
if (_version >= 3) {
_node.set("generationId", new IntNode(_object.generationId));
} else {
if (_object.generationId != -1) {
throw new UnsupportedVersionException("Attempted to write a non-default generationId at version " + _version);
}
}
if (_version >= 3) {
_node.set("memberId", new TextNode(_object.memberId));
} else {
if (!_object.memberId.equals("")) {
throw new UnsupportedVersionException("Attempted to write a non-default memberId at version " + _version);
}
}
if (_version >= 3) {
if (_object.groupInstanceId == null) {
_node.set("groupInstanceId", NullNode.instance);
} else {
_node.set("groupInstanceId", new TextNode(_object.groupInstanceId));
}
} else {
if (_object.groupInstanceId != null) {
throw new UnsupportedVersionException("Attempted to write a non-default groupInstanceId at version " + _version);
}
}
ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
for (TxnOffsetCommitRequestTopic _element : _object.topics) {
_topicsArray.add(TxnOffsetCommitRequestTopicJsonConverter.write(_element, _version, _serializeRecords));
}
_node.set("topics", _topicsArray);
return _node;
}
/**
 * Convenience overload of {@link #write(TxnOffsetCommitRequestData, short, boolean)}
 * that serializes record payloads ({@code _serializeRecords = true}).
 */
public static JsonNode write(TxnOffsetCommitRequestData _object, short _version) {
    return write(_object, _version, true);
}
/**
 * JSON converter for {@code TxnOffsetCommitRequestPartition}: one committed
 * offset (plus optional leader epoch and metadata) for a single partition.
 */
public static class TxnOffsetCommitRequestPartitionJsonConverter {
    /**
     * Deserializes a {@code TxnOffsetCommitRequestPartition} from JSON.
     *
     * @param _node    the JSON object to read from
     * @param _version the protocol version governing which fields are mandatory
     * @return the populated partition object
     * @throws RuntimeException if a mandatory field is missing or has the wrong JSON type
     */
    public static TxnOffsetCommitRequestPartition read(JsonNode _node, short _version) {
        TxnOffsetCommitRequestPartition _object = new TxnOffsetCommitRequestPartition();
        JsonNode _partitionIndexNode = _node.get("partitionIndex");
        if (_partitionIndexNode == null) {
            throw new RuntimeException("TxnOffsetCommitRequestPartition: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
        } else {
            _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "TxnOffsetCommitRequestPartition");
        }
        JsonNode _committedOffsetNode = _node.get("committedOffset");
        if (_committedOffsetNode == null) {
            throw new RuntimeException("TxnOffsetCommitRequestPartition: unable to locate field 'committedOffset', which is mandatory in version " + _version);
        } else {
            _object.committedOffset = MessageUtil.jsonNodeToLong(_committedOffsetNode, "TxnOffsetCommitRequestPartition");
        }
        JsonNode _committedLeaderEpochNode = _node.get("committedLeaderEpoch");
        if (_committedLeaderEpochNode == null) {
            // committedLeaderEpoch only exists in v2+; older versions default to -1.
            if (_version >= 2) {
                throw new RuntimeException("TxnOffsetCommitRequestPartition: unable to locate field 'committedLeaderEpoch', which is mandatory in version " + _version);
            } else {
                _object.committedLeaderEpoch = -1;
            }
        } else {
            _object.committedLeaderEpoch = MessageUtil.jsonNodeToInt(_committedLeaderEpochNode, "TxnOffsetCommitRequestPartition");
        }
        JsonNode _committedMetadataNode = _node.get("committedMetadata");
        if (_committedMetadataNode == null) {
            throw new RuntimeException("TxnOffsetCommitRequestPartition: unable to locate field 'committedMetadata', which is mandatory in version " + _version);
        } else {
            // committedMetadata is nullable: accept JSON null, otherwise require a string.
            if (_committedMetadataNode.isNull()) {
                _object.committedMetadata = null;
            } else {
                if (!_committedMetadataNode.isTextual()) {
                    throw new RuntimeException("TxnOffsetCommitRequestPartition expected a string type, but got " + _node.getNodeType());
                }
                _object.committedMetadata = _committedMetadataNode.asText();
            }
        }
        return _object;
    }
    /**
     * Serializes a {@code TxnOffsetCommitRequestPartition} to JSON.
     * committedLeaderEpoch is only emitted for {@code _version >= 2}.
     */
    public static JsonNode write(TxnOffsetCommitRequestPartition _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("partitionIndex", new IntNode(_object.partitionIndex));
        _node.set("committedOffset", new LongNode(_object.committedOffset));
        if (_version >= 2) {
            _node.set("committedLeaderEpoch", new IntNode(_object.committedLeaderEpoch));
        }
        if (_object.committedMetadata == null) {
            _node.set("committedMetadata", NullNode.instance);
        } else {
            _node.set("committedMetadata", new TextNode(_object.committedMetadata));
        }
        return _node;
    }
    /** Convenience overload with {@code _serializeRecords = true}. */
    public static JsonNode write(TxnOffsetCommitRequestPartition _object, short _version) {
        return write(_object, _version, true);
    }
}
/**
 * JSON converter for {@code TxnOffsetCommitRequestTopic}: a topic name plus the
 * list of per-partition commit entries for that topic.
 */
public static class TxnOffsetCommitRequestTopicJsonConverter {
    /**
     * Deserializes a {@code TxnOffsetCommitRequestTopic} from JSON.
     *
     * @param _node    the JSON object to read from
     * @param _version the protocol version, forwarded to the partition converter
     * @return the populated topic object
     * @throws RuntimeException if 'name' or 'partitions' is missing or mistyped
     */
    public static TxnOffsetCommitRequestTopic read(JsonNode _node, short _version) {
        TxnOffsetCommitRequestTopic _object = new TxnOffsetCommitRequestTopic();
        JsonNode _nameNode = _node.get("name");
        if (_nameNode == null) {
            throw new RuntimeException("TxnOffsetCommitRequestTopic: unable to locate field 'name', which is mandatory in version " + _version);
        } else {
            if (!_nameNode.isTextual()) {
                throw new RuntimeException("TxnOffsetCommitRequestTopic expected a string type, but got " + _node.getNodeType());
            }
            _object.name = _nameNode.asText();
        }
        JsonNode _partitionsNode = _node.get("partitions");
        if (_partitionsNode == null) {
            throw new RuntimeException("TxnOffsetCommitRequestTopic: unable to locate field 'partitions', which is mandatory in version " + _version);
        } else {
            if (!_partitionsNode.isArray()) {
                throw new RuntimeException("TxnOffsetCommitRequestTopic expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<TxnOffsetCommitRequestPartition> _collection = new ArrayList<TxnOffsetCommitRequestPartition>(_partitionsNode.size());
            _object.partitions = _collection;
            for (JsonNode _element : _partitionsNode) {
                _collection.add(TxnOffsetCommitRequestPartitionJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }
    /** Serializes a {@code TxnOffsetCommitRequestTopic} to JSON. */
    public static JsonNode write(TxnOffsetCommitRequestTopic _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("name", new TextNode(_object.name));
        ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
        for (TxnOffsetCommitRequestPartition _element : _object.partitions) {
            _partitionsArray.add(TxnOffsetCommitRequestPartitionJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("partitions", _partitionsArray);
        return _node;
    }
    /** Convenience overload with {@code _serializeRecords = true}. */
    public static JsonNode write(TxnOffsetCommitRequestTopic _object, short _version) {
        return write(_object, _version, true);
    }
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/TxnOffsetCommitResponseData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
/**
 * Generated message class for the TxnOffsetCommit response (api key 28),
 * supporting wire versions 0 through 3.
 * <p>
 * Versions 0-2 use fixed-width encodings (INT32 array lengths, INT16 string
 * lengths). Version 3 is a "flexible" version: it switches to compact encodings
 * (unsigned-varint lengths stored as length+1, see the +1/-1 adjustments in
 * read/write/addSize) and appends a tagged-fields section.
 */
public class TxnOffsetCommitResponseData implements ApiMessage {
    // The throttle duration in ms reported by the broker (see SCHEMA_0 doc string).
    int throttleTimeMs;
    // Per-topic responses.
    List<TxnOffsetCommitResponseTopic> topics;
    // Tagged fields with tags this version of the code does not know; preserved
    // for round-tripping. Lazily allocated (null until first needed).
    private List<RawTaggedField> _unknownTaggedFields;
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("topics", new ArrayOf(TxnOffsetCommitResponseTopic.SCHEMA_0), "The responses for each topic.")
        );
    public static final Schema SCHEMA_1 = SCHEMA_0;
    public static final Schema SCHEMA_2 = SCHEMA_1;
    // v3: compact array plus (empty) tagged-fields section.
    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("topics", new CompactArrayOf(TxnOffsetCommitResponseTopic.SCHEMA_3), "The responses for each topic."),
            TaggedFieldsSection.of(
            )
        );
    // Indexed by version number.
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3
    };
    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 3;
    /** Deserializing constructor: populates this object from {@code _readable}. */
    public TxnOffsetCommitResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }
    /** Default constructor: all fields take their default values. */
    public TxnOffsetCommitResponseData() {
        this.throttleTimeMs = 0;
        this.topics = new ArrayList<TxnOffsetCommitResponseTopic>(0);
    }
    @Override
    public short apiKey() {
        // 28 = TxnOffsetCommit in the Kafka protocol.
        return 28;
    }
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 3;
    }
    /**
     * Reads this message from {@code _readable} using version {@code _version}'s
     * wire format, replacing all current field values.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        {
            if (_version >= 3) {
                int arrayLength;
                // Compact arrays encode length+1 as an unsigned varint (0 = null).
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field topics was serialized as null");
                } else {
                    // Guard against maliciously large length claims: each element
                    // needs at least one byte, so length can't exceed what remains.
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<TxnOffsetCommitResponseTopic> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new TxnOffsetCommitResponseTopic(_readable, _version));
                    }
                    this.topics = newCollection;
                }
            } else {
                int arrayLength;
                // Pre-v3: plain INT32 length (-1 = null).
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field topics was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<TxnOffsetCommitResponseTopic> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new TxnOffsetCommitResponseTopic(_readable, _version));
                    }
                    this.topics = newCollection;
                }
            }
        }
        this._unknownTaggedFields = null;
        // Tagged fields only exist in flexible versions (v3+). This schema defines
        // no tags, so every tag read here is preserved as an unknown tagged field.
        if (_version >= 3) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }
    /**
     * Writes this message to {@code _writable} using version {@code _version}'s
     * wire format. Field order here defines the wire layout and must match read().
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        if (_version >= 3) {
            // Compact array: length+1 as unsigned varint.
            _writable.writeUnsignedVarint(topics.size() + 1);
            for (TxnOffsetCommitResponseTopic topicsElement : topics) {
                topicsElement.write(_writable, _cache, _version);
            }
        } else {
            _writable.writeInt(topics.size());
            for (TxnOffsetCommitResponseTopic topicsElement : topics) {
                topicsElement.write(_writable, _cache, _version);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            // Pre-flexible versions cannot carry tagged fields at all.
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    /**
     * Accumulates the serialized size of this message for {@code _version} into
     * {@code _size}; must mirror write() byte-for-byte.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(4);
        {
            if (_version >= 3) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1));
            } else {
                _size.addBytes(4);
            }
            for (TxnOffsetCommitResponseTopic topicsElement : topics) {
                topicsElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 3) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof TxnOffsetCommitResponseData)) return false;
        TxnOffsetCommitResponseData other = (TxnOffsetCommitResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (this.topics == null) {
            if (other.topics != null) return false;
        } else {
            if (!this.topics.equals(other.topics)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }
    @Override
    public int hashCode() {
        // NOTE(generated): unknown tagged fields are intentionally excluded, which
        // is still consistent with equals() (equal objects hash equally).
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
        return hashCode;
    }
    /** Returns a deep copy of this message (topics are duplicated element-wise). */
    @Override
    public TxnOffsetCommitResponseData duplicate() {
        TxnOffsetCommitResponseData _duplicate = new TxnOffsetCommitResponseData();
        _duplicate.throttleTimeMs = throttleTimeMs;
        ArrayList<TxnOffsetCommitResponseTopic> newTopics = new ArrayList<TxnOffsetCommitResponseTopic>(topics.size());
        for (TxnOffsetCommitResponseTopic _element : topics) {
            newTopics.add(_element.duplicate());
        }
        _duplicate.topics = newTopics;
        return _duplicate;
    }
    @Override
    public String toString() {
        return "TxnOffsetCommitResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", topics=" + MessageUtil.deepToString(topics.iterator())
            + ")";
    }
    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }
    public List<TxnOffsetCommitResponseTopic> topics() {
        return this.topics;
    }
    /** Returns the unknown tagged fields, allocating the list on first access. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    /** Fluent setter for throttleTimeMs. */
    public TxnOffsetCommitResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }
    /** Fluent setter for topics. */
    public TxnOffsetCommitResponseData setTopics(List<TxnOffsetCommitResponseTopic> v) {
        this.topics = v;
        return this;
    }
    /** Per-topic portion of the response: topic name plus per-partition results. */
    public static class TxnOffsetCommitResponseTopic implements Message {
        // The topic name (non-nullable on the wire).
        String name;
        // Per-partition results for this topic.
        List<TxnOffsetCommitResponsePartition> partitions;
        private List<RawTaggedField> _unknownTaggedFields;
        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("name", Type.STRING, "The topic name."),
                new Field("partitions", new ArrayOf(TxnOffsetCommitResponsePartition.SCHEMA_0), "The responses for each partition in the topic.")
            );
        public static final Schema SCHEMA_1 = SCHEMA_0;
        public static final Schema SCHEMA_2 = SCHEMA_1;
        // v3: compact string/array plus tagged-fields section.
        public static final Schema SCHEMA_3 =
            new Schema(
                new Field("name", Type.COMPACT_STRING, "The topic name."),
                new Field("partitions", new CompactArrayOf(TxnOffsetCommitResponsePartition.SCHEMA_3), "The responses for each partition in the topic."),
                TaggedFieldsSection.of(
                )
            );
        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1,
            SCHEMA_2,
            SCHEMA_3
        };
        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 3;
        /** Deserializing constructor. */
        public TxnOffsetCommitResponseTopic(Readable _readable, short _version) {
            read(_readable, _version);
        }
        /** Default constructor: empty name, empty partition list. */
        public TxnOffsetCommitResponseTopic() {
            this.name = "";
            this.partitions = new ArrayList<TxnOffsetCommitResponsePartition>(0);
        }
        @Override
        public short lowestSupportedVersion() {
            return 0;
        }
        @Override
        public short highestSupportedVersion() {
            return 3;
        }
        /** Reads this struct from {@code _readable} in version {@code _version} format. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of TxnOffsetCommitResponseTopic");
            }
            {
                int length;
                if (_version >= 3) {
                    // Compact string: length+1 as unsigned varint.
                    length = _readable.readUnsignedVarint() - 1;
                } else {
                    // Classic string: INT16 length.
                    length = _readable.readShort();
                }
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    // Strings are capped at Short.MAX_VALUE bytes.
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            {
                if (_version >= 3) {
                    int arrayLength;
                    arrayLength = _readable.readUnsignedVarint() - 1;
                    if (arrayLength < 0) {
                        throw new RuntimeException("non-nullable field partitions was serialized as null");
                    } else {
                        // Sanity-check claimed length against remaining bytes.
                        if (arrayLength > _readable.remaining()) {
                            throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                        }
                        ArrayList<TxnOffsetCommitResponsePartition> newCollection = new ArrayList<>(arrayLength);
                        for (int i = 0; i < arrayLength; i++) {
                            newCollection.add(new TxnOffsetCommitResponsePartition(_readable, _version));
                        }
                        this.partitions = newCollection;
                    }
                } else {
                    int arrayLength;
                    arrayLength = _readable.readInt();
                    if (arrayLength < 0) {
                        throw new RuntimeException("non-nullable field partitions was serialized as null");
                    } else {
                        if (arrayLength > _readable.remaining()) {
                            throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                        }
                        ArrayList<TxnOffsetCommitResponsePartition> newCollection = new ArrayList<>(arrayLength);
                        for (int i = 0; i < arrayLength; i++) {
                            newCollection.add(new TxnOffsetCommitResponsePartition(_readable, _version));
                        }
                        this.partitions = newCollection;
                    }
                }
            }
            this._unknownTaggedFields = null;
            if (_version >= 3) {
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }
        /** Writes this struct to {@code _writable}; mirrors read() exactly. */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                // UTF-8 bytes were cached by addSize(), which always runs first.
                byte[] _stringBytes = _cache.getSerializedValue(name);
                if (_version >= 3) {
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                } else {
                    _writable.writeShort((short) _stringBytes.length);
                }
                _writable.writeByteArray(_stringBytes);
            }
            if (_version >= 3) {
                _writable.writeUnsignedVarint(partitions.size() + 1);
                for (TxnOffsetCommitResponsePartition partitionsElement : partitions) {
                    partitionsElement.write(_writable, _cache, _version);
                }
            } else {
                _writable.writeInt(partitions.size());
                for (TxnOffsetCommitResponsePartition partitionsElement : partitions) {
                    partitionsElement.write(_writable, _cache, _version);
                }
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 3) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }
        /** Accumulates serialized size; also caches the UTF-8 bytes of name for write(). */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of TxnOffsetCommitResponseTopic");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                if (_version >= 3) {
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                } else {
                    _size.addBytes(_stringBytes.length + 2);
                }
            }
            {
                if (_version >= 3) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1));
                } else {
                    _size.addBytes(4);
                }
                for (TxnOffsetCommitResponsePartition partitionsElement : partitions) {
                    partitionsElement.addSize(_size, _cache, _version);
                }
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_version >= 3) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }
        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof TxnOffsetCommitResponseTopic)) return false;
            TxnOffsetCommitResponseTopic other = (TxnOffsetCommitResponseTopic) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            if (this.partitions == null) {
                if (other.partitions != null) return false;
            } else {
                if (!this.partitions.equals(other.partitions)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }
        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
            return hashCode;
        }
        /** Returns a deep copy of this struct. */
        @Override
        public TxnOffsetCommitResponseTopic duplicate() {
            TxnOffsetCommitResponseTopic _duplicate = new TxnOffsetCommitResponseTopic();
            _duplicate.name = name;
            ArrayList<TxnOffsetCommitResponsePartition> newPartitions = new ArrayList<TxnOffsetCommitResponsePartition>(partitions.size());
            for (TxnOffsetCommitResponsePartition _element : partitions) {
                newPartitions.add(_element.duplicate());
            }
            _duplicate.partitions = newPartitions;
            return _duplicate;
        }
        @Override
        public String toString() {
            return "TxnOffsetCommitResponseTopic("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", partitions=" + MessageUtil.deepToString(partitions.iterator())
                + ")";
        }
        public String name() {
            return this.name;
        }
        public List<TxnOffsetCommitResponsePartition> partitions() {
            return this.partitions;
        }
        /** Returns the unknown tagged fields, allocating the list on first access. */
        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }
        /** Fluent setter for name. */
        public TxnOffsetCommitResponseTopic setName(String v) {
            this.name = v;
            return this;
        }
        /** Fluent setter for partitions. */
        public TxnOffsetCommitResponseTopic setPartitions(List<TxnOffsetCommitResponsePartition> v) {
            this.partitions = v;
            return this;
        }
    }
    /** Per-partition portion of the response: partition index plus error code. */
    public static class TxnOffsetCommitResponsePartition implements Message {
        // The partition index.
        int partitionIndex;
        // The error code, or 0 if there was no error (see SCHEMA_0 doc string).
        short errorCode;
        private List<RawTaggedField> _unknownTaggedFields;
        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("partition_index", Type.INT32, "The partition index."),
                new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.")
            );
        public static final Schema SCHEMA_1 = SCHEMA_0;
        public static final Schema SCHEMA_2 = SCHEMA_1;
        // v3: same fixed-width fields plus a tagged-fields section.
        public static final Schema SCHEMA_3 =
            new Schema(
                new Field("partition_index", Type.INT32, "The partition index."),
                new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
                TaggedFieldsSection.of(
                )
            );
        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1,
            SCHEMA_2,
            SCHEMA_3
        };
        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 3;
        /** Deserializing constructor. */
        public TxnOffsetCommitResponsePartition(Readable _readable, short _version) {
            read(_readable, _version);
        }
        /** Default constructor: index 0, error code 0 (no error). */
        public TxnOffsetCommitResponsePartition() {
            this.partitionIndex = 0;
            this.errorCode = (short) 0;
        }
        @Override
        public short lowestSupportedVersion() {
            return 0;
        }
        @Override
        public short highestSupportedVersion() {
            return 3;
        }
        /** Reads this struct from {@code _readable} in version {@code _version} format. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of TxnOffsetCommitResponsePartition");
            }
            this.partitionIndex = _readable.readInt();
            this.errorCode = _readable.readShort();
            this._unknownTaggedFields = null;
            if (_version >= 3) {
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }
        /** Writes this struct to {@code _writable}; mirrors read() exactly. */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            _writable.writeInt(partitionIndex);
            _writable.writeShort(errorCode);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 3) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }
        /** Accumulates serialized size; must mirror write() byte-for-byte. */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of TxnOffsetCommitResponsePartition");
            }
            // partitionIndex (INT32) + errorCode (INT16).
            _size.addBytes(4);
            _size.addBytes(2);
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_version >= 3) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }
        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof TxnOffsetCommitResponsePartition)) return false;
            TxnOffsetCommitResponsePartition other = (TxnOffsetCommitResponsePartition) obj;
            if (partitionIndex != other.partitionIndex) return false;
            if (errorCode != other.errorCode) return false;
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }
        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + partitionIndex;
            hashCode = 31 * hashCode + errorCode;
            return hashCode;
        }
        /** Returns a copy of this struct (all fields are primitives). */
        @Override
        public TxnOffsetCommitResponsePartition duplicate() {
            TxnOffsetCommitResponsePartition _duplicate = new TxnOffsetCommitResponsePartition();
            _duplicate.partitionIndex = partitionIndex;
            _duplicate.errorCode = errorCode;
            return _duplicate;
        }
        @Override
        public String toString() {
            return "TxnOffsetCommitResponsePartition("
                + "partitionIndex=" + partitionIndex
                + ", errorCode=" + errorCode
                + ")";
        }
        public int partitionIndex() {
            return this.partitionIndex;
        }
        public short errorCode() {
            return this.errorCode;
        }
        /** Returns the unknown tagged fields, allocating the list on first access. */
        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }
        /** Fluent setter for partitionIndex. */
        public TxnOffsetCommitResponsePartition setPartitionIndex(int v) {
            this.partitionIndex = v;
            return this;
        }
        /** Fluent setter for errorCode. */
        public TxnOffsetCommitResponsePartition setErrorCode(short v) {
            this.errorCode = v;
            return this;
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/TxnOffsetCommitResponseDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.TxnOffsetCommitResponseData.*;
public class TxnOffsetCommitResponseDataJsonConverter {
/**
 * Deserializes a {@link TxnOffsetCommitResponseData} from JSON.
 * Both fields are mandatory in every supported version.
 *
 * @param _node    the JSON object to read from
 * @param _version the protocol version, forwarded to the topic converter
 * @return the populated response data
 * @throws RuntimeException if 'throttleTimeMs' or 'topics' is missing or mistyped
 */
public static TxnOffsetCommitResponseData read(JsonNode _node, short _version) {
    TxnOffsetCommitResponseData _object = new TxnOffsetCommitResponseData();
    JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
    if (_throttleTimeMsNode == null) {
        throw new RuntimeException("TxnOffsetCommitResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
    } else {
        _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "TxnOffsetCommitResponseData");
    }
    JsonNode _topicsNode = _node.get("topics");
    if (_topicsNode == null) {
        throw new RuntimeException("TxnOffsetCommitResponseData: unable to locate field 'topics', which is mandatory in version " + _version);
    } else {
        if (!_topicsNode.isArray()) {
            throw new RuntimeException("TxnOffsetCommitResponseData expected a JSON array, but got " + _node.getNodeType());
        }
        ArrayList<TxnOffsetCommitResponseTopic> _collection = new ArrayList<TxnOffsetCommitResponseTopic>(_topicsNode.size());
        _object.topics = _collection;
        for (JsonNode _element : _topicsNode) {
            _collection.add(TxnOffsetCommitResponseTopicJsonConverter.read(_element, _version));
        }
    }
    return _object;
}
/**
 * Serializes a {@link TxnOffsetCommitResponseData} to JSON.
 *
 * @param _object           the response data to serialize
 * @param _version          the protocol version, forwarded to the topic converter
 * @param _serializeRecords whether embedded record payloads should be serialized
 *                          (forwarded to nested converters; no record fields here)
 * @return an {@link ObjectNode} holding the JSON form of {@code _object}
 */
public static JsonNode write(TxnOffsetCommitResponseData _object, short _version, boolean _serializeRecords) {
    ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
    _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
    ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
    for (TxnOffsetCommitResponseTopic _element : _object.topics) {
        _topicsArray.add(TxnOffsetCommitResponseTopicJsonConverter.write(_element, _version, _serializeRecords));
    }
    _node.set("topics", _topicsArray);
    return _node;
}
public static JsonNode write(TxnOffsetCommitResponseData _object, short _version) {
return write(_object, _version, true);
}
public static class TxnOffsetCommitResponsePartitionJsonConverter {
public static TxnOffsetCommitResponsePartition read(JsonNode _node, short _version) {
TxnOffsetCommitResponsePartition _object = new TxnOffsetCommitResponsePartition();
JsonNode _partitionIndexNode = _node.get("partitionIndex");
if (_partitionIndexNode == null) {
throw new RuntimeException("TxnOffsetCommitResponsePartition: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
} else {
_object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "TxnOffsetCommitResponsePartition");
}
JsonNode _errorCodeNode = _node.get("errorCode");
if (_errorCodeNode == null) {
throw new RuntimeException("TxnOffsetCommitResponsePartition: unable to locate field 'errorCode', which is mandatory in version " + _version);
} else {
_object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "TxnOffsetCommitResponsePartition");
}
return _object;
}
public static JsonNode write(TxnOffsetCommitResponsePartition _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("partitionIndex", new IntNode(_object.partitionIndex));
_node.set("errorCode", new ShortNode(_object.errorCode));
return _node;
}
public static JsonNode write(TxnOffsetCommitResponsePartition _object, short _version) {
return write(_object, _version, true);
}
}
public static class TxnOffsetCommitResponseTopicJsonConverter {
public static TxnOffsetCommitResponseTopic read(JsonNode _node, short _version) {
TxnOffsetCommitResponseTopic _object = new TxnOffsetCommitResponseTopic();
JsonNode _nameNode = _node.get("name");
if (_nameNode == null) {
throw new RuntimeException("TxnOffsetCommitResponseTopic: unable to locate field 'name', which is mandatory in version " + _version);
} else {
if (!_nameNode.isTextual()) {
throw new RuntimeException("TxnOffsetCommitResponseTopic expected a string type, but got " + _node.getNodeType());
}
_object.name = _nameNode.asText();
}
JsonNode _partitionsNode = _node.get("partitions");
if (_partitionsNode == null) {
throw new RuntimeException("TxnOffsetCommitResponseTopic: unable to locate field 'partitions', which is mandatory in version " + _version);
} else {
if (!_partitionsNode.isArray()) {
throw new RuntimeException("TxnOffsetCommitResponseTopic expected a JSON array, but got " + _node.getNodeType());
}
ArrayList<TxnOffsetCommitResponsePartition> _collection = new ArrayList<TxnOffsetCommitResponsePartition>(_partitionsNode.size());
_object.partitions = _collection;
for (JsonNode _element : _partitionsNode) {
_collection.add(TxnOffsetCommitResponsePartitionJsonConverter.read(_element, _version));
}
}
return _object;
}
public static JsonNode write(TxnOffsetCommitResponseTopic _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("name", new TextNode(_object.name));
ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
for (TxnOffsetCommitResponsePartition _element : _object.partitions) {
_partitionsArray.add(TxnOffsetCommitResponsePartitionJsonConverter.write(_element, _version, _serializeRecords));
}
_node.set("partitions", _partitionsArray);
return _node;
}
public static JsonNode write(TxnOffsetCommitResponseTopic _object, short _version) {
return write(_object, _version, true);
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/UnregisterBrokerRequestData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
/**
 * Request message for the UnregisterBroker API (api key 64); only version 0 exists.
 * Generated-style message class: reads and writes the Kafka flexible-version wire
 * format, including the trailing tagged-field section.
 */
public class UnregisterBrokerRequestData implements ApiMessage {
// The broker ID to unregister (wire field "broker_id").
int brokerId;
// Tagged fields received on the wire that this version does not recognize;
// preserved verbatim so the message round-trips losslessly.
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("broker_id", Type.INT32, "The broker ID to unregister."),
TaggedFieldsSection.of(
)
);
// One schema per supported version; the array index is the version number.
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 0;
// Deserializing constructor: populates the object from the wire format.
public UnregisterBrokerRequestData(Readable _readable, short _version) {
read(_readable, _version);
}
// Default constructor: all fields take their schema defaults.
public UnregisterBrokerRequestData() {
this.brokerId = 0;
}
@Override
public short apiKey() {
return 64;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
// Reads fields in wire order: fixed fields first, then the tagged-field section.
@Override
public void read(Readable _readable, short _version) {
this.brokerId = _readable.readInt();
this._unknownTaggedFields = null;
// Tagged-field section: a varint count followed by (tag, size, payload) triples.
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
// Version 0 defines no tagged fields; keep anything unknown for round-tripping.
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
// Writes the message in wire order; must mirror read() exactly.
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(brokerId);
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
// Accumulates the serialized size; must agree byte-for-byte with write().
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
// brokerId: fixed 4-byte INT32.
_size.addBytes(4);
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof UnregisterBrokerRequestData)) return false;
UnregisterBrokerRequestData other = (UnregisterBrokerRequestData) obj;
if (brokerId != other.brokerId) return false;
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
// NOTE: hashCode intentionally ignores _unknownTaggedFields even though equals
// compares them; equal objects still hash equally, so the contract holds.
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + brokerId;
return hashCode;
}
// Returns an independent copy of this message.
@Override
public UnregisterBrokerRequestData duplicate() {
UnregisterBrokerRequestData _duplicate = new UnregisterBrokerRequestData();
_duplicate.brokerId = brokerId;
return _duplicate;
}
@Override
public String toString() {
return "UnregisterBrokerRequestData("
+ "brokerId=" + brokerId
+ ")";
}
public int brokerId() {
return this.brokerId;
}
// Lazily creates the unknown-tagged-fields list so callers may append to it.
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
// Fluent setter.
public UnregisterBrokerRequestData setBrokerId(int v) {
this.brokerId = v;
return this;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/UnregisterBrokerRequestDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.UnregisterBrokerRequestData.*;
/**
 * JSON converter for {@link UnregisterBrokerRequestData}: translates the message
 * to and from Jackson {@link JsonNode} trees for a given protocol version.
 */
public class UnregisterBrokerRequestDataJsonConverter {
    /**
     * Reads an UnregisterBrokerRequestData out of a JSON object.
     *
     * @param _node    the JSON object holding the message fields.
     * @param _version the protocol version to parse as.
     * @return the populated message.
     * @throws RuntimeException if the mandatory 'brokerId' field is missing.
     */
    public static UnregisterBrokerRequestData read(JsonNode _node, short _version) {
        UnregisterBrokerRequestData result = new UnregisterBrokerRequestData();
        JsonNode brokerIdField = _node.get("brokerId");
        if (brokerIdField == null) {
            throw new RuntimeException("UnregisterBrokerRequestData: unable to locate field 'brokerId', which is mandatory in version " + _version);
        }
        result.brokerId = MessageUtil.jsonNodeToInt(brokerIdField, "UnregisterBrokerRequestData");
        return result;
    }
    /**
     * Serializes an UnregisterBrokerRequestData to JSON.
     *
     * @param _object           the message to serialize.
     * @param _version          the protocol version to serialize as.
     * @param _serializeRecords unused for this message type.
     * @return the JSON representation of the message.
     */
    public static JsonNode write(UnregisterBrokerRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode json = new ObjectNode(JsonNodeFactory.instance);
        json.set("brokerId", new IntNode(_object.brokerId));
        return json;
    }
    /** Convenience overload that always serializes records. */
    public static JsonNode write(UnregisterBrokerRequestData _object, short _version) {
        return write(_object, _version, true);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/UnregisterBrokerResponseData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
/**
 * Response message for the UnregisterBroker API (api key 64); only version 0 exists.
 * Generated-style message class: reads and writes the Kafka flexible-version wire
 * format, including a nullable compact string and the trailing tagged-field section.
 */
public class UnregisterBrokerResponseData implements ApiMessage {
// Duration in milliseconds the request was throttled for (0 if not throttled).
int throttleTimeMs;
// The error code, or 0 if there was no error.
short errorCode;
// The top-level error message; nullable on the wire (COMPACT_NULLABLE_STRING).
String errorMessage;
// Tagged fields received on the wire that this version does not recognize;
// preserved verbatim so the message round-trips losslessly.
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The top-level error message, or `null` if there was no top-level error."),
TaggedFieldsSection.of(
)
);
// One schema per supported version; the array index is the version number.
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 0;
// Deserializing constructor: populates the object from the wire format.
public UnregisterBrokerResponseData(Readable _readable, short _version) {
read(_readable, _version);
}
// Default constructor: all fields take their schema defaults (empty, not null, for errorMessage).
public UnregisterBrokerResponseData() {
this.throttleTimeMs = 0;
this.errorCode = (short) 0;
this.errorMessage = "";
}
@Override
public short apiKey() {
return 64;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
// Reads fields in wire order: fixed fields, nullable string, then tagged fields.
@Override
public void read(Readable _readable, short _version) {
this.throttleTimeMs = _readable.readInt();
this.errorCode = _readable.readShort();
{
// COMPACT_NULLABLE_STRING: length is encoded as unsignedVarint(len + 1),
// so a raw 0 (decoded length -1) means null.
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
this.errorMessage = null;
} else if (length > 0x7fff) {
// Strings are capped at Short.MAX_VALUE bytes by the protocol.
throw new RuntimeException("string field errorMessage had invalid length " + length);
} else {
this.errorMessage = _readable.readString(length);
}
}
this._unknownTaggedFields = null;
// Tagged-field section: a varint count followed by (tag, size, payload) triples.
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
// Version 0 defines no tagged fields; keep anything unknown for round-tripping.
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
// Writes the message in wire order; must mirror read() exactly. Relies on
// addSize() having run first to populate _cache with the UTF-8 bytes of errorMessage.
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(throttleTimeMs);
_writable.writeShort(errorCode);
if (errorMessage == null) {
// Null compact string is encoded as a raw varint 0.
_writable.writeUnsignedVarint(0);
} else {
byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
_writable.writeUnsignedVarint(_stringBytes.length + 1);
_writable.writeByteArray(_stringBytes);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
// Accumulates the serialized size and caches the string's UTF-8 encoding for write().
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
// throttleTimeMs: fixed 4-byte INT32.
_size.addBytes(4);
// errorCode: fixed 2-byte INT16.
_size.addBytes(2);
if (errorMessage == null) {
// Null compact string costs one varint byte.
_size.addBytes(1);
} else {
byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'errorMessage' field is too long to be serialized");
}
// Cache the bytes so write() does not need to re-encode.
_cache.cacheSerializedValue(errorMessage, _stringBytes);
_size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof UnregisterBrokerResponseData)) return false;
UnregisterBrokerResponseData other = (UnregisterBrokerResponseData) obj;
if (throttleTimeMs != other.throttleTimeMs) return false;
if (errorCode != other.errorCode) return false;
// Null-safe comparison of the nullable errorMessage field.
if (this.errorMessage == null) {
if (other.errorMessage != null) return false;
} else {
if (!this.errorMessage.equals(other.errorMessage)) return false;
}
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
// NOTE: hashCode intentionally ignores _unknownTaggedFields even though equals
// compares them; equal objects still hash equally, so the contract holds.
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + throttleTimeMs;
hashCode = 31 * hashCode + errorCode;
hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode());
return hashCode;
}
// Returns an independent copy (String is immutable, so sharing the reference is safe).
@Override
public UnregisterBrokerResponseData duplicate() {
UnregisterBrokerResponseData _duplicate = new UnregisterBrokerResponseData();
_duplicate.throttleTimeMs = throttleTimeMs;
_duplicate.errorCode = errorCode;
if (errorMessage == null) {
_duplicate.errorMessage = null;
} else {
_duplicate.errorMessage = errorMessage;
}
return _duplicate;
}
@Override
public String toString() {
return "UnregisterBrokerResponseData("
+ "throttleTimeMs=" + throttleTimeMs
+ ", errorCode=" + errorCode
+ ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'")
+ ")";
}
public int throttleTimeMs() {
return this.throttleTimeMs;
}
public short errorCode() {
return this.errorCode;
}
public String errorMessage() {
return this.errorMessage;
}
// Lazily creates the unknown-tagged-fields list so callers may append to it.
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
// Fluent setters.
public UnregisterBrokerResponseData setThrottleTimeMs(int v) {
this.throttleTimeMs = v;
return this;
}
public UnregisterBrokerResponseData setErrorCode(short v) {
this.errorCode = v;
return this;
}
public UnregisterBrokerResponseData setErrorMessage(String v) {
this.errorMessage = v;
return this;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/UnregisterBrokerResponseDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.UnregisterBrokerResponseData.*;
/**
 * JSON converter for {@link UnregisterBrokerResponseData}: translates the message
 * to and from Jackson {@link JsonNode} trees for a given protocol version.
 * NOTE: this is generated-style code; keep any manual fix in sync with the generator.
 */
public class UnregisterBrokerResponseDataJsonConverter {
    /**
     * Reads an UnregisterBrokerResponseData out of a JSON object.
     *
     * @param _node    the JSON object holding the message fields.
     * @param _version the protocol version to parse as.
     * @return the populated message.
     * @throws RuntimeException if a mandatory field is absent or has the wrong JSON type.
     */
    public static UnregisterBrokerResponseData read(JsonNode _node, short _version) {
        UnregisterBrokerResponseData _object = new UnregisterBrokerResponseData();
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            throw new RuntimeException("UnregisterBrokerResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "UnregisterBrokerResponseData");
        }
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("UnregisterBrokerResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "UnregisterBrokerResponseData");
        }
        JsonNode _errorMessageNode = _node.get("errorMessage");
        if (_errorMessageNode == null) {
            throw new RuntimeException("UnregisterBrokerResponseData: unable to locate field 'errorMessage', which is mandatory in version " + _version);
        } else {
            if (_errorMessageNode.isNull()) {
                // errorMessage is nullable (COMPACT_NULLABLE_STRING on the wire).
                _object.errorMessage = null;
            } else {
                if (!_errorMessageNode.isTextual()) {
                    // Fix: report the type of the offending 'errorMessage' node, not the
                    // type of the enclosing object (which is always OBJECT).
                    throw new RuntimeException("UnregisterBrokerResponseData expected a string type, but got " + _errorMessageNode.getNodeType());
                }
                _object.errorMessage = _errorMessageNode.asText();
            }
        }
        return _object;
    }
    /**
     * Serializes an UnregisterBrokerResponseData to JSON.
     *
     * @param _object           the message to serialize.
     * @param _version          the protocol version to serialize as.
     * @param _serializeRecords unused for this message type.
     * @return the JSON representation of the message.
     */
    public static JsonNode write(UnregisterBrokerResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        _node.set("errorCode", new ShortNode(_object.errorCode));
        if (_object.errorMessage == null) {
            _node.set("errorMessage", NullNode.instance);
        } else {
            _node.set("errorMessage", new TextNode(_object.errorMessage));
        }
        return _node;
    }
    /** Convenience overload that always serializes records. */
    public static JsonNode write(UnregisterBrokerResponseData _object, short _version) {
        return write(_object, _version, true);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/UpdateFeaturesRequestData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
public class UpdateFeaturesRequestData implements ApiMessage {
int timeoutMs;
FeatureUpdateKeyCollection featureUpdates;
boolean validateOnly;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("timeout_ms", Type.INT32, "How long to wait in milliseconds before timing out the request."),
new Field("feature_updates", new CompactArrayOf(FeatureUpdateKey.SCHEMA_0), "The list of updates to finalized features."),
TaggedFieldsSection.of(
)
);
public static final Schema SCHEMA_1 =
new Schema(
new Field("timeout_ms", Type.INT32, "How long to wait in milliseconds before timing out the request."),
new Field("feature_updates", new CompactArrayOf(FeatureUpdateKey.SCHEMA_1), "The list of updates to finalized features."),
new Field("validate_only", Type.BOOLEAN, "True if we should validate the request, but not perform the upgrade or downgrade."),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 1;
// Deserializing constructor: populates the object from the wire format.
public UpdateFeaturesRequestData(Readable _readable, short _version) {
read(_readable, _version);
}
// Default constructor: fields take their schema defaults (timeoutMs defaults to 60000).
public UpdateFeaturesRequestData() {
this.timeoutMs = 60000;
this.featureUpdates = new FeatureUpdateKeyCollection(0);
this.validateOnly = false;
}
// API key 57 identifies the UpdateFeatures API.
@Override
public short apiKey() {
return 57;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
// Reads fields in wire order: fixed fields, compact array, optional v1 flag,
// then the tagged-field section.
@Override
public void read(Readable _readable, short _version) {
this.timeoutMs = _readable.readInt();
{
// COMPACT_ARRAY: length is encoded as unsignedVarint(len + 1), so a raw 0
// (decoded length -1) would mean null, which is invalid for this field.
int arrayLength;
arrayLength = _readable.readUnsignedVarint() - 1;
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field featureUpdates was serialized as null");
} else {
// Sanity-check against the remaining buffer before allocating, to avoid
// huge allocations from a corrupt or malicious length prefix.
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
FeatureUpdateKeyCollection newCollection = new FeatureUpdateKeyCollection(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new FeatureUpdateKey(_readable, _version));
}
this.featureUpdates = newCollection;
}
}
// validate_only only exists on the wire from version 1 onward.
if (_version >= 1) {
this.validateOnly = _readable.readByte() != 0;
} else {
this.validateOnly = false;
}
this._unknownTaggedFields = null;
// Tagged-field section: a varint count followed by (tag, size, payload) triples.
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
// No known tagged fields; keep unknown ones verbatim for round-tripping.
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
// Writes the message in wire order; must mirror read() exactly.
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(timeoutMs);
// COMPACT_ARRAY length prefix: size + 1 (a raw 0 is reserved for null).
_writable.writeUnsignedVarint(featureUpdates.size() + 1);
for (FeatureUpdateKey featureUpdatesElement : featureUpdates) {
featureUpdatesElement.write(_writable, _cache, _version);
}
if (_version >= 1) {
_writable.writeByte(validateOnly ? (byte) 1 : (byte) 0);
} else {
// Version 0 cannot carry validateOnly; fail loudly rather than silently
// dropping a non-default value.
if (this.validateOnly) {
throw new UnsupportedVersionException("Attempted to write a non-default validateOnly at version " + _version);
}
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
// Accumulates the serialized size; must agree byte-for-byte with write().
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
// timeoutMs: fixed 4-byte INT32.
_size.addBytes(4);
{
// Compact-array length prefix plus each element's own size.
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(featureUpdates.size() + 1));
for (FeatureUpdateKey featureUpdatesElement : featureUpdates) {
featureUpdatesElement.addSize(_size, _cache, _version);
}
}
// validateOnly: single byte, present only from version 1 onward.
if (_version >= 1) {
_size.addBytes(1);
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
}
// Value equality over every field, including unknown tagged fields.
@Override
public boolean equals(Object obj) {
    if (!(obj instanceof UpdateFeaturesRequestData)) return false;
    UpdateFeaturesRequestData that = (UpdateFeaturesRequestData) obj;
    // Null-safe comparison of the featureUpdates collection.
    boolean sameUpdates = (this.featureUpdates == null)
        ? (that.featureUpdates == null)
        : this.featureUpdates.equals(that.featureUpdates);
    return timeoutMs == that.timeoutMs
        && sameUpdates
        && validateOnly == that.validateOnly
        && MessageUtil.compareRawTaggedFields(_unknownTaggedFields, that._unknownTaggedFields);
}
// NOTE: _unknownTaggedFields is deliberately excluded, matching the rest of the
// generated message classes; equal objects still hash equally.
@Override
public int hashCode() {
    int result = timeoutMs;
    result = 31 * result + (featureUpdates == null ? 0 : featureUpdates.hashCode());
    result = 31 * result + (validateOnly ? 1231 : 1237);
    return result;
}
// Deep copy: each FeatureUpdateKey element is duplicated so the copy is independent.
@Override
public UpdateFeaturesRequestData duplicate() {
    UpdateFeaturesRequestData copy = new UpdateFeaturesRequestData();
    copy.timeoutMs = timeoutMs;
    FeatureUpdateKeyCollection updatesCopy = new FeatureUpdateKeyCollection(featureUpdates.size());
    for (FeatureUpdateKey original : featureUpdates) {
        updatesCopy.add(original.duplicate());
    }
    copy.featureUpdates = updatesCopy;
    copy.validateOnly = validateOnly;
    return copy;
}
// Human-readable rendering of all fields, in declaration order.
@Override
public String toString() {
    StringBuilder sb = new StringBuilder("UpdateFeaturesRequestData(");
    sb.append("timeoutMs=").append(timeoutMs);
    sb.append(", featureUpdates=").append(MessageUtil.deepToString(featureUpdates.iterator()));
    sb.append(", validateOnly=").append(validateOnly ? "true" : "false");
    sb.append(")");
    return sb.toString();
}
// Plain accessors for the message fields.
public int timeoutMs() {
return this.timeoutMs;
}
public FeatureUpdateKeyCollection featureUpdates() {
return this.featureUpdates;
}
public boolean validateOnly() {
return this.validateOnly;
}
// Lazily creates the unknown-tagged-fields list so callers may append to it.
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
// Fluent setters.
public UpdateFeaturesRequestData setTimeoutMs(int v) {
this.timeoutMs = v;
return this;
}
public UpdateFeaturesRequestData setFeatureUpdates(FeatureUpdateKeyCollection v) {
this.featureUpdates = v;
return this;
}
public UpdateFeaturesRequestData setValidateOnly(boolean v) {
this.validateOnly = v;
return this;
}
/**
 * One requested feature update: a feature name plus the desired maximum
 * version level and downgrade/upgrade controls. Elements are stored in a
 * {@link FeatureUpdateKeyCollection} keyed on the feature name alone
 * (see {@link #elementKeysAreEqual} and {@link #hashCode}).
 */
public static class FeatureUpdateKey implements Message, ImplicitLinkedHashMultiCollection.Element {
String feature;
short maxVersionLevel;
// Serialized only at version 0; superseded by upgradeType in version 1.
boolean allowDowngrade;
// Serialized only at version 1+; default (byte) 1 means "upgrade only".
byte upgradeType;
private List<RawTaggedField> _unknownTaggedFields;
// Intrusive linked-list indices managed by ImplicitLinkedHashMultiCollection.
private int next;
private int prev;
// Wire schema for version 0 (uses allow_downgrade).
public static final Schema SCHEMA_0 =
new Schema(
new Field("feature", Type.COMPACT_STRING, "The name of the finalized feature to be updated."),
new Field("max_version_level", Type.INT16, "The new maximum version level for the finalized feature. A value >= 1 is valid. A value < 1, is special, and can be used to request the deletion of the finalized feature."),
new Field("allow_downgrade", Type.BOOLEAN, "DEPRECATED in version 1 (see DowngradeType). When set to true, the finalized feature version level is allowed to be downgraded/deleted. The downgrade request will fail if the new maximum version level is a value that's not lower than the existing maximum finalized version level."),
TaggedFieldsSection.of(
)
);
// Wire schema for version 1 (replaces allow_downgrade with upgrade_type).
public static final Schema SCHEMA_1 =
new Schema(
new Field("feature", Type.COMPACT_STRING, "The name of the finalized feature to be updated."),
new Field("max_version_level", Type.INT16, "The new maximum version level for the finalized feature. A value >= 1 is valid. A value < 1, is special, and can be used to request the deletion of the finalized feature."),
new Field("upgrade_type", Type.INT8, "Determine which type of upgrade will be performed: 1 will perform an upgrade only (default), 2 is safe downgrades only (lossless), 3 is unsafe downgrades (lossy)."),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 1;
// Deserializing constructor: populates fields from the wire, detached from any collection.
public FeatureUpdateKey(Readable _readable, short _version) {
read(_readable, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
// Default constructor: every field at its schema default.
public FeatureUpdateKey() {
this.feature = "";
this.maxVersionLevel = (short) 0;
this.allowDowngrade = false;
this.upgradeType = (byte) 1;
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
/**
 * Deserializes this element at the given version. Compact strings store
 * length + 1 as an unsigned varint, so a stored 0 (length -1 here) means
 * null — rejected because 'feature' is non-nullable.
 */
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of FeatureUpdateKey");
}
{
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
throw new RuntimeException("non-nullable field feature was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field feature had invalid length " + length);
} else {
this.feature = _readable.readString(length);
}
}
this.maxVersionLevel = _readable.readShort();
// allowDowngrade is only on the wire in v0; upgradeType only in v1+.
if (_version <= 0) {
this.allowDowngrade = _readable.readByte() != 0;
} else {
this.allowDowngrade = false;
}
if (_version >= 1) {
this.upgradeType = _readable.readByte();
} else {
this.upgradeType = (byte) 1;
}
// Preserve any tagged fields this version does not recognize for round-tripping.
this._unknownTaggedFields = null;
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
/**
 * Serializes this element at the given version. Throws
 * UnsupportedVersionException if a field not present at _version holds a
 * non-default value, so no data is silently dropped.
 */
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
{
// The UTF-8 bytes of 'feature' were cached by addSize().
byte[] _stringBytes = _cache.getSerializedValue(feature);
_writable.writeUnsignedVarint(_stringBytes.length + 1);
_writable.writeByteArray(_stringBytes);
}
_writable.writeShort(maxVersionLevel);
if (_version <= 0) {
_writable.writeByte(allowDowngrade ? (byte) 1 : (byte) 0);
} else {
if (this.allowDowngrade) {
throw new UnsupportedVersionException("Attempted to write a non-default allowDowngrade at version " + _version);
}
}
if (_version >= 1) {
_writable.writeByte(upgradeType);
} else {
if (this.upgradeType != (byte) 1) {
throw new UnsupportedVersionException("Attempted to write a non-default upgradeType at version " + _version);
}
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
/**
 * Accumulates the serialized size of this element and caches the UTF-8
 * bytes of 'feature' so write() does not re-encode them.
 */
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of FeatureUpdateKey");
}
{
byte[] _stringBytes = feature.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'feature' field is too long to be serialized");
}
_cache.cacheSerializedValue(feature, _stringBytes);
_size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
}
_size.addBytes(2);
// One byte for allowDowngrade in v0, or for upgradeType in v1+.
if (_version <= 0) {
_size.addBytes(1);
}
if (_version >= 1) {
_size.addBytes(1);
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
}
// Collection-key equality: elements are keyed on the feature name only.
@Override
public boolean elementKeysAreEqual(Object obj) {
if (!(obj instanceof FeatureUpdateKey)) return false;
FeatureUpdateKey other = (FeatureUpdateKey) obj;
if (this.feature == null) {
if (other.feature != null) return false;
} else {
if (!this.feature.equals(other.feature)) return false;
}
return true;
}
// Full structural equality over every field, including unknown tagged fields.
@Override
public boolean equals(Object obj) {
if (!(obj instanceof FeatureUpdateKey)) return false;
FeatureUpdateKey other = (FeatureUpdateKey) obj;
if (this.feature == null) {
if (other.feature != null) return false;
} else {
if (!this.feature.equals(other.feature)) return false;
}
if (maxVersionLevel != other.maxVersionLevel) return false;
if (allowDowngrade != other.allowDowngrade) return false;
if (upgradeType != other.upgradeType) return false;
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
// Hashes only the collection key (feature), matching elementKeysAreEqual
// rather than equals — required by ImplicitLinkedHashMultiCollection.
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (feature == null ? 0 : feature.hashCode());
return hashCode;
}
// Field-by-field copy, detached from any collection (next/prev reset by the constructor).
@Override
public FeatureUpdateKey duplicate() {
FeatureUpdateKey _duplicate = new FeatureUpdateKey();
_duplicate.feature = feature;
_duplicate.maxVersionLevel = maxVersionLevel;
_duplicate.allowDowngrade = allowDowngrade;
_duplicate.upgradeType = upgradeType;
return _duplicate;
}
@Override
public String toString() {
return "FeatureUpdateKey("
+ "feature=" + ((feature == null) ? "null" : "'" + feature.toString() + "'")
+ ", maxVersionLevel=" + maxVersionLevel
+ ", allowDowngrade=" + (allowDowngrade ? "true" : "false")
+ ", upgradeType=" + upgradeType
+ ")";
}
public String feature() {
return this.feature;
}
public short maxVersionLevel() {
return this.maxVersionLevel;
}
public boolean allowDowngrade() {
return this.allowDowngrade;
}
public byte upgradeType() {
return this.upgradeType;
}
// next()/prev()/setNext()/setPrev() expose the intrusive list indices to the collection.
@Override
public int next() {
return this.next;
}
@Override
public int prev() {
return this.prev;
}
// Lazily creates the list of unrecognized tagged fields.
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public FeatureUpdateKey setFeature(String v) {
this.feature = v;
return this;
}
public FeatureUpdateKey setMaxVersionLevel(short v) {
this.maxVersionLevel = v;
return this;
}
public FeatureUpdateKey setAllowDowngrade(boolean v) {
this.allowDowngrade = v;
return this;
}
public FeatureUpdateKey setUpgradeType(byte v) {
this.upgradeType = v;
return this;
}
@Override
public void setNext(int v) {
this.next = v;
}
@Override
public void setPrev(int v) {
this.prev = v;
}
}
/**
 * Multiset of {@link FeatureUpdateKey} elements hashed on the feature name.
 * Duplicate feature names are permitted, hence {@link #findAll}.
 */
public static class FeatureUpdateKeyCollection extends ImplicitLinkedHashMultiCollection<FeatureUpdateKey> {
public FeatureUpdateKeyCollection() {
super();
}
public FeatureUpdateKeyCollection(int expectedNumElements) {
super(expectedNumElements);
}
public FeatureUpdateKeyCollection(Iterator<FeatureUpdateKey> iterator) {
super(iterator);
}
// Returns one element with the given feature name, or null if none exists.
public FeatureUpdateKey find(String feature) {
FeatureUpdateKey _key = new FeatureUpdateKey();
_key.setFeature(feature);
return find(_key);
}
// Returns every element with the given feature name.
public List<FeatureUpdateKey> findAll(String feature) {
FeatureUpdateKey _key = new FeatureUpdateKey();
_key.setFeature(feature);
return findAll(_key);
}
// Deep copy: duplicates each contained element.
public FeatureUpdateKeyCollection duplicate() {
FeatureUpdateKeyCollection _duplicate = new FeatureUpdateKeyCollection(size());
for (FeatureUpdateKey _element : this) {
_duplicate.add(_element.duplicate());
}
return _duplicate;
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/UpdateFeaturesRequestDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.UpdateFeaturesRequestData.*;
/**
 * Converts {@link UpdateFeaturesRequestData} to and from Jackson JSON trees.
 * Version-gated fields follow the wire schema: validateOnly exists at v1+,
 * allowDowngrade at v0 only, upgradeType at v1+.
 *
 * NOTE(review): the "expected ..." error messages report _node.getNodeType()
 * (the parent object's type) rather than the offending child node's type —
 * presumably inherited from the upstream code generator; confirm before changing.
 */
public class UpdateFeaturesRequestDataJsonConverter {
/** Builds a request object from a JSON tree, enforcing per-version mandatory fields. */
public static UpdateFeaturesRequestData read(JsonNode _node, short _version) {
UpdateFeaturesRequestData _object = new UpdateFeaturesRequestData();
JsonNode _timeoutMsNode = _node.get("timeoutMs");
if (_timeoutMsNode == null) {
throw new RuntimeException("UpdateFeaturesRequestData: unable to locate field 'timeoutMs', which is mandatory in version " + _version);
} else {
_object.timeoutMs = MessageUtil.jsonNodeToInt(_timeoutMsNode, "UpdateFeaturesRequestData");
}
JsonNode _featureUpdatesNode = _node.get("featureUpdates");
if (_featureUpdatesNode == null) {
throw new RuntimeException("UpdateFeaturesRequestData: unable to locate field 'featureUpdates', which is mandatory in version " + _version);
} else {
if (!_featureUpdatesNode.isArray()) {
throw new RuntimeException("UpdateFeaturesRequestData expected a JSON array, but got " + _node.getNodeType());
}
FeatureUpdateKeyCollection _collection = new FeatureUpdateKeyCollection(_featureUpdatesNode.size());
_object.featureUpdates = _collection;
for (JsonNode _element : _featureUpdatesNode) {
_collection.add(FeatureUpdateKeyJsonConverter.read(_element, _version));
}
}
// validateOnly is mandatory at v1+; at v0 a missing node falls back to false.
JsonNode _validateOnlyNode = _node.get("validateOnly");
if (_validateOnlyNode == null) {
if (_version >= 1) {
throw new RuntimeException("UpdateFeaturesRequestData: unable to locate field 'validateOnly', which is mandatory in version " + _version);
} else {
_object.validateOnly = false;
}
} else {
if (!_validateOnlyNode.isBoolean()) {
throw new RuntimeException("UpdateFeaturesRequestData expected Boolean type, but got " + _node.getNodeType());
}
_object.validateOnly = _validateOnlyNode.asBoolean();
}
return _object;
}
/**
 * Serializes a request object to a JSON tree. Throws
 * UnsupportedVersionException if a field not present at _version holds a
 * non-default value.
 */
public static JsonNode write(UpdateFeaturesRequestData _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("timeoutMs", new IntNode(_object.timeoutMs));
ArrayNode _featureUpdatesArray = new ArrayNode(JsonNodeFactory.instance);
for (FeatureUpdateKey _element : _object.featureUpdates) {
_featureUpdatesArray.add(FeatureUpdateKeyJsonConverter.write(_element, _version, _serializeRecords));
}
_node.set("featureUpdates", _featureUpdatesArray);
if (_version >= 1) {
_node.set("validateOnly", BooleanNode.valueOf(_object.validateOnly));
} else {
if (_object.validateOnly) {
throw new UnsupportedVersionException("Attempted to write a non-default validateOnly at version " + _version);
}
}
return _node;
}
// Convenience overload: records are serialized by default.
public static JsonNode write(UpdateFeaturesRequestData _object, short _version) {
return write(_object, _version, true);
}
/** JSON converter for the nested FeatureUpdateKey element type. */
public static class FeatureUpdateKeyJsonConverter {
public static FeatureUpdateKey read(JsonNode _node, short _version) {
FeatureUpdateKey _object = new FeatureUpdateKey();
JsonNode _featureNode = _node.get("feature");
if (_featureNode == null) {
throw new RuntimeException("FeatureUpdateKey: unable to locate field 'feature', which is mandatory in version " + _version);
} else {
if (!_featureNode.isTextual()) {
throw new RuntimeException("FeatureUpdateKey expected a string type, but got " + _node.getNodeType());
}
_object.feature = _featureNode.asText();
}
JsonNode _maxVersionLevelNode = _node.get("maxVersionLevel");
if (_maxVersionLevelNode == null) {
throw new RuntimeException("FeatureUpdateKey: unable to locate field 'maxVersionLevel', which is mandatory in version " + _version);
} else {
_object.maxVersionLevel = MessageUtil.jsonNodeToShort(_maxVersionLevelNode, "FeatureUpdateKey");
}
// allowDowngrade is mandatory at v0 only; defaults to false at v1+.
JsonNode _allowDowngradeNode = _node.get("allowDowngrade");
if (_allowDowngradeNode == null) {
if (_version <= 0) {
throw new RuntimeException("FeatureUpdateKey: unable to locate field 'allowDowngrade', which is mandatory in version " + _version);
} else {
_object.allowDowngrade = false;
}
} else {
if (!_allowDowngradeNode.isBoolean()) {
throw new RuntimeException("FeatureUpdateKey expected Boolean type, but got " + _node.getNodeType());
}
_object.allowDowngrade = _allowDowngradeNode.asBoolean();
}
// upgradeType is mandatory at v1+; defaults to 1 ("upgrade only") at v0.
JsonNode _upgradeTypeNode = _node.get("upgradeType");
if (_upgradeTypeNode == null) {
if (_version >= 1) {
throw new RuntimeException("FeatureUpdateKey: unable to locate field 'upgradeType', which is mandatory in version " + _version);
} else {
_object.upgradeType = (byte) 1;
}
} else {
_object.upgradeType = MessageUtil.jsonNodeToByte(_upgradeTypeNode, "FeatureUpdateKey");
}
return _object;
}
public static JsonNode write(FeatureUpdateKey _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("feature", new TextNode(_object.feature));
_node.set("maxVersionLevel", new ShortNode(_object.maxVersionLevel));
if (_version <= 0) {
_node.set("allowDowngrade", BooleanNode.valueOf(_object.allowDowngrade));
} else {
if (_object.allowDowngrade) {
throw new UnsupportedVersionException("Attempted to write a non-default allowDowngrade at version " + _version);
}
}
if (_version >= 1) {
// byte fields are emitted as ShortNode (Jackson has no ByteNode).
_node.set("upgradeType", new ShortNode(_object.upgradeType));
} else {
if (_object.upgradeType != (byte) 1) {
throw new UnsupportedVersionException("Attempted to write a non-default upgradeType at version " + _version);
}
}
return _node;
}
public static JsonNode write(FeatureUpdateKey _object, short _version) {
return write(_object, _version, true);
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/UpdateFeaturesResponseData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
public class UpdateFeaturesResponseData implements ApiMessage {
// Top-level response fields; package-private so sibling generated code
// (e.g. the JSON converter) can access them directly.
int throttleTimeMs;
short errorCode;
// Nullable: null when there was no top-level error message.
String errorMessage;
UpdatableFeatureResultCollection results;
private List<RawTaggedField> _unknownTaggedFields;
// Wire schema for version 0; version 1 is byte-identical on the wire.
public static final Schema SCHEMA_0 =
new Schema(
new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
new Field("error_code", Type.INT16, "The top-level error code, or `0` if there was no top-level error."),
new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The top-level error message, or `null` if there was no top-level error."),
new Field("results", new CompactArrayOf(UpdatableFeatureResult.SCHEMA_0), "Results for each feature update."),
TaggedFieldsSection.of(
)
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 1;
// Deserializing constructor: populates this object from the readable at the given version.
public UpdateFeaturesResponseData(Readable _readable, short _version) {
read(_readable, _version);
}
// Default constructor: every field at its schema default (note errorMessage
// defaults to "" even though it is nullable on the wire).
public UpdateFeaturesResponseData() {
this.throttleTimeMs = 0;
this.errorCode = (short) 0;
this.errorMessage = "";
this.results = new UpdatableFeatureResultCollection(0);
}
// API key 57 identifies the UpdateFeatures RPC.
@Override
public short apiKey() {
return 57;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
/**
 * Deserializes the response. Compact strings and arrays store length + 1 as
 * an unsigned varint, so a stored 0 (−1 after the subtraction) means null —
 * allowed for errorMessage, rejected for the non-nullable results array.
 */
@Override
public void read(Readable _readable, short _version) {
this.throttleTimeMs = _readable.readInt();
this.errorCode = _readable.readShort();
{
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
this.errorMessage = null;
} else if (length > 0x7fff) {
throw new RuntimeException("string field errorMessage had invalid length " + length);
} else {
this.errorMessage = _readable.readString(length);
}
}
{
int arrayLength;
arrayLength = _readable.readUnsignedVarint() - 1;
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field results was serialized as null");
} else {
// Guards against a corrupt length triggering a huge allocation.
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
UpdatableFeatureResultCollection newCollection = new UpdatableFeatureResultCollection(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new UpdatableFeatureResult(_readable, _version));
}
this.results = newCollection;
}
}
// Preserve any tagged fields this version does not recognize for round-tripping.
this._unknownTaggedFields = null;
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
/**
 * Serializes the response. Must be preceded by addSize() with the same cache,
 * which stores the UTF-8 bytes of errorMessage retrieved here.
 */
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(throttleTimeMs);
_writable.writeShort(errorCode);
// Compact nullable string: varint 0 encodes null, otherwise length + 1.
if (errorMessage == null) {
_writable.writeUnsignedVarint(0);
} else {
byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
_writable.writeUnsignedVarint(_stringBytes.length + 1);
_writable.writeByteArray(_stringBytes);
}
// Compact array: element count + 1, then each element in order.
_writable.writeUnsignedVarint(results.size() + 1);
for (UpdatableFeatureResult resultsElement : results) {
resultsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
/**
 * Accumulates the serialized size of this response and caches the UTF-8
 * bytes of errorMessage for the subsequent write() call.
 */
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
// throttleTimeMs (int32) + errorCode (int16).
_size.addBytes(4);
_size.addBytes(2);
if (errorMessage == null) {
_size.addBytes(1);
} else {
byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'errorMessage' field is too long to be serialized");
}
_cache.cacheSerializedValue(errorMessage, _stringBytes);
_size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
}
{
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(results.size() + 1));
for (UpdatableFeatureResult resultsElement : results) {
resultsElement.addSize(_size, _cache, _version);
}
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
}
/** Structural equality over every field, including unknown tagged fields. */
@Override
public boolean equals(Object obj) {
if (!(obj instanceof UpdateFeaturesResponseData)) {
return false;
}
UpdateFeaturesResponseData that = (UpdateFeaturesResponseData) obj;
return throttleTimeMs == that.throttleTimeMs
&& errorCode == that.errorCode
&& (errorMessage == null ? that.errorMessage == null : errorMessage.equals(that.errorMessage))
&& (results == null ? that.results == null : results.equals(that.results))
&& MessageUtil.compareRawTaggedFields(_unknownTaggedFields, that._unknownTaggedFields);
}
/** Hashes every field except the unknown tagged fields (same 31-multiplier scheme). */
@Override
public int hashCode() {
int result = throttleTimeMs;
result = 31 * result + errorCode;
result = 31 * result + (errorMessage == null ? 0 : errorMessage.hashCode());
result = 31 * result + (results == null ? 0 : results.hashCode());
return result;
}
/** Returns a deep copy: scalars are copied directly and each result element is duplicated. */
@Override
public UpdateFeaturesResponseData duplicate() {
UpdateFeaturesResponseData copy = new UpdateFeaturesResponseData();
copy.throttleTimeMs = throttleTimeMs;
copy.errorCode = errorCode;
// Strings are immutable, so sharing the reference (or null) is a safe copy.
copy.errorMessage = errorMessage;
UpdatableFeatureResultCollection copiedResults = new UpdatableFeatureResultCollection(results.size());
for (UpdatableFeatureResult element : results) {
copiedResults.add(element.duplicate());
}
copy.results = copiedResults;
return copy;
}
/**
 * Renders this response in the standard generated-message format:
 * {@code UpdateFeaturesResponseData(field=value, ...)}.
 */
@Override
public String toString() {
StringBuilder text = new StringBuilder("UpdateFeaturesResponseData(");
text.append("throttleTimeMs=").append(throttleTimeMs);
text.append(", errorCode=").append(errorCode);
text.append(", errorMessage=").append((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'");
text.append(", results=").append(MessageUtil.deepToString(results.iterator()));
text.append(")");
return text.toString();
}
/** @return the throttle duration in milliseconds (zero when no quota was violated). */
public int throttleTimeMs() {
return throttleTimeMs;
}
/** @return the top-level error code, or 0 when there was no top-level error. */
public short errorCode() {
return errorCode;
}
/** @return the top-level error message, or null when there was no top-level error. */
public String errorMessage() {
return errorMessage;
}
/** @return the per-feature update results. */
public UpdatableFeatureResultCollection results() {
return results;
}
/** Lazily creates and returns the mutable list of unrecognized tagged fields. */
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields != null) {
return _unknownTaggedFields;
}
_unknownTaggedFields = new ArrayList<>(0);
return _unknownTaggedFields;
}
/** Sets the throttle duration; returns this object for chaining. */
public UpdateFeaturesResponseData setThrottleTimeMs(int v) {
throttleTimeMs = v;
return this;
}
/** Sets the top-level error code; returns this object for chaining. */
public UpdateFeaturesResponseData setErrorCode(short v) {
errorCode = v;
return this;
}
/** Sets the top-level error message (may be null); returns this object for chaining. */
public UpdateFeaturesResponseData setErrorMessage(String v) {
errorMessage = v;
return this;
}
/** Sets the results collection; returns this object for chaining. */
public UpdateFeaturesResponseData setResults(UpdatableFeatureResultCollection v) {
results = v;
return this;
}
/**
 * The outcome of one feature update: the feature name plus an error code and
 * optional message. Elements live in an {@link UpdatableFeatureResultCollection}
 * keyed on the feature name (see {@link #elementKeysAreEqual}).
 */
public static class UpdatableFeatureResult implements Message, ImplicitLinkedHashMultiCollection.Element {
String feature;
short errorCode;
// Nullable: null when the update succeeded with no message.
String errorMessage;
private List<RawTaggedField> _unknownTaggedFields;
// Intrusive linked-list indices managed by ImplicitLinkedHashMultiCollection.
private int next;
private int prev;
// Wire schema for version 0; version 1 is byte-identical on the wire.
public static final Schema SCHEMA_0 =
new Schema(
new Field("feature", Type.COMPACT_STRING, "The name of the finalized feature."),
new Field("error_code", Type.INT16, "The feature update error code or `0` if the feature update succeeded."),
new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The feature update error, or `null` if the feature update succeeded."),
TaggedFieldsSection.of(
)
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 1;
// Deserializing constructor: populates fields from the wire, detached from any collection.
public UpdatableFeatureResult(Readable _readable, short _version) {
read(_readable, _version);
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
// Default constructor: schema defaults (errorMessage defaults to "" despite being nullable).
public UpdatableFeatureResult() {
this.feature = "";
this.errorCode = (short) 0;
this.errorMessage = "";
this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
/**
 * Deserializes this element. Compact strings store length + 1 as an unsigned
 * varint, so 0 means null — rejected for the non-nullable 'feature' but
 * accepted for 'errorMessage'.
 */
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of UpdatableFeatureResult");
}
{
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
throw new RuntimeException("non-nullable field feature was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field feature had invalid length " + length);
} else {
this.feature = _readable.readString(length);
}
}
this.errorCode = _readable.readShort();
{
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
this.errorMessage = null;
} else if (length > 0x7fff) {
throw new RuntimeException("string field errorMessage had invalid length " + length);
} else {
this.errorMessage = _readable.readString(length);
}
}
// Preserve any tagged fields this version does not recognize for round-tripping.
this._unknownTaggedFields = null;
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
/** Serializes this element; string bytes were cached by addSize() with the same cache. */
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
{
byte[] _stringBytes = _cache.getSerializedValue(feature);
_writable.writeUnsignedVarint(_stringBytes.length + 1);
_writable.writeByteArray(_stringBytes);
}
_writable.writeShort(errorCode);
// Compact nullable string: varint 0 encodes null.
if (errorMessage == null) {
_writable.writeUnsignedVarint(0);
} else {
byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
_writable.writeUnsignedVarint(_stringBytes.length + 1);
_writable.writeByteArray(_stringBytes);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
/** Accumulates the serialized size and caches the UTF-8 string bytes for write(). */
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of UpdatableFeatureResult");
}
{
byte[] _stringBytes = feature.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'feature' field is too long to be serialized");
}
_cache.cacheSerializedValue(feature, _stringBytes);
_size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
}
_size.addBytes(2);
if (errorMessage == null) {
_size.addBytes(1);
} else {
byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'errorMessage' field is too long to be serialized");
}
_cache.cacheSerializedValue(errorMessage, _stringBytes);
_size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
}
// Collection-key equality: elements are keyed on the feature name only.
@Override
public boolean elementKeysAreEqual(Object obj) {
if (!(obj instanceof UpdatableFeatureResult)) return false;
UpdatableFeatureResult other = (UpdatableFeatureResult) obj;
if (this.feature == null) {
if (other.feature != null) return false;
} else {
if (!this.feature.equals(other.feature)) return false;
}
return true;
}
// Full structural equality over every field, including unknown tagged fields.
@Override
public boolean equals(Object obj) {
if (!(obj instanceof UpdatableFeatureResult)) return false;
UpdatableFeatureResult other = (UpdatableFeatureResult) obj;
if (this.feature == null) {
if (other.feature != null) return false;
} else {
if (!this.feature.equals(other.feature)) return false;
}
if (errorCode != other.errorCode) return false;
if (this.errorMessage == null) {
if (other.errorMessage != null) return false;
} else {
if (!this.errorMessage.equals(other.errorMessage)) return false;
}
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
// Hashes only the collection key (feature), matching elementKeysAreEqual
// rather than equals — required by ImplicitLinkedHashMultiCollection.
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (feature == null ? 0 : feature.hashCode());
return hashCode;
}
// Field-by-field copy, detached from any collection (next/prev reset by the constructor).
@Override
public UpdatableFeatureResult duplicate() {
UpdatableFeatureResult _duplicate = new UpdatableFeatureResult();
_duplicate.feature = feature;
_duplicate.errorCode = errorCode;
if (errorMessage == null) {
_duplicate.errorMessage = null;
} else {
_duplicate.errorMessage = errorMessage;
}
return _duplicate;
}
@Override
public String toString() {
return "UpdatableFeatureResult("
+ "feature=" + ((feature == null) ? "null" : "'" + feature.toString() + "'")
+ ", errorCode=" + errorCode
+ ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'")
+ ")";
}
public String feature() {
return this.feature;
}
public short errorCode() {
return this.errorCode;
}
public String errorMessage() {
return this.errorMessage;
}
// next()/prev()/setNext()/setPrev() expose the intrusive list indices to the collection.
@Override
public int next() {
return this.next;
}
@Override
public int prev() {
return this.prev;
}
// Lazily creates the list of unrecognized tagged fields.
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public UpdatableFeatureResult setFeature(String v) {
this.feature = v;
return this;
}
public UpdatableFeatureResult setErrorCode(short v) {
this.errorCode = v;
return this;
}
public UpdatableFeatureResult setErrorMessage(String v) {
this.errorMessage = v;
return this;
}
@Override
public void setNext(int v) {
this.next = v;
}
@Override
public void setPrev(int v) {
this.prev = v;
}
}
/**
 * A multi-collection of UpdatableFeatureResult elements, keyed by feature name.
 * Supports O(1) lookup via find()/findAll() using a key object that carries
 * only the feature field.
 */
public static class UpdatableFeatureResultCollection extends ImplicitLinkedHashMultiCollection<UpdatableFeatureResult> {
    /** Creates an empty collection with the default capacity. */
    public UpdatableFeatureResultCollection() {
        super();
    }
    /** Creates an empty collection pre-sized for the expected element count. */
    public UpdatableFeatureResultCollection(int expectedNumElements) {
        super(expectedNumElements);
    }
    /** Creates a collection containing the elements yielded by the iterator. */
    public UpdatableFeatureResultCollection(Iterator<UpdatableFeatureResult> iterator) {
        super(iterator);
    }
    /** Returns one element whose feature name matches, or null if none does. */
    public UpdatableFeatureResult find(String feature) {
        return find(new UpdatableFeatureResult().setFeature(feature));
    }
    /** Returns every element whose feature name matches. */
    public List<UpdatableFeatureResult> findAll(String feature) {
        return findAll(new UpdatableFeatureResult().setFeature(feature));
    }
    /** Returns a deep copy of this collection (elements are duplicated too). */
    public UpdatableFeatureResultCollection duplicate() {
        UpdatableFeatureResultCollection copy = new UpdatableFeatureResultCollection(size());
        for (UpdatableFeatureResult element : this) {
            copy.add(element.duplicate());
        }
        return copy;
    }
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/UpdateFeaturesResponseDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.UpdateFeaturesResponseData.*;
public class UpdateFeaturesResponseDataJsonConverter {
public static UpdateFeaturesResponseData read(JsonNode _node, short _version) {
UpdateFeaturesResponseData _object = new UpdateFeaturesResponseData();
JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
if (_throttleTimeMsNode == null) {
throw new RuntimeException("UpdateFeaturesResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
} else {
_object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "UpdateFeaturesResponseData");
}
JsonNode _errorCodeNode = _node.get("errorCode");
if (_errorCodeNode == null) {
throw new RuntimeException("UpdateFeaturesResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
} else {
_object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "UpdateFeaturesResponseData");
}
JsonNode _errorMessageNode = _node.get("errorMessage");
if (_errorMessageNode == null) {
throw new RuntimeException("UpdateFeaturesResponseData: unable to locate field 'errorMessage', which is mandatory in version " + _version);
} else {
if (_errorMessageNode.isNull()) {
_object.errorMessage = null;
} else {
if (!_errorMessageNode.isTextual()) {
throw new RuntimeException("UpdateFeaturesResponseData expected a string type, but got " + _node.getNodeType());
}
_object.errorMessage = _errorMessageNode.asText();
}
}
JsonNode _resultsNode = _node.get("results");
if (_resultsNode == null) {
throw new RuntimeException("UpdateFeaturesResponseData: unable to locate field 'results', which is mandatory in version " + _version);
} else {
if (!_resultsNode.isArray()) {
throw new RuntimeException("UpdateFeaturesResponseData expected a JSON array, but got " + _node.getNodeType());
}
UpdatableFeatureResultCollection _collection = new UpdatableFeatureResultCollection(_resultsNode.size());
_object.results = _collection;
for (JsonNode _element : _resultsNode) {
_collection.add(UpdatableFeatureResultJsonConverter.read(_element, _version));
}
}
return _object;
}
public static JsonNode write(UpdateFeaturesResponseData _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
_node.set("errorCode", new ShortNode(_object.errorCode));
if (_object.errorMessage == null) {
_node.set("errorMessage", NullNode.instance);
} else {
_node.set("errorMessage", new TextNode(_object.errorMessage));
}
ArrayNode _resultsArray = new ArrayNode(JsonNodeFactory.instance);
for (UpdatableFeatureResult _element : _object.results) {
_resultsArray.add(UpdatableFeatureResultJsonConverter.write(_element, _version, _serializeRecords));
}
_node.set("results", _resultsArray);
return _node;
}
public static JsonNode write(UpdateFeaturesResponseData _object, short _version) {
return write(_object, _version, true);
}
public static class UpdatableFeatureResultJsonConverter {
public static UpdatableFeatureResult read(JsonNode _node, short _version) {
UpdatableFeatureResult _object = new UpdatableFeatureResult();
JsonNode _featureNode = _node.get("feature");
if (_featureNode == null) {
throw new RuntimeException("UpdatableFeatureResult: unable to locate field 'feature', which is mandatory in version " + _version);
} else {
if (!_featureNode.isTextual()) {
throw new RuntimeException("UpdatableFeatureResult expected a string type, but got " + _node.getNodeType());
}
_object.feature = _featureNode.asText();
}
JsonNode _errorCodeNode = _node.get("errorCode");
if (_errorCodeNode == null) {
throw new RuntimeException("UpdatableFeatureResult: unable to locate field 'errorCode', which is mandatory in version " + _version);
} else {
_object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "UpdatableFeatureResult");
}
JsonNode _errorMessageNode = _node.get("errorMessage");
if (_errorMessageNode == null) {
throw new RuntimeException("UpdatableFeatureResult: unable to locate field 'errorMessage', which is mandatory in version " + _version);
} else {
if (_errorMessageNode.isNull()) {
_object.errorMessage = null;
} else {
if (!_errorMessageNode.isTextual()) {
throw new RuntimeException("UpdatableFeatureResult expected a string type, but got " + _node.getNodeType());
}
_object.errorMessage = _errorMessageNode.asText();
}
}
return _object;
}
public static JsonNode write(UpdatableFeatureResult _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("feature", new TextNode(_object.feature));
_node.set("errorCode", new ShortNode(_object.errorCode));
if (_object.errorMessage == null) {
_node.set("errorMessage", NullNode.instance);
} else {
_node.set("errorMessage", new TextNode(_object.errorMessage));
}
return _node;
}
public static JsonNode write(UpdatableFeatureResult _object, short _version) {
return write(_object, _version, true);
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/UpdateMetadataRequestData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
public class UpdateMetadataRequestData implements ApiMessage {
// The controller id.
int controllerId;
// True if the controller id refers to a KRaft controller (v8+, KIP-866 migration).
boolean isKRaftController;
// The controller epoch.
int controllerEpoch;
// The broker epoch (v5+); -1 when absent.
long brokerEpoch;
// In versions <= 4, each partition to update (flat list).
List<UpdateMetadataPartitionState> ungroupedPartitionStates;
// In versions >= 5, partitions grouped per topic.
List<UpdateMetadataTopicState> topicStates;
// The live brokers known to the controller.
List<UpdateMetadataBroker> liveBrokers;
// Tagged fields received on the wire that this version does not recognize.
private List<RawTaggedField> _unknownTaggedFields;
// One Schema constant per protocol version. Notable transitions:
//   v5 replaces ungrouped_partition_states with broker_epoch + topic_states;
//   v6 switches to compact (flexible) encodings and adds tagged fields;
//   v8 adds is_kraft_controller.
public static final Schema SCHEMA_0 =
    new Schema(
        new Field("controller_id", Type.INT32, "The controller id."),
        new Field("controller_epoch", Type.INT32, "The controller epoch."),
        new Field("ungrouped_partition_states", new ArrayOf(UpdateMetadataPartitionState.SCHEMA_0), "In older versions of this RPC, each partition that we would like to update."),
        new Field("live_brokers", new ArrayOf(UpdateMetadataBroker.SCHEMA_0), "")
    );
public static final Schema SCHEMA_1 =
    new Schema(
        new Field("controller_id", Type.INT32, "The controller id."),
        new Field("controller_epoch", Type.INT32, "The controller epoch."),
        new Field("ungrouped_partition_states", new ArrayOf(UpdateMetadataPartitionState.SCHEMA_0), "In older versions of this RPC, each partition that we would like to update."),
        new Field("live_brokers", new ArrayOf(UpdateMetadataBroker.SCHEMA_1), "")
    );
public static final Schema SCHEMA_2 =
    new Schema(
        new Field("controller_id", Type.INT32, "The controller id."),
        new Field("controller_epoch", Type.INT32, "The controller epoch."),
        new Field("ungrouped_partition_states", new ArrayOf(UpdateMetadataPartitionState.SCHEMA_0), "In older versions of this RPC, each partition that we would like to update."),
        new Field("live_brokers", new ArrayOf(UpdateMetadataBroker.SCHEMA_2), "")
    );
public static final Schema SCHEMA_3 =
    new Schema(
        new Field("controller_id", Type.INT32, "The controller id."),
        new Field("controller_epoch", Type.INT32, "The controller epoch."),
        new Field("ungrouped_partition_states", new ArrayOf(UpdateMetadataPartitionState.SCHEMA_0), "In older versions of this RPC, each partition that we would like to update."),
        new Field("live_brokers", new ArrayOf(UpdateMetadataBroker.SCHEMA_3), "")
    );
public static final Schema SCHEMA_4 =
    new Schema(
        new Field("controller_id", Type.INT32, "The controller id."),
        new Field("controller_epoch", Type.INT32, "The controller epoch."),
        new Field("ungrouped_partition_states", new ArrayOf(UpdateMetadataPartitionState.SCHEMA_4), "In older versions of this RPC, each partition that we would like to update."),
        new Field("live_brokers", new ArrayOf(UpdateMetadataBroker.SCHEMA_3), "")
    );
public static final Schema SCHEMA_5 =
    new Schema(
        new Field("controller_id", Type.INT32, "The controller id."),
        new Field("controller_epoch", Type.INT32, "The controller epoch."),
        new Field("broker_epoch", Type.INT64, "The broker epoch."),
        new Field("topic_states", new ArrayOf(UpdateMetadataTopicState.SCHEMA_5), "In newer versions of this RPC, each topic that we would like to update."),
        new Field("live_brokers", new ArrayOf(UpdateMetadataBroker.SCHEMA_3), "")
    );
public static final Schema SCHEMA_6 =
    new Schema(
        new Field("controller_id", Type.INT32, "The controller id."),
        new Field("controller_epoch", Type.INT32, "The controller epoch."),
        new Field("broker_epoch", Type.INT64, "The broker epoch."),
        new Field("topic_states", new CompactArrayOf(UpdateMetadataTopicState.SCHEMA_6), "In newer versions of this RPC, each topic that we would like to update."),
        new Field("live_brokers", new CompactArrayOf(UpdateMetadataBroker.SCHEMA_6), ""),
        TaggedFieldsSection.of(
        )
    );
public static final Schema SCHEMA_7 =
    new Schema(
        new Field("controller_id", Type.INT32, "The controller id."),
        new Field("controller_epoch", Type.INT32, "The controller epoch."),
        new Field("broker_epoch", Type.INT64, "The broker epoch."),
        new Field("topic_states", new CompactArrayOf(UpdateMetadataTopicState.SCHEMA_7), "In newer versions of this RPC, each topic that we would like to update."),
        new Field("live_brokers", new CompactArrayOf(UpdateMetadataBroker.SCHEMA_6), ""),
        TaggedFieldsSection.of(
        )
    );
public static final Schema SCHEMA_8 =
    new Schema(
        new Field("controller_id", Type.INT32, "The controller id."),
        new Field("is_kraft_controller", Type.BOOLEAN, "If KRaft controller id is used during migration. See KIP-866"),
        new Field("controller_epoch", Type.INT32, "The controller epoch."),
        new Field("broker_epoch", Type.INT64, "The broker epoch."),
        new Field("topic_states", new CompactArrayOf(UpdateMetadataTopicState.SCHEMA_7), "In newer versions of this RPC, each topic that we would like to update."),
        new Field("live_brokers", new CompactArrayOf(UpdateMetadataBroker.SCHEMA_6), ""),
        TaggedFieldsSection.of(
        )
    );
// Schema lookup table, indexed by version.
public static final Schema[] SCHEMAS = new Schema[] {
    SCHEMA_0,
    SCHEMA_1,
    SCHEMA_2,
    SCHEMA_3,
    SCHEMA_4,
    SCHEMA_5,
    SCHEMA_6,
    SCHEMA_7,
    SCHEMA_8
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 8;
/** Deserializing constructor: populates all fields from the given readable. */
public UpdateMetadataRequestData(Readable _readable, short _version) {
    read(_readable, _version);
}
/** Default constructor: initializes every field to its protocol default. */
public UpdateMetadataRequestData() {
    this.controllerId = 0;
    this.isKRaftController = false;
    this.controllerEpoch = 0;
    // -1 is the sentinel meaning "no broker epoch" (field only exists in v5+).
    this.brokerEpoch = -1L;
    this.ungroupedPartitionStates = new ArrayList<UpdateMetadataPartitionState>(0);
    this.topicStates = new ArrayList<UpdateMetadataTopicState>(0);
    this.liveBrokers = new ArrayList<UpdateMetadataBroker>(0);
}
/** @return the Kafka API key for UpdateMetadata (6). */
@Override
public short apiKey() {
    return 6;
}
/** @return the lowest message version this class can read/write (0). */
@Override
public short lowestSupportedVersion() {
    return 0;
}
/** @return the highest message version this class can read/write (8). */
@Override
public short highestSupportedVersion() {
    return 8;
}
/**
 * Deserializes this message from the wire format of the given version.
 * Fields are read in strict wire order; the per-version branches mirror the
 * SCHEMA_* definitions. Versions >= 6 are "flexible": arrays use compact
 * (varint length + 1) encoding and a tagged-field section follows the body.
 */
@Override
public void read(Readable _readable, short _version) {
    this.controllerId = _readable.readInt();
    if (_version >= 8) {
        // KIP-866 KRaft-migration flag, present only in v8+.
        this.isKRaftController = _readable.readByte() != 0;
    } else {
        this.isKRaftController = false;
    }
    this.controllerEpoch = _readable.readInt();
    if (_version >= 5) {
        this.brokerEpoch = _readable.readLong();
    } else {
        // Sentinel default when the field is absent on the wire.
        this.brokerEpoch = -1L;
    }
    if (_version <= 4) {
        // Old layout: a flat, non-nullable array of partition states.
        int arrayLength;
        arrayLength = _readable.readInt();
        if (arrayLength < 0) {
            throw new RuntimeException("non-nullable field ungroupedPartitionStates was serialized as null");
        } else {
            // Guard against absurd lengths to avoid huge speculative allocations.
            if (arrayLength > _readable.remaining()) {
                throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
            }
            ArrayList<UpdateMetadataPartitionState> newCollection = new ArrayList<>(arrayLength);
            for (int i = 0; i < arrayLength; i++) {
                newCollection.add(new UpdateMetadataPartitionState(_readable, _version));
            }
            this.ungroupedPartitionStates = newCollection;
        }
    } else {
        this.ungroupedPartitionStates = new ArrayList<UpdateMetadataPartitionState>(0);
    }
    if (_version >= 5) {
        if (_version >= 6) {
            // Flexible versions: compact array, length encoded as varint + 1.
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field topicStates was serialized as null");
            } else {
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<UpdateMetadataTopicState> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new UpdateMetadataTopicState(_readable, _version));
                }
                this.topicStates = newCollection;
            }
        } else {
            // v5: classic int32-length array.
            int arrayLength;
            arrayLength = _readable.readInt();
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field topicStates was serialized as null");
            } else {
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<UpdateMetadataTopicState> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new UpdateMetadataTopicState(_readable, _version));
                }
                this.topicStates = newCollection;
            }
        }
    } else {
        this.topicStates = new ArrayList<UpdateMetadataTopicState>(0);
    }
    {
        // liveBrokers exists in every version; only its encoding differs.
        if (_version >= 6) {
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field liveBrokers was serialized as null");
            } else {
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<UpdateMetadataBroker> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new UpdateMetadataBroker(_readable, _version));
                }
                this.liveBrokers = newCollection;
            }
        } else {
            int arrayLength;
            arrayLength = _readable.readInt();
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field liveBrokers was serialized as null");
            } else {
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<UpdateMetadataBroker> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new UpdateMetadataBroker(_readable, _version));
                }
                this.liveBrokers = newCollection;
            }
        }
    }
    this._unknownTaggedFields = null;
    if (_version >= 6) {
        // Flexible versions append a tagged-field section; this message defines
        // no tags of its own, so everything lands in _unknownTaggedFields.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }
}
/**
 * Serializes this message in the wire format of the given version.
 * Throws UnsupportedVersionException when a field holds a non-default value
 * that the requested version cannot represent.
 */
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
    int _numTaggedFields = 0;
    _writable.writeInt(controllerId);
    if (_version >= 8) {
        _writable.writeByte(isKRaftController ? (byte) 1 : (byte) 0);
    } else {
        // Older versions cannot carry the flag; refuse to silently drop it.
        if (this.isKRaftController) {
            throw new UnsupportedVersionException("Attempted to write a non-default isKRaftController at version " + _version);
        }
    }
    _writable.writeInt(controllerEpoch);
    if (_version >= 5) {
        _writable.writeLong(brokerEpoch);
    }
    if (_version <= 4) {
        _writable.writeInt(ungroupedPartitionStates.size());
        for (UpdateMetadataPartitionState ungroupedPartitionStatesElement : ungroupedPartitionStates) {
            ungroupedPartitionStatesElement.write(_writable, _cache, _version);
        }
    } else {
        if (!this.ungroupedPartitionStates.isEmpty()) {
            throw new UnsupportedVersionException("Attempted to write a non-default ungroupedPartitionStates at version " + _version);
        }
    }
    if (_version >= 5) {
        if (_version >= 6) {
            // Compact array encoding: varint of (size + 1).
            _writable.writeUnsignedVarint(topicStates.size() + 1);
            for (UpdateMetadataTopicState topicStatesElement : topicStates) {
                topicStatesElement.write(_writable, _cache, _version);
            }
        } else {
            _writable.writeInt(topicStates.size());
            for (UpdateMetadataTopicState topicStatesElement : topicStates) {
                topicStatesElement.write(_writable, _cache, _version);
            }
        }
    } else {
        if (!this.topicStates.isEmpty()) {
            throw new UnsupportedVersionException("Attempted to write a non-default topicStates at version " + _version);
        }
    }
    if (_version >= 6) {
        _writable.writeUnsignedVarint(liveBrokers.size() + 1);
        for (UpdateMetadataBroker liveBrokersElement : liveBrokers) {
            liveBrokersElement.write(_writable, _cache, _version);
        }
    } else {
        _writable.writeInt(liveBrokers.size());
        for (UpdateMetadataBroker liveBrokersElement : liveBrokers) {
            liveBrokersElement.write(_writable, _cache, _version);
        }
    }
    // Tagged-field trailer: only flexible versions (>= 6) may carry tags.
    RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
    _numTaggedFields += _rawWriter.numFields();
    if (_version >= 6) {
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    } else {
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }
}
/**
 * Accumulates the serialized size of this message for the given version.
 * Must stay byte-for-byte consistent with write(); the cache lets nested
 * structs reuse string encodings computed here during the later write pass.
 */
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
    int _numTaggedFields = 0;
    _size.addBytes(4);
    if (_version >= 8) {
        // isKRaftController boolean.
        _size.addBytes(1);
    }
    _size.addBytes(4);
    if (_version >= 5) {
        // brokerEpoch int64.
        _size.addBytes(8);
    }
    if (_version <= 4) {
        {
            _size.addBytes(4);
            for (UpdateMetadataPartitionState ungroupedPartitionStatesElement : ungroupedPartitionStates) {
                ungroupedPartitionStatesElement.addSize(_size, _cache, _version);
            }
        }
    }
    if (_version >= 5) {
        {
            if (_version >= 6) {
                // Compact array: varint-sized (size + 1) length prefix.
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topicStates.size() + 1));
            } else {
                _size.addBytes(4);
            }
            for (UpdateMetadataTopicState topicStatesElement : topicStates) {
                topicStatesElement.addSize(_size, _cache, _version);
            }
        }
    }
    {
        if (_version >= 6) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(liveBrokers.size() + 1));
        } else {
            _size.addBytes(4);
        }
        for (UpdateMetadataBroker liveBrokersElement : liveBrokers) {
            liveBrokersElement.addSize(_size, _cache, _version);
        }
    }
    if (_unknownTaggedFields != null) {
        _numTaggedFields += _unknownTaggedFields.size();
        for (RawTaggedField _field : _unknownTaggedFields) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
            _size.addBytes(_field.size());
        }
    }
    if (_version >= 6) {
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    } else {
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }
}
/** Structural equality over all message fields plus unknown tagged fields. */
@Override
public boolean equals(Object obj) {
    if (!(obj instanceof UpdateMetadataRequestData)) return false;
    UpdateMetadataRequestData other = (UpdateMetadataRequestData) obj;
    // Primitive fields first: cheapest comparisons, most likely to differ.
    if (controllerId != other.controllerId
            || isKRaftController != other.isKRaftController
            || controllerEpoch != other.controllerEpoch
            || brokerEpoch != other.brokerEpoch) {
        return false;
    }
    // Null-safe comparisons for the list-valued fields.
    if (ungroupedPartitionStates == null ? other.ungroupedPartitionStates != null
            : !ungroupedPartitionStates.equals(other.ungroupedPartitionStates)) {
        return false;
    }
    if (topicStates == null ? other.topicStates != null
            : !topicStates.equals(other.topicStates)) {
        return false;
    }
    if (liveBrokers == null ? other.liveBrokers != null
            : !liveBrokers.equals(other.liveBrokers)) {
        return false;
    }
    return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
/** Hash over all message fields; consistent with equals(). */
@Override
public int hashCode() {
    int h = 0;
    h = 31 * h + controllerId;
    h = 31 * h + (isKRaftController ? 1231 : 1237);
    h = 31 * h + controllerEpoch;
    // Long.hashCode(v) == (int)(v ^ (v >>> 32)), bit-identical to the
    // hand-rolled (int)(v >> 32) ^ (int) v form after truncation to int.
    h = 31 * h + Long.hashCode(brokerEpoch);
    h = 31 * h + (ungroupedPartitionStates == null ? 0 : ungroupedPartitionStates.hashCode());
    h = 31 * h + (topicStates == null ? 0 : topicStates.hashCode());
    h = 31 * h + (liveBrokers == null ? 0 : liveBrokers.hashCode());
    return h;
}
/** Returns a deep copy: list elements are duplicated individually. */
@Override
public UpdateMetadataRequestData duplicate() {
    UpdateMetadataRequestData copy = new UpdateMetadataRequestData();
    copy.controllerId = controllerId;
    copy.isKRaftController = isKRaftController;
    copy.controllerEpoch = controllerEpoch;
    copy.brokerEpoch = brokerEpoch;
    ArrayList<UpdateMetadataPartitionState> partitionCopies = new ArrayList<>(ungroupedPartitionStates.size());
    for (UpdateMetadataPartitionState state : ungroupedPartitionStates) {
        partitionCopies.add(state.duplicate());
    }
    copy.ungroupedPartitionStates = partitionCopies;
    ArrayList<UpdateMetadataTopicState> topicCopies = new ArrayList<>(topicStates.size());
    for (UpdateMetadataTopicState state : topicStates) {
        topicCopies.add(state.duplicate());
    }
    copy.topicStates = topicCopies;
    ArrayList<UpdateMetadataBroker> brokerCopies = new ArrayList<>(liveBrokers.size());
    for (UpdateMetadataBroker broker : liveBrokers) {
        brokerCopies.add(broker.duplicate());
    }
    copy.liveBrokers = brokerCopies;
    return copy;
}
/** Human-readable dump of every field; list contents rendered via deepToString. */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder("UpdateMetadataRequestData(");
    sb.append("controllerId=").append(controllerId);
    sb.append(", isKRaftController=").append(isKRaftController);
    sb.append(", controllerEpoch=").append(controllerEpoch);
    sb.append(", brokerEpoch=").append(brokerEpoch);
    sb.append(", ungroupedPartitionStates=").append(MessageUtil.deepToString(ungroupedPartitionStates.iterator()));
    sb.append(", topicStates=").append(MessageUtil.deepToString(topicStates.iterator()));
    sb.append(", liveBrokers=").append(MessageUtil.deepToString(liveBrokers.iterator()));
    sb.append(')');
    return sb.toString();
}
/** @return the controller id. */
public int controllerId() {
    return this.controllerId;
}
/** @return true if the controller id refers to a KRaft controller (v8+, KIP-866). */
public boolean isKRaftController() {
    return this.isKRaftController;
}
/** @return the controller epoch. */
public int controllerEpoch() {
    return this.controllerEpoch;
}
/** @return the broker epoch, or -1 if unset (pre-v5 default). */
public long brokerEpoch() {
    return this.brokerEpoch;
}
/** @return the flat partition-state list used by versions &lt;= 4. */
public List<UpdateMetadataPartitionState> ungroupedPartitionStates() {
    return this.ungroupedPartitionStates;
}
/** @return the per-topic partition states used by versions &gt;= 5. */
public List<UpdateMetadataTopicState> topicStates() {
    return this.topicStates;
}
/** @return the live brokers known to the controller. */
public List<UpdateMetadataBroker> liveBrokers() {
    return this.liveBrokers;
}
/**
 * Returns the raw tagged fields that were not recognized during parsing.
 * Lazily initialized, so the result is always a non-null mutable list.
 */
@Override
public List<RawTaggedField> unknownTaggedFields() {
    if (_unknownTaggedFields == null) {
        _unknownTaggedFields = new ArrayList<>(0);
    }
    return _unknownTaggedFields;
}
// Fluent setters: each returns this so calls can be chained.
public UpdateMetadataRequestData setControllerId(int v) {
    this.controllerId = v;
    return this;
}
public UpdateMetadataRequestData setIsKRaftController(boolean v) {
    this.isKRaftController = v;
    return this;
}
public UpdateMetadataRequestData setControllerEpoch(int v) {
    this.controllerEpoch = v;
    return this;
}
public UpdateMetadataRequestData setBrokerEpoch(long v) {
    this.brokerEpoch = v;
    return this;
}
public UpdateMetadataRequestData setUngroupedPartitionStates(List<UpdateMetadataPartitionState> v) {
    this.ungroupedPartitionStates = v;
    return this;
}
public UpdateMetadataRequestData setTopicStates(List<UpdateMetadataTopicState> v) {
    this.topicStates = v;
    return this;
}
public UpdateMetadataRequestData setLiveBrokers(List<UpdateMetadataBroker> v) {
    this.liveBrokers = v;
    return this;
}
public static class UpdateMetadataTopicState implements Message {
// The topic name.
String topicName;
// The topic id (v7+); ZERO_UUID when absent.
Uuid topicId;
// The partition states to update for this topic.
List<UpdateMetadataPartitionState> partitionStates;
// Tagged fields received on the wire that this version does not recognize.
private List<RawTaggedField> _unknownTaggedFields;
// Per-version schemas: this struct first appears in message version 5;
// v6 switches to compact encodings + tagged fields; v7 adds topic_id.
public static final Schema SCHEMA_5 =
    new Schema(
        new Field("topic_name", Type.STRING, "The topic name."),
        new Field("partition_states", new ArrayOf(UpdateMetadataPartitionState.SCHEMA_5), "The partition that we would like to update.")
    );
public static final Schema SCHEMA_6 =
    new Schema(
        new Field("topic_name", Type.COMPACT_STRING, "The topic name."),
        new Field("partition_states", new CompactArrayOf(UpdateMetadataPartitionState.SCHEMA_6), "The partition that we would like to update."),
        TaggedFieldsSection.of(
        )
    );
public static final Schema SCHEMA_7 =
    new Schema(
        new Field("topic_name", Type.COMPACT_STRING, "The topic name."),
        new Field("topic_id", Type.UUID, "The topic id."),
        new Field("partition_states", new CompactArrayOf(UpdateMetadataPartitionState.SCHEMA_6), "The partition that we would like to update."),
        TaggedFieldsSection.of(
        )
    );
// v8 wire format is unchanged from v7.
public static final Schema SCHEMA_8 = SCHEMA_7;
// Schema lookup table indexed by message version; null for versions
// in which this struct does not appear.
public static final Schema[] SCHEMAS = new Schema[] {
    null,
    null,
    null,
    null,
    null,
    SCHEMA_5,
    SCHEMA_6,
    SCHEMA_7,
    SCHEMA_8
};
public static final short LOWEST_SUPPORTED_VERSION = 5;
public static final short HIGHEST_SUPPORTED_VERSION = 8;
/** Deserializing constructor: populates all fields from the given readable. */
public UpdateMetadataTopicState(Readable _readable, short _version) {
    read(_readable, _version);
}
/** Default constructor: initializes every field to its protocol default. */
public UpdateMetadataTopicState() {
    this.topicName = "";
    this.topicId = Uuid.ZERO_UUID;
    this.partitionStates = new ArrayList<UpdateMetadataPartitionState>(0);
}
// NOTE(review): these return the enclosing message's version range (0..8),
// not this struct's own LOWEST_SUPPORTED_VERSION (5) — nested structs are
// versioned by the parent message's versions; presumably intentional in the
// generated code, but confirm against the message generator before changing.
@Override
public short lowestSupportedVersion() {
    return 0;
}
@Override
public short highestSupportedVersion() {
    return 8;
}
/**
 * Deserializes this struct from the wire format of the given version.
 * In flexible versions (>= 6) strings and arrays use compact (varint + 1)
 * length encoding and a tagged-field section follows the body.
 */
@Override
public void read(Readable _readable, short _version) {
    if (_version > 8) {
        throw new UnsupportedVersionException("Can't read version " + _version + " of UpdateMetadataTopicState");
    }
    {
        int length;
        if (_version >= 6) {
            // Compact string: varint of (byte length + 1).
            length = _readable.readUnsignedVarint() - 1;
        } else {
            // Classic string: int16 byte length.
            length = _readable.readShort();
        }
        if (length < 0) {
            throw new RuntimeException("non-nullable field topicName was serialized as null");
        } else if (length > 0x7fff) {
            // Protocol strings are capped at Short.MAX_VALUE bytes.
            throw new RuntimeException("string field topicName had invalid length " + length);
        } else {
            this.topicName = _readable.readString(length);
        }
    }
    if (_version >= 7) {
        this.topicId = _readable.readUuid();
    } else {
        this.topicId = Uuid.ZERO_UUID;
    }
    {
        if (_version >= 6) {
            // Compact array: varint of (size + 1).
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field partitionStates was serialized as null");
            } else {
                // Guard against absurd lengths to avoid huge speculative allocations.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<UpdateMetadataPartitionState> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new UpdateMetadataPartitionState(_readable, _version));
                }
                this.partitionStates = newCollection;
            }
        } else {
            int arrayLength;
            arrayLength = _readable.readInt();
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field partitionStates was serialized as null");
            } else {
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<UpdateMetadataPartitionState> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new UpdateMetadataPartitionState(_readable, _version));
                }
                this.partitionStates = newCollection;
            }
        }
    }
    this._unknownTaggedFields = null;
    if (_version >= 6) {
        // Tagged-field trailer; this struct defines no tags of its own.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }
}
/**
 * Serializes this struct in the wire format of the given version.
 * The topicName bytes are fetched from the serialization cache, which
 * addSize() populated earlier in the same serialization pass.
 */
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
    if (_version < 5) {
        // This struct only exists in message versions 5 and above.
        throw new UnsupportedVersionException("Can't write version " + _version + " of UpdateMetadataTopicState");
    }
    int _numTaggedFields = 0;
    {
        byte[] _stringBytes = _cache.getSerializedValue(topicName);
        if (_version >= 6) {
            // Compact string: varint of (byte length + 1).
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
        } else {
            _writable.writeShort((short) _stringBytes.length);
        }
        _writable.writeByteArray(_stringBytes);
    }
    if (_version >= 7) {
        _writable.writeUuid(topicId);
    }
    if (_version >= 6) {
        // Compact array: varint of (size + 1).
        _writable.writeUnsignedVarint(partitionStates.size() + 1);
        for (UpdateMetadataPartitionState partitionStatesElement : partitionStates) {
            partitionStatesElement.write(_writable, _cache, _version);
        }
    } else {
        _writable.writeInt(partitionStates.size());
        for (UpdateMetadataPartitionState partitionStatesElement : partitionStates) {
            partitionStatesElement.write(_writable, _cache, _version);
        }
    }
    // Tagged-field trailer: only flexible versions (>= 6) may carry tags.
    RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
    _numTaggedFields += _rawWriter.numFields();
    if (_version >= 6) {
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    } else {
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }
}
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
// Accumulates this struct's serialized size into _size, caching the UTF-8
// bytes of topicName in _cache so write() can reuse them.
int _numTaggedFields = 0;
if (_version > 8) {
throw new UnsupportedVersionException("Can't size version " + _version + " of UpdateMetadataTopicState");
}
{
byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8);
// String lengths are capped at Short.MAX_VALUE on the wire.
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'topicName' field is too long to be serialized");
}
_cache.cacheSerializedValue(topicName, _stringBytes);
if (_version >= 6) {
_size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
} else {
_size.addBytes(_stringBytes.length + 2);
}
}
// topicId (a 16-byte UUID) only appears in versions 7+.
if (_version >= 7) {
_size.addBytes(16);
}
{
// Array length prefix: compact varint for v6+, four bytes otherwise.
if (_version >= 6) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitionStates.size() + 1));
} else {
_size.addBytes(4);
}
for (UpdateMetadataPartitionState partitionStatesElement : partitionStates) {
partitionStatesElement.addSize(_size, _cache, _version);
}
}
// Each raw tagged field costs its tag varint, its size varint, and its payload.
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
if (_version >= 6) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
@Override
public boolean equals(Object obj) {
// Structural equality: type check, then null-safe field comparisons, then
// a byte-for-byte comparison of any unknown tagged fields.
if (!(obj instanceof UpdateMetadataTopicState)) return false;
UpdateMetadataTopicState that = (UpdateMetadataTopicState) obj;
if (this.topicName == null ? that.topicName != null : !this.topicName.equals(that.topicName)) {
return false;
}
if (!this.topicId.equals(that.topicId)) {
return false;
}
if (this.partitionStates == null ? that.partitionStates != null : !this.partitionStates.equals(that.partitionStates)) {
return false;
}
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, that._unknownTaggedFields);
}
@Override
public int hashCode() {
// 31-based running hash over the non-tagged fields; null references
// contribute 0, mirroring the null handling in equals().
int result = 0;
result = 31 * result + ((topicName != null) ? topicName.hashCode() : 0);
result = 31 * result + topicId.hashCode();
result = 31 * result + ((partitionStates != null) ? partitionStates.hashCode() : 0);
return result;
}
@Override
public UpdateMetadataTopicState duplicate() {
// Deep copy: the name and UUID are immutable and shared, while the
// partition-state list and each of its elements are duplicated.
UpdateMetadataTopicState copy = new UpdateMetadataTopicState();
copy.topicName = topicName;
copy.topicId = topicId;
ArrayList<UpdateMetadataPartitionState> copiedStates = new ArrayList<UpdateMetadataPartitionState>(partitionStates.size());
for (UpdateMetadataPartitionState state : partitionStates) {
copiedStates.add(state.duplicate());
}
copy.partitionStates = copiedStates;
return copy;
}
@Override
public String toString() {
// Builds exactly the same representation as the generated concatenation:
// strings are quoted, the UUID uses its own toString, and the partition
// list is rendered via MessageUtil.deepToString.
StringBuilder sb = new StringBuilder("UpdateMetadataTopicState(");
sb.append("topicName=").append((topicName == null) ? "null" : "'" + topicName.toString() + "'");
sb.append(", topicId=").append(topicId.toString());
sb.append(", partitionStates=").append(MessageUtil.deepToString(partitionStates.iterator()));
sb.append(")");
return sb.toString();
}
// Returns the topic name.
public String topicName() {
return this.topicName;
}
// Returns the topic id (Uuid.ZERO_UUID when read from versions below 7).
public Uuid topicId() {
return this.topicId;
}
// Returns the live partition-state list (not a defensive copy).
public List<UpdateMetadataPartitionState> partitionStates() {
return this.partitionStates;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
// Lazily materializes the tagged-field list so callers can append to it.
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
// Fluent setter; returns this to allow chained configuration.
public UpdateMetadataTopicState setTopicName(String v) {
this.topicName = v;
return this;
}
// Fluent setter; returns this to allow chained configuration.
public UpdateMetadataTopicState setTopicId(Uuid v) {
this.topicId = v;
return this;
}
// Fluent setter; returns this to allow chained configuration.
public UpdateMetadataTopicState setPartitionStates(List<UpdateMetadataPartitionState> v) {
this.partitionStates = v;
return this;
}
}
public static class UpdateMetadataBroker implements Message {
// One broker entry in an UpdateMetadata request: the broker id, the
// legacy (version 0) host/port pair, the endpoint list used from
// version 1 on, and the rack (nullable from version 2 on).
int id;
String v0Host;
int v0Port;
List<UpdateMetadataEndpoint> endpoints;
String rack;
// Unknown tagged fields preserved verbatim on flexible versions (6+).
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("id", Type.INT32, "The broker id."),
new Field("v0_host", Type.STRING, "The broker hostname."),
new Field("v0_port", Type.INT32, "The broker port.")
);
// Version 1 replaced the bare host/port with a list of endpoints.
public static final Schema SCHEMA_1 =
new Schema(
new Field("id", Type.INT32, "The broker id."),
new Field("endpoints", new ArrayOf(UpdateMetadataEndpoint.SCHEMA_1), "The broker endpoints.")
);
// Version 2 added the nullable rack field.
public static final Schema SCHEMA_2 =
new Schema(
new Field("id", Type.INT32, "The broker id."),
new Field("endpoints", new ArrayOf(UpdateMetadataEndpoint.SCHEMA_1), "The broker endpoints."),
new Field("rack", Type.NULLABLE_STRING, "The rack which this broker belongs to.")
);
public static final Schema SCHEMA_3 =
new Schema(
new Field("id", Type.INT32, "The broker id."),
new Field("endpoints", new ArrayOf(UpdateMetadataEndpoint.SCHEMA_3), "The broker endpoints."),
new Field("rack", Type.NULLABLE_STRING, "The rack which this broker belongs to.")
);
public static final Schema SCHEMA_4 = SCHEMA_3;
public static final Schema SCHEMA_5 = SCHEMA_4;
// Version 6 switched to the flexible format: compact arrays/strings plus
// a (currently empty) tagged-fields section.
public static final Schema SCHEMA_6 =
new Schema(
new Field("id", Type.INT32, "The broker id."),
new Field("endpoints", new CompactArrayOf(UpdateMetadataEndpoint.SCHEMA_6), "The broker endpoints."),
new Field("rack", Type.COMPACT_NULLABLE_STRING, "The rack which this broker belongs to."),
TaggedFieldsSection.of(
)
);
public static final Schema SCHEMA_7 = SCHEMA_6;
public static final Schema SCHEMA_8 = SCHEMA_7;
// Indexed by message version.
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1,
SCHEMA_2,
SCHEMA_3,
SCHEMA_4,
SCHEMA_5,
SCHEMA_6,
SCHEMA_7,
SCHEMA_8
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 8;
// Deserializing constructor: reads the struct directly from _readable.
public UpdateMetadataBroker(Readable _readable, short _version) {
read(_readable, _version);
}
// Default constructor: initializes every field to its schema default.
public UpdateMetadataBroker() {
this.id = 0;
this.v0Host = "";
this.v0Port = 0;
this.endpoints = new ArrayList<UpdateMetadataEndpoint>(0);
this.rack = "";
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 8;
}
@Override
public void read(Readable _readable, short _version) {
// Deserializes one broker entry. Fields are version-gated: v0Host/v0Port
// exist only in version 0, endpoints from version 1, rack from version 2,
// and compact encodings plus tagged fields from version 6.
if (_version > 8) {
throw new UnsupportedVersionException("Can't read version " + _version + " of UpdateMetadataBroker");
}
this.id = _readable.readInt();
if (_version <= 0) {
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field v0Host was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field v0Host had invalid length " + length);
} else {
this.v0Host = _readable.readString(length);
}
} else {
// Absent in this version; fall back to the field default.
this.v0Host = "";
}
if (_version <= 0) {
this.v0Port = _readable.readInt();
} else {
this.v0Port = 0;
}
if (_version >= 1) {
if (_version >= 6) {
// Compact array: stored length is (size + 1), so 0 means null.
int arrayLength;
arrayLength = _readable.readUnsignedVarint() - 1;
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field endpoints was serialized as null");
} else {
// Guard against hostile lengths before allocating.
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<UpdateMetadataEndpoint> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new UpdateMetadataEndpoint(_readable, _version));
}
this.endpoints = newCollection;
}
} else {
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field endpoints was serialized as null");
} else {
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<UpdateMetadataEndpoint> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new UpdateMetadataEndpoint(_readable, _version));
}
this.endpoints = newCollection;
}
}
} else {
this.endpoints = new ArrayList<UpdateMetadataEndpoint>(0);
}
if (_version >= 2) {
// rack is nullable: a negative decoded length means null.
int length;
if (_version >= 6) {
length = _readable.readUnsignedVarint() - 1;
} else {
length = _readable.readShort();
}
if (length < 0) {
this.rack = null;
} else if (length > 0x7fff) {
throw new RuntimeException("string field rack had invalid length " + length);
} else {
this.rack = _readable.readString(length);
}
} else {
this.rack = "";
}
this._unknownTaggedFields = null;
if (_version >= 6) {
// No tags are currently defined, so everything lands in the
// unknown-tagged-fields passthrough list.
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
// Serializes one broker entry; must mirror read() field-for-field.
// String bytes are expected to have been cached by addSize().
int _numTaggedFields = 0;
_writable.writeInt(id);
if (_version <= 0) {
{
byte[] _stringBytes = _cache.getSerializedValue(v0Host);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
}
if (_version <= 0) {
_writable.writeInt(v0Port);
}
if (_version >= 1) {
if (_version >= 6) {
// Compact array prefix is (size + 1) as an unsigned varint.
_writable.writeUnsignedVarint(endpoints.size() + 1);
for (UpdateMetadataEndpoint endpointsElement : endpoints) {
endpointsElement.write(_writable, _cache, _version);
}
} else {
_writable.writeInt(endpoints.size());
for (UpdateMetadataEndpoint endpointsElement : endpoints) {
endpointsElement.write(_writable, _cache, _version);
}
}
}
if (_version >= 2) {
if (rack == null) {
// Null rack: varint 0 in compact form, length -1 otherwise.
if (_version >= 6) {
_writable.writeUnsignedVarint(0);
} else {
_writable.writeShort((short) -1);
}
} else {
byte[] _stringBytes = _cache.getSerializedValue(rack);
if (_version >= 6) {
_writable.writeUnsignedVarint(_stringBytes.length + 1);
} else {
_writable.writeShort((short) _stringBytes.length);
}
_writable.writeByteArray(_stringBytes);
}
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_version >= 6) {
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
// Accumulates the serialized size, caching string bytes for write().
int _numTaggedFields = 0;
if (_version > 8) {
throw new UnsupportedVersionException("Can't size version " + _version + " of UpdateMetadataBroker");
}
_size.addBytes(4);
if (_version <= 0) {
{
byte[] _stringBytes = v0Host.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'v0Host' field is too long to be serialized");
}
_cache.cacheSerializedValue(v0Host, _stringBytes);
_size.addBytes(_stringBytes.length + 2);
}
}
if (_version <= 0) {
_size.addBytes(4);
}
if (_version >= 1) {
{
if (_version >= 6) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(endpoints.size() + 1));
} else {
_size.addBytes(4);
}
for (UpdateMetadataEndpoint endpointsElement : endpoints) {
endpointsElement.addSize(_size, _cache, _version);
}
}
}
if (_version >= 2) {
if (rack == null) {
// Null marker: one varint byte (compact) or a two-byte -1 length.
if (_version >= 6) {
_size.addBytes(1);
} else {
_size.addBytes(2);
}
} else {
byte[] _stringBytes = rack.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'rack' field is too long to be serialized");
}
_cache.cacheSerializedValue(rack, _stringBytes);
if (_version >= 6) {
_size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
} else {
_size.addBytes(_stringBytes.length + 2);
}
}
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
if (_version >= 6) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
@Override
public boolean equals(Object obj) {
// Field-by-field structural equality with null-safe reference checks,
// plus byte comparison of any unknown tagged fields.
if (!(obj instanceof UpdateMetadataBroker)) return false;
UpdateMetadataBroker other = (UpdateMetadataBroker) obj;
if (id != other.id) return false;
if (this.v0Host == null) {
if (other.v0Host != null) return false;
} else {
if (!this.v0Host.equals(other.v0Host)) return false;
}
if (v0Port != other.v0Port) return false;
if (this.endpoints == null) {
if (other.endpoints != null) return false;
} else {
if (!this.endpoints.equals(other.endpoints)) return false;
}
if (this.rack == null) {
if (other.rack != null) return false;
} else {
if (!this.rack.equals(other.rack)) return false;
}
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
// 31-based hash over the non-tagged fields; nulls contribute 0.
int hashCode = 0;
hashCode = 31 * hashCode + id;
hashCode = 31 * hashCode + (v0Host == null ? 0 : v0Host.hashCode());
hashCode = 31 * hashCode + v0Port;
hashCode = 31 * hashCode + (endpoints == null ? 0 : endpoints.hashCode());
hashCode = 31 * hashCode + (rack == null ? 0 : rack.hashCode());
return hashCode;
}
@Override
public UpdateMetadataBroker duplicate() {
// Deep copy: the endpoints list and its elements are duplicated;
// immutable strings are shared.
UpdateMetadataBroker _duplicate = new UpdateMetadataBroker();
_duplicate.id = id;
_duplicate.v0Host = v0Host;
_duplicate.v0Port = v0Port;
ArrayList<UpdateMetadataEndpoint> newEndpoints = new ArrayList<UpdateMetadataEndpoint>(endpoints.size());
for (UpdateMetadataEndpoint _element : endpoints) {
newEndpoints.add(_element.duplicate());
}
_duplicate.endpoints = newEndpoints;
if (rack == null) {
_duplicate.rack = null;
} else {
_duplicate.rack = rack;
}
return _duplicate;
}
@Override
public String toString() {
return "UpdateMetadataBroker("
+ "id=" + id
+ ", v0Host=" + ((v0Host == null) ? "null" : "'" + v0Host.toString() + "'")
+ ", v0Port=" + v0Port
+ ", endpoints=" + MessageUtil.deepToString(endpoints.iterator())
+ ", rack=" + ((rack == null) ? "null" : "'" + rack.toString() + "'")
+ ")";
}
// Plain accessors; list accessors return the live collection.
public int id() {
return this.id;
}
public String v0Host() {
return this.v0Host;
}
public int v0Port() {
return this.v0Port;
}
public List<UpdateMetadataEndpoint> endpoints() {
return this.endpoints;
}
public String rack() {
return this.rack;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
// Lazily materializes the tagged-field list so callers can append to it.
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
// Fluent setters: each returns this to allow chained configuration.
public UpdateMetadataBroker setId(int v) {
this.id = v;
return this;
}
public UpdateMetadataBroker setV0Host(String v) {
this.v0Host = v;
return this;
}
public UpdateMetadataBroker setV0Port(int v) {
this.v0Port = v;
return this;
}
public UpdateMetadataBroker setEndpoints(List<UpdateMetadataEndpoint> v) {
this.endpoints = v;
return this;
}
public UpdateMetadataBroker setRack(String v) {
this.rack = v;
return this;
}
}
public static class UpdateMetadataEndpoint implements Message {
// One listener endpoint of a broker: port, host, listener name (from
// version 3 on), and the security protocol id.
int port;
String host;
String listener;
short securityProtocol;
// Unknown tagged fields preserved verbatim on flexible versions (6+).
private List<RawTaggedField> _unknownTaggedFields;
// The struct first appears in version 1 (SCHEMAS[0] below is null).
public static final Schema SCHEMA_1 =
new Schema(
new Field("port", Type.INT32, "The port of this endpoint"),
new Field("host", Type.STRING, "The hostname of this endpoint"),
new Field("security_protocol", Type.INT16, "The security protocol type.")
);
public static final Schema SCHEMA_2 = SCHEMA_1;
// Version 3 added the listener name.
public static final Schema SCHEMA_3 =
new Schema(
new Field("port", Type.INT32, "The port of this endpoint"),
new Field("host", Type.STRING, "The hostname of this endpoint"),
new Field("listener", Type.STRING, "The listener name."),
new Field("security_protocol", Type.INT16, "The security protocol type.")
);
public static final Schema SCHEMA_4 = SCHEMA_3;
public static final Schema SCHEMA_5 = SCHEMA_4;
// Version 6 switched to compact strings plus a tagged-fields section.
public static final Schema SCHEMA_6 =
new Schema(
new Field("port", Type.INT32, "The port of this endpoint"),
new Field("host", Type.COMPACT_STRING, "The hostname of this endpoint"),
new Field("listener", Type.COMPACT_STRING, "The listener name."),
new Field("security_protocol", Type.INT16, "The security protocol type."),
TaggedFieldsSection.of(
)
);
public static final Schema SCHEMA_7 = SCHEMA_6;
public static final Schema SCHEMA_8 = SCHEMA_7;
// Indexed by message version; null marks versions without this struct.
public static final Schema[] SCHEMAS = new Schema[] {
null,
SCHEMA_1,
SCHEMA_2,
SCHEMA_3,
SCHEMA_4,
SCHEMA_5,
SCHEMA_6,
SCHEMA_7,
SCHEMA_8
};
public static final short LOWEST_SUPPORTED_VERSION = 1;
public static final short HIGHEST_SUPPORTED_VERSION = 8;
// Deserializing constructor: reads the struct directly from _readable.
public UpdateMetadataEndpoint(Readable _readable, short _version) {
read(_readable, _version);
}
// Default constructor: initializes every field to its schema default.
public UpdateMetadataEndpoint() {
this.port = 0;
this.host = "";
this.listener = "";
this.securityProtocol = (short) 0;
}
@Override
public short lowestSupportedVersion() {
// NOTE(review): returns 0 although LOWEST_SUPPORTED_VERSION is 1 —
// presumably the generator's convention for nested structs; confirm
// against the message generator before relying on it.
return 0;
}
@Override
public short highestSupportedVersion() {
return 8;
}
@Override
public void read(Readable _readable, short _version) {
// Deserializes one endpoint. listener exists only from version 3;
// versions 6+ use compact string lengths and a tagged-field section.
if (_version > 8) {
throw new UnsupportedVersionException("Can't read version " + _version + " of UpdateMetadataEndpoint");
}
this.port = _readable.readInt();
{
// host is non-nullable: a negative decoded length is an error.
int length;
if (_version >= 6) {
length = _readable.readUnsignedVarint() - 1;
} else {
length = _readable.readShort();
}
if (length < 0) {
throw new RuntimeException("non-nullable field host was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field host had invalid length " + length);
} else {
this.host = _readable.readString(length);
}
}
if (_version >= 3) {
int length;
if (_version >= 6) {
length = _readable.readUnsignedVarint() - 1;
} else {
length = _readable.readShort();
}
if (length < 0) {
throw new RuntimeException("non-nullable field listener was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field listener had invalid length " + length);
} else {
this.listener = _readable.readString(length);
}
} else {
// Absent in this version; fall back to the field default.
this.listener = "";
}
this.securityProtocol = _readable.readShort();
this._unknownTaggedFields = null;
if (_version >= 6) {
// No tags are currently defined, so everything lands in the
// unknown-tagged-fields passthrough list.
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
// Serializes one endpoint; must mirror read() field-for-field. String
// bytes are expected to have been cached by addSize().
if (_version < 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of UpdateMetadataEndpoint");
}
int _numTaggedFields = 0;
_writable.writeInt(port);
{
byte[] _stringBytes = _cache.getSerializedValue(host);
if (_version >= 6) {
// Compact string prefix is (length + 1) as an unsigned varint.
_writable.writeUnsignedVarint(_stringBytes.length + 1);
} else {
_writable.writeShort((short) _stringBytes.length);
}
_writable.writeByteArray(_stringBytes);
}
if (_version >= 3) {
{
byte[] _stringBytes = _cache.getSerializedValue(listener);
if (_version >= 6) {
_writable.writeUnsignedVarint(_stringBytes.length + 1);
} else {
_writable.writeShort((short) _stringBytes.length);
}
_writable.writeByteArray(_stringBytes);
}
}
_writable.writeShort(securityProtocol);
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_version >= 6) {
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
// Accumulates the serialized size, caching string bytes for write().
int _numTaggedFields = 0;
if (_version > 8) {
throw new UnsupportedVersionException("Can't size version " + _version + " of UpdateMetadataEndpoint");
}
_size.addBytes(4);
{
byte[] _stringBytes = host.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'host' field is too long to be serialized");
}
_cache.cacheSerializedValue(host, _stringBytes);
if (_version >= 6) {
_size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
} else {
_size.addBytes(_stringBytes.length + 2);
}
}
if (_version >= 3) {
{
byte[] _stringBytes = listener.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'listener' field is too long to be serialized");
}
_cache.cacheSerializedValue(listener, _stringBytes);
if (_version >= 6) {
_size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
} else {
_size.addBytes(_stringBytes.length + 2);
}
}
}
_size.addBytes(2);
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
if (_version >= 6) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
@Override
public boolean equals(Object obj) {
// Field-by-field structural equality with null-safe reference checks,
// plus byte comparison of any unknown tagged fields.
if (!(obj instanceof UpdateMetadataEndpoint)) return false;
UpdateMetadataEndpoint other = (UpdateMetadataEndpoint) obj;
if (port != other.port) return false;
if (this.host == null) {
if (other.host != null) return false;
} else {
if (!this.host.equals(other.host)) return false;
}
if (this.listener == null) {
if (other.listener != null) return false;
} else {
if (!this.listener.equals(other.listener)) return false;
}
if (securityProtocol != other.securityProtocol) return false;
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
// 31-based hash over the non-tagged fields; nulls contribute 0.
int hashCode = 0;
hashCode = 31 * hashCode + port;
hashCode = 31 * hashCode + (host == null ? 0 : host.hashCode());
hashCode = 31 * hashCode + (listener == null ? 0 : listener.hashCode());
hashCode = 31 * hashCode + securityProtocol;
return hashCode;
}
@Override
public UpdateMetadataEndpoint duplicate() {
// All fields are primitives or immutable strings, so a shallow copy
// is a complete duplicate.
UpdateMetadataEndpoint _duplicate = new UpdateMetadataEndpoint();
_duplicate.port = port;
_duplicate.host = host;
_duplicate.listener = listener;
_duplicate.securityProtocol = securityProtocol;
return _duplicate;
}
@Override
public String toString() {
return "UpdateMetadataEndpoint("
+ "port=" + port
+ ", host=" + ((host == null) ? "null" : "'" + host.toString() + "'")
+ ", listener=" + ((listener == null) ? "null" : "'" + listener.toString() + "'")
+ ", securityProtocol=" + securityProtocol
+ ")";
}
// Plain accessors.
public int port() {
return this.port;
}
public String host() {
return this.host;
}
public String listener() {
return this.listener;
}
public short securityProtocol() {
return this.securityProtocol;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
// Lazily materializes the tagged-field list so callers can append to it.
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
// Fluent setters: each returns this to allow chained configuration.
public UpdateMetadataEndpoint setPort(int v) {
this.port = v;
return this;
}
public UpdateMetadataEndpoint setHost(String v) {
this.host = v;
return this;
}
public UpdateMetadataEndpoint setListener(String v) {
this.listener = v;
return this;
}
public UpdateMetadataEndpoint setSecurityProtocol(short v) {
this.securityProtocol = v;
return this;
}
}
public static class UpdateMetadataPartitionState implements Message {
// Per-partition replication state carried by an UpdateMetadata request.
// Below version 5 each entry also carries its topic name; from version 5
// on, the name is omitted from this struct (see SCHEMA_5).
String topicName;
int partitionIndex;
int controllerEpoch;
int leader;
int leaderEpoch;
// Broker ids currently in the ISR for this partition.
List<Integer> isr;
int zkVersion;
// All replica broker ids for this partition.
List<Integer> replicas;
// Replica broker ids that are offline (present from version 4 on).
List<Integer> offlineReplicas;
// Unknown tagged fields preserved verbatim on flexible versions (6+).
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("topic_name", Type.STRING, "In older versions of this RPC, the topic name."),
new Field("partition_index", Type.INT32, "The partition index."),
new Field("controller_epoch", Type.INT32, "The controller epoch."),
new Field("leader", Type.INT32, "The ID of the broker which is the current partition leader."),
new Field("leader_epoch", Type.INT32, "The leader epoch of this partition."),
new Field("isr", new ArrayOf(Type.INT32), "The brokers which are in the ISR for this partition."),
new Field("zk_version", Type.INT32, "The Zookeeper version."),
new Field("replicas", new ArrayOf(Type.INT32), "All the replicas of this partition.")
);
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema SCHEMA_2 = SCHEMA_1;
public static final Schema SCHEMA_3 = SCHEMA_2;
// Version 4 added the offline_replicas list.
public static final Schema SCHEMA_4 =
new Schema(
new Field("topic_name", Type.STRING, "In older versions of this RPC, the topic name."),
new Field("partition_index", Type.INT32, "The partition index."),
new Field("controller_epoch", Type.INT32, "The controller epoch."),
new Field("leader", Type.INT32, "The ID of the broker which is the current partition leader."),
new Field("leader_epoch", Type.INT32, "The leader epoch of this partition."),
new Field("isr", new ArrayOf(Type.INT32), "The brokers which are in the ISR for this partition."),
new Field("zk_version", Type.INT32, "The Zookeeper version."),
new Field("replicas", new ArrayOf(Type.INT32), "All the replicas of this partition."),
new Field("offline_replicas", new ArrayOf(Type.INT32), "The replicas of this partition which are offline.")
);
// Version 5 dropped the per-partition topic_name.
public static final Schema SCHEMA_5 =
new Schema(
new Field("partition_index", Type.INT32, "The partition index."),
new Field("controller_epoch", Type.INT32, "The controller epoch."),
new Field("leader", Type.INT32, "The ID of the broker which is the current partition leader."),
new Field("leader_epoch", Type.INT32, "The leader epoch of this partition."),
new Field("isr", new ArrayOf(Type.INT32), "The brokers which are in the ISR for this partition."),
new Field("zk_version", Type.INT32, "The Zookeeper version."),
new Field("replicas", new ArrayOf(Type.INT32), "All the replicas of this partition."),
new Field("offline_replicas", new ArrayOf(Type.INT32), "The replicas of this partition which are offline.")
);
// Version 6 switched to compact arrays plus a tagged-fields section.
public static final Schema SCHEMA_6 =
new Schema(
new Field("partition_index", Type.INT32, "The partition index."),
new Field("controller_epoch", Type.INT32, "The controller epoch."),
new Field("leader", Type.INT32, "The ID of the broker which is the current partition leader."),
new Field("leader_epoch", Type.INT32, "The leader epoch of this partition."),
new Field("isr", new CompactArrayOf(Type.INT32), "The brokers which are in the ISR for this partition."),
new Field("zk_version", Type.INT32, "The Zookeeper version."),
new Field("replicas", new CompactArrayOf(Type.INT32), "All the replicas of this partition."),
new Field("offline_replicas", new CompactArrayOf(Type.INT32), "The replicas of this partition which are offline."),
TaggedFieldsSection.of(
)
);
public static final Schema SCHEMA_7 = SCHEMA_6;
public static final Schema SCHEMA_8 = SCHEMA_7;
// Indexed by message version.
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1,
SCHEMA_2,
SCHEMA_3,
SCHEMA_4,
SCHEMA_5,
SCHEMA_6,
SCHEMA_7,
SCHEMA_8
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 8;
// Deserializing constructor: reads the struct directly from _readable.
public UpdateMetadataPartitionState(Readable _readable, short _version) {
read(_readable, _version);
}
// Default constructor: initializes every field to its schema default.
public UpdateMetadataPartitionState() {
this.topicName = "";
this.partitionIndex = 0;
this.controllerEpoch = 0;
this.leader = 0;
this.leaderEpoch = 0;
this.isr = new ArrayList<Integer>(0);
this.zkVersion = 0;
this.replicas = new ArrayList<Integer>(0);
this.offlineReplicas = new ArrayList<Integer>(0);
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
// NOTE(review): returns Short.MAX_VALUE while HIGHEST_SUPPORTED_VERSION
// is 8 — presumably the generator's open-ended convention for shared
// structs; confirm against the message generator before relying on it.
return 32767;
}
@Override
public void read(Readable _readable, short _version) {
// Deserializes one partition state. topicName exists only through
// version 4; offlineReplicas exists from version 4; versions 6+ use
// compact (varint) array lengths and a trailing tagged-field section.
if (_version <= 4) {
int length;
length = _readable.readShort();
if (length < 0) {
throw new RuntimeException("non-nullable field topicName was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field topicName had invalid length " + length);
} else {
this.topicName = _readable.readString(length);
}
} else {
// Absent in this version; fall back to the field default.
this.topicName = "";
}
this.partitionIndex = _readable.readInt();
this.controllerEpoch = _readable.readInt();
this.leader = _readable.readInt();
this.leaderEpoch = _readable.readInt();
{
// isr: compact length is stored as (size + 1), so 0 decodes to null.
int arrayLength;
if (_version >= 6) {
arrayLength = _readable.readUnsignedVarint() - 1;
} else {
arrayLength = _readable.readInt();
}
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field isr was serialized as null");
} else {
// Guard against hostile lengths before allocating.
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<Integer> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(_readable.readInt());
}
this.isr = newCollection;
}
}
this.zkVersion = _readable.readInt();
{
int arrayLength;
if (_version >= 6) {
arrayLength = _readable.readUnsignedVarint() - 1;
} else {
arrayLength = _readable.readInt();
}
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field replicas was serialized as null");
} else {
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<Integer> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(_readable.readInt());
}
this.replicas = newCollection;
}
}
if (_version >= 4) {
int arrayLength;
if (_version >= 6) {
arrayLength = _readable.readUnsignedVarint() - 1;
} else {
arrayLength = _readable.readInt();
}
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field offlineReplicas was serialized as null");
} else {
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<Integer> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(_readable.readInt());
}
this.offlineReplicas = newCollection;
}
} else {
this.offlineReplicas = new ArrayList<Integer>(0);
}
this._unknownTaggedFields = null;
if (_version >= 6) {
// No tags are currently defined, so everything lands in the
// unknown-tagged-fields passthrough list.
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
}
@Override
// Serializes this partition state to the Kafka wire format.
// The statement order below defines the on-wire layout — do not reorder.
// v6+ is a "flexible" version: array lengths are written as unsigned varints
// offset by +1, and a tagged-fields section is appended; older versions use
// fixed 4-byte lengths and reject tagged fields.
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
// topicName is only on the wire through v4; v5+ groups partitions under topic states.
if (_version <= 4) {
{
// Reuses the UTF-8 bytes cached by addSize() so the string is encoded only once.
byte[] _stringBytes = _cache.getSerializedValue(topicName);
_writable.writeShort((short) _stringBytes.length);
_writable.writeByteArray(_stringBytes);
}
}
_writable.writeInt(partitionIndex);
_writable.writeInt(controllerEpoch);
_writable.writeInt(leader);
_writable.writeInt(leaderEpoch);
// isr: compact (size+1 varint) length for v6+, fixed int32 length otherwise.
if (_version >= 6) {
_writable.writeUnsignedVarint(isr.size() + 1);
} else {
_writable.writeInt(isr.size());
}
for (Integer isrElement : isr) {
_writable.writeInt(isrElement);
}
_writable.writeInt(zkVersion);
// replicas: same length-encoding rule as isr.
if (_version >= 6) {
_writable.writeUnsignedVarint(replicas.size() + 1);
} else {
_writable.writeInt(replicas.size());
}
for (Integer replicasElement : replicas) {
_writable.writeInt(replicasElement);
}
// offlineReplicas exists on the wire only from v4 onward.
if (_version >= 4) {
if (_version >= 6) {
_writable.writeUnsignedVarint(offlineReplicas.size() + 1);
} else {
_writable.writeInt(offlineReplicas.size());
}
for (Integer offlineReplicasElement : offlineReplicas) {
_writable.writeInt(offlineReplicasElement);
}
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_version >= 6) {
// Flexible versions always end with a tagged-field count, even when zero.
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
} else {
// Pre-flexible versions have no place for tagged fields; refuse rather than drop them.
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
@Override
// Accumulates the serialized size of this object into _size without writing
// any bytes. Must mirror write() exactly, branch for branch, or the
// preallocated buffer will be the wrong length.
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
if (_version <= 4) {
{
byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8);
// Non-flexible strings carry an int16 length prefix, capping them at 0x7fff bytes.
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'topicName' field is too long to be serialized");
}
// Cache the encoded bytes so write() can reuse them instead of re-encoding.
_cache.cacheSerializedValue(topicName, _stringBytes);
_size.addBytes(_stringBytes.length + 2);
}
}
// partitionIndex, controllerEpoch, leader, leaderEpoch: four fixed int32 fields.
_size.addBytes(4);
_size.addBytes(4);
_size.addBytes(4);
_size.addBytes(4);
{
// isr length prefix: varint(size+1) for flexible v6+, else fixed int32.
if (_version >= 6) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(isr.size() + 1));
} else {
_size.addBytes(4);
}
_size.addBytes(isr.size() * 4);
}
// zkVersion: int32.
_size.addBytes(4);
{
if (_version >= 6) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(replicas.size() + 1));
} else {
_size.addBytes(4);
}
_size.addBytes(replicas.size() * 4);
}
// offlineReplicas only occupies wire space from v4 onward.
if (_version >= 4) {
{
if (_version >= 6) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(offlineReplicas.size() + 1));
} else {
_size.addBytes(4);
}
_size.addBytes(offlineReplicas.size() * 4);
}
}
// Each unknown tagged field costs its tag varint, size varint, and raw payload.
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
if (_version >= 6) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
@Override
public boolean equals(Object obj) {
    // Structural equality over every field, including any unknown tagged fields.
    if (!(obj instanceof UpdateMetadataPartitionState)) return false;
    UpdateMetadataPartitionState that = (UpdateMetadataPartitionState) obj;
    // Cheap primitive comparisons first.
    if (partitionIndex != that.partitionIndex) return false;
    if (controllerEpoch != that.controllerEpoch) return false;
    if (leader != that.leader) return false;
    if (leaderEpoch != that.leaderEpoch) return false;
    if (zkVersion != that.zkVersion) return false;
    // Null-safe comparisons for the reference-typed fields.
    if (topicName == null ? that.topicName != null : !topicName.equals(that.topicName)) return false;
    if (isr == null ? that.isr != null : !isr.equals(that.isr)) return false;
    if (replicas == null ? that.replicas != null : !replicas.equals(that.replicas)) return false;
    if (offlineReplicas == null ? that.offlineReplicas != null : !offlineReplicas.equals(that.offlineReplicas)) return false;
    return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, that._unknownTaggedFields);
}
@Override
public int hashCode() {
    // 31-based polynomial hash over the same fields equals() compares
    // (unknown tagged fields are deliberately excluded, matching the generator).
    int result = topicName == null ? 0 : topicName.hashCode();
    result = 31 * result + partitionIndex;
    result = 31 * result + controllerEpoch;
    result = 31 * result + leader;
    result = 31 * result + leaderEpoch;
    result = 31 * result + (isr == null ? 0 : isr.hashCode());
    result = 31 * result + zkVersion;
    result = 31 * result + (replicas == null ? 0 : replicas.hashCode());
    result = 31 * result + (offlineReplicas == null ? 0 : offlineReplicas.hashCode());
    return result;
}
@Override
public UpdateMetadataPartitionState duplicate() {
    // Returns a copy that shares no list instances with this object:
    // scalars are assigned directly, list fields are copied into fresh ArrayLists.
    UpdateMetadataPartitionState copy = new UpdateMetadataPartitionState();
    copy.topicName = topicName;
    copy.partitionIndex = partitionIndex;
    copy.controllerEpoch = controllerEpoch;
    copy.leader = leader;
    copy.leaderEpoch = leaderEpoch;
    copy.isr = new ArrayList<Integer>(isr);
    copy.zkVersion = zkVersion;
    copy.replicas = new ArrayList<Integer>(replicas);
    copy.offlineReplicas = new ArrayList<Integer>(offlineReplicas);
    return copy;
}
@Override
public String toString() {
    // Renders every field; output format matches the generator's convention
    // (topicName quoted unless null, lists via MessageUtil.deepToString).
    StringBuilder sb = new StringBuilder("UpdateMetadataPartitionState(");
    sb.append("topicName=").append(topicName == null ? "null" : "'" + topicName + "'");
    sb.append(", partitionIndex=").append(partitionIndex);
    sb.append(", controllerEpoch=").append(controllerEpoch);
    sb.append(", leader=").append(leader);
    sb.append(", leaderEpoch=").append(leaderEpoch);
    sb.append(", isr=").append(MessageUtil.deepToString(isr.iterator()));
    sb.append(", zkVersion=").append(zkVersion);
    sb.append(", replicas=").append(MessageUtil.deepToString(replicas.iterator()));
    sb.append(", offlineReplicas=").append(MessageUtil.deepToString(offlineReplicas.iterator()));
    sb.append(")");
    return sb.toString();
}
/** @return the topic name (may be empty for v5+ where topics are grouped). */
public String topicName() {
    return topicName;
}

/** @return the partition index within the topic. */
public int partitionIndex() {
    return partitionIndex;
}

/** @return the epoch of the controller that sent this state. */
public int controllerEpoch() {
    return controllerEpoch;
}

/** @return the broker id of the partition leader. */
public int leader() {
    return leader;
}

/** @return the leader epoch of the partition. */
public int leaderEpoch() {
    return leaderEpoch;
}

/** @return the in-sync replica broker ids. */
public List<Integer> isr() {
    return isr;
}

/** @return the ZooKeeper version field carried with this state. */
public int zkVersion() {
    return zkVersion;
}

/** @return all replica broker ids. */
public List<Integer> replicas() {
    return replicas;
}

/** @return the replica broker ids currently offline. */
public List<Integer> offlineReplicas() {
    return offlineReplicas;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
    // Lazily materialized so messages without tagged fields pay no allocation.
    List<RawTaggedField> fields = _unknownTaggedFields;
    if (fields == null) {
        fields = new ArrayList<>(0);
        _unknownTaggedFields = fields;
    }
    return fields;
}
/** Sets the topic name. @return this, for call chaining. */
public UpdateMetadataPartitionState setTopicName(String v) { this.topicName = v; return this; }

/** Sets the partition index. @return this, for call chaining. */
public UpdateMetadataPartitionState setPartitionIndex(int v) { this.partitionIndex = v; return this; }

/** Sets the controller epoch. @return this, for call chaining. */
public UpdateMetadataPartitionState setControllerEpoch(int v) { this.controllerEpoch = v; return this; }

/** Sets the leader broker id. @return this, for call chaining. */
public UpdateMetadataPartitionState setLeader(int v) { this.leader = v; return this; }

/** Sets the leader epoch. @return this, for call chaining. */
public UpdateMetadataPartitionState setLeaderEpoch(int v) { this.leaderEpoch = v; return this; }

/** Sets the in-sync replica list (stored by reference, not copied). @return this. */
public UpdateMetadataPartitionState setIsr(List<Integer> v) { this.isr = v; return this; }

/** Sets the ZooKeeper version. @return this, for call chaining. */
public UpdateMetadataPartitionState setZkVersion(int v) { this.zkVersion = v; return this; }

/** Sets the replica list (stored by reference, not copied). @return this. */
public UpdateMetadataPartitionState setReplicas(List<Integer> v) { this.replicas = v; return this; }

/** Sets the offline-replica list (stored by reference, not copied). @return this. */
public UpdateMetadataPartitionState setOfflineReplicas(List<Integer> v) { this.offlineReplicas = v; return this; }
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/UpdateMetadataRequestDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.UpdateMetadataRequestData.*;
public class UpdateMetadataRequestDataJsonConverter {
// Parses an UpdateMetadataRequestData from its JSON representation.
// A missing JSON field is an error only for versions where that field is
// mandatory; otherwise the version-appropriate default is substituted.
// NOTE(review): the type-mismatch error messages below report the PARENT
// node's type (_node.getNodeType()), not the offending child's — a quirk of
// the code generator that makes the messages slightly misleading.
public static UpdateMetadataRequestData read(JsonNode _node, short _version) {
UpdateMetadataRequestData _object = new UpdateMetadataRequestData();
JsonNode _controllerIdNode = _node.get("controllerId");
if (_controllerIdNode == null) {
throw new RuntimeException("UpdateMetadataRequestData: unable to locate field 'controllerId', which is mandatory in version " + _version);
} else {
_object.controllerId = MessageUtil.jsonNodeToInt(_controllerIdNode, "UpdateMetadataRequestData");
}
// isKRaftController: mandatory from v8; defaults to false before that.
JsonNode _isKRaftControllerNode = _node.get("isKRaftController");
if (_isKRaftControllerNode == null) {
if (_version >= 8) {
throw new RuntimeException("UpdateMetadataRequestData: unable to locate field 'isKRaftController', which is mandatory in version " + _version);
} else {
_object.isKRaftController = false;
}
} else {
if (!_isKRaftControllerNode.isBoolean()) {
throw new RuntimeException("UpdateMetadataRequestData expected Boolean type, but got " + _node.getNodeType());
}
_object.isKRaftController = _isKRaftControllerNode.asBoolean();
}
JsonNode _controllerEpochNode = _node.get("controllerEpoch");
if (_controllerEpochNode == null) {
throw new RuntimeException("UpdateMetadataRequestData: unable to locate field 'controllerEpoch', which is mandatory in version " + _version);
} else {
_object.controllerEpoch = MessageUtil.jsonNodeToInt(_controllerEpochNode, "UpdateMetadataRequestData");
}
// brokerEpoch: mandatory from v5; defaults to -1 before that.
JsonNode _brokerEpochNode = _node.get("brokerEpoch");
if (_brokerEpochNode == null) {
if (_version >= 5) {
throw new RuntimeException("UpdateMetadataRequestData: unable to locate field 'brokerEpoch', which is mandatory in version " + _version);
} else {
_object.brokerEpoch = -1L;
}
} else {
_object.brokerEpoch = MessageUtil.jsonNodeToLong(_brokerEpochNode, "UpdateMetadataRequestData");
}
// ungroupedPartitionStates: the flat v0-v4 layout; empty for v5+ where
// partitions are grouped under topicStates instead.
JsonNode _ungroupedPartitionStatesNode = _node.get("ungroupedPartitionStates");
if (_ungroupedPartitionStatesNode == null) {
if (_version <= 4) {
throw new RuntimeException("UpdateMetadataRequestData: unable to locate field 'ungroupedPartitionStates', which is mandatory in version " + _version);
} else {
_object.ungroupedPartitionStates = new ArrayList<UpdateMetadataPartitionState>(0);
}
} else {
if (!_ungroupedPartitionStatesNode.isArray()) {
throw new RuntimeException("UpdateMetadataRequestData expected a JSON array, but got " + _node.getNodeType());
}
ArrayList<UpdateMetadataPartitionState> _collection = new ArrayList<UpdateMetadataPartitionState>(_ungroupedPartitionStatesNode.size());
_object.ungroupedPartitionStates = _collection;
for (JsonNode _element : _ungroupedPartitionStatesNode) {
_collection.add(UpdateMetadataPartitionStateJsonConverter.read(_element, _version));
}
}
// topicStates: the grouped v5+ layout, mirror of the above.
JsonNode _topicStatesNode = _node.get("topicStates");
if (_topicStatesNode == null) {
if (_version >= 5) {
throw new RuntimeException("UpdateMetadataRequestData: unable to locate field 'topicStates', which is mandatory in version " + _version);
} else {
_object.topicStates = new ArrayList<UpdateMetadataTopicState>(0);
}
} else {
if (!_topicStatesNode.isArray()) {
throw new RuntimeException("UpdateMetadataRequestData expected a JSON array, but got " + _node.getNodeType());
}
ArrayList<UpdateMetadataTopicState> _collection = new ArrayList<UpdateMetadataTopicState>(_topicStatesNode.size());
_object.topicStates = _collection;
for (JsonNode _element : _topicStatesNode) {
_collection.add(UpdateMetadataTopicStateJsonConverter.read(_element, _version));
}
}
// liveBrokers is mandatory in every version.
JsonNode _liveBrokersNode = _node.get("liveBrokers");
if (_liveBrokersNode == null) {
throw new RuntimeException("UpdateMetadataRequestData: unable to locate field 'liveBrokers', which is mandatory in version " + _version);
} else {
if (!_liveBrokersNode.isArray()) {
throw new RuntimeException("UpdateMetadataRequestData expected a JSON array, but got " + _node.getNodeType());
}
ArrayList<UpdateMetadataBroker> _collection = new ArrayList<UpdateMetadataBroker>(_liveBrokersNode.size());
_object.liveBrokers = _collection;
for (JsonNode _element : _liveBrokersNode) {
_collection.add(UpdateMetadataBrokerJsonConverter.read(_element, _version));
}
}
return _object;
}
// Serializes an UpdateMetadataRequestData to JSON for the given version.
// Fields absent from a version are omitted; a non-default value in a field
// that the version cannot represent raises UnsupportedVersionException
// rather than being silently dropped.
public static JsonNode write(UpdateMetadataRequestData _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("controllerId", new IntNode(_object.controllerId));
if (_version >= 8) {
_node.set("isKRaftController", BooleanNode.valueOf(_object.isKRaftController));
} else {
// Pre-v8 has no isKRaftController field; only the default (false) is representable.
if (_object.isKRaftController) {
throw new UnsupportedVersionException("Attempted to write a non-default isKRaftController at version " + _version);
}
}
_node.set("controllerEpoch", new IntNode(_object.controllerEpoch));
if (_version >= 5) {
_node.set("brokerEpoch", new LongNode(_object.brokerEpoch));
}
// ungroupedPartitionStates (flat layout) only exists through v4.
if (_version <= 4) {
ArrayNode _ungroupedPartitionStatesArray = new ArrayNode(JsonNodeFactory.instance);
for (UpdateMetadataPartitionState _element : _object.ungroupedPartitionStates) {
_ungroupedPartitionStatesArray.add(UpdateMetadataPartitionStateJsonConverter.write(_element, _version, _serializeRecords));
}
_node.set("ungroupedPartitionStates", _ungroupedPartitionStatesArray);
} else {
if (!_object.ungroupedPartitionStates.isEmpty()) {
throw new UnsupportedVersionException("Attempted to write a non-default ungroupedPartitionStates at version " + _version);
}
}
// topicStates (grouped layout) only exists from v5 onward.
if (_version >= 5) {
ArrayNode _topicStatesArray = new ArrayNode(JsonNodeFactory.instance);
for (UpdateMetadataTopicState _element : _object.topicStates) {
_topicStatesArray.add(UpdateMetadataTopicStateJsonConverter.write(_element, _version, _serializeRecords));
}
_node.set("topicStates", _topicStatesArray);
} else {
if (!_object.topicStates.isEmpty()) {
throw new UnsupportedVersionException("Attempted to write a non-default topicStates at version " + _version);
}
}
_liveBrokersToJson: {
}
ArrayNode _liveBrokersArray = new ArrayNode(JsonNodeFactory.instance);
for (UpdateMetadataBroker _element : _object.liveBrokers) {
_liveBrokersArray.add(UpdateMetadataBrokerJsonConverter.write(_element, _version, _serializeRecords));
}
_node.set("liveBrokers", _liveBrokersArray);
return _node;
}
// Convenience overload: serializes with records included (_serializeRecords = true).
public static JsonNode write(UpdateMetadataRequestData _object, short _version) {
return write(_object, _version, true);
}
// JSON converter for the UpdateMetadataBroker sub-record. Version gating:
// v0Host/v0Port exist only at v0, endpoints from v1, rack (nullable) from v2.
public static class UpdateMetadataBrokerJsonConverter {
// Parses a broker entry, substituting version-appropriate defaults for
// fields that are optional at this version.
public static UpdateMetadataBroker read(JsonNode _node, short _version) {
UpdateMetadataBroker _object = new UpdateMetadataBroker();
JsonNode _idNode = _node.get("id");
if (_idNode == null) {
throw new RuntimeException("UpdateMetadataBroker: unable to locate field 'id', which is mandatory in version " + _version);
} else {
_object.id = MessageUtil.jsonNodeToInt(_idNode, "UpdateMetadataBroker");
}
// v0Host: mandatory only at v0; defaults to "" afterwards.
JsonNode _v0HostNode = _node.get("v0Host");
if (_v0HostNode == null) {
if (_version <= 0) {
throw new RuntimeException("UpdateMetadataBroker: unable to locate field 'v0Host', which is mandatory in version " + _version);
} else {
_object.v0Host = "";
}
} else {
if (!_v0HostNode.isTextual()) {
throw new RuntimeException("UpdateMetadataBroker expected a string type, but got " + _node.getNodeType());
}
_object.v0Host = _v0HostNode.asText();
}
// v0Port: mandatory only at v0; defaults to 0 afterwards.
JsonNode _v0PortNode = _node.get("v0Port");
if (_v0PortNode == null) {
if (_version <= 0) {
throw new RuntimeException("UpdateMetadataBroker: unable to locate field 'v0Port', which is mandatory in version " + _version);
} else {
_object.v0Port = 0;
}
} else {
_object.v0Port = MessageUtil.jsonNodeToInt(_v0PortNode, "UpdateMetadataBroker");
}
// endpoints: mandatory from v1; empty list at v0.
JsonNode _endpointsNode = _node.get("endpoints");
if (_endpointsNode == null) {
if (_version >= 1) {
throw new RuntimeException("UpdateMetadataBroker: unable to locate field 'endpoints', which is mandatory in version " + _version);
} else {
_object.endpoints = new ArrayList<UpdateMetadataEndpoint>(0);
}
} else {
if (!_endpointsNode.isArray()) {
throw new RuntimeException("UpdateMetadataBroker expected a JSON array, but got " + _node.getNodeType());
}
ArrayList<UpdateMetadataEndpoint> _collection = new ArrayList<UpdateMetadataEndpoint>(_endpointsNode.size());
_object.endpoints = _collection;
for (JsonNode _element : _endpointsNode) {
_collection.add(UpdateMetadataEndpointJsonConverter.read(_element, _version));
}
}
// rack: mandatory from v2, and explicitly nullable (JSON null is accepted).
JsonNode _rackNode = _node.get("rack");
if (_rackNode == null) {
if (_version >= 2) {
throw new RuntimeException("UpdateMetadataBroker: unable to locate field 'rack', which is mandatory in version " + _version);
} else {
_object.rack = "";
}
} else {
if (_rackNode.isNull()) {
_object.rack = null;
} else {
if (!_rackNode.isTextual()) {
throw new RuntimeException("UpdateMetadataBroker expected a string type, but got " + _node.getNodeType());
}
_object.rack = _rackNode.asText();
}
}
return _object;
}
// Serializes a broker entry, emitting only the fields present at _version.
public static JsonNode write(UpdateMetadataBroker _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("id", new IntNode(_object.id));
if (_version <= 0) {
_node.set("v0Host", new TextNode(_object.v0Host));
}
if (_version <= 0) {
_node.set("v0Port", new IntNode(_object.v0Port));
}
if (_version >= 1) {
ArrayNode _endpointsArray = new ArrayNode(JsonNodeFactory.instance);
for (UpdateMetadataEndpoint _element : _object.endpoints) {
_endpointsArray.add(UpdateMetadataEndpointJsonConverter.write(_element, _version, _serializeRecords));
}
_node.set("endpoints", _endpointsArray);
}
if (_version >= 2) {
// rack is nullable: null maps to an explicit JSON null, not an omitted key.
if (_object.rack == null) {
_node.set("rack", NullNode.instance);
} else {
_node.set("rack", new TextNode(_object.rack));
}
}
return _node;
}
// Convenience overload: serializes with records included.
public static JsonNode write(UpdateMetadataBroker _object, short _version) {
return write(_object, _version, true);
}
}
// JSON converter for the UpdateMetadataEndpoint sub-record. Endpoints only
// exist from v1 onward; listener is additionally gated on v3+.
public static class UpdateMetadataEndpointJsonConverter {
public static UpdateMetadataEndpoint read(JsonNode _node, short _version) {
UpdateMetadataEndpoint _object = new UpdateMetadataEndpoint();
// This record does not exist at v0 at all.
if (_version < 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of UpdateMetadataEndpoint");
}
JsonNode _portNode = _node.get("port");
if (_portNode == null) {
throw new RuntimeException("UpdateMetadataEndpoint: unable to locate field 'port', which is mandatory in version " + _version);
} else {
_object.port = MessageUtil.jsonNodeToInt(_portNode, "UpdateMetadataEndpoint");
}
JsonNode _hostNode = _node.get("host");
if (_hostNode == null) {
throw new RuntimeException("UpdateMetadataEndpoint: unable to locate field 'host', which is mandatory in version " + _version);
} else {
if (!_hostNode.isTextual()) {
throw new RuntimeException("UpdateMetadataEndpoint expected a string type, but got " + _node.getNodeType());
}
_object.host = _hostNode.asText();
}
// listener: mandatory from v3; defaults to "" before that.
JsonNode _listenerNode = _node.get("listener");
if (_listenerNode == null) {
if (_version >= 3) {
throw new RuntimeException("UpdateMetadataEndpoint: unable to locate field 'listener', which is mandatory in version " + _version);
} else {
_object.listener = "";
}
} else {
if (!_listenerNode.isTextual()) {
throw new RuntimeException("UpdateMetadataEndpoint expected a string type, but got " + _node.getNodeType());
}
_object.listener = _listenerNode.asText();
}
// securityProtocol is stored as the protocol's int16 id.
JsonNode _securityProtocolNode = _node.get("securityProtocol");
if (_securityProtocolNode == null) {
throw new RuntimeException("UpdateMetadataEndpoint: unable to locate field 'securityProtocol', which is mandatory in version " + _version);
} else {
_object.securityProtocol = MessageUtil.jsonNodeToShort(_securityProtocolNode, "UpdateMetadataEndpoint");
}
return _object;
}
public static JsonNode write(UpdateMetadataEndpoint _object, short _version, boolean _serializeRecords) {
if (_version < 1) {
throw new UnsupportedVersionException("Can't write version " + _version + " of UpdateMetadataEndpoint");
}
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("port", new IntNode(_object.port));
_node.set("host", new TextNode(_object.host));
if (_version >= 3) {
_node.set("listener", new TextNode(_object.listener));
}
_node.set("securityProtocol", new ShortNode(_object.securityProtocol));
return _node;
}
// Convenience overload: serializes with records included.
public static JsonNode write(UpdateMetadataEndpoint _object, short _version) {
return write(_object, _version, true);
}
}
// JSON converter for the UpdateMetadataPartitionState sub-record.
// topicName is serialized only through v4 (v5+ groups partitions by topic);
// offlineReplicas exists from v4 onward.
public static class UpdateMetadataPartitionStateJsonConverter {
public static UpdateMetadataPartitionState read(JsonNode _node, short _version) {
UpdateMetadataPartitionState _object = new UpdateMetadataPartitionState();
// topicName: mandatory through v4; defaults to "" for v5+.
JsonNode _topicNameNode = _node.get("topicName");
if (_topicNameNode == null) {
if (_version <= 4) {
throw new RuntimeException("UpdateMetadataPartitionState: unable to locate field 'topicName', which is mandatory in version " + _version);
} else {
_object.topicName = "";
}
} else {
if (!_topicNameNode.isTextual()) {
throw new RuntimeException("UpdateMetadataPartitionState expected a string type, but got " + _node.getNodeType());
}
_object.topicName = _topicNameNode.asText();
}
JsonNode _partitionIndexNode = _node.get("partitionIndex");
if (_partitionIndexNode == null) {
throw new RuntimeException("UpdateMetadataPartitionState: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
} else {
_object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "UpdateMetadataPartitionState");
}
JsonNode _controllerEpochNode = _node.get("controllerEpoch");
if (_controllerEpochNode == null) {
throw new RuntimeException("UpdateMetadataPartitionState: unable to locate field 'controllerEpoch', which is mandatory in version " + _version);
} else {
_object.controllerEpoch = MessageUtil.jsonNodeToInt(_controllerEpochNode, "UpdateMetadataPartitionState");
}
JsonNode _leaderNode = _node.get("leader");
if (_leaderNode == null) {
throw new RuntimeException("UpdateMetadataPartitionState: unable to locate field 'leader', which is mandatory in version " + _version);
} else {
_object.leader = MessageUtil.jsonNodeToInt(_leaderNode, "UpdateMetadataPartitionState");
}
JsonNode _leaderEpochNode = _node.get("leaderEpoch");
if (_leaderEpochNode == null) {
throw new RuntimeException("UpdateMetadataPartitionState: unable to locate field 'leaderEpoch', which is mandatory in version " + _version);
} else {
_object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "UpdateMetadataPartitionState");
}
// isr: mandatory JSON array of broker ids in every version.
JsonNode _isrNode = _node.get("isr");
if (_isrNode == null) {
throw new RuntimeException("UpdateMetadataPartitionState: unable to locate field 'isr', which is mandatory in version " + _version);
} else {
if (!_isrNode.isArray()) {
throw new RuntimeException("UpdateMetadataPartitionState expected a JSON array, but got " + _node.getNodeType());
}
ArrayList<Integer> _collection = new ArrayList<Integer>(_isrNode.size());
_object.isr = _collection;
for (JsonNode _element : _isrNode) {
_collection.add(MessageUtil.jsonNodeToInt(_element, "UpdateMetadataPartitionState element"));
}
}
JsonNode _zkVersionNode = _node.get("zkVersion");
if (_zkVersionNode == null) {
throw new RuntimeException("UpdateMetadataPartitionState: unable to locate field 'zkVersion', which is mandatory in version " + _version);
} else {
_object.zkVersion = MessageUtil.jsonNodeToInt(_zkVersionNode, "UpdateMetadataPartitionState");
}
JsonNode _replicasNode = _node.get("replicas");
if (_replicasNode == null) {
throw new RuntimeException("UpdateMetadataPartitionState: unable to locate field 'replicas', which is mandatory in version " + _version);
} else {
if (!_replicasNode.isArray()) {
throw new RuntimeException("UpdateMetadataPartitionState expected a JSON array, but got " + _node.getNodeType());
}
ArrayList<Integer> _collection = new ArrayList<Integer>(_replicasNode.size());
_object.replicas = _collection;
for (JsonNode _element : _replicasNode) {
_collection.add(MessageUtil.jsonNodeToInt(_element, "UpdateMetadataPartitionState element"));
}
}
// offlineReplicas: mandatory from v4; empty list before that.
JsonNode _offlineReplicasNode = _node.get("offlineReplicas");
if (_offlineReplicasNode == null) {
if (_version >= 4) {
throw new RuntimeException("UpdateMetadataPartitionState: unable to locate field 'offlineReplicas', which is mandatory in version " + _version);
} else {
_object.offlineReplicas = new ArrayList<Integer>(0);
}
} else {
if (!_offlineReplicasNode.isArray()) {
throw new RuntimeException("UpdateMetadataPartitionState expected a JSON array, but got " + _node.getNodeType());
}
ArrayList<Integer> _collection = new ArrayList<Integer>(_offlineReplicasNode.size());
_object.offlineReplicas = _collection;
for (JsonNode _element : _offlineReplicasNode) {
_collection.add(MessageUtil.jsonNodeToInt(_element, "UpdateMetadataPartitionState element"));
}
}
return _object;
}
// Serializes a partition state, emitting only the fields present at _version.
public static JsonNode write(UpdateMetadataPartitionState _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
if (_version <= 4) {
_node.set("topicName", new TextNode(_object.topicName));
}
_node.set("partitionIndex", new IntNode(_object.partitionIndex));
_node.set("controllerEpoch", new IntNode(_object.controllerEpoch));
_node.set("leader", new IntNode(_object.leader));
_node.set("leaderEpoch", new IntNode(_object.leaderEpoch));
ArrayNode _isrArray = new ArrayNode(JsonNodeFactory.instance);
for (Integer _element : _object.isr) {
_isrArray.add(new IntNode(_element));
}
_node.set("isr", _isrArray);
_node.set("zkVersion", new IntNode(_object.zkVersion));
ArrayNode _replicasArray = new ArrayNode(JsonNodeFactory.instance);
for (Integer _element : _object.replicas) {
_replicasArray.add(new IntNode(_element));
}
_node.set("replicas", _replicasArray);
if (_version >= 4) {
ArrayNode _offlineReplicasArray = new ArrayNode(JsonNodeFactory.instance);
for (Integer _element : _object.offlineReplicas) {
_offlineReplicasArray.add(new IntNode(_element));
}
_node.set("offlineReplicas", _offlineReplicasArray);
}
return _node;
}
// Convenience overload: serializes with records included.
public static JsonNode write(UpdateMetadataPartitionState _object, short _version) {
return write(_object, _version, true);
}
}
// JSON converter for the UpdateMetadataTopicState sub-record. Topic states
// only exist from v5 (grouped layout); topicId is additionally gated on v7+.
public static class UpdateMetadataTopicStateJsonConverter {
public static UpdateMetadataTopicState read(JsonNode _node, short _version) {
UpdateMetadataTopicState _object = new UpdateMetadataTopicState();
// This record does not exist before v5 at all.
if (_version < 5) {
throw new UnsupportedVersionException("Can't read version " + _version + " of UpdateMetadataTopicState");
}
JsonNode _topicNameNode = _node.get("topicName");
if (_topicNameNode == null) {
throw new RuntimeException("UpdateMetadataTopicState: unable to locate field 'topicName', which is mandatory in version " + _version);
} else {
if (!_topicNameNode.isTextual()) {
throw new RuntimeException("UpdateMetadataTopicState expected a string type, but got " + _node.getNodeType());
}
_object.topicName = _topicNameNode.asText();
}
// topicId: mandatory from v7; defaults to the all-zero UUID before that.
JsonNode _topicIdNode = _node.get("topicId");
if (_topicIdNode == null) {
if (_version >= 7) {
throw new RuntimeException("UpdateMetadataTopicState: unable to locate field 'topicId', which is mandatory in version " + _version);
} else {
_object.topicId = Uuid.ZERO_UUID;
}
} else {
if (!_topicIdNode.isTextual()) {
throw new RuntimeException("UpdateMetadataTopicState expected a JSON string type, but got " + _node.getNodeType());
}
_object.topicId = Uuid.fromString(_topicIdNode.asText());
}
JsonNode _partitionStatesNode = _node.get("partitionStates");
if (_partitionStatesNode == null) {
throw new RuntimeException("UpdateMetadataTopicState: unable to locate field 'partitionStates', which is mandatory in version " + _version);
} else {
if (!_partitionStatesNode.isArray()) {
throw new RuntimeException("UpdateMetadataTopicState expected a JSON array, but got " + _node.getNodeType());
}
ArrayList<UpdateMetadataPartitionState> _collection = new ArrayList<UpdateMetadataPartitionState>(_partitionStatesNode.size());
_object.partitionStates = _collection;
for (JsonNode _element : _partitionStatesNode) {
_collection.add(UpdateMetadataPartitionStateJsonConverter.read(_element, _version));
}
}
return _object;
}
public static JsonNode write(UpdateMetadataTopicState _object, short _version, boolean _serializeRecords) {
if (_version < 5) {
throw new UnsupportedVersionException("Can't write version " + _version + " of UpdateMetadataTopicState");
}
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("topicName", new TextNode(_object.topicName));
if (_version >= 7) {
// UUIDs are serialized in their base64-like string form.
_node.set("topicId", new TextNode(_object.topicId.toString()));
}
ArrayNode _partitionStatesArray = new ArrayNode(JsonNodeFactory.instance);
for (UpdateMetadataPartitionState _element : _object.partitionStates) {
_partitionStatesArray.add(UpdateMetadataPartitionStateJsonConverter.write(_element, _version, _serializeRecords));
}
_node.set("partitionStates", _partitionStatesArray);
return _node;
}
// Convenience overload: serializes with records included.
public static JsonNode write(UpdateMetadataTopicState _object, short _version) {
return write(_object, _version, true);
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/UpdateMetadataResponseData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
/**
 * Auto-generated body of the UpdateMetadata response (API key 6), versions 0-8.
 * The payload is a single int16 error code; versions 6+ are "flexible" versions
 * that additionally carry a tagged-fields section (no known tags are defined,
 * so any tags read are kept as raw unknown tagged fields).
 */
public class UpdateMetadataResponseData implements ApiMessage {
// The error code, or 0 if there was no error (see SCHEMA_0 field doc).
short errorCode;
// Raw tags read from a flexible-version (v6+) message; lazily allocated.
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.")
);
// Versions 1-5 are byte-identical to version 0.
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema SCHEMA_2 = SCHEMA_1;
public static final Schema SCHEMA_3 = SCHEMA_2;
public static final Schema SCHEMA_4 = SCHEMA_3;
public static final Schema SCHEMA_5 = SCHEMA_4;
// Version 6 introduces the tagged-fields section (flexible versions).
public static final Schema SCHEMA_6 =
new Schema(
new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
TaggedFieldsSection.of(
)
);
public static final Schema SCHEMA_7 = SCHEMA_6;
public static final Schema SCHEMA_8 = SCHEMA_7;
// Indexed by version number: SCHEMAS[v] is the schema for version v.
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1,
SCHEMA_2,
SCHEMA_3,
SCHEMA_4,
SCHEMA_5,
SCHEMA_6,
SCHEMA_7,
SCHEMA_8
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 8;
/**
 * Deserializing constructor: reads the message from {@code _readable}
 * using the wire format of {@code _version}.
 */
public UpdateMetadataResponseData(Readable _readable, short _version) {
read(_readable, _version);
}
/** Default constructor: errorCode = 0 (no error). */
public UpdateMetadataResponseData() {
this.errorCode = (short) 0;
}
@Override
public short apiKey() {
return 6;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 8;
}
/**
 * Reads the message from the wire. For flexible versions (6+) the tagged
 * fields section is consumed; since no tags are known for this message,
 * every tag is preserved verbatim in {@code _unknownTaggedFields}.
 */
@Override
public void read(Readable _readable, short _version) {
this.errorCode = _readable.readShort();
this._unknownTaggedFields = null;
if (_version >= 6) {
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
// No known tags for this message; keep the raw bytes for round-tripping.
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
}
/**
 * Writes the message in the wire format of {@code _version}. Tagged fields
 * can only be emitted for flexible versions (6+); attempting to write them
 * with an older version is an error.
 */
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeShort(errorCode);
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_version >= 6) {
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
/**
 * Accumulates the serialized size of this message; must mirror {@link #write}
 * exactly so the pre-computed size matches the bytes actually written.
 */
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_size.addBytes(2);
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
if (_version >= 6) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof UpdateMetadataResponseData)) return false;
UpdateMetadataResponseData other = (UpdateMetadataResponseData) obj;
if (errorCode != other.errorCode) return false;
// Unknown tagged fields participate in equality (but not in hashCode).
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + errorCode;
return hashCode;
}
/**
 * Deep copy of the declared fields. Note: unknown tagged fields are not
 * copied, matching the generated-code pattern for these messages.
 */
@Override
public UpdateMetadataResponseData duplicate() {
UpdateMetadataResponseData _duplicate = new UpdateMetadataResponseData();
_duplicate.errorCode = errorCode;
return _duplicate;
}
@Override
public String toString() {
return "UpdateMetadataResponseData("
+ "errorCode=" + errorCode
+ ")";
}
public short errorCode() {
return this.errorCode;
}
/** Lazily allocates the backing list so callers can append tags directly. */
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
/** Fluent setter; returns {@code this} for chaining. */
public UpdateMetadataResponseData setErrorCode(short v) {
this.errorCode = v;
return this;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/UpdateMetadataResponseDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.UpdateMetadataResponseData.*;
/**
 * Auto-generated JSON converter for {@link UpdateMetadataResponseData}.
 * The JSON representation is version-independent here because the only
 * field (errorCode) exists in all versions; unknown tagged fields are
 * not represented in JSON.
 */
public class UpdateMetadataResponseDataJsonConverter {
/**
 * Builds an UpdateMetadataResponseData from a JSON object node.
 *
 * @throws RuntimeException if the mandatory "errorCode" field is absent
 *                          or not convertible to a short.
 */
public static UpdateMetadataResponseData read(JsonNode _node, short _version) {
UpdateMetadataResponseData _object = new UpdateMetadataResponseData();
JsonNode _errorCodeNode = _node.get("errorCode");
if (_errorCodeNode == null) {
throw new RuntimeException("UpdateMetadataResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
} else {
_object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "UpdateMetadataResponseData");
}
return _object;
}
/**
 * Serializes to JSON. {@code _serializeRecords} is unused for this message
 * (it has no record/bytes fields) but is part of the generated signature.
 */
public static JsonNode write(UpdateMetadataResponseData _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("errorCode", new ShortNode(_object.errorCode));
return _node;
}
/** Convenience overload: serializes records by default. */
public static JsonNode write(UpdateMetadataResponseData _object, short _version) {
return write(_object, _version, true);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/VoteRequestData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
/**
 * Auto-generated body of the KRaft Vote request (API key 52), version 0 only.
 * Version 0 is already a flexible version: all strings and arrays use the
 * "compact" encoding (unsigned-varint length written as N+1 so that 0 can
 * encode null), and every struct ends with a tagged-fields section.
 * Layout: nullable clusterId, then topics[] -> partitions[] of per-partition
 * candidate/offset data.
 */
public class VoteRequestData implements ApiMessage {
// Nullable cluster id (COMPACT_NULLABLE_STRING on the wire).
String clusterId;
// Non-nullable list of per-topic vote data.
List<TopicData> topics;
// Raw tags read from the tagged-fields section; lazily allocated.
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("cluster_id", Type.COMPACT_NULLABLE_STRING, ""),
new Field("topics", new CompactArrayOf(TopicData.SCHEMA_0), ""),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 0;
/** Deserializing constructor: reads the message using {@code _version}'s wire format. */
public VoteRequestData(Readable _readable, short _version) {
read(_readable, _version);
}
/** Default constructor: null clusterId, empty topics list. */
public VoteRequestData() {
this.clusterId = null;
this.topics = new ArrayList<TopicData>(0);
}
@Override
public short apiKey() {
return 52;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
@Override
public void read(Readable _readable, short _version) {
{
// Compact nullable string: stored length is actual length + 1, so -1 after
// decoding means null. Lengths above 0x7fff are rejected as invalid.
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
this.clusterId = null;
} else if (length > 0x7fff) {
throw new RuntimeException("string field clusterId had invalid length " + length);
} else {
this.clusterId = _readable.readString(length);
}
}
{
// Compact array: stored length is element count + 1; -1 would mean null,
// which is not allowed for this non-nullable field.
int arrayLength;
arrayLength = _readable.readUnsignedVarint() - 1;
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field topics was serialized as null");
} else {
// Sanity bound: each element needs at least one byte, so a claimed count
// larger than the remaining bytes indicates a corrupt message.
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<TopicData> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new TopicData(_readable, _version));
}
this.topics = newCollection;
}
}
this._unknownTaggedFields = null;
// Tagged-fields section; no known tags, so everything is kept raw.
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
if (clusterId == null) {
// Compact nullable string: 0 encodes null.
_writable.writeUnsignedVarint(0);
} else {
// UTF-8 bytes were cached by addSize(); write length+1 then the bytes.
byte[] _stringBytes = _cache.getSerializedValue(clusterId);
_writable.writeUnsignedVarint(_stringBytes.length + 1);
_writable.writeByteArray(_stringBytes);
}
_writable.writeUnsignedVarint(topics.size() + 1);
for (TopicData topicsElement : topics) {
topicsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
/**
 * Accumulates the serialized size; must mirror {@link #write} exactly.
 * Also caches each string's UTF-8 bytes so write() doesn't re-encode.
 */
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
if (clusterId == null) {
_size.addBytes(1);
} else {
byte[] _stringBytes = clusterId.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'clusterId' field is too long to be serialized");
}
_cache.cacheSerializedValue(clusterId, _stringBytes);
_size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
}
{
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1));
for (TopicData topicsElement : topics) {
topicsElement.addSize(_size, _cache, _version);
}
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof VoteRequestData)) return false;
VoteRequestData other = (VoteRequestData) obj;
if (this.clusterId == null) {
if (other.clusterId != null) return false;
} else {
if (!this.clusterId.equals(other.clusterId)) return false;
}
if (this.topics == null) {
if (other.topics != null) return false;
} else {
if (!this.topics.equals(other.topics)) return false;
}
// Unknown tagged fields participate in equality (but not in hashCode).
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (clusterId == null ? 0 : clusterId.hashCode());
hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
return hashCode;
}
/**
 * Deep copy: topics elements are duplicated; clusterId is shared (Strings
 * are immutable). Unknown tagged fields are not copied.
 */
@Override
public VoteRequestData duplicate() {
VoteRequestData _duplicate = new VoteRequestData();
if (clusterId == null) {
_duplicate.clusterId = null;
} else {
_duplicate.clusterId = clusterId;
}
ArrayList<TopicData> newTopics = new ArrayList<TopicData>(topics.size());
for (TopicData _element : topics) {
newTopics.add(_element.duplicate());
}
_duplicate.topics = newTopics;
return _duplicate;
}
@Override
public String toString() {
return "VoteRequestData("
+ "clusterId=" + ((clusterId == null) ? "null" : "'" + clusterId.toString() + "'")
+ ", topics=" + MessageUtil.deepToString(topics.iterator())
+ ")";
}
public String clusterId() {
return this.clusterId;
}
public List<TopicData> topics() {
return this.topics;
}
/** Lazily allocates the backing list so callers can append tags directly. */
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
/** Fluent setter; returns {@code this} for chaining. */
public VoteRequestData setClusterId(String v) {
this.clusterId = v;
return this;
}
/** Fluent setter; returns {@code this} for chaining. */
public VoteRequestData setTopics(List<TopicData> v) {
this.topics = v;
return this;
}
/**
 * Per-topic element of the Vote request: a topic name plus its partition
 * entries. Version 0 only.
 */
public static class TopicData implements Message {
// The topic name (COMPACT_STRING, non-nullable).
String topicName;
List<PartitionData> partitions;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("topic_name", Type.COMPACT_STRING, "The topic name."),
new Field("partitions", new CompactArrayOf(PartitionData.SCHEMA_0), ""),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 0;
public TopicData(Readable _readable, short _version) {
read(_readable, _version);
}
public TopicData() {
this.topicName = "";
this.partitions = new ArrayList<PartitionData>(0);
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of TopicData");
}
{
// Compact string: length+1 on the wire; null is invalid for this field.
int length;
length = _readable.readUnsignedVarint() - 1;
if (length < 0) {
throw new RuntimeException("non-nullable field topicName was serialized as null");
} else if (length > 0x7fff) {
throw new RuntimeException("string field topicName had invalid length " + length);
} else {
this.topicName = _readable.readString(length);
}
}
{
// Compact array of PartitionData; count+1 on the wire, null invalid.
int arrayLength;
arrayLength = _readable.readUnsignedVarint() - 1;
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field partitions was serialized as null");
} else {
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<PartitionData> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new PartitionData(_readable, _version));
}
this.partitions = newCollection;
}
}
this._unknownTaggedFields = null;
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
{
// UTF-8 bytes were cached by addSize(); write length+1 then the bytes.
byte[] _stringBytes = _cache.getSerializedValue(topicName);
_writable.writeUnsignedVarint(_stringBytes.length + 1);
_writable.writeByteArray(_stringBytes);
}
_writable.writeUnsignedVarint(partitions.size() + 1);
for (PartitionData partitionsElement : partitions) {
partitionsElement.write(_writable, _cache, _version);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
/** Size accumulation; must mirror write() and caches the topicName bytes. */
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
if (_version > 0) {
throw new UnsupportedVersionException("Can't size version " + _version + " of TopicData");
}
{
byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'topicName' field is too long to be serialized");
}
_cache.cacheSerializedValue(topicName, _stringBytes);
_size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
}
{
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1));
for (PartitionData partitionsElement : partitions) {
partitionsElement.addSize(_size, _cache, _version);
}
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof TopicData)) return false;
TopicData other = (TopicData) obj;
if (this.topicName == null) {
if (other.topicName != null) return false;
} else {
if (!this.topicName.equals(other.topicName)) return false;
}
if (this.partitions == null) {
if (other.partitions != null) return false;
} else {
if (!this.partitions.equals(other.partitions)) return false;
}
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode());
hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
return hashCode;
}
/** Deep copy; partitions elements are duplicated, tagged fields are not. */
@Override
public TopicData duplicate() {
TopicData _duplicate = new TopicData();
_duplicate.topicName = topicName;
ArrayList<PartitionData> newPartitions = new ArrayList<PartitionData>(partitions.size());
for (PartitionData _element : partitions) {
newPartitions.add(_element.duplicate());
}
_duplicate.partitions = newPartitions;
return _duplicate;
}
@Override
public String toString() {
return "TopicData("
+ "topicName=" + ((topicName == null) ? "null" : "'" + topicName.toString() + "'")
+ ", partitions=" + MessageUtil.deepToString(partitions.iterator())
+ ")";
}
public String topicName() {
return this.topicName;
}
public List<PartitionData> partitions() {
return this.partitions;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public TopicData setTopicName(String v) {
this.topicName = v;
return this;
}
public TopicData setPartitions(List<PartitionData> v) {
this.partitions = v;
return this;
}
}
/**
 * Per-partition element of the Vote request: the candidate's bumped epoch
 * and id, plus the epoch/offset of its last metadata-log record. All fields
 * are fixed-width integers on the wire. Version 0 only.
 */
public static class PartitionData implements Message {
int partitionIndex;
int candidateEpoch;
int candidateId;
int lastOffsetEpoch;
long lastOffset;
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("partition_index", Type.INT32, "The partition index."),
new Field("candidate_epoch", Type.INT32, "The bumped epoch of the candidate sending the request"),
new Field("candidate_id", Type.INT32, "The ID of the voter sending the request"),
new Field("last_offset_epoch", Type.INT32, "The epoch of the last record written to the metadata log"),
new Field("last_offset", Type.INT64, "The offset of the last record written to the metadata log"),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 0;
public PartitionData(Readable _readable, short _version) {
read(_readable, _version);
}
public PartitionData() {
this.partitionIndex = 0;
this.candidateEpoch = 0;
this.candidateId = 0;
this.lastOffsetEpoch = 0;
this.lastOffset = 0L;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 0;
}
@Override
public void read(Readable _readable, short _version) {
if (_version > 0) {
throw new UnsupportedVersionException("Can't read version " + _version + " of PartitionData");
}
this.partitionIndex = _readable.readInt();
this.candidateEpoch = _readable.readInt();
this.candidateId = _readable.readInt();
this.lastOffsetEpoch = _readable.readInt();
this.lastOffset = _readable.readLong();
this._unknownTaggedFields = null;
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeInt(partitionIndex);
_writable.writeInt(candidateEpoch);
_writable.writeInt(candidateId);
_writable.writeInt(lastOffsetEpoch);
_writable.writeLong(lastOffset);
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
}
/** Size accumulation; 4 bytes per INT32 field, 8 for the INT64 lastOffset. */
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
if (_version > 0) {
throw new UnsupportedVersionException("Can't size version " + _version + " of PartitionData");
}
_size.addBytes(4);
_size.addBytes(4);
_size.addBytes(4);
_size.addBytes(4);
_size.addBytes(8);
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof PartitionData)) return false;
PartitionData other = (PartitionData) obj;
if (partitionIndex != other.partitionIndex) return false;
if (candidateEpoch != other.candidateEpoch) return false;
if (candidateId != other.candidateId) return false;
if (lastOffsetEpoch != other.lastOffsetEpoch) return false;
if (lastOffset != other.lastOffset) return false;
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + partitionIndex;
hashCode = 31 * hashCode + candidateEpoch;
hashCode = 31 * hashCode + candidateId;
hashCode = 31 * hashCode + lastOffsetEpoch;
// Fold the 64-bit lastOffset into 32 bits (high word XOR low word).
hashCode = 31 * hashCode + ((int) (lastOffset >> 32) ^ (int) lastOffset);
return hashCode;
}
@Override
public PartitionData duplicate() {
PartitionData _duplicate = new PartitionData();
_duplicate.partitionIndex = partitionIndex;
_duplicate.candidateEpoch = candidateEpoch;
_duplicate.candidateId = candidateId;
_duplicate.lastOffsetEpoch = lastOffsetEpoch;
_duplicate.lastOffset = lastOffset;
return _duplicate;
}
@Override
public String toString() {
return "PartitionData("
+ "partitionIndex=" + partitionIndex
+ ", candidateEpoch=" + candidateEpoch
+ ", candidateId=" + candidateId
+ ", lastOffsetEpoch=" + lastOffsetEpoch
+ ", lastOffset=" + lastOffset
+ ")";
}
public int partitionIndex() {
return this.partitionIndex;
}
public int candidateEpoch() {
return this.candidateEpoch;
}
public int candidateId() {
return this.candidateId;
}
public int lastOffsetEpoch() {
return this.lastOffsetEpoch;
}
public long lastOffset() {
return this.lastOffset;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public PartitionData setPartitionIndex(int v) {
this.partitionIndex = v;
return this;
}
public PartitionData setCandidateEpoch(int v) {
this.candidateEpoch = v;
return this;
}
public PartitionData setCandidateId(int v) {
this.candidateId = v;
return this;
}
public PartitionData setLastOffsetEpoch(int v) {
this.lastOffsetEpoch = v;
return this;
}
public PartitionData setLastOffset(long v) {
this.lastOffset = v;
return this;
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/VoteRequestDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.VoteRequestData.*;
public class VoteRequestDataJsonConverter {
/**
 * Builds a VoteRequestData from a JSON object node.
 * "clusterId" must be present but may be JSON null (the field is nullable);
 * "topics" must be present and must be a JSON array.
 *
 * @throws RuntimeException if a mandatory field is missing or has the wrong
 *                          JSON node type.
 */
public static VoteRequestData read(JsonNode _node, short _version) {
VoteRequestData _object = new VoteRequestData();
JsonNode _clusterIdNode = _node.get("clusterId");
if (_clusterIdNode == null) {
throw new RuntimeException("VoteRequestData: unable to locate field 'clusterId', which is mandatory in version " + _version);
} else {
if (_clusterIdNode.isNull()) {
_object.clusterId = null;
} else {
if (!_clusterIdNode.isTextual()) {
throw new RuntimeException("VoteRequestData expected a string type, but got " + _node.getNodeType());
}
_object.clusterId = _clusterIdNode.asText();
}
}
JsonNode _topicsNode = _node.get("topics");
if (_topicsNode == null) {
throw new RuntimeException("VoteRequestData: unable to locate field 'topics', which is mandatory in version " + _version);
} else {
if (!_topicsNode.isArray()) {
throw new RuntimeException("VoteRequestData expected a JSON array, but got " + _node.getNodeType());
}
// Presize from the JSON array and delegate each element to the nested converter.
ArrayList<TopicData> _collection = new ArrayList<TopicData>(_topicsNode.size());
_object.topics = _collection;
for (JsonNode _element : _topicsNode) {
_collection.add(TopicDataJsonConverter.read(_element, _version));
}
}
return _object;
}
/**
 * Serializes a VoteRequestData to JSON. A null clusterId is emitted as JSON
 * null; topics are delegated element-by-element to the nested converter.
 * {@code _serializeRecords} is passed through to the nested converters.
 */
public static JsonNode write(VoteRequestData _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
if (_object.clusterId == null) {
_node.set("clusterId", NullNode.instance);
} else {
_node.set("clusterId", new TextNode(_object.clusterId));
}
ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
for (TopicData _element : _object.topics) {
_topicsArray.add(TopicDataJsonConverter.write(_element, _version, _serializeRecords));
}
_node.set("topics", _topicsArray);
return _node;
}
/** Convenience overload: serializes records by default. */
public static JsonNode write(VoteRequestData _object, short _version) {
return write(_object, _version, true);
}
/**
 * Auto-generated JSON converter for {@link VoteRequestData.PartitionData}.
 * All five fields are mandatory integers; conversion errors are reported
 * by the MessageUtil helpers.
 */
public static class PartitionDataJsonConverter {
/**
 * Builds a PartitionData from a JSON object node.
 *
 * @throws RuntimeException if any mandatory field is missing or not
 *                          convertible to the expected numeric type.
 */
public static PartitionData read(JsonNode _node, short _version) {
PartitionData _object = new PartitionData();
JsonNode _partitionIndexNode = _node.get("partitionIndex");
if (_partitionIndexNode == null) {
throw new RuntimeException("PartitionData: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
} else {
_object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "PartitionData");
}
JsonNode _candidateEpochNode = _node.get("candidateEpoch");
if (_candidateEpochNode == null) {
throw new RuntimeException("PartitionData: unable to locate field 'candidateEpoch', which is mandatory in version " + _version);
} else {
_object.candidateEpoch = MessageUtil.jsonNodeToInt(_candidateEpochNode, "PartitionData");
}
JsonNode _candidateIdNode = _node.get("candidateId");
if (_candidateIdNode == null) {
throw new RuntimeException("PartitionData: unable to locate field 'candidateId', which is mandatory in version " + _version);
} else {
_object.candidateId = MessageUtil.jsonNodeToInt(_candidateIdNode, "PartitionData");
}
JsonNode _lastOffsetEpochNode = _node.get("lastOffsetEpoch");
if (_lastOffsetEpochNode == null) {
throw new RuntimeException("PartitionData: unable to locate field 'lastOffsetEpoch', which is mandatory in version " + _version);
} else {
_object.lastOffsetEpoch = MessageUtil.jsonNodeToInt(_lastOffsetEpochNode, "PartitionData");
}
JsonNode _lastOffsetNode = _node.get("lastOffset");
if (_lastOffsetNode == null) {
throw new RuntimeException("PartitionData: unable to locate field 'lastOffset', which is mandatory in version " + _version);
} else {
_object.lastOffset = MessageUtil.jsonNodeToLong(_lastOffsetNode, "PartitionData");
}
return _object;
}
/**
 * Serializes to JSON. {@code _serializeRecords} is unused here (no record
 * fields) but is part of the generated signature.
 */
public static JsonNode write(PartitionData _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("partitionIndex", new IntNode(_object.partitionIndex));
_node.set("candidateEpoch", new IntNode(_object.candidateEpoch));
_node.set("candidateId", new IntNode(_object.candidateId));
_node.set("lastOffsetEpoch", new IntNode(_object.lastOffsetEpoch));
_node.set("lastOffset", new LongNode(_object.lastOffset));
return _node;
}
/** Convenience overload: serializes records by default. */
public static JsonNode write(PartitionData _object, short _version) {
return write(_object, _version, true);
}
}
    // Converts VoteRequestData.TopicData to and from its JSON representation.
    public static class TopicDataJsonConverter {
        // Deserializes a TopicData from a JSON object; both fields are mandatory.
        // NOTE(review): on a type mismatch the error message reports _node.getNodeType()
        // (the parent object's type), not the offending field node's type — an
        // artifact of the code generator, preserved as-is.
        public static TopicData read(JsonNode _node, short _version) {
            TopicData _object = new TopicData();
            JsonNode _topicNameNode = _node.get("topicName");
            if (_topicNameNode == null) {
                throw new RuntimeException("TopicData: unable to locate field 'topicName', which is mandatory in version " + _version);
            } else {
                if (!_topicNameNode.isTextual()) {
                    throw new RuntimeException("TopicData expected a string type, but got " + _node.getNodeType());
                }
                _object.topicName = _topicNameNode.asText();
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("TopicData: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    throw new RuntimeException("TopicData expected a JSON array, but got " + _node.getNodeType());
                }
                // Presize from the JSON array, then delegate each element to the partition converter.
                ArrayList<PartitionData> _collection = new ArrayList<PartitionData>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(PartitionDataJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }
        // Serializes a TopicData to JSON, delegating partition elements to the partition converter.
        public static JsonNode write(TopicData _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("topicName", new TextNode(_object.topicName));
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (PartitionData _element : _object.partitions) {
                _partitionsArray.add(PartitionDataJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }
        // Convenience overload that always serializes record payloads.
        public static JsonNode write(TopicData _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
/**
 * Auto-generated data model for the Vote response (api key 52, version 0 only).
 * Version 0 is a flexible version: arrays and strings use compact (unsigned
 * varint, length + 1) encodings, and every structure is terminated by a
 * tagged-field section.
 */
public class VoteResponseData implements ApiMessage {
    // The top-level error code.
    short errorCode;
    // Per-topic vote results.
    List<TopicData> topics;
    // Tagged fields with tags this version does not know; preserved so the
    // message round-trips without losing data. Lazily initialized.
    private List<RawTaggedField> _unknownTaggedFields;
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("error_code", Type.INT16, "The top level error code."),
            new Field("topics", new CompactArrayOf(TopicData.SCHEMA_0), ""),
            TaggedFieldsSection.of(
            )
        );
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };
    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;
    // Deserializing constructor: populates the instance from the wire format.
    public VoteResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }
    // Default constructor: zero error code and an empty topics list.
    public VoteResponseData() {
        this.errorCode = (short) 0;
        this.topics = new ArrayList<TopicData>(0);
    }
    @Override
    public short apiKey() {
        return 52;
    }
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 0;
    }
    // Reads the wire format: int16 errorCode, compact array of TopicData,
    // then the tagged-field section.
    @Override
    public void read(Readable _readable, short _version) {
        this.errorCode = _readable.readShort();
        {
            int arrayLength;
            // Compact array: length is encoded as (size + 1); 0 would mean null.
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field topics was serialized as null");
            } else {
                // Guard against hostile lengths: cannot be larger than the bytes remaining.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<TopicData> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new TopicData(_readable, _version));
                }
                this.topics = newCollection;
            }
        }
        this._unknownTaggedFields = null;
        // No tags are defined for this message, so every tagged field read here is "unknown".
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }
    // Writes the wire format; the mirror image of read().
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeShort(errorCode);
        // Compact array length: size + 1 (0 is reserved for null).
        _writable.writeUnsignedVarint(topics.size() + 1);
        for (TopicData topicsElement : topics) {
            topicsElement.write(_writable, _cache, _version);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }
    // Accumulates the serialized size without writing; must stay in lockstep with write().
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(2);
        {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1));
            for (TopicData topicsElement : topics) {
                topicsElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }
    // Equality includes unknown tagged fields so round-tripped messages compare equal.
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof VoteResponseData)) return false;
        VoteResponseData other = (VoteResponseData) obj;
        if (errorCode != other.errorCode) return false;
        if (this.topics == null) {
            if (other.topics != null) return false;
        } else {
            if (!this.topics.equals(other.topics)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }
    // NOTE: hashCode intentionally omits _unknownTaggedFields (generator convention);
    // equal objects still hash equally because compareRawTaggedFields treats
    // null and empty as equivalent only alongside equal known fields.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + errorCode;
        hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
        return hashCode;
    }
    // Deep copy: nested TopicData elements are duplicated, not shared.
    @Override
    public VoteResponseData duplicate() {
        VoteResponseData _duplicate = new VoteResponseData();
        _duplicate.errorCode = errorCode;
        ArrayList<TopicData> newTopics = new ArrayList<TopicData>(topics.size());
        for (TopicData _element : topics) {
            newTopics.add(_element.duplicate());
        }
        _duplicate.topics = newTopics;
        return _duplicate;
    }
    @Override
    public String toString() {
        return "VoteResponseData("
            + "errorCode=" + errorCode
            + ", topics=" + MessageUtil.deepToString(topics.iterator())
            + ")";
    }
    public short errorCode() {
        return this.errorCode;
    }
    public List<TopicData> topics() {
        return this.topics;
    }
    // Lazily creates the unknown-tagged-fields list; callers may append to it.
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }
    public VoteResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }
    public VoteResponseData setTopics(List<TopicData> v) {
        this.topics = v;
        return this;
    }
    /** Vote results for the partitions of a single topic. */
    public static class TopicData implements Message {
        // The topic name.
        String topicName;
        // Per-partition vote results.
        List<PartitionData> partitions;
        private List<RawTaggedField> _unknownTaggedFields;
        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("topic_name", Type.COMPACT_STRING, "The topic name."),
                new Field("partitions", new CompactArrayOf(PartitionData.SCHEMA_0), ""),
                TaggedFieldsSection.of(
                )
            );
        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0
        };
        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 0;
        public TopicData(Readable _readable, short _version) {
            read(_readable, _version);
        }
        public TopicData() {
            this.topicName = "";
            this.partitions = new ArrayList<PartitionData>(0);
        }
        @Override
        public short lowestSupportedVersion() {
            return 0;
        }
        @Override
        public short highestSupportedVersion() {
            return 0;
        }
        // Reads: compact string topicName, compact array of PartitionData, tagged fields.
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of TopicData");
            }
            {
                int length;
                // Compact string: length encoded as (bytes + 1); 0 would mean null.
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    throw new RuntimeException("non-nullable field topicName was serialized as null");
                } else if (length > 0x7fff) {
                    // Strings are capped at Short.MAX_VALUE bytes on the wire.
                    throw new RuntimeException("string field topicName had invalid length " + length);
                } else {
                    this.topicName = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitions was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<PartitionData> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new PartitionData(_readable, _version));
                    }
                    this.partitions = newCollection;
                }
            }
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                // UTF-8 bytes were cached by addSize(); write() must always run after addSize().
                byte[] _stringBytes = _cache.getSerializedValue(topicName);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeUnsignedVarint(partitions.size() + 1);
            for (PartitionData partitionsElement : partitions) {
                partitionsElement.write(_writable, _cache, _version);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of TopicData");
            }
            {
                byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'topicName' field is too long to be serialized");
                }
                // Cache the encoded bytes so write() does not re-encode the string.
                _cache.cacheSerializedValue(topicName, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
            {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1));
                for (PartitionData partitionsElement : partitions) {
                    partitionsElement.addSize(_size, _cache, _version);
                }
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }
        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof TopicData)) return false;
            TopicData other = (TopicData) obj;
            if (this.topicName == null) {
                if (other.topicName != null) return false;
            } else {
                if (!this.topicName.equals(other.topicName)) return false;
            }
            if (this.partitions == null) {
                if (other.partitions != null) return false;
            } else {
                if (!this.partitions.equals(other.partitions)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }
        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode());
            hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
            return hashCode;
        }
        @Override
        public TopicData duplicate() {
            TopicData _duplicate = new TopicData();
            _duplicate.topicName = topicName;
            ArrayList<PartitionData> newPartitions = new ArrayList<PartitionData>(partitions.size());
            for (PartitionData _element : partitions) {
                newPartitions.add(_element.duplicate());
            }
            _duplicate.partitions = newPartitions;
            return _duplicate;
        }
        @Override
        public String toString() {
            return "TopicData("
                + "topicName=" + ((topicName == null) ? "null" : "'" + topicName.toString() + "'")
                + ", partitions=" + MessageUtil.deepToString(partitions.iterator())
                + ")";
        }
        public String topicName() {
            return this.topicName;
        }
        public List<PartitionData> partitions() {
            return this.partitions;
        }
        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }
        public TopicData setTopicName(String v) {
            this.topicName = v;
            return this;
        }
        public TopicData setPartitions(List<PartitionData> v) {
            this.partitions = v;
            return this;
        }
    }
    /** The vote result for one partition: fixed-width primitive fields plus tagged fields. */
    public static class PartitionData implements Message {
        // The partition index.
        int partitionIndex;
        short errorCode;
        // The ID of the current leader, or -1 if the leader is unknown.
        int leaderId;
        // The latest known leader epoch.
        int leaderEpoch;
        // True if the vote was granted, false otherwise.
        boolean voteGranted;
        private List<RawTaggedField> _unknownTaggedFields;
        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("partition_index", Type.INT32, "The partition index."),
                new Field("error_code", Type.INT16, ""),
                new Field("leader_id", Type.INT32, "The ID of the current leader or -1 if the leader is unknown."),
                new Field("leader_epoch", Type.INT32, "The latest known leader epoch"),
                new Field("vote_granted", Type.BOOLEAN, "True if the vote was granted and false otherwise"),
                TaggedFieldsSection.of(
                )
            );
        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0
        };
        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 0;
        public PartitionData(Readable _readable, short _version) {
            read(_readable, _version);
        }
        public PartitionData() {
            this.partitionIndex = 0;
            this.errorCode = (short) 0;
            this.leaderId = 0;
            this.leaderEpoch = 0;
            this.voteGranted = false;
        }
        @Override
        public short lowestSupportedVersion() {
            return 0;
        }
        @Override
        public short highestSupportedVersion() {
            return 0;
        }
        // Reads the fixed-width fields in wire order, then the tagged-field section.
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of PartitionData");
            }
            this.partitionIndex = _readable.readInt();
            this.errorCode = _readable.readShort();
            this.leaderId = _readable.readInt();
            this.leaderEpoch = _readable.readInt();
            // Booleans are one byte on the wire; any non-zero value is true.
            this.voteGranted = _readable.readByte() != 0;
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            _writable.writeInt(partitionIndex);
            _writable.writeShort(errorCode);
            _writable.writeInt(leaderId);
            _writable.writeInt(leaderEpoch);
            _writable.writeByte(voteGranted ? (byte) 1 : (byte) 0);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of PartitionData");
            }
            // Fixed-width fields: int32 + int16 + int32 + int32 + boolean byte.
            _size.addBytes(4);
            _size.addBytes(2);
            _size.addBytes(4);
            _size.addBytes(4);
            _size.addBytes(1);
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }
        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof PartitionData)) return false;
            PartitionData other = (PartitionData) obj;
            if (partitionIndex != other.partitionIndex) return false;
            if (errorCode != other.errorCode) return false;
            if (leaderId != other.leaderId) return false;
            if (leaderEpoch != other.leaderEpoch) return false;
            if (voteGranted != other.voteGranted) return false;
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }
        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + partitionIndex;
            hashCode = 31 * hashCode + errorCode;
            hashCode = 31 * hashCode + leaderId;
            hashCode = 31 * hashCode + leaderEpoch;
            // 1231/1237: the conventional Boolean.hashCode() primes.
            hashCode = 31 * hashCode + (voteGranted ? 1231 : 1237);
            return hashCode;
        }
        @Override
        public PartitionData duplicate() {
            PartitionData _duplicate = new PartitionData();
            _duplicate.partitionIndex = partitionIndex;
            _duplicate.errorCode = errorCode;
            _duplicate.leaderId = leaderId;
            _duplicate.leaderEpoch = leaderEpoch;
            _duplicate.voteGranted = voteGranted;
            return _duplicate;
        }
        @Override
        public String toString() {
            return "PartitionData("
                + "partitionIndex=" + partitionIndex
                + ", errorCode=" + errorCode
                + ", leaderId=" + leaderId
                + ", leaderEpoch=" + leaderEpoch
                + ", voteGranted=" + (voteGranted ? "true" : "false")
                + ")";
        }
        public int partitionIndex() {
            return this.partitionIndex;
        }
        public short errorCode() {
            return this.errorCode;
        }
        public int leaderId() {
            return this.leaderId;
        }
        public int leaderEpoch() {
            return this.leaderEpoch;
        }
        public boolean voteGranted() {
            return this.voteGranted;
        }
        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }
        public PartitionData setPartitionIndex(int v) {
            this.partitionIndex = v;
            return this;
        }
        public PartitionData setErrorCode(short v) {
            this.errorCode = v;
            return this;
        }
        public PartitionData setLeaderId(int v) {
            this.leaderId = v;
            return this;
        }
        public PartitionData setLeaderEpoch(int v) {
            this.leaderEpoch = v;
            return this;
        }
        public PartitionData setVoteGranted(boolean v) {
            this.voteGranted = v;
            return this;
        }
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.VoteResponseData.*;
/**
 * Auto-generated converter between VoteResponseData and its JSON
 * representation, used for request logging and protocol tooling.
 * All fields are mandatory in version 0; a missing field raises a
 * RuntimeException naming the field and version.
 */
public class VoteResponseDataJsonConverter {
    // Deserializes a VoteResponseData from a JSON object.
    public static VoteResponseData read(JsonNode _node, short _version) {
        VoteResponseData _object = new VoteResponseData();
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("VoteResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "VoteResponseData");
        }
        JsonNode _topicsNode = _node.get("topics");
        if (_topicsNode == null) {
            throw new RuntimeException("VoteResponseData: unable to locate field 'topics', which is mandatory in version " + _version);
        } else {
            if (!_topicsNode.isArray()) {
                throw new RuntimeException("VoteResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            // Presize from the JSON array, then delegate each element to the topic converter.
            ArrayList<TopicData> _collection = new ArrayList<TopicData>(_topicsNode.size());
            _object.topics = _collection;
            for (JsonNode _element : _topicsNode) {
                _collection.add(TopicDataJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }
    // Serializes a VoteResponseData to JSON. _serializeRecords is threaded through
    // to nested converters for converter-API uniformity.
    public static JsonNode write(VoteResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("errorCode", new ShortNode(_object.errorCode));
        ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
        for (TopicData _element : _object.topics) {
            _topicsArray.add(TopicDataJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("topics", _topicsArray);
        return _node;
    }
    // Convenience overload that always serializes record payloads.
    public static JsonNode write(VoteResponseData _object, short _version) {
        return write(_object, _version, true);
    }
    // Converts VoteResponseData.PartitionData to and from JSON.
    public static class PartitionDataJsonConverter {
        public static PartitionData read(JsonNode _node, short _version) {
            PartitionData _object = new PartitionData();
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("PartitionData: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "PartitionData");
            }
            JsonNode _errorCodeNode = _node.get("errorCode");
            if (_errorCodeNode == null) {
                throw new RuntimeException("PartitionData: unable to locate field 'errorCode', which is mandatory in version " + _version);
            } else {
                _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "PartitionData");
            }
            JsonNode _leaderIdNode = _node.get("leaderId");
            if (_leaderIdNode == null) {
                throw new RuntimeException("PartitionData: unable to locate field 'leaderId', which is mandatory in version " + _version);
            } else {
                _object.leaderId = MessageUtil.jsonNodeToInt(_leaderIdNode, "PartitionData");
            }
            JsonNode _leaderEpochNode = _node.get("leaderEpoch");
            if (_leaderEpochNode == null) {
                throw new RuntimeException("PartitionData: unable to locate field 'leaderEpoch', which is mandatory in version " + _version);
            } else {
                _object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "PartitionData");
            }
            JsonNode _voteGrantedNode = _node.get("voteGranted");
            if (_voteGrantedNode == null) {
                throw new RuntimeException("PartitionData: unable to locate field 'voteGranted', which is mandatory in version " + _version);
            } else {
                // NOTE(review): the mismatch message reports _node.getNodeType() (the
                // parent's type), not the field node's — generator artifact, kept as-is.
                if (!_voteGrantedNode.isBoolean()) {
                    throw new RuntimeException("PartitionData expected Boolean type, but got " + _node.getNodeType());
                }
                _object.voteGranted = _voteGrantedNode.asBoolean();
            }
            return _object;
        }
        public static JsonNode write(PartitionData _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            _node.set("errorCode", new ShortNode(_object.errorCode));
            _node.set("leaderId", new IntNode(_object.leaderId));
            _node.set("leaderEpoch", new IntNode(_object.leaderEpoch));
            _node.set("voteGranted", BooleanNode.valueOf(_object.voteGranted));
            return _node;
        }
        public static JsonNode write(PartitionData _object, short _version) {
            return write(_object, _version, true);
        }
    }
    // Converts VoteResponseData.TopicData to and from JSON.
    public static class TopicDataJsonConverter {
        public static TopicData read(JsonNode _node, short _version) {
            TopicData _object = new TopicData();
            JsonNode _topicNameNode = _node.get("topicName");
            if (_topicNameNode == null) {
                throw new RuntimeException("TopicData: unable to locate field 'topicName', which is mandatory in version " + _version);
            } else {
                if (!_topicNameNode.isTextual()) {
                    throw new RuntimeException("TopicData expected a string type, but got " + _node.getNodeType());
                }
                _object.topicName = _topicNameNode.asText();
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("TopicData: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    throw new RuntimeException("TopicData expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<PartitionData> _collection = new ArrayList<PartitionData>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(PartitionDataJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }
        public static JsonNode write(TopicData _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("topicName", new TextNode(_object.topicName));
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (PartitionData _element : _object.partitions) {
                _partitionsArray.add(PartitionDataJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }
        public static JsonNode write(TopicData _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
public class WriteTxnMarkersRequestData implements ApiMessage {
    // The transaction markers to be written.
    List<WritableTxnMarker> markers;
    // Tagged fields with unrecognized tags; preserved for round-tripping
    // (only possible in version 1+, the flexible version). Lazily initialized.
    private List<RawTaggedField> _unknownTaggedFields;
    // Version 0: plain (int32-length) array, no tagged fields.
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("markers", new ArrayOf(WritableTxnMarker.SCHEMA_0), "The transaction markers to be written.")
        );
    // Version 1: flexible version — compact (varint-length) array plus a tagged-field section.
    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("markers", new CompactArrayOf(WritableTxnMarker.SCHEMA_1), "The transaction markers to be written."),
            TaggedFieldsSection.of(
            )
        );
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };
    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 1;
    // Deserializing constructor: populates the instance from the wire format.
    public WriteTxnMarkersRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }
    // Default constructor: empty marker list.
    public WriteTxnMarkersRequestData() {
        this.markers = new ArrayList<WritableTxnMarker>(0);
    }
    @Override
    public short apiKey() {
        return 27;
    }
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }
    @Override
    public short highestSupportedVersion() {
        return 1;
    }
    // Reads the wire format. Version 1+ uses the compact array encoding
    // (unsigned varint, length + 1) and a trailing tagged-field section;
    // version 0 uses a fixed int32 array length and has no tagged fields.
    @Override
    public void read(Readable _readable, short _version) {
        {
            if (_version >= 1) {
                int arrayLength;
                // Compact array: 0 on the wire would mean null, which this field forbids.
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field markers was serialized as null");
                } else {
                    // Guard against hostile lengths: cannot exceed the bytes remaining.
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<WritableTxnMarker> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new WritableTxnMarker(_readable, _version));
                    }
                    this.markers = newCollection;
                }
            } else {
                int arrayLength;
                // Non-flexible encoding: plain int32 length, -1 would mean null.
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field markers was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<WritableTxnMarker> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new WritableTxnMarker(_readable, _version));
                    }
                    this.markers = newCollection;
                }
            }
        }
        this._unknownTaggedFields = null;
        // Tagged fields exist only in flexible versions (1+).
        if (_version >= 1) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
if (_version >= 1) {
_writable.writeUnsignedVarint(markers.size() + 1);
for (WritableTxnMarker markersElement : markers) {
markersElement.write(_writable, _cache, _version);
}
} else {
_writable.writeInt(markers.size());
for (WritableTxnMarker markersElement : markers) {
markersElement.write(_writable, _cache, _version);
}
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_version >= 1) {
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
{
if (_version >= 1) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(markers.size() + 1));
} else {
_size.addBytes(4);
}
for (WritableTxnMarker markersElement : markers) {
markersElement.addSize(_size, _cache, _version);
}
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
if (_version >= 1) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof WriteTxnMarkersRequestData)) return false;
WriteTxnMarkersRequestData other = (WriteTxnMarkersRequestData) obj;
if (this.markers == null) {
if (other.markers != null) return false;
} else {
if (!this.markers.equals(other.markers)) return false;
}
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (markers == null ? 0 : markers.hashCode());
return hashCode;
}
@Override
public WriteTxnMarkersRequestData duplicate() {
WriteTxnMarkersRequestData _duplicate = new WriteTxnMarkersRequestData();
ArrayList<WritableTxnMarker> newMarkers = new ArrayList<WritableTxnMarker>(markers.size());
for (WritableTxnMarker _element : markers) {
newMarkers.add(_element.duplicate());
}
_duplicate.markers = newMarkers;
return _duplicate;
}
@Override
public String toString() {
return "WriteTxnMarkersRequestData("
+ "markers=" + MessageUtil.deepToString(markers.iterator())
+ ")";
}
public List<WritableTxnMarker> markers() {
return this.markers;
}
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public WriteTxnMarkersRequestData setMarkers(List<WritableTxnMarker> v) {
this.markers = v;
return this;
}
/**
 * One transaction marker to write: identifies the producer (id + epoch),
 * the transaction outcome (commit/abort), the affected topics, and the
 * coordinator epoch.  Generated code — wire-format layout must not change.
 */
public static class WritableTxnMarker implements Message {
// The current producer ID.
long producerId;
// The current epoch associated with the producer ID.
short producerEpoch;
// The transaction result: false = ABORT, true = COMMIT.
boolean transactionResult;
// Each topic that we want to write transaction marker(s) for.
List<WritableTxnMarkerTopic> topics;
// Epoch of the transaction-state partition hosted by this coordinator.
int coordinatorEpoch;
// Tagged fields with unrecognized tags, preserved as raw bytes.
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("producer_id", Type.INT64, "The current producer ID."),
new Field("producer_epoch", Type.INT16, "The current epoch associated with the producer ID."),
new Field("transaction_result", Type.BOOLEAN, "The result of the transaction to write to the partitions (false = ABORT, true = COMMIT)."),
new Field("topics", new ArrayOf(WritableTxnMarkerTopic.SCHEMA_0), "Each topic that we want to write transaction marker(s) for."),
new Field("coordinator_epoch", Type.INT32, "Epoch associated with the transaction state partition hosted by this transaction coordinator")
);
// v1 differs only in compact (flexible) encodings and the tagged-fields section.
public static final Schema SCHEMA_1 =
new Schema(
new Field("producer_id", Type.INT64, "The current producer ID."),
new Field("producer_epoch", Type.INT16, "The current epoch associated with the producer ID."),
new Field("transaction_result", Type.BOOLEAN, "The result of the transaction to write to the partitions (false = ABORT, true = COMMIT)."),
new Field("topics", new CompactArrayOf(WritableTxnMarkerTopic.SCHEMA_1), "Each topic that we want to write transaction marker(s) for."),
new Field("coordinator_epoch", Type.INT32, "Epoch associated with the transaction state partition hosted by this transaction coordinator"),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 1;
/** Deserializing constructor. */
public WritableTxnMarker(Readable _readable, short _version) {
read(_readable, _version);
}
/** Default constructor: zeroed fields and an empty topics list. */
public WritableTxnMarker() {
this.producerId = 0L;
this.producerEpoch = (short) 0;
this.transactionResult = false;
this.topics = new ArrayList<WritableTxnMarkerTopic>(0);
this.coordinatorEpoch = 0;
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
/**
 * Reads one marker from the wire.  Field order is fixed by the schema:
 * producerId, producerEpoch, transactionResult, topics, coordinatorEpoch,
 * then (v1+) the tagged-field section.
 */
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of WritableTxnMarker");
}
this.producerId = _readable.readLong();
this.producerEpoch = _readable.readShort();
// BOOLEAN is encoded as one byte; any non-zero value is true.
this.transactionResult = _readable.readByte() != 0;
{
if (_version >= 1) {
int arrayLength;
// Compact array: encoded length is (size + 1).
arrayLength = _readable.readUnsignedVarint() - 1;
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field topics was serialized as null");
} else {
// Sanity-check the claimed length against the bytes actually remaining.
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<WritableTxnMarkerTopic> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new WritableTxnMarkerTopic(_readable, _version));
}
this.topics = newCollection;
}
} else {
int arrayLength;
arrayLength = _readable.readInt();
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field topics was serialized as null");
} else {
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<WritableTxnMarkerTopic> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(new WritableTxnMarkerTopic(_readable, _version));
}
this.topics = newCollection;
}
}
}
this.coordinatorEpoch = _readable.readInt();
this._unknownTaggedFields = null;
if (_version >= 1) {
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
}
/** Writes one marker; must mirror {@link #read} exactly. */
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
_writable.writeLong(producerId);
_writable.writeShort(producerEpoch);
_writable.writeByte(transactionResult ? (byte) 1 : (byte) 0);
if (_version >= 1) {
_writable.writeUnsignedVarint(topics.size() + 1);
for (WritableTxnMarkerTopic topicsElement : topics) {
topicsElement.write(_writable, _cache, _version);
}
} else {
_writable.writeInt(topics.size());
for (WritableTxnMarkerTopic topicsElement : topics) {
topicsElement.write(_writable, _cache, _version);
}
}
_writable.writeInt(coordinatorEpoch);
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_version >= 1) {
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
/** Accumulates the serialized size of one marker for the given version. */
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of WritableTxnMarker");
}
// Fixed-width fields: INT64 producerId, INT16 producerEpoch, BOOLEAN result.
_size.addBytes(8);
_size.addBytes(2);
_size.addBytes(1);
{
if (_version >= 1) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1));
} else {
_size.addBytes(4);
}
for (WritableTxnMarkerTopic topicsElement : topics) {
topicsElement.addSize(_size, _cache, _version);
}
}
// INT32 coordinatorEpoch.
_size.addBytes(4);
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
if (_version >= 1) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof WritableTxnMarker)) return false;
WritableTxnMarker other = (WritableTxnMarker) obj;
if (producerId != other.producerId) return false;
if (producerEpoch != other.producerEpoch) return false;
if (transactionResult != other.transactionResult) return false;
if (this.topics == null) {
if (other.topics != null) return false;
} else {
if (!this.topics.equals(other.topics)) return false;
}
if (coordinatorEpoch != other.coordinatorEpoch) return false;
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
int hashCode = 0;
// Fold the 64-bit producerId into 32 bits (high word XOR low word).
hashCode = 31 * hashCode + ((int) (producerId >> 32) ^ (int) producerId);
hashCode = 31 * hashCode + producerEpoch;
hashCode = 31 * hashCode + (transactionResult ? 1231 : 1237);
hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
hashCode = 31 * hashCode + coordinatorEpoch;
return hashCode;
}
/** Returns a deep copy; topic elements are duplicated individually. */
@Override
public WritableTxnMarker duplicate() {
WritableTxnMarker _duplicate = new WritableTxnMarker();
_duplicate.producerId = producerId;
_duplicate.producerEpoch = producerEpoch;
_duplicate.transactionResult = transactionResult;
ArrayList<WritableTxnMarkerTopic> newTopics = new ArrayList<WritableTxnMarkerTopic>(topics.size());
for (WritableTxnMarkerTopic _element : topics) {
newTopics.add(_element.duplicate());
}
_duplicate.topics = newTopics;
_duplicate.coordinatorEpoch = coordinatorEpoch;
return _duplicate;
}
@Override
public String toString() {
return "WritableTxnMarker("
+ "producerId=" + producerId
+ ", producerEpoch=" + producerEpoch
+ ", transactionResult=" + (transactionResult ? "true" : "false")
+ ", topics=" + MessageUtil.deepToString(topics.iterator())
+ ", coordinatorEpoch=" + coordinatorEpoch
+ ")";
}
public long producerId() {
return this.producerId;
}
public short producerEpoch() {
return this.producerEpoch;
}
public boolean transactionResult() {
return this.transactionResult;
}
public List<WritableTxnMarkerTopic> topics() {
return this.topics;
}
public int coordinatorEpoch() {
return this.coordinatorEpoch;
}
/** Lazily-initialized list of raw tagged fields with unrecognized tags. */
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public WritableTxnMarker setProducerId(long v) {
this.producerId = v;
return this;
}
public WritableTxnMarker setProducerEpoch(short v) {
this.producerEpoch = v;
return this;
}
public WritableTxnMarker setTransactionResult(boolean v) {
this.transactionResult = v;
return this;
}
public WritableTxnMarker setTopics(List<WritableTxnMarkerTopic> v) {
this.topics = v;
return this;
}
public WritableTxnMarker setCoordinatorEpoch(int v) {
this.coordinatorEpoch = v;
return this;
}
}
/**
 * One topic within a transaction marker: the topic name plus the partition
 * indexes to write markers for.  Generated code — wire layout must not change.
 */
public static class WritableTxnMarkerTopic implements Message {
// The topic name.
String name;
// The indexes of the partitions to write transaction markers for.
List<Integer> partitionIndexes;
// Tagged fields with unrecognized tags, preserved as raw bytes.
private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 =
new Schema(
new Field("name", Type.STRING, "The topic name."),
new Field("partition_indexes", new ArrayOf(Type.INT32), "The indexes of the partitions to write transaction markers for.")
);
// v1 switches to compact string/array encodings and adds the tagged-fields section.
public static final Schema SCHEMA_1 =
new Schema(
new Field("name", Type.COMPACT_STRING, "The topic name."),
new Field("partition_indexes", new CompactArrayOf(Type.INT32), "The indexes of the partitions to write transaction markers for."),
TaggedFieldsSection.of(
)
);
public static final Schema[] SCHEMAS = new Schema[] {
SCHEMA_0,
SCHEMA_1
};
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 1;
/** Deserializing constructor. */
public WritableTxnMarkerTopic(Readable _readable, short _version) {
read(_readable, _version);
}
/** Default constructor: empty name and empty partition list. */
public WritableTxnMarkerTopic() {
this.name = "";
this.partitionIndexes = new ArrayList<Integer>(0);
}
@Override
public short lowestSupportedVersion() {
return 0;
}
@Override
public short highestSupportedVersion() {
return 1;
}
/**
 * Reads one topic entry: name (compact string in v1+, INT16-prefixed in v0),
 * partition indexes, then (v1+) the tagged-field section.
 */
@Override
public void read(Readable _readable, short _version) {
if (_version > 1) {
throw new UnsupportedVersionException("Can't read version " + _version + " of WritableTxnMarkerTopic");
}
{
int length;
if (_version >= 1) {
// Compact string: encoded length is (byte length + 1); 0 means null.
length = _readable.readUnsignedVarint() - 1;
} else {
length = _readable.readShort();
}
if (length < 0) {
throw new RuntimeException("non-nullable field name was serialized as null");
} else if (length > 0x7fff) {
// Strings in the Kafka protocol are capped at Short.MAX_VALUE bytes.
throw new RuntimeException("string field name had invalid length " + length);
} else {
this.name = _readable.readString(length);
}
}
{
int arrayLength;
if (_version >= 1) {
arrayLength = _readable.readUnsignedVarint() - 1;
} else {
arrayLength = _readable.readInt();
}
if (arrayLength < 0) {
throw new RuntimeException("non-nullable field partitionIndexes was serialized as null");
} else {
// Sanity-check the claimed length against the bytes actually remaining.
if (arrayLength > _readable.remaining()) {
throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
}
ArrayList<Integer> newCollection = new ArrayList<>(arrayLength);
for (int i = 0; i < arrayLength; i++) {
newCollection.add(_readable.readInt());
}
this.partitionIndexes = newCollection;
}
}
this._unknownTaggedFields = null;
if (_version >= 1) {
int _numTaggedFields = _readable.readUnsignedVarint();
for (int _i = 0; _i < _numTaggedFields; _i++) {
int _tag = _readable.readUnsignedVarint();
int _size = _readable.readUnsignedVarint();
switch (_tag) {
default:
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
break;
}
}
}
}
/** Writes one topic entry; must mirror {@link #read} exactly. */
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
{
// UTF-8 bytes were pre-computed and cached by addSize().
byte[] _stringBytes = _cache.getSerializedValue(name);
if (_version >= 1) {
_writable.writeUnsignedVarint(_stringBytes.length + 1);
} else {
_writable.writeShort((short) _stringBytes.length);
}
_writable.writeByteArray(_stringBytes);
}
if (_version >= 1) {
_writable.writeUnsignedVarint(partitionIndexes.size() + 1);
} else {
_writable.writeInt(partitionIndexes.size());
}
for (Integer partitionIndexesElement : partitionIndexes) {
_writable.writeInt(partitionIndexesElement);
}
RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
_numTaggedFields += _rawWriter.numFields();
if (_version >= 1) {
_writable.writeUnsignedVarint(_numTaggedFields);
_rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
/**
 * Accumulates the serialized size.  Also serializes the topic name to UTF-8
 * and caches the bytes so write() reuses them instead of re-encoding.
 */
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
int _numTaggedFields = 0;
if (_version > 1) {
throw new UnsupportedVersionException("Can't size version " + _version + " of WritableTxnMarkerTopic");
}
{
byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
if (_stringBytes.length > 0x7fff) {
throw new RuntimeException("'name' field is too long to be serialized");
}
_cache.cacheSerializedValue(name, _stringBytes);
if (_version >= 1) {
_size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
} else {
// INT16 length prefix plus the UTF-8 payload.
_size.addBytes(_stringBytes.length + 2);
}
}
{
if (_version >= 1) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitionIndexes.size() + 1));
} else {
_size.addBytes(4);
}
// Each partition index is a fixed 4-byte INT32.
_size.addBytes(partitionIndexes.size() * 4);
}
if (_unknownTaggedFields != null) {
_numTaggedFields += _unknownTaggedFields.size();
for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
_size.addBytes(_field.size());
}
}
if (_version >= 1) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
} else {
if (_numTaggedFields > 0) {
throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
}
}
}
@Override
public boolean equals(Object obj) {
if (!(obj instanceof WritableTxnMarkerTopic)) return false;
WritableTxnMarkerTopic other = (WritableTxnMarkerTopic) obj;
if (this.name == null) {
if (other.name != null) return false;
} else {
if (!this.name.equals(other.name)) return false;
}
if (this.partitionIndexes == null) {
if (other.partitionIndexes != null) return false;
} else {
if (!this.partitionIndexes.equals(other.partitionIndexes)) return false;
}
return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}
@Override
public int hashCode() {
int hashCode = 0;
hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
hashCode = 31 * hashCode + (partitionIndexes == null ? 0 : partitionIndexes.hashCode());
return hashCode;
}
/** Returns a deep copy (Integer elements are immutable, so a plain copy suffices). */
@Override
public WritableTxnMarkerTopic duplicate() {
WritableTxnMarkerTopic _duplicate = new WritableTxnMarkerTopic();
_duplicate.name = name;
ArrayList<Integer> newPartitionIndexes = new ArrayList<Integer>(partitionIndexes.size());
for (Integer _element : partitionIndexes) {
newPartitionIndexes.add(_element);
}
_duplicate.partitionIndexes = newPartitionIndexes;
return _duplicate;
}
@Override
public String toString() {
return "WritableTxnMarkerTopic("
+ "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
+ ", partitionIndexes=" + MessageUtil.deepToString(partitionIndexes.iterator())
+ ")";
}
public String name() {
return this.name;
}
public List<Integer> partitionIndexes() {
return this.partitionIndexes;
}
/** Lazily-initialized list of raw tagged fields with unrecognized tags. */
@Override
public List<RawTaggedField> unknownTaggedFields() {
if (_unknownTaggedFields == null) {
_unknownTaggedFields = new ArrayList<>(0);
}
return _unknownTaggedFields;
}
public WritableTxnMarkerTopic setName(String v) {
this.name = v;
return this;
}
public WritableTxnMarkerTopic setPartitionIndexes(List<Integer> v) {
this.partitionIndexes = v;
return this;
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/WriteTxnMarkersRequestDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.WriteTxnMarkersRequestData.*;
/**
 * Converts {@link WriteTxnMarkersRequestData} to and from Jackson JSON trees.
 * Every field in the schema is mandatory in versions 0-1, so read() throws
 * if any expected JSON field is absent.
 *
 * THIS CODE IS AUTOMATICALLY GENERATED by the Kafka message generator. DO NOT EDIT.
 */
public class WriteTxnMarkersRequestDataJsonConverter {
/**
 * Builds a request object from a JSON node.
 * @throws RuntimeException if a mandatory field is missing or mistyped.
 */
public static WriteTxnMarkersRequestData read(JsonNode _node, short _version) {
WriteTxnMarkersRequestData _object = new WriteTxnMarkersRequestData();
JsonNode _markersNode = _node.get("markers");
if (_markersNode == null) {
throw new RuntimeException("WriteTxnMarkersRequestData: unable to locate field 'markers', which is mandatory in version " + _version);
} else {
if (!_markersNode.isArray()) {
throw new RuntimeException("WriteTxnMarkersRequestData expected a JSON array, but got " + _node.getNodeType());
}
ArrayList<WritableTxnMarker> _collection = new ArrayList<WritableTxnMarker>(_markersNode.size());
_object.markers = _collection;
for (JsonNode _element : _markersNode) {
_collection.add(WritableTxnMarkerJsonConverter.read(_element, _version));
}
}
return _object;
}
/**
 * Serializes a request object to a JSON tree.  _serializeRecords is threaded
 * through to element converters (this message carries no record data itself).
 */
public static JsonNode write(WriteTxnMarkersRequestData _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
ArrayNode _markersArray = new ArrayNode(JsonNodeFactory.instance);
for (WritableTxnMarker _element : _object.markers) {
_markersArray.add(WritableTxnMarkerJsonConverter.write(_element, _version, _serializeRecords));
}
_node.set("markers", _markersArray);
return _node;
}
/** Convenience overload: serializes records by default. */
public static JsonNode write(WriteTxnMarkersRequestData _object, short _version) {
return write(_object, _version, true);
}
/** JSON converter for the nested {@link WritableTxnMarker} type. */
public static class WritableTxnMarkerJsonConverter {
public static WritableTxnMarker read(JsonNode _node, short _version) {
WritableTxnMarker _object = new WritableTxnMarker();
JsonNode _producerIdNode = _node.get("producerId");
if (_producerIdNode == null) {
throw new RuntimeException("WritableTxnMarker: unable to locate field 'producerId', which is mandatory in version " + _version);
} else {
_object.producerId = MessageUtil.jsonNodeToLong(_producerIdNode, "WritableTxnMarker");
}
JsonNode _producerEpochNode = _node.get("producerEpoch");
if (_producerEpochNode == null) {
throw new RuntimeException("WritableTxnMarker: unable to locate field 'producerEpoch', which is mandatory in version " + _version);
} else {
_object.producerEpoch = MessageUtil.jsonNodeToShort(_producerEpochNode, "WritableTxnMarker");
}
JsonNode _transactionResultNode = _node.get("transactionResult");
if (_transactionResultNode == null) {
throw new RuntimeException("WritableTxnMarker: unable to locate field 'transactionResult', which is mandatory in version " + _version);
} else {
if (!_transactionResultNode.isBoolean()) {
throw new RuntimeException("WritableTxnMarker expected Boolean type, but got " + _node.getNodeType());
}
_object.transactionResult = _transactionResultNode.asBoolean();
}
JsonNode _topicsNode = _node.get("topics");
if (_topicsNode == null) {
throw new RuntimeException("WritableTxnMarker: unable to locate field 'topics', which is mandatory in version " + _version);
} else {
if (!_topicsNode.isArray()) {
throw new RuntimeException("WritableTxnMarker expected a JSON array, but got " + _node.getNodeType());
}
ArrayList<WritableTxnMarkerTopic> _collection = new ArrayList<WritableTxnMarkerTopic>(_topicsNode.size());
_object.topics = _collection;
for (JsonNode _element : _topicsNode) {
_collection.add(WritableTxnMarkerTopicJsonConverter.read(_element, _version));
}
}
JsonNode _coordinatorEpochNode = _node.get("coordinatorEpoch");
if (_coordinatorEpochNode == null) {
throw new RuntimeException("WritableTxnMarker: unable to locate field 'coordinatorEpoch', which is mandatory in version " + _version);
} else {
_object.coordinatorEpoch = MessageUtil.jsonNodeToInt(_coordinatorEpochNode, "WritableTxnMarker");
}
return _object;
}
public static JsonNode write(WritableTxnMarker _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("producerId", new LongNode(_object.producerId));
_node.set("producerEpoch", new ShortNode(_object.producerEpoch));
_node.set("transactionResult", BooleanNode.valueOf(_object.transactionResult));
ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
for (WritableTxnMarkerTopic _element : _object.topics) {
_topicsArray.add(WritableTxnMarkerTopicJsonConverter.write(_element, _version, _serializeRecords));
}
_node.set("topics", _topicsArray);
_node.set("coordinatorEpoch", new IntNode(_object.coordinatorEpoch));
return _node;
}
/** Convenience overload: serializes records by default. */
public static JsonNode write(WritableTxnMarker _object, short _version) {
return write(_object, _version, true);
}
}
/** JSON converter for the nested {@link WritableTxnMarkerTopic} type. */
public static class WritableTxnMarkerTopicJsonConverter {
public static WritableTxnMarkerTopic read(JsonNode _node, short _version) {
WritableTxnMarkerTopic _object = new WritableTxnMarkerTopic();
JsonNode _nameNode = _node.get("name");
if (_nameNode == null) {
throw new RuntimeException("WritableTxnMarkerTopic: unable to locate field 'name', which is mandatory in version " + _version);
} else {
if (!_nameNode.isTextual()) {
throw new RuntimeException("WritableTxnMarkerTopic expected a string type, but got " + _node.getNodeType());
}
_object.name = _nameNode.asText();
}
JsonNode _partitionIndexesNode = _node.get("partitionIndexes");
if (_partitionIndexesNode == null) {
throw new RuntimeException("WritableTxnMarkerTopic: unable to locate field 'partitionIndexes', which is mandatory in version " + _version);
} else {
if (!_partitionIndexesNode.isArray()) {
throw new RuntimeException("WritableTxnMarkerTopic expected a JSON array, but got " + _node.getNodeType());
}
ArrayList<Integer> _collection = new ArrayList<Integer>(_partitionIndexesNode.size());
_object.partitionIndexes = _collection;
for (JsonNode _element : _partitionIndexesNode) {
_collection.add(MessageUtil.jsonNodeToInt(_element, "WritableTxnMarkerTopic element"));
}
}
return _object;
}
public static JsonNode write(WritableTxnMarkerTopic _object, short _version, boolean _serializeRecords) {
ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
_node.set("name", new TextNode(_object.name));
ArrayNode _partitionIndexesArray = new ArrayNode(JsonNodeFactory.instance);
for (Integer _element : _object.partitionIndexes) {
_partitionIndexesArray.add(new IntNode(_element));
}
_node.set("partitionIndexes", _partitionIndexesArray);
return _node;
}
/** Convenience overload: serializes records by default. */
public static JsonNode write(WritableTxnMarkerTopic _object, short _version) {
return write(_object, _version, true);
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/WriteTxnMarkersResponseData.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
/**
 * Auto-generated message payload for the WriteTxnMarkers response (API key 27).
 * Supports wire versions 0-1. Version 1 is a "flexible" version: arrays and
 * strings are written compactly (unsigned varint of length + 1, so that a raw
 * 0 can encode null) and a tagged-field section follows the known fields.
 * NOTE(review): generated code ("DO NOT EDIT") -- changes should be made in
 * the message JSON spec and regenerated, not edited here by hand.
 */
public class WriteTxnMarkersResponseData implements ApiMessage {
    // Per-marker results, one entry per producer whose markers were written.
    List<WritableTxnMarkerResult> markers;
    // Tagged fields whose tags this version does not recognize; retained so
    // they can be re-serialized unchanged.
    private List<RawTaggedField> _unknownTaggedFields;

    // Version 0 schema: plain (int32-length-prefixed) array of results.
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("markers", new ArrayOf(WritableTxnMarkerResult.SCHEMA_0), "The results for writing makers.")
        );

    // Version 1 schema: compact array plus an (empty) tagged-fields section.
    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("markers", new CompactArrayOf(WritableTxnMarkerResult.SCHEMA_1), "The results for writing makers."),
            TaggedFieldsSection.of(
            )
        );

    // Indexed by version number.
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 1;

    /** Deserializing constructor: populates this object from {@code _readable} at {@code _version}. */
    public WriteTxnMarkersResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: starts with an empty marker list. */
    public WriteTxnMarkersResponseData() {
        this.markers = new ArrayList<WritableTxnMarkerResult>(0);
    }

    /** @return the API key for WriteTxnMarkers (27). */
    @Override
    public short apiKey() {
        return 27;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    /**
     * Reads this message from {@code _readable}.
     * v1+: compact array (varint length + 1) followed by a tagged-field section;
     * v0: int32-length array, no tagged fields.
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            if (_version >= 1) {
                int arrayLength;
                // Compact encoding stores length + 1; -1 after decode means null.
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field markers was serialized as null");
                } else {
                    // Sanity-check the length against remaining bytes before allocating,
                    // so a corrupt length cannot trigger a huge allocation.
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<WritableTxnMarkerResult> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new WritableTxnMarkerResult(_readable, _version));
                    }
                    this.markers = newCollection;
                }
            } else {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field markers was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<WritableTxnMarkerResult> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new WritableTxnMarkerResult(_readable, _version));
                    }
                    this.markers = newCollection;
                }
            }
        }
        this._unknownTaggedFields = null;
        if (_version >= 1) {
            // Tagged-field section: count, then (tag, size, bytes) per field.
            // No tags are known at this version, so everything lands in
            // _unknownTaggedFields.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Writes this message to {@code _writable}. Mirrors {@link #read}: compact
     * array + tagged fields for v1+, int32 array for v0.
     * @throws UnsupportedVersionException if tagged fields are present but the
     *         version does not support them (v0).
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 1) {
            // Compact arrays serialize length + 1 (0 is reserved for null).
            _writable.writeUnsignedVarint(markers.size() + 1);
            for (WritableTxnMarkerResult markersElement : markers) {
                markersElement.write(_writable, _cache, _version);
            }
        } else {
            _writable.writeInt(markers.size());
            for (WritableTxnMarkerResult markersElement : markers) {
                markersElement.write(_writable, _cache, _version);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 1) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /** Accumulates the serialized size of this message; must stay in lockstep with {@link #write}. */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            if (_version >= 1) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(markers.size() + 1));
            } else {
                _size.addBytes(4);
            }
            for (WritableTxnMarkerResult markersElement : markers) {
                markersElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 1) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof WriteTxnMarkersResponseData)) return false;
        WriteTxnMarkersResponseData other = (WriteTxnMarkersResponseData) obj;
        if (this.markers == null) {
            if (other.markers != null) return false;
        } else {
            if (!this.markers.equals(other.markers)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // Note: hashCode covers only markers; equals additionally compares unknown
    // tagged fields, so equal objects still hash equally.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (markers == null ? 0 : markers.hashCode());
        return hashCode;
    }

    /** @return a deep copy (each marker element is duplicated). */
    @Override
    public WriteTxnMarkersResponseData duplicate() {
        WriteTxnMarkersResponseData _duplicate = new WriteTxnMarkersResponseData();
        ArrayList<WritableTxnMarkerResult> newMarkers = new ArrayList<WritableTxnMarkerResult>(markers.size());
        for (WritableTxnMarkerResult _element : markers) {
            newMarkers.add(_element.duplicate());
        }
        _duplicate.markers = newMarkers;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "WriteTxnMarkersResponseData("
            + "markers=" + MessageUtil.deepToString(markers.iterator())
            + ")";
    }

    public List<WritableTxnMarkerResult> markers() {
        return this.markers;
    }

    /** @return the unknown tagged fields, lazily initialized to an empty list. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public WriteTxnMarkersResponseData setMarkers(List<WritableTxnMarkerResult> v) {
        this.markers = v;
        return this;
    }

    /** Per-producer result: the producer id plus its per-topic outcomes. */
    public static class WritableTxnMarkerResult implements Message {
        // The producer ID whose markers these results are for.
        long producerId;
        // Results grouped by topic.
        List<WritableTxnMarkerTopicResult> topics;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("producer_id", Type.INT64, "The current producer ID in use by the transactional ID."),
                new Field("topics", new ArrayOf(WritableTxnMarkerTopicResult.SCHEMA_0), "The results by topic.")
            );

        public static final Schema SCHEMA_1 =
            new Schema(
                new Field("producer_id", Type.INT64, "The current producer ID in use by the transactional ID."),
                new Field("topics", new CompactArrayOf(WritableTxnMarkerTopicResult.SCHEMA_1), "The results by topic."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 1;

        /** Deserializing constructor. */
        public WritableTxnMarkerResult(Readable _readable, short _version) {
            read(_readable, _version);
        }

        /** Default constructor: producerId 0, empty topic list. */
        public WritableTxnMarkerResult() {
            this.producerId = 0L;
            this.topics = new ArrayList<WritableTxnMarkerTopicResult>(0);
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        /** Reads producerId then the topics array (compact for v1+), then any tagged fields. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of WritableTxnMarkerResult");
            }
            this.producerId = _readable.readLong();
            {
                if (_version >= 1) {
                    int arrayLength;
                    // Compact encoding: stored value is length + 1.
                    arrayLength = _readable.readUnsignedVarint() - 1;
                    if (arrayLength < 0) {
                        throw new RuntimeException("non-nullable field topics was serialized as null");
                    } else {
                        if (arrayLength > _readable.remaining()) {
                            throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                        }
                        ArrayList<WritableTxnMarkerTopicResult> newCollection = new ArrayList<>(arrayLength);
                        for (int i = 0; i < arrayLength; i++) {
                            newCollection.add(new WritableTxnMarkerTopicResult(_readable, _version));
                        }
                        this.topics = newCollection;
                    }
                } else {
                    int arrayLength;
                    arrayLength = _readable.readInt();
                    if (arrayLength < 0) {
                        throw new RuntimeException("non-nullable field topics was serialized as null");
                    } else {
                        if (arrayLength > _readable.remaining()) {
                            throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                        }
                        ArrayList<WritableTxnMarkerTopicResult> newCollection = new ArrayList<>(arrayLength);
                        for (int i = 0; i < arrayLength; i++) {
                            newCollection.add(new WritableTxnMarkerTopicResult(_readable, _version));
                        }
                        this.topics = newCollection;
                    }
                }
            }
            this._unknownTaggedFields = null;
            if (_version >= 1) {
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }

        /** Writes producerId, the topics array, and (v1+) the tagged-field section. */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            _writable.writeLong(producerId);
            if (_version >= 1) {
                _writable.writeUnsignedVarint(topics.size() + 1);
                for (WritableTxnMarkerTopicResult topicsElement : topics) {
                    topicsElement.write(_writable, _cache, _version);
                }
            } else {
                _writable.writeInt(topics.size());
                for (WritableTxnMarkerTopicResult topicsElement : topics) {
                    topicsElement.write(_writable, _cache, _version);
                }
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 1) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        /** Accumulates serialized size; kept in lockstep with {@link #write}. */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of WritableTxnMarkerResult");
            }
            // producerId: fixed 8 bytes (int64).
            _size.addBytes(8);
            {
                if (_version >= 1) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1));
                } else {
                    _size.addBytes(4);
                }
                for (WritableTxnMarkerTopicResult topicsElement : topics) {
                    topicsElement.addSize(_size, _cache, _version);
                }
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_version >= 1) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof WritableTxnMarkerResult)) return false;
            WritableTxnMarkerResult other = (WritableTxnMarkerResult) obj;
            if (producerId != other.producerId) return false;
            if (this.topics == null) {
                if (other.topics != null) return false;
            } else {
                if (!this.topics.equals(other.topics)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            // Fold the 64-bit producerId into 32 bits (high word XOR low word).
            hashCode = 31 * hashCode + ((int) (producerId >> 32) ^ (int) producerId);
            hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
            return hashCode;
        }

        /** @return a deep copy (each topic element is duplicated). */
        @Override
        public WritableTxnMarkerResult duplicate() {
            WritableTxnMarkerResult _duplicate = new WritableTxnMarkerResult();
            _duplicate.producerId = producerId;
            ArrayList<WritableTxnMarkerTopicResult> newTopics = new ArrayList<WritableTxnMarkerTopicResult>(topics.size());
            for (WritableTxnMarkerTopicResult _element : topics) {
                newTopics.add(_element.duplicate());
            }
            _duplicate.topics = newTopics;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "WritableTxnMarkerResult("
                + "producerId=" + producerId
                + ", topics=" + MessageUtil.deepToString(topics.iterator())
                + ")";
        }

        public long producerId() {
            return this.producerId;
        }

        public List<WritableTxnMarkerTopicResult> topics() {
            return this.topics;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public WritableTxnMarkerResult setProducerId(long v) {
            this.producerId = v;
            return this;
        }

        public WritableTxnMarkerResult setTopics(List<WritableTxnMarkerTopicResult> v) {
            this.topics = v;
            return this;
        }
    }

    /** Per-topic result: topic name plus per-partition outcomes. */
    public static class WritableTxnMarkerTopicResult implements Message {
        // The topic name; non-nullable on the wire.
        String name;
        // Results grouped by partition.
        List<WritableTxnMarkerPartitionResult> partitions;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("name", Type.STRING, "The topic name."),
                new Field("partitions", new ArrayOf(WritableTxnMarkerPartitionResult.SCHEMA_0), "The results by partition.")
            );

        public static final Schema SCHEMA_1 =
            new Schema(
                new Field("name", Type.COMPACT_STRING, "The topic name."),
                new Field("partitions", new CompactArrayOf(WritableTxnMarkerPartitionResult.SCHEMA_1), "The results by partition."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 1;

        /** Deserializing constructor. */
        public WritableTxnMarkerTopicResult(Readable _readable, short _version) {
            read(_readable, _version);
        }

        /** Default constructor: empty name, empty partition list. */
        public WritableTxnMarkerTopicResult() {
            this.name = "";
            this.partitions = new ArrayList<WritableTxnMarkerPartitionResult>(0);
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        /** Reads the name (compact string for v1+), the partitions array, then tagged fields. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of WritableTxnMarkerTopicResult");
            }
            {
                int length;
                if (_version >= 1) {
                    // Compact string: varint of length + 1.
                    length = _readable.readUnsignedVarint() - 1;
                } else {
                    length = _readable.readShort();
                }
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    // Strings are capped at Short.MAX_VALUE bytes.
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            {
                if (_version >= 1) {
                    int arrayLength;
                    arrayLength = _readable.readUnsignedVarint() - 1;
                    if (arrayLength < 0) {
                        throw new RuntimeException("non-nullable field partitions was serialized as null");
                    } else {
                        if (arrayLength > _readable.remaining()) {
                            throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                        }
                        ArrayList<WritableTxnMarkerPartitionResult> newCollection = new ArrayList<>(arrayLength);
                        for (int i = 0; i < arrayLength; i++) {
                            newCollection.add(new WritableTxnMarkerPartitionResult(_readable, _version));
                        }
                        this.partitions = newCollection;
                    }
                } else {
                    int arrayLength;
                    arrayLength = _readable.readInt();
                    if (arrayLength < 0) {
                        throw new RuntimeException("non-nullable field partitions was serialized as null");
                    } else {
                        if (arrayLength > _readable.remaining()) {
                            throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                        }
                        ArrayList<WritableTxnMarkerPartitionResult> newCollection = new ArrayList<>(arrayLength);
                        for (int i = 0; i < arrayLength; i++) {
                            newCollection.add(new WritableTxnMarkerPartitionResult(_readable, _version));
                        }
                        this.partitions = newCollection;
                    }
                }
            }
            this._unknownTaggedFields = null;
            if (_version >= 1) {
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }

        /** Writes the name (using UTF-8 bytes cached by {@link #addSize}), partitions, then tagged fields. */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                // Serialized UTF-8 bytes were cached during addSize().
                byte[] _stringBytes = _cache.getSerializedValue(name);
                if (_version >= 1) {
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                } else {
                    _writable.writeShort((short) _stringBytes.length);
                }
                _writable.writeByteArray(_stringBytes);
            }
            if (_version >= 1) {
                _writable.writeUnsignedVarint(partitions.size() + 1);
                for (WritableTxnMarkerPartitionResult partitionsElement : partitions) {
                    partitionsElement.write(_writable, _cache, _version);
                }
            } else {
                _writable.writeInt(partitions.size());
                for (WritableTxnMarkerPartitionResult partitionsElement : partitions) {
                    partitionsElement.write(_writable, _cache, _version);
                }
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 1) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        /** Accumulates serialized size and caches the name's UTF-8 bytes for {@link #write}. */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of WritableTxnMarkerTopicResult");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                // Cache so write() does not re-encode the string.
                _cache.cacheSerializedValue(name, _stringBytes);
                if (_version >= 1) {
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                } else {
                    _size.addBytes(_stringBytes.length + 2);
                }
            }
            {
                if (_version >= 1) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1));
                } else {
                    _size.addBytes(4);
                }
                for (WritableTxnMarkerPartitionResult partitionsElement : partitions) {
                    partitionsElement.addSize(_size, _cache, _version);
                }
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_version >= 1) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof WritableTxnMarkerTopicResult)) return false;
            WritableTxnMarkerTopicResult other = (WritableTxnMarkerTopicResult) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            if (this.partitions == null) {
                if (other.partitions != null) return false;
            } else {
                if (!this.partitions.equals(other.partitions)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
            return hashCode;
        }

        /** @return a deep copy (each partition element is duplicated; name is shared, Strings are immutable). */
        @Override
        public WritableTxnMarkerTopicResult duplicate() {
            WritableTxnMarkerTopicResult _duplicate = new WritableTxnMarkerTopicResult();
            _duplicate.name = name;
            ArrayList<WritableTxnMarkerPartitionResult> newPartitions = new ArrayList<WritableTxnMarkerPartitionResult>(partitions.size());
            for (WritableTxnMarkerPartitionResult _element : partitions) {
                newPartitions.add(_element.duplicate());
            }
            _duplicate.partitions = newPartitions;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "WritableTxnMarkerTopicResult("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", partitions=" + MessageUtil.deepToString(partitions.iterator())
                + ")";
        }

        public String name() {
            return this.name;
        }

        public List<WritableTxnMarkerPartitionResult> partitions() {
            return this.partitions;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public WritableTxnMarkerTopicResult setName(String v) {
            this.name = v;
            return this;
        }

        public WritableTxnMarkerTopicResult setPartitions(List<WritableTxnMarkerPartitionResult> v) {
            this.partitions = v;
            return this;
        }
    }

    /** Per-partition result: partition index plus the error code (0 = success). */
    public static class WritableTxnMarkerPartitionResult implements Message {
        // The partition index within the topic.
        int partitionIndex;
        // The Kafka error code; 0 means no error.
        short errorCode;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("partition_index", Type.INT32, "The partition index."),
                new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.")
            );

        public static final Schema SCHEMA_1 =
            new Schema(
                new Field("partition_index", Type.INT32, "The partition index."),
                new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 1;

        /** Deserializing constructor. */
        public WritableTxnMarkerPartitionResult(Readable _readable, short _version) {
            read(_readable, _version);
        }

        /** Default constructor: partition 0, no error. */
        public WritableTxnMarkerPartitionResult() {
            this.partitionIndex = 0;
            this.errorCode = (short) 0;
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        /** Reads the two fixed-width fields, then (v1+) the tagged-field section. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of WritableTxnMarkerPartitionResult");
            }
            this.partitionIndex = _readable.readInt();
            this.errorCode = _readable.readShort();
            this._unknownTaggedFields = null;
            if (_version >= 1) {
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }

        /** Writes the two fixed-width fields, then (v1+) the tagged-field section. */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            _writable.writeInt(partitionIndex);
            _writable.writeShort(errorCode);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 1) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        /** Accumulates serialized size; kept in lockstep with {@link #write}. */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of WritableTxnMarkerPartitionResult");
            }
            // partitionIndex: int32.
            _size.addBytes(4);
            // errorCode: int16.
            _size.addBytes(2);
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_version >= 1) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof WritableTxnMarkerPartitionResult)) return false;
            WritableTxnMarkerPartitionResult other = (WritableTxnMarkerPartitionResult) obj;
            if (partitionIndex != other.partitionIndex) return false;
            if (errorCode != other.errorCode) return false;
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + partitionIndex;
            hashCode = 31 * hashCode + errorCode;
            return hashCode;
        }

        /** @return a field-by-field copy (all fields are primitives). */
        @Override
        public WritableTxnMarkerPartitionResult duplicate() {
            WritableTxnMarkerPartitionResult _duplicate = new WritableTxnMarkerPartitionResult();
            _duplicate.partitionIndex = partitionIndex;
            _duplicate.errorCode = errorCode;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "WritableTxnMarkerPartitionResult("
                + "partitionIndex=" + partitionIndex
                + ", errorCode=" + errorCode
                + ")";
        }

        public int partitionIndex() {
            return this.partitionIndex;
        }

        public short errorCode() {
            return this.errorCode;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public WritableTxnMarkerPartitionResult setPartitionIndex(int v) {
            this.partitionIndex = v;
            return this;
        }

        public WritableTxnMarkerPartitionResult setErrorCode(short v) {
            this.errorCode = v;
            return this;
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/WriteTxnMarkersResponseDataJsonConverter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.WriteTxnMarkersResponseData.*;
/**
 * Auto-generated JSON converter for {@link WriteTxnMarkersResponseData}:
 * translates between the message object and a Jackson {@link JsonNode} tree.
 * Unknown tagged fields are not represented in the JSON form.
 * NOTE(review): generated code ("DO NOT EDIT") -- regenerate from the message
 * JSON spec rather than editing by hand.
 */
public class WriteTxnMarkersResponseDataJsonConverter {
    /**
     * Builds a WriteTxnMarkersResponseData from a JSON object node.
     * @throws RuntimeException if the mandatory 'markers' field is missing or not an array.
     */
    public static WriteTxnMarkersResponseData read(JsonNode _node, short _version) {
        WriteTxnMarkersResponseData _object = new WriteTxnMarkersResponseData();
        JsonNode _markersNode = _node.get("markers");
        if (_markersNode == null) {
            throw new RuntimeException("WriteTxnMarkersResponseData: unable to locate field 'markers', which is mandatory in version " + _version);
        } else {
            if (!_markersNode.isArray()) {
                // NOTE(review): the message reports the parent node's type (_node),
                // not _markersNode's -- matches the generated pattern used below.
                throw new RuntimeException("WriteTxnMarkersResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<WritableTxnMarkerResult> _collection = new ArrayList<WritableTxnMarkerResult>(_markersNode.size());
            _object.markers = _collection;
            for (JsonNode _element : _markersNode) {
                _collection.add(WritableTxnMarkerResultJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /** Serializes the message to a JSON object node. */
    public static JsonNode write(WriteTxnMarkersResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        ArrayNode _markersArray = new ArrayNode(JsonNodeFactory.instance);
        for (WritableTxnMarkerResult _element : _object.markers) {
            _markersArray.add(WritableTxnMarkerResultJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("markers", _markersArray);
        return _node;
    }

    /** Convenience overload: serializes records by default. */
    public static JsonNode write(WriteTxnMarkersResponseData _object, short _version) {
        return write(_object, _version, true);
    }

    /** Converter for the per-partition result (partitionIndex + errorCode). */
    public static class WritableTxnMarkerPartitionResultJsonConverter {
        /** Builds a WritableTxnMarkerPartitionResult; both fields are mandatory. */
        public static WritableTxnMarkerPartitionResult read(JsonNode _node, short _version) {
            WritableTxnMarkerPartitionResult _object = new WritableTxnMarkerPartitionResult();
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("WritableTxnMarkerPartitionResult: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "WritableTxnMarkerPartitionResult");
            }
            JsonNode _errorCodeNode = _node.get("errorCode");
            if (_errorCodeNode == null) {
                throw new RuntimeException("WritableTxnMarkerPartitionResult: unable to locate field 'errorCode', which is mandatory in version " + _version);
            } else {
                _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "WritableTxnMarkerPartitionResult");
            }
            return _object;
        }

        public static JsonNode write(WritableTxnMarkerPartitionResult _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            _node.set("errorCode", new ShortNode(_object.errorCode));
            return _node;
        }

        /** Convenience overload: serializes records by default. */
        public static JsonNode write(WritableTxnMarkerPartitionResult _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** Converter for the per-producer result (producerId + topics). */
    public static class WritableTxnMarkerResultJsonConverter {
        /** Builds a WritableTxnMarkerResult; 'producerId' and 'topics' are mandatory. */
        public static WritableTxnMarkerResult read(JsonNode _node, short _version) {
            WritableTxnMarkerResult _object = new WritableTxnMarkerResult();
            JsonNode _producerIdNode = _node.get("producerId");
            if (_producerIdNode == null) {
                throw new RuntimeException("WritableTxnMarkerResult: unable to locate field 'producerId', which is mandatory in version " + _version);
            } else {
                _object.producerId = MessageUtil.jsonNodeToLong(_producerIdNode, "WritableTxnMarkerResult");
            }
            JsonNode _topicsNode = _node.get("topics");
            if (_topicsNode == null) {
                throw new RuntimeException("WritableTxnMarkerResult: unable to locate field 'topics', which is mandatory in version " + _version);
            } else {
                if (!_topicsNode.isArray()) {
                    throw new RuntimeException("WritableTxnMarkerResult expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<WritableTxnMarkerTopicResult> _collection = new ArrayList<WritableTxnMarkerTopicResult>(_topicsNode.size());
                _object.topics = _collection;
                for (JsonNode _element : _topicsNode) {
                    _collection.add(WritableTxnMarkerTopicResultJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        public static JsonNode write(WritableTxnMarkerResult _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("producerId", new LongNode(_object.producerId));
            ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
            for (WritableTxnMarkerTopicResult _element : _object.topics) {
                _topicsArray.add(WritableTxnMarkerTopicResultJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("topics", _topicsArray);
            return _node;
        }

        /** Convenience overload: serializes records by default. */
        public static JsonNode write(WritableTxnMarkerResult _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** Converter for the per-topic result (name + partitions). */
    public static class WritableTxnMarkerTopicResultJsonConverter {
        /** Builds a WritableTxnMarkerTopicResult; 'name' must be a string and 'partitions' an array. */
        public static WritableTxnMarkerTopicResult read(JsonNode _node, short _version) {
            WritableTxnMarkerTopicResult _object = new WritableTxnMarkerTopicResult();
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("WritableTxnMarkerTopicResult: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("WritableTxnMarkerTopicResult expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("WritableTxnMarkerTopicResult: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    throw new RuntimeException("WritableTxnMarkerTopicResult expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<WritableTxnMarkerPartitionResult> _collection = new ArrayList<WritableTxnMarkerPartitionResult>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(WritableTxnMarkerPartitionResultJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        public static JsonNode write(WritableTxnMarkerTopicResult _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (WritableTxnMarkerPartitionResult _element : _object.partitions) {
                _partitionsArray.add(WritableTxnMarkerPartitionResultJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }

        /** Convenience overload: serializes records by default. */
        public static JsonNode write(WritableTxnMarkerTopicResult _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/CompoundStat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
import org.apache.kafka.common.MetricName;
import java.util.List;
/**
 * A compound stat is a stat where a single measurement and its associated data structure
 * feed several metrics at once — for example a histogram, which backs many percentile
 * metrics from one recorded distribution.
 */
public interface CompoundStat extends Stat {

    /**
     * @return the named measurables produced by this compound stat, one per metric
     */
    List<NamedMeasurable> stats();

    /**
     * Pairs a {@link MetricName} with the {@link Measurable} that produces its value.
     * Instances are immutable.
     */
    class NamedMeasurable {

        private final MetricName name;
        private final Measurable stat;

        public NamedMeasurable(MetricName name, Measurable stat) {
            this.name = name;
            this.stat = stat;
        }

        /** @return the metric name */
        public MetricName name() {
            return name;
        }

        /** @return the measurable producing the metric's value */
        public Measurable stat() {
            return stat;
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/Gauge.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
/**
 * A gauge metric is an instantaneous reading of a particular value.
 */
@FunctionalInterface
public interface Gauge<T> extends MetricValueProvider<T> {
/**
 * Returns the current value associated with this gauge.
 * @param config The configuration for this metric
 * @param now The POSIX time in milliseconds the measurement is being taken
 * @return the instantaneous value of the gauge at time {@code now}
 */
T value(MetricConfig config, long now);
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/JmxReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.utils.ConfigUtils;
import org.apache.kafka.common.utils.Sanitizer;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.management.Attribute;
import javax.management.AttributeList;
import javax.management.AttributeNotFoundException;
import javax.management.DynamicMBean;
import javax.management.JMException;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import java.lang.management.ManagementFactory;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.Predicate;
import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException;
/**
* Register metrics in JMX as dynamic mbeans based on the metric names
*/
/**
 * Register metrics in JMX as dynamic mbeans based on the metric names
 */
public class JmxReporter implements MetricsReporter {
// Configuration keys controlling which mbeans this reporter exposes. The "blacklist"
// and "whitelist" names are deprecated aliases translated in compilePredicate().
public static final String METRICS_CONFIG_PREFIX = "metrics.jmx.";
public static final String EXCLUDE_CONFIG = METRICS_CONFIG_PREFIX + "exclude";
public static final String EXCLUDE_CONFIG_ALIAS = METRICS_CONFIG_PREFIX + "blacklist";
public static final String INCLUDE_CONFIG = METRICS_CONFIG_PREFIX + "include";
public static final String INCLUDE_CONFIG_ALIAS = METRICS_CONFIG_PREFIX + "whitelist";
public static final Set<String> RECONFIGURABLE_CONFIGS = Utils.mkSet(INCLUDE_CONFIG,
INCLUDE_CONFIG_ALIAS,
EXCLUDE_CONFIG,
EXCLUDE_CONFIG_ALIAS);
// Default filter: include every mbean, exclude none.
public static final String DEFAULT_INCLUDE = ".*";
public static final String DEFAULT_EXCLUDE = "";
private static final Logger log = LoggerFactory.getLogger(JmxReporter.class);
// Static lock: serializes mbean (de)registration across ALL JmxReporter instances in
// this JVM, since they all share the platform MBeanServer.
private static final Object LOCK = new Object();
// JMX domain prefix; set via the deprecated constructor or contextChange().
private String prefix;
// All mbeans known to this reporter, keyed by mbean name — including ones currently
// filtered out by mbeanPredicate (kept so a reconfigure can re-register them).
private final Map<String, KafkaMbean> mbeans = new HashMap<>();
// Filter deciding which mbean names are actually registered with JMX.
private Predicate<String> mbeanPredicate = s -> true;
public JmxReporter() {
this("");
}
/**
 * Create a JMX reporter that prefixes all metrics with the given string.
 * @deprecated Since 2.6.0. Use {@link JmxReporter#JmxReporter()}
 * Initialize JmxReporter with {@link JmxReporter#contextChange(MetricsContext)}
 * Populate prefix by adding _namespace/prefix key value pair to {@link MetricsContext}
 */
@Deprecated
public JmxReporter(String prefix) {
this.prefix = prefix != null ? prefix : "";
}
@Override
public void configure(Map<String, ?> configs) {
reconfigure(configs);
}
@Override
public Set<String> reconfigurableConfigs() {
return RECONFIGURABLE_CONFIGS;
}
@Override
public void validateReconfiguration(Map<String, ?> configs) throws ConfigException {
// Compiling the include/exclude patterns is the validation: it throws ConfigException
// on an invalid regular expression.
compilePredicate(configs);
}
@Override
public void reconfigure(Map<String, ?> configs) {
// Rebuild the filter, then (un)register every known mbean according to the new filter.
synchronized (LOCK) {
this.mbeanPredicate = JmxReporter.compilePredicate(configs);
mbeans.forEach((name, mbean) -> {
if (mbeanPredicate.test(name)) {
reregister(mbean);
} else {
unregister(mbean);
}
});
}
}
@Override
public void init(List<KafkaMetric> metrics) {
// Record all initial metrics first, then register the mbeans that pass the filter.
synchronized (LOCK) {
for (KafkaMetric metric : metrics)
addAttribute(metric);
mbeans.forEach((name, mbean) -> {
if (mbeanPredicate.test(name)) {
reregister(mbean);
}
});
}
}
public boolean containsMbean(String mbeanName) {
return mbeans.containsKey(mbeanName);
}
@Override
public void metricChange(KafkaMetric metric) {
// Adding/updating a metric attribute requires re-registering the owning mbean so
// JMX clients see the updated MBeanInfo.
synchronized (LOCK) {
String mbeanName = addAttribute(metric);
if (mbeanName != null && mbeanPredicate.test(mbeanName)) {
reregister(mbeans.get(mbeanName));
}
}
}
@Override
public void metricRemoval(KafkaMetric metric) {
synchronized (LOCK) {
MetricName metricName = metric.metricName();
String mBeanName = getMBeanName(prefix, metricName);
KafkaMbean mbean = removeAttribute(metric, mBeanName);
if (mbean != null) {
// Drop the mbean entirely when its last attribute goes away; otherwise
// re-register so the attribute list shrinks in MBeanInfo.
if (mbean.metrics.isEmpty()) {
unregister(mbean);
mbeans.remove(mBeanName);
} else if (mbeanPredicate.test(mBeanName))
reregister(mbean);
}
}
}
// Removes the metric's attribute from its owning mbean (if any) and returns that mbean.
private KafkaMbean removeAttribute(KafkaMetric metric, String mBeanName) {
MetricName metricName = metric.metricName();
KafkaMbean mbean = this.mbeans.get(mBeanName);
if (mbean != null)
mbean.removeAttribute(metricName.name());
return mbean;
}
// Records the metric as an attribute on its mbean (creating the mbean if needed) and
// returns the mbean name. Does NOT register with JMX — callers do that under LOCK.
private String addAttribute(KafkaMetric metric) {
try {
MetricName metricName = metric.metricName();
String mBeanName = getMBeanName(prefix, metricName);
if (!this.mbeans.containsKey(mBeanName))
mbeans.put(mBeanName, new KafkaMbean(mBeanName));
KafkaMbean mbean = this.mbeans.get(mBeanName);
mbean.setAttribute(metricName.name(), metric);
return mBeanName;
} catch (JMException e) {
throw new KafkaException("Error creating mbean attribute for metricName :" + metric.metricName(), e);
}
}
/**
 * @param prefix the JMX domain prefix to prepend
 * @param metricName the metric whose group and tags form the mbean name
 * @return standard JMX MBean name in the following format domainName:type=metricType,key1=val1,key2=val2
 */
static String getMBeanName(String prefix, MetricName metricName) {
StringBuilder mBeanName = new StringBuilder();
mBeanName.append(prefix);
mBeanName.append(":type=");
mBeanName.append(metricName.group());
for (Map.Entry<String, String> entry : metricName.tags().entrySet()) {
// Skip tags with an empty key or value — they would produce a malformed ObjectName.
if (entry.getKey().length() <= 0 || entry.getValue().length() <= 0)
continue;
mBeanName.append(",");
mBeanName.append(entry.getKey());
mBeanName.append("=");
mBeanName.append(Sanitizer.jmxSanitize(entry.getValue()));
}
return mBeanName.toString();
}
// Unregisters all mbeans from JMX. Note: entries remain in the mbeans map.
public void close() {
synchronized (LOCK) {
for (KafkaMbean mbean : this.mbeans.values())
unregister(mbean);
}
}
private void unregister(KafkaMbean mbean) {
MBeanServer server = ManagementFactory.getPlatformMBeanServer();
try {
if (server.isRegistered(mbean.name()))
server.unregisterMBean(mbean.name());
} catch (JMException e) {
throw new KafkaException("Error unregistering mbean", e);
}
}
// Unregister-then-register: the platform MBeanServer rejects duplicate registrations,
// so this is how an mbean's attribute list is refreshed.
private void reregister(KafkaMbean mbean) {
unregister(mbean);
try {
ManagementFactory.getPlatformMBeanServer().registerMBean(mbean, mbean.name());
} catch (JMException e) {
throw new KafkaException("Error registering mbean " + mbean.name(), e);
}
}
/**
 * A read-only DynamicMBean whose attributes are backed by live KafkaMetric values.
 * Mutation of the metrics map happens only under the outer LOCK.
 */
private static class KafkaMbean implements DynamicMBean {
private final ObjectName objectName;
// Attribute name -> metric supplying that attribute's value.
private final Map<String, KafkaMetric> metrics;
KafkaMbean(String mbeanName) throws MalformedObjectNameException {
this.metrics = new HashMap<>();
this.objectName = new ObjectName(mbeanName);
}
public ObjectName name() {
return objectName;
}
void setAttribute(String name, KafkaMetric metric) {
this.metrics.put(name, metric);
}
@Override
public Object getAttribute(String name) throws AttributeNotFoundException {
if (this.metrics.containsKey(name))
return this.metrics.get(name).metricValue();
else
throw new AttributeNotFoundException("Could not find attribute " + name);
}
@Override
public AttributeList getAttributes(String[] names) {
AttributeList list = new AttributeList();
for (String name : names) {
try {
list.add(new Attribute(name, getAttribute(name)));
} catch (Exception e) {
// Best-effort: a failing attribute is logged and skipped rather than
// failing the whole getAttributes() call.
log.warn("Error getting JMX attribute '{}'", name, e);
}
}
return list;
}
KafkaMetric removeAttribute(String name) {
return this.metrics.remove(name);
}
@Override
public MBeanInfo getMBeanInfo() {
MBeanAttributeInfo[] attrs = new MBeanAttributeInfo[metrics.size()];
int i = 0;
for (Map.Entry<String, KafkaMetric> entry : this.metrics.entrySet()) {
String attribute = entry.getKey();
KafkaMetric metric = entry.getValue();
// Attributes are advertised as readable, non-writable doubles (gauges may
// actually return other types at read time).
attrs[i] = new MBeanAttributeInfo(attribute,
double.class.getName(),
metric.metricName().description(),
true,
false,
false);
i += 1;
}
return new MBeanInfo(this.getClass().getName(), "", attrs, null, null, null);
}
@Override
public Object invoke(String name, Object[] params, String[] sig) {
// Operations are not supported on metric mbeans.
throw new UnsupportedOperationException("Set not allowed.");
}
@Override
public void setAttribute(Attribute attribute) {
throw new UnsupportedOperationException("Set not allowed.");
}
@Override
public AttributeList setAttributes(AttributeList list) {
throw new UnsupportedOperationException("Set not allowed.");
}
}
/**
 * Builds the mbean-name filter from the include/exclude configs (translating the
 * deprecated whitelist/blacklist aliases). A name passes if it matches the include
 * pattern and does not match the exclude pattern.
 * @throws ConfigException if either pattern is not a valid regular expression
 */
public static Predicate<String> compilePredicate(Map<String, ?> originalConfig) {
Map<String, ?> configs = ConfigUtils.translateDeprecatedConfigs(
originalConfig, new String[][]{{INCLUDE_CONFIG, INCLUDE_CONFIG_ALIAS},
{EXCLUDE_CONFIG, EXCLUDE_CONFIG_ALIAS}});
String include = (String) configs.get(INCLUDE_CONFIG);
String exclude = (String) configs.get(EXCLUDE_CONFIG);
if (include == null) {
include = DEFAULT_INCLUDE;
}
if (exclude == null) {
exclude = DEFAULT_EXCLUDE;
}
try {
Pattern includePattern = Pattern.compile(include);
Pattern excludePattern = Pattern.compile(exclude);
return s -> includePattern.matcher(s).matches()
&& !excludePattern.matcher(s).matches();
} catch (PatternSyntaxException e) {
// NOTE(review): the PatternSyntaxException cause is dropped and the message
// renders with a doubled dot ("metrics.jmx..") — preserved as-is from upstream.
throw new ConfigException("JMX filter for configuration" + METRICS_CONFIG_PREFIX
+ ".(include/exclude) is not a valid regular expression");
}
}
@Override
public void contextChange(MetricsContext metricsContext) {
String namespace = metricsContext.contextLabels().get(MetricsContext.NAMESPACE);
Objects.requireNonNull(namespace);
synchronized (LOCK) {
// The prefix feeds into every mbean name, so it may only change before any
// mbean has been created.
if (!mbeans.isEmpty()) {
throw new IllegalStateException("JMX MetricsContext can only be updated before JMX metrics are created");
}
// prevent prefix from getting reset back to empty for backwards compatibility
// with the deprecated JmxReporter(String prefix) constructor, in case contextChange gets called
// via one of the Metrics() constructor with a default empty MetricsContext()
if (namespace.isEmpty()) {
return;
}
prefix = namespace;
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/KafkaMetric.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.utils.Time;
/**
 * A {@link Metric} whose value is produced by either a {@link Measurable} or a
 * {@link Gauge} provider. Reads of the value and updates of the config are
 * synchronized on the lock object shared with the owning registry.
 */
public final class KafkaMetric implements Metric {

    private final Object lock;
    private final Time time;
    private final MetricValueProvider<?> metricValueProvider;
    private MetricName metricName;
    private MetricConfig config;

    // public for testing
    public KafkaMetric(Object lock, MetricName metricName, MetricValueProvider<?> valueProvider,
                       MetricConfig config, Time time) {
        // Only the two known provider kinds are supported; reject anything else up front.
        if (!(valueProvider instanceof Measurable) && !(valueProvider instanceof Gauge))
            throw new IllegalArgumentException("Unsupported metric value provider of class " + valueProvider.getClass());
        this.lock = lock;
        this.metricName = metricName;
        this.metricValueProvider = valueProvider;
        this.config = config;
        this.time = time;
    }

    /** @return the config currently associated with this metric */
    public MetricConfig config() {
        return this.config;
    }

    @Override
    public MetricName metricName() {
        return this.metricName;
    }

    /**
     * Reads the current value from the underlying provider at the current time,
     * under the shared lock. Measurable is checked before Gauge, matching the
     * constructor's validation order.
     */
    @Override
    public Object metricValue() {
        long nowMs = time.milliseconds();
        synchronized (this.lock) {
            MetricValueProvider<?> provider = this.metricValueProvider;
            if (provider instanceof Measurable)
                return ((Measurable) provider).measure(config, nowMs);
            if (provider instanceof Gauge)
                return ((Gauge<?>) provider).value(config, nowMs);
            throw new IllegalStateException("Not a valid metric: " + this.metricValueProvider.getClass());
        }
    }

    /**
     * @return the underlying provider as a {@link Measurable}
     * @throws IllegalStateException if the provider is a Gauge instead
     */
    public Measurable measurable() {
        if (!(this.metricValueProvider instanceof Measurable))
            throw new IllegalStateException("Not a measurable: " + this.metricValueProvider.getClass());
        return (Measurable) metricValueProvider;
    }

    // Measures the value at the given time; returns 0 for non-Measurable providers.
    double measurableValue(long timeMs) {
        synchronized (this.lock) {
            return (this.metricValueProvider instanceof Measurable)
                    ? ((Measurable) metricValueProvider).measure(config, timeMs)
                    : 0;
        }
    }

    /** Replaces this metric's config, under the shared lock. */
    public void config(MetricConfig config) {
        synchronized (lock) {
            this.config = config;
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/KafkaMetricsContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
 * An implementation of {@link MetricsContext} that holds the context labels required
 * by Kafka services and clients, always including the {@code _namespace} entry.
 */
public class KafkaMetricsContext implements MetricsContext {

    /**
     * Client or Service's contextLabels map.
     */
    private final Map<String, String> contextLabels = new HashMap<>();

    /**
     * Create a MetricsContext with only a namespace and no additional labels.
     * @param namespace value for the _namespace key
     */
    public KafkaMetricsContext(String namespace) {
        this(namespace, Collections.emptyMap());
    }

    /**
     * Create a MetricsContext with a namespace plus additional labels.
     * @param namespace value for the _namespace key
     * @param contextLabels additional entries to add to the context;
     *                      non-null values are converted via Object.toString()
     */
    public KafkaMetricsContext(String namespace, Map<String, ?> contextLabels) {
        this.contextLabels.put(MetricsContext.NAMESPACE, namespace);
        for (Map.Entry<String, ?> entry : contextLabels.entrySet()) {
            Object value = entry.getValue();
            // Null values are preserved as null rather than the string "null".
            this.contextLabels.put(entry.getKey(), value == null ? null : value.toString());
        }
    }

    /** @return a read-only view of the context labels */
    @Override
    public Map<String, String> contextLabels() {
        return Collections.unmodifiableMap(contextLabels);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/Measurable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
/**
 * A measurable quantity that can be registered as a metric
 */
public interface Measurable extends MetricValueProvider<Double> {
/**
 * Measure this quantity and return the result as a double
 * @param config The configuration for this metric
 * @param now The POSIX time in milliseconds the measurement is being taken
 * @return The measured value
 */
double measure(MetricConfig config, long now);
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/MeasurableStat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
/**
 * A MeasurableStat is a {@link Stat} that is also {@link Measurable} (i.e. can produce a single floating point value).
 * This is the interface used for most of the simple statistics such as {@link org.apache.kafka.common.metrics.stats.Avg},
 * {@link org.apache.kafka.common.metrics.stats.Max}, {@link org.apache.kafka.common.metrics.stats.CumulativeCount}, etc.
 * It declares no members of its own; it exists to combine the two contracts.
 */
public interface MeasurableStat extends Stat, Measurable {
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/MetricConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
* Configuration values for metrics
*/
/**
 * Configuration values for metrics: quota, sample count, event/time windows, tags
 * and recording level. Setters return {@code this} so calls can be chained.
 */
public class MetricConfig {

    // Defaults: no quota, 2 samples, unbounded event window, 30-second time window,
    // empty (insertion-ordered) tags, INFO recording level.
    private Quota quota = null;
    private int samples = 2;
    private long eventWindow = Long.MAX_VALUE;
    private long timeWindowMs = TimeUnit.SECONDS.toMillis(30);
    private Map<String, String> tags = new LinkedHashMap<>();
    private Sensor.RecordingLevel recordingLevel = Sensor.RecordingLevel.INFO;

    public MetricConfig() {
        // All fields take their defaults from the initializers above.
    }

    /** @return the quota, or null if none is set */
    public Quota quota() {
        return this.quota;
    }

    /** Sets the quota. */
    public MetricConfig quota(Quota quota) {
        this.quota = quota;
        return this;
    }

    /** @return the event window (number of events per sample window) */
    public long eventWindow() {
        return eventWindow;
    }

    /** Sets the event window. */
    public MetricConfig eventWindow(long window) {
        this.eventWindow = window;
        return this;
    }

    /** @return the time window length in milliseconds */
    public long timeWindowMs() {
        return timeWindowMs;
    }

    /** Sets the time window, converting the given duration to milliseconds. */
    public MetricConfig timeWindow(long window, TimeUnit unit) {
        this.timeWindowMs = unit.toMillis(window);
        return this;
    }

    /** @return the tags applied to metrics using this config (mutable, insertion-ordered) */
    public Map<String, String> tags() {
        return this.tags;
    }

    /** Replaces the tag map. */
    public MetricConfig tags(Map<String, String> tags) {
        this.tags = tags;
        return this;
    }

    /** @return the number of samples retained */
    public int samples() {
        return this.samples;
    }

    /**
     * Sets the number of samples retained.
     * @throws IllegalArgumentException if samples is less than 1
     */
    public MetricConfig samples(int samples) {
        if (samples < 1)
            throw new IllegalArgumentException("The number of samples must be at least 1.");
        this.samples = samples;
        return this;
    }

    /** @return the recording level */
    public Sensor.RecordingLevel recordLevel() {
        return this.recordingLevel;
    }

    /** Sets the recording level. */
    public MetricConfig recordLevel(Sensor.RecordingLevel recordingLevel) {
        this.recordingLevel = recordingLevel;
        return this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/MetricValueProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
/**
 * Super-interface for {@link Measurable} or {@link Gauge} that provides
 * metric values. It declares no members itself; implementations are read via
 * an instanceof check on the concrete sub-interface.
 * <p>
 * In the future for Java8 and above, {@link Gauge#value(MetricConfig, long)} will be
 * moved to this interface with a default implementation in {@link Measurable} that returns
 * {@link Measurable#measure(MetricConfig, long)}.
 * </p>
 */
public interface MetricValueProvider<T> { }
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/Metrics.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.MetricNameTemplate;
import org.apache.kafka.common.metrics.internals.MetricsUtils;
import org.apache.kafka.common.utils.KafkaThread;
import org.apache.kafka.common.utils.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import static java.util.Collections.emptyList;
/**
* A registry of sensors and metrics.
* <p>
* A metric is a named, numerical measurement. A sensor is a handle to record numerical measurements as they occur. Each
* Sensor has zero or more associated metrics. For example a Sensor might represent message sizes and we might associate
* with this sensor a metric for the average, maximum, or other statistics computed off the sequence of message sizes
* that are recorded by the sensor.
* <p>
* Usage looks something like this:
*
* <pre>
* // set up metrics:
* Metrics metrics = new Metrics(); // this is the global repository of metrics and sensors
* Sensor sensor = metrics.sensor("message-sizes");
* MetricName metricName = new MetricName("message-size-avg", "producer-metrics");
* sensor.add(metricName, new Avg());
* metricName = new MetricName("message-size-max", "producer-metrics");
* sensor.add(metricName, new Max());
*
* // as messages are sent we record the sizes
* sensor.record(messageSize);
* </pre>
*/
public class Metrics implements Closeable {
private final MetricConfig config;
private final ConcurrentMap<MetricName, KafkaMetric> metrics;
private final ConcurrentMap<String, Sensor> sensors;
private final ConcurrentMap<Sensor, List<Sensor>> childrenSensors;
private final List<MetricsReporter> reporters;
private final Time time;
private final ScheduledThreadPoolExecutor metricsScheduler;
private static final Logger log = LoggerFactory.getLogger(Metrics.class);
/**
* Create a metrics repository with no metric reporters and default configuration.
* Expiration of Sensors is disabled.
*/
public Metrics() {
this(new MetricConfig());
}
/**
* Create a metrics repository with no metric reporters and default configuration.
* Expiration of Sensors is disabled.
*/
public Metrics(Time time) {
this(new MetricConfig(), new ArrayList<>(0), time);
}
/**
 * Create a metrics repository with no metric reporters and the given default configuration.
 * Expiration of Sensors is disabled.
 *
 * @param defaultConfig The default config to use for all metrics that don't override their config
 * @param time The time instance to use with the metrics
 */
public Metrics(MetricConfig defaultConfig, Time time) {
this(defaultConfig, new ArrayList<>(0), time);
}
/**
 * Create a metrics repository with no reporters and the given default config. This config will be used for any
 * metric that doesn't override its own config. Expiration of Sensors is disabled.
 * Uses the system clock ({@code Time.SYSTEM}).
 * @param defaultConfig The default config to use for all metrics that don't override their config
 */
public Metrics(MetricConfig defaultConfig) {
this(defaultConfig, new ArrayList<>(0), Time.SYSTEM);
}
/**
 * Create a metrics repository with a default config and the given metric reporters.
 * Expiration of Sensors is disabled.
 * @param defaultConfig The default config
 * @param reporters The metrics reporters
 * @param time The time instance to use with the metrics
 */
public Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time) {
this(defaultConfig, reporters, time, false);
}
/**
 * Create a metrics repository with a default config, metric reporters and metric context.
 * Expiration of Sensors is disabled.
 * @param defaultConfig The default config
 * @param reporters The metrics reporters
 * @param time The time instance to use with the metrics
 * @param metricsContext The metricsContext to initialize metrics reporter with
 */
public Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time, MetricsContext metricsContext) {
this(defaultConfig, reporters, time, false, metricsContext);
}
/**
 * Create a metrics repository with a default config, given metric reporters and the ability to expire eligible sensors.
 * Reporters are initialized with an empty (all-default-label) metrics context.
 * @param defaultConfig The default config
 * @param reporters The metrics reporters
 * @param time The time instance to use with the metrics
 * @param enableExpiration true if the metrics instance can garbage collect inactive sensors, false otherwise
 */
public Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time, boolean enableExpiration) {
this(defaultConfig, reporters, time, enableExpiration, new KafkaMetricsContext(""));
}
/**
 * Create a metrics repository with a default config, given metric reporters, the ability to expire eligible sensors
 * and a MetricsContext. This is the designated constructor that all other overloads delegate to.
 * @param defaultConfig The default config
 * @param reporters The metrics reporters
 * @param time The time instance to use with the metrics
 * @param enableExpiration true if the metrics instance can garbage collect inactive sensors, false otherwise
 * @param metricsContext The metricsContext to initialize metrics reporter with
 */
public Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time, boolean enableExpiration,
MetricsContext metricsContext) {
this.config = defaultConfig;
this.sensors = new ConcurrentHashMap<>();
this.metrics = new ConcurrentHashMap<>();
this.childrenSensors = new ConcurrentHashMap<>();
this.reporters = Objects.requireNonNull(reporters);
this.time = time;
// Per the MetricsReporter contract, contextChange must be invoked before init.
for (MetricsReporter reporter : reporters) {
reporter.contextChange(metricsContext);
reporter.init(new ArrayList<>());
}
// Create the ThreadPoolExecutor only if expiration of Sensors is enabled.
if (enableExpiration) {
this.metricsScheduler = new ScheduledThreadPoolExecutor(1);
// Creating a daemon thread to not block shutdown
this.metricsScheduler.setThreadFactory(runnable -> KafkaThread.daemon("SensorExpiryThread", runnable));
// Sweep for expired sensors every 30 seconds.
this.metricsScheduler.scheduleAtFixedRate(new ExpireSensorTask(), 30, 30, TimeUnit.SECONDS);
} else {
this.metricsScheduler = null;
}
// Self-monitoring: expose the number of registered metrics as a metric itself.
addMetric(metricName("count", "kafka-metrics-count", "total number of registered metrics"),
(config, now) -> metrics.size());
}
/**
 * Create a MetricName with the given name, group, description and tags, plus default tags specified in the metric
 * configuration. A tag supplied in {@code tags} wins if the same key also appears in the default metric configuration.
 *
 * @param name The name of the metric
 * @param group logical group name of the metrics to which this metric belongs
 * @param description A human-readable description to include in the metric
 * @param tags additional key/value attributes of the metric
 */
public MetricName metricName(String name, String group, String description, Map<String, String> tags) {
    // Start from the config-level default tags, then let caller-supplied tags override them.
    final Map<String, String> mergedTags = new LinkedHashMap<>(config.tags());
    mergedTags.putAll(tags);
    return new MetricName(name, group, description, mergedTags);
}
/**
 * Create a MetricName with the given name, group, description, and default tags
 * specified in the metric configuration. Delegates to the four-argument overload with an empty tag map.
 *
 * @param name The name of the metric
 * @param group logical group name of the metrics to which this metric belongs
 * @param description A human-readable description to include in the metric
 * @return the resulting MetricName
 */
public MetricName metricName(String name, String group, String description) {
return metricName(name, group, description, new HashMap<>());
}
/**
 * Create a MetricName with the given name, group and default tags specified in the metric configuration.
 * Uses an empty description.
 *
 * @param name The name of the metric
 * @param group logical group name of the metrics to which this metric belongs
 * @return the resulting MetricName
 */
public MetricName metricName(String name, String group) {
return metricName(name, group, "", new HashMap<>());
}
/**
 * Create a MetricName with the given name, group, description, and keyValue as tags, plus default tags specified in the metric
 * configuration. Tag in keyValue takes precedence if the same tag key is specified in the default metric configuration.
 *
 * @param name The name of the metric
 * @param group logical group name of the metrics to which this metric belongs
 * @param description A human-readable description to include in the metric
 * @param keyValue additional key/value attributes of the metric (must come in pairs)
 * @return the resulting MetricName
 */
public MetricName metricName(String name, String group, String description, String... keyValue) {
return metricName(name, group, description, MetricsUtils.getTags(keyValue));
}
/**
 * Create a MetricName with the given name, group and tags, plus default tags specified in the metric
 * configuration. Tag in tags takes precedence if the same tag key is specified in the default metric configuration.
 * Uses an empty description.
 *
 * @param name The name of the metric
 * @param group logical group name of the metrics to which this metric belongs
 * @param tags key/value attributes of the metric
 * @return the resulting MetricName
 */
public MetricName metricName(String name, String group, Map<String, String> tags) {
return metricName(name, group, "", tags);
}
/**
 * Use the specified domain and metric name templates to generate an HTML table documenting the metrics. A separate table section
 * will be generated for each of the MBeans and the associated attributes. The MBean names are lexicographically sorted to
 * determine the order of these sections. This order is therefore dependent upon the order of the
 * tags in each {@link MetricNameTemplate}.
 *
 * @param domain the domain or prefix for the JMX MBean names; may not be null
 * @param allMetrics the collection of all {@link MetricNameTemplate} instances each describing one metric; may not be null
 * @return the string containing the HTML table; never null
 * @throws IllegalArgumentException if two templates resolve to the same attribute on the same MBean
 */
public static String toHtmlTable(String domain, Iterable<MetricNameTemplate> allMetrics) {
    // TreeMap (outer and inner) keeps both MBean sections and their attributes lexicographically sorted.
    Map<String, Map<String, String>> beansAndAttributes = new TreeMap<>();
    // A throwaway Metrics instance is needed only to resolve template names via metricName().
    try (Metrics metrics = new Metrics()) {
        for (MetricNameTemplate template : allMetrics) {
            // Render each tag as a "{tag}" placeholder so the table documents the name pattern.
            Map<String, String> tags = new LinkedHashMap<>();
            for (String s : template.tags()) {
                tags.put(s, "{" + s + "}");
            }
            MetricName metricName = metrics.metricName(template.name(), template.group(), template.description(), tags);
            String mBeanName = JmxReporter.getMBeanName(domain, metricName);
            // computeIfAbsent/putIfAbsent replace the original containsKey+put dance.
            Map<String, String> attrAndDesc = beansAndAttributes.computeIfAbsent(mBeanName, k -> new TreeMap<>());
            // A non-null prior value means the attribute was already defined for this MBean.
            if (attrAndDesc.putIfAbsent(template.name(), template.description()) != null) {
                throw new IllegalArgumentException("mBean '" + mBeanName + "' attribute '" + template.name() + "' is defined twice.");
            }
        }
    }
    StringBuilder b = new StringBuilder();
    b.append("<table class=\"data-table\"><tbody>\n");
    for (Entry<String, Map<String, String>> e : beansAndAttributes.entrySet()) {
        // Section header row carrying the MBean name.
        b.append("<tr>\n");
        b.append("<td colspan=3 class=\"mbeanName\" style=\"background-color:#ccc; font-weight: bold;\">");
        b.append(e.getKey());
        b.append("</td>");
        b.append("</tr>\n");
        b.append("<tr>\n");
        b.append("<th style=\"width: 90px\"></th>\n");
        b.append("<th>Attribute name</th>\n");
        b.append("<th>Description</th>\n");
        b.append("</tr>\n");
        // One row per attribute/description pair.
        for (Entry<String, String> e2 : e.getValue().entrySet()) {
            b.append("<tr>\n");
            b.append("<td></td>");
            b.append("<td>");
            b.append(e2.getKey());
            b.append("</td>");
            b.append("<td>");
            b.append(e2.getValue());
            b.append("</td>");
            b.append("</tr>\n");
        }
    }
    b.append("</tbody></table>");
    return b.toString();
}
/**
 * @return the default MetricConfig used for metrics and sensors that don't override their own config
 */
public MetricConfig config() {
return config;
}
/**
 * Get the sensor with the given name if it exists
 * @param name The name of the sensor (must not be null)
 * @return Return the sensor or null if no such sensor exists
 */
public Sensor getSensor(String name) {
return this.sensors.get(Objects.requireNonNull(name));
}
/**
 * Get or create a sensor with the given unique name and no parent sensors. This uses
 * a default recording level of INFO. Delegates to the fully-specified overload.
 * @param name The sensor name
 * @return The sensor
 */
public Sensor sensor(String name) {
return this.sensor(name, Sensor.RecordingLevel.INFO);
}
/**
 * Get or create a sensor with the given unique name and no parent sensors and with a given
 * recording level.
 * @param name The sensor name.
 * @param recordingLevel The recording level.
 * @return The sensor
 */
public Sensor sensor(String name, Sensor.RecordingLevel recordingLevel) {
return sensor(name, null, recordingLevel, (Sensor[]) null);
}
/**
 * Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will
 * receive every value recorded with this sensor. This uses a default recording level of INFO.
 * @param name The name of the sensor
 * @param parents The parent sensors
 * @return The sensor that is created
 */
public Sensor sensor(String name, Sensor... parents) {
return this.sensor(name, Sensor.RecordingLevel.INFO, parents);
}
/**
 * Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will
 * receive every value recorded with this sensor.
 * @param name The name of the sensor.
 * @param recordingLevel The recording level.
 * @param parents The parent sensors.
 * @return The sensor that is created
 */
public Sensor sensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents) {
return sensor(name, null, recordingLevel, parents);
}
/**
 * Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will
 * receive every value recorded with this sensor. This uses a default recording level of INFO.
 * @param name The name of the sensor
 * @param config A default configuration to use for this sensor for metrics that don't have their own config
 * @param parents The parent sensors
 * @return The sensor that is created
 */
public synchronized Sensor sensor(String name, MetricConfig config, Sensor... parents) {
return this.sensor(name, config, Sensor.RecordingLevel.INFO, parents);
}
/**
 * Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will
 * receive every value recorded with this sensor. Sensor expiration is effectively disabled
 * (expiration time of Long.MAX_VALUE).
 * @param name The name of the sensor
 * @param config A default configuration to use for this sensor for metrics that don't have their own config
 * @param recordingLevel The recording level.
 * @param parents The parent sensors
 * @return The sensor that is created
 */
public synchronized Sensor sensor(String name, MetricConfig config, Sensor.RecordingLevel recordingLevel, Sensor... parents) {
return sensor(name, config, Long.MAX_VALUE, recordingLevel, parents);
}
/**
 * Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will
 * receive every value recorded with this sensor.
 * @param name The name of the sensor
 * @param config A default configuration to use for this sensor for metrics that don't have their own config
 * @param inactiveSensorExpirationTimeSeconds If no value is recorded on the Sensor for this duration of time,
 *                                        it is eligible for removal
 * @param recordingLevel The recording level.
 * @param parents The parent sensors
 * @return The sensor that is created
 */
public synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel recordingLevel, Sensor... parents) {
    Sensor existing = getSensor(name);
    if (existing != null)
        return existing;
    // First caller for this name creates and registers the sensor.
    MetricConfig sensorConfig = config == null ? this.config : config;
    Sensor created = new Sensor(this, name, parents, sensorConfig, time, inactiveSensorExpirationTimeSeconds, recordingLevel);
    this.sensors.put(name, created);
    if (parents != null) {
        // Record the new sensor as a child of each parent so removal of a parent can cascade.
        for (Sensor parent : parents)
            childrenSensors.computeIfAbsent(parent, k -> new ArrayList<>()).add(created);
    }
    log.trace("Added sensor with name {}", name);
    return created;
}
/**
 * Get or create a sensor with the given unique name and zero or more parent sensors. All parent sensors will
 * receive every value recorded with this sensor. This uses a default recording level of INFO.
 * @param name The name of the sensor
 * @param config A default configuration to use for this sensor for metrics that don't have their own config
 * @param inactiveSensorExpirationTimeSeconds If no value is recorded on the Sensor for this duration of time,
 *                                        it is eligible for removal
 * @param parents The parent sensors
 * @return The sensor that is created
 */
public synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor... parents) {
return this.sensor(name, config, inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel.INFO, parents);
}
/**
 * Remove a sensor (if it exists), associated metrics and its children.
 *
 * @param name The name of the sensor to be removed
 */
public void removeSensor(String name) {
Sensor sensor = sensors.get(name);
if (sensor != null) {
List<Sensor> childSensors = null;
// Lock the sensor first, then the registry. NOTE(review): this presumably mirrors the
// lock order taken while recording on a sensor - confirm against Sensor to rule out deadlock.
synchronized (sensor) {
synchronized (this) {
// remove(name, sensor) guards against racing with a concurrent re-registration under the same name.
if (sensors.remove(name, sensor)) {
for (KafkaMetric metric : sensor.metrics())
removeMetric(metric.metricName());
log.trace("Removed sensor with name {}", name);
childSensors = childrenSensors.remove(sensor);
// Detach this sensor from each parent's child list so parents no longer reference it.
for (final Sensor parent : sensor.parents()) {
childrenSensors.getOrDefault(parent, emptyList()).remove(sensor);
}
}
}
}
// Recursively remove children outside the locks to keep the critical section small.
if (childSensors != null) {
for (Sensor childSensor : childSensors)
removeSensor(childSensor.name());
}
}
}
/**
 * Add a metric to monitor an object that implements measurable. This metric won't be associated with any sensor.
 * This is a way to expose existing values as metrics.
 *
 * This method is kept for binary compatibility purposes, it has the same behaviour as
 * {@link #addMetric(MetricName, MetricValueProvider)}.
 *
 * @param metricName The name of the metric
 * @param measurable The measurable that will be measured by this metric
 */
public void addMetric(MetricName metricName, Measurable measurable) {
addMetric(metricName, null, measurable);
}
/**
 * Add a metric to monitor an object that implements Measurable. This metric won't be associated with any sensor.
 * This is a way to expose existing values as metrics.
 *
 * This method is kept for binary compatibility purposes, it has the same behaviour as
 * {@link #addMetric(MetricName, MetricConfig, MetricValueProvider)}.
 *
 * @param metricName The name of the metric
 * @param config The configuration to use when measuring this measurable
 * @param measurable The measurable that will be measured by this metric
 */
public void addMetric(MetricName metricName, MetricConfig config, Measurable measurable) {
addMetric(metricName, config, (MetricValueProvider<?>) measurable);
}
/**
 * Add a metric to monitor an object that implements MetricValueProvider. This metric won't be associated with any
 * sensor. This is a way to expose existing values as metrics. User is expected to add any additional
 * synchronization to update and access metric values, if required.
 *
 * @param metricName The name of the metric; must not be null
 * @param config The configuration to use for this metric; falls back to the repository default when null
 * @param metricValueProvider The metric value provider associated with this metric; must not be null
 * @throws IllegalArgumentException if a metric with same name already exists.
 */
public void addMetric(MetricName metricName, MetricConfig config, MetricValueProvider<?> metricValueProvider) {
    KafkaMetric metric = new KafkaMetric(
        new Object(),
        Objects.requireNonNull(metricName),
        Objects.requireNonNull(metricValueProvider),
        config == null ? this.config : config,
        time);
    // registerMetric returns the previously-registered metric, if any; duplicates are an error here.
    if (registerMetric(metric) != null) {
        throw new IllegalArgumentException("A metric named '" + metricName + "' already exists, can't register another one.");
    }
}
/**
 * Add a metric to monitor an object that implements MetricValueProvider. This metric won't be associated with any
 * sensor. This is a way to expose existing values as metrics. User is expected to add any additional
 * synchronization to update and access metric values, if required.
 *
 * Uses the repository's default metric config.
 *
 * @param metricName The name of the metric
 * @param metricValueProvider The metric value provider associated with this metric
 */
public void addMetric(MetricName metricName, MetricValueProvider<?> metricValueProvider) {
addMetric(metricName, null, metricValueProvider);
}
/**
 * Create or get an existing metric to monitor an object that implements MetricValueProvider.
 * This metric won't be associated with any sensor. This is a way to expose existing values as metrics.
 * This method takes care of synchronisation while updating/accessing metrics by concurrent threads.
 *
 * @param metricName The name of the metric; must not be null
 * @param config The configuration to use for this metric; falls back to the repository default when null
 * @param metricValueProvider The metric value provider associated with this metric; must not be null
 * @return Existing KafkaMetric if already registered or else a newly created one
 */
public KafkaMetric addMetricIfAbsent(MetricName metricName, MetricConfig config, MetricValueProvider<?> metricValueProvider) {
    MetricConfig effectiveConfig = config == null ? this.config : config;
    KafkaMetric candidate = new KafkaMetric(new Object(),
        Objects.requireNonNull(metricName),
        Objects.requireNonNull(metricValueProvider),
        effectiveConfig,
        time);
    // If another metric won the registration race (or already existed), hand that one back.
    KafkaMetric existing = registerMetric(candidate);
    if (existing != null)
        return existing;
    return candidate;
}
/**
 * Remove a metric if it exists and return it. Return null otherwise. If a metric is removed, `metricRemoval`
 * will be invoked for each reporter.
 *
 * @param metricName The name of the metric
 * @return the removed `KafkaMetric` or null if no such metric exists
 */
public synchronized KafkaMetric removeMetric(MetricName metricName) {
    KafkaMetric removed = this.metrics.remove(metricName);
    if (removed == null)
        return null;
    // Notify every reporter; a misbehaving reporter must not prevent the others from seeing the removal.
    for (MetricsReporter reporter : reporters) {
        try {
            reporter.metricRemoval(removed);
        } catch (Exception e) {
            log.error("Error when removing metric from " + reporter.getClass().getName(), e);
        }
    }
    log.trace("Removed metric named {}", metricName);
    return removed;
}
/**
 * Add a MetricReporter. The reporter is immediately initialized with a snapshot of all
 * currently-registered metrics.
 *
 * NOTE(review): unlike the constructor path, this does not invoke contextChange on the
 * reporter before init - confirm whether late-added reporters are expected to receive a context.
 */
public synchronized void addReporter(MetricsReporter reporter) {
Objects.requireNonNull(reporter).init(new ArrayList<>(metrics.values()));
this.reporters.add(reporter);
}
/**
 * Remove a MetricReporter. The reporter is closed if (and only if) it was registered.
 */
public synchronized void removeReporter(MetricsReporter reporter) {
if (this.reporters.remove(reporter)) {
reporter.close();
}
}
/**
 * Register a metric if not present or return the already existing metric with the same name.
 * When a metric is newly registered, this method returns null.
 *
 * @param metric The KafkaMetric to register
 * @return the existing metric with the same name or null
 */
synchronized KafkaMetric registerMetric(KafkaMetric metric) {
    MetricName metricName = metric.metricName();
    KafkaMetric previous = this.metrics.putIfAbsent(metricName, metric);
    if (previous != null)
        return previous;
    // The metric is new: tell every reporter, isolating per-reporter failures so one bad
    // reporter cannot hide the metric from the rest.
    for (MetricsReporter reporter : reporters) {
        try {
            reporter.metricChange(metric);
        } catch (Exception e) {
            log.error("Error when registering metric on " + reporter.getClass().getName(), e);
        }
    }
    log.trace("Registered metric named {}", metricName);
    return null;
}
/**
 * Get all the metrics currently maintained indexed by metricName.
 * Returns the live internal map, not a copy - callers should treat it as read-only.
 */
public Map<MetricName, KafkaMetric> metrics() {
return this.metrics;
}
// Returns the live internal reporter list, not a copy - callers should treat it as read-only.
public List<MetricsReporter> reporters() {
return this.reporters;
}
// Look up a single metric by name, or null if it is not registered.
public KafkaMetric metric(MetricName metricName) {
return this.metrics.get(metricName);
}
/**
 * This iterates over every Sensor and triggers a removeSensor if it has expired.
 * Runs periodically on the daemon scheduler thread created in the constructor (every 30 seconds).
 * Package private for testing
 */
class ExpireSensorTask implements Runnable {
@Override
public void run() {
for (Map.Entry<String, Sensor> sensorEntry : sensors.entrySet()) {
// removeSensor also locks the sensor object. This is fine because synchronized is reentrant
// There is however a minor race condition here. Assume we have a parent sensor P and child sensor C.
// Calling record on C would cause a record on P as well.
// So expiration time for P == expiration time for C. If the record on P happens via C just after P is removed,
// that will cause C to also get removed.
// Since the expiration time is typically high it is not expected to be a significant concern
// and thus not necessary to optimize
synchronized (sensorEntry.getValue()) {
if (sensorEntry.getValue().hasExpired()) {
log.debug("Removing expired sensor {}", sensorEntry.getKey());
removeSensor(sensorEntry.getKey());
}
}
}
}
}
/* For testing use only: read-only view of the parent-to-children sensor mapping. */
Map<Sensor, List<Sensor>> childrenSensors() {
return Collections.unmodifiableMap(childrenSensors);
}
// Convenience overload: accepts tags as alternating key/value strings (must come in pairs).
public MetricName metricInstance(MetricNameTemplate template, String... keyValue) {
return metricInstance(template, MetricsUtils.getTags(keyValue));
}
/**
 * Create a MetricName from the given template, verifying that the supplied runtime tags combined
 * with the default config tags exactly match the tag keys declared by the template.
 *
 * @param template the metric name template
 * @param tags the runtime tag key/value pairs
 * @return the resolved MetricName
 * @throws IllegalArgumentException if the runtime tag keys do not match the template's tag keys
 */
public MetricName metricInstance(MetricNameTemplate template, Map<String, String> tags) {
    // check to make sure that the runtime defined tags contain all the template tags.
    Set<String> runtimeTagKeys = new HashSet<>(tags.keySet());
    runtimeTagKeys.addAll(config().tags().keySet());
    Set<String> templateTagKeys = template.tags();
    if (!runtimeTagKeys.equals(templateTagKeys)) {
        // String concatenation applies toString() implicitly; the explicit call was redundant
        // (and would NPE on a null set where plain concatenation prints "null").
        throw new IllegalArgumentException("For '" + template.name() + "', runtime-defined metric tags do not match the tags in the template. "
            + "Runtime = " + runtimeTagKeys + " Template = " + templateTagKeys);
    }
    return this.metricName(template.name(), template.group(), template.description(), tags);
}
/**
* Close this metrics repository.
*/
@Override
public void close() {
if (this.metricsScheduler != null) {
this.metricsScheduler.shutdown();
try {
this.metricsScheduler.awaitTermination(30, TimeUnit.SECONDS);
} catch (InterruptedException ex) {
// ignore and continue shutdown
Thread.currentThread().interrupt();
}
}
log.info("Metrics scheduler closed");
for (MetricsReporter reporter : reporters) {
try {
log.info("Closing reporter {}", reporter.getClass().getName());
reporter.close();
} catch (Exception e) {
log.error("Error when closing " + reporter.getClass().getName(), e);
}
}
log.info("Metrics reporters closed");
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/MetricsContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
 * MetricsContext encapsulates additional context labels about metrics exposed via a
 * {@link org.apache.kafka.common.metrics.MetricsReporter}
 *
 * <p>The {@link #contextLabels()} map provides following information:
 * <dl>
 * <dt>in all components</dt>
 * <dd>a {@code _namespace} field indicating the component exposing metrics
 * e.g. kafka.server, kafka.consumer.
 * The {@link JmxReporter} uses this as prefix for MBean names</dd>
 *
 * <dt>for clients and streams libraries</dt>
 * <dd>any freeform fields passed in via
 * client properties in the form of {@code metrics.context.<key>=<value>}</dd>
 *
 * <dt>for kafka brokers</dt>
 * <dd>kafka.broker.id, kafka.cluster.id</dd>
 *
 * <dt>for connect workers</dt>
 * <dd>connect.kafka.cluster.id, connect.group.id</dd>
 * </dl>
 */
@InterfaceStability.Evolving
public interface MetricsContext {
/* predefined fields */
String NAMESPACE = "_namespace"; // metrics namespace, formerly jmx prefix
/**
 * Returns the labels for this metrics context.
 *
 * @return the map of label keys and values; never null but possibly empty
 */
Map<String, String> contextLabels();
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/MetricsReporter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.Reconfigurable;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.config.ConfigException;
/**
 * A plugin interface to allow things to listen as new metrics are created so they can be reported.
 * <p>
 * Implement {@link org.apache.kafka.common.ClusterResourceListener} to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information.
 */
public interface MetricsReporter extends Reconfigurable, AutoCloseable {
/**
 * This is called when the reporter is first registered to initially register all existing metrics
 * @param metrics All currently existing metrics
 */
void init(List<KafkaMetric> metrics);
/**
 * This is called whenever a metric is updated or added
 * @param metric the metric that was added or updated
 */
void metricChange(KafkaMetric metric);
/**
 * This is called whenever a metric is removed
 * @param metric the metric that was removed
 */
void metricRemoval(KafkaMetric metric);
/**
 * Called when the metrics repository is closed.
 */
void close();
// default methods for backwards compatibility with reporters that only implement Configurable
default Set<String> reconfigurableConfigs() {
return Collections.emptySet();
}
default void validateReconfiguration(Map<String, ?> configs) throws ConfigException {
}
default void reconfigure(Map<String, ?> configs) {
}
/**
 * Sets the context labels for the service or library exposing metrics. This will be called before {@link #init(List)} and may be called anytime after that.
 *
 * @param metricsContext the metric context
 */
@InterfaceStability.Evolving
default void contextChange(MetricsContext metricsContext) {
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/Quota.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
/**
 * An upper or lower bound for metrics
 */
public final class Quota {
    private final boolean upper; // true = upper bound, false = lower bound
    private final double bound;  // the threshold value

    /**
     * @param bound the threshold value
     * @param upper true for an upper bound, false for a lower bound
     */
    public Quota(double bound, boolean upper) {
        this.bound = bound;
        this.upper = upper;
    }

    /** Create a quota that values must stay at or below. */
    public static Quota upperBound(double upperBound) {
        return new Quota(upperBound, true);
    }

    /** Create a quota that values must stay at or above. */
    public static Quota lowerBound(double lowerBound) {
        return new Quota(lowerBound, false);
    }

    public boolean isUpperBound() {
        return this.upper;
    }

    public double bound() {
        return this.bound;
    }

    /**
     * Check whether the given value satisfies this quota (bound inclusive).
     */
    public boolean acceptable(double value) {
        return (upper && value <= bound) || (!upper && value >= bound);
    }

    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        // Hash the full double bits rather than an int truncation: the old `(int) bound`
        // collapsed every fractional bound in [n, n+1) into one bucket. `bound + 0.0d`
        // normalizes -0.0 to 0.0 so the hash stays consistent with the ==-based equals
        // below (where -0.0 == 0.0 is true).
        result = prime * result + Double.hashCode(this.bound + 0.0d);
        result = prime * result + (this.upper ? 1 : 0);
        return result;
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (!(obj instanceof Quota))
            return false;
        Quota that = (Quota) obj;
        return (that.bound == this.bound) && (that.upper == this.upper);
    }

    @Override
    public String toString() {
        return (upper ? "upper=" : "lower=") + bound;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/QuotaViolationException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
import org.apache.kafka.common.KafkaException;
/**
 * Thrown when a sensor records a value that causes a metric to go outside the bounds configured as its quota
 */
public class QuotaViolationException extends KafkaException {
    private static final long serialVersionUID = 1L;

    private final KafkaMetric metric; // the metric whose quota was violated
    private final double value;       // the recorded value that tripped the quota
    private final double bound;       // the configured quota threshold

    public QuotaViolationException(KafkaMetric metric, double value, double bound) {
        this.bound = bound;
        this.value = value;
        this.metric = metric;
    }

    public KafkaMetric metric() {
        return this.metric;
    }

    public double value() {
        return this.value;
    }

    public double bound() {
        return this.bound;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder(getClass().getName());
        sb.append(": '")
          .append(metric.metricName())
          .append("' violated quota. Actual: ")
          .append(value)
          .append(", Threshold: ")
          .append(bound);
        return sb.toString();
    }

    /* Quota violations occur in normal operation; skip the expensive stack trace capture. */
    @Override
    public Throwable fillInStackTrace() {
        return this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/Sensor.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
import java.util.function.Supplier;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.CompoundStat.NamedMeasurable;
import org.apache.kafka.common.metrics.stats.TokenBucket;
import org.apache.kafka.common.utils.Time;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import static java.util.Arrays.asList;
import static java.util.Collections.unmodifiableList;
/**
* A sensor applies a continuous sequence of numerical values to a set of associated metrics. For example a sensor on
* message size would record a sequence of message sizes using the {@link #record(double)} api and would maintain a set
* of metrics about request sizes such as the average or max.
*/
public final class Sensor {
    // Registry that owns this sensor; metrics created via add() are registered with it.
    private final Metrics registry;
    private final String name;
    private final Sensor[] parents;
    // Stats registered on this sensor, each paired with a supplier of its effective config.
    private final List<StatAndConfig> stats;
    private final Map<MetricName, KafkaMetric> metrics;
    private final MetricConfig config;
    private final Time time;
    // Updated on every record(); volatile so hasExpired() reads a fresh value without locking.
    private volatile long lastRecordTime;
    private final long inactiveSensorExpirationTimeMs;
    private final Object metricLock;

    /**
     * Pairs a {@link Stat} with a supplier of the config to use when recording into it.
     * A supplier is used because the effective config of a registered metric may be
     * replaced after the stat is added (see {@code add(MetricName, MeasurableStat, MetricConfig)},
     * which supplies {@code metric::config}).
     */
    private static class StatAndConfig {
        private final Stat stat;
        private final Supplier<MetricConfig> configSupplier;

        StatAndConfig(Stat stat, Supplier<MetricConfig> configSupplier) {
            this.stat = stat;
            this.configSupplier = configSupplier;
        }

        public Stat stat() {
            return stat;
        }

        public MetricConfig config() {
            return configSupplier.get();
        }

        @Override
        public String toString() {
            return "StatAndConfig(stat=" + stat + ')';
        }
    }

    /**
     * Verbosity level of a sensor. A sensor records only when its own level is enabled
     * by the configured level (see {@link #shouldRecord(int)}).
     */
    public enum RecordingLevel {
        INFO(0, "INFO"), DEBUG(1, "DEBUG"), TRACE(2, "TRACE");

        // Lookup table from numeric id to enum constant, built once from values().
        private static final RecordingLevel[] ID_TO_TYPE;
        private static final int MIN_RECORDING_LEVEL_KEY = 0;
        public static final int MAX_RECORDING_LEVEL_KEY;

        static {
            int maxRL = -1;
            for (RecordingLevel level : RecordingLevel.values()) {
                maxRL = Math.max(maxRL, level.id);
            }
            RecordingLevel[] idToName = new RecordingLevel[maxRL + 1];
            for (RecordingLevel level : RecordingLevel.values()) {
                idToName[level.id] = level;
            }
            ID_TO_TYPE = idToName;
            MAX_RECORDING_LEVEL_KEY = maxRL;
        }

        /** an english description of the api--this is for debugging and can change */
        public final String name;

        /** the permanent and immutable id of an API--this can't change ever */
        public final short id;

        RecordingLevel(int id, String name) {
            this.id = (short) id;
            this.name = name;
        }

        /**
         * Look up a level by its numeric id.
         * @throws IllegalArgumentException if the id is outside the known range
         */
        public static RecordingLevel forId(int id) {
            if (id < MIN_RECORDING_LEVEL_KEY || id > MAX_RECORDING_LEVEL_KEY)
                throw new IllegalArgumentException(String.format("Unexpected RecordLevel id `%d`, it should be between `%d` " +
                    "and `%d` (inclusive)", id, MIN_RECORDING_LEVEL_KEY, MAX_RECORDING_LEVEL_KEY));
            return ID_TO_TYPE[id];
        }

        /** Case insensitive lookup by protocol name */
        public static RecordingLevel forName(String name) {
            return RecordingLevel.valueOf(name.toUpperCase(Locale.ROOT));
        }

        /**
         * Whether a sensor at this level should record, given the configured level id:
         * config INFO enables only INFO sensors, config DEBUG enables INFO and DEBUG,
         * and config TRACE enables everything.
         */
        public boolean shouldRecord(final int configId) {
            if (configId == INFO.id) {
                return this.id == INFO.id;
            } else if (configId == DEBUG.id) {
                return this.id == INFO.id || this.id == DEBUG.id;
            } else if (configId == TRACE.id) {
                return true;
            } else {
                throw new IllegalStateException("Did not recognize recording level " + configId);
            }
        }
    }

    private final RecordingLevel recordingLevel;

    /**
     * Package-private constructor; sensors are created through {@code Metrics}.
     *
     * @param registry the owning metrics registry
     * @param name unique sensor name, must be non-null
     * @param parents parent sensors that also receive every recorded value; may be null for none
     * @param config default config for stats added without an override
     * @param time time source for timestamps and expiration
     * @param inactiveSensorExpirationTimeSeconds idle time after which the sensor is eligible for removal
     * @param recordingLevel verbosity level of this sensor
     */
    Sensor(Metrics registry, String name, Sensor[] parents, MetricConfig config, Time time,
           long inactiveSensorExpirationTimeSeconds, RecordingLevel recordingLevel) {
        super();
        this.registry = registry;
        this.name = Objects.requireNonNull(name);
        this.parents = parents == null ? new Sensor[0] : parents;
        this.metrics = new LinkedHashMap<>();
        this.stats = new ArrayList<>();
        this.config = config;
        this.time = time;
        // Stored in milliseconds so hasExpired() can compare against time.milliseconds() directly.
        this.inactiveSensorExpirationTimeMs = TimeUnit.MILLISECONDS.convert(inactiveSensorExpirationTimeSeconds, TimeUnit.SECONDS);
        this.lastRecordTime = time.milliseconds();
        this.recordingLevel = recordingLevel;
        this.metricLock = new Object();
        checkForest(new HashSet<>());
    }

    /* Validate that this sensor doesn't end up referencing itself */
    // Note: the shared visited set rejects any sensor reachable twice through the parent
    // graph, so the parents must form a forest (no cycles and no shared ancestors).
    private void checkForest(Set<Sensor> sensors) {
        if (!sensors.add(this))
            throw new IllegalArgumentException("Circular dependency in sensors: " + name() + " is its own parent.");
        for (Sensor parent : parents)
            parent.checkForest(sensors);
    }

    /**
     * The name this sensor is registered with. This name will be unique among all registered sensors.
     */
    public String name() {
        return this.name;
    }

    // Read-only view of the parent sensors; used by tests and the registry.
    List<Sensor> parents() {
        return unmodifiableList(asList(parents));
    }

    /**
     * @return true if the sensor's record level indicates that the metric will be recorded, false otherwise
     */
    public boolean shouldRecord() {
        return this.recordingLevel.shouldRecord(config.recordLevel().id);
    }

    /**
     * Record an occurrence, this is just short-hand for {@link #record(double) record(1.0)}
     */
    public void record() {
        if (shouldRecord()) {
            recordInternal(1.0d, time.milliseconds(), true);
        }
    }

    /**
     * Record a value with this sensor
     * @param value The value to record
     * @throws QuotaViolationException if recording this value moves a metric beyond its configured maximum or minimum
     *         bound
     */
    public void record(double value) {
        if (shouldRecord()) {
            recordInternal(value, time.milliseconds(), true);
        }
    }

    /**
     * Record a value at a known time. This method is slightly faster than {@link #record(double)} since it will reuse
     * the time stamp.
     * @param value The value we are recording
     * @param timeMs The current POSIX time in milliseconds
     * @throws QuotaViolationException if recording this value moves a metric beyond its configured maximum or minimum
     *         bound
     */
    public void record(double value, long timeMs) {
        if (shouldRecord()) {
            recordInternal(value, timeMs, true);
        }
    }

    /**
     * Record a value at a known time. This method is slightly faster than {@link #record(double)} since it will reuse
     * the time stamp.
     * @param value The value we are recording
     * @param timeMs The current POSIX time in milliseconds
     * @param checkQuotas Indicate if quota must be enforced or not
     * @throws QuotaViolationException if recording this value moves a metric beyond its configured maximum or minimum
     *         bound
     */
    public void record(double value, long timeMs, boolean checkQuotas) {
        if (shouldRecord()) {
            recordInternal(value, timeMs, checkQuotas);
        }
    }

    /**
     * Shared recording path: update every stat (and optionally enforce quotas) under the
     * locks, then propagate the value to all parent sensors outside the locks.
     * Lock order is Sensor monitor first, then metricLock — see {@link #metricLock()}.
     */
    private void recordInternal(double value, long timeMs, boolean checkQuotas) {
        this.lastRecordTime = timeMs;
        synchronized (this) {
            synchronized (metricLock()) {
                // increment all the stats
                for (StatAndConfig statAndConfig : this.stats) {
                    statAndConfig.stat.record(statAndConfig.config(), value, timeMs);
                }
            }
            if (checkQuotas)
                checkQuotas(timeMs);
        }
        // Parents are recorded outside this sensor's monitor to keep the lock scope small.
        for (Sensor parent : parents)
            parent.record(value, timeMs, checkQuotas);
    }

    /**
     * Check if we have violated our quota for any metric that has a configured quota
     */
    public void checkQuotas() {
        checkQuotas(time.milliseconds());
    }

    /**
     * Check every registered metric with a configured quota against its bound at the given time.
     * @throws QuotaViolationException on the first violated metric
     */
    public void checkQuotas(long timeMs) {
        for (KafkaMetric metric : this.metrics.values()) {
            MetricConfig config = metric.config();
            if (config != null) {
                Quota quota = config.quota();
                if (quota != null) {
                    double value = metric.measurableValue(timeMs);
                    if (metric.measurable() instanceof TokenBucket) {
                        // For a TokenBucket measurable, a negative measured value signals violation.
                        if (value < 0) {
                            throw new QuotaViolationException(metric, value, quota.bound());
                        }
                    } else {
                        if (!quota.acceptable(value)) {
                            throw new QuotaViolationException(metric, value, quota.bound());
                        }
                    }
                }
            }
        }
    }

    /**
     * Register a compound statistic with this sensor with no config override
     * @param stat The stat to register
     * @return true if stat is added to sensor, false if sensor is expired
     */
    public boolean add(CompoundStat stat) {
        return add(stat, null);
    }

    /**
     * Register a compound statistic with this sensor which yields multiple measurable quantities (like a histogram)
     * @param stat The stat to register
     * @param config The configuration for this stat. If null then the stat will use the default configuration for this
     *        sensor.
     * @return true if stat is added to sensor, false if sensor is expired
     */
    public synchronized boolean add(CompoundStat stat, MetricConfig config) {
        if (hasExpired())
            return false;
        final MetricConfig statConfig = config == null ? this.config : config;
        stats.add(new StatAndConfig(Objects.requireNonNull(stat), () -> statConfig));
        Object lock = metricLock();
        // Register one KafkaMetric per named measurable produced by the compound stat.
        for (NamedMeasurable m : stat.stats()) {
            final KafkaMetric metric = new KafkaMetric(lock, m.name(), m.stat(), statConfig, time);
            if (!metrics.containsKey(metric.metricName())) {
                KafkaMetric existingMetric = registry.registerMetric(metric);
                if (existingMetric != null) {
                    throw new IllegalArgumentException("A metric named '" + metric.metricName() + "' already exists, can't register another one.");
                }
                metrics.put(metric.metricName(), metric);
            }
        }
        return true;
    }

    /**
     * Register a metric with this sensor
     * @param metricName The name of the metric
     * @param stat The statistic to keep
     * @return true if metric is added to sensor, false if sensor is expired
     */
    public boolean add(MetricName metricName, MeasurableStat stat) {
        return add(metricName, stat, null);
    }

    /**
     * Register a metric with this sensor
     *
     * @param metricName The name of the metric
     * @param stat The statistic to keep
     * @param config A special configuration for this metric. If null use the sensor default configuration.
     * @return true if metric is added to sensor, false if sensor is expired
     */
    public synchronized boolean add(final MetricName metricName, final MeasurableStat stat, final MetricConfig config) {
        if (hasExpired()) {
            return false;
        } else if (metrics.containsKey(metricName)) {
            // Already registered on this sensor: treat as a successful no-op.
            return true;
        } else {
            final MetricConfig statConfig = config == null ? this.config : config;
            final KafkaMetric metric = new KafkaMetric(
                metricLock(),
                Objects.requireNonNull(metricName),
                Objects.requireNonNull(stat),
                statConfig,
                time
            );
            KafkaMetric existingMetric = registry.registerMetric(metric);
            if (existingMetric != null) {
                throw new IllegalArgumentException("A metric named '" + metricName + "' already exists, can't register another one.");
            }
            metrics.put(metric.metricName(), metric);
            // The supplier reads metric::config so later config changes on the metric take effect.
            stats.add(new StatAndConfig(Objects.requireNonNull(stat), metric::config));
            return true;
        }
    }

    /**
     * Return if metrics were registered with this sensor.
     *
     * @return true if metrics were registered, false otherwise
     */
    public synchronized boolean hasMetrics() {
        return !metrics.isEmpty();
    }

    /**
     * Return true if the Sensor is eligible for removal due to inactivity.
     * false otherwise
     */
    public boolean hasExpired() {
        return (time.milliseconds() - this.lastRecordTime) > this.inactiveSensorExpirationTimeMs;
    }

    // Snapshot of the registered metrics; copied so callers never see concurrent mutation.
    synchronized List<KafkaMetric> metrics() {
        return unmodifiableList(new ArrayList<>(this.metrics.values()));
    }

    /**
     * KafkaMetrics of sensors which use SampledStat should be synchronized on the same lock
     * for sensor record and metric value read to allow concurrent reads and updates. For simplicity,
     * all sensors are synchronized on this object.
     * <p>
     * Sensor object is not used as a lock for reading metric value since metrics reporter is
     * invoked while holding Sensor and Metrics locks to report addition and removal of metrics
     * and synchronized reporters may deadlock if Sensor lock is used for reading metrics values.
     * Note that Sensor object itself is used as a lock to protect the access to stats and metrics
     * while recording metric values, adding and deleting sensors.
     * </p><p>
     * Locking order (assume all MetricsReporter methods may be synchronized):
     * <ul>
     *   <li>Sensor#add: Sensor -> Metrics -> MetricsReporter</li>
     *   <li>Metrics#removeSensor: Sensor -> Metrics -> MetricsReporter</li>
     *   <li>KafkaMetric#metricValue: MetricsReporter -> Sensor#metricLock</li>
     *   <li>Sensor#record: Sensor -> Sensor#metricLock</li>
     * </ul>
     * </p>
     */
    private Object metricLock() {
        return metricLock;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/Stat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics;
/**
 * A Stat is a quantity such as average, max, etc that is computed off the stream of updates to a sensor.
 * Implementations receive each recorded value via {@link #record(MetricConfig, double, long)},
 * which the sensor invokes for every update.
 */
public interface Stat {
    /**
     * Record the given value
     * @param config The configuration to use for this metric
     * @param value The value to record
     * @param timeMs The POSIX time in milliseconds this value occurred
     */
    void record(MetricConfig config, double value, long timeMs);
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
 * Provides the API used by Kafka clients to emit metrics which are then exposed using the {@link org.apache.kafka.common.metrics.MetricsReporter} interface.
*/
package org.apache.kafka.common.metrics; |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/internals/IntGaugeSuite.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.internals;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Gauge;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.MetricValueProvider;
import org.apache.kafka.common.metrics.Metrics;
import org.slf4j.Logger;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedDeque;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Function;
/**
* Manages a suite of integer Gauges.
*/
public final class IntGaugeSuite<K> implements AutoCloseable {
    /**
     * The log4j logger.
     */
    private final Logger log;

    /**
     * The name of this suite.
     */
    private final String suiteName;

    /**
     * The metrics object to use.
     */
    private final Metrics metrics;

    /**
     * A user-supplied callback which translates keys into unique metric names.
     */
    private final Function<K, MetricName> metricNameCalculator;

    /**
     * The maximum number of gauges that we will ever create at once.
     */
    private final int maxEntries;

    /**
     * A map from keys to gauges. Protected by the object monitor.
     */
    private final Map<K, StoredIntGauge> gauges;

    /**
     * The keys of gauges that can be removed, since their value is zero.
     * Protected by the object monitor.
     */
    private final Set<K> removable;

    /**
     * A lockless list of pending metrics additions and removals.
     */
    private final ConcurrentLinkedDeque<PendingMetricsChange> pending;

    /**
     * A lock which serializes modifications to metrics. This lock is not
     * required to create a new pending operation.
     */
    private final Lock modifyMetricsLock;

    /**
     * True if this suite is closed. Protected by the object monitor.
     */
    private boolean closed;

    /**
     * A pending metrics addition or removal.
     */
    private static class PendingMetricsChange {
        /**
         * The name of the metric to add or remove.
         */
        private final MetricName metricName;

        /**
         * In an addition, this field is the MetricValueProvider to add.
         * In a removal, this field is null.
         */
        private final MetricValueProvider<?> provider;

        PendingMetricsChange(MetricName metricName, MetricValueProvider<?> provider) {
            this.metricName = metricName;
            this.provider = provider;
        }
    }

    /**
     * The gauge object which we register with the metrics system.
     * Acts as a reference counter: value starts at 1 and is moved by
     * increment()/decrement(); all access is synchronized on the gauge itself.
     */
    private static class StoredIntGauge implements Gauge<Integer> {
        private final MetricName metricName;
        private int value;

        StoredIntGauge(MetricName metricName) {
            this.metricName = metricName;
            // A gauge is only created on first use, so it starts with one reference.
            this.value = 1;
        }

        /**
         * This callback is invoked when the metrics system retrieves the value of this gauge.
         */
        @Override
        public synchronized Integer value(MetricConfig config, long now) {
            return value;
        }

        synchronized int increment() {
            return ++value;
        }

        synchronized int decrement() {
            return --value;
        }

        synchronized int value() {
            return value;
        }
    }

    /**
     * Create a new gauge suite.
     *
     * @param log the logger to use
     * @param suiteName the name of this suite (used only for logging)
     * @param metrics the metrics registry to add/remove gauges in
     * @param metricNameCalculator maps each key to a unique metric name
     * @param maxEntries maximum number of gauges alive at once
     */
    public IntGaugeSuite(Logger log,
                         String suiteName,
                         Metrics metrics,
                         Function<K, MetricName> metricNameCalculator,
                         int maxEntries) {
        this.log = log;
        this.suiteName = suiteName;
        this.metrics = metrics;
        this.metricNameCalculator = metricNameCalculator;
        this.maxEntries = maxEntries;
        this.gauges = new HashMap<>(1);
        this.removable = new HashSet<>();
        this.pending = new ConcurrentLinkedDeque<>();
        this.modifyMetricsLock = new ReentrantLock();
        this.closed = false;
        log.trace("{}: created new gauge suite with maxEntries = {}.",
            suiteName, maxEntries);
    }

    /**
     * Add a reference to the gauge for the given key, creating the gauge if needed.
     * If the suite is full, one zero-valued gauge is evicted to make room; if none
     * is evictable, the increment is dropped (logged at debug).
     */
    public void increment(K key) {
        synchronized (this) {
            if (closed) {
                log.warn("{}: Attempted to increment {}, but the GaugeSuite was closed.",
                    suiteName, key.toString());
                return;
            }
            StoredIntGauge gauge = gauges.get(key);
            if (gauge != null) {
                // Fast path: increment the existing counter.
                if (gauge.increment() > 0) {
                    removable.remove(key);
                }
                return;
            }
            if (gauges.size() == maxEntries) {
                if (removable.isEmpty()) {
                    log.debug("{}: Attempted to increment {}, but there are already {} entries.",
                        suiteName, key.toString(), maxEntries);
                    return;
                }
                // Evict an arbitrary gauge whose value dropped to zero.
                Iterator<K> iter = removable.iterator();
                K keyToRemove = iter.next();
                iter.remove();
                MetricName metricNameToRemove = gauges.get(keyToRemove).metricName;
                gauges.remove(keyToRemove);
                pending.push(new PendingMetricsChange(metricNameToRemove, null));
                log.trace("{}: Removing the metric {}, which has a value of 0.",
                    suiteName, keyToRemove.toString());
            }
            MetricName metricNameToAdd = metricNameCalculator.apply(key);
            gauge = new StoredIntGauge(metricNameToAdd);
            gauges.put(key, gauge);
            pending.push(new PendingMetricsChange(metricNameToAdd, gauge));
            log.trace("{}: Adding a new metric {}.", suiteName, key.toString());
        }
        // Drop the object monitor and perform any pending metrics additions or removals.
        performPendingMetricsOperations();
    }

    /**
     * Perform pending metrics additions or removals.
     * It is important to perform them in order. For example, we don't want to try
     * to remove a metric that we haven't finished adding yet.
     */
    private void performPendingMetricsOperations() {
        modifyMetricsLock.lock();
        try {
            log.trace("{}: entering performPendingMetricsOperations", suiteName);
            // Changes are pushed to the front, so pollLast() drains them in FIFO order.
            for (PendingMetricsChange change = pending.pollLast();
                 change != null;
                 change = pending.pollLast()) {
                if (change.provider == null) {
                    if (log.isTraceEnabled()) {
                        log.trace("{}: removing metric {}", suiteName, change.metricName);
                    }
                    metrics.removeMetric(change.metricName);
                } else {
                    if (log.isTraceEnabled()) {
                        log.trace("{}: adding metric {}", suiteName, change.metricName);
                    }
                    metrics.addMetric(change.metricName, change.provider);
                }
            }
            log.trace("{}: leaving performPendingMetricsOperations", suiteName);
        } finally {
            modifyMetricsLock.unlock();
        }
    }

    /**
     * Drop a reference to the gauge for the given key. When the count reaches zero
     * the gauge becomes eligible for eviction; it is not removed immediately.
     */
    public synchronized void decrement(K key) {
        if (closed) {
            log.warn("{}: Attempted to decrement {}, but the gauge suite was closed.",
                suiteName, key.toString());
            return;
        }
        StoredIntGauge gauge = gauges.get(key);
        if (gauge == null) {
            log.debug("{}: Attempted to decrement {}, but no such metric was registered.",
                suiteName, key.toString());
        } else {
            int cur = gauge.decrement();
            log.trace("{}: Removed a reference to {}. {} reference(s) remaining.",
                suiteName, key.toString(), cur);
            if (cur <= 0) {
                removable.add(key);
            }
        }
    }

    /**
     * Close the suite, unregistering every gauge from the metrics registry.
     * Idempotent: a second close is a logged no-op.
     */
    @Override
    public synchronized void close() {
        if (closed) {
            log.trace("{}: gauge suite is already closed.", suiteName);
            return;
        }
        closed = true;
        int prevSize = 0;
        for (Iterator<StoredIntGauge> iter = gauges.values().iterator(); iter.hasNext(); ) {
            pending.push(new PendingMetricsChange(iter.next().metricName, null));
            prevSize++;
            iter.remove();
        }
        performPendingMetricsOperations();
        log.trace("{}: closed {} metric(s).", suiteName, prevSize);
    }

    /**
     * Get the maximum number of metrics this suite can create.
     */
    public int maxEntries() {
        return maxEntries;
    }

    // Visible for testing only.
    Metrics metrics() {
        return metrics;
    }

    /**
     * Return a map from keys to current reference counts.
     * Visible for testing only.
     */
    synchronized Map<K, Integer> values() {
        HashMap<K, Integer> values = new HashMap<>();
        for (Map.Entry<K, StoredIntGauge> entry : gauges.entrySet()) {
            values.put(entry.getKey(), entry.getValue().value());
        }
        return values;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/internals/MetricsUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.internals;
import org.apache.kafka.common.metrics.Metrics;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
/**
 * Static helpers shared by the metrics implementation.
 */
public class MetricsUtils {

    // Utility class: all members are static, so prevent instantiation.
    private MetricsUtils() {
    }

    /**
     * Converts the provided time from milliseconds to the requested
     * time unit.
     *
     * @param timeMs the duration in milliseconds
     * @param unit the target time unit
     * @return the duration expressed in {@code unit}; returned as a double so
     *         sub-unit precision is preserved (e.g. 1500 ms -> 1.5 SECONDS)
     * @throws IllegalStateException if the unit is not one of the known constants
     */
    public static double convert(long timeMs, TimeUnit unit) {
        switch (unit) {
            case NANOSECONDS:
                return timeMs * 1000.0 * 1000.0;
            case MICROSECONDS:
                return timeMs * 1000.0;
            case MILLISECONDS:
                return timeMs;
            case SECONDS:
                return timeMs / 1000.0;
            case MINUTES:
                return timeMs / (60.0 * 1000.0);
            case HOURS:
                return timeMs / (60.0 * 60.0 * 1000.0);
            case DAYS:
                return timeMs / (24.0 * 60.0 * 60.0 * 1000.0);
            default:
                throw new IllegalStateException("Unknown unit: " + unit);
        }
    }

    /**
     * Create a set of tags using the supplied key and value pairs. The order of the tags will be kept.
     *
     * @param keyValue the key and value pairs for the tags; must be an even number
     * @return the map of tags that can be supplied to the {@link Metrics} methods; never null
     * @throws IllegalArgumentException if an odd number of arguments is supplied
     */
    public static Map<String, String> getTags(String... keyValue) {
        if ((keyValue.length % 2) != 0)
            throw new IllegalArgumentException("keyValue needs to be specified in pairs");
        // LinkedHashMap preserves the caller's pair ordering.
        Map<String, String> tags = new LinkedHashMap<>(keyValue.length / 2);
        for (int i = 0; i < keyValue.length; i += 2)
            tags.put(keyValue[i], keyValue[i + 1]);
        return tags;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/Avg.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import java.util.List;
import org.apache.kafka.common.metrics.MetricConfig;
/**
* A {@link SampledStat} that maintains a simple average over its samples.
*/
/**
 * A {@link SampledStat} that maintains a simple average over its samples:
 * each sample accumulates the raw sum, and the final value is the total sum
 * divided by the total number of recorded events across all samples.
 */
public class Avg extends SampledStat {

    /** Creates an average whose samples start from zero. */
    public Avg() {
        super(0.0);
    }

    @Override
    protected void update(Sample sample, MetricConfig config, double value, long now) {
        // Only the running sum is kept here; the division happens in combine().
        sample.value = sample.value + value;
    }

    @Override
    public double combine(List<Sample> samples, MetricConfig config, long now) {
        double sum = 0.0;
        long events = 0;
        for (Sample sample : samples) {
            sum += sample.value;
            events += sample.eventCount;
        }
        // No recorded events means the average is undefined.
        if (events == 0)
            return Double.NaN;
        return sum / events;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/CumulativeCount.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import org.apache.kafka.common.metrics.MetricConfig;
/**
* A non-sampled version of {@link WindowedCount} maintained over all time.
*
* This is a special kind of {@link CumulativeSum} that always records {@code 1} instead of the provided value.
* In other words, it counts the number of
* {@link CumulativeCount#record(MetricConfig, double, long)} invocations,
* instead of summing the recorded values.
*/
public class CumulativeCount extends CumulativeSum {
    // Always records 1 regardless of the supplied value, so the cumulative total
    // counts record() invocations rather than summing the recorded values.
    @Override
    public void record(final MetricConfig config, final double value, final long timeMs) {
        super.record(config, 1, timeMs);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/CumulativeSum.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import org.apache.kafka.common.metrics.MeasurableStat;
import org.apache.kafka.common.metrics.MetricConfig;
/**
* An non-sampled cumulative total maintained over all time.
* This is a non-sampled version of {@link WindowedSum}.
*
* See also {@link CumulativeCount} if you just want to increment the value by 1 on each recording.
*/
/**
 * A non-sampled cumulative total maintained over all time: every recorded
 * value is added to a running sum, and {@code measure} reports that sum.
 *
 * See also {@link CumulativeCount} if you just want to increment the value by 1 on each recording.
 */
public class CumulativeSum implements MeasurableStat {

    // Running total of everything recorded so far.
    private double total;

    /** Creates a sum starting at zero. */
    public CumulativeSum() {
        this(0.0);
    }

    /** Creates a sum starting at the given initial value. */
    public CumulativeSum(double value) {
        total = value;
    }

    @Override
    public void record(MetricConfig config, double value, long now) {
        total = total + value;
    }

    @Override
    public double measure(MetricConfig config, long now) {
        return total;
    }

    @Override
    public String toString() {
        return "CumulativeSum(total=" + total + ")";
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/Frequencies.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.CompoundStat;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.stats.Histogram.BinScheme;
import org.apache.kafka.common.metrics.stats.Histogram.ConstantBinScheme;
import java.util.ArrayList;
import java.util.List;
/**
* A {@link CompoundStat} that represents a normalized distribution with a {@link Frequency} metric for each
* bucketed value. The values of the {@link Frequency} metrics specify the frequency of the center value appearing
* relative to the total number of values recorded.
* <p>
* For example, consider a component that records failure or success of an operation using boolean values, with
 * one metric to capture the percentage of operations that failed and another to capture the percentage of operations
* that succeeded.
* <p>
 * This can be accomplished by creating a {@link org.apache.kafka.common.metrics.Sensor Sensor} to record the values,
* with 0.0 for false and 1.0 for true. Then, create a single {@link Frequencies} object that has two
* {@link Frequency} metrics: one centered around 0.0 and another centered around 1.0. The {@link Frequencies}
* object is a {@link CompoundStat}, and so it can be {@link org.apache.kafka.common.metrics.Sensor#add(CompoundStat)
* added directly to a Sensor} so the metrics are created automatically.
*/
public class Frequencies extends SampledStat implements CompoundStat {

    /**
     * Create a Frequencies instance with metrics for the frequency of a boolean sensor that records 0.0 for
     * false and 1.0 for true.
     *
     * @param falseMetricName the name of the metric capturing the frequency of failures; may be null if not needed
     * @param trueMetricName the name of the metric capturing the frequency of successes; may be null if not needed
     * @return the Frequencies instance; never null
     * @throws IllegalArgumentException if both {@code falseMetricName} and {@code trueMetricName} are null
     */
    public static Frequencies forBooleanValues(MetricName falseMetricName, MetricName trueMetricName) {
        List<Frequency> frequencies = new ArrayList<>();
        if (falseMetricName != null) {
            frequencies.add(new Frequency(falseMetricName, 0.0));
        }
        if (trueMetricName != null) {
            frequencies.add(new Frequency(trueMetricName, 1.0));
        }
        if (frequencies.isEmpty()) {
            throw new IllegalArgumentException("Must specify at least one metric name");
        }
        Frequency[] frequencyArray = frequencies.toArray(new Frequency[0]);
        // Two buckets centered on 0.0 and 1.0 cover the entire boolean value range.
        return new Frequencies(2, 0.0, 1.0, frequencyArray);
    }

    /** The Frequency metrics to report; at most one per histogram bucket. */
    private final Frequency[] frequencies;
    /** Maps recorded values to histogram bins; shared by every sample's histogram. */
    private final BinScheme binScheme;

    /**
     * Create a Frequencies that captures the values in the specified range into the given number of buckets,
     * where the buckets are centered around the minimum, maximum, and intermediate values.
     *
     * @param buckets the number of buckets; must be at least 1
     * @param min the minimum value to be captured
     * @param max the maximum value to be captured
     * @param frequencies the list of {@link Frequency} metrics, which at most should be one per bucket centered
     *                    on the bucket's value, though not every bucket needs to correspond to a metric if the
     *                    value is not needed
     * @throws IllegalArgumentException if any of the {@link Frequency} objects do not have a
     *                                  {@link Frequency#centerValue() center value} within the specified range
     */
    public Frequencies(int buckets, double min, double max, Frequency... frequencies) {
        super(0.0); // initial value is unused by this implementation
        if (max < min) {
            throw new IllegalArgumentException("The maximum value " + max
                    + " must be greater than the minimum value " + min);
        }
        if (buckets < 1) {
            throw new IllegalArgumentException("Must be at least 1 bucket");
        }
        if (buckets < frequencies.length) {
            throw new IllegalArgumentException("More frequencies than buckets");
        }
        this.frequencies = frequencies;
        for (Frequency freq : frequencies) {
            if (min > freq.centerValue() || max < freq.centerValue()) {
                throw new IllegalArgumentException("The frequency centered at '" + freq.centerValue()
                        + "' is not within the range [" + min + "," + max + "]");
            }
        }
        // Widen the scheme by half a bucket on each side so that min and max fall at the *centers*
        // of the first and last buckets rather than at their edges.
        // NOTE(review): when buckets == 1 this divides by zero, producing an infinite width; the
        // ConstantBinScheme constructor rejects a single bin anyway — confirm intended behavior.
        double halfBucketWidth = (max - min) / (buckets - 1) / 2.0;
        this.binScheme = new ConstantBinScheme(buckets, min - halfBucketWidth, max + halfBucketWidth);
    }

    /**
     * Expose one {@link Measurable} per configured {@link Frequency}, each reporting the relative
     * frequency of values falling into the bucket around that frequency's center value.
     */
    @Override
    public List<NamedMeasurable> stats() {
        List<NamedMeasurable> ms = new ArrayList<>(frequencies.length);
        for (Frequency frequency : frequencies) {
            final double center = frequency.centerValue();
            // Each metric lazily computes its bucket's relative frequency at read time.
            ms.add(new NamedMeasurable(frequency.name(), new Measurable() {
                public double measure(MetricConfig config, long now) {
                    return frequency(config, now, center);
                }
            }));
        }
        return ms;
    }

    /**
     * Return the computed frequency describing the number of occurrences of the values in the bucket for the given
     * center point, relative to the total number of occurrences in the samples.
     *
     * @param config the metric configuration
     * @param now the current time in milliseconds
     * @param centerValue the value corresponding to the center point of the bucket
     * @return the frequency of the values in the bucket relative to the total number of samples;
     *         0.0 if nothing has been recorded
     */
    public double frequency(MetricConfig config, long now, double centerValue) {
        // Drop fully-expired windows first so stale counts do not skew the ratio.
        purgeObsoleteSamples(config, now);
        long totalCount = 0;
        for (Sample sample : samples) {
            totalCount += sample.eventCount;
        }
        if (totalCount == 0) {
            return 0.0d;
        }
        // Add up all of the counts in the bin corresponding to the center value
        float count = 0.0f;
        int binNum = binScheme.toBin(centerValue);
        for (Sample s : samples) {
            HistogramSample sample = (HistogramSample) s;
            float[] hist = sample.histogram.counts();
            count += hist[binNum];
        }
        // Compute the ratio of counts to total counts
        return count / (double) totalCount;
    }

    /** Total number of events recorded across all retained samples. */
    double totalCount() {
        long count = 0;
        for (Sample sample : samples) {
            count += sample.eventCount;
        }
        return count;
    }

    /**
     * {@inheritDoc}
     * <p>
     * The scalar measurement of this compound stat is the total event count across all samples.
     */
    @Override
    public double combine(List<Sample> samples, MetricConfig config, long now) {
        return totalCount();
    }

    // Each sample carries its own histogram so per-window counts expire independently.
    @Override
    protected HistogramSample newSample(long timeMs) {
        return new HistogramSample(binScheme, timeMs);
    }

    @Override
    protected void update(Sample sample, MetricConfig config, double value, long timeMs) {
        HistogramSample hist = (HistogramSample) sample;
        hist.histogram.record(value);
    }

    /** A {@link SampledStat.Sample} augmented with a histogram of the values recorded in its window. */
    private static class HistogramSample extends SampledStat.Sample {
        private final Histogram histogram;

        private HistogramSample(BinScheme scheme, long now) {
            super(0.0, now);
            histogram = new Histogram(scheme);
        }

        @Override
        public void reset(long now) {
            super.reset(now);
            histogram.clear();
        }
    }
}
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/Frequency.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import org.apache.kafka.common.MetricName;
/**
 * An immutable definition of a single frequency metric used within a {@link Frequencies}
 * compound statistic: a metric name paired with the center value of the bucket it reports.
 */
public class Frequency {

    private final MetricName name;
    private final double centerValue;

    /**
     * Create an instance with the given name and center point value.
     *
     * @param name the name of the frequency metric; may not be null
     * @param centerValue the value identifying the {@link Frequencies} bucket to be reported
     */
    public Frequency(MetricName name, double centerValue) {
        this.name = name;
        this.centerValue = centerValue;
    }

    /**
     * Get the name of this metric.
     *
     * @return the metric name; never null
     */
    public MetricName name() {
        return name;
    }

    /**
     * Get the value of this metric's center point.
     *
     * @return the center point value
     */
    public double centerValue() {
        return centerValue;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("Frequency(");
        sb.append("name=").append(name)
          .append(", centerValue=").append(centerValue)
          .append(')');
        return sb.toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/Histogram.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import java.util.Arrays;
/**
 * A fixed-size histogram of float counts over bins defined by a {@link BinScheme}.
 * Recording a value increments the count of the bin the scheme maps it to; quantiles are
 * estimated by scanning the cumulative bin counts.
 */
public class Histogram {

    private final BinScheme binScheme;
    private final float[] hist;
    private double count;

    public Histogram(BinScheme binScheme) {
        this.binScheme = binScheme;
        this.hist = new float[binScheme.bins()];
        this.count = 0.0f;
    }

    /** Record one occurrence of the given value in the bin the scheme assigns it to. */
    public void record(double value) {
        int bin = binScheme.toBin(value);
        hist[bin] += 1.0f;
        count += 1.0d;
    }

    /**
     * Estimate the value at the given quantile.
     *
     * @param quantile the quantile, normally in [0, 1]
     * @return NaN when nothing has been recorded; positive/negative infinity for quantiles
     *         above 1 or below 0; otherwise the value at the upper end of the first bin whose
     *         cumulative fraction of counts exceeds the quantile
     */
    public double value(double quantile) {
        if (count == 0.0d)
            return Double.NaN;
        if (quantile > 1.00d)
            return Float.POSITIVE_INFINITY;
        if (quantile < 0.00d)
            return Float.NEGATIVE_INFINITY;
        float cumulative = 0.0f;
        float target = (float) quantile;
        int lastBin = hist.length - 1;
        for (int bin = 0; bin < lastBin; bin++) {
            cumulative += hist[bin];
            if (cumulative / count > target)
                return binScheme.fromBin(bin);
        }
        // Everything remaining falls in the final bin.
        return binScheme.fromBin(lastBin);
    }

    /** The raw per-bin counts; the returned array is the live internal buffer. */
    public float[] counts() {
        return hist;
    }

    /** Reset all bin counts and the total count to zero. */
    public void clear() {
        Arrays.fill(hist, 0.0f);
        count = 0;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("{");
        int lastBin = this.hist.length - 1;
        for (int bin = 0; bin < lastBin; bin++) {
            sb.append(String.format("%.10f", binScheme.fromBin(bin)))
              .append(':')
              .append(String.format("%.0f", this.hist[bin]))
              .append(',');
        }
        // The final bin is labeled with positive infinity: it captures all remaining values.
        sb.append(Float.POSITIVE_INFINITY)
          .append(':')
          .append(String.format("%.0f", this.hist[lastBin]))
          .append('}');
        return sb.toString();
    }

    /**
     * An algorithm for determining the bin in which a value is to be placed as well as calculating the upper end
     * of each bin.
     */
    public interface BinScheme {

        /**
         * Get the number of bins.
         *
         * @return the number of bins
         */
        int bins();

        /**
         * Determine the 0-based bin number in which the supplied value should be placed.
         *
         * @param value the value
         * @return the 0-based index of the bin
         */
        int toBin(double value);

        /**
         * Determine the value at the upper range of the specified bin.
         *
         * @param bin the 0-based bin number
         * @return the value at the upper end of the bin; or {@link Float#NEGATIVE_INFINITY negative infinity}
         *         if the bin number is negative or {@link Float#POSITIVE_INFINITY positive infinity} if the 0-based
         *         bin number is greater than or equal to the {@link #bins() number of bins}.
         */
        double fromBin(int bin);
    }

    /**
     * A scheme for calculating the bins where the width of each bin is a constant determined by the range of values
     * and the number of bins.
     */
    public static class ConstantBinScheme implements BinScheme {
        private static final int MIN_BIN_NUMBER = 0;
        private final double min;
        private final double max;
        private final int bins;
        private final double bucketWidth;
        private final int maxBinNumber;

        /**
         * Create a bin scheme with the specified number of bins that all have the same width.
         *
         * @param bins the number of bins; must be at least 2
         * @param min the minimum value to be counted in the bins
         * @param max the maximum value to be counted in the bins
         */
        public ConstantBinScheme(int bins, double min, double max) {
            if (bins < 2)
                throw new IllegalArgumentException("Must have at least 2 bins.");
            this.min = min;
            this.max = max;
            this.bins = bins;
            this.bucketWidth = (max - min) / bins;
            this.maxBinNumber = bins - 1;
        }

        public int bins() {
            return bins;
        }

        public double fromBin(int b) {
            if (b < MIN_BIN_NUMBER)
                return Float.NEGATIVE_INFINITY;
            if (b > maxBinNumber)
                return Float.POSITIVE_INFINITY;
            return min + b * bucketWidth;
        }

        public int toBin(double x) {
            // Out-of-range values are clamped into the first or last bin.
            int binNumber = (int) ((x - min) / bucketWidth);
            return binNumber < MIN_BIN_NUMBER ? MIN_BIN_NUMBER : Math.min(binNumber, maxBinNumber);
        }
    }

    /**
     * A scheme for calculating the bins where the width of each bin is one more than the previous bin, and therefore
     * the bin widths are increasing at a linear rate. However, the bin widths are scaled such that the specified range
     * of values will all fit within the bins (e.g., the upper range of the last bin is equal to the maximum value).
     */
    public static class LinearBinScheme implements BinScheme {
        private final int bins;
        private final double max;
        private final double scale;

        /**
         * Create a linear bin scheme with the specified number of bins and the maximum value to be counted in the bins.
         *
         * @param numBins the number of bins; must be at least 2
         * @param max the maximum value to be counted in the bins
         */
        public LinearBinScheme(int numBins, double max) {
            if (numBins < 2)
                throw new IllegalArgumentException("Must have at least 2 bins.");
            this.bins = numBins;
            this.max = max;
            // The bin widths form the series 0, 1, 2, ..., numBins-1 before scaling, whose sum
            // is numBins * (numBins - 1) / 2; scale so the upper edge of the last bin equals max.
            double denom = numBins * (numBins - 1.0) / 2.0;
            this.scale = max / denom;
        }

        public int bins() {
            return bins;
        }

        public double fromBin(int b) {
            if (b > bins - 1)
                return Float.POSITIVE_INFINITY;
            if (b < 0.0000d)
                return Float.NEGATIVE_INFINITY;
            return scale * (b * (b + 1.0)) / 2.0;
        }

        public int toBin(double x) {
            if (x < 0.0d)
                throw new IllegalArgumentException("Values less than 0.0 not accepted.");
            if (x > max)
                return bins - 1;
            // Invert the triangular-number bin-edge formula via the quadratic formula.
            return (int) (-0.5 + 0.5 * Math.sqrt(1.0 + 8.0 * x / scale));
        }
    }
}
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/Max.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import java.util.List;
import org.apache.kafka.common.metrics.MetricConfig;
/**
 * A {@link SampledStat} that gives the max over its samples.
 */
public final class Max extends SampledStat {

    public Max() {
        // Negative infinity guarantees any recorded value replaces the initial value.
        super(Double.NEGATIVE_INFINITY);
    }

    @Override
    protected void update(Sample sample, MetricConfig config, double value, long now) {
        sample.value = Math.max(value, sample.value);
    }

    @Override
    public double combine(List<Sample> samples, MetricConfig config, long now) {
        long eventTotal = 0;
        double best = Double.NEGATIVE_INFINITY;
        for (Sample s : samples) {
            best = Math.max(s.value, best);
            eventTotal += s.eventCount;
        }
        // With no recorded events there is no meaningful max, so report NaN.
        return eventTotal == 0 ? Double.NaN : best;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/Meter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.CompoundStat;
import org.apache.kafka.common.metrics.MetricConfig;
/**
 * A compound stat that includes a rate metric and a cumulative total metric.
 */
public class Meter implements CompoundStat {

    private final MetricName rateMetricName;
    private final MetricName totalMetricName;
    private final Rate rate;
    private final CumulativeSum total;

    /**
     * Construct a Meter with seconds as time unit
     */
    public Meter(MetricName rateMetricName, MetricName totalMetricName) {
        this(TimeUnit.SECONDS, new WindowedSum(), rateMetricName, totalMetricName);
    }

    /**
     * Construct a Meter with provided time unit
     */
    public Meter(TimeUnit unit, MetricName rateMetricName, MetricName totalMetricName) {
        this(unit, new WindowedSum(), rateMetricName, totalMetricName);
    }

    /**
     * Construct a Meter with seconds as time unit
     */
    public Meter(SampledStat rateStat, MetricName rateMetricName, MetricName totalMetricName) {
        this(TimeUnit.SECONDS, rateStat, rateMetricName, totalMetricName);
    }

    /**
     * Construct a Meter with provided time unit
     */
    public Meter(TimeUnit unit, SampledStat rateStat, MetricName rateMetricName, MetricName totalMetricName) {
        // WindowedCount is a WindowedSum, so both supported stats pass this check.
        if (!(rateStat instanceof WindowedSum)) {
            throw new IllegalArgumentException("Meter is supported only for WindowedCount or WindowedSum.");
        }
        this.rate = new Rate(unit, rateStat);
        this.total = new CumulativeSum();
        this.rateMetricName = rateMetricName;
        this.totalMetricName = totalMetricName;
    }

    @Override
    public List<NamedMeasurable> stats() {
        return Arrays.asList(
            new NamedMeasurable(totalMetricName, total),
            new NamedMeasurable(rateMetricName, rate));
    }

    @Override
    public void record(MetricConfig config, double value, long timeMs) {
        rate.record(config, value, timeMs);
        // A count-based rate counts occurrences, so the total accumulates 1.0 per event
        // rather than the recorded value.
        double amountForTotal = rate.stat instanceof WindowedCount ? 1.0 : value;
        total.record(config, amountForTotal, timeMs);
    }

    @Override
    public String toString() {
        return "Meter(" +
            "rate=" + rate +
            ", total=" + total +
            ", rateMetricName=" + rateMetricName +
            ", totalMetricName=" + totalMetricName +
            ')';
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/Min.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import java.util.List;
import org.apache.kafka.common.metrics.MetricConfig;
/**
 * A {@link SampledStat} that gives the min over its samples.
 */
public class Min extends SampledStat {

    public Min() {
        // NOTE(review): samples start at Double.MAX_VALUE rather than POSITIVE_INFINITY
        // (asymmetric with Max's NEGATIVE_INFINITY); preserved as-is.
        super(Double.MAX_VALUE);
    }

    @Override
    protected void update(Sample sample, MetricConfig config, double value, long now) {
        sample.value = Math.min(value, sample.value);
    }

    @Override
    public double combine(List<Sample> samples, MetricConfig config, long now) {
        long eventTotal = 0;
        double lowest = Double.MAX_VALUE;
        for (Sample s : samples) {
            lowest = Math.min(s.value, lowest);
            eventTotal += s.eventCount;
        }
        // With no recorded events there is no meaningful min, so report NaN.
        return eventTotal == 0 ? Double.NaN : lowest;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/Percentile.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import org.apache.kafka.common.MetricName;
/**
 * An immutable pairing of a metric name with the percentile it reports (e.g. 99.0 for p99);
 * used to configure a {@link Percentiles} compound statistic.
 */
public class Percentile {

    private final MetricName name;
    private final double percentile;

    /**
     * @param name the name of the percentile metric
     * @param percentile the percentile to report, expressed out of 100 (e.g. 50.0 for the median)
     */
    public Percentile(MetricName name, double percentile) {
        this.name = name;
        this.percentile = percentile;
    }

    /** The name of this percentile metric. */
    public MetricName name() {
        return name;
    }

    /** The percentile this metric reports, out of 100. */
    public double percentile() {
        return percentile;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/Percentiles.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.metrics.CompoundStat;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.stats.Histogram.BinScheme;
import org.apache.kafka.common.metrics.stats.Histogram.ConstantBinScheme;
import org.apache.kafka.common.metrics.stats.Histogram.LinearBinScheme;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * A compound stat that reports one or more percentiles, estimated from a fixed-size histogram
 * maintained per sample window.
 */
public class Percentiles extends SampledStat implements CompoundStat {

    private final Logger log = LoggerFactory.getLogger(Percentiles.class);

    /** How bucket widths are distributed across the histogram's value range. */
    public enum BucketSizing {
        CONSTANT, LINEAR
    }

    // Number of histogram buckets; each bucket is backed by one float (4 bytes) per sample.
    private final int buckets;
    private final Percentile[] percentiles;
    private final BinScheme binScheme;
    // Values outside [min, max] are pinned to the range before being recorded.
    private final double min;
    private final double max;

    public Percentiles(int sizeInBytes, double max, BucketSizing bucketing, Percentile... percentiles) {
        this(sizeInBytes, 0.0, max, bucketing, percentiles);
    }

    public Percentiles(int sizeInBytes, double min, double max, BucketSizing bucketing, Percentile... percentiles) {
        super(0.0);
        this.percentiles = percentiles;
        this.buckets = sizeInBytes / 4; // 4 bytes per float bucket
        this.min = min;
        this.max = max;
        if (bucketing == BucketSizing.CONSTANT) {
            this.binScheme = new ConstantBinScheme(buckets, min, max);
        } else if (bucketing == BucketSizing.LINEAR) {
            // LinearBinScheme's bin-edge formula is anchored at zero, so a non-zero min is invalid.
            if (min != 0.0d)
                throw new IllegalArgumentException("Linear bucket sizing requires min to be 0.0.");
            this.binScheme = new LinearBinScheme(buckets, max);
        } else {
            throw new IllegalArgumentException("Unknown bucket type: " + bucketing);
        }
    }

    /** Expose one {@link NamedMeasurable} per configured {@link Percentile}. */
    @Override
    public List<NamedMeasurable> stats() {
        List<NamedMeasurable> ms = new ArrayList<>(this.percentiles.length);
        for (Percentile percentile : this.percentiles) {
            final double pct = percentile.percentile();
            // Percentiles are configured out of 100 (e.g. 99.0); value() expects a quantile in [0, 1].
            ms.add(new NamedMeasurable(
                percentile.name(),
                (config, now) -> value(config, now, pct / 100.0))
            );
        }
        return ms;
    }

    /**
     * Estimate the value at the given quantile across all retained samples.
     *
     * @param config the metric configuration
     * @param now the current time in milliseconds
     * @param quantile the quantile to estimate, in [0, 1]
     * @return NaN when no events have been recorded; otherwise the upper edge of the first
     *         bucket whose cumulative count fraction exceeds the quantile
     */
    public double value(MetricConfig config, long now, double quantile) {
        purgeObsoleteSamples(config, now);
        float count = 0.0f;
        for (Sample sample : this.samples)
            count += sample.eventCount;
        if (count == 0.0f)
            return Double.NaN;
        float sum = 0.0f;
        float quant = (float) quantile;
        // Walk buckets from low to high, accumulating counts from every sample's histogram,
        // until the cumulative fraction crosses the requested quantile.
        for (int b = 0; b < buckets; b++) {
            for (Sample s : this.samples) {
                HistogramSample sample = (HistogramSample) s;
                float[] hist = sample.histogram.counts();
                sum += hist[b];
                if (sum / count > quant)
                    return binScheme.fromBin(b);
            }
        }
        return Double.POSITIVE_INFINITY;
    }

    /**
     * {@inheritDoc}
     * <p>
     * The scalar measurement of this compound stat is the median (0.5 quantile).
     */
    @Override
    public double combine(List<Sample> samples, MetricConfig config, long now) {
        return value(config, now, 0.5);
    }

    // Each sample carries its own histogram so per-window counts expire independently.
    @Override
    protected HistogramSample newSample(long timeMs) {
        return new HistogramSample(this.binScheme, timeMs);
    }

    @Override
    protected void update(Sample sample, MetricConfig config, double value, long timeMs) {
        // Pin out-of-range values to the recordable range so they land in the edge buckets
        // instead of being dropped.
        final double boundedValue;
        if (value > max) {
            log.debug("Received value {} which is greater than max recordable value {}, will be pinned to the max value",
                value, max);
            boundedValue = max;
        } else if (value < min) {
            log.debug("Received value {} which is less than min recordable value {}, will be pinned to the min value",
                value, min);
            boundedValue = min;
        } else {
            boundedValue = value;
        }
        HistogramSample hist = (HistogramSample) sample;
        hist.histogram.record(boundedValue);
    }

    /** A {@link SampledStat.Sample} augmented with a histogram of the values recorded in its window. */
    private static class HistogramSample extends SampledStat.Sample {
        private final Histogram histogram;

        private HistogramSample(BinScheme scheme, long now) {
            super(0.0, now);
            this.histogram = new Histogram(scheme);
        }

        @Override
        public void reset(long now) {
            super.reset(now);
            this.histogram.clear();
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/Rate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import java.util.Locale;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.common.metrics.MeasurableStat;
import org.apache.kafka.common.metrics.MetricConfig;
import static org.apache.kafka.common.metrics.internals.MetricsUtils.convert;
/**
 * The rate of the given quantity. By default this is the total observed over a set of samples from a sampled statistic
 * divided by the elapsed time over the sample windows. Alternative {@link SampledStat} implementations can be provided,
 * however, to record the rate of occurrences (e.g. the count of values measured over the time interval) or other such
 * values.
 */
public class Rate implements MeasurableStat {

    /** The time unit in which the rate is expressed, e.g. events per second. */
    protected final TimeUnit unit;
    /** The underlying sampled statistic whose measurement is divided by the elapsed window time. */
    protected final SampledStat stat;

    public Rate() {
        this(TimeUnit.SECONDS);
    }

    public Rate(TimeUnit unit) {
        this(unit, new WindowedSum());
    }

    public Rate(SampledStat stat) {
        this(TimeUnit.SECONDS, stat);
    }

    public Rate(TimeUnit unit, SampledStat stat) {
        this.stat = stat;
        this.unit = unit;
    }

    /**
     * The singular, lower-case name of this rate's time unit, e.g. "second" for
     * {@link TimeUnit#SECONDS}.
     *
     * @return the unit name; never null
     */
    public String unitName() {
        // Every TimeUnit constant name is a simple plural ("SECONDS", "HOURS", ...), so dropping
        // the final character yields the singular form. The previous code dropped two characters,
        // producing truncated names such as "secon" and "hou".
        return unit.name().substring(0, unit.name().length() - 1).toLowerCase(Locale.ROOT);
    }

    @Override
    public void record(MetricConfig config, double value, long timeMs) {
        this.stat.record(config, value, timeMs);
    }

    @Override
    public double measure(MetricConfig config, long now) {
        double value = stat.measure(config, now);
        // Convert the elapsed window size from milliseconds into this rate's unit before dividing.
        return value / convert(windowSize(config, now), unit);
    }

    /**
     * The elapsed time, in milliseconds, over which the rate is computed; always at least 1 ms.
     *
     * @param config the metric configuration supplying the window length and sample count
     * @param now the current time in milliseconds
     * @return the effective window size in milliseconds
     */
    public long windowSize(MetricConfig config, long now) {
        // purge old samples before we compute the window size
        stat.purgeObsoleteSamples(config, now);
        /*
         * Here we check the total amount of time elapsed since the oldest non-obsolete window.
         * This give the total windowSize of the batch which is the time used for Rate computation.
         * However, there is an issue if we do not have sufficient data for e.g. if only 1 second has elapsed in a 30 second
         * window, the measured rate will be very high.
         * Hence we assume that the elapsed time is always N-1 complete windows plus whatever fraction of the final window is complete.
         *
         * Note that we could simply count the amount of time elapsed in the current window and add n-1 windows to get the total time,
         * but this approach does not account for sleeps. SampledStat only creates samples whenever record is called,
         * if no record is called for a period of time that time is not accounted for in windowSize and produces incorrect results.
         */
        long totalElapsedTimeMs = now - stat.oldest(now).lastWindowMs;
        // Check how many full windows of data we have currently retained
        int numFullWindows = (int) (totalElapsedTimeMs / config.timeWindowMs());
        int minFullWindows = config.samples() - 1;
        // If the available windows are less than the minimum required, add the difference to the totalElapsedTime
        if (numFullWindows < minFullWindows)
            totalElapsedTimeMs += (minFullWindows - numFullWindows) * config.timeWindowMs();
        // If window size is being calculated at the exact beginning of the window with no prior samples, the window size
        // will result in a value of 0. Calculation of rate over a window is size 0 is undefined, hence, we assume the
        // minimum window size to be at least 1ms.
        return Math.max(totalElapsedTimeMs, 1);
    }

    @Override
    public String toString() {
        return "Rate(" +
            "unit=" + unit +
            ", stat=" + stat +
            ')';
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/SampledStat.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.metrics.MeasurableStat;
import org.apache.kafka.common.metrics.MetricConfig;
/**
 * A SampledStat records a single scalar value measured over one or more samples. Each sample is recorded over a
 * configurable window. The window can be defined by number of events or elapsed time (or both, if both are given the
 * window is complete when <i>either</i> the event count or elapsed time criterion is met).
 * <p>
 * All the samples are combined to produce the measurement. When a window is complete the oldest sample is cleared and
 * recycled to begin recording the next sample.
 *
 * Subclasses of this class define different statistics measured using this basic pattern.
 */
public abstract class SampledStat implements MeasurableStat {

    // The value a freshly created or reset sample starts from (e.g. 0 for sums, -Inf for Max).
    private double initialValue;
    // Index into 'samples' of the sample currently being recorded into.
    private int current = 0;
    protected List<Sample> samples;

    public SampledStat(double initialValue) {
        this.initialValue = initialValue;
        this.samples = new ArrayList<>(2);
    }

    @Override
    public void record(MetricConfig config, double value, long timeMs) {
        Sample sample = current(timeMs);
        // Roll to the next sample when the current window is exhausted by time or event count.
        if (sample.isComplete(timeMs, config))
            sample = advance(config, timeMs);
        update(sample, config, value, timeMs);
        sample.eventCount += 1;
    }

    // Move to the next sample slot, creating it on first use or recycling (resetting) the
    // existing one; slots are used in round-robin order bounded by config.samples().
    private Sample advance(MetricConfig config, long timeMs) {
        this.current = (this.current + 1) % config.samples();
        if (this.current >= samples.size()) {
            Sample sample = newSample(timeMs);
            this.samples.add(sample);
            return sample;
        } else {
            Sample sample = current(timeMs);
            sample.reset(timeMs);
            return sample;
        }
    }

    // Factory for samples; subclasses override to attach extra per-sample state (e.g. histograms).
    protected Sample newSample(long timeMs) {
        return new Sample(this.initialValue, timeMs);
    }

    @Override
    public double measure(MetricConfig config, long now) {
        // Expired windows are reset first so they contribute only initial values to combine().
        purgeObsoleteSamples(config, now);
        return combine(this.samples, config, now);
    }

    /** The sample currently being recorded into, creating the first sample on demand. */
    public Sample current(long timeMs) {
        if (samples.size() == 0)
            this.samples.add(newSample(timeMs));
        return this.samples.get(this.current);
    }

    /** The sample with the earliest window start time, creating the first sample on demand. */
    public Sample oldest(long now) {
        if (samples.size() == 0)
            this.samples.add(newSample(now));
        Sample oldest = this.samples.get(0);
        for (int i = 1; i < this.samples.size(); i++) {
            Sample curr = this.samples.get(i);
            if (curr.lastWindowMs < oldest.lastWindowMs)
                oldest = curr;
        }
        return oldest;
    }

    @Override
    public String toString() {
        return "SampledStat(" +
            "initialValue=" + initialValue +
            ", current=" + current +
            ", samples=" + samples +
            ')';
    }

    /** Fold the given value into the sample; defines what statistic this stat computes. */
    protected abstract void update(Sample sample, MetricConfig config, double value, long timeMs);

    /** Combine all retained samples into the single measured value. */
    public abstract double combine(List<Sample> samples, MetricConfig config, long now);

    /* Timeout any windows that have expired in the absence of any events */
    protected void purgeObsoleteSamples(MetricConfig config, long now) {
        long expireAge = config.samples() * config.timeWindowMs();
        for (Sample sample : samples) {
            if (now - sample.lastWindowMs >= expireAge)
                sample.reset(now);
        }
    }

    /** One recording window: a running value plus bookkeeping for expiry and event counting. */
    protected static class Sample {
        // Value this sample resets to.
        public double initialValue;
        // Number of events recorded in this window.
        public long eventCount;
        // Time (ms) at which this window started.
        public long lastWindowMs;
        // The running statistic value for this window.
        public double value;

        public Sample(double initialValue, long now) {
            this.initialValue = initialValue;
            this.eventCount = 0;
            this.lastWindowMs = now;
            this.value = initialValue;
        }

        public void reset(long now) {
            this.eventCount = 0;
            this.lastWindowMs = now;
            this.value = initialValue;
        }

        /** A window is complete once either its time span or its event budget is used up. */
        public boolean isComplete(long timeMs, MetricConfig config) {
            return timeMs - lastWindowMs >= config.timeWindowMs() || eventCount >= config.eventWindow();
        }

        @Override
        public String toString() {
            return "Sample(" +
                "value=" + value +
                ", eventCount=" + eventCount +
                ", lastWindowMs=" + lastWindowMs +
                ", initialValue=" + initialValue +
                ')';
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/SimpleRate.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import org.apache.kafka.common.metrics.MetricConfig;
/**
* A simple rate: the rate is incrementally calculated
* based on the elapsed time between the earliest reading
* and now.
*
* An exception is made for the first window, which is
* considered of fixed size. This avoids the issue of
* an artificially high rate when the gap between readings
* is close to 0.
*/
public class SimpleRate extends Rate {
    /**
     * The window is the elapsed time between the oldest retained reading and
     * {@code now}, but never smaller than one configured time window. The
     * lower bound prevents the first window from yielding an artificially
     * high rate when readings are very close together.
     */
    @Override
    public long windowSize(MetricConfig config, long now) {
        stat.purgeObsoleteSamples(config, now);
        final long sinceOldestMs = now - stat.oldest(now).lastWindowMs;
        final long minimumMs = config.timeWindowMs();
        return sinceOldestMs > minimumMs ? sinceOldestMs : minimumMs;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/TokenBucket.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.common.metrics.MeasurableStat;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Quota;
import static org.apache.kafka.common.metrics.internals.MetricsUtils.convert;
/**
* The {@link TokenBucket} is a {@link MeasurableStat} implementing a token bucket algorithm
* that is usable within a {@link org.apache.kafka.common.metrics.Sensor}.
*
* The {@link Quota#bound()} defined the refill rate of the bucket while the maximum burst or
* the maximum number of credits of the bucket is defined by
* {@link MetricConfig#samples() * MetricConfig#timeWindowMs() * Quota#bound()}.
*
* The quota is considered as exhausted when the amount of remaining credits in the bucket
* is below zero. The enforcement is done by the {@link org.apache.kafka.common.metrics.Sensor}.
*
* Token Bucket vs Rate based Quota:
* The current sampled rate based quota does not cope well with bursty workloads. The issue is
* that a unique and large sample can hold the average above the quota until it is discarded.
* Practically, when this happens, one must wait until the sample is expired to bring the rate
* below the quota even though less time would be theoretically required. As an example, let's
* imagine that we have:
* - Quota (Q) = 5
* - Samples (S) = 100
* - Window (W) = 1s
* A burst of 560 brings the average rate (R) to 5.6 (560 / 100). The expected throttle time is
* computed as follows: ((R - Q) / Q * S * W) = ((5.6 - 5) / 5 * 100 * 1) = 12 secs. In practice,
* the average rate won't go below the quota before the burst is dropped from the samples so one
* must wait 100s (S * W).
*
* The token bucket relies on a continuously updated amount of credits. Therefore, it does not
* suffer from the above issue. The same example would work as follows:
* - Quota (Q) = 5
* - Burst (B) = 5 * 1 * 100 = 500 (Q * S * W)
* A burst of 560 brings the amount of credits to -60. One must wait 12s (-(-60)/5) to refill the
* bucket to zero.
*/
public class TokenBucket implements MeasurableStat {
    private final TimeUnit unit;
    // Remaining credits; a negative amount means the quota is exhausted.
    private double credits;
    // Timestamp (ms) at which credits were last refilled.
    private long lastRefillMs;

    public TokenBucket() {
        this(TimeUnit.SECONDS);
    }

    public TokenBucket(TimeUnit unit) {
        this.unit = unit;
        this.credits = 0;
        this.lastRefillMs = 0;
    }

    /**
     * Returns the current amount of credits after refilling up to {@code timeMs},
     * or {@code Long.MAX_VALUE} when no quota is configured.
     */
    @Override
    public double measure(final MetricConfig config, final long timeMs) {
        if (config.quota() == null)
            return Long.MAX_VALUE;
        refill(config.quota().bound(), burst(config), timeMs);
        return this.credits;
    }

    /**
     * Refills the bucket up to {@code timeMs} and then deducts {@code value}
     * credits, capping the result at the maximum burst. No-op without a quota.
     */
    @Override
    public void record(final MetricConfig config, final double value, final long timeMs) {
        if (config.quota() == null)
            return;
        final double refillRate = config.quota().bound();
        final double maxCredits = burst(config);
        refill(refillRate, maxCredits, timeMs);
        this.credits = Math.min(maxCredits, this.credits - value);
    }

    // Accrue credits for the time elapsed since the last refill at the given
    // rate, capped at 'burst'.
    private void refill(final double quota, final double burst, final long timeMs) {
        final double accrued = quota * convert(timeMs - lastRefillMs, unit);
        this.credits = Math.min(burst, this.credits + accrued);
        this.lastRefillMs = timeMs;
    }

    // Maximum burst: samples * time window (converted to 'unit') * quota bound.
    private double burst(final MetricConfig config) {
        return config.samples() * convert(config.timeWindowMs(), unit) * config.quota().bound();
    }

    @Override
    public String toString() {
        return "TokenBucket(" +
            "unit=" + unit +
            ", tokens=" + credits +
            ", lastUpdateMs=" + lastRefillMs +
            ')';
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/Value.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import org.apache.kafka.common.metrics.MeasurableStat;
import org.apache.kafka.common.metrics.MetricConfig;
/**
* An instantaneous value.
*/
public class Value implements MeasurableStat {
    // Most recently recorded value; 0 until the first record() call.
    private double latest = 0;

    /** Returns the value from the most recent {@link #record} call. */
    @Override
    public double measure(MetricConfig config, long now) {
        return latest;
    }

    /** Overwrites the stored value with the newly recorded one. */
    @Override
    public void record(MetricConfig config, double value, long timeMs) {
        latest = value;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/WindowedCount.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import org.apache.kafka.common.metrics.MetricConfig;
/**
* A {@link SampledStat} that maintains a simple count of what it has seen.
* This is a special kind of {@link WindowedSum} that always records a value of {@code 1} instead of the provided value.
* In other words, it counts the number of
* {@link WindowedCount#record(MetricConfig, double, long)} invocations,
* instead of summing the recorded values.
*
* See also {@link CumulativeCount} for a non-sampled version of this metric.
*/
public class WindowedCount extends WindowedSum {
    @Override
    protected void update(Sample sample, MetricConfig config, double value, long now) {
        // Ignore the caller-supplied value and always add 1.0, so the windowed
        // "sum" becomes a count of record() invocations.
        super.update(sample, config, 1.0, now);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/WindowedSum.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.metrics.stats;
import org.apache.kafka.common.metrics.MetricConfig;
import java.util.List;
/**
* A {@link SampledStat} that maintains the sum of what it has seen.
* This is a sampled version of {@link CumulativeSum}.
*
* See also {@link WindowedCount} if you want to increment the value by 1 on each recording.
*/
public class WindowedSum extends SampledStat {

    public WindowedSum() {
        super(0); // each sample's running sum starts at zero
    }

    /** Adds the recorded value to the current sample's running sum. */
    @Override
    protected void update(Sample sample, MetricConfig config, double value, long now) {
        sample.value = sample.value + value;
    }

    /** The measurement is the total across all retained samples. */
    @Override
    public double combine(List<Sample> samples, MetricConfig config, long now) {
        double sum = 0.0;
        for (int i = 0; i < samples.size(); i++)
            sum += samples.get(i).value;
        return sum;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/metrics/stats/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides methods of statistically aggregating metrics upon emission.
*/
package org.apache.kafka.common.metrics.stats; |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/Authenticator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.auth.KafkaPrincipalSerde;
import java.io.Closeable;
import java.io.IOException;
import java.util.Optional;
/**
* Authentication for Channel
*/
public interface Authenticator extends Closeable {
    /**
     * Implements any authentication mechanism. Use transportLayer to read or write tokens.
     * For security protocols PLAINTEXT and SSL, this is a no-op since no further authentication
     * needs to be done. For SASL_PLAINTEXT and SASL_SSL, this performs the SASL authentication.
     *
     * @throws AuthenticationException if authentication fails due to invalid credentials or
     *         other security configuration errors
     * @throws IOException if read/write fails due to an I/O error
     */
    void authenticate() throws AuthenticationException, IOException;

    /**
     * Perform any processing related to authentication failure. This is invoked when the channel is about to be closed
     * because of an {@link AuthenticationException} thrown from a prior {@link #authenticate()} call.
     * @throws IOException if read/write fails due to an I/O error
     */
    default void handleAuthenticationFailure() throws IOException {
    }

    /**
     * Returns the authenticated principal, built using the configured PrincipalBuilder.
     */
    KafkaPrincipal principal();

    /**
     * Returns the serializer/deserializer interface for the principal, if one is configured.
     */
    Optional<KafkaPrincipalSerde> principalSerde();

    /**
     * Returns true if authentication is complete, otherwise false.
     */
    boolean complete();

    /**
     * Begins re-authentication. Uses transportLayer to read or write tokens as is
     * done for {@link #authenticate()}. For security protocols PLAINTEXT and SSL,
     * this is a no-op since re-authentication does not apply/is not supported,
     * respectively. For SASL_PLAINTEXT and SASL_SSL, this performs a SASL
     * authentication. Any in-flight responses from prior requests can/will be read
     * and collected for later processing as required. There must not be partially
     * written requests; any request queued for writing (for which zero bytes have
     * been written) remains queued until after re-authentication succeeds.
     *
     * @param reauthenticationContext
     *            the context in which this re-authentication is occurring. This
     *            instance is responsible for closing the previous Authenticator
     *            returned by
     *            {@link ReauthenticationContext#previousAuthenticator()}.
     * @throws AuthenticationException
     *             if authentication fails due to invalid credentials or other
     *             security configuration errors
     * @throws IOException
     *             if read/write fails due to an I/O error
     */
    default void reauthenticate(ReauthenticationContext reauthenticationContext) throws IOException {
        // empty
    }

    /**
     * Return the session expiration time, if any, otherwise null. The value is in
     * nanoseconds as per {@code System.nanoTime()} and is therefore only useful
     * when compared to such a value -- its absolute value is meaningless. This
     * value may be non-null only on the server-side. It represents the time after
     * which, in the absence of re-authentication, the broker will close the session
     * if it receives a request unrelated to authentication. We store nanoseconds
     * here to avoid having to invoke the more expensive {@code milliseconds()} call
     * on the broker for every request.
     *
     * @return the session expiration time, if any, otherwise null
     */
    default Long serverSessionExpirationTimeNanos() {
        return null;
    }

    /**
     * Return the time on or after which a client should re-authenticate this
     * session, if any, otherwise null. The value is in nanoseconds as per
     * {@code System.nanoTime()} and is therefore only useful when compared to such
     * a value -- its absolute value is meaningless. This value may be non-null
     * only on the client-side. It will be a random time between 85% and 95% of the
     * full session lifetime to account for latency between client and server and to
     * avoid re-authentication storms that could be caused by many sessions
     * re-authenticating simultaneously.
     *
     * @return the time on or after which a client should re-authenticate this
     *         session, if any, otherwise null
     */
    default Long clientSessionReauthenticationTimeNanos() {
        return null;
    }

    /**
     * Return the number of milliseconds that elapsed while re-authenticating this
     * session from the perspective of this instance, if applicable, otherwise null.
     * The server-side perspective will yield a lower value than the client-side
     * perspective of the same re-authentication because the client-side observes an
     * additional network round-trip.
     *
     * @return the number of milliseconds that elapsed while re-authenticating this
     *         session from the perspective of this instance, if applicable,
     *         otherwise null
     */
    default Long reauthenticationLatencyMs() {
        return null;
    }

    /**
     * Return the next (always non-null but possibly empty) client-side
     * {@link NetworkReceive} response that arrived during re-authentication that
     * is unrelated to re-authentication, if any. These correspond to requests sent
     * prior to the beginning of re-authentication; the requests were made when the
     * channel was successfully authenticated, and the responses arrived during the
     * re-authentication process. The response returned is removed from the authenticator's
     * queue. Responses of requests sent after completion of re-authentication are
     * processed only when the authenticator response queue is empty.
     *
     * @return the (always non-null but possibly empty) client-side
     *         {@link NetworkReceive} response that arrived during
     *         re-authentication that is unrelated to re-authentication, if any
     */
    default Optional<NetworkReceive> pollResponseReceivedDuringReauthentication() {
        return Optional.empty();
    }

    /**
     * Return true if this is a server-side authenticator and the connected client
     * has indicated that it supports re-authentication, otherwise false.
     *
     * @return true if this is a server-side authenticator and the connected client
     *         has indicated that it supports re-authentication, otherwise false
     */
    default boolean connectedClientSupportsReauthentication() {
        return false;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/ByteBufferSend.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* A send backed by an array of byte buffers
*/
/**
 * A send backed by an array of byte buffers.
 */
public class ByteBufferSend implements Send {
    // Total number of bytes this send will write, fixed at construction.
    private final long size;
    protected final ByteBuffer[] buffers;
    // Bytes still to be written; decremented by writeTo().
    private long remaining;
    // True while the channel still holds buffered (not yet flushed) writes.
    private boolean pending = false;

    public ByteBufferSend(ByteBuffer... buffers) {
        this.buffers = buffers;
        for (ByteBuffer buffer : buffers)
            remaining += buffer.remaining();
        this.size = remaining;
    }

    public ByteBufferSend(ByteBuffer[] buffers, long size) {
        this.buffers = buffers;
        this.size = size;
        this.remaining = size;
    }

    @Override
    public boolean completed() {
        // Complete once every byte has been written AND the channel has no
        // internally buffered writes left to flush.
        return remaining <= 0 && !pending;
    }

    @Override
    public long size() {
        return this.size;
    }

    /**
     * Writes as much of the remaining data as the channel accepts.
     *
     * @return the number of bytes written in this call
     * @throws EOFException if the channel reports a negative write count
     * @throws IOException if the underlying write fails
     */
    @Override
    public long writeTo(TransferableChannel channel) throws IOException {
        long written = channel.write(buffers);
        if (written < 0)
            throw new EOFException("Wrote negative bytes to channel. This shouldn't happen.");
        remaining -= written;
        pending = channel.hasPendingWrites();
        return written;
    }

    public long remaining() {
        return remaining;
    }

    @Override
    public String toString() {
        // Fixed: previously rendered as "ByteBufferSend(, size=..." due to a
        // stray ", " immediately after the opening parenthesis.
        return "ByteBufferSend(" +
            "size=" + size +
            ", remaining=" + remaining +
            ", pending=" + pending +
            ')';
    }

    /**
     * Wraps {@code buffer} in a send preceded by a 4-byte big-endian length prefix.
     */
    public static ByteBufferSend sizePrefixed(ByteBuffer buffer) {
        ByteBuffer sizeBuffer = ByteBuffer.allocate(4);
        sizeBuffer.putInt(0, buffer.remaining());
        return new ByteBufferSend(sizeBuffer, buffer);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/ChannelBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import java.nio.channels.SelectionKey;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.memory.MemoryPool;
/**
* A ChannelBuilder interface to build Channel based on configs
*/
public interface ChannelBuilder extends AutoCloseable, Configurable {
    /**
     * Returns a KafkaChannel with TransportLayer and Authenticator configured.
     * @param id channel id
     * @param key SelectionKey
     * @param maxReceiveSize max size of a single receive buffer to allocate
     * @param memoryPool memory pool from which to allocate buffers, or null for none
     * @param metadataRegistry registry used to record metadata about the built channel
     * @return KafkaChannel
     */
    KafkaChannel buildChannel(String id, SelectionKey key, int maxReceiveSize,
        MemoryPool memoryPool, ChannelMetadataRegistry metadataRegistry) throws KafkaException;

    /**
     * Closes the ChannelBuilder and releases any resources it holds.
     */
    @Override
    void close();
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/ChannelBuilders.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.SslClientAuth;
import org.apache.kafka.common.config.internals.BrokerSecurityConfigs;
import org.apache.kafka.common.errors.InvalidConfigurationException;
import org.apache.kafka.common.requests.ApiVersionsResponse;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.common.security.JaasContext;
import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder;
import org.apache.kafka.common.security.auth.KafkaPrincipalBuilder;
import org.apache.kafka.common.security.authenticator.CredentialCache;
import org.apache.kafka.common.security.kerberos.KerberosShortNamer;
import org.apache.kafka.common.security.ssl.SslPrincipalMapper;
import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.function.Supplier;
public class ChannelBuilders {
private static final Logger log = LoggerFactory.getLogger(ChannelBuilders.class);
private ChannelBuilders() { }
/**
* @param securityProtocol the securityProtocol
* @param contextType the contextType, it must be non-null if `securityProtocol` is SASL_*; it is ignored otherwise
* @param config client config
* @param listenerName the listenerName if contextType is SERVER or null otherwise
* @param clientSaslMechanism SASL mechanism if mode is CLIENT, ignored otherwise
* @param time the time instance
* @param saslHandshakeRequestEnable flag to enable Sasl handshake requests; disabled only for SASL
* inter-broker connections with inter-broker protocol version < 0.10
* @param logContext the log context instance
*
* @return the configured `ChannelBuilder`
* @throws IllegalArgumentException if `mode` invariants described above is not maintained
*/
public static ChannelBuilder clientChannelBuilder(
        SecurityProtocol securityProtocol,
        JaasContext.Type contextType,
        AbstractConfig config,
        ListenerName listenerName,
        String clientSaslMechanism,
        Time time,
        boolean saslHandshakeRequestEnable,
        LogContext logContext) {
    // SASL protocols require both a JAAS context type and a client mechanism.
    final boolean saslProtocol = securityProtocol == SecurityProtocol.SASL_PLAINTEXT
            || securityProtocol == SecurityProtocol.SASL_SSL;
    if (saslProtocol) {
        if (contextType == null)
            throw new IllegalArgumentException("`contextType` must be non-null if `securityProtocol` is `" + securityProtocol + "`");
        if (clientSaslMechanism == null)
            throw new IllegalArgumentException("`clientSaslMechanism` must be non-null in client mode if `securityProtocol` is `" + securityProtocol + "`");
    }
    return create(securityProtocol, Mode.CLIENT, contextType, config, listenerName, false, clientSaslMechanism,
            saslHandshakeRequestEnable, null, null, time, logContext, null);
}
/**
* @param listenerName the listenerName
* @param isInterBrokerListener whether or not this listener is used for inter-broker requests
* @param securityProtocol the securityProtocol
* @param config server config
* @param credentialCache Credential cache for SASL/SCRAM if SCRAM is enabled
* @param tokenCache Delegation token cache
* @param time the time instance
* @param logContext the log context instance
* @param apiVersionSupplier supplier for ApiVersions responses sent prior to authentication
*
* @return the configured `ChannelBuilder`
*/
public static ChannelBuilder serverChannelBuilder(ListenerName listenerName,
                                                  boolean isInterBrokerListener,
                                                  SecurityProtocol securityProtocol,
                                                  AbstractConfig config,
                                                  CredentialCache credentialCache,
                                                  DelegationTokenCache tokenCache,
                                                  Time time,
                                                  LogContext logContext,
                                                  Supplier<ApiVersionsResponse> apiVersionSupplier) {
    // Server side always uses the SERVER JAAS context, no client SASL mechanism,
    // and always enables SASL handshake requests.
    return create(securityProtocol, Mode.SERVER, JaasContext.Type.SERVER, config, listenerName,
        isInterBrokerListener, null, true, credentialCache,
        tokenCache, time, logContext, apiVersionSupplier);
}
// Builds and configures the ChannelBuilder matching the security protocol:
// SSL, SASL_SSL/SASL_PLAINTEXT (with per-mechanism JAAS contexts on the server
// side), or PLAINTEXT.
private static ChannelBuilder create(SecurityProtocol securityProtocol,
                                     Mode mode,
                                     JaasContext.Type contextType,
                                     AbstractConfig config,
                                     ListenerName listenerName,
                                     boolean isInterBrokerListener,
                                     String clientSaslMechanism,
                                     boolean saslHandshakeRequestEnable,
                                     CredentialCache credentialCache,
                                     DelegationTokenCache tokenCache,
                                     Time time,
                                     LogContext logContext,
                                     Supplier<ApiVersionsResponse> apiVersionSupplier) {
    Map<String, Object> configs = channelBuilderConfigs(config, listenerName);
    ChannelBuilder channelBuilder;
    switch (securityProtocol) {
        case SSL:
            requireNonNullMode(mode, securityProtocol);
            channelBuilder = new SslChannelBuilder(mode, listenerName, isInterBrokerListener, logContext);
            break;
        case SASL_SSL:
        case SASL_PLAINTEXT:
            requireNonNullMode(mode, securityProtocol);
            Map<String, JaasContext> jaasContexts;
            String sslClientAuthOverride = null;
            if (mode == Mode.SERVER) {
                // Servers load one JAAS context per enabled SASL mechanism.
                @SuppressWarnings("unchecked")
                List<String> enabledMechanisms = (List<String>) configs.get(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG);
                jaasContexts = new HashMap<>(enabledMechanisms.size());
                for (String mechanism : enabledMechanisms)
                    jaasContexts.put(mechanism, JaasContext.loadServerContext(listenerName, mechanism, configs));
                // SSL client authentication is enabled in brokers for SASL_SSL only if listener-prefixed config is specified.
                if (listenerName != null && securityProtocol == SecurityProtocol.SASL_SSL) {
                    String configuredClientAuth = (String) configs.get(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG);
                    String listenerClientAuth = (String) config.originalsWithPrefix(listenerName.configPrefix(), true)
                        .get(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG);
                    // If `ssl.client.auth` is configured at the listener level, we don't set an override and SslFactory
                    // uses the value from `configs`. If not, we propagate `sslClientAuthOverride=NONE` to SslFactory and
                    // it applies the override to the latest configs when it is configured or reconfigured. Note that
                    // `ssl.client.auth` cannot be dynamically altered.
                    if (listenerClientAuth == null) {
                        sslClientAuthOverride = SslClientAuth.NONE.name().toLowerCase(Locale.ROOT);
                        if (configuredClientAuth != null && !configuredClientAuth.equalsIgnoreCase(SslClientAuth.NONE.name())) {
                            log.warn("Broker configuration '{}' is applied only to SSL listeners. Listener-prefixed configuration can be used" +
                                " to enable SSL client authentication for SASL_SSL listeners. In future releases, broker-wide option without" +
                                " listener prefix may be applied to SASL_SSL listeners as well. All configuration options intended for specific" +
                                " listeners should be listener-prefixed.", BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG);
                        }
                    }
                }
            } else {
                // Use server context for inter-broker client connections and client context for other clients
                JaasContext jaasContext = contextType == JaasContext.Type.CLIENT ? JaasContext.loadClientContext(configs) :
                    JaasContext.loadServerContext(listenerName, clientSaslMechanism, configs);
                jaasContexts = Collections.singletonMap(clientSaslMechanism, jaasContext);
            }
            channelBuilder = new SaslChannelBuilder(mode,
                jaasContexts,
                securityProtocol,
                listenerName,
                isInterBrokerListener,
                clientSaslMechanism,
                saslHandshakeRequestEnable,
                credentialCache,
                tokenCache,
                sslClientAuthOverride,
                time,
                logContext,
                apiVersionSupplier);
            break;
        case PLAINTEXT:
            channelBuilder = new PlaintextChannelBuilder(listenerName);
            break;
        default:
            throw new IllegalArgumentException("Unexpected securityProtocol " + securityProtocol);
    }
    // All builders receive the (possibly listener-prefixed) configs before use.
    channelBuilder.configure(configs);
    return channelBuilder;
}
/**
* @return a mutable RecordingMap. The elements got from RecordingMap are marked as "used".
*/
@SuppressWarnings("unchecked")
static Map<String, Object> channelBuilderConfigs(final AbstractConfig config, final ListenerName listenerName) {
Map<String, Object> parsedConfigs;
if (listenerName == null)
parsedConfigs = (Map<String, Object>) config.values();
else
parsedConfigs = config.valuesWithPrefixOverride(listenerName.configPrefix());
config.originals().entrySet().stream()
.filter(e -> !parsedConfigs.containsKey(e.getKey())) // exclude already parsed configs
// exclude already parsed listener prefix configs
.filter(e -> !(listenerName != null && e.getKey().startsWith(listenerName.configPrefix()) &&
parsedConfigs.containsKey(e.getKey().substring(listenerName.configPrefix().length()))))
// exclude keys like `{mechanism}.some.prop` if "listener.name." prefix is present and key `some.prop` exists in parsed configs.
.filter(e -> !(listenerName != null && parsedConfigs.containsKey(e.getKey().substring(e.getKey().indexOf('.') + 1))))
.forEach(e -> parsedConfigs.put(e.getKey(), e.getValue()));
return parsedConfigs;
}
private static void requireNonNullMode(Mode mode, SecurityProtocol securityProtocol) {
if (mode == null)
throw new IllegalArgumentException("`mode` must be non-null if `securityProtocol` is `" + securityProtocol + "`");
}
public static KafkaPrincipalBuilder createPrincipalBuilder(Map<String, ?> configs,
KerberosShortNamer kerberosShortNamer,
SslPrincipalMapper sslPrincipalMapper) {
Class<?> principalBuilderClass = (Class<?>) configs.get(BrokerSecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG);
final KafkaPrincipalBuilder builder;
if (principalBuilderClass == null || principalBuilderClass == DefaultKafkaPrincipalBuilder.class) {
builder = new DefaultKafkaPrincipalBuilder(kerberosShortNamer, sslPrincipalMapper);
} else if (KafkaPrincipalBuilder.class.isAssignableFrom(principalBuilderClass)) {
builder = (KafkaPrincipalBuilder) Utils.newInstance(principalBuilderClass);
} else {
throw new InvalidConfigurationException("Type " + principalBuilderClass.getName() + " is not " +
"an instance of " + KafkaPrincipalBuilder.class.getName());
}
if (builder instanceof Configurable)
((Configurable) builder).configure(configs);
return builder;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/ChannelMetadataRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import java.io.Closeable;
/**
* Metadata about a channel is provided in various places in the network stack. This
* registry is used as a common place to collect them.
*/
public interface ChannelMetadataRegistry extends Closeable {
    /**
     * Register information about the SSL cipher we are using.
     * Re-registering the information will overwrite the previous one.
     */
    void registerCipherInformation(CipherInformation cipherInformation);

    /**
     * Get the currently registered cipher information.
     */
    CipherInformation cipherInformation();

    /**
     * Register information about the client software we are using.
     * Depending on the clients, the ApiVersionsRequest could be received
     * multiple times or not at all. Re-registering the information will
     * overwrite the previous one.
     */
    void registerClientInformation(ClientInformation clientInformation);

    /**
     * Get the currently registered client information.
     */
    ClientInformation clientInformation();

    /**
     * Unregister everything that has been registered and close the registry.
     */
    void close();
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/ChannelState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import org.apache.kafka.common.errors.AuthenticationException;
/**
* States for KafkaChannel:
* <ul>
* <li>NOT_CONNECTED: Connections are created in NOT_CONNECTED state. State is updated
* on {@link TransportLayer#finishConnect()} when socket connection is established.
* PLAINTEXT channels transition from NOT_CONNECTED to READY, others transition
* to AUTHENTICATE. Failures in NOT_CONNECTED state typically indicate that the
* remote endpoint is unavailable, which may be due to misconfigured endpoints.</li>
* <li>AUTHENTICATE: SSL, SASL_SSL and SASL_PLAINTEXT channels are in AUTHENTICATE state during SSL and
* SASL handshake. Disconnections in AUTHENTICATE state may indicate that authentication failed with
* SSL or SASL (broker version < 1.0.0). Channels transition to READY state when authentication completes
* successfully.</li>
* <li>READY: Connected, authenticated channels are in READY state. Channels may transition from
* READY to EXPIRED, FAILED_SEND or LOCAL_CLOSE.</li>
* <li>EXPIRED: Idle connections are moved to EXPIRED state on idle timeout and the channel is closed.</li>
* <li>FAILED_SEND: Channels transition from READY to FAILED_SEND state if the channel is closed due
* to a send failure.</li>
* <li>AUTHENTICATION_FAILED: Channels are moved to this state if the requested SASL mechanism is not
* enabled in the broker or when brokers with versions 1.0.0 and above provide an error response
* during SASL authentication. {@link #exception()} gives the reason provided by the broker for
* authentication failure.</li>
* <li>LOCAL_CLOSE: Channels are moved to LOCAL_CLOSE state if close() is initiated locally.</li>
* </ul>
* If the remote endpoint closes a channel, the state of the channel reflects the state the channel
* was in at the time of disconnection. This state may be useful to identify the reason for disconnection.
* <p>
* Typical transitions:
* <ul>
* <li>PLAINTEXT Good path: NOT_CONNECTED => READY => LOCAL_CLOSE</li>
* <li>SASL/SSL Good path: NOT_CONNECTED => AUTHENTICATE => READY => LOCAL_CLOSE</li>
* <li>Bootstrap server misconfiguration: NOT_CONNECTED, disconnected in NOT_CONNECTED state</li>
* <li>Security misconfiguration: NOT_CONNECTED => AUTHENTICATE => AUTHENTICATION_FAILED, disconnected in AUTHENTICATION_FAILED state</li>
* <li>Security misconfiguration with older broker: NOT_CONNECTED => AUTHENTICATE, disconnected in AUTHENTICATE state</li>
* </ul>
*/
public class ChannelState {
    public enum State {
        NOT_CONNECTED,
        AUTHENTICATE,
        READY,
        EXPIRED,
        FAILED_SEND,
        AUTHENTICATION_FAILED,
        LOCAL_CLOSE
    }
    // AUTHENTICATION_FAILED has a custom exception. For other states,
    // create a reusable `ChannelState` instance per-state.
    public static final ChannelState NOT_CONNECTED = new ChannelState(State.NOT_CONNECTED);
    public static final ChannelState AUTHENTICATE = new ChannelState(State.AUTHENTICATE);
    public static final ChannelState READY = new ChannelState(State.READY);
    public static final ChannelState EXPIRED = new ChannelState(State.EXPIRED);
    public static final ChannelState FAILED_SEND = new ChannelState(State.FAILED_SEND);
    public static final ChannelState LOCAL_CLOSE = new ChannelState(State.LOCAL_CLOSE);

    private final State state;
    // Cause of an authentication failure; null for all other states.
    private final AuthenticationException exception;
    // Remote peer address if it was captured before disconnection; may be null.
    private final String remoteAddress;

    public ChannelState(State state) {
        this(state, null, null);
    }

    public ChannelState(State state, String remoteAddress) {
        this(state, null, remoteAddress);
    }

    public ChannelState(State state, AuthenticationException exception, String remoteAddress) {
        this.state = state;
        this.exception = exception;
        this.remoteAddress = remoteAddress;
    }

    public State state() {
        return state;
    }

    public AuthenticationException exception() {
        return exception;
    }

    public String remoteAddress() {
        return remoteAddress;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/CipherInformation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import java.util.Objects;
public class CipherInformation {
private final String cipher;
private final String protocol;
public CipherInformation(String cipher, String protocol) {
this.cipher = cipher == null || cipher.isEmpty() ? "unknown" : cipher;
this.protocol = protocol == null || protocol.isEmpty() ? "unknown" : protocol;
}
public String cipher() {
return cipher;
}
public String protocol() {
return protocol;
}
@Override
public String toString() {
return "CipherInformation(cipher=" + cipher +
", protocol=" + protocol + ")";
}
@Override
public int hashCode() {
return Objects.hash(cipher, protocol);
}
@Override
public boolean equals(Object o) {
if (o == null) {
return false;
}
if (!(o instanceof CipherInformation)) {
return false;
}
CipherInformation other = (CipherInformation) o;
return other.cipher.equals(cipher) &&
other.protocol.equals(protocol);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/ClientInformation.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import java.util.Objects;
/**
 * Immutable holder for the client software name and version reported via the
 * ApiVersions request. Null or empty values are normalized to
 * {@link #UNKNOWN_NAME_OR_VERSION}.
 */
public class ClientInformation {
    public static final String UNKNOWN_NAME_OR_VERSION = "unknown";
    public static final ClientInformation EMPTY = new ClientInformation(UNKNOWN_NAME_OR_VERSION, UNKNOWN_NAME_OR_VERSION);

    private final String softwareName;
    private final String softwareVersion;

    /**
     * @param softwareName client software name; null or empty becomes "unknown".
     *        The null check fixes an NPE on null input and makes this class
     *        consistent with CipherInformation's null handling.
     * @param softwareVersion client software version; null or empty becomes "unknown".
     */
    public ClientInformation(String softwareName, String softwareVersion) {
        this.softwareName = softwareName == null || softwareName.isEmpty() ? UNKNOWN_NAME_OR_VERSION : softwareName;
        this.softwareVersion = softwareVersion == null || softwareVersion.isEmpty() ? UNKNOWN_NAME_OR_VERSION : softwareVersion;
    }

    public String softwareName() {
        return this.softwareName;
    }

    public String softwareVersion() {
        return this.softwareVersion;
    }

    @Override
    public String toString() {
        return "ClientInformation(softwareName=" + softwareName +
            ", softwareVersion=" + softwareVersion + ")";
    }

    @Override
    public int hashCode() {
        return Objects.hash(softwareName, softwareVersion);
    }

    @Override
    public boolean equals(Object o) {
        if (o == null) {
            return false;
        }
        if (!(o instanceof ClientInformation)) {
            return false;
        }
        ClientInformation other = (ClientInformation) o;
        return other.softwareName.equals(softwareName) &&
            other.softwareVersion.equals(softwareVersion);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/DefaultChannelMetadataRegistry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
public class DefaultChannelMetadataRegistry implements ChannelMetadataRegistry {
    private CipherInformation cipherInformation;
    private ClientInformation clientInformation;

    /**
     * Registers the negotiated SSL cipher information. Per the
     * {@link ChannelMetadataRegistry} contract, re-registering overwrites the
     * previously registered value.
     */
    @Override
    public void registerCipherInformation(final CipherInformation cipherInformation) {
        // Fix: the previous guard (`if (this.cipherInformation != null)`) silently dropped
        // the FIRST registration (the field starts as null), so cipher information could
        // never be recorded. Assign unconditionally, matching the interface contract and
        // registerClientInformation() below.
        this.cipherInformation = cipherInformation;
    }

    @Override
    public CipherInformation cipherInformation() {
        return this.cipherInformation;
    }

    /**
     * Registers client software information; re-registering overwrites the previous value.
     */
    @Override
    public void registerClientInformation(final ClientInformation clientInformation) {
        this.clientInformation = clientInformation;
    }

    @Override
    public ClientInformation clientInformation() {
        return this.clientInformation;
    }

    /**
     * Unregisters all metadata and closes the registry.
     */
    @Override
    public void close() {
        this.cipherInformation = null;
        this.clientInformation = null;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/DelayedResponseAuthenticationException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import org.apache.kafka.common.errors.AuthenticationException;
/**
 * Authentication exception thrown when the channel close must be delayed so
 * that a failure response can still be delivered to the peer; thrown by
 * {@code KafkaChannel#prepare()} after it invokes
 * {@code delayCloseOnAuthenticationFailure()}. Wraps the original
 * {@link AuthenticationException} as the cause.
 */
public class DelayedResponseAuthenticationException extends AuthenticationException {
    private static final long serialVersionUID = 1L;

    public DelayedResponseAuthenticationException(Throwable cause) {
        super(cause);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/InvalidReceiveException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import org.apache.kafka.common.KafkaException;
/**
 * Indicates that an incoming network receive could not be accepted
 * (NOTE(review): presumably raised on invalid/oversized receive sizes —
 * the throw sites are outside this file; confirm against NetworkReceive).
 */
public class InvalidReceiveException extends KafkaException {
    public InvalidReceiveException(String message) {
        super(message);
    }

    public InvalidReceiveException(String message, Throwable cause) {
        super(message, cause);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/KafkaChannel.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.SslAuthenticationException;
import org.apache.kafka.common.memory.MemoryPool;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.auth.KafkaPrincipalSerde;
import org.apache.kafka.common.utils.Utils;
import java.io.IOException;
import java.net.InetAddress;
import java.net.Socket;
import java.net.SocketAddress;
import java.nio.channels.SelectionKey;
import java.nio.channels.SocketChannel;
import java.util.Optional;
import java.util.function.Supplier;
/**
* A Kafka connection either existing on a client (which could be a broker in an
* inter-broker scenario) and representing the channel to a remote broker or the
* reverse (existing on a broker and representing the channel to a remote
* client, which could be a broker in an inter-broker scenario).
* <p>
* Each instance has the following:
* <ul>
* <li>a unique ID identifying it in the {@code KafkaClient} instance via which
* the connection was made on the client-side or in the instance where it was
* accepted on the server-side</li>
* <li>a reference to the underlying {@link TransportLayer} to allow reading and
* writing</li>
* <li>an {@link Authenticator} that performs the authentication (or
* re-authentication, if that feature is enabled and it applies to this
* connection) by reading and writing directly from/to the same
* {@link TransportLayer}.</li>
* <li>a {@link MemoryPool} into which responses are read (typically the JVM
* heap for clients, though smaller pools can be used for brokers and for
* testing out-of-memory scenarios)</li>
* <li>a {@link NetworkReceive} representing the current incomplete/in-progress
* request (from the server-side perspective) or response (from the client-side
* perspective) being read, if applicable; or a non-null value that has had no
* data read into it yet or a null value if there is no in-progress
* request/response (either could be the case)</li>
* <li>a {@link Send} representing the current request (from the client-side
* perspective) or response (from the server-side perspective) that is either
* waiting to be sent or partially sent, if applicable, or null</li>
* <li>a {@link ChannelMuteState} to document if the channel has been muted due
* to memory pressure or other reasons</li>
* </ul>
*/
public class KafkaChannel implements AutoCloseable {
    // One second expressed in nanoseconds (minimum re-authentication interval).
    private static final long MIN_REAUTH_INTERVAL_ONE_SECOND_NANOS = 1000 * 1000 * 1000;
    /**
     * Mute States for KafkaChannel:
     * <ul>
     *   <li> NOT_MUTED: Channel is not muted. This is the default state. </li>
     *   <li> MUTED: Channel is muted. Channel must be in this state to be unmuted. </li>
     *   <li> MUTED_AND_RESPONSE_PENDING: (SocketServer only) Channel is muted and SocketServer has not sent a response
     *                                    back to the client yet (acks != 0) or is currently waiting to receive a
     *                                    response from the API layer (acks == 0). </li>
     *   <li> MUTED_AND_THROTTLED: (SocketServer only) Channel is muted and throttling is in progress due to quota
     *                             violation. </li>
     *   <li> MUTED_AND_THROTTLED_AND_RESPONSE_PENDING: (SocketServer only) Channel is muted, throttling is in progress,
     *                                                  and a response is currently pending. </li>
     * </ul>
     */
    public enum ChannelMuteState {
        NOT_MUTED,
        MUTED,
        MUTED_AND_RESPONSE_PENDING,
        MUTED_AND_THROTTLED,
        MUTED_AND_THROTTLED_AND_RESPONSE_PENDING
    }

    /** Socket server events that will change the mute state:
     * <ul>
     *   <li> REQUEST_RECEIVED: A request has been received from the client. </li>
     *   <li> RESPONSE_SENT: A response has been sent out to the client (ack != 0) or SocketServer has heard back from
     *                       the API layer (acks = 0) </li>
     *   <li> THROTTLE_STARTED: Throttling started due to quota violation. </li>
     *   <li> THROTTLE_ENDED: Throttling ended. </li>
     * </ul>
     *
     * Valid transitions on each event are:
     * <ul>
     *   <li> REQUEST_RECEIVED: MUTED => MUTED_AND_RESPONSE_PENDING </li>
     *   <li> RESPONSE_SENT:    MUTED_AND_RESPONSE_PENDING => MUTED, MUTED_AND_THROTTLED_AND_RESPONSE_PENDING => MUTED_AND_THROTTLED </li>
     *   <li> THROTTLE_STARTED: MUTED_AND_RESPONSE_PENDING => MUTED_AND_THROTTLED_AND_RESPONSE_PENDING </li>
     *   <li> THROTTLE_ENDED:   MUTED_AND_THROTTLED => MUTED, MUTED_AND_THROTTLED_AND_RESPONSE_PENDING => MUTED_AND_RESPONSE_PENDING </li>
     * </ul>
     */
    public enum ChannelMuteEvent {
        REQUEST_RECEIVED,
        RESPONSE_SENT,
        THROTTLE_STARTED,
        THROTTLE_ENDED
    }
    // Unique id of this channel within its owning selector/client.
    private final String id;
    private final TransportLayer transportLayer;
    // Supplies a fresh Authenticator; kept so a new one can be created later
    // (presumably for re-authentication — the call site is outside this view).
    private final Supplier<Authenticator> authenticatorCreator;
    private Authenticator authenticator;
    // Tracks accumulated network thread time. This is updated on the network thread.
    // The values are read and reset after each response is sent.
    private long networkThreadTimeNanos;
    private final int maxReceiveSize;
    // Pool from which receive buffers are allocated.
    private final MemoryPool memoryPool;
    private final ChannelMetadataRegistry metadataRegistry;
    // In-progress receive, or null when no read is in flight.
    private NetworkReceive receive;
    // Pending or partially-written outbound send, or null.
    private NetworkSend send;
    // Track connection and mute state of channels to enable outstanding requests on channels to be
    // processed after the channel is disconnected.
    private boolean disconnected;
    private ChannelMuteState muteState;
    private ChannelState state;
    // Captured before finishConnect() so it remains available if the connection is refused.
    private SocketAddress remoteAddress;
    private int successfulAuthentications;
    // Cleared when a send completes (see maybeCompleteSend()).
    private boolean midWrite;
    private long lastReauthenticationStartNanos;

    public KafkaChannel(String id, TransportLayer transportLayer, Supplier<Authenticator> authenticatorCreator,
                        int maxReceiveSize, MemoryPool memoryPool, ChannelMetadataRegistry metadataRegistry) {
        this.id = id;
        this.transportLayer = transportLayer;
        this.authenticatorCreator = authenticatorCreator;
        this.authenticator = authenticatorCreator.get();
        this.networkThreadTimeNanos = 0L;
        this.maxReceiveSize = maxReceiveSize;
        this.memoryPool = memoryPool;
        this.metadataRegistry = metadataRegistry;
        this.disconnected = false;
        this.muteState = ChannelMuteState.NOT_MUTED;
        this.state = ChannelState.NOT_CONNECTED;
    }

    /**
     * Marks the channel disconnected and closes the transport layer, authenticator,
     * in-flight receive and metadata registry.
     */
    public void close() throws IOException {
        this.disconnected = true;
        Utils.closeAll(transportLayer, authenticator, receive, metadataRegistry);
    }
    /**
     * Returns the principal returned by `authenticator.principal()`.
     */
    public KafkaPrincipal principal() {
        return authenticator.principal();
    }

    // Serde for the authenticated principal, if the authenticator provides one.
    public Optional<KafkaPrincipalSerde> principalSerde() {
        return authenticator.principalSerde();
    }

    /**
     * Does handshake of transportLayer and authentication using configured authenticator.
     * For SSL with client authentication enabled, {@link TransportLayer#handshake()} performs
     * authentication. For SASL, authentication is performed by {@link Authenticator#authenticate()}.
     */
    public void prepare() throws AuthenticationException, IOException {
        boolean authenticating = false;
        try {
            if (!transportLayer.ready())
                transportLayer.handshake();
            if (transportLayer.ready() && !authenticator.complete()) {
                authenticating = true;
                authenticator.authenticate();
            }
        } catch (AuthenticationException e) {
            // Clients are notified of authentication exceptions to enable operations to be terminated
            // without retries. Other errors are handled as network exceptions in Selector.
            String remoteDesc = remoteAddress != null ? remoteAddress.toString() : null;
            state = new ChannelState(ChannelState.State.AUTHENTICATION_FAILED, e, remoteDesc);
            if (authenticating) {
                // Keep the channel open long enough for the failure response to be sent to the peer.
                delayCloseOnAuthenticationFailure();
                throw new DelayedResponseAuthenticationException(e);
            }
            throw e;
        }
        if (ready()) {
            ++successfulAuthentications;
            state = ChannelState.READY;
        }
    }

    /**
     * Marks the channel disconnected and disconnects the underlying transport layer.
     */
    public void disconnect() {
        disconnected = true;
        if (state == ChannelState.NOT_CONNECTED && remoteAddress != null) {
            //if we captured the remote address we can provide more information
            state = new ChannelState(ChannelState.State.NOT_CONNECTED, remoteAddress.toString());
        }
        transportLayer.disconnect();
    }

    // Overrides the tracked channel state (set externally, e.g. by the selector).
    public void state(ChannelState state) {
        this.state = state;
    }

    public ChannelState state() {
        return this.state;
    }
    /**
     * Completes an in-progress connect on the transport layer and updates the
     * channel state accordingly.
     *
     * @return true if the socket connection is now established
     */
    public boolean finishConnect() throws IOException {
        //we need to grab remoteAddr before finishConnect() is called otherwise
        //it becomes inaccessible if the connection was refused.
        SocketChannel socketChannel = transportLayer.socketChannel();
        if (socketChannel != null) {
            remoteAddress = socketChannel.getRemoteAddress();
        }
        boolean connected = transportLayer.finishConnect();
        if (connected) {
            if (ready()) {
                // Transport and authenticator already complete: go straight to READY.
                state = ChannelState.READY;
            } else if (remoteAddress != null) {
                state = new ChannelState(ChannelState.State.AUTHENTICATE, remoteAddress.toString());
            } else {
                state = ChannelState.AUTHENTICATE;
            }
        }
        return connected;
    }

    public boolean isConnected() {
        return transportLayer.isConnected();
    }

    public String id() {
        return id;
    }

    public SelectionKey selectionKey() {
        return transportLayer.selectionKey();
    }

    /**
     * externally muting a channel should be done via selector to ensure proper state handling
     */
    void mute() {
        if (muteState == ChannelMuteState.NOT_MUTED) {
            // Stop reading from the socket; interest ops are not touched once disconnected.
            if (!disconnected) transportLayer.removeInterestOps(SelectionKey.OP_READ);
            muteState = ChannelMuteState.MUTED;
        }
    }

    /**
     * Unmute the channel. The channel can be unmuted only if it is in the MUTED state. For other muted states
     * (MUTED_AND_*), this is a no-op.
     *
     * @return Whether or not the channel is in the NOT_MUTED state after the call
     */
    boolean maybeUnmute() {
        if (muteState == ChannelMuteState.MUTED) {
            if (!disconnected) transportLayer.addInterestOps(SelectionKey.OP_READ);
            muteState = ChannelMuteState.NOT_MUTED;
        }
        return muteState == ChannelMuteState.NOT_MUTED;
    }
    // Handle the specified channel mute-related event and transition the mute state according to the state machine.
    // Any event that matches no transition from the current state throws IllegalStateException.
    public void handleChannelMuteEvent(ChannelMuteEvent event) {
        boolean stateChanged = false;
        switch (event) {
            case REQUEST_RECEIVED:
                if (muteState == ChannelMuteState.MUTED) {
                    muteState = ChannelMuteState.MUTED_AND_RESPONSE_PENDING;
                    stateChanged = true;
                }
                break;
            case RESPONSE_SENT:
                if (muteState == ChannelMuteState.MUTED_AND_RESPONSE_PENDING) {
                    muteState = ChannelMuteState.MUTED;
                    stateChanged = true;
                }
                // The two branches are mutually exclusive: the first cannot set a state the second matches.
                if (muteState == ChannelMuteState.MUTED_AND_THROTTLED_AND_RESPONSE_PENDING) {
                    muteState = ChannelMuteState.MUTED_AND_THROTTLED;
                    stateChanged = true;
                }
                break;
            case THROTTLE_STARTED:
                if (muteState == ChannelMuteState.MUTED_AND_RESPONSE_PENDING) {
                    muteState = ChannelMuteState.MUTED_AND_THROTTLED_AND_RESPONSE_PENDING;
                    stateChanged = true;
                }
                break;
            case THROTTLE_ENDED:
                if (muteState == ChannelMuteState.MUTED_AND_THROTTLED) {
                    muteState = ChannelMuteState.MUTED;
                    stateChanged = true;
                }
                if (muteState == ChannelMuteState.MUTED_AND_THROTTLED_AND_RESPONSE_PENDING) {
                    muteState = ChannelMuteState.MUTED_AND_RESPONSE_PENDING;
                    stateChanged = true;
                }
        }
        if (!stateChanged) {
            throw new IllegalStateException("Cannot transition from " + muteState.name() + " for " + event.name());
        }
    }

    public ChannelMuteState muteState() {
        return muteState;
    }
/**
* Delay channel close on authentication failure. This will remove all read/write operations from the channel until
* {@link #completeCloseOnAuthenticationFailure()} is called to finish up the channel close.
*/
private void delayCloseOnAuthenticationFailure() {
transportLayer.removeInterestOps(SelectionKey.OP_WRITE);
}
/**
* Finish up any processing on {@link #prepare()} failure.
* @throws IOException
*/
void completeCloseOnAuthenticationFailure() throws IOException {
transportLayer.addInterestOps(SelectionKey.OP_WRITE);
// Invoke the underlying handler to finish up any processing on authentication failure
authenticator.handleAuthenticationFailure();
}
/**
* Returns true if this channel has been explicitly muted using {@link KafkaChannel#mute()}
*/
public boolean isMuted() {
return muteState != ChannelMuteState.NOT_MUTED;
}
/**
 * Whether this channel may currently be muted due to memory pressure.
 *
 * A channel is only mutable while an in-flight receive is waiting on memory:
 * if there is no current receive (we cannot know what the next request needs)
 * or its memory has already been allocated, muting is not applicable.
 */
public boolean isInMutableState() {
    if (receive != null && !receive.memoryAllocated()) {
        // Muting additionally requires the underlying transport to be ready.
        return transportLayer.ready();
    }
    return false;
}
/**
 * @return true once the transport layer is ready and authentication has completed
 */
public boolean ready() {
return transportLayer.ready() && authenticator.complete();
}
/**
 * @return true if a send is currently in progress on this channel
 */
public boolean hasSend() {
return send != null;
}
/**
 * Returns the address to which this channel's socket is connected or `null` if the socket has never been connected.
 *
 * If the socket was connected prior to being closed, then this method will continue to return the
 * connected address after the socket is closed.
 */
public InetAddress socketAddress() {
return transportLayer.socketChannel().socket().getInetAddress();
}
/**
 * Human-readable description of this channel's socket: the remote address when
 * available, otherwise the local address (for sockets that were never connected).
 */
public String socketDescription() {
    Socket socket = transportLayer.socketChannel().socket();
    InetAddress remote = socket.getInetAddress();
    return (remote == null ? socket.getLocalAddress() : remote).toString();
}
/**
 * Begin an outbound send on this channel and register write interest.
 *
 * @param send the send to transmit
 * @throws IllegalStateException if a previous send is still in progress
 */
public void setSend(NetworkSend send) {
if (this.send != null)
throw new IllegalStateException("Attempt to begin a send operation with prior send operation still in progress, connection id is " + id);
this.send = send;
this.transportLayer.addInterestOps(SelectionKey.OP_WRITE);
}
/**
 * Return and clear the in-progress send if it has been fully written,
 * otherwise return null.
 */
public NetworkSend maybeCompleteSend() {
    // Nothing to hand back until the in-flight send has fully drained.
    if (send == null || !send.completed())
        return null;
    midWrite = false;
    // No more data to write, so stop watching for writability.
    transportLayer.removeInterestOps(SelectionKey.OP_WRITE);
    NetworkSend completedSend = send;
    send = null;
    return completedSend;
}
/**
 * Read from the transport into the in-progress receive, creating a new receive if needed.
 *
 * @return the number of bytes received (possibly 0)
 * @throws IOException if the underlying read fails
 */
public long read() throws IOException {
if (receive == null) {
receive = new NetworkReceive(maxReceiveSize, id, memoryPool);
}
long bytesReceived = receive(this.receive);
if (this.receive.requiredMemoryAmountKnown() && !this.receive.memoryAllocated() && isInMutableState()) {
//pool must be out of memory, mute ourselves.
mute();
}
return bytesReceived;
}
/**
 * @return the in-progress receive, or null if none is in progress
 */
public NetworkReceive currentReceive() {
return receive;
}
/**
 * Return and clear the in-progress receive if its full payload has arrived,
 * otherwise return null. The payload is rewound before it is handed back.
 */
public NetworkReceive maybeCompleteReceive() {
    if (receive == null || !receive.complete())
        return null;
    receive.payload().rewind();
    NetworkReceive completedReceive = receive;
    receive = null;
    return completedReceive;
}
/**
 * Write as much of the pending send as the transport accepts.
 *
 * @return the number of bytes written, or 0 if there is no pending send
 * @throws IOException if the underlying write fails
 */
public long write() throws IOException {
if (send == null)
return 0;
// Mark a write as in progress until maybeCompleteSend() observes completion.
midWrite = true;
return send.writeTo(transportLayer);
}
/**
 * Accumulates network thread time for this channel.
 *
 * @param nanos the elapsed network-thread time, in nanoseconds, to add
 */
public void addNetworkThreadTimeNanos(long nanos) {
networkThreadTimeNanos += nanos;
}
/**
 * Returns accumulated network thread time for this channel and resets
 * the value to zero.
 *
 * @return the accumulated time in nanoseconds prior to the reset
 */
public long getAndResetNetworkThreadTimeNanos() {
long current = networkThreadTimeNanos;
networkThreadTimeNanos = 0;
return current;
}
// Reads from the transport into the given receive. TLS authentication failures
// surfaced by the read are recorded as an AUTHENTICATION_FAILED channel state
// before being rethrown to the caller.
private long receive(NetworkReceive receive) throws IOException {
try {
return receive.readFrom(transportLayer);
} catch (SslAuthenticationException e) {
// With TLSv1.3, post-handshake messages may throw SSLExceptions, which are
// handled as authentication failures
String remoteDesc = remoteAddress != null ? remoteAddress.toString() : null;
state = new ChannelState(ChannelState.State.AUTHENTICATION_FAILED, e, remoteDesc);
throw e;
}
}
/**
 * @return true if underlying transport has bytes remaining to be read from any underlying intermediate buffers.
 */
public boolean hasBytesBuffered() {
return transportLayer.hasBytesBuffered();
}
@Override
public boolean equals(Object o) {
    // Channel identity is determined solely by the connection id.
    if (o == this)
        return true;
    if (o == null || o.getClass() != getClass())
        return false;
    return id.equals(((KafkaChannel) o).id);
}
@Override
public int hashCode() {
// Consistent with equals(): identity is the connection id.
return id.hashCode();
}
@Override
public String toString() {
// Default Object representation plus the connection id for log readability.
return super.toString() + " id=" + id;
}
/**
 * Return the number of times this instance has successfully authenticated. This
 * value can only exceed 1 when re-authentication is enabled and it has
 * succeeded at least once.
 *
 * @return the number of times this instance has successfully authenticated
 */
public int successfulAuthentications() {
return successfulAuthentications;
}
/**
 * If this is a server-side connection that has an expiration time and at least
 * 1 second has passed since the prior re-authentication (if any) started then
 * begin the process of re-authenticating the connection and return true,
 * otherwise return false
 *
 * @param saslHandshakeNetworkReceive
 *            the mandatory {@link NetworkReceive} containing the
 *            {@code SaslHandshakeRequest} that has been received on the server
 *            and that initiates re-authentication.
 * @param nowNanosSupplier
 *            {@code Supplier} of the current time. The value must be in
 *            nanoseconds as per {@code System.nanoTime()} and is therefore only
 *            useful when compared to such a value -- its absolute value is
 *            meaningless.
 *
 * @return true if this is a server-side connection that has an expiration time
 *         and at least 1 second has passed since the prior re-authentication
 *         (if any) started to indicate that the re-authentication process has
 *         begun, otherwise false
 * @throws AuthenticationException
 *             if re-authentication fails due to invalid credentials or other
 *             security configuration errors
 * @throws IOException
 *             if read/write fails due to an I/O error
 * @throws IllegalStateException
 *             if this channel is not "ready"
 */
public boolean maybeBeginServerReauthentication(NetworkReceive saslHandshakeNetworkReceive,
Supplier<Long> nowNanosSupplier) throws AuthenticationException, IOException {
if (!ready())
throw new IllegalStateException(
"KafkaChannel should be \"ready\" when processing SASL Handshake for potential re-authentication");
/*
 * Re-authentication is disabled if there is no session expiration time, in
 * which case the SASL handshake network receive will be processed normally,
 * which results in a failure result being sent to the client. Also, no need to
 * check if we are muted since we are processing a received packet when we invoke
 * this.
 */
if (authenticator.serverSessionExpirationTimeNanos() == null)
return false;
/*
 * We've delayed getting the time as long as possible in case we don't need it,
 * but at this point we need it -- so get it now.
 */
long nowNanos = nowNanosSupplier.get();
/*
 * Cannot re-authenticate more than once every second; an attempt to do so will
 * result in the SASL handshake network receive being processed normally, which
 * results in a failure result being sent to the client.
 */
if (lastReauthenticationStartNanos != 0
&& nowNanos - lastReauthenticationStartNanos < MIN_REAUTH_INTERVAL_ONE_SECOND_NANOS)
return false;
lastReauthenticationStartNanos = nowNanos;
swapAuthenticatorsAndBeginReauthentication(
new ReauthenticationContext(authenticator, saslHandshakeNetworkReceive, nowNanos));
return true;
}
/**
 * If this is a client-side connection that is not muted, there is no
 * in-progress write, and there is a session expiration time defined that has
 * passed then begin the process of re-authenticating the connection and return
 * true, otherwise return false
 *
 * @param nowNanosSupplier
 *            {@code Supplier} of the current time. The value must be in
 *            nanoseconds as per {@code System.nanoTime()} and is therefore only
 *            useful when compared to such a value -- its absolute value is
 *            meaningless.
 *
 * @return true if this is a client-side connection that is not muted, there is
 *         no in-progress write, and there is a session expiration time defined
 *         that has passed to indicate that the re-authentication process has
 *         begun, otherwise false
 * @throws AuthenticationException
 *             if re-authentication fails due to invalid credentials or other
 *             security configuration errors
 * @throws IOException
 *             if read/write fails due to an I/O error
 * @throws IllegalStateException
 *             if this channel is not "ready"
 */
public boolean maybeBeginClientReauthentication(Supplier<Long> nowNanosSupplier)
throws AuthenticationException, IOException {
if (!ready())
throw new IllegalStateException(
"KafkaChannel should always be \"ready\" when it is checked for possible re-authentication");
if (muteState != ChannelMuteState.NOT_MUTED || midWrite
|| authenticator.clientSessionReauthenticationTimeNanos() == null)
return false;
/*
 * We've delayed getting the time as long as possible in case we don't need it,
 * but at this point we need it -- so get it now.
 */
long nowNanos = nowNanosSupplier.get();
if (nowNanos < authenticator.clientSessionReauthenticationTimeNanos())
return false;
// Hand any partially-read receive to the re-authentication context; it no
// longer belongs to this channel's normal read path.
swapAuthenticatorsAndBeginReauthentication(new ReauthenticationContext(authenticator, receive, nowNanos));
receive = null;
return true;
}
/**
 * Return the number of milliseconds that elapsed while re-authenticating this
 * session from the perspective of this instance, if applicable, otherwise null.
 * The server-side perspective will yield a lower value than the client-side
 * perspective of the same re-authentication because the client-side observes an
 * additional network round-trip.
 *
 * @return the number of milliseconds that elapsed while re-authenticating this
 *         session from the perspective of this instance, if applicable,
 *         otherwise null
 */
public Long reauthenticationLatencyMs() {
return authenticator.reauthenticationLatencyMs();
}
/**
 * Return true if this is a server-side channel and the given time is past the
 * session expiration time, if any, otherwise false
 *
 * @param nowNanos
 *            the current time in nanoseconds as per {@code System.nanoTime()}
 * @return true if this is a server-side channel and the given time is past the
 *         session expiration time, if any, otherwise false
 */
public boolean serverAuthenticationSessionExpired(long nowNanos) {
Long serverSessionExpirationTimeNanos = authenticator.serverSessionExpirationTimeNanos();
// Compare via subtraction rather than '>' so the check stays correct across
// System.nanoTime() numerical wraparound.
return serverSessionExpirationTimeNanos != null && nowNanos - serverSessionExpirationTimeNanos > 0;
}
/**
 * Return the (always non-null but possibly empty) client-side
 * {@link NetworkReceive} response that arrived during re-authentication but
 * is unrelated to re-authentication. This corresponds to a request sent
 * prior to the beginning of re-authentication; the request was made when the
 * channel was successfully authenticated, and the response arrived during the
 * re-authentication process.
 *
 * @return client-side {@link NetworkReceive} response that arrived during
 *         re-authentication that is unrelated to re-authentication. This may
 *         be empty.
 */
public Optional<NetworkReceive> pollResponseReceivedDuringReauthentication() {
return authenticator.pollResponseReceivedDuringReauthentication();
}
/**
 * Return true if this is a server-side channel and the connected client has
 * indicated that it supports re-authentication, otherwise false
 *
 * @return true if this is a server-side channel and the connected client has
 *         indicated that it supports re-authentication, otherwise false
 */
boolean connectedClientSupportsReauthentication() {
return authenticator.connectedClientSupportsReauthentication();
}
// Replaces the current authenticator with a freshly created one and starts the
// re-authentication process with the given context.
private void swapAuthenticatorsAndBeginReauthentication(ReauthenticationContext reauthenticationContext)
throws IOException {
// it is up to the new authenticator to close the old one
// replace with a new one and begin the process of re-authenticating
authenticator = authenticatorCreator.get();
authenticator.reauthenticate(reauthenticationContext);
}
/**
 * @return the registry holding metadata recorded for this channel
 */
public ChannelMetadataRegistry channelMetadataRegistry() {
return metadataRegistry;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/ListenerName.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import java.util.Locale;
import java.util.Objects;
/**
 * Immutable, case-sensitive name of a listener. Instances wrap a non-null
 * string value and provide helpers for deriving listener-scoped config prefixes.
 */
public final class ListenerName {
    private static final String CONFIG_STATIC_PREFIX = "listener.name";

    /**
     * Create an instance with the security protocol name as the value.
     */
    public static ListenerName forSecurityProtocol(SecurityProtocol securityProtocol) {
        return new ListenerName(securityProtocol.name);
    }

    /**
     * Create an instance with the provided value converted to uppercase.
     */
    public static ListenerName normalised(String value) {
        return new ListenerName(value.toUpperCase(Locale.ROOT));
    }

    private final String value;

    public ListenerName(String value) {
        Objects.requireNonNull(value, "value should not be null");
        this.value = value;
    }

    public String value() {
        return value;
    }

    @Override
    public boolean equals(Object o) {
        // Two listener names are equal iff their (case-sensitive) values match.
        return o instanceof ListenerName && value.equals(((ListenerName) o).value);
    }

    @Override
    public int hashCode() {
        return value.hashCode();
    }

    @Override
    public String toString() {
        return "ListenerName(" + value + ")";
    }

    /**
     * Prefix for configs scoped to this listener, e.g. "listener.name.internal.".
     */
    public String configPrefix() {
        return CONFIG_STATIC_PREFIX + "." + value.toLowerCase(Locale.ROOT) + ".";
    }

    /**
     * Prefix for configs scoped to this listener and the given SASL mechanism.
     */
    public String saslMechanismConfigPrefix(String saslMechanism) {
        return configPrefix() + saslMechanismPrefix(saslMechanism);
    }

    public static String saslMechanismPrefix(String saslMechanism) {
        return saslMechanism.toLowerCase(Locale.ROOT) + ".";
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/ListenerReconfigurable.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import org.apache.kafka.common.Reconfigurable;
/**
 * Interface for reconfigurable entities associated with a listener.
 */
public interface ListenerReconfigurable extends Reconfigurable {
/**
 * Returns the listener name associated with this reconfigurable. Listener-specific
 * configs corresponding to this listener name are provided for reconfiguration.
 *
 * @return the listener name associated with this reconfigurable
 */
ListenerName listenerName();
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/Mode.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
/**
 * Connection mode for SSL and SASL connections: identifies whether this end of
 * the channel plays the client role or the server role.
 */
public enum Mode { CLIENT, SERVER }
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/NetworkReceive.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import org.apache.kafka.common.memory.MemoryPool;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ScatteringByteChannel;
/**
 * A size delimited Receive that consists of a 4 byte network-ordered size N followed by N bytes of content.
 *
 * Reading proceeds in phases across possibly many {@link #readFrom} calls: first the
 * 4-byte size prefix, then allocation of the payload buffer from the memory pool
 * (which may be deferred when the pool is low on memory), then the payload itself.
 */
public class NetworkReceive implements Receive {
public static final String UNKNOWN_SOURCE = "";
public static final int UNLIMITED = -1;
private static final Logger log = LoggerFactory.getLogger(NetworkReceive.class);
// Shared zero-length payload used when the size prefix is 0 (e.g. some SASL payloads).
private static final ByteBuffer EMPTY_BUFFER = ByteBuffer.allocate(0);
// Connection id the bytes are read from.
private final String source;
// Fixed 4-byte buffer for the network-ordered size prefix.
private final ByteBuffer size;
// Maximum permitted payload size, or UNLIMITED (-1) for no limit.
private final int maxSize;
// Pool the payload buffer is allocated from; tryAllocate may return null under memory pressure.
private final MemoryPool memoryPool;
// Payload size parsed from the prefix; -1 until the prefix has been fully read.
private int requestedBufferSize = -1;
// Payload buffer; null until successfully allocated.
private ByteBuffer buffer;
// Wraps an already-materialized payload buffer for the given source.
public NetworkReceive(String source, ByteBuffer buffer) {
this(UNLIMITED, source);
this.buffer = buffer;
}
public NetworkReceive(String source) {
this(UNLIMITED, source);
}
public NetworkReceive(int maxSize, String source) {
this(maxSize, source, MemoryPool.NONE);
}
public NetworkReceive(int maxSize, String source, MemoryPool memoryPool) {
this.source = source;
this.size = ByteBuffer.allocate(4);
this.buffer = null;
this.maxSize = maxSize;
this.memoryPool = memoryPool;
}
public NetworkReceive() {
this(UNKNOWN_SOURCE);
}
@Override
public String source() {
return source;
}
@Override
public boolean complete() {
// Complete once the size prefix has been consumed and the payload fully read.
return !size.hasRemaining() && buffer != null && !buffer.hasRemaining();
}
public long readFrom(ScatteringByteChannel channel) throws IOException {
int read = 0;
// Phase 1: read the 4-byte size prefix and validate the announced payload size.
if (size.hasRemaining()) {
int bytesRead = channel.read(size);
if (bytesRead < 0)
throw new EOFException();
read += bytesRead;
if (!size.hasRemaining()) {
size.rewind();
int receiveSize = size.getInt();
if (receiveSize < 0)
throw new InvalidReceiveException("Invalid receive (size = " + receiveSize + ")");
if (maxSize != UNLIMITED && receiveSize > maxSize)
throw new InvalidReceiveException("Invalid receive (size = " + receiveSize + " larger than " + maxSize + ")");
requestedBufferSize = receiveSize; //may be 0 for some payloads (SASL)
if (receiveSize == 0) {
buffer = EMPTY_BUFFER;
}
}
}
// Phase 2: allocate the payload buffer once the size is known; allocation may be
// declined by the pool, in which case we retry on a subsequent call.
if (buffer == null && requestedBufferSize != -1) { //we know the size we want but havent been able to allocate it yet
buffer = memoryPool.tryAllocate(requestedBufferSize);
if (buffer == null)
log.trace("Broker low on memory - could not allocate buffer of size {} for source {}", requestedBufferSize, source);
}
// Phase 3: read payload bytes into the allocated buffer.
if (buffer != null) {
int bytesRead = channel.read(buffer);
if (bytesRead < 0)
throw new EOFException();
read += bytesRead;
}
return read;
}
@Override
public boolean requiredMemoryAmountKnown() {
return requestedBufferSize != -1;
}
@Override
public boolean memoryAllocated() {
return buffer != null;
}
@Override
public void close() throws IOException {
// The shared EMPTY_BUFFER is never returned to the pool.
if (buffer != null && buffer != EMPTY_BUFFER) {
memoryPool.release(buffer);
buffer = null;
}
}
public ByteBuffer payload() {
return this.buffer;
}
public int bytesRead() {
if (buffer == null)
return size.position();
return buffer.position() + size.position();
}
/**
 * Returns the total size of the receive including payload and size buffer
 * for use in metrics. This is consistent with {@link NetworkSend#size()}
 */
public int size() {
// NOTE(review): payload() is null until memory has been allocated, so this
// would NPE on an incomplete receive — callers appear expected to invoke it
// only on completed receives; confirm at call sites.
return payload().limit() + size.limit();
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/NetworkSend.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import java.io.IOException;
/**
 * A {@link Send} tagged with the id of the destination connection; all transfer
 * operations are delegated to the wrapped send.
 */
public class NetworkSend implements Send {
// Connection id this send is destined for.
private final String destinationId;
// The wrapped send that holds the actual payload.
private final Send send;
public NetworkSend(String destinationId, Send send) {
this.destinationId = destinationId;
this.send = send;
}
public String destinationId() {
return destinationId;
}
public Send send() {
return send;
}
@Override
public boolean completed() {
return send.completed();
}
@Override
public long writeTo(TransferableChannel channel) throws IOException {
return send.writeTo(channel);
}
@Override
public long size() {
return send.size();
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/PlaintextChannelBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.memory.MemoryPool;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.auth.KafkaPrincipalBuilder;
import org.apache.kafka.common.security.auth.KafkaPrincipalSerde;
import org.apache.kafka.common.security.auth.PlaintextAuthenticationContext;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetAddress;
import java.nio.channels.SelectionKey;
import java.util.Map;
import java.util.Optional;
import java.util.function.Supplier;
/**
 * ChannelBuilder for PLAINTEXT connections: builds KafkaChannels over a
 * {@link PlaintextTransportLayer} with a no-op authenticator.
 */
public class PlaintextChannelBuilder implements ChannelBuilder {
private static final Logger log = LoggerFactory.getLogger(PlaintextChannelBuilder.class);
private final ListenerName listenerName;
// Configs captured at configure() time and passed to each authenticator.
private Map<String, ?> configs;
/**
 * Constructs a plaintext channel builder. ListenerName is non-null whenever
 * it's instantiated in the broker and null otherwise.
 */
public PlaintextChannelBuilder(ListenerName listenerName) {
this.listenerName = listenerName;
}
public void configure(Map<String, ?> configs) throws KafkaException {
this.configs = configs;
}
@Override
public KafkaChannel buildChannel(String id, SelectionKey key, int maxReceiveSize,
MemoryPool memoryPool, ChannelMetadataRegistry metadataRegistry) throws KafkaException {
try {
PlaintextTransportLayer transportLayer = buildTransportLayer(key);
// Authenticator creation is deferred via a supplier; plaintext authentication is a no-op.
Supplier<Authenticator> authenticatorCreator = () -> new PlaintextAuthenticator(configs, transportLayer, listenerName);
return buildChannel(id, transportLayer, authenticatorCreator, maxReceiveSize,
memoryPool != null ? memoryPool : MemoryPool.NONE, metadataRegistry);
} catch (Exception e) {
// Wrap any failure in KafkaException, the type callers of ChannelBuilder expect.
throw new KafkaException(e);
}
}
// visible for testing
KafkaChannel buildChannel(String id, TransportLayer transportLayer, Supplier<Authenticator> authenticatorCreator,
int maxReceiveSize, MemoryPool memoryPool, ChannelMetadataRegistry metadataRegistry) {
return new KafkaChannel(id, transportLayer, authenticatorCreator, maxReceiveSize, memoryPool, metadataRegistry);
}
protected PlaintextTransportLayer buildTransportLayer(SelectionKey key) throws IOException {
return new PlaintextTransportLayer(key);
}
@Override
public void close() {}
/**
 * No-op authenticator for plaintext connections: always complete, and builds
 * the peer principal from the client address and listener name.
 */
private static class PlaintextAuthenticator implements Authenticator {
private final PlaintextTransportLayer transportLayer;
private final KafkaPrincipalBuilder principalBuilder;
private final ListenerName listenerName;
private PlaintextAuthenticator(Map<String, ?> configs, PlaintextTransportLayer transportLayer, ListenerName listenerName) {
this.transportLayer = transportLayer;
this.principalBuilder = ChannelBuilders.createPrincipalBuilder(configs, null, null);
this.listenerName = listenerName;
}
@Override
public void authenticate() {}
@Override
public KafkaPrincipal principal() {
InetAddress clientAddress = transportLayer.socketChannel().socket().getInetAddress();
// listenerName should only be null in Client mode where principal() should not be called
if (listenerName == null)
throw new IllegalStateException("Unexpected call to principal() when listenerName is null");
return principalBuilder.build(new PlaintextAuthenticationContext(clientAddress, listenerName.value()));
}
@Override
public Optional<KafkaPrincipalSerde> principalSerde() {
return principalBuilder instanceof KafkaPrincipalSerde ? Optional.of((KafkaPrincipalSerde) principalBuilder) : Optional.empty();
}
@Override
public boolean complete() {
// Plaintext requires no handshake, so authentication is always complete.
return true;
}
@Override
public void close() {
if (principalBuilder instanceof Closeable)
Utils.closeQuietly((Closeable) principalBuilder, "principal builder");
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/network/PlaintextTransportLayer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.network;
/*
* Transport layer for PLAINTEXT communication
*/
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.channels.SocketChannel;
import java.nio.channels.SelectionKey;
import java.security.Principal;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
/**
 * TransportLayer implementation for PLAINTEXT communication: a thin wrapper
 * around a {@link SocketChannel} with no encryption and an ANONYMOUS principal.
 */
public class PlaintextTransportLayer implements TransportLayer {
private final SelectionKey key;
private final SocketChannel socketChannel;
private final Principal principal = KafkaPrincipal.ANONYMOUS;
public PlaintextTransportLayer(SelectionKey key) throws IOException {
this.key = key;
this.socketChannel = (SocketChannel) key.channel();
}
@Override
public boolean ready() {
// No handshake is required for plaintext, so the layer is always ready.
return true;
}
@Override
public boolean finishConnect() throws IOException {
boolean connected = socketChannel.finishConnect();
if (connected)
// Once connected, stop watching OP_CONNECT and start watching OP_READ.
key.interestOps(key.interestOps() & ~SelectionKey.OP_CONNECT | SelectionKey.OP_READ);
return connected;
}
@Override
public void disconnect() {
key.cancel();
}
@Override
public SocketChannel socketChannel() {
return socketChannel;
}
@Override
public SelectionKey selectionKey() {
return key;
}
@Override
public boolean isOpen() {
return socketChannel.isOpen();
}
@Override
public boolean isConnected() {
return socketChannel.isConnected();
}
@Override
public void close() throws IOException {
socketChannel.socket().close();
socketChannel.close();
}
/**
 * Performs SSL handshake hence is a no-op for the non-secure
 * implementation
 */
@Override
public void handshake() {}
/**
 * Reads a sequence of bytes from this channel into the given buffer.
 *
 * @param dst The buffer into which bytes are to be transferred
 * @return The number of bytes read, possible zero or -1 if the channel has reached end-of-stream
 * @throws IOException if some other I/O error occurs
 */
@Override
public int read(ByteBuffer dst) throws IOException {
return socketChannel.read(dst);
}
/**
 * Reads a sequence of bytes from this channel into the given buffers.
 *
 * @param dsts - The buffers into which bytes are to be transferred.
 * @return The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream.
 * @throws IOException if some other I/O error occurs
 */
@Override
public long read(ByteBuffer[] dsts) throws IOException {
return socketChannel.read(dsts);
}
/**
 * Reads a sequence of bytes from this channel into a subsequence of the given buffers.
 * @param dsts - The buffers into which bytes are to be transferred
 * @param offset - The offset within the buffer array of the first buffer into which bytes are to be transferred; must be non-negative and no larger than dsts.length.
 * @param length - The maximum number of buffers to be accessed; must be non-negative and no larger than dsts.length - offset
 * @return The number of bytes read, possibly zero, or -1 if the channel has reached end-of-stream.
 * @throws IOException if some other I/O error occurs
 */
@Override
public long read(ByteBuffer[] dsts, int offset, int length) throws IOException {
return socketChannel.read(dsts, offset, length);
}
/**
 * Writes a sequence of bytes to this channel from the given buffer.
 *
 * @param src The buffer from which bytes are to be retrieved
 * @return The number of bytes written, possibly zero
 * @throws IOException If some other I/O error occurs
 */
@Override
public int write(ByteBuffer src) throws IOException {
return socketChannel.write(src);
}
/**
 * Writes a sequence of bytes to this channel from the given buffers.
 *
 * @param srcs The buffers from which bytes are to be retrieved
 * @return The number of bytes written, possibly zero
 * @throws IOException If some other I/O error occurs
 */
@Override
public long write(ByteBuffer[] srcs) throws IOException {
return socketChannel.write(srcs);
}
/**
 * Writes a sequence of bytes to this channel from the subsequence of the given buffers.
 *
 * @param srcs The buffers from which bytes are to be retrieved
 * @param offset The offset within the buffer array of the first buffer from which bytes are to be retrieved; must be non-negative and no larger than srcs.length.
 * @param length - The maximum number of buffers to be accessed; must be non-negative and no larger than srcs.length - offset.
 * @return returns no.of bytes written , possibly zero.
 * @throws IOException If some other I/O error occurs
 */
@Override
public long write(ByteBuffer[] srcs, int offset, int length) throws IOException {
return socketChannel.write(srcs, offset, length);
}
/**
 * always returns false as there will be not be any
 * pending writes since we directly write to socketChannel.
 */
@Override
public boolean hasPendingWrites() {
return false;
}
/**
 * Returns ANONYMOUS as Principal.
 */
@Override
public Principal peerPrincipal() {
return principal;
}
/**
 * Adds the interestOps to selectionKey.
 */
@Override
public void addInterestOps(int ops) {
key.interestOps(key.interestOps() | ops);
}
/**
 * Removes the interestOps from selectionKey.
 */
@Override
public void removeInterestOps(int ops) {
key.interestOps(key.interestOps() & ~ops);
}
@Override
public boolean isMute() {
// Muted means the key is valid but no longer registered for reads.
return key.isValid() && (key.interestOps() & SelectionKey.OP_READ) == 0;
}
@Override
public boolean hasBytesBuffered() {
// Plaintext has no intermediate decryption buffers, so nothing is ever buffered here.
return false;
}
@Override
public long transferFrom(FileChannel fileChannel, long position, long count) throws IOException {
// Zero-copy transfer from the file directly to the socket.
return fileChannel.transferTo(position, count, socketChannel);
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.