index
int64
repo_id
string
file_path
string
content
string
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/EndQuorumEpochResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;

import java.util.ArrayList;

import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.EndQuorumEpochResponseData.*;

/**
 * Converts {@link EndQuorumEpochResponseData} messages to and from a Jackson
 * JSON tree representation. Generated code; the JSON field names ("errorCode",
 * "topics", ...) are part of the tooling contract — do not rename them.
 */
public class EndQuorumEpochResponseDataJsonConverter {
    /**
     * Builds an {@link EndQuorumEpochResponseData} from a JSON object node.
     * Both top-level fields are mandatory in every version; a missing or
     * mistyped field raises a RuntimeException naming the field.
     */
    public static EndQuorumEpochResponseData read(JsonNode _node, short _version) {
        EndQuorumEpochResponseData _object = new EndQuorumEpochResponseData();
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("EndQuorumEpochResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            // Delegates range checking (int16) to MessageUtil.
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "EndQuorumEpochResponseData");
        }
        JsonNode _topicsNode = _node.get("topics");
        if (_topicsNode == null) {
            throw new RuntimeException("EndQuorumEpochResponseData: unable to locate field 'topics', which is mandatory in version " + _version);
        } else {
            if (!_topicsNode.isArray()) {
                throw new RuntimeException("EndQuorumEpochResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            // Presize the list to the JSON array length; each element is parsed
            // by the nested TopicData converter with the same version.
            ArrayList<TopicData> _collection = new ArrayList<TopicData>(_topicsNode.size());
            _object.topics = _collection;
            for (JsonNode _element : _topicsNode) {
                _collection.add(TopicDataJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes the message to a JSON object node. {@code _serializeRecords}
     * is threaded through to nested converters (unused for this message type,
     * which carries no record data).
     */
    public static JsonNode write(EndQuorumEpochResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("errorCode", new ShortNode(_object.errorCode));
        ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
        for (TopicData _element : _object.topics) {
            _topicsArray.add(TopicDataJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("topics", _topicsArray);
        return _node;
    }

    /** Convenience overload: serialize records by default. */
    public static JsonNode write(EndQuorumEpochResponseData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the nested per-partition response entry. */
    public static class PartitionDataJsonConverter {
        /** Parses one partition entry; all four fields are mandatory in every version. */
        public static PartitionData read(JsonNode _node, short _version) {
            PartitionData _object = new PartitionData();
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("PartitionData: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "PartitionData");
            }
            JsonNode _errorCodeNode = _node.get("errorCode");
            if (_errorCodeNode == null) {
                throw new RuntimeException("PartitionData: unable to locate field 'errorCode', which is mandatory in version " + _version);
            } else {
                _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "PartitionData");
            }
            JsonNode _leaderIdNode = _node.get("leaderId");
            if (_leaderIdNode == null) {
                throw new RuntimeException("PartitionData: unable to locate field 'leaderId', which is mandatory in version " + _version);
            } else {
                _object.leaderId = MessageUtil.jsonNodeToInt(_leaderIdNode, "PartitionData");
            }
            JsonNode _leaderEpochNode = _node.get("leaderEpoch");
            if (_leaderEpochNode == null) {
                throw new RuntimeException("PartitionData: unable to locate field 'leaderEpoch', which is mandatory in version " + _version);
            } else {
                _object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "PartitionData");
            }
            return _object;
        }

        /** Serializes one partition entry; field order mirrors the schema. */
        public static JsonNode write(PartitionData _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            _node.set("errorCode", new ShortNode(_object.errorCode));
            _node.set("leaderId", new IntNode(_object.leaderId));
            _node.set("leaderEpoch", new IntNode(_object.leaderEpoch));
            return _node;
        }

        /** Convenience overload: serialize records by default. */
        public static JsonNode write(PartitionData _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** JSON converter for the nested per-topic response entry. */
    public static class TopicDataJsonConverter {
        /** Parses one topic entry: a topic name plus its partition array. */
        public static TopicData read(JsonNode _node, short _version) {
            TopicData _object = new TopicData();
            JsonNode _topicNameNode = _node.get("topicName");
            if (_topicNameNode == null) {
                throw new RuntimeException("TopicData: unable to locate field 'topicName', which is mandatory in version " + _version);
            } else {
                if (!_topicNameNode.isTextual()) {
                    throw new RuntimeException("TopicData expected a string type, but got " + _node.getNodeType());
                }
                _object.topicName = _topicNameNode.asText();
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("TopicData: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    throw new RuntimeException("TopicData expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<PartitionData> _collection = new ArrayList<PartitionData>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(PartitionDataJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        /** Serializes one topic entry, delegating partitions to the nested converter. */
        public static JsonNode write(TopicData _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("topicName", new TextNode(_object.topicName));
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (PartitionData _element : _object.partitions) {
                _partitionsArray.add(PartitionDataJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }

        /** Convenience overload: serialize records by default. */
        public static JsonNode write(TopicData _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/EndTxnRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Wire model for the EndTxn request (api key 26, versions 0-3).
 * Generated code: the read/write/addSize statement order defines the exact
 * byte layout on the wire and must not be altered. Versions >= 3 use the
 * "flexible" encoding (compact strings + tagged fields).
 */
public class EndTxnRequestData implements ApiMessage {
    // The ID of the transaction to end.
    String transactionalId;
    // The producer ID.
    long producerId;
    // The current epoch associated with the producer.
    short producerEpoch;
    // True if the transaction was committed, false if it was aborted.
    boolean committed;
    // Tagged fields received on the wire whose tags this version does not know.
    private List<RawTaggedField> _unknownTaggedFields;

    // Versions 0-2: fixed-length (INT16-prefixed) string encoding.
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("transactional_id", Type.STRING, "The ID of the transaction to end."),
            new Field("producer_id", Type.INT64, "The producer ID."),
            new Field("producer_epoch", Type.INT16, "The current epoch associated with the producer."),
            new Field("committed", Type.BOOLEAN, "True if the transaction was committed, false if it was aborted.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema SCHEMA_2 = SCHEMA_1;

    // Version 3: flexible version — compact string plus a (currently empty)
    // tagged-fields section.
    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("transactional_id", Type.COMPACT_STRING, "The ID of the transaction to end."),
            new Field("producer_id", Type.INT64, "The producer ID."),
            new Field("producer_epoch", Type.INT16, "The current epoch associated with the producer."),
            new Field("committed", Type.BOOLEAN, "True if the transaction was committed, false if it was aborted."),
            TaggedFieldsSection.of(
            )
        );

    // Indexed by version number.
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 3;

    /** Deserializing constructor: populates all fields from the wire. */
    public EndTxnRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: all fields take their schema defaults. */
    public EndTxnRequestData() {
        this.transactionalId = "";
        this.producerId = 0L;
        this.producerEpoch = (short) 0;
        this.committed = false;
    }

    @Override
    public short apiKey() {
        return 26;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 3;
    }

    /**
     * Reads the fields in wire order. The string length prefix is a varint
     * (minus one) for flexible versions, an INT16 otherwise; a negative
     * length would mean "null", which this non-nullable field rejects.
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            if (_version >= 3) {
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readShort();
            }
            if (length < 0) {
                throw new RuntimeException("non-nullable field transactionalId was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field transactionalId had invalid length " + length);
            } else {
                this.transactionalId = _readable.readString(length);
            }
        }
        this.producerId = _readable.readLong();
        this.producerEpoch = _readable.readShort();
        this.committed = _readable.readByte() != 0;
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            // Flexible versions end with a tagged-fields section; no tags are
            // defined for this message, so everything read here is "unknown"
            // and preserved for round-tripping.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Writes the fields in wire order. Relies on {@link #addSize} having been
     * called first with the same cache so the UTF-8 bytes of
     * {@code transactionalId} are already cached.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = _cache.getSerializedValue(transactionalId);
            if (_version >= 3) {
                // Compact encoding: length + 1 so that 0 can mean null.
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
            } else {
                _writable.writeShort((short) _stringBytes.length);
            }
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeLong(producerId);
        _writable.writeShort(producerEpoch);
        _writable.writeByte(committed ? (byte) 1 : (byte) 0);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size and caches the UTF-8 form of
     * {@code transactionalId} for the subsequent {@link #write} call.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = transactionalId.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'transactionalId' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(transactionalId, _stringBytes);
            if (_version >= 3) {
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            } else {
                // Two bytes for the INT16 length prefix.
                _size.addBytes(_stringBytes.length + 2);
            }
        }
        _size.addBytes(8);
        _size.addBytes(2);
        _size.addBytes(1);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 3) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof EndTxnRequestData)) return false;
        EndTxnRequestData other = (EndTxnRequestData) obj;
        if (this.transactionalId == null) {
            if (other.transactionalId != null) return false;
        } else {
            if (!this.transactionalId.equals(other.transactionalId)) return false;
        }
        if (producerId != other.producerId) return false;
        if (producerEpoch != other.producerEpoch) return false;
        if (committed != other.committed) return false;
        // Unknown tagged fields participate in equality too.
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (transactionalId == null ? 0 : transactionalId.hashCode());
        hashCode = 31 * hashCode + ((int) (producerId >> 32) ^ (int) producerId);
        hashCode = 31 * hashCode + producerEpoch;
        hashCode = 31 * hashCode + (committed ? 1231 : 1237);
        return hashCode;
    }

    /**
     * Shallow copy of the named fields. NOTE(review): _unknownTaggedFields is
     * intentionally not copied by the generator.
     */
    @Override
    public EndTxnRequestData duplicate() {
        EndTxnRequestData _duplicate = new EndTxnRequestData();
        _duplicate.transactionalId = transactionalId;
        _duplicate.producerId = producerId;
        _duplicate.producerEpoch = producerEpoch;
        _duplicate.committed = committed;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "EndTxnRequestData("
            + "transactionalId=" + ((transactionalId == null) ? "null" : "'" + transactionalId.toString() + "'")
            + ", producerId=" + producerId
            + ", producerEpoch=" + producerEpoch
            + ", committed=" + (committed ? "true" : "false")
            + ")";
    }

    public String transactionalId() {
        return this.transactionalId;
    }

    public long producerId() {
        return this.producerId;
    }

    public short producerEpoch() {
        return this.producerEpoch;
    }

    public boolean committed() {
        return this.committed;
    }

    /** Lazily initializes the unknown-tagged-fields list on first access. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public EndTxnRequestData setTransactionalId(String v) {
        this.transactionalId = v;
        return this;
    }

    public EndTxnRequestData setProducerId(long v) {
        this.producerId = v;
        return this;
    }

    public EndTxnRequestData setProducerEpoch(short v) {
        this.producerEpoch = v;
        return this;
    }

    public EndTxnRequestData setCommitted(boolean v) {
        this.committed = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/EndTxnRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.EndTxnRequestData.*;

/**
 * Converts {@link EndTxnRequestData} messages to and from a Jackson JSON tree
 * representation. Generated code; the JSON field names mirror the message's
 * camelCase field names and are part of the tooling contract.
 */
public class EndTxnRequestDataJsonConverter {
    /**
     * Builds an {@link EndTxnRequestData} from a JSON object node. All four
     * fields are mandatory in every version; a missing or mistyped field
     * raises a RuntimeException naming the field.
     */
    public static EndTxnRequestData read(JsonNode _node, short _version) {
        EndTxnRequestData _object = new EndTxnRequestData();
        JsonNode _transactionalIdNode = _node.get("transactionalId");
        if (_transactionalIdNode == null) {
            throw new RuntimeException("EndTxnRequestData: unable to locate field 'transactionalId', which is mandatory in version " + _version);
        } else {
            if (!_transactionalIdNode.isTextual()) {
                throw new RuntimeException("EndTxnRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.transactionalId = _transactionalIdNode.asText();
        }
        JsonNode _producerIdNode = _node.get("producerId");
        if (_producerIdNode == null) {
            throw new RuntimeException("EndTxnRequestData: unable to locate field 'producerId', which is mandatory in version " + _version);
        } else {
            // Numeric range/type validation is delegated to MessageUtil.
            _object.producerId = MessageUtil.jsonNodeToLong(_producerIdNode, "EndTxnRequestData");
        }
        JsonNode _producerEpochNode = _node.get("producerEpoch");
        if (_producerEpochNode == null) {
            throw new RuntimeException("EndTxnRequestData: unable to locate field 'producerEpoch', which is mandatory in version " + _version);
        } else {
            _object.producerEpoch = MessageUtil.jsonNodeToShort(_producerEpochNode, "EndTxnRequestData");
        }
        JsonNode _committedNode = _node.get("committed");
        if (_committedNode == null) {
            throw new RuntimeException("EndTxnRequestData: unable to locate field 'committed', which is mandatory in version " + _version);
        } else {
            if (!_committedNode.isBoolean()) {
                throw new RuntimeException("EndTxnRequestData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.committed = _committedNode.asBoolean();
        }
        return _object;
    }

    /**
     * Serializes the message to a JSON object node; field order mirrors the
     * schema. {@code _serializeRecords} is unused here (no record fields).
     */
    public static JsonNode write(EndTxnRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("transactionalId", new TextNode(_object.transactionalId));
        _node.set("producerId", new LongNode(_object.producerId));
        _node.set("producerEpoch", new ShortNode(_object.producerEpoch));
        _node.set("committed", BooleanNode.valueOf(_object.committed));
        return _node;
    }

    /** Convenience overload: serialize records by default. */
    public static JsonNode write(EndTxnRequestData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/EndTxnResponseData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Wire model for the EndTxn response (api key 26, versions 0-3).
 * Generated code: the read/write/addSize statement order defines the exact
 * byte layout on the wire and must not be altered. Versions >= 3 are
 * "flexible" and append a tagged-fields section.
 */
public class EndTxnResponseData implements ApiMessage {
    // Throttle duration in milliseconds (0 if no quota was violated).
    int throttleTimeMs;
    // The error code, or 0 if there was no error.
    short errorCode;
    // Tagged fields received on the wire whose tags this version does not know.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema SCHEMA_2 = SCHEMA_1;

    // Version 3: flexible version — same fields plus a (currently empty)
    // tagged-fields section.
    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            TaggedFieldsSection.of(
            )
        );

    // Indexed by version number.
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 3;

    /** Deserializing constructor: populates all fields from the wire. */
    public EndTxnResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: all fields take their schema defaults. */
    public EndTxnResponseData() {
        this.throttleTimeMs = 0;
        this.errorCode = (short) 0;
    }

    @Override
    public short apiKey() {
        return 26;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 3;
    }

    /** Reads the fields in wire order; flexible versions also consume tagged fields. */
    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        this.errorCode = _readable.readShort();
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            // No tags are defined for this message, so every tag read here is
            // "unknown" and preserved for round-tripping.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /** Writes the fields in wire order; pre-flexible versions reject tagged fields. */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        _writable.writeShort(errorCode);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /** Accumulates the serialized size: INT32 + INT16 + any tagged-field bytes. */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(4);
        _size.addBytes(2);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 3) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof EndTxnResponseData)) return false;
        EndTxnResponseData other = (EndTxnResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (errorCode != other.errorCode) return false;
        // Unknown tagged fields participate in equality too.
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + errorCode;
        return hashCode;
    }

    /**
     * Shallow copy of the named fields. NOTE(review): _unknownTaggedFields is
     * intentionally not copied by the generator.
     */
    @Override
    public EndTxnResponseData duplicate() {
        EndTxnResponseData _duplicate = new EndTxnResponseData();
        _duplicate.throttleTimeMs = throttleTimeMs;
        _duplicate.errorCode = errorCode;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "EndTxnResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", errorCode=" + errorCode
            + ")";
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public short errorCode() {
        return this.errorCode;
    }

    /** Lazily initializes the unknown-tagged-fields list on first access. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public EndTxnResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public EndTxnResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/EndTxnResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.EndTxnResponseData.*;

/**
 * Converts {@link EndTxnResponseData} messages to and from a Jackson JSON
 * tree representation. Generated code; the JSON field names are part of the
 * tooling contract.
 */
public class EndTxnResponseDataJsonConverter {
    /**
     * Builds an {@link EndTxnResponseData} from a JSON object node. Both
     * fields are mandatory in every version; a missing field raises a
     * RuntimeException naming it.
     */
    public static EndTxnResponseData read(JsonNode _node, short _version) {
        EndTxnResponseData _object = new EndTxnResponseData();
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            throw new RuntimeException("EndTxnResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
        } else {
            // Numeric range/type validation is delegated to MessageUtil.
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "EndTxnResponseData");
        }
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("EndTxnResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "EndTxnResponseData");
        }
        return _object;
    }

    /**
     * Serializes the message to a JSON object node; field order mirrors the
     * schema. {@code _serializeRecords} is unused here (no record fields).
     */
    public static JsonNode write(EndTxnResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        _node.set("errorCode", new ShortNode(_object.errorCode));
        return _node;
    }

    /** Convenience overload: serialize records by default. */
    public static JsonNode write(EndTxnResponseData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/EnvelopeRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.Bytes;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Wire model for the Envelope request (api key 58, version 0 only), which
 * wraps a redirected request together with the original client's principal
 * and host address. Generated code: the read/write/addSize statement order
 * defines the exact byte layout and must not be altered. Version 0 is
 * already flexible (compact varint lengths + tagged fields).
 */
public class EnvelopeRequestData implements ApiMessage {
    // The embedded request header and data; kept as a ByteBuffer for
    // zero-copy serialization (see addZeroCopyBytes in addSize).
    ByteBuffer requestData;
    // Initial client principal when the request is redirected by a broker;
    // nullable on the wire.
    byte[] requestPrincipal;
    // The original client's address in bytes; non-nullable.
    byte[] clientHostAddress;
    // Tagged fields received on the wire whose tags this version does not know.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("request_data", Type.COMPACT_BYTES, "The embedded request header and data."),
            new Field("request_principal", Type.COMPACT_NULLABLE_BYTES, "Value of the initial client principal when the request is redirected by a broker."),
            new Field("client_host_address", Type.COMPACT_BYTES, "The original client's address in bytes."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;

    /** Deserializing constructor: populates all fields from the wire. */
    public EnvelopeRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: empty buffer / empty byte arrays. */
    public EnvelopeRequestData() {
        this.requestData = ByteUtils.EMPTY_BUF;
        this.requestPrincipal = Bytes.EMPTY;
        this.clientHostAddress = Bytes.EMPTY;
    }

    @Override
    public short apiKey() {
        return 58;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /**
     * Reads the fields in wire order. Each compact-bytes length prefix is a
     * varint minus one; -1 encodes null, which only requestPrincipal accepts.
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                throw new RuntimeException("non-nullable field requestData was serialized as null");
            } else {
                // Zero-copy: slices the underlying buffer instead of copying.
                this.requestData = _readable.readByteBuffer(length);
            }
        }
        {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                // Nullable field: -1 length means null.
                this.requestPrincipal = null;
            } else {
                byte[] newBytes = _readable.readArray(length);
                this.requestPrincipal = newBytes;
            }
        }
        {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                throw new RuntimeException("non-nullable field clientHostAddress was serialized as null");
            } else {
                byte[] newBytes = _readable.readArray(length);
                this.clientHostAddress = newBytes;
            }
        }
        this._unknownTaggedFields = null;
        // Version 0 is flexible, so the tagged-fields section is always present.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /**
     * Writes the fields in wire order: compact length prefixes are the byte
     * count plus one, with a bare 0 varint encoding null for the nullable
     * requestPrincipal field.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeUnsignedVarint(requestData.remaining() + 1);
        _writable.writeByteBuffer(requestData);
        if (requestPrincipal == null) {
            _writable.writeUnsignedVarint(0);
        } else {
            _writable.writeUnsignedVarint(requestPrincipal.length + 1);
            _writable.writeByteArray(requestPrincipal);
        }
        _writable.writeUnsignedVarint(clientHostAddress.length + 1);
        _writable.writeByteArray(clientHostAddress);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /**
     * Accumulates the serialized size. requestData is counted via
     * addZeroCopyBytes so the accumulator knows the payload need not be
     * copied into the serialization buffer.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            _size.addZeroCopyBytes(requestData.remaining());
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(requestData.remaining() + 1));
        }
        if (requestPrincipal == null) {
            // Null compact bytes serialize as the single varint byte 0.
            _size.addBytes(1);
        } else {
            _size.addBytes(requestPrincipal.length);
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(requestPrincipal.length + 1));
        }
        {
            _size.addBytes(clientHostAddress.length);
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(clientHostAddress.length + 1));
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof EnvelopeRequestData)) return false;
        EnvelopeRequestData other = (EnvelopeRequestData) obj;
        // ByteBuffer.equals compares remaining content, not identity.
        if (!Objects.equals(this.requestData, other.requestData)) return false;
        if (!Arrays.equals(this.requestPrincipal, other.requestPrincipal)) return false;
        if (!Arrays.equals(this.clientHostAddress, other.clientHostAddress)) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + Objects.hashCode(requestData);
        hashCode = 31 * hashCode + Arrays.hashCode(requestPrincipal);
        hashCode = 31 * hashCode + Arrays.hashCode(clientHostAddress);
        return hashCode;
    }

    /**
     * Deep-ish copy: duplicates the buffer view and copies the byte arrays.
     * NOTE(review): _unknownTaggedFields is intentionally not copied by the
     * generator.
     */
    @Override
    public EnvelopeRequestData duplicate() {
        EnvelopeRequestData _duplicate = new EnvelopeRequestData();
        _duplicate.requestData = requestData.duplicate();
        if (requestPrincipal == null) {
            _duplicate.requestPrincipal = null;
        } else {
            _duplicate.requestPrincipal = MessageUtil.duplicate(requestPrincipal);
        }
        _duplicate.clientHostAddress = MessageUtil.duplicate(clientHostAddress);
        return _duplicate;
    }

    @Override
    public String toString() {
        return "EnvelopeRequestData("
            + "requestData=" + requestData
            + ", requestPrincipal=" + Arrays.toString(requestPrincipal)
            + ", clientHostAddress=" + Arrays.toString(clientHostAddress)
            + ")";
    }

    public ByteBuffer requestData() {
        return this.requestData;
    }

    public byte[] requestPrincipal() {
        return this.requestPrincipal;
    }

    public byte[] clientHostAddress() {
        return this.clientHostAddress;
    }

    /** Lazily initializes the unknown-tagged-fields list on first access. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public EnvelopeRequestData setRequestData(ByteBuffer v) {
        this.requestData = v;
        return this;
    }

    public EnvelopeRequestData setRequestPrincipal(byte[] v) {
        this.requestPrincipal = v;
        return this;
    }

    public EnvelopeRequestData setClientHostAddress(byte[] v) {
        this.clientHostAddress = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/EnvelopeRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.BinaryNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.nio.ByteBuffer;
import java.util.Arrays;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.EnvelopeRequestData.*;

/**
 * Generated JSON converter for {@link EnvelopeRequestData}. JSON field names
 * are the camelCase Java field names, not the snake_case wire-schema names.
 */
public class EnvelopeRequestDataJsonConverter {
    /**
     * Builds an EnvelopeRequestData from {@code _node}. All three fields are
     * mandatory keys; only requestPrincipal may carry a JSON null value.
     *
     * @throws RuntimeException if a mandatory field is missing.
     */
    public static EnvelopeRequestData read(JsonNode _node, short _version) {
        EnvelopeRequestData _object = new EnvelopeRequestData();
        JsonNode _requestDataNode = _node.get("requestData");
        if (_requestDataNode == null) {
            throw new RuntimeException("EnvelopeRequestData: unable to locate field 'requestData', which is mandatory in version " + _version);
        } else {
            _object.requestData = ByteBuffer.wrap(MessageUtil.jsonNodeToBinary(_requestDataNode, "EnvelopeRequestData"));
        }
        JsonNode _requestPrincipalNode = _node.get("requestPrincipal");
        if (_requestPrincipalNode == null) {
            throw new RuntimeException("EnvelopeRequestData: unable to locate field 'requestPrincipal', which is mandatory in version " + _version);
        } else {
            if (_requestPrincipalNode.isNull()) {
                _object.requestPrincipal = null;
            } else {
                _object.requestPrincipal = MessageUtil.jsonNodeToBinary(_requestPrincipalNode, "EnvelopeRequestData");
            }
        }
        JsonNode _clientHostAddressNode = _node.get("clientHostAddress");
        if (_clientHostAddressNode == null) {
            throw new RuntimeException("EnvelopeRequestData: unable to locate field 'clientHostAddress', which is mandatory in version " + _version);
        } else {
            _object.clientHostAddress = MessageUtil.jsonNodeToBinary(_clientHostAddressNode, "EnvelopeRequestData");
        }
        return _object;
    }

    /**
     * Serializes {@code _object} to JSON. Byte arrays are defensively copied
     * into BinaryNodes; a null requestPrincipal becomes a JSON null.
     */
    public static JsonNode write(EnvelopeRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("requestData", new BinaryNode(MessageUtil.byteBufferToArray(_object.requestData)));
        if (_object.requestPrincipal == null) {
            _node.set("requestPrincipal", NullNode.instance);
        } else {
            _node.set("requestPrincipal", new BinaryNode(Arrays.copyOf(_object.requestPrincipal, _object.requestPrincipal.length)));
        }
        _node.set("clientHostAddress", new BinaryNode(Arrays.copyOf(_object.clientHostAddress, _object.clientHostAddress.length)));
        return _node;
    }

    /** Convenience overload that always serializes records. */
    public static JsonNode write(EnvelopeRequestData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/EnvelopeResponseData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Generated data class for the Envelope response (API key 58, version 0 only).
 * Carries the embedded response (nullable compact bytes) plus a top-level
 * error code.
 */
public class EnvelopeResponseData implements ApiMessage {
    // The embedded response header and data; nullable on the wire.
    ByteBuffer responseData;
    // The error code, or 0 if there was no error.
    short errorCode;
    // Tagged fields with tags this version does not recognize; preserved for round-tripping.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("response_data", Type.COMPACT_NULLABLE_BYTES, "The embedded response header and data."),
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;

    /** Deserializing constructor: populates this object from {@code _readable}. */
    public EnvelopeResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: null response data, error code 0. */
    public EnvelopeResponseData() {
        this.responseData = null;
        this.errorCode = (short) 0;
    }

    @Override
    public short apiKey() {
        return 58;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /**
     * Reads fields in schema order. responseData uses the compact nullable
     * encoding (varint length + 1; 0 means null).
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                this.responseData = null;
            } else {
                this.responseData = _readable.readByteBuffer(length);
            }
        }
        this.errorCode = _readable.readShort();
        this._unknownTaggedFields = null;
        // Trailing tagged-field section: count, then (tag, size, bytes) per field.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /** Writes fields in schema order, mirroring {@link #read}. */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (responseData == null) {
            _writable.writeUnsignedVarint(0);
        } else {
            _writable.writeUnsignedVarint(responseData.remaining() + 1);
            _writable.writeByteBuffer(responseData);
        }
        _writable.writeShort(errorCode);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /**
     * Accumulates serialized size; must stay in lockstep with {@link #write}.
     * responseData is counted as zero-copy bytes.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (responseData == null) {
            // A null compact field is a single 0x00 varint byte.
            _size.addBytes(1);
        } else {
            _size.addZeroCopyBytes(responseData.remaining());
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(responseData.remaining() + 1));
        }
        _size.addBytes(2);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof EnvelopeResponseData)) return false;
        EnvelopeResponseData other = (EnvelopeResponseData) obj;
        if (!Objects.equals(this.responseData, other.responseData)) return false;
        if (errorCode != other.errorCode) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // NOTE: tagged fields are compared in equals() but intentionally excluded here,
    // which still satisfies the equals/hashCode contract (equal objects hash equally).
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + Objects.hashCode(responseData);
        hashCode = 31 * hashCode + errorCode;
        return hashCode;
    }

    /**
     * Returns a copy; the ByteBuffer is duplicated (shared backing storage,
     * independent position/limit).
     */
    @Override
    public EnvelopeResponseData duplicate() {
        EnvelopeResponseData _duplicate = new EnvelopeResponseData();
        if (responseData == null) {
            _duplicate.responseData = null;
        } else {
            _duplicate.responseData = responseData.duplicate();
        }
        _duplicate.errorCode = errorCode;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "EnvelopeResponseData("
            + "responseData=" + responseData
            + ", errorCode=" + errorCode
            + ")";
    }

    public ByteBuffer responseData() {
        return this.responseData;
    }

    public short errorCode() {
        return this.errorCode;
    }

    /** Lazily creates the unknown-tagged-fields list; never returns null. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public EnvelopeResponseData setResponseData(ByteBuffer v) {
        this.responseData = v;
        return this;
    }

    public EnvelopeResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/EnvelopeResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.BinaryNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import java.nio.ByteBuffer;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.EnvelopeResponseData.*;

/**
 * Generated JSON converter for {@link EnvelopeResponseData}. JSON field names
 * are the camelCase Java field names, not the snake_case wire-schema names.
 */
public class EnvelopeResponseDataJsonConverter {
    /**
     * Builds an EnvelopeResponseData from {@code _node}. Both keys are
     * mandatory; responseData may carry a JSON null value.
     *
     * @throws RuntimeException if a mandatory field is missing.
     */
    public static EnvelopeResponseData read(JsonNode _node, short _version) {
        EnvelopeResponseData _object = new EnvelopeResponseData();
        JsonNode _responseDataNode = _node.get("responseData");
        if (_responseDataNode == null) {
            throw new RuntimeException("EnvelopeResponseData: unable to locate field 'responseData', which is mandatory in version " + _version);
        } else {
            if (_responseDataNode.isNull()) {
                _object.responseData = null;
            } else {
                _object.responseData = ByteBuffer.wrap(MessageUtil.jsonNodeToBinary(_responseDataNode, "EnvelopeResponseData"));
            }
        }
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("EnvelopeResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "EnvelopeResponseData");
        }
        return _object;
    }

    /**
     * Serializes {@code _object} to JSON. A null responseData becomes a JSON
     * null; the ByteBuffer's readable bytes become a BinaryNode.
     */
    public static JsonNode write(EnvelopeResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        if (_object.responseData == null) {
            _node.set("responseData", NullNode.instance);
        } else {
            _node.set("responseData", new BinaryNode(MessageUtil.byteBufferToArray(_object.responseData)));
        }
        _node.set("errorCode", new ShortNode(_object.errorCode));
        return _node;
    }

    /** Convenience overload that always serializes records. */
    public static JsonNode write(EnvelopeResponseData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ExpireDelegationTokenRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.Bytes;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Generated data class for the ExpireDelegationToken request (API key 40,
 * versions 0-2). Version 2 is the first "flexible" version: it switches hmac
 * to the compact bytes encoding (varint length + 1) and appends a tagged
 * field section; versions 0-1 use a 4-byte int length prefix and no tags.
 */
public class ExpireDelegationTokenRequestData implements ApiMessage {
    // The HMAC of the delegation token to be expired (non-nullable on the wire).
    byte[] hmac;
    // The expiry time period in milliseconds.
    long expiryTimePeriodMs;
    // Tagged fields with tags this version does not recognize; preserved for round-tripping (v2+).
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("hmac", Type.BYTES, "The HMAC of the delegation token to be expired."),
            new Field("expiry_time_period_ms", Type.INT64, "The expiry time period in milliseconds.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("hmac", Type.COMPACT_BYTES, "The HMAC of the delegation token to be expired."),
            new Field("expiry_time_period_ms", Type.INT64, "The expiry time period in milliseconds."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 2;

    /** Deserializing constructor: populates this object from {@code _readable}. */
    public ExpireDelegationTokenRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: empty hmac, zero expiry period. */
    public ExpireDelegationTokenRequestData() {
        this.hmac = Bytes.EMPTY;
        this.expiryTimePeriodMs = 0L;
    }

    @Override
    public short apiKey() {
        return 40;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 2;
    }

    /**
     * Reads fields in schema order. The hmac length prefix is a compact
     * varint for v2+ and a 4-byte int for earlier versions; the tagged-field
     * section only exists for v2+.
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            if (_version >= 2) {
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readInt();
            }
            if (length < 0) {
                throw new RuntimeException("non-nullable field hmac was serialized as null");
            } else {
                byte[] newBytes = _readable.readArray(length);
                this.hmac = newBytes;
            }
        }
        this.expiryTimePeriodMs = _readable.readLong();
        this._unknownTaggedFields = null;
        if (_version >= 2) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Writes fields in schema order, mirroring {@link #read}. Pre-v2 versions
     * cannot carry tagged fields, so their presence is rejected there.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 2) {
            _writable.writeUnsignedVarint(hmac.length + 1);
        } else {
            _writable.writeInt(hmac.length);
        }
        _writable.writeByteArray(hmac);
        _writable.writeLong(expiryTimePeriodMs);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 2) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /** Accumulates serialized size; must stay in lockstep with {@link #write}. */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            _size.addBytes(hmac.length);
            if (_version >= 2) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(hmac.length + 1));
            } else {
                _size.addBytes(4);
            }
        }
        _size.addBytes(8);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 2) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof ExpireDelegationTokenRequestData)) return false;
        ExpireDelegationTokenRequestData other = (ExpireDelegationTokenRequestData) obj;
        if (!Arrays.equals(this.hmac, other.hmac)) return false;
        if (expiryTimePeriodMs != other.expiryTimePeriodMs) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // NOTE: tagged fields are compared in equals() but intentionally excluded here,
    // which still satisfies the equals/hashCode contract (equal objects hash equally).
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + Arrays.hashCode(hmac);
        // Folds the 64-bit value into 32 bits (high word XOR low word).
        hashCode = 31 * hashCode + ((int) (expiryTimePeriodMs >> 32) ^ (int) expiryTimePeriodMs);
        return hashCode;
    }

    /** Returns a deep copy: the hmac byte array is duplicated. */
    @Override
    public ExpireDelegationTokenRequestData duplicate() {
        ExpireDelegationTokenRequestData _duplicate = new ExpireDelegationTokenRequestData();
        _duplicate.hmac = MessageUtil.duplicate(hmac);
        _duplicate.expiryTimePeriodMs = expiryTimePeriodMs;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "ExpireDelegationTokenRequestData("
            + "hmac=" + Arrays.toString(hmac)
            + ", expiryTimePeriodMs=" + expiryTimePeriodMs
            + ")";
    }

    public byte[] hmac() {
        return this.hmac;
    }

    public long expiryTimePeriodMs() {
        return this.expiryTimePeriodMs;
    }

    /** Lazily creates the unknown-tagged-fields list; never returns null. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public ExpireDelegationTokenRequestData setHmac(byte[] v) {
        this.hmac = v;
        return this;
    }

    public ExpireDelegationTokenRequestData setExpiryTimePeriodMs(long v) {
        this.expiryTimePeriodMs = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ExpireDelegationTokenRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.BinaryNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import java.util.Arrays;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.ExpireDelegationTokenRequestData.*;

/**
 * Generated JSON converter for {@link ExpireDelegationTokenRequestData}.
 * JSON field names are the camelCase Java field names, not the snake_case
 * wire-schema names.
 */
public class ExpireDelegationTokenRequestDataJsonConverter {
    /**
     * Builds an ExpireDelegationTokenRequestData from {@code _node}. Both
     * keys are mandatory in every supported version.
     *
     * @throws RuntimeException if a mandatory field is missing.
     */
    public static ExpireDelegationTokenRequestData read(JsonNode _node, short _version) {
        ExpireDelegationTokenRequestData _object = new ExpireDelegationTokenRequestData();
        JsonNode _hmacNode = _node.get("hmac");
        if (_hmacNode == null) {
            throw new RuntimeException("ExpireDelegationTokenRequestData: unable to locate field 'hmac', which is mandatory in version " + _version);
        } else {
            _object.hmac = MessageUtil.jsonNodeToBinary(_hmacNode, "ExpireDelegationTokenRequestData");
        }
        JsonNode _expiryTimePeriodMsNode = _node.get("expiryTimePeriodMs");
        if (_expiryTimePeriodMsNode == null) {
            throw new RuntimeException("ExpireDelegationTokenRequestData: unable to locate field 'expiryTimePeriodMs', which is mandatory in version " + _version);
        } else {
            _object.expiryTimePeriodMs = MessageUtil.jsonNodeToLong(_expiryTimePeriodMsNode, "ExpireDelegationTokenRequestData");
        }
        return _object;
    }

    /**
     * Serializes {@code _object} to JSON. The hmac array is defensively
     * copied into the BinaryNode.
     */
    public static JsonNode write(ExpireDelegationTokenRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("hmac", new BinaryNode(Arrays.copyOf(_object.hmac, _object.hmac.length)));
        _node.set("expiryTimePeriodMs", new LongNode(_object.expiryTimePeriodMs));
        return _node;
    }

    /** Convenience overload that always serializes records. */
    public static JsonNode write(ExpireDelegationTokenRequestData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ExpireDelegationTokenResponseData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Generated data class for the ExpireDelegationToken response (API key 40,
 * versions 0-2). All fields are fixed-width primitives; version 2 is the
 * first "flexible" version and appends a tagged-field section.
 */
public class ExpireDelegationTokenResponseData implements ApiMessage {
    // The error code, or 0 if there was no error.
    short errorCode;
    // The timestamp in milliseconds at which this token expires.
    long expiryTimestampMs;
    // Throttle duration in milliseconds, or 0 if no quota was violated.
    int throttleTimeMs;
    // Tagged fields with tags this version does not recognize; preserved for round-tripping (v2+).
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("expiry_timestamp_ms", Type.INT64, "The timestamp in milliseconds at which this token expires."),
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("expiry_timestamp_ms", Type.INT64, "The timestamp in milliseconds at which this token expires."),
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 2;

    /** Deserializing constructor: populates this object from {@code _readable}. */
    public ExpireDelegationTokenResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: all fields zero. */
    public ExpireDelegationTokenResponseData() {
        this.errorCode = (short) 0;
        this.expiryTimestampMs = 0L;
        this.throttleTimeMs = 0;
    }

    @Override
    public short apiKey() {
        return 40;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 2;
    }

    /**
     * Reads the three fixed-width fields, then (v2+ only) the tagged-field
     * section.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.errorCode = _readable.readShort();
        this.expiryTimestampMs = _readable.readLong();
        this.throttleTimeMs = _readable.readInt();
        this._unknownTaggedFields = null;
        if (_version >= 2) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Writes fields in schema order, mirroring {@link #read}. Pre-v2 versions
     * cannot carry tagged fields, so their presence is rejected there.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeShort(errorCode);
        _writable.writeLong(expiryTimestampMs);
        _writable.writeInt(throttleTimeMs);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 2) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /** Accumulates serialized size; must stay in lockstep with {@link #write}. */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(2);
        _size.addBytes(8);
        _size.addBytes(4);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 2) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof ExpireDelegationTokenResponseData)) return false;
        ExpireDelegationTokenResponseData other = (ExpireDelegationTokenResponseData) obj;
        if (errorCode != other.errorCode) return false;
        if (expiryTimestampMs != other.expiryTimestampMs) return false;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // NOTE: tagged fields are compared in equals() but intentionally excluded here,
    // which still satisfies the equals/hashCode contract (equal objects hash equally).
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + errorCode;
        // Folds the 64-bit value into 32 bits (high word XOR low word).
        hashCode = 31 * hashCode + ((int) (expiryTimestampMs >> 32) ^ (int) expiryTimestampMs);
        hashCode = 31 * hashCode + throttleTimeMs;
        return hashCode;
    }

    /** Returns a field-by-field copy (all fields are primitives). */
    @Override
    public ExpireDelegationTokenResponseData duplicate() {
        ExpireDelegationTokenResponseData _duplicate = new ExpireDelegationTokenResponseData();
        _duplicate.errorCode = errorCode;
        _duplicate.expiryTimestampMs = expiryTimestampMs;
        _duplicate.throttleTimeMs = throttleTimeMs;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "ExpireDelegationTokenResponseData("
            + "errorCode=" + errorCode
            + ", expiryTimestampMs=" + expiryTimestampMs
            + ", throttleTimeMs=" + throttleTimeMs
            + ")";
    }

    public short errorCode() {
        return this.errorCode;
    }

    public long expiryTimestampMs() {
        return this.expiryTimestampMs;
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    /** Lazily creates the unknown-tagged-fields list; never returns null. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public ExpireDelegationTokenResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }

    public ExpireDelegationTokenResponseData setExpiryTimestampMs(long v) {
        this.expiryTimestampMs = v;
        return this;
    }

    public ExpireDelegationTokenResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ExpireDelegationTokenResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.LongNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.ExpireDelegationTokenResponseData.*; public class ExpireDelegationTokenResponseDataJsonConverter { public static ExpireDelegationTokenResponseData read(JsonNode _node, short _version) { ExpireDelegationTokenResponseData _object = new ExpireDelegationTokenResponseData(); JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("ExpireDelegationTokenResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "ExpireDelegationTokenResponseData"); } JsonNode _expiryTimestampMsNode = _node.get("expiryTimestampMs"); if (_expiryTimestampMsNode 
== null) { throw new RuntimeException("ExpireDelegationTokenResponseData: unable to locate field 'expiryTimestampMs', which is mandatory in version " + _version); } else { _object.expiryTimestampMs = MessageUtil.jsonNodeToLong(_expiryTimestampMsNode, "ExpireDelegationTokenResponseData"); } JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { throw new RuntimeException("ExpireDelegationTokenResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "ExpireDelegationTokenResponseData"); } return _object; } public static JsonNode write(ExpireDelegationTokenResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("errorCode", new ShortNode(_object.errorCode)); _node.set("expiryTimestampMs", new LongNode(_object.expiryTimestampMs)); _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); return _node; } public static JsonNode write(ExpireDelegationTokenResponseData _object, short _version) { return write(_object, _version, true); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/FetchRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public 
class FetchRequestData implements ApiMessage { String clusterId; int replicaId; ReplicaState replicaState; int maxWaitMs; int minBytes; int maxBytes; byte isolationLevel; int sessionId; int sessionEpoch; List<FetchTopic> topics; List<ForgottenTopic> forgottenTopicsData; String rackId; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the follower, of -1 if this request is from a consumer."), new Field("max_wait_ms", Type.INT32, "The maximum time in milliseconds to wait for the response."), new Field("min_bytes", Type.INT32, "The minimum bytes to accumulate in the response."), new Field("topics", new ArrayOf(FetchTopic.SCHEMA_0), "The topics to fetch.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the follower, of -1 if this request is from a consumer."), new Field("max_wait_ms", Type.INT32, "The maximum time in milliseconds to wait for the response."), new Field("min_bytes", Type.INT32, "The minimum bytes to accumulate in the response."), new Field("max_bytes", Type.INT32, "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored."), new Field("topics", new ArrayOf(FetchTopic.SCHEMA_0), "The topics to fetch.") ); public static final Schema SCHEMA_4 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the follower, of -1 if this request is from a consumer."), new Field("max_wait_ms", Type.INT32, "The maximum time in milliseconds to wait for the response."), new Field("min_bytes", Type.INT32, "The minimum bytes to accumulate in the response."), new Field("max_bytes", Type.INT32, "The maximum bytes to fetch. 
See KIP-74 for cases where this limit may not be honored."), new Field("isolation_level", Type.INT8, "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records"), new Field("topics", new ArrayOf(FetchTopic.SCHEMA_0), "The topics to fetch.") ); public static final Schema SCHEMA_5 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the follower, of -1 if this request is from a consumer."), new Field("max_wait_ms", Type.INT32, "The maximum time in milliseconds to wait for the response."), new Field("min_bytes", Type.INT32, "The minimum bytes to accumulate in the response."), new Field("max_bytes", Type.INT32, "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored."), new Field("isolation_level", Type.INT8, "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. 
To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records"), new Field("topics", new ArrayOf(FetchTopic.SCHEMA_5), "The topics to fetch.") ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the follower, of -1 if this request is from a consumer."), new Field("max_wait_ms", Type.INT32, "The maximum time in milliseconds to wait for the response."), new Field("min_bytes", Type.INT32, "The minimum bytes to accumulate in the response."), new Field("max_bytes", Type.INT32, "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored."), new Field("isolation_level", Type.INT8, "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. 
To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records"), new Field("session_id", Type.INT32, "The fetch session ID."), new Field("session_epoch", Type.INT32, "The fetch session epoch, which is used for ordering requests in a session."), new Field("topics", new ArrayOf(FetchTopic.SCHEMA_5), "The topics to fetch."), new Field("forgotten_topics_data", new ArrayOf(ForgottenTopic.SCHEMA_7), "In an incremental fetch request, the partitions to remove.") ); public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the follower, of -1 if this request is from a consumer."), new Field("max_wait_ms", Type.INT32, "The maximum time in milliseconds to wait for the response."), new Field("min_bytes", Type.INT32, "The minimum bytes to accumulate in the response."), new Field("max_bytes", Type.INT32, "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored."), new Field("isolation_level", Type.INT8, "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. 
To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records"), new Field("session_id", Type.INT32, "The fetch session ID."), new Field("session_epoch", Type.INT32, "The fetch session epoch, which is used for ordering requests in a session."), new Field("topics", new ArrayOf(FetchTopic.SCHEMA_9), "The topics to fetch."), new Field("forgotten_topics_data", new ArrayOf(ForgottenTopic.SCHEMA_7), "In an incremental fetch request, the partitions to remove.") ); public static final Schema SCHEMA_10 = SCHEMA_9; public static final Schema SCHEMA_11 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the follower, of -1 if this request is from a consumer."), new Field("max_wait_ms", Type.INT32, "The maximum time in milliseconds to wait for the response."), new Field("min_bytes", Type.INT32, "The minimum bytes to accumulate in the response."), new Field("max_bytes", Type.INT32, "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored."), new Field("isolation_level", Type.INT8, "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. 
To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records"), new Field("session_id", Type.INT32, "The fetch session ID."), new Field("session_epoch", Type.INT32, "The fetch session epoch, which is used for ordering requests in a session."), new Field("topics", new ArrayOf(FetchTopic.SCHEMA_9), "The topics to fetch."), new Field("forgotten_topics_data", new ArrayOf(ForgottenTopic.SCHEMA_7), "In an incremental fetch request, the partitions to remove."), new Field("rack_id", Type.STRING, "Rack ID of the consumer making this request") ); public static final Schema SCHEMA_12 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the follower, of -1 if this request is from a consumer."), new Field("max_wait_ms", Type.INT32, "The maximum time in milliseconds to wait for the response."), new Field("min_bytes", Type.INT32, "The minimum bytes to accumulate in the response."), new Field("max_bytes", Type.INT32, "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored."), new Field("isolation_level", Type.INT8, "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. 
To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records"), new Field("session_id", Type.INT32, "The fetch session ID."), new Field("session_epoch", Type.INT32, "The fetch session epoch, which is used for ordering requests in a session."), new Field("topics", new CompactArrayOf(FetchTopic.SCHEMA_12), "The topics to fetch."), new Field("forgotten_topics_data", new CompactArrayOf(ForgottenTopic.SCHEMA_12), "In an incremental fetch request, the partitions to remove."), new Field("rack_id", Type.COMPACT_STRING, "Rack ID of the consumer making this request"), TaggedFieldsSection.of( 0, new Field("cluster_id", Type.COMPACT_NULLABLE_STRING, "The clusterId if known. This is used to validate metadata fetches prior to broker registration.") ) ); public static final Schema SCHEMA_13 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the follower, of -1 if this request is from a consumer."), new Field("max_wait_ms", Type.INT32, "The maximum time in milliseconds to wait for the response."), new Field("min_bytes", Type.INT32, "The minimum bytes to accumulate in the response."), new Field("max_bytes", Type.INT32, "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored."), new Field("isolation_level", Type.INT8, "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. 
To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records"), new Field("session_id", Type.INT32, "The fetch session ID."), new Field("session_epoch", Type.INT32, "The fetch session epoch, which is used for ordering requests in a session."), new Field("topics", new CompactArrayOf(FetchTopic.SCHEMA_13), "The topics to fetch."), new Field("forgotten_topics_data", new CompactArrayOf(ForgottenTopic.SCHEMA_13), "In an incremental fetch request, the partitions to remove."), new Field("rack_id", Type.COMPACT_STRING, "Rack ID of the consumer making this request"), TaggedFieldsSection.of( 0, new Field("cluster_id", Type.COMPACT_NULLABLE_STRING, "The clusterId if known. This is used to validate metadata fetches prior to broker registration.") ) ); public static final Schema SCHEMA_14 = SCHEMA_13; public static final Schema SCHEMA_15 = new Schema( new Field("max_wait_ms", Type.INT32, "The maximum time in milliseconds to wait for the response."), new Field("min_bytes", Type.INT32, "The minimum bytes to accumulate in the response."), new Field("max_bytes", Type.INT32, "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored."), new Field("isolation_level", Type.INT8, "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. 
To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records"), new Field("session_id", Type.INT32, "The fetch session ID."), new Field("session_epoch", Type.INT32, "The fetch session epoch, which is used for ordering requests in a session."), new Field("topics", new CompactArrayOf(FetchTopic.SCHEMA_13), "The topics to fetch."), new Field("forgotten_topics_data", new CompactArrayOf(ForgottenTopic.SCHEMA_13), "In an incremental fetch request, the partitions to remove."), new Field("rack_id", Type.COMPACT_STRING, "Rack ID of the consumer making this request"), TaggedFieldsSection.of( 0, new Field("cluster_id", Type.COMPACT_NULLABLE_STRING, "The clusterId if known. This is used to validate metadata fetches prior to broker registration."), 1, new Field("replica_state", ReplicaState.SCHEMA_15, "") ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12, SCHEMA_13, SCHEMA_14, SCHEMA_15 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 15; public FetchRequestData(Readable _readable, short _version) { read(_readable, _version); } public FetchRequestData() { this.clusterId = null; this.replicaId = -1; this.replicaState = new ReplicaState(); this.maxWaitMs = 0; this.minBytes = 0; this.maxBytes = 0x7fffffff; this.isolationLevel = (byte) 0; this.sessionId = 0; this.sessionEpoch = -1; this.topics = new ArrayList<FetchTopic>(0); this.forgottenTopicsData = new ArrayList<ForgottenTopic>(0); this.rackId = ""; } @Override public short apiKey() { return 1; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 15; } @Override 
public void read(Readable _readable, short _version) { { this.clusterId = null; } if (_version <= 14) { this.replicaId = _readable.readInt(); } else { this.replicaId = -1; } { this.replicaState = new ReplicaState(); } this.maxWaitMs = _readable.readInt(); this.minBytes = _readable.readInt(); if (_version >= 3) { this.maxBytes = _readable.readInt(); } else { this.maxBytes = 0x7fffffff; } if (_version >= 4) { this.isolationLevel = _readable.readByte(); } else { this.isolationLevel = (byte) 0; } if (_version >= 7) { this.sessionId = _readable.readInt(); } else { this.sessionId = 0; } if (_version >= 7) { this.sessionEpoch = _readable.readInt(); } else { this.sessionEpoch = -1; } { if (_version >= 12) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<FetchTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new FetchTopic(_readable, _version)); } this.topics = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<FetchTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new FetchTopic(_readable, _version)); } this.topics = newCollection; } } } if (_version >= 7) { if (_version >= 12) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { 
throw new RuntimeException("non-nullable field forgottenTopicsData was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ForgottenTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ForgottenTopic(_readable, _version)); } this.forgottenTopicsData = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field forgottenTopicsData was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ForgottenTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ForgottenTopic(_readable, _version)); } this.forgottenTopicsData = newCollection; } } } else { this.forgottenTopicsData = new ArrayList<ForgottenTopic>(0); } if (_version >= 11) { int length; if (_version >= 12) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field rackId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field rackId had invalid length " + length); } else { this.rackId = _readable.readString(length); } } else { this.rackId = ""; } this._unknownTaggedFields = null; if (_version >= 12) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { case 0: { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { this.clusterId 
= null; } else if (length > 0x7fff) { throw new RuntimeException("string field clusterId had invalid length " + length); } else { this.clusterId = _readable.readString(length); } break; } case 1: { if (_version >= 15) { this.replicaState = new ReplicaState(_readable, _version); break; } else { throw new RuntimeException("Tag 1 is not valid for version " + _version); } } default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 12) { if (this.clusterId != null) { _numTaggedFields++; } } if (_version <= 14) { _writable.writeInt(replicaId); } else { if (this.replicaId != -1) { throw new UnsupportedVersionException("Attempted to write a non-default replicaId at version " + _version); } } if (_version >= 15) { if (!this.replicaState.equals(new ReplicaState())) { _numTaggedFields++; } } else { if (!this.replicaState.equals(new ReplicaState())) { throw new UnsupportedVersionException("Attempted to write a non-default replicaState at version " + _version); } } _writable.writeInt(maxWaitMs); _writable.writeInt(minBytes); if (_version >= 3) { _writable.writeInt(maxBytes); } if (_version >= 4) { _writable.writeByte(isolationLevel); } if (_version >= 7) { _writable.writeInt(sessionId); } if (_version >= 7) { _writable.writeInt(sessionEpoch); } if (_version >= 12) { _writable.writeUnsignedVarint(topics.size() + 1); for (FetchTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(topics.size()); for (FetchTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } if (_version >= 7) { if (_version >= 12) { _writable.writeUnsignedVarint(forgottenTopicsData.size() + 1); for (ForgottenTopic forgottenTopicsDataElement : forgottenTopicsData) { forgottenTopicsDataElement.write(_writable, _cache, 
_version); } } else { _writable.writeInt(forgottenTopicsData.size()); for (ForgottenTopic forgottenTopicsDataElement : forgottenTopicsData) { forgottenTopicsDataElement.write(_writable, _cache, _version); } } } else { if (!this.forgottenTopicsData.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default forgottenTopicsData at version " + _version); } } if (_version >= 11) { { byte[] _stringBytes = _cache.getSerializedValue(rackId); if (_version >= 12) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 12) { _writable.writeUnsignedVarint(_numTaggedFields); if (clusterId != null) { _writable.writeUnsignedVarint(0); byte[] _stringBytes = _cache.getSerializedValue(this.clusterId); _writable.writeUnsignedVarint(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } if (_version >= 15) { { if (!this.replicaState.equals(new ReplicaState())) { _writable.writeUnsignedVarint(1); _writable.writeUnsignedVarint(this.replicaState.size(_cache, _version)); replicaState.write(_writable, _cache, _version); } } } _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 12) { if (clusterId == null) { } else { _numTaggedFields++; _size.addBytes(1); byte[] _stringBytes = clusterId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new 
RuntimeException("'clusterId' field is too long to be serialized"); } _cache.cacheSerializedValue(clusterId, _stringBytes); int _stringPrefixSize = ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1); _size.addBytes(_stringBytes.length + _stringPrefixSize + ByteUtils.sizeOfUnsignedVarint(_stringPrefixSize + _stringBytes.length)); } } if (_version <= 14) { _size.addBytes(4); } if (_version >= 15) { { if (!this.replicaState.equals(new ReplicaState())) { _numTaggedFields++; _size.addBytes(1); int _sizeBeforeStruct = _size.totalSize(); this.replicaState.addSize(_size, _cache, _version); int _structSize = _size.totalSize() - _sizeBeforeStruct; _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_structSize)); } } } _size.addBytes(4); _size.addBytes(4); if (_version >= 3) { _size.addBytes(4); } if (_version >= 4) { _size.addBytes(1); } if (_version >= 7) { _size.addBytes(4); } if (_version >= 7) { _size.addBytes(4); } { if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); } else { _size.addBytes(4); } for (FetchTopic topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_version >= 7) { { if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(forgottenTopicsData.size() + 1)); } else { _size.addBytes(4); } for (ForgottenTopic forgottenTopicsDataElement : forgottenTopicsData) { forgottenTopicsDataElement.addSize(_size, _cache, _version); } } } if (_version >= 11) { { byte[] _stringBytes = rackId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'rackId' field is too long to be serialized"); } _cache.cacheSerializedValue(rackId, _stringBytes); if (_version >= 12) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } /* Raw tagged fields contribute varint(tag) + varint(size) + payload bytes each. */ if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) {
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } /* Field-by-field value equality over all fields, including any raw tagged fields. */ @Override public boolean equals(Object obj) { if (!(obj instanceof FetchRequestData)) return false; FetchRequestData other = (FetchRequestData) obj; if (this.clusterId == null) { if (other.clusterId != null) return false; } else { if (!this.clusterId.equals(other.clusterId)) return false; } if (replicaId != other.replicaId) return false; if (this.replicaState == null) { if (other.replicaState != null) return false; } else { if (!this.replicaState.equals(other.replicaState)) return false; } if (maxWaitMs != other.maxWaitMs) return false; if (minBytes != other.minBytes) return false; if (maxBytes != other.maxBytes) return false; if (isolationLevel != other.isolationLevel) return false; if (sessionId != other.sessionId) return false; if (sessionEpoch != other.sessionEpoch) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } if (this.forgottenTopicsData == null) { if (other.forgottenTopicsData != null) return false; } else { if (!this.forgottenTopicsData.equals(other.forgottenTopicsData)) return false; } if (this.rackId == null) { if (other.rackId != null) return false; } else { if (!this.rackId.equals(other.rackId)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } /* Hash over the named fields only; unknown tagged fields are not folded in (though they ARE compared in equals above). */ @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (clusterId == null ? 0 : clusterId.hashCode()); hashCode = 31 * hashCode + replicaId; hashCode = 31 * hashCode + (replicaState == null ?
0 : replicaState.hashCode()); hashCode = 31 * hashCode + maxWaitMs; hashCode = 31 * hashCode + minBytes; hashCode = 31 * hashCode + maxBytes; hashCode = 31 * hashCode + isolationLevel; hashCode = 31 * hashCode + sessionId; hashCode = 31 * hashCode + sessionEpoch; hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); hashCode = 31 * hashCode + (forgottenTopicsData == null ? 0 : forgottenTopicsData.hashCode()); hashCode = 31 * hashCode + (rackId == null ? 0 : rackId.hashCode()); return hashCode; } /* Deep copy: list elements are duplicated individually; immutable String fields are shared by reference. */ @Override public FetchRequestData duplicate() { FetchRequestData _duplicate = new FetchRequestData(); if (clusterId == null) { _duplicate.clusterId = null; } else { _duplicate.clusterId = clusterId; } _duplicate.replicaId = replicaId; _duplicate.replicaState = replicaState.duplicate(); _duplicate.maxWaitMs = maxWaitMs; _duplicate.minBytes = minBytes; _duplicate.maxBytes = maxBytes; _duplicate.isolationLevel = isolationLevel; _duplicate.sessionId = sessionId; _duplicate.sessionEpoch = sessionEpoch; ArrayList<FetchTopic> newTopics = new ArrayList<FetchTopic>(topics.size()); for (FetchTopic _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; ArrayList<ForgottenTopic> newForgottenTopicsData = new ArrayList<ForgottenTopic>(forgottenTopicsData.size()); for (ForgottenTopic _element : forgottenTopicsData) { newForgottenTopicsData.add(_element.duplicate()); } _duplicate.forgottenTopicsData = newForgottenTopicsData; _duplicate.rackId = rackId; return _duplicate; } /* Human-readable dump of every field, for logging/debugging. */ @Override public String toString() { return "FetchRequestData(" + "clusterId=" + ((clusterId == null) ?
"null" : "'" + clusterId.toString() + "'") + ", replicaId=" + replicaId + ", replicaState=" + replicaState.toString() + ", maxWaitMs=" + maxWaitMs + ", minBytes=" + minBytes + ", maxBytes=" + maxBytes + ", isolationLevel=" + isolationLevel + ", sessionId=" + sessionId + ", sessionEpoch=" + sessionEpoch + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ", forgottenTopicsData=" + MessageUtil.deepToString(forgottenTopicsData.iterator()) + ", rackId=" + ((rackId == null) ? "null" : "'" + rackId.toString() + "'") + ")"; } /* Plain field accessors. */ public String clusterId() { return this.clusterId; } public int replicaId() { return this.replicaId; } public ReplicaState replicaState() { return this.replicaState; } public int maxWaitMs() { return this.maxWaitMs; } public int minBytes() { return this.minBytes; } public int maxBytes() { return this.maxBytes; } public byte isolationLevel() { return this.isolationLevel; } public int sessionId() { return this.sessionId; } public int sessionEpoch() { return this.sessionEpoch; } public List<FetchTopic> topics() { return this.topics; } public List<ForgottenTopic> forgottenTopicsData() { return this.forgottenTopicsData; } public List<RawTaggedField> rackIdPlaceholderNeverEmitted___see_below
setSessionId(int v) { this.sessionId = v; return this; } public FetchRequestData setSessionEpoch(int v) { this.sessionEpoch = v; return this; } public FetchRequestData setTopics(List<FetchTopic> v) { this.topics = v; return this; } public FetchRequestData setForgottenTopicsData(List<ForgottenTopic> v) { this.forgottenTopicsData = v; return this; } public FetchRequestData setRackId(String v) { this.rackId = v; return this; } public static class ReplicaState implements Message { int replicaId; long replicaEpoch; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_15 = new Schema( new Field("replica_id", Type.INT32, "The replica ID of the follower, or -1 if this request is from a consumer."), new Field("replica_epoch", Type.INT64, "The epoch of this follower, or -1 if not available."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, SCHEMA_15 }; public static final short LOWEST_SUPPORTED_VERSION = 15; public static final short HIGHEST_SUPPORTED_VERSION = 15; public ReplicaState(Readable _readable, short _version) { read(_readable, _version); } public ReplicaState() { this.replicaId = -1; this.replicaEpoch = -1L; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 15; } @Override public void read(Readable _readable, short _version) { if (_version > 15) { throw new UnsupportedVersionException("Can't read version " + _version + " of ReplicaState"); } this.replicaId = _readable.readInt(); this.replicaEpoch = _readable.readLong(); this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = 
_readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { if (_version < 15) { throw new UnsupportedVersionException("Can't write version " + _version + " of ReplicaState"); } int _numTaggedFields = 0; _writable.writeInt(replicaId); _writable.writeLong(replicaEpoch); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 15) { throw new UnsupportedVersionException("Can't size version " + _version + " of ReplicaState"); } _size.addBytes(4); _size.addBytes(8); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof ReplicaState)) return false; ReplicaState other = (ReplicaState) obj; if (replicaId != other.replicaId) return false; if (replicaEpoch != other.replicaEpoch) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + replicaId; hashCode = 31 * hashCode + ((int) (replicaEpoch >> 32) ^ (int) replicaEpoch); return hashCode; } @Override public ReplicaState duplicate() { ReplicaState _duplicate = new ReplicaState(); _duplicate.replicaId = replicaId; _duplicate.replicaEpoch = replicaEpoch; return _duplicate; } 
@Override public String toString() { return "ReplicaState(" + "replicaId=" + replicaId + ", replicaEpoch=" + replicaEpoch + ")"; } public int replicaId() { return this.replicaId; } public long replicaEpoch() { return this.replicaEpoch; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public ReplicaState setReplicaId(int v) { this.replicaId = v; return this; } public ReplicaState setReplicaEpoch(long v) { this.replicaEpoch = v; return this; } } public static class FetchTopic implements Message { String topic; Uuid topicId; List<FetchPartition> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topic", Type.STRING, "The name of the topic to fetch."), new Field("partitions", new ArrayOf(FetchPartition.SCHEMA_0), "The partitions to fetch.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("topic", Type.STRING, "The name of the topic to fetch."), new Field("partitions", new ArrayOf(FetchPartition.SCHEMA_5), "The partitions to fetch.") ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = new Schema( new Field("topic", Type.STRING, "The name of the topic to fetch."), new Field("partitions", new ArrayOf(FetchPartition.SCHEMA_9), "The partitions to fetch.") ); public static final Schema SCHEMA_10 = SCHEMA_9; public static final Schema SCHEMA_11 = SCHEMA_10; public static final Schema SCHEMA_12 = new Schema( new Field("topic", Type.COMPACT_STRING, "The name of the topic to fetch."), new Field("partitions", new 
CompactArrayOf(FetchPartition.SCHEMA_12), "The partitions to fetch."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_13 = new Schema( new Field("topic_id", Type.UUID, "The unique topic ID"), new Field("partitions", new CompactArrayOf(FetchPartition.SCHEMA_12), "The partitions to fetch."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_14 = SCHEMA_13; public static final Schema SCHEMA_15 = SCHEMA_14; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12, SCHEMA_13, SCHEMA_14, SCHEMA_15 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 15; public FetchTopic(Readable _readable, short _version) { read(_readable, _version); } public FetchTopic() { this.topic = ""; this.topicId = Uuid.ZERO_UUID; this.partitions = new ArrayList<FetchPartition>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 15; } @Override public void read(Readable _readable, short _version) { if (_version > 15) { throw new UnsupportedVersionException("Can't read version " + _version + " of FetchTopic"); } if (_version <= 12) { int length; if (_version >= 12) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field topic was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field topic had invalid length " + length); } else { this.topic = _readable.readString(length); } } else { this.topic = ""; } if (_version >= 13) { this.topicId = _readable.readUuid(); } else { this.topicId = Uuid.ZERO_UUID; } { if (_version >= 12) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was 
serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<FetchPartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new FetchPartition(_readable, _version)); } this.partitions = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<FetchPartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new FetchPartition(_readable, _version)); } this.partitions = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 12) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version <= 12) { { byte[] _stringBytes = _cache.getSerializedValue(topic); if (_version >= 12) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } if (_version >= 13) { _writable.writeUuid(topicId); } if (_version >= 12) { _writable.writeUnsignedVarint(partitions.size() + 1); for (FetchPartition partitionsElement : partitions) { 
partitionsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitions.size()); for (FetchPartition partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 12) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 15) { throw new UnsupportedVersionException("Can't size version " + _version + " of FetchTopic"); } if (_version <= 12) { { byte[] _stringBytes = topic.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'topic' field is too long to be serialized"); } _cache.cacheSerializedValue(topic, _stringBytes); if (_version >= 12) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } if (_version >= 13) { _size.addBytes(16); } { if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { _size.addBytes(4); } for (FetchPartition partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { 
if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof FetchTopic)) return false; FetchTopic other = (FetchTopic) obj; if (this.topic == null) { if (other.topic != null) return false; } else { if (!this.topic.equals(other.topic)) return false; } if (!this.topicId.equals(other.topicId)) return false; if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (topic == null ? 0 : topic.hashCode()); hashCode = 31 * hashCode + topicId.hashCode(); hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode()); return hashCode; } @Override public FetchTopic duplicate() { FetchTopic _duplicate = new FetchTopic(); _duplicate.topic = topic; _duplicate.topicId = topicId; ArrayList<FetchPartition> newPartitions = new ArrayList<FetchPartition>(partitions.size()); for (FetchPartition _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "FetchTopic(" + "topic=" + ((topic == null) ? 
"null" : "'" + topic.toString() + "'") + ", topicId=" + topicId.toString() + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String topic() { return this.topic; } public Uuid topicId() { return this.topicId; } public List<FetchPartition> partitions() { return this.partitions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public FetchTopic setTopic(String v) { this.topic = v; return this; } public FetchTopic setTopicId(Uuid v) { this.topicId = v; return this; } public FetchTopic setPartitions(List<FetchPartition> v) { this.partitions = v; return this; } } public static class FetchPartition implements Message { int partition; int currentLeaderEpoch; long fetchOffset; int lastFetchedEpoch; long logStartOffset; int partitionMaxBytes; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("partition", Type.INT32, "The partition index."), new Field("fetch_offset", Type.INT64, "The message offset."), new Field("partition_max_bytes", Type.INT32, "The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("partition", Type.INT32, "The partition index."), new Field("fetch_offset", Type.INT64, "The message offset."), new Field("log_start_offset", Type.INT64, "The earliest available offset of the follower replica. The field is only used when the request is sent by the follower."), new Field("partition_max_bytes", Type.INT32, "The maximum bytes to fetch from this partition. 
See KIP-74 for cases where this limit may not be honored.") ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = new Schema( new Field("partition", Type.INT32, "The partition index."), new Field("current_leader_epoch", Type.INT32, "The current leader epoch of the partition."), new Field("fetch_offset", Type.INT64, "The message offset."), new Field("log_start_offset", Type.INT64, "The earliest available offset of the follower replica. The field is only used when the request is sent by the follower."), new Field("partition_max_bytes", Type.INT32, "The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored.") ); public static final Schema SCHEMA_10 = SCHEMA_9; public static final Schema SCHEMA_11 = SCHEMA_10; public static final Schema SCHEMA_12 = new Schema( new Field("partition", Type.INT32, "The partition index."), new Field("current_leader_epoch", Type.INT32, "The current leader epoch of the partition."), new Field("fetch_offset", Type.INT64, "The message offset."), new Field("last_fetched_epoch", Type.INT32, "The epoch of the last fetched record or -1 if there is none"), new Field("log_start_offset", Type.INT64, "The earliest available offset of the follower replica. The field is only used when the request is sent by the follower."), new Field("partition_max_bytes", Type.INT32, "The maximum bytes to fetch from this partition. 
See KIP-74 for cases where this limit may not be honored."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_13 = SCHEMA_12; public static final Schema SCHEMA_14 = SCHEMA_13; public static final Schema SCHEMA_15 = SCHEMA_14; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12, SCHEMA_13, SCHEMA_14, SCHEMA_15 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 15; public FetchPartition(Readable _readable, short _version) { read(_readable, _version); } public FetchPartition() { this.partition = 0; this.currentLeaderEpoch = -1; this.fetchOffset = 0L; this.lastFetchedEpoch = -1; this.logStartOffset = -1L; this.partitionMaxBytes = 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 15; } @Override public void read(Readable _readable, short _version) { if (_version > 15) { throw new UnsupportedVersionException("Can't read version " + _version + " of FetchPartition"); } this.partition = _readable.readInt(); if (_version >= 9) { this.currentLeaderEpoch = _readable.readInt(); } else { this.currentLeaderEpoch = -1; } this.fetchOffset = _readable.readLong(); if (_version >= 12) { this.lastFetchedEpoch = _readable.readInt(); } else { this.lastFetchedEpoch = -1; } if (_version >= 5) { this.logStartOffset = _readable.readLong(); } else { this.logStartOffset = -1L; } this.partitionMaxBytes = _readable.readInt(); this._unknownTaggedFields = null; if (_version >= 12) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override 
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(partition); if (_version >= 9) { _writable.writeInt(currentLeaderEpoch); } _writable.writeLong(fetchOffset); if (_version >= 12) { _writable.writeInt(lastFetchedEpoch); } else { if (this.lastFetchedEpoch != -1) { throw new UnsupportedVersionException("Attempted to write a non-default lastFetchedEpoch at version " + _version); } } if (_version >= 5) { _writable.writeLong(logStartOffset); } _writable.writeInt(partitionMaxBytes); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 12) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 15) { throw new UnsupportedVersionException("Can't size version " + _version + " of FetchPartition"); } _size.addBytes(4); if (_version >= 9) { _size.addBytes(4); } _size.addBytes(8); if (_version >= 12) { _size.addBytes(4); } if (_version >= 5) { _size.addBytes(8); } _size.addBytes(4); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } 
} } @Override public boolean equals(Object obj) { if (!(obj instanceof FetchPartition)) return false; FetchPartition other = (FetchPartition) obj; if (partition != other.partition) return false; if (currentLeaderEpoch != other.currentLeaderEpoch) return false; if (fetchOffset != other.fetchOffset) return false; if (lastFetchedEpoch != other.lastFetchedEpoch) return false; if (logStartOffset != other.logStartOffset) return false; if (partitionMaxBytes != other.partitionMaxBytes) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partition; hashCode = 31 * hashCode + currentLeaderEpoch; hashCode = 31 * hashCode + ((int) (fetchOffset >> 32) ^ (int) fetchOffset); hashCode = 31 * hashCode + lastFetchedEpoch; hashCode = 31 * hashCode + ((int) (logStartOffset >> 32) ^ (int) logStartOffset); hashCode = 31 * hashCode + partitionMaxBytes; return hashCode; } @Override public FetchPartition duplicate() { FetchPartition _duplicate = new FetchPartition(); _duplicate.partition = partition; _duplicate.currentLeaderEpoch = currentLeaderEpoch; _duplicate.fetchOffset = fetchOffset; _duplicate.lastFetchedEpoch = lastFetchedEpoch; _duplicate.logStartOffset = logStartOffset; _duplicate.partitionMaxBytes = partitionMaxBytes; return _duplicate; } @Override public String toString() { return "FetchPartition(" + "partition=" + partition + ", currentLeaderEpoch=" + currentLeaderEpoch + ", fetchOffset=" + fetchOffset + ", lastFetchedEpoch=" + lastFetchedEpoch + ", logStartOffset=" + logStartOffset + ", partitionMaxBytes=" + partitionMaxBytes + ")"; } public int partition() { return this.partition; } public int currentLeaderEpoch() { return this.currentLeaderEpoch; } public long fetchOffset() { return this.fetchOffset; } public int lastFetchedEpoch() { return this.lastFetchedEpoch; } public long logStartOffset() { return this.logStartOffset; } public int 
partitionMaxBytes() { return this.partitionMaxBytes; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public FetchPartition setPartition(int v) { this.partition = v; return this; } public FetchPartition setCurrentLeaderEpoch(int v) { this.currentLeaderEpoch = v; return this; } public FetchPartition setFetchOffset(long v) { this.fetchOffset = v; return this; } public FetchPartition setLastFetchedEpoch(int v) { this.lastFetchedEpoch = v; return this; } public FetchPartition setLogStartOffset(long v) { this.logStartOffset = v; return this; } public FetchPartition setPartitionMaxBytes(int v) { this.partitionMaxBytes = v; return this; } } public static class ForgottenTopic implements Message { String topic; Uuid topicId; List<Integer> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_7 = new Schema( new Field("topic", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(Type.INT32), "The partitions indexes to forget.") ); public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = SCHEMA_8; public static final Schema SCHEMA_10 = SCHEMA_9; public static final Schema SCHEMA_11 = SCHEMA_10; public static final Schema SCHEMA_12 = new Schema( new Field("topic", Type.COMPACT_STRING, "The topic name."), new Field("partitions", new CompactArrayOf(Type.INT32), "The partitions indexes to forget."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_13 = new Schema( new Field("topic_id", Type.UUID, "The unique topic ID"), new Field("partitions", new CompactArrayOf(Type.INT32), "The partitions indexes to forget."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_14 = SCHEMA_13; public static final Schema SCHEMA_15 = SCHEMA_14; public static final Schema[] SCHEMAS = new Schema[] { null, null, null, null, null, null, null, SCHEMA_7, SCHEMA_8, 
SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12, SCHEMA_13, SCHEMA_14, SCHEMA_15 }; public static final short LOWEST_SUPPORTED_VERSION = 7; public static final short HIGHEST_SUPPORTED_VERSION = 15; public ForgottenTopic(Readable _readable, short _version) { read(_readable, _version); } public ForgottenTopic() { this.topic = ""; this.topicId = Uuid.ZERO_UUID; this.partitions = new ArrayList<Integer>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 15; } @Override public void read(Readable _readable, short _version) { if (_version > 15) { throw new UnsupportedVersionException("Can't read version " + _version + " of ForgottenTopic"); } if (_version <= 12) { int length; if (_version >= 12) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field topic was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field topic had invalid length " + length); } else { this.topic = _readable.readString(length); } } else { this.topic = ""; } if (_version >= 13) { this.topicId = _readable.readUuid(); } else { this.topicId = Uuid.ZERO_UUID; } { int arrayLength; if (_version >= 12) { arrayLength = _readable.readUnsignedVarint() - 1; } else { arrayLength = _readable.readInt(); } if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<Integer> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(_readable.readInt()); } this.partitions = newCollection; } } this._unknownTaggedFields = null; if (_version >= 12) { int _numTaggedFields = _readable.readUnsignedVarint(); for 
(int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { if (_version < 7) { throw new UnsupportedVersionException("Can't write version " + _version + " of ForgottenTopic"); } int _numTaggedFields = 0; if (_version <= 12) { { byte[] _stringBytes = _cache.getSerializedValue(topic); if (_version >= 12) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } if (_version >= 13) { _writable.writeUuid(topicId); } if (_version >= 12) { _writable.writeUnsignedVarint(partitions.size() + 1); } else { _writable.writeInt(partitions.size()); } for (Integer partitionsElement : partitions) { _writable.writeInt(partitionsElement); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 12) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 15) { throw new UnsupportedVersionException("Can't size version " + _version + " of ForgottenTopic"); } if (_version <= 12) { { byte[] _stringBytes = topic.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'topic' field is too long to be serialized"); } _cache.cacheSerializedValue(topic, _stringBytes); if 
(_version >= 12) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } if (_version >= 13) { _size.addBytes(16); } { if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { _size.addBytes(4); } _size.addBytes(partitions.size() * 4); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof ForgottenTopic)) return false; ForgottenTopic other = (ForgottenTopic) obj; if (this.topic == null) { if (other.topic != null) return false; } else { if (!this.topic.equals(other.topic)) return false; } if (!this.topicId.equals(other.topicId)) return false; if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (topic == null ? 0 : topic.hashCode()); hashCode = 31 * hashCode + topicId.hashCode(); hashCode = 31 * hashCode + (partitions == null ? 
0 : partitions.hashCode()); return hashCode; } @Override public ForgottenTopic duplicate() { ForgottenTopic _duplicate = new ForgottenTopic(); _duplicate.topic = topic; _duplicate.topicId = topicId; ArrayList<Integer> newPartitions = new ArrayList<Integer>(partitions.size()); for (Integer _element : partitions) { newPartitions.add(_element); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "ForgottenTopic(" + "topic=" + ((topic == null) ? "null" : "'" + topic.toString() + "'") + ", topicId=" + topicId.toString() + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String topic() { return this.topic; } public Uuid topicId() { return this.topicId; } public List<Integer> partitions() { return this.partitions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public ForgottenTopic setTopic(String v) { this.topic = v; return this; } public ForgottenTopic setTopicId(Uuid v) { this.topicId = v; return this; } public ForgottenTopic setPartitions(List<Integer> v) { this.partitions = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/FetchRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.LongNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.FetchRequestData.*; public class FetchRequestDataJsonConverter { public static FetchRequestData read(JsonNode _node, short _version) { FetchRequestData _object = new FetchRequestData(); JsonNode _clusterIdNode = _node.get("clusterId"); if (_clusterIdNode == null) { _object.clusterId = null; } else { if (_clusterIdNode.isNull()) { _object.clusterId = null; } else { if (!_clusterIdNode.isTextual()) { throw new RuntimeException("FetchRequestData expected a 
string type, but got " + _node.getNodeType()); } _object.clusterId = _clusterIdNode.asText(); } } JsonNode _replicaIdNode = _node.get("replicaId"); if (_replicaIdNode == null) { if (_version <= 14) { throw new RuntimeException("FetchRequestData: unable to locate field 'replicaId', which is mandatory in version " + _version); } else { _object.replicaId = -1; } } else { _object.replicaId = MessageUtil.jsonNodeToInt(_replicaIdNode, "FetchRequestData"); } JsonNode _replicaStateNode = _node.get("replicaState"); if (_replicaStateNode == null) { _object.replicaState = new ReplicaState(); } else { _object.replicaState = ReplicaStateJsonConverter.read(_replicaStateNode, _version); } JsonNode _maxWaitMsNode = _node.get("maxWaitMs"); if (_maxWaitMsNode == null) { throw new RuntimeException("FetchRequestData: unable to locate field 'maxWaitMs', which is mandatory in version " + _version); } else { _object.maxWaitMs = MessageUtil.jsonNodeToInt(_maxWaitMsNode, "FetchRequestData"); } JsonNode _minBytesNode = _node.get("minBytes"); if (_minBytesNode == null) { throw new RuntimeException("FetchRequestData: unable to locate field 'minBytes', which is mandatory in version " + _version); } else { _object.minBytes = MessageUtil.jsonNodeToInt(_minBytesNode, "FetchRequestData"); } JsonNode _maxBytesNode = _node.get("maxBytes"); if (_maxBytesNode == null) { if (_version >= 3) { throw new RuntimeException("FetchRequestData: unable to locate field 'maxBytes', which is mandatory in version " + _version); } else { _object.maxBytes = 0x7fffffff; } } else { _object.maxBytes = MessageUtil.jsonNodeToInt(_maxBytesNode, "FetchRequestData"); } JsonNode _isolationLevelNode = _node.get("isolationLevel"); if (_isolationLevelNode == null) { if (_version >= 4) { throw new RuntimeException("FetchRequestData: unable to locate field 'isolationLevel', which is mandatory in version " + _version); } else { _object.isolationLevel = (byte) 0; } } else { _object.isolationLevel = 
MessageUtil.jsonNodeToByte(_isolationLevelNode, "FetchRequestData"); } JsonNode _sessionIdNode = _node.get("sessionId"); if (_sessionIdNode == null) { if (_version >= 7) { throw new RuntimeException("FetchRequestData: unable to locate field 'sessionId', which is mandatory in version " + _version); } else { _object.sessionId = 0; } } else { _object.sessionId = MessageUtil.jsonNodeToInt(_sessionIdNode, "FetchRequestData"); } JsonNode _sessionEpochNode = _node.get("sessionEpoch"); if (_sessionEpochNode == null) { if (_version >= 7) { throw new RuntimeException("FetchRequestData: unable to locate field 'sessionEpoch', which is mandatory in version " + _version); } else { _object.sessionEpoch = -1; } } else { _object.sessionEpoch = MessageUtil.jsonNodeToInt(_sessionEpochNode, "FetchRequestData"); } JsonNode _topicsNode = _node.get("topics"); if (_topicsNode == null) { throw new RuntimeException("FetchRequestData: unable to locate field 'topics', which is mandatory in version " + _version); } else { if (!_topicsNode.isArray()) { throw new RuntimeException("FetchRequestData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<FetchTopic> _collection = new ArrayList<FetchTopic>(_topicsNode.size()); _object.topics = _collection; for (JsonNode _element : _topicsNode) { _collection.add(FetchTopicJsonConverter.read(_element, _version)); } } JsonNode _forgottenTopicsDataNode = _node.get("forgottenTopicsData"); if (_forgottenTopicsDataNode == null) { if (_version >= 7) { throw new RuntimeException("FetchRequestData: unable to locate field 'forgottenTopicsData', which is mandatory in version " + _version); } else { _object.forgottenTopicsData = new ArrayList<ForgottenTopic>(0); } } else { if (!_forgottenTopicsDataNode.isArray()) { throw new RuntimeException("FetchRequestData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<ForgottenTopic> _collection = new ArrayList<ForgottenTopic>(_forgottenTopicsDataNode.size()); _object.forgottenTopicsData = 
_collection; for (JsonNode _element : _forgottenTopicsDataNode) { _collection.add(ForgottenTopicJsonConverter.read(_element, _version)); } } JsonNode _rackIdNode = _node.get("rackId"); if (_rackIdNode == null) { if (_version >= 11) { throw new RuntimeException("FetchRequestData: unable to locate field 'rackId', which is mandatory in version " + _version); } else { _object.rackId = ""; } } else { if (!_rackIdNode.isTextual()) { throw new RuntimeException("FetchRequestData expected a string type, but got " + _node.getNodeType()); } _object.rackId = _rackIdNode.asText(); } return _object; } public static JsonNode write(FetchRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_version >= 12) { if (_object.clusterId != null) { _node.set("clusterId", new TextNode(_object.clusterId)); } } if (_version <= 14) { _node.set("replicaId", new IntNode(_object.replicaId)); } else { if (_object.replicaId != -1) { throw new UnsupportedVersionException("Attempted to write a non-default replicaId at version " + _version); } } if (_version >= 15) { if (!_object.replicaState.equals(new ReplicaState())) { _node.set("replicaState", ReplicaStateJsonConverter.write(_object.replicaState, _version, _serializeRecords)); } } else { if (!_object.replicaState.equals(new ReplicaState())) { throw new UnsupportedVersionException("Attempted to write a non-default replicaState at version " + _version); } } _node.set("maxWaitMs", new IntNode(_object.maxWaitMs)); _node.set("minBytes", new IntNode(_object.minBytes)); if (_version >= 3) { _node.set("maxBytes", new IntNode(_object.maxBytes)); } if (_version >= 4) { _node.set("isolationLevel", new ShortNode(_object.isolationLevel)); } if (_version >= 7) { _node.set("sessionId", new IntNode(_object.sessionId)); } if (_version >= 7) { _node.set("sessionEpoch", new IntNode(_object.sessionEpoch)); } ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance); for (FetchTopic 
_element : _object.topics) { _topicsArray.add(FetchTopicJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topics", _topicsArray); if (_version >= 7) { ArrayNode _forgottenTopicsDataArray = new ArrayNode(JsonNodeFactory.instance); for (ForgottenTopic _element : _object.forgottenTopicsData) { _forgottenTopicsDataArray.add(ForgottenTopicJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("forgottenTopicsData", _forgottenTopicsDataArray); } else { if (!_object.forgottenTopicsData.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default forgottenTopicsData at version " + _version); } } if (_version >= 11) { _node.set("rackId", new TextNode(_object.rackId)); } return _node; } public static JsonNode write(FetchRequestData _object, short _version) { return write(_object, _version, true); } public static class FetchPartitionJsonConverter { public static FetchPartition read(JsonNode _node, short _version) { FetchPartition _object = new FetchPartition(); JsonNode _partitionNode = _node.get("partition"); if (_partitionNode == null) { throw new RuntimeException("FetchPartition: unable to locate field 'partition', which is mandatory in version " + _version); } else { _object.partition = MessageUtil.jsonNodeToInt(_partitionNode, "FetchPartition"); } JsonNode _currentLeaderEpochNode = _node.get("currentLeaderEpoch"); if (_currentLeaderEpochNode == null) { if (_version >= 9) { throw new RuntimeException("FetchPartition: unable to locate field 'currentLeaderEpoch', which is mandatory in version " + _version); } else { _object.currentLeaderEpoch = -1; } } else { _object.currentLeaderEpoch = MessageUtil.jsonNodeToInt(_currentLeaderEpochNode, "FetchPartition"); } JsonNode _fetchOffsetNode = _node.get("fetchOffset"); if (_fetchOffsetNode == null) { throw new RuntimeException("FetchPartition: unable to locate field 'fetchOffset', which is mandatory in version " + _version); } else { _object.fetchOffset = 
MessageUtil.jsonNodeToLong(_fetchOffsetNode, "FetchPartition"); } JsonNode _lastFetchedEpochNode = _node.get("lastFetchedEpoch"); if (_lastFetchedEpochNode == null) { if (_version >= 12) { throw new RuntimeException("FetchPartition: unable to locate field 'lastFetchedEpoch', which is mandatory in version " + _version); } else { _object.lastFetchedEpoch = -1; } } else { _object.lastFetchedEpoch = MessageUtil.jsonNodeToInt(_lastFetchedEpochNode, "FetchPartition"); } JsonNode _logStartOffsetNode = _node.get("logStartOffset"); if (_logStartOffsetNode == null) { if (_version >= 5) { throw new RuntimeException("FetchPartition: unable to locate field 'logStartOffset', which is mandatory in version " + _version); } else { _object.logStartOffset = -1L; } } else { _object.logStartOffset = MessageUtil.jsonNodeToLong(_logStartOffsetNode, "FetchPartition"); } JsonNode _partitionMaxBytesNode = _node.get("partitionMaxBytes"); if (_partitionMaxBytesNode == null) { throw new RuntimeException("FetchPartition: unable to locate field 'partitionMaxBytes', which is mandatory in version " + _version); } else { _object.partitionMaxBytes = MessageUtil.jsonNodeToInt(_partitionMaxBytesNode, "FetchPartition"); } return _object; } public static JsonNode write(FetchPartition _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("partition", new IntNode(_object.partition)); if (_version >= 9) { _node.set("currentLeaderEpoch", new IntNode(_object.currentLeaderEpoch)); } _node.set("fetchOffset", new LongNode(_object.fetchOffset)); if (_version >= 12) { _node.set("lastFetchedEpoch", new IntNode(_object.lastFetchedEpoch)); } else { if (_object.lastFetchedEpoch != -1) { throw new UnsupportedVersionException("Attempted to write a non-default lastFetchedEpoch at version " + _version); } } if (_version >= 5) { _node.set("logStartOffset", new LongNode(_object.logStartOffset)); } _node.set("partitionMaxBytes", new 
IntNode(_object.partitionMaxBytes)); return _node; } public static JsonNode write(FetchPartition _object, short _version) { return write(_object, _version, true); } } public static class FetchTopicJsonConverter { public static FetchTopic read(JsonNode _node, short _version) { FetchTopic _object = new FetchTopic(); JsonNode _topicNode = _node.get("topic"); if (_topicNode == null) { if (_version <= 12) { throw new RuntimeException("FetchTopic: unable to locate field 'topic', which is mandatory in version " + _version); } else { _object.topic = ""; } } else { if (!_topicNode.isTextual()) { throw new RuntimeException("FetchTopic expected a string type, but got " + _node.getNodeType()); } _object.topic = _topicNode.asText(); } JsonNode _topicIdNode = _node.get("topicId"); if (_topicIdNode == null) { if (_version >= 13) { throw new RuntimeException("FetchTopic: unable to locate field 'topicId', which is mandatory in version " + _version); } else { _object.topicId = Uuid.ZERO_UUID; } } else { if (!_topicIdNode.isTextual()) { throw new RuntimeException("FetchTopic expected a JSON string type, but got " + _node.getNodeType()); } _object.topicId = Uuid.fromString(_topicIdNode.asText()); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("FetchTopic: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new RuntimeException("FetchTopic expected a JSON array, but got " + _node.getNodeType()); } ArrayList<FetchPartition> _collection = new ArrayList<FetchPartition>(_partitionsNode.size()); _object.partitions = _collection; for (JsonNode _element : _partitionsNode) { _collection.add(FetchPartitionJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(FetchTopic _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_version <= 12) { 
_node.set("topic", new TextNode(_object.topic)); } if (_version >= 13) { _node.set("topicId", new TextNode(_object.topicId.toString())); } ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (FetchPartition _element : _object.partitions) { _partitionsArray.add(FetchPartitionJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitions", _partitionsArray); return _node; } public static JsonNode write(FetchTopic _object, short _version) { return write(_object, _version, true); } } public static class ForgottenTopicJsonConverter { public static ForgottenTopic read(JsonNode _node, short _version) { ForgottenTopic _object = new ForgottenTopic(); if (_version < 7) { throw new UnsupportedVersionException("Can't read version " + _version + " of ForgottenTopic"); } JsonNode _topicNode = _node.get("topic"); if (_topicNode == null) { if (_version <= 12) { throw new RuntimeException("ForgottenTopic: unable to locate field 'topic', which is mandatory in version " + _version); } else { _object.topic = ""; } } else { if (!_topicNode.isTextual()) { throw new RuntimeException("ForgottenTopic expected a string type, but got " + _node.getNodeType()); } _object.topic = _topicNode.asText(); } JsonNode _topicIdNode = _node.get("topicId"); if (_topicIdNode == null) { if (_version >= 13) { throw new RuntimeException("ForgottenTopic: unable to locate field 'topicId', which is mandatory in version " + _version); } else { _object.topicId = Uuid.ZERO_UUID; } } else { if (!_topicIdNode.isTextual()) { throw new RuntimeException("ForgottenTopic expected a JSON string type, but got " + _node.getNodeType()); } _object.topicId = Uuid.fromString(_topicIdNode.asText()); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("ForgottenTopic: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new 
RuntimeException("ForgottenTopic expected a JSON array, but got " + _node.getNodeType()); } ArrayList<Integer> _collection = new ArrayList<Integer>(_partitionsNode.size()); _object.partitions = _collection; for (JsonNode _element : _partitionsNode) { _collection.add(MessageUtil.jsonNodeToInt(_element, "ForgottenTopic element")); } } return _object; } public static JsonNode write(ForgottenTopic _object, short _version, boolean _serializeRecords) { if (_version < 7) { throw new UnsupportedVersionException("Can't write version " + _version + " of ForgottenTopic"); } ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_version <= 12) { _node.set("topic", new TextNode(_object.topic)); } if (_version >= 13) { _node.set("topicId", new TextNode(_object.topicId.toString())); } ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (Integer _element : _object.partitions) { _partitionsArray.add(new IntNode(_element)); } _node.set("partitions", _partitionsArray); return _node; } public static JsonNode write(ForgottenTopic _object, short _version) { return write(_object, _version, true); } } public static class ReplicaStateJsonConverter { public static ReplicaState read(JsonNode _node, short _version) { ReplicaState _object = new ReplicaState(); if (_version < 15) { throw new UnsupportedVersionException("Can't read version " + _version + " of ReplicaState"); } JsonNode _replicaIdNode = _node.get("replicaId"); if (_replicaIdNode == null) { throw new RuntimeException("ReplicaState: unable to locate field 'replicaId', which is mandatory in version " + _version); } else { _object.replicaId = MessageUtil.jsonNodeToInt(_replicaIdNode, "ReplicaState"); } JsonNode _replicaEpochNode = _node.get("replicaEpoch"); if (_replicaEpochNode == null) { throw new RuntimeException("ReplicaState: unable to locate field 'replicaEpoch', which is mandatory in version " + _version); } else { _object.replicaEpoch = MessageUtil.jsonNodeToLong(_replicaEpochNode, 
"ReplicaState"); } return _object; } public static JsonNode write(ReplicaState _object, short _version, boolean _serializeRecords) { if (_version < 15) { throw new UnsupportedVersionException("Can't write version " + _version + " of ReplicaState"); } ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("replicaId", new IntNode(_object.replicaId)); _node.set("replicaEpoch", new LongNode(_object.replicaEpoch)); return _node; } public static JsonNode write(ReplicaState _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/FetchResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Objects; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.record.BaseRecords; import org.apache.kafka.common.record.MemoryRecords; import 
org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class FetchResponseData implements ApiMessage { int throttleTimeMs; short errorCode; int sessionId; List<FetchableTopicResponse> responses; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("responses", new ArrayOf(FetchableTopicResponse.SCHEMA_0), "The response topics.") ); public static final Schema SCHEMA_1 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("responses", new ArrayOf(FetchableTopicResponse.SCHEMA_0), "The response topics.") ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("responses", new ArrayOf(FetchableTopicResponse.SCHEMA_4), "The response topics.") ); public static final Schema SCHEMA_5 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("responses", new ArrayOf(FetchableTopicResponse.SCHEMA_5), "The response topics.") ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The top level response error code."), new Field("session_id", Type.INT32, "The fetch session ID, or 0 if this is not 
part of a fetch session."), new Field("responses", new ArrayOf(FetchableTopicResponse.SCHEMA_5), "The response topics.") ); public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = SCHEMA_8; public static final Schema SCHEMA_10 = SCHEMA_9; public static final Schema SCHEMA_11 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The top level response error code."), new Field("session_id", Type.INT32, "The fetch session ID, or 0 if this is not part of a fetch session."), new Field("responses", new ArrayOf(FetchableTopicResponse.SCHEMA_11), "The response topics.") ); public static final Schema SCHEMA_12 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The top level response error code."), new Field("session_id", Type.INT32, "The fetch session ID, or 0 if this is not part of a fetch session."), new Field("responses", new CompactArrayOf(FetchableTopicResponse.SCHEMA_12), "The response topics."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_13 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The top level response error code."), new Field("session_id", Type.INT32, "The fetch session ID, or 0 if this is not part of a fetch session."), new Field("responses", new CompactArrayOf(FetchableTopicResponse.SCHEMA_13), "The response topics."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_14 = SCHEMA_13; public static final Schema SCHEMA_15 = SCHEMA_14; public 
static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12, SCHEMA_13, SCHEMA_14, SCHEMA_15 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 15; public FetchResponseData(Readable _readable, short _version) { read(_readable, _version); } public FetchResponseData() { this.throttleTimeMs = 0; this.errorCode = (short) 0; this.sessionId = 0; this.responses = new ArrayList<FetchableTopicResponse>(0); } @Override public short apiKey() { return 1; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 15; } @Override public void read(Readable _readable, short _version) { if (_version >= 1) { this.throttleTimeMs = _readable.readInt(); } else { this.throttleTimeMs = 0; } if (_version >= 7) { this.errorCode = _readable.readShort(); } else { this.errorCode = (short) 0; } if (_version >= 7) { this.sessionId = _readable.readInt(); } else { this.sessionId = 0; } { if (_version >= 12) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field responses was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<FetchableTopicResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new FetchableTopicResponse(_readable, _version)); } this.responses = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field responses was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to 
allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<FetchableTopicResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new FetchableTopicResponse(_readable, _version)); } this.responses = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 12) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 1) { _writable.writeInt(throttleTimeMs); } if (_version >= 7) { _writable.writeShort(errorCode); } if (_version >= 7) { _writable.writeInt(sessionId); } else { if (this.sessionId != 0) { throw new UnsupportedVersionException("Attempted to write a non-default sessionId at version " + _version); } } if (_version >= 12) { _writable.writeUnsignedVarint(responses.size() + 1); for (FetchableTopicResponse responsesElement : responses) { responsesElement.write(_writable, _cache, _version); } } else { _writable.writeInt(responses.size()); for (FetchableTopicResponse responsesElement : responses) { responsesElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 12) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void 
addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 1) { _size.addBytes(4); } if (_version >= 7) { _size.addBytes(2); } if (_version >= 7) { _size.addBytes(4); } { if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(responses.size() + 1)); } else { _size.addBytes(4); } for (FetchableTopicResponse responsesElement : responses) { responsesElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof FetchResponseData)) return false; FetchResponseData other = (FetchResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (errorCode != other.errorCode) return false; if (sessionId != other.sessionId) return false; if (this.responses == null) { if (other.responses != null) return false; } else { if (!this.responses.equals(other.responses)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + sessionId; hashCode = 31 * hashCode + (responses == null ? 
0 : responses.hashCode()); return hashCode; } @Override public FetchResponseData duplicate() { FetchResponseData _duplicate = new FetchResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; _duplicate.errorCode = errorCode; _duplicate.sessionId = sessionId; ArrayList<FetchableTopicResponse> newResponses = new ArrayList<FetchableTopicResponse>(responses.size()); for (FetchableTopicResponse _element : responses) { newResponses.add(_element.duplicate()); } _duplicate.responses = newResponses; return _duplicate; } @Override public String toString() { return "FetchResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", errorCode=" + errorCode + ", sessionId=" + sessionId + ", responses=" + MessageUtil.deepToString(responses.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public short errorCode() { return this.errorCode; } public int sessionId() { return this.sessionId; } public List<FetchableTopicResponse> responses() { return this.responses; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public FetchResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public FetchResponseData setErrorCode(short v) { this.errorCode = v; return this; } public FetchResponseData setSessionId(int v) { this.sessionId = v; return this; } public FetchResponseData setResponses(List<FetchableTopicResponse> v) { this.responses = v; return this; } public static class FetchableTopicResponse implements Message { String topic; Uuid topicId; List<PartitionData> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topic", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(PartitionData.SCHEMA_0), "The topic partitions.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; 
// Per-version wire schemas for FetchResponseData.FetchableTopicResponse (auto-generated — do not edit).
// Versions 0-11 identify the topic by name (Type.STRING) with a non-compact partition array.
// Version 12 switches to the flexible format: COMPACT_STRING topic name, CompactArrayOf partitions,
// and an (empty) TaggedFieldsSection. Versions 13+ drop the topic name entirely and identify the
// topic by its UUID ("topic_id"); note SCHEMA_13 reuses PartitionData.SCHEMA_12 for the partitions.
// SCHEMAS is indexed by API version (0..15). The Readable constructor deserializes in place;
// the no-arg constructor yields the defaults: empty topic name, Uuid.ZERO_UUID, empty partition list.
public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("topic", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(PartitionData.SCHEMA_4), "The topic partitions.") ); public static final Schema SCHEMA_5 = new Schema( new Field("topic", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(PartitionData.SCHEMA_5), "The topic partitions.") ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = SCHEMA_8; public static final Schema SCHEMA_10 = SCHEMA_9; public static final Schema SCHEMA_11 = new Schema( new Field("topic", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(PartitionData.SCHEMA_11), "The topic partitions.") ); public static final Schema SCHEMA_12 = new Schema( new Field("topic", Type.COMPACT_STRING, "The topic name."), new Field("partitions", new CompactArrayOf(PartitionData.SCHEMA_12), "The topic partitions."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_13 = new Schema( new Field("topic_id", Type.UUID, "The unique topic ID"), new Field("partitions", new CompactArrayOf(PartitionData.SCHEMA_12), "The topic partitions."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_14 = SCHEMA_13; public static final Schema SCHEMA_15 = SCHEMA_14; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12, SCHEMA_13, SCHEMA_14, SCHEMA_15 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 15; public FetchableTopicResponse(Readable _readable, short _version) { read(_readable, _version); } public FetchableTopicResponse() { this.topic = ""; this.topicId = Uuid.ZERO_UUID; this.partitions = new ArrayList<PartitionData>(0); }
@Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 15; } @Override public void read(Readable _readable, short _version) { if (_version > 15) { throw new UnsupportedVersionException("Can't read version " + _version + " of FetchableTopicResponse"); } if (_version <= 12) { int length; if (_version >= 12) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field topic was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field topic had invalid length " + length); } else { this.topic = _readable.readString(length); } } else { this.topic = ""; } if (_version >= 13) { this.topicId = _readable.readUuid(); } else { this.topicId = Uuid.ZERO_UUID; } { if (_version >= 12) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<PartitionData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new PartitionData(_readable, _version)); } this.partitions = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<PartitionData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new 
PartitionData(_readable, _version)); } this.partitions = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 12) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version <= 12) { { byte[] _stringBytes = _cache.getSerializedValue(topic); if (_version >= 12) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } if (_version >= 13) { _writable.writeUuid(topicId); } if (_version >= 12) { _writable.writeUnsignedVarint(partitions.size() + 1); for (PartitionData partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitions.size()); for (PartitionData partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 12) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 15) { throw new UnsupportedVersionException("Can't size version " + _version + " of FetchableTopicResponse"); } if (_version <= 12) { { byte[] _stringBytes = 
topic.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'topic' field is too long to be serialized"); } _cache.cacheSerializedValue(topic, _stringBytes); if (_version >= 12) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } if (_version >= 13) { _size.addBytes(16); } { if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { _size.addBytes(4); } for (PartitionData partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof FetchableTopicResponse)) return false; FetchableTopicResponse other = (FetchableTopicResponse) obj; if (this.topic == null) { if (other.topic != null) return false; } else { if (!this.topic.equals(other.topic)) return false; } if (!this.topicId.equals(other.topicId)) return false; if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (topic == null ? 
0 : topic.hashCode()); hashCode = 31 * hashCode + topicId.hashCode(); hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode()); return hashCode; } @Override public FetchableTopicResponse duplicate() { FetchableTopicResponse _duplicate = new FetchableTopicResponse(); _duplicate.topic = topic; _duplicate.topicId = topicId; ArrayList<PartitionData> newPartitions = new ArrayList<PartitionData>(partitions.size()); for (PartitionData _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "FetchableTopicResponse(" + "topic=" + ((topic == null) ? "null" : "'" + topic.toString() + "'") + ", topicId=" + topicId.toString() + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String topic() { return this.topic; } public Uuid topicId() { return this.topicId; } public List<PartitionData> partitions() { return this.partitions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public FetchableTopicResponse setTopic(String v) { this.topic = v; return this; } public FetchableTopicResponse setTopicId(Uuid v) { this.topicId = v; return this; } public FetchableTopicResponse setPartitions(List<PartitionData> v) { this.partitions = v; return this; } } public static class PartitionData implements Message { int partitionIndex; short errorCode; long highWatermark; long lastStableOffset; long logStartOffset; EpochEndOffset divergingEpoch; LeaderIdAndEpoch currentLeader; SnapshotId snapshotId; List<AbortedTransaction> abortedTransactions; int preferredReadReplica; BaseRecords records; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 
if there was no fetch error."), new Field("high_watermark", Type.INT64, "The current high water mark."), new Field("records", Type.RECORDS, "The record data.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no fetch error."), new Field("high_watermark", Type.INT64, "The current high water mark."), new Field("last_stable_offset", Type.INT64, "The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED)"), new Field("aborted_transactions", ArrayOf.nullable(AbortedTransaction.SCHEMA_4), "The aborted transactions."), new Field("records", Type.RECORDS, "The record data.") ); public static final Schema SCHEMA_5 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no fetch error."), new Field("high_watermark", Type.INT64, "The current high water mark."), new Field("last_stable_offset", Type.INT64, "The last stable offset (or LSO) of the partition. 
This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED)"), new Field("log_start_offset", Type.INT64, "The current log start offset."), new Field("aborted_transactions", ArrayOf.nullable(AbortedTransaction.SCHEMA_4), "The aborted transactions."), new Field("records", Type.RECORDS, "The record data.") ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = SCHEMA_8; public static final Schema SCHEMA_10 = SCHEMA_9; public static final Schema SCHEMA_11 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no fetch error."), new Field("high_watermark", Type.INT64, "The current high water mark."), new Field("last_stable_offset", Type.INT64, "The last stable offset (or LSO) of the partition. This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED)"), new Field("log_start_offset", Type.INT64, "The current log start offset."), new Field("aborted_transactions", ArrayOf.nullable(AbortedTransaction.SCHEMA_4), "The aborted transactions."), new Field("preferred_read_replica", Type.INT32, "The preferred read replica for the consumer to use on its next fetch request"), new Field("records", Type.RECORDS, "The record data.") ); public static final Schema SCHEMA_12 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no fetch error."), new Field("high_watermark", Type.INT64, "The current high water mark."), new Field("last_stable_offset", Type.INT64, "The last stable offset (or LSO) of the partition. 
This is the last offset such that the state of all transactional records prior to this offset have been decided (ABORTED or COMMITTED)"), new Field("log_start_offset", Type.INT64, "The current log start offset."), new Field("aborted_transactions", CompactArrayOf.nullable(AbortedTransaction.SCHEMA_12), "The aborted transactions."), new Field("preferred_read_replica", Type.INT32, "The preferred read replica for the consumer to use on its next fetch request"), new Field("records", Type.COMPACT_RECORDS, "The record data."), TaggedFieldsSection.of( 0, new Field("diverging_epoch", EpochEndOffset.SCHEMA_12, "In case divergence is detected based on the `LastFetchedEpoch` and `FetchOffset` in the request, this field indicates the largest epoch and its end offset such that subsequent records are known to diverge"), 1, new Field("current_leader", LeaderIdAndEpoch.SCHEMA_12, ""), 2, new Field("snapshot_id", SnapshotId.SCHEMA_12, "In the case of fetching an offset less than the LogStartOffset, this is the end offset and epoch that should be used in the FetchSnapshot request.") ) ); public static final Schema SCHEMA_13 = SCHEMA_12; public static final Schema SCHEMA_14 = SCHEMA_13; public static final Schema SCHEMA_15 = SCHEMA_14; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12, SCHEMA_13, SCHEMA_14, SCHEMA_15 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 15; public PartitionData(Readable _readable, short _version) { read(_readable, _version); } public PartitionData() { this.partitionIndex = 0; this.errorCode = (short) 0; this.highWatermark = 0L; this.lastStableOffset = -1L; this.logStartOffset = -1L; this.divergingEpoch = new EpochEndOffset(); this.currentLeader = new LeaderIdAndEpoch(); this.snapshotId = new SnapshotId(); this.abortedTransactions = new 
ArrayList<AbortedTransaction>(0); this.preferredReadReplica = -1; this.records = null; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 15; } @Override public void read(Readable _readable, short _version) { if (_version > 15) { throw new UnsupportedVersionException("Can't read version " + _version + " of PartitionData"); } this.partitionIndex = _readable.readInt(); this.errorCode = _readable.readShort(); this.highWatermark = _readable.readLong(); if (_version >= 4) { this.lastStableOffset = _readable.readLong(); } else { this.lastStableOffset = -1L; } if (_version >= 5) { this.logStartOffset = _readable.readLong(); } else { this.logStartOffset = -1L; } { this.divergingEpoch = new EpochEndOffset(); } { this.currentLeader = new LeaderIdAndEpoch(); } { this.snapshotId = new SnapshotId(); } if (_version >= 4) { if (_version >= 12) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { this.abortedTransactions = null; } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<AbortedTransaction> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AbortedTransaction(_readable, _version)); } this.abortedTransactions = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { this.abortedTransactions = null; } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<AbortedTransaction> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AbortedTransaction(_readable, _version)); } 
this.abortedTransactions = newCollection; } } } else { this.abortedTransactions = new ArrayList<AbortedTransaction>(0); } if (_version >= 11) { this.preferredReadReplica = _readable.readInt(); } else { this.preferredReadReplica = -1; } { int length; if (_version >= 12) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readInt(); } if (length < 0) { this.records = null; } else { this.records = _readable.readRecords(length); } } this._unknownTaggedFields = null; if (_version >= 12) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { case 0: { this.divergingEpoch = new EpochEndOffset(_readable, _version); break; } case 1: { this.currentLeader = new LeaderIdAndEpoch(_readable, _version); break; } case 2: { this.snapshotId = new SnapshotId(_readable, _version); break; } default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(partitionIndex); _writable.writeShort(errorCode); _writable.writeLong(highWatermark); if (_version >= 4) { _writable.writeLong(lastStableOffset); } if (_version >= 5) { _writable.writeLong(logStartOffset); } if (_version >= 12) { if (!this.divergingEpoch.equals(new EpochEndOffset())) { _numTaggedFields++; } } else { if (!this.divergingEpoch.equals(new EpochEndOffset())) { throw new UnsupportedVersionException("Attempted to write a non-default divergingEpoch at version " + _version); } } if (_version >= 12) { if (!this.currentLeader.equals(new LeaderIdAndEpoch())) { _numTaggedFields++; } } else { if (!this.currentLeader.equals(new LeaderIdAndEpoch())) { throw new UnsupportedVersionException("Attempted to write a non-default currentLeader at version " + _version); 
} } if (_version >= 12) { if (!this.snapshotId.equals(new SnapshotId())) { _numTaggedFields++; } } else { if (!this.snapshotId.equals(new SnapshotId())) { throw new UnsupportedVersionException("Attempted to write a non-default snapshotId at version " + _version); } } if (_version >= 4) { if (_version >= 12) { if (abortedTransactions == null) { _writable.writeUnsignedVarint(0); } else { _writable.writeUnsignedVarint(abortedTransactions.size() + 1); for (AbortedTransaction abortedTransactionsElement : abortedTransactions) { abortedTransactionsElement.write(_writable, _cache, _version); } } } else { if (abortedTransactions == null) { _writable.writeInt(-1); } else { _writable.writeInt(abortedTransactions.size()); for (AbortedTransaction abortedTransactionsElement : abortedTransactions) { abortedTransactionsElement.write(_writable, _cache, _version); } } } } if (_version >= 11) { _writable.writeInt(preferredReadReplica); } else { if (this.preferredReadReplica != -1) { throw new UnsupportedVersionException("Attempted to write a non-default preferredReadReplica at version " + _version); } } if (records == null) { if (_version >= 12) { _writable.writeUnsignedVarint(0); } else { _writable.writeInt(-1); } } else { if (_version >= 12) { _writable.writeUnsignedVarint(records.sizeInBytes() + 1); } else { _writable.writeInt(records.sizeInBytes()); } _writable.writeRecords(records); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 12) { _writable.writeUnsignedVarint(_numTaggedFields); { if (!this.divergingEpoch.equals(new EpochEndOffset())) { _writable.writeUnsignedVarint(0); _writable.writeUnsignedVarint(this.divergingEpoch.size(_cache, _version)); divergingEpoch.write(_writable, _cache, _version); } } { if (!this.currentLeader.equals(new LeaderIdAndEpoch())) { _writable.writeUnsignedVarint(1); _writable.writeUnsignedVarint(this.currentLeader.size(_cache, _version)); 
currentLeader.write(_writable, _cache, _version); } } { if (!this.snapshotId.equals(new SnapshotId())) { _writable.writeUnsignedVarint(2); _writable.writeUnsignedVarint(this.snapshotId.size(_cache, _version)); snapshotId.write(_writable, _cache, _version); } } _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 15) { throw new UnsupportedVersionException("Can't size version " + _version + " of PartitionData"); } _size.addBytes(4); _size.addBytes(2); _size.addBytes(8); if (_version >= 4) { _size.addBytes(8); } if (_version >= 5) { _size.addBytes(8); } if (_version >= 12) { { if (!this.divergingEpoch.equals(new EpochEndOffset())) { _numTaggedFields++; _size.addBytes(1); int _sizeBeforeStruct = _size.totalSize(); this.divergingEpoch.addSize(_size, _cache, _version); int _structSize = _size.totalSize() - _sizeBeforeStruct; _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_structSize)); } } } if (_version >= 12) { { if (!this.currentLeader.equals(new LeaderIdAndEpoch())) { _numTaggedFields++; _size.addBytes(1); int _sizeBeforeStruct = _size.totalSize(); this.currentLeader.addSize(_size, _cache, _version); int _structSize = _size.totalSize() - _sizeBeforeStruct; _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_structSize)); } } } if (_version >= 12) { { if (!this.snapshotId.equals(new SnapshotId())) { _numTaggedFields++; _size.addBytes(1); int _sizeBeforeStruct = _size.totalSize(); this.snapshotId.addSize(_size, _cache, _version); int _structSize = _size.totalSize() - _sizeBeforeStruct; _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_structSize)); } } } if (_version >= 4) { if (abortedTransactions == null) { if (_version >= 12) { _size.addBytes(1); } 
else { _size.addBytes(4); } } else { if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(abortedTransactions.size() + 1)); } else { _size.addBytes(4); } for (AbortedTransaction abortedTransactionsElement : abortedTransactions) { abortedTransactionsElement.addSize(_size, _cache, _version); } } } if (_version >= 11) { _size.addBytes(4); } if (records == null) { if (_version >= 12) { _size.addBytes(1); } else { _size.addBytes(4); } } else { _size.addZeroCopyBytes(records.sizeInBytes()); if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(records.sizeInBytes() + 1)); } else { _size.addBytes(4); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof PartitionData)) return false; PartitionData other = (PartitionData) obj; if (partitionIndex != other.partitionIndex) return false; if (errorCode != other.errorCode) return false; if (highWatermark != other.highWatermark) return false; if (lastStableOffset != other.lastStableOffset) return false; if (logStartOffset != other.logStartOffset) return false; if (this.divergingEpoch == null) { if (other.divergingEpoch != null) return false; } else { if (!this.divergingEpoch.equals(other.divergingEpoch)) return false; } if (this.currentLeader == null) { if (other.currentLeader != null) return false; } else { if (!this.currentLeader.equals(other.currentLeader)) return false; } if (this.snapshotId == null) { if (other.snapshotId != 
null) return false; } else { if (!this.snapshotId.equals(other.snapshotId)) return false; } if (this.abortedTransactions == null) { if (other.abortedTransactions != null) return false; } else { if (!this.abortedTransactions.equals(other.abortedTransactions)) return false; } if (preferredReadReplica != other.preferredReadReplica) return false; if (!Objects.equals(this.records, other.records)) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partitionIndex; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + ((int) (highWatermark >> 32) ^ (int) highWatermark); hashCode = 31 * hashCode + ((int) (lastStableOffset >> 32) ^ (int) lastStableOffset); hashCode = 31 * hashCode + ((int) (logStartOffset >> 32) ^ (int) logStartOffset); hashCode = 31 * hashCode + (divergingEpoch == null ? 0 : divergingEpoch.hashCode()); hashCode = 31 * hashCode + (currentLeader == null ? 0 : currentLeader.hashCode()); hashCode = 31 * hashCode + (snapshotId == null ? 0 : snapshotId.hashCode()); hashCode = 31 * hashCode + (abortedTransactions == null ? 
0 : abortedTransactions.hashCode()); hashCode = 31 * hashCode + preferredReadReplica; hashCode = 31 * hashCode + Objects.hashCode(records); return hashCode; } @Override public PartitionData duplicate() { PartitionData _duplicate = new PartitionData(); _duplicate.partitionIndex = partitionIndex; _duplicate.errorCode = errorCode; _duplicate.highWatermark = highWatermark; _duplicate.lastStableOffset = lastStableOffset; _duplicate.logStartOffset = logStartOffset; _duplicate.divergingEpoch = divergingEpoch.duplicate(); _duplicate.currentLeader = currentLeader.duplicate(); _duplicate.snapshotId = snapshotId.duplicate(); if (abortedTransactions == null) { _duplicate.abortedTransactions = null; } else { ArrayList<AbortedTransaction> newAbortedTransactions = new ArrayList<AbortedTransaction>(abortedTransactions.size()); for (AbortedTransaction _element : abortedTransactions) { newAbortedTransactions.add(_element.duplicate()); } _duplicate.abortedTransactions = newAbortedTransactions; } _duplicate.preferredReadReplica = preferredReadReplica; if (records == null) { _duplicate.records = null; } else { _duplicate.records = MemoryRecords.readableRecords(((MemoryRecords) records).buffer().duplicate()); } return _duplicate; } @Override public String toString() { return "PartitionData(" + "partitionIndex=" + partitionIndex + ", errorCode=" + errorCode + ", highWatermark=" + highWatermark + ", lastStableOffset=" + lastStableOffset + ", logStartOffset=" + logStartOffset + ", divergingEpoch=" + divergingEpoch.toString() + ", currentLeader=" + currentLeader.toString() + ", snapshotId=" + snapshotId.toString() + ", abortedTransactions=" + ((abortedTransactions == null) ? 
"null" : MessageUtil.deepToString(abortedTransactions.iterator())) + ", preferredReadReplica=" + preferredReadReplica + ", records=" + records + ")"; } public int partitionIndex() { return this.partitionIndex; } public short errorCode() { return this.errorCode; } public long highWatermark() { return this.highWatermark; } public long lastStableOffset() { return this.lastStableOffset; } public long logStartOffset() { return this.logStartOffset; } public EpochEndOffset divergingEpoch() { return this.divergingEpoch; } public LeaderIdAndEpoch currentLeader() { return this.currentLeader; } public SnapshotId snapshotId() { return this.snapshotId; } public List<AbortedTransaction> abortedTransactions() { return this.abortedTransactions; } public int preferredReadReplica() { return this.preferredReadReplica; } public BaseRecords records() { return this.records; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public PartitionData setPartitionIndex(int v) { this.partitionIndex = v; return this; } public PartitionData setErrorCode(short v) { this.errorCode = v; return this; } public PartitionData setHighWatermark(long v) { this.highWatermark = v; return this; } public PartitionData setLastStableOffset(long v) { this.lastStableOffset = v; return this; } public PartitionData setLogStartOffset(long v) { this.logStartOffset = v; return this; } public PartitionData setDivergingEpoch(EpochEndOffset v) { this.divergingEpoch = v; return this; } public PartitionData setCurrentLeader(LeaderIdAndEpoch v) { this.currentLeader = v; return this; } public PartitionData setSnapshotId(SnapshotId v) { this.snapshotId = v; return this; } public PartitionData setAbortedTransactions(List<AbortedTransaction> v) { this.abortedTransactions = v; return this; } public PartitionData setPreferredReadReplica(int v) { this.preferredReadReplica = v; return this; } public 
PartitionData setRecords(BaseRecords v) { this.records = v; return this; } } public static class EpochEndOffset implements Message { int epoch; long endOffset; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_12 = new Schema( new Field("epoch", Type.INT32, ""), new Field("end_offset", Type.INT64, ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_13 = SCHEMA_12; public static final Schema SCHEMA_14 = SCHEMA_13; public static final Schema SCHEMA_15 = SCHEMA_14; public static final Schema[] SCHEMAS = new Schema[] { null, null, null, null, null, null, null, null, null, null, null, null, SCHEMA_12, SCHEMA_13, SCHEMA_14, SCHEMA_15 }; public static final short LOWEST_SUPPORTED_VERSION = 12; public static final short HIGHEST_SUPPORTED_VERSION = 15; public EpochEndOffset(Readable _readable, short _version) { read(_readable, _version); } public EpochEndOffset() { this.epoch = -1; this.endOffset = -1L; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 15; } @Override public void read(Readable _readable, short _version) { if (_version > 15) { throw new UnsupportedVersionException("Can't read version " + _version + " of EpochEndOffset"); } this.epoch = _readable.readInt(); this.endOffset = _readable.readLong(); this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { if (_version < 12) { throw new UnsupportedVersionException("Can't write version " + _version + " of EpochEndOffset"); } int _numTaggedFields = 0; _writable.writeInt(epoch); 
// (continuation of EpochEndOffset.write) — writes end_offset, then the tagged-field trailer.
// The trailer is written unconditionally because write() rejects _version < 12 (see the guard on
// the previous line), so this struct is only ever serialized in the flexible format.
// addSize below accounts for the fixed fields (4 bytes epoch + 8 bytes endOffset) plus any unknown
// raw tagged fields (each sized as varint(tag) + varint(size) + payload) and the varint field count.
// equals/hashCode cover epoch, endOffset, and the raw tagged fields; duplicate() deep-copies the
// two primitive fields (tagged fields are not copied); toString omits tagged fields.
// unknownTaggedFields() lazily allocates the backing list on first access.
_writable.writeLong(endOffset); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 15) { throw new UnsupportedVersionException("Can't size version " + _version + " of EpochEndOffset"); } _size.addBytes(4); _size.addBytes(8); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof EpochEndOffset)) return false; EpochEndOffset other = (EpochEndOffset) obj; if (epoch != other.epoch) return false; if (endOffset != other.endOffset) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + epoch; hashCode = 31 * hashCode + ((int) (endOffset >> 32) ^ (int) endOffset); return hashCode; } @Override public EpochEndOffset duplicate() { EpochEndOffset _duplicate = new EpochEndOffset(); _duplicate.epoch = epoch; _duplicate.endOffset = endOffset; return _duplicate; } @Override public String toString() { return "EpochEndOffset(" + "epoch=" + epoch + ", endOffset=" + endOffset + ")"; } public int epoch() { return this.epoch; } public long endOffset() { return this.endOffset; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; }
public EpochEndOffset setEpoch(int v) { this.epoch = v; return this; } public EpochEndOffset setEndOffset(long v) { this.endOffset = v; return this; } } public static class LeaderIdAndEpoch implements Message { int leaderId; int leaderEpoch; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_12 = new Schema( new Field("leader_id", Type.INT32, "The ID of the current leader or -1 if the leader is unknown."), new Field("leader_epoch", Type.INT32, "The latest known leader epoch"), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_13 = SCHEMA_12; public static final Schema SCHEMA_14 = SCHEMA_13; public static final Schema SCHEMA_15 = SCHEMA_14; public static final Schema[] SCHEMAS = new Schema[] { null, null, null, null, null, null, null, null, null, null, null, null, SCHEMA_12, SCHEMA_13, SCHEMA_14, SCHEMA_15 }; public static final short LOWEST_SUPPORTED_VERSION = 12; public static final short HIGHEST_SUPPORTED_VERSION = 15; public LeaderIdAndEpoch(Readable _readable, short _version) { read(_readable, _version); } public LeaderIdAndEpoch() { this.leaderId = -1; this.leaderEpoch = -1; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 15; } @Override public void read(Readable _readable, short _version) { if (_version > 15) { throw new UnsupportedVersionException("Can't read version " + _version + " of LeaderIdAndEpoch"); } this.leaderId = _readable.readInt(); this.leaderEpoch = _readable.readInt(); this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { 
if (_version < 12) { throw new UnsupportedVersionException("Can't write version " + _version + " of LeaderIdAndEpoch"); } int _numTaggedFields = 0; _writable.writeInt(leaderId); _writable.writeInt(leaderEpoch); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 15) { throw new UnsupportedVersionException("Can't size version " + _version + " of LeaderIdAndEpoch"); } _size.addBytes(4); _size.addBytes(4); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof LeaderIdAndEpoch)) return false; LeaderIdAndEpoch other = (LeaderIdAndEpoch) obj; if (leaderId != other.leaderId) return false; if (leaderEpoch != other.leaderEpoch) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + leaderId; hashCode = 31 * hashCode + leaderEpoch; return hashCode; } @Override public LeaderIdAndEpoch duplicate() { LeaderIdAndEpoch _duplicate = new LeaderIdAndEpoch(); _duplicate.leaderId = leaderId; _duplicate.leaderEpoch = leaderEpoch; return _duplicate; } @Override public String toString() { return "LeaderIdAndEpoch(" + "leaderId=" + leaderId + ", leaderEpoch=" + leaderEpoch + ")"; } public int leaderId() { return this.leaderId; } public int leaderEpoch() { 
return this.leaderEpoch; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public LeaderIdAndEpoch setLeaderId(int v) { this.leaderId = v; return this; } public LeaderIdAndEpoch setLeaderEpoch(int v) { this.leaderEpoch = v; return this; } } public static class SnapshotId implements Message { long endOffset; int epoch; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_12 = new Schema( new Field("end_offset", Type.INT64, ""), new Field("epoch", Type.INT32, ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_13 = SCHEMA_12; public static final Schema SCHEMA_14 = SCHEMA_13; public static final Schema SCHEMA_15 = SCHEMA_14; public static final Schema[] SCHEMAS = new Schema[] { null, null, null, null, null, null, null, null, null, null, null, null, SCHEMA_12, SCHEMA_13, SCHEMA_14, SCHEMA_15 }; public static final short LOWEST_SUPPORTED_VERSION = 12; public static final short HIGHEST_SUPPORTED_VERSION = 15; public SnapshotId(Readable _readable, short _version) { read(_readable, _version); } public SnapshotId() { this.endOffset = -1L; this.epoch = -1; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 15; } @Override public void read(Readable _readable, short _version) { if (_version > 15) { throw new UnsupportedVersionException("Can't read version " + _version + " of SnapshotId"); } this.endOffset = _readable.readLong(); this.epoch = _readable.readInt(); this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public 
void write(Writable _writable, ObjectSerializationCache _cache, short _version) { if (_version < 12) { throw new UnsupportedVersionException("Can't write version " + _version + " of SnapshotId"); } int _numTaggedFields = 0; _writable.writeLong(endOffset); _writable.writeInt(epoch); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 15) { throw new UnsupportedVersionException("Can't size version " + _version + " of SnapshotId"); } _size.addBytes(8); _size.addBytes(4); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof SnapshotId)) return false; SnapshotId other = (SnapshotId) obj; if (endOffset != other.endOffset) return false; if (epoch != other.epoch) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + ((int) (endOffset >> 32) ^ (int) endOffset); hashCode = 31 * hashCode + epoch; return hashCode; } @Override public SnapshotId duplicate() { SnapshotId _duplicate = new SnapshotId(); _duplicate.endOffset = endOffset; _duplicate.epoch = epoch; return _duplicate; } @Override public String toString() { return "SnapshotId(" + "endOffset=" + endOffset + ", epoch=" + epoch + ")"; } public long endOffset() { return this.endOffset; } 
public int epoch() { return this.epoch; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public SnapshotId setEndOffset(long v) { this.endOffset = v; return this; } public SnapshotId setEpoch(int v) { this.epoch = v; return this; } } public static class AbortedTransaction implements Message { long producerId; long firstOffset; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_4 = new Schema( new Field("producer_id", Type.INT64, "The producer id associated with the aborted transaction."), new Field("first_offset", Type.INT64, "The first offset in the aborted transaction.") ); public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = SCHEMA_8; public static final Schema SCHEMA_10 = SCHEMA_9; public static final Schema SCHEMA_11 = SCHEMA_10; public static final Schema SCHEMA_12 = new Schema( new Field("producer_id", Type.INT64, "The producer id associated with the aborted transaction."), new Field("first_offset", Type.INT64, "The first offset in the aborted transaction."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_13 = SCHEMA_12; public static final Schema SCHEMA_14 = SCHEMA_13; public static final Schema SCHEMA_15 = SCHEMA_14; public static final Schema[] SCHEMAS = new Schema[] { null, null, null, null, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12, SCHEMA_13, SCHEMA_14, SCHEMA_15 }; public static final short LOWEST_SUPPORTED_VERSION = 4; public static final short HIGHEST_SUPPORTED_VERSION = 15; public AbortedTransaction(Readable _readable, short _version) { read(_readable, _version); } public AbortedTransaction() { this.producerId = 0L; this.firstOffset = 0L; } @Override 
public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 15; } @Override public void read(Readable _readable, short _version) { if (_version > 15) { throw new UnsupportedVersionException("Can't read version " + _version + " of AbortedTransaction"); } this.producerId = _readable.readLong(); this.firstOffset = _readable.readLong(); this._unknownTaggedFields = null; if (_version >= 12) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { if (_version < 4) { throw new UnsupportedVersionException("Can't write version " + _version + " of AbortedTransaction"); } int _numTaggedFields = 0; _writable.writeLong(producerId); _writable.writeLong(firstOffset); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 12) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 15) { throw new UnsupportedVersionException("Can't size version " + _version + " of AbortedTransaction"); } _size.addBytes(8); _size.addBytes(8); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 12) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof AbortedTransaction)) return false; AbortedTransaction other = (AbortedTransaction) obj; if (producerId != other.producerId) return false; if (firstOffset != other.firstOffset) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + ((int) (producerId >> 32) ^ (int) producerId); hashCode = 31 * hashCode + ((int) (firstOffset >> 32) ^ (int) firstOffset); return hashCode; } @Override public AbortedTransaction duplicate() { AbortedTransaction _duplicate = new AbortedTransaction(); _duplicate.producerId = producerId; _duplicate.firstOffset = firstOffset; return _duplicate; } @Override public String toString() { return "AbortedTransaction(" + "producerId=" + producerId + ", firstOffset=" + firstOffset + ")"; } public long producerId() { return this.producerId; } public long firstOffset() { return this.firstOffset; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AbortedTransaction setProducerId(long v) { this.producerId = v; return this; } public AbortedTransaction setFirstOffset(long v) { this.firstOffset = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/FetchResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.BinaryNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.LongNode; import com.fasterxml.jackson.databind.node.NullNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.nio.ByteBuffer; import java.util.ArrayList; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.record.MemoryRecords; import static org.apache.kafka.common.message.FetchResponseData.*; public class FetchResponseDataJsonConverter { public static FetchResponseData read(JsonNode _node, short _version) { FetchResponseData _object = new FetchResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode 
== null) { if (_version >= 1) { throw new RuntimeException("FetchResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = 0; } } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "FetchResponseData"); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { if (_version >= 7) { throw new RuntimeException("FetchResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = (short) 0; } } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "FetchResponseData"); } JsonNode _sessionIdNode = _node.get("sessionId"); if (_sessionIdNode == null) { if (_version >= 7) { throw new RuntimeException("FetchResponseData: unable to locate field 'sessionId', which is mandatory in version " + _version); } else { _object.sessionId = 0; } } else { _object.sessionId = MessageUtil.jsonNodeToInt(_sessionIdNode, "FetchResponseData"); } JsonNode _responsesNode = _node.get("responses"); if (_responsesNode == null) { throw new RuntimeException("FetchResponseData: unable to locate field 'responses', which is mandatory in version " + _version); } else { if (!_responsesNode.isArray()) { throw new RuntimeException("FetchResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<FetchableTopicResponse> _collection = new ArrayList<FetchableTopicResponse>(_responsesNode.size()); _object.responses = _collection; for (JsonNode _element : _responsesNode) { _collection.add(FetchableTopicResponseJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(FetchResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_version >= 1) { _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); } if (_version >= 7) { _node.set("errorCode", new 
ShortNode(_object.errorCode)); } if (_version >= 7) { _node.set("sessionId", new IntNode(_object.sessionId)); } else { if (_object.sessionId != 0) { throw new UnsupportedVersionException("Attempted to write a non-default sessionId at version " + _version); } } ArrayNode _responsesArray = new ArrayNode(JsonNodeFactory.instance); for (FetchableTopicResponse _element : _object.responses) { _responsesArray.add(FetchableTopicResponseJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("responses", _responsesArray); return _node; } public static JsonNode write(FetchResponseData _object, short _version) { return write(_object, _version, true); } public static class AbortedTransactionJsonConverter { public static AbortedTransaction read(JsonNode _node, short _version) { AbortedTransaction _object = new AbortedTransaction(); if (_version < 4) { throw new UnsupportedVersionException("Can't read version " + _version + " of AbortedTransaction"); } JsonNode _producerIdNode = _node.get("producerId"); if (_producerIdNode == null) { throw new RuntimeException("AbortedTransaction: unable to locate field 'producerId', which is mandatory in version " + _version); } else { _object.producerId = MessageUtil.jsonNodeToLong(_producerIdNode, "AbortedTransaction"); } JsonNode _firstOffsetNode = _node.get("firstOffset"); if (_firstOffsetNode == null) { throw new RuntimeException("AbortedTransaction: unable to locate field 'firstOffset', which is mandatory in version " + _version); } else { _object.firstOffset = MessageUtil.jsonNodeToLong(_firstOffsetNode, "AbortedTransaction"); } return _object; } public static JsonNode write(AbortedTransaction _object, short _version, boolean _serializeRecords) { if (_version < 4) { throw new UnsupportedVersionException("Can't write version " + _version + " of AbortedTransaction"); } ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("producerId", new LongNode(_object.producerId)); _node.set("firstOffset", new 
LongNode(_object.firstOffset)); return _node; } public static JsonNode write(AbortedTransaction _object, short _version) { return write(_object, _version, true); } } public static class EpochEndOffsetJsonConverter { public static EpochEndOffset read(JsonNode _node, short _version) { EpochEndOffset _object = new EpochEndOffset(); if (_version < 12) { throw new UnsupportedVersionException("Can't read version " + _version + " of EpochEndOffset"); } JsonNode _epochNode = _node.get("epoch"); if (_epochNode == null) { throw new RuntimeException("EpochEndOffset: unable to locate field 'epoch', which is mandatory in version " + _version); } else { _object.epoch = MessageUtil.jsonNodeToInt(_epochNode, "EpochEndOffset"); } JsonNode _endOffsetNode = _node.get("endOffset"); if (_endOffsetNode == null) { throw new RuntimeException("EpochEndOffset: unable to locate field 'endOffset', which is mandatory in version " + _version); } else { _object.endOffset = MessageUtil.jsonNodeToLong(_endOffsetNode, "EpochEndOffset"); } return _object; } public static JsonNode write(EpochEndOffset _object, short _version, boolean _serializeRecords) { if (_version < 12) { throw new UnsupportedVersionException("Can't write version " + _version + " of EpochEndOffset"); } ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("epoch", new IntNode(_object.epoch)); _node.set("endOffset", new LongNode(_object.endOffset)); return _node; } public static JsonNode write(EpochEndOffset _object, short _version) { return write(_object, _version, true); } } public static class FetchableTopicResponseJsonConverter { public static FetchableTopicResponse read(JsonNode _node, short _version) { FetchableTopicResponse _object = new FetchableTopicResponse(); JsonNode _topicNode = _node.get("topic"); if (_topicNode == null) { if (_version <= 12) { throw new RuntimeException("FetchableTopicResponse: unable to locate field 'topic', which is mandatory in version " + _version); } else { _object.topic = ""; } 
} else { if (!_topicNode.isTextual()) { throw new RuntimeException("FetchableTopicResponse expected a string type, but got " + _node.getNodeType()); } _object.topic = _topicNode.asText(); } JsonNode _topicIdNode = _node.get("topicId"); if (_topicIdNode == null) { if (_version >= 13) { throw new RuntimeException("FetchableTopicResponse: unable to locate field 'topicId', which is mandatory in version " + _version); } else { _object.topicId = Uuid.ZERO_UUID; } } else { if (!_topicIdNode.isTextual()) { throw new RuntimeException("FetchableTopicResponse expected a JSON string type, but got " + _node.getNodeType()); } _object.topicId = Uuid.fromString(_topicIdNode.asText()); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("FetchableTopicResponse: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new RuntimeException("FetchableTopicResponse expected a JSON array, but got " + _node.getNodeType()); } ArrayList<PartitionData> _collection = new ArrayList<PartitionData>(_partitionsNode.size()); _object.partitions = _collection; for (JsonNode _element : _partitionsNode) { _collection.add(PartitionDataJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(FetchableTopicResponse _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_version <= 12) { _node.set("topic", new TextNode(_object.topic)); } if (_version >= 13) { _node.set("topicId", new TextNode(_object.topicId.toString())); } ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (PartitionData _element : _object.partitions) { _partitionsArray.add(PartitionDataJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitions", _partitionsArray); return _node; } public static JsonNode write(FetchableTopicResponse _object, short _version) { 
return write(_object, _version, true); } } public static class LeaderIdAndEpochJsonConverter { public static LeaderIdAndEpoch read(JsonNode _node, short _version) { LeaderIdAndEpoch _object = new LeaderIdAndEpoch(); if (_version < 12) { throw new UnsupportedVersionException("Can't read version " + _version + " of LeaderIdAndEpoch"); } JsonNode _leaderIdNode = _node.get("leaderId"); if (_leaderIdNode == null) { throw new RuntimeException("LeaderIdAndEpoch: unable to locate field 'leaderId', which is mandatory in version " + _version); } else { _object.leaderId = MessageUtil.jsonNodeToInt(_leaderIdNode, "LeaderIdAndEpoch"); } JsonNode _leaderEpochNode = _node.get("leaderEpoch"); if (_leaderEpochNode == null) { throw new RuntimeException("LeaderIdAndEpoch: unable to locate field 'leaderEpoch', which is mandatory in version " + _version); } else { _object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "LeaderIdAndEpoch"); } return _object; } public static JsonNode write(LeaderIdAndEpoch _object, short _version, boolean _serializeRecords) { if (_version < 12) { throw new UnsupportedVersionException("Can't write version " + _version + " of LeaderIdAndEpoch"); } ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("leaderId", new IntNode(_object.leaderId)); _node.set("leaderEpoch", new IntNode(_object.leaderEpoch)); return _node; } public static JsonNode write(LeaderIdAndEpoch _object, short _version) { return write(_object, _version, true); } } public static class PartitionDataJsonConverter { public static PartitionData read(JsonNode _node, short _version) { PartitionData _object = new PartitionData(); JsonNode _partitionIndexNode = _node.get("partitionIndex"); if (_partitionIndexNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'partitionIndex', which is mandatory in version " + _version); } else { _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "PartitionData"); } JsonNode 
_errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "PartitionData"); } JsonNode _highWatermarkNode = _node.get("highWatermark"); if (_highWatermarkNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'highWatermark', which is mandatory in version " + _version); } else { _object.highWatermark = MessageUtil.jsonNodeToLong(_highWatermarkNode, "PartitionData"); } JsonNode _lastStableOffsetNode = _node.get("lastStableOffset"); if (_lastStableOffsetNode == null) { if (_version >= 4) { throw new RuntimeException("PartitionData: unable to locate field 'lastStableOffset', which is mandatory in version " + _version); } else { _object.lastStableOffset = -1L; } } else { _object.lastStableOffset = MessageUtil.jsonNodeToLong(_lastStableOffsetNode, "PartitionData"); } JsonNode _logStartOffsetNode = _node.get("logStartOffset"); if (_logStartOffsetNode == null) { if (_version >= 5) { throw new RuntimeException("PartitionData: unable to locate field 'logStartOffset', which is mandatory in version " + _version); } else { _object.logStartOffset = -1L; } } else { _object.logStartOffset = MessageUtil.jsonNodeToLong(_logStartOffsetNode, "PartitionData"); } JsonNode _divergingEpochNode = _node.get("divergingEpoch"); if (_divergingEpochNode == null) { _object.divergingEpoch = new EpochEndOffset(); } else { _object.divergingEpoch = EpochEndOffsetJsonConverter.read(_divergingEpochNode, _version); } JsonNode _currentLeaderNode = _node.get("currentLeader"); if (_currentLeaderNode == null) { _object.currentLeader = new LeaderIdAndEpoch(); } else { _object.currentLeader = LeaderIdAndEpochJsonConverter.read(_currentLeaderNode, _version); } JsonNode _snapshotIdNode = _node.get("snapshotId"); if (_snapshotIdNode == null) { _object.snapshotId = new 
SnapshotId(); } else { _object.snapshotId = SnapshotIdJsonConverter.read(_snapshotIdNode, _version); } JsonNode _abortedTransactionsNode = _node.get("abortedTransactions"); if (_abortedTransactionsNode == null) { if (_version >= 4) { throw new RuntimeException("PartitionData: unable to locate field 'abortedTransactions', which is mandatory in version " + _version); } else { _object.abortedTransactions = new ArrayList<AbortedTransaction>(0); } } else { if (_abortedTransactionsNode.isNull()) { _object.abortedTransactions = null; } else { if (!_abortedTransactionsNode.isArray()) { throw new RuntimeException("PartitionData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<AbortedTransaction> _collection = new ArrayList<AbortedTransaction>(_abortedTransactionsNode.size()); _object.abortedTransactions = _collection; for (JsonNode _element : _abortedTransactionsNode) { _collection.add(AbortedTransactionJsonConverter.read(_element, _version)); } } } JsonNode _preferredReadReplicaNode = _node.get("preferredReadReplica"); if (_preferredReadReplicaNode == null) { if (_version >= 11) { throw new RuntimeException("PartitionData: unable to locate field 'preferredReadReplica', which is mandatory in version " + _version); } else { _object.preferredReadReplica = -1; } } else { _object.preferredReadReplica = MessageUtil.jsonNodeToInt(_preferredReadReplicaNode, "PartitionData"); } JsonNode _recordsNode = _node.get("records"); if (_recordsNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'records', which is mandatory in version " + _version); } else { if (_recordsNode.isNull()) { _object.records = null; } else { _object.records = MemoryRecords.readableRecords(ByteBuffer.wrap(MessageUtil.jsonNodeToBinary(_recordsNode, "PartitionData"))); } } return _object; } public static JsonNode write(PartitionData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); 
_node.set("partitionIndex", new IntNode(_object.partitionIndex)); _node.set("errorCode", new ShortNode(_object.errorCode)); _node.set("highWatermark", new LongNode(_object.highWatermark)); if (_version >= 4) { _node.set("lastStableOffset", new LongNode(_object.lastStableOffset)); } if (_version >= 5) { _node.set("logStartOffset", new LongNode(_object.logStartOffset)); } if (_version >= 12) { if (!_object.divergingEpoch.equals(new EpochEndOffset())) { _node.set("divergingEpoch", EpochEndOffsetJsonConverter.write(_object.divergingEpoch, _version, _serializeRecords)); } } else { if (!_object.divergingEpoch.equals(new EpochEndOffset())) { throw new UnsupportedVersionException("Attempted to write a non-default divergingEpoch at version " + _version); } } if (_version >= 12) { if (!_object.currentLeader.equals(new LeaderIdAndEpoch())) { _node.set("currentLeader", LeaderIdAndEpochJsonConverter.write(_object.currentLeader, _version, _serializeRecords)); } } else { if (!_object.currentLeader.equals(new LeaderIdAndEpoch())) { throw new UnsupportedVersionException("Attempted to write a non-default currentLeader at version " + _version); } } if (_version >= 12) { if (!_object.snapshotId.equals(new SnapshotId())) { _node.set("snapshotId", SnapshotIdJsonConverter.write(_object.snapshotId, _version, _serializeRecords)); } } else { if (!_object.snapshotId.equals(new SnapshotId())) { throw new UnsupportedVersionException("Attempted to write a non-default snapshotId at version " + _version); } } if (_version >= 4) { if (_object.abortedTransactions == null) { _node.set("abortedTransactions", NullNode.instance); } else { ArrayNode _abortedTransactionsArray = new ArrayNode(JsonNodeFactory.instance); for (AbortedTransaction _element : _object.abortedTransactions) { _abortedTransactionsArray.add(AbortedTransactionJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("abortedTransactions", _abortedTransactionsArray); } } if (_version >= 11) { 
_node.set("preferredReadReplica", new IntNode(_object.preferredReadReplica)); } else { if (_object.preferredReadReplica != -1) { throw new UnsupportedVersionException("Attempted to write a non-default preferredReadReplica at version " + _version); } } if (_object.records == null) { _node.set("records", NullNode.instance); } else { if (_serializeRecords) { _node.set("records", new BinaryNode(new byte[]{})); } else { _node.set("recordsSizeInBytes", new IntNode(_object.records.sizeInBytes())); } } return _node; } public static JsonNode write(PartitionData _object, short _version) { return write(_object, _version, true); } } public static class SnapshotIdJsonConverter { public static SnapshotId read(JsonNode _node, short _version) { SnapshotId _object = new SnapshotId(); if (_version < 12) { throw new UnsupportedVersionException("Can't read version " + _version + " of SnapshotId"); } JsonNode _endOffsetNode = _node.get("endOffset"); if (_endOffsetNode == null) { throw new RuntimeException("SnapshotId: unable to locate field 'endOffset', which is mandatory in version " + _version); } else { _object.endOffset = MessageUtil.jsonNodeToLong(_endOffsetNode, "SnapshotId"); } JsonNode _epochNode = _node.get("epoch"); if (_epochNode == null) { throw new RuntimeException("SnapshotId: unable to locate field 'epoch', which is mandatory in version " + _version); } else { _object.epoch = MessageUtil.jsonNodeToInt(_epochNode, "SnapshotId"); } return _object; } public static JsonNode write(SnapshotId _object, short _version, boolean _serializeRecords) { if (_version < 12) { throw new UnsupportedVersionException("Can't write version " + _version + " of SnapshotId"); } ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("endOffset", new LongNode(_object.endOffset)); _node.set("epoch", new IntNode(_object.epoch)); return _node; } public static JsonNode write(SnapshotId _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/FetchSnapshotRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class FetchSnapshotRequestData implements ApiMessage { String clusterId; int replicaId; int 
maxBytes; List<TopicSnapshot> topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the follower"), new Field("max_bytes", Type.INT32, "The maximum bytes to fetch from all of the snapshots"), new Field("topics", new CompactArrayOf(TopicSnapshot.SCHEMA_0), "The topics to fetch"), TaggedFieldsSection.of( 0, new Field("cluster_id", Type.COMPACT_NULLABLE_STRING, "The clusterId if known, this is used to validate metadata fetches prior to broker registration") ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public FetchSnapshotRequestData(Readable _readable, short _version) { read(_readable, _version); } public FetchSnapshotRequestData() { this.clusterId = null; this.replicaId = -1; this.maxBytes = 0x7fffffff; this.topics = new ArrayList<TopicSnapshot>(0); } @Override public short apiKey() { return 59; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { { this.clusterId = null; } this.replicaId = _readable.readInt(); this.maxBytes = _readable.readInt(); { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<TopicSnapshot> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new TopicSnapshot(_readable, _version)); } this.topics = newCollection; } } this._unknownTaggedFields = null; int _numTaggedFields = 
_readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { case 0: { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { this.clusterId = null; } else if (length > 0x7fff) { throw new RuntimeException("string field clusterId had invalid length " + length); } else { this.clusterId = _readable.readString(length); } break; } default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (this.clusterId != null) { _numTaggedFields++; } _writable.writeInt(replicaId); _writable.writeInt(maxBytes); _writable.writeUnsignedVarint(topics.size() + 1); for (TopicSnapshot topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); if (clusterId != null) { _writable.writeUnsignedVarint(0); byte[] _stringBytes = _cache.getSerializedValue(this.clusterId); _writable.writeUnsignedVarint(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (clusterId == null) { } else { _numTaggedFields++; _size.addBytes(1); byte[] _stringBytes = clusterId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'clusterId' field is too long to be serialized"); } _cache.cacheSerializedValue(clusterId, _stringBytes); int 
_stringPrefixSize = ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1); _size.addBytes(_stringBytes.length + _stringPrefixSize + ByteUtils.sizeOfUnsignedVarint(_stringPrefixSize + _stringBytes.length)); } _size.addBytes(4); _size.addBytes(4); { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); for (TopicSnapshot topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof FetchSnapshotRequestData)) return false; FetchSnapshotRequestData other = (FetchSnapshotRequestData) obj; if (this.clusterId == null) { if (other.clusterId != null) return false; } else { if (!this.clusterId.equals(other.clusterId)) return false; } if (replicaId != other.replicaId) return false; if (maxBytes != other.maxBytes) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (clusterId == null ? 0 : clusterId.hashCode()); hashCode = 31 * hashCode + replicaId; hashCode = 31 * hashCode + maxBytes; hashCode = 31 * hashCode + (topics == null ? 
0 : topics.hashCode()); return hashCode; } @Override public FetchSnapshotRequestData duplicate() { FetchSnapshotRequestData _duplicate = new FetchSnapshotRequestData(); if (clusterId == null) { _duplicate.clusterId = null; } else { _duplicate.clusterId = clusterId; } _duplicate.replicaId = replicaId; _duplicate.maxBytes = maxBytes; ArrayList<TopicSnapshot> newTopics = new ArrayList<TopicSnapshot>(topics.size()); for (TopicSnapshot _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "FetchSnapshotRequestData(" + "clusterId=" + ((clusterId == null) ? "null" : "'" + clusterId.toString() + "'") + ", replicaId=" + replicaId + ", maxBytes=" + maxBytes + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public String clusterId() { return this.clusterId; } public int replicaId() { return this.replicaId; } public int maxBytes() { return this.maxBytes; } public List<TopicSnapshot> topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public FetchSnapshotRequestData setClusterId(String v) { this.clusterId = v; return this; } public FetchSnapshotRequestData setReplicaId(int v) { this.replicaId = v; return this; } public FetchSnapshotRequestData setMaxBytes(int v) { this.maxBytes = v; return this; } public FetchSnapshotRequestData setTopics(List<TopicSnapshot> v) { this.topics = v; return this; } public static class TopicSnapshot implements Message { String name; List<PartitionSnapshot> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.COMPACT_STRING, "The name of the topic to fetch"), new Field("partitions", new CompactArrayOf(PartitionSnapshot.SCHEMA_0), "The partitions to fetch"), TaggedFieldsSection.of( ) ); public 
static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public TopicSnapshot(Readable _readable, short _version) { read(_readable, _version); } public TopicSnapshot() { this.name = ""; this.partitions = new ArrayList<PartitionSnapshot>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of TopicSnapshot"); } { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<PartitionSnapshot> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new PartitionSnapshot(_readable, _version)); } this.partitions = newCollection; } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, 
ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } _writable.writeUnsignedVarint(partitions.size() + 1); for (PartitionSnapshot partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of TopicSnapshot"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); for (PartitionSnapshot partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof TopicSnapshot)) return false; TopicSnapshot other = (TopicSnapshot) obj; if (this.name == null) { if (other.name != null) return false; } else { if 
(!this.name.equals(other.name)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode()); return hashCode; } @Override public TopicSnapshot duplicate() { TopicSnapshot _duplicate = new TopicSnapshot(); _duplicate.name = name; ArrayList<PartitionSnapshot> newPartitions = new ArrayList<PartitionSnapshot>(partitions.size()); for (PartitionSnapshot _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "TopicSnapshot(" + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String name() { return this.name; } public List<PartitionSnapshot> partitions() { return this.partitions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public TopicSnapshot setName(String v) { this.name = v; return this; } public TopicSnapshot setPartitions(List<PartitionSnapshot> v) { this.partitions = v; return this; } } public static class PartitionSnapshot implements Message { int partition; int currentLeaderEpoch; SnapshotId snapshotId; long position; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("partition", Type.INT32, "The partition index"), new Field("current_leader_epoch", Type.INT32, "The current leader epoch of the partition, -1 for unknown leader epoch"), new Field("snapshot_id", SnapshotId.SCHEMA_0, 
"The snapshot endOffset and epoch to fetch"), new Field("position", Type.INT64, "The byte position within the snapshot to start fetching from"), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public PartitionSnapshot(Readable _readable, short _version) { read(_readable, _version); } public PartitionSnapshot() { this.partition = 0; this.currentLeaderEpoch = 0; this.snapshotId = new SnapshotId(); this.position = 0L; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of PartitionSnapshot"); } this.partition = _readable.readInt(); this.currentLeaderEpoch = _readable.readInt(); { this.snapshotId = new SnapshotId(_readable, _version); } this.position = _readable.readLong(); this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(partition); _writable.writeInt(currentLeaderEpoch); snapshotId.write(_writable, _cache, _version); _writable.writeLong(position); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void 
addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of PartitionSnapshot"); } _size.addBytes(4); _size.addBytes(4); { this.snapshotId.addSize(_size, _cache, _version); } _size.addBytes(8); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof PartitionSnapshot)) return false; PartitionSnapshot other = (PartitionSnapshot) obj; if (partition != other.partition) return false; if (currentLeaderEpoch != other.currentLeaderEpoch) return false; if (this.snapshotId == null) { if (other.snapshotId != null) return false; } else { if (!this.snapshotId.equals(other.snapshotId)) return false; } if (position != other.position) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partition; hashCode = 31 * hashCode + currentLeaderEpoch; hashCode = 31 * hashCode + (snapshotId == null ? 
0 : snapshotId.hashCode()); hashCode = 31 * hashCode + ((int) (position >> 32) ^ (int) position); return hashCode; } @Override public PartitionSnapshot duplicate() { PartitionSnapshot _duplicate = new PartitionSnapshot(); _duplicate.partition = partition; _duplicate.currentLeaderEpoch = currentLeaderEpoch; _duplicate.snapshotId = snapshotId.duplicate(); _duplicate.position = position; return _duplicate; } @Override public String toString() { return "PartitionSnapshot(" + "partition=" + partition + ", currentLeaderEpoch=" + currentLeaderEpoch + ", snapshotId=" + snapshotId.toString() + ", position=" + position + ")"; } public int partition() { return this.partition; } public int currentLeaderEpoch() { return this.currentLeaderEpoch; } public SnapshotId snapshotId() { return this.snapshotId; } public long position() { return this.position; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public PartitionSnapshot setPartition(int v) { this.partition = v; return this; } public PartitionSnapshot setCurrentLeaderEpoch(int v) { this.currentLeaderEpoch = v; return this; } public PartitionSnapshot setSnapshotId(SnapshotId v) { this.snapshotId = v; return this; } public PartitionSnapshot setPosition(long v) { this.position = v; return this; } } public static class SnapshotId implements Message { long endOffset; int epoch; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("end_offset", Type.INT64, ""), new Field("epoch", Type.INT32, ""), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public SnapshotId(Readable _readable, short _version) { read(_readable, _version); } public SnapshotId() { this.endOffset = 0L; this.epoch = 0; } 
@Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of SnapshotId"); } this.endOffset = _readable.readLong(); this.epoch = _readable.readInt(); this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeLong(endOffset); _writable.writeInt(epoch); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of SnapshotId"); } _size.addBytes(8); _size.addBytes(4); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof SnapshotId)) return false; SnapshotId other = (SnapshotId) obj; if (endOffset != other.endOffset) return false; if 
(epoch != other.epoch) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + ((int) (endOffset >> 32) ^ (int) endOffset); hashCode = 31 * hashCode + epoch; return hashCode; } @Override public SnapshotId duplicate() { SnapshotId _duplicate = new SnapshotId(); _duplicate.endOffset = endOffset; _duplicate.epoch = epoch; return _duplicate; } @Override public String toString() { return "SnapshotId(" + "endOffset=" + endOffset + ", epoch=" + epoch + ")"; } public long endOffset() { return this.endOffset; } public int epoch() { return this.epoch; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public SnapshotId setEndOffset(long v) { this.endOffset = v; return this; } public SnapshotId setEpoch(int v) { this.epoch = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/FetchSnapshotRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.LongNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.FetchSnapshotRequestData.*; public class FetchSnapshotRequestDataJsonConverter { public static FetchSnapshotRequestData read(JsonNode _node, short _version) { FetchSnapshotRequestData _object = new FetchSnapshotRequestData(); JsonNode _clusterIdNode = _node.get("clusterId"); if (_clusterIdNode == null) { _object.clusterId = null; } else { if (_clusterIdNode.isNull()) { _object.clusterId = null; } else { if (!_clusterIdNode.isTextual()) { throw new RuntimeException("FetchSnapshotRequestData expected a string type, but got " + _node.getNodeType()); } _object.clusterId = _clusterIdNode.asText(); } } JsonNode 
_replicaIdNode = _node.get("replicaId"); if (_replicaIdNode == null) { throw new RuntimeException("FetchSnapshotRequestData: unable to locate field 'replicaId', which is mandatory in version " + _version); } else { _object.replicaId = MessageUtil.jsonNodeToInt(_replicaIdNode, "FetchSnapshotRequestData"); } JsonNode _maxBytesNode = _node.get("maxBytes"); if (_maxBytesNode == null) { throw new RuntimeException("FetchSnapshotRequestData: unable to locate field 'maxBytes', which is mandatory in version " + _version); } else { _object.maxBytes = MessageUtil.jsonNodeToInt(_maxBytesNode, "FetchSnapshotRequestData"); } JsonNode _topicsNode = _node.get("topics"); if (_topicsNode == null) { throw new RuntimeException("FetchSnapshotRequestData: unable to locate field 'topics', which is mandatory in version " + _version); } else { if (!_topicsNode.isArray()) { throw new RuntimeException("FetchSnapshotRequestData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<TopicSnapshot> _collection = new ArrayList<TopicSnapshot>(_topicsNode.size()); _object.topics = _collection; for (JsonNode _element : _topicsNode) { _collection.add(TopicSnapshotJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(FetchSnapshotRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_object.clusterId != null) { _node.set("clusterId", new TextNode(_object.clusterId)); } _node.set("replicaId", new IntNode(_object.replicaId)); _node.set("maxBytes", new IntNode(_object.maxBytes)); ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance); for (TopicSnapshot _element : _object.topics) { _topicsArray.add(TopicSnapshotJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topics", _topicsArray); return _node; } public static JsonNode write(FetchSnapshotRequestData _object, short _version) { return write(_object, _version, true); } public static class 
PartitionSnapshotJsonConverter { public static PartitionSnapshot read(JsonNode _node, short _version) { PartitionSnapshot _object = new PartitionSnapshot(); JsonNode _partitionNode = _node.get("partition"); if (_partitionNode == null) { throw new RuntimeException("PartitionSnapshot: unable to locate field 'partition', which is mandatory in version " + _version); } else { _object.partition = MessageUtil.jsonNodeToInt(_partitionNode, "PartitionSnapshot"); } JsonNode _currentLeaderEpochNode = _node.get("currentLeaderEpoch"); if (_currentLeaderEpochNode == null) { throw new RuntimeException("PartitionSnapshot: unable to locate field 'currentLeaderEpoch', which is mandatory in version " + _version); } else { _object.currentLeaderEpoch = MessageUtil.jsonNodeToInt(_currentLeaderEpochNode, "PartitionSnapshot"); } JsonNode _snapshotIdNode = _node.get("snapshotId"); if (_snapshotIdNode == null) { throw new RuntimeException("PartitionSnapshot: unable to locate field 'snapshotId', which is mandatory in version " + _version); } else { _object.snapshotId = SnapshotIdJsonConverter.read(_snapshotIdNode, _version); } JsonNode _positionNode = _node.get("position"); if (_positionNode == null) { throw new RuntimeException("PartitionSnapshot: unable to locate field 'position', which is mandatory in version " + _version); } else { _object.position = MessageUtil.jsonNodeToLong(_positionNode, "PartitionSnapshot"); } return _object; } public static JsonNode write(PartitionSnapshot _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("partition", new IntNode(_object.partition)); _node.set("currentLeaderEpoch", new IntNode(_object.currentLeaderEpoch)); _node.set("snapshotId", SnapshotIdJsonConverter.write(_object.snapshotId, _version, _serializeRecords)); _node.set("position", new LongNode(_object.position)); return _node; } public static JsonNode write(PartitionSnapshot _object, short _version) { return write(_object, 
_version, true); } } public static class SnapshotIdJsonConverter { public static SnapshotId read(JsonNode _node, short _version) { SnapshotId _object = new SnapshotId(); JsonNode _endOffsetNode = _node.get("endOffset"); if (_endOffsetNode == null) { throw new RuntimeException("SnapshotId: unable to locate field 'endOffset', which is mandatory in version " + _version); } else { _object.endOffset = MessageUtil.jsonNodeToLong(_endOffsetNode, "SnapshotId"); } JsonNode _epochNode = _node.get("epoch"); if (_epochNode == null) { throw new RuntimeException("SnapshotId: unable to locate field 'epoch', which is mandatory in version " + _version); } else { _object.epoch = MessageUtil.jsonNodeToInt(_epochNode, "SnapshotId"); } return _object; } public static JsonNode write(SnapshotId _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("endOffset", new LongNode(_object.endOffset)); _node.set("epoch", new IntNode(_object.epoch)); return _node; } public static JsonNode write(SnapshotId _object, short _version) { return write(_object, _version, true); } } public static class TopicSnapshotJsonConverter { public static TopicSnapshot read(JsonNode _node, short _version) { TopicSnapshot _object = new TopicSnapshot(); JsonNode _nameNode = _node.get("name"); if (_nameNode == null) { throw new RuntimeException("TopicSnapshot: unable to locate field 'name', which is mandatory in version " + _version); } else { if (!_nameNode.isTextual()) { throw new RuntimeException("TopicSnapshot expected a string type, but got " + _node.getNodeType()); } _object.name = _nameNode.asText(); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("TopicSnapshot: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new RuntimeException("TopicSnapshot expected a JSON array, but got " + 
_node.getNodeType()); } ArrayList<PartitionSnapshot> _collection = new ArrayList<PartitionSnapshot>(_partitionsNode.size()); _object.partitions = _collection; for (JsonNode _element : _partitionsNode) { _collection.add(PartitionSnapshotJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(TopicSnapshot _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("name", new TextNode(_object.name)); ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (PartitionSnapshot _element : _object.partitions) { _partitionsArray.add(PartitionSnapshotJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitions", _partitionsArray); return _node; } public static JsonNode write(TopicSnapshot _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/FetchSnapshotResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Objects; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.record.BaseRecords; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.utils.ByteUtils; import static 
org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Generated message class for the FetchSnapshot response ({@code apiKey()} returns 59),
 * carrying {@code throttleTimeMs}, a top-level {@code errorCode}, and a list of
 * {@code TopicSnapshot} entries. Version 0 is the only supported version and uses the
 * flexible ("compact") wire encoding: arrays and strings are length-prefixed with an
 * unsigned varint of (size + 1), and each struct is followed by a tagged-fields section.
 * NOTE(review): this file is machine-generated ("DO NOT EDIT" header above) — the exact
 * statement order in read()/write()/addSize() defines the wire format; do not reorder.
 */
public class FetchSnapshotResponseData implements ApiMessage { int throttleTimeMs; short errorCode; List<TopicSnapshot> topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The top level response error code."), new Field("topics", new CompactArrayOf(TopicSnapshot.SCHEMA_0), "The topics to fetch."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public FetchSnapshotResponseData(Readable _readable, short _version) { read(_readable, _version); } public FetchSnapshotResponseData() { this.throttleTimeMs = 0; this.errorCode = (short) 0; this.topics = new ArrayList<TopicSnapshot>(0); } @Override public short apiKey() { return 59; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { this.throttleTimeMs = _readable.readInt(); this.errorCode = _readable.readShort(); { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<TopicSnapshot> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new TopicSnapshot(_readable, _version)); } this.topics = newCollection; }
// Tail of read(): unrecognized tagged fields are preserved in _unknownTaggedFields so they
// round-trip through write(). write()/addSize() mirror each other: compact array prefix is
// (size + 1) as an unsigned varint, followed by elements, then the tagged-fields count.
} this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(throttleTimeMs); _writable.writeShort(errorCode); _writable.writeUnsignedVarint(topics.size() + 1); for (TopicSnapshot topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); _size.addBytes(2); { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); for (TopicSnapshot topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof FetchSnapshotResponseData)) return false; FetchSnapshotResponseData other = (FetchSnapshotResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (errorCode != other.errorCode) return false; if (this.topics == null) { if (other.topics != null) return
// equals() tail, hashCode, duplicate (deep copy of topics), toString, accessors and fluent
// setters for the top-level message. Then the nested TopicSnapshot message begins: a compact
// string "name" plus a compact array of PartitionSnapshot entries.
false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); return hashCode; } @Override public FetchSnapshotResponseData duplicate() { FetchSnapshotResponseData _duplicate = new FetchSnapshotResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; _duplicate.errorCode = errorCode; ArrayList<TopicSnapshot> newTopics = new ArrayList<TopicSnapshot>(topics.size()); for (TopicSnapshot _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "FetchSnapshotResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", errorCode=" + errorCode + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public short errorCode() { return this.errorCode; } public List<TopicSnapshot> topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public FetchSnapshotResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public FetchSnapshotResponseData setErrorCode(short v) { this.errorCode = v; return this; } public FetchSnapshotResponseData setTopics(List<TopicSnapshot> v) { this.topics = v; return this; } public static class TopicSnapshot implements Message { String name; List<PartitionSnapshot> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.COMPACT_STRING, "The name of the topic to fetch."), new Field("partitions", new
// TopicSnapshot schema constants and read(): the compact string "name" is prefixed with
// varint(length + 1) and rejected when longer than 0x7fff; "partitions" is a compact array
// whose claimed length is sanity-checked against the bytes remaining before allocation.
CompactArrayOf(PartitionSnapshot.SCHEMA_0), "The partitions to fetch."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public TopicSnapshot(Readable _readable, short _version) { read(_readable, _version); } public TopicSnapshot() { this.name = ""; this.partitions = new ArrayList<PartitionSnapshot>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of TopicSnapshot"); } { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<PartitionSnapshot> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new PartitionSnapshot(_readable, _version)); } this.partitions = newCollection; } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields =
// TopicSnapshot write()/addSize(): the name's UTF-8 bytes are computed once in addSize(),
// cached in the ObjectSerializationCache, and reused by write(); addSize() also enforces the
// 0x7fff length cap before caching.
_readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } _writable.writeUnsignedVarint(partitions.size() + 1); for (PartitionSnapshot partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of TopicSnapshot"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); for (PartitionSnapshot partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof TopicSnapshot)) return false;
// TopicSnapshot equality/hash/duplicate/toString — unknown tagged fields participate in
// equals() via MessageUtil.compareRawTaggedFields but not in hashCode().
TopicSnapshot other = (TopicSnapshot) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode()); return hashCode; } @Override public TopicSnapshot duplicate() { TopicSnapshot _duplicate = new TopicSnapshot(); _duplicate.name = name; ArrayList<PartitionSnapshot> newPartitions = new ArrayList<PartitionSnapshot>(partitions.size()); for (PartitionSnapshot _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "TopicSnapshot(" + "name=" + ((name == null) ?
// TopicSnapshot accessors/setters; then PartitionSnapshot begins. Note in its SCHEMA_0 that
// "current_leader" lives in the tagged-fields section with tag 0, so it is optional on the
// wire and defaults to a fresh LeaderIdAndEpoch when absent.
"null" : "'" + name.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String name() { return this.name; } public List<PartitionSnapshot> partitions() { return this.partitions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public TopicSnapshot setName(String v) { this.name = v; return this; } public TopicSnapshot setPartitions(List<PartitionSnapshot> v) { this.partitions = v; return this; } } public static class PartitionSnapshot implements Message { int index; short errorCode; SnapshotId snapshotId; LeaderIdAndEpoch currentLeader; long size; long position; BaseRecords unalignedRecords; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no fetch error."), new Field("snapshot_id", SnapshotId.SCHEMA_0, "The snapshot endOffset and epoch fetched"), new Field("size", Type.INT64, "The total size of the snapshot."), new Field("position", Type.INT64, "The starting byte position within the snapshot included in the Bytes field."), new Field("unaligned_records", Type.COMPACT_RECORDS, "Snapshot data in records format which may not be aligned on an offset boundary"), TaggedFieldsSection.of( 0, new Field("current_leader", LeaderIdAndEpoch.SCHEMA_0, "") ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public PartitionSnapshot(Readable _readable, short _version) { read(_readable, _version); } public PartitionSnapshot() { this.index = 0; this.errorCode = (short) 0; this.snapshotId = new SnapshotId(); this.currentLeader = new LeaderIdAndEpoch(); this.size = 0L; this.position = 0L;
// PartitionSnapshot read(): currentLeader is initialized to a default LeaderIdAndEpoch and
// only replaced if tag 0 appears in the tagged-fields loop; unalignedRecords uses the compact
// records encoding (varint length + 1, then raw record bytes). write() counts currentLeader
// toward _numTaggedFields only when it differs from the default instance.
this.unalignedRecords = null; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of PartitionSnapshot"); } this.index = _readable.readInt(); this.errorCode = _readable.readShort(); { this.snapshotId = new SnapshotId(_readable, _version); } { this.currentLeader = new LeaderIdAndEpoch(); } this.size = _readable.readLong(); this.position = _readable.readLong(); { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field unalignedRecords was serialized as null"); } else { this.unalignedRecords = _readable.readRecords(length); } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { case 0: { this.currentLeader = new LeaderIdAndEpoch(_readable, _version); break; } default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(index); _writable.writeShort(errorCode); snapshotId.write(_writable, _cache, _version); if (!this.currentLeader.equals(new LeaderIdAndEpoch())) { _numTaggedFields++; } _writable.writeLong(size); _writable.writeLong(position); _writable.writeUnsignedVarint(unalignedRecords.sizeInBytes() + 1); _writable.writeRecords(unalignedRecords); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); { if (!this.currentLeader.equals(new LeaderIdAndEpoch())) {
// Tagged emission of current_leader: tag varint(0), then a varint size prefix obtained from
// currentLeader.size(_cache, _version), then the struct body. addSize() mirrors this, adding
// the record payload via addZeroCopyBytes and measuring the struct with a before/after
// totalSize() delta to size its varint prefix.
_writable.writeUnsignedVarint(0); _writable.writeUnsignedVarint(this.currentLeader.size(_cache, _version)); currentLeader.write(_writable, _cache, _version); } } _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of PartitionSnapshot"); } _size.addBytes(4); _size.addBytes(2); { this.snapshotId.addSize(_size, _cache, _version); } { if (!this.currentLeader.equals(new LeaderIdAndEpoch())) { _numTaggedFields++; _size.addBytes(1); int _sizeBeforeStruct = _size.totalSize(); this.currentLeader.addSize(_size, _cache, _version); int _structSize = _size.totalSize() - _sizeBeforeStruct; _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_structSize)); } } _size.addBytes(8); _size.addBytes(8); { _size.addZeroCopyBytes(unalignedRecords.sizeInBytes()); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(unalignedRecords.sizeInBytes() + 1)); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof PartitionSnapshot)) return false; PartitionSnapshot other = (PartitionSnapshot) obj; if (index != other.index) return false; if (errorCode != other.errorCode) return false; if (this.snapshotId == null) { if (other.snapshotId != null) return false; } else { if (!this.snapshotId.equals(other.snapshotId)) return false; } if (this.currentLeader == null) { if (other.currentLeader != null) return false; } else { if (!this.currentLeader.equals(other.currentLeader)) return false; } if (size !=
// PartitionSnapshot equals() tail, hashCode (folds longs via high/low word XOR), duplicate
// (deep-copies the record buffer; NOTE(review): assumes unalignedRecords is a MemoryRecords —
// the cast would fail for other BaseRecords implementations), toString, and accessors.
other.size) return false; if (position != other.position) return false; if (!Objects.equals(this.unalignedRecords, other.unalignedRecords)) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + index; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (snapshotId == null ? 0 : snapshotId.hashCode()); hashCode = 31 * hashCode + (currentLeader == null ? 0 : currentLeader.hashCode()); hashCode = 31 * hashCode + ((int) (size >> 32) ^ (int) size); hashCode = 31 * hashCode + ((int) (position >> 32) ^ (int) position); hashCode = 31 * hashCode + Objects.hashCode(unalignedRecords); return hashCode; } @Override public PartitionSnapshot duplicate() { PartitionSnapshot _duplicate = new PartitionSnapshot(); _duplicate.index = index; _duplicate.errorCode = errorCode; _duplicate.snapshotId = snapshotId.duplicate(); _duplicate.currentLeader = currentLeader.duplicate(); _duplicate.size = size; _duplicate.position = position; _duplicate.unalignedRecords = MemoryRecords.readableRecords(((MemoryRecords) unalignedRecords).buffer().duplicate()); return _duplicate; } @Override public String toString() { return "PartitionSnapshot(" + "index=" + index + ", errorCode=" + errorCode + ", snapshotId=" + snapshotId.toString() + ", currentLeader=" + currentLeader.toString() + ", size=" + size + ", position=" + position + ", unalignedRecords=" + unalignedRecords + ")"; } public int index() { return this.index; } public short errorCode() { return this.errorCode; } public SnapshotId snapshotId() { return this.snapshotId; } public LeaderIdAndEpoch currentLeader() { return this.currentLeader; } public long size() { return this.size; } public long position() { return this.position; } public BaseRecords unalignedRecords() { return this.unalignedRecords; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) {
// PartitionSnapshot fluent setters; then SnapshotId, a fixed-size nested struct holding
// endOffset (INT64) and epoch (INT32) plus an (empty) tagged-fields section.
_unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public PartitionSnapshot setIndex(int v) { this.index = v; return this; } public PartitionSnapshot setErrorCode(short v) { this.errorCode = v; return this; } public PartitionSnapshot setSnapshotId(SnapshotId v) { this.snapshotId = v; return this; } public PartitionSnapshot setCurrentLeader(LeaderIdAndEpoch v) { this.currentLeader = v; return this; } public PartitionSnapshot setSize(long v) { this.size = v; return this; } public PartitionSnapshot setPosition(long v) { this.position = v; return this; } public PartitionSnapshot setUnalignedRecords(BaseRecords v) { this.unalignedRecords = v; return this; } } public static class SnapshotId implements Message { long endOffset; int epoch; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("end_offset", Type.INT64, ""), new Field("epoch", Type.INT32, ""), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public SnapshotId(Readable _readable, short _version) { read(_readable, _version); } public SnapshotId() { this.endOffset = 0L; this.epoch = 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of SnapshotId"); } this.endOffset = _readable.readLong(); this.epoch = _readable.readInt(); this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields =
// SnapshotId write/addSize (8 + 4 fixed bytes plus tagged-fields count), equality, hashing,
// duplication, and toString.
_readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeLong(endOffset); _writable.writeInt(epoch); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of SnapshotId"); } _size.addBytes(8); _size.addBytes(4); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof SnapshotId)) return false; SnapshotId other = (SnapshotId) obj; if (endOffset != other.endOffset) return false; if (epoch != other.epoch) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + ((int) (endOffset >> 32) ^ (int) endOffset); hashCode = 31 * hashCode + epoch; return hashCode; } @Override public SnapshotId duplicate() { SnapshotId _duplicate = new SnapshotId(); _duplicate.endOffset = endOffset; _duplicate.epoch = epoch; return _duplicate; } @Override public String toString() { return "SnapshotId(" + "endOffset=" + endOffset + ", epoch=" + epoch + ")"; } public long endOffset() { return this.endOffset; } public int
// SnapshotId accessors/setters; then LeaderIdAndEpoch — the struct carried in the tagged
// current_leader field above — holding leaderId and leaderEpoch, both INT32.
epoch() { return this.epoch; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public SnapshotId setEndOffset(long v) { this.endOffset = v; return this; } public SnapshotId setEpoch(int v) { this.epoch = v; return this; } } public static class LeaderIdAndEpoch implements Message { int leaderId; int leaderEpoch; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("leader_id", Type.INT32, "The ID of the current leader or -1 if the leader is unknown."), new Field("leader_epoch", Type.INT32, "The latest known leader epoch"), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public LeaderIdAndEpoch(Readable _readable, short _version) { read(_readable, _version); } public LeaderIdAndEpoch() { this.leaderId = 0; this.leaderEpoch = 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of LeaderIdAndEpoch"); } this.leaderId = _readable.readInt(); this.leaderEpoch = _readable.readInt(); this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(leaderId);
// LeaderIdAndEpoch write tail, addSize (4 + 4 fixed bytes), equality, hashing, duplication,
// toString, and accessors.
_writable.writeInt(leaderEpoch); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of LeaderIdAndEpoch"); } _size.addBytes(4); _size.addBytes(4); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof LeaderIdAndEpoch)) return false; LeaderIdAndEpoch other = (LeaderIdAndEpoch) obj; if (leaderId != other.leaderId) return false; if (leaderEpoch != other.leaderEpoch) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + leaderId; hashCode = 31 * hashCode + leaderEpoch; return hashCode; } @Override public LeaderIdAndEpoch duplicate() { LeaderIdAndEpoch _duplicate = new LeaderIdAndEpoch(); _duplicate.leaderId = leaderId; _duplicate.leaderEpoch = leaderEpoch; return _duplicate; } @Override public String toString() { return "LeaderIdAndEpoch(" + "leaderId=" + leaderId + ", leaderEpoch=" + leaderEpoch + ")"; } public int leaderId() { return this.leaderId; } public int leaderEpoch() { return this.leaderEpoch; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return
// LeaderIdAndEpoch setters; closing braces for LeaderIdAndEpoch and the outer
// FetchSnapshotResponseData class.
_unknownTaggedFields; } public LeaderIdAndEpoch setLeaderId(int v) { this.leaderId = v; return this; } public LeaderIdAndEpoch setLeaderEpoch(int v) { this.leaderEpoch = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/FetchSnapshotResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BinaryNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.record.MemoryRecords;

import static org.apache.kafka.common.message.FetchSnapshotResponseData.*;

/**
 * JSON converter for {@link FetchSnapshotResponseData}: translates between the
 * protocol message object and a Jackson {@link JsonNode} tree for the given
 * message version. This file is machine-generated from the message schema;
 * regenerate it rather than editing by hand.
 */
public class FetchSnapshotResponseDataJsonConverter {
    /**
     * Builds a {@link FetchSnapshotResponseData} from a JSON object.
     * Every field here is mandatory in all versions of this message; a missing
     * field raises a {@link RuntimeException} naming the field and version.
     */
    public static FetchSnapshotResponseData read(JsonNode _node, short _version) {
        FetchSnapshotResponseData _object = new FetchSnapshotResponseData();
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            throw new RuntimeException("FetchSnapshotResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "FetchSnapshotResponseData");
        }
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("FetchSnapshotResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "FetchSnapshotResponseData");
        }
        JsonNode _topicsNode = _node.get("topics");
        if (_topicsNode == null) {
            throw new RuntimeException("FetchSnapshotResponseData: unable to locate field 'topics', which is mandatory in version " + _version);
        } else {
            if (!_topicsNode.isArray()) {
                throw new RuntimeException("FetchSnapshotResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            // Pre-size the list from the JSON array length, then delegate each
            // element to the nested topic converter.
            ArrayList<TopicSnapshot> _collection = new ArrayList<TopicSnapshot>(_topicsNode.size());
            _object.topics = _collection;
            for (JsonNode _element : _topicsNode) {
                _collection.add(TopicSnapshotJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes a {@link FetchSnapshotResponseData} to a JSON object.
     * {@code _serializeRecords} is threaded down to the partition converter,
     * which uses it to decide how the record payload is represented.
     */
    public static JsonNode write(FetchSnapshotResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        _node.set("errorCode", new ShortNode(_object.errorCode));
        ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
        for (TopicSnapshot _element : _object.topics) {
            _topicsArray.add(TopicSnapshotJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("topics", _topicsArray);
        return _node;
    }

    /** Convenience overload: serializes with {@code _serializeRecords = true}. */
    public static JsonNode write(FetchSnapshotResponseData _object, short _version) {
        return write(_object, _version, true);
    }

    /** Converter for the nested {@code LeaderIdAndEpoch} structure (two mandatory int fields). */
    public static class LeaderIdAndEpochJsonConverter {
        public static LeaderIdAndEpoch read(JsonNode _node, short _version) {
            LeaderIdAndEpoch _object = new LeaderIdAndEpoch();
            JsonNode _leaderIdNode = _node.get("leaderId");
            if (_leaderIdNode == null) {
                throw new RuntimeException("LeaderIdAndEpoch: unable to locate field 'leaderId', which is mandatory in version " + _version);
            } else {
                _object.leaderId = MessageUtil.jsonNodeToInt(_leaderIdNode, "LeaderIdAndEpoch");
            }
            JsonNode _leaderEpochNode = _node.get("leaderEpoch");
            if (_leaderEpochNode == null) {
                throw new RuntimeException("LeaderIdAndEpoch: unable to locate field 'leaderEpoch', which is mandatory in version " + _version);
            } else {
                _object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "LeaderIdAndEpoch");
            }
            return _object;
        }

        public static JsonNode write(LeaderIdAndEpoch _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("leaderId", new IntNode(_object.leaderId));
            _node.set("leaderEpoch", new IntNode(_object.leaderEpoch));
            return _node;
        }

        public static JsonNode write(LeaderIdAndEpoch _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** Converter for the nested per-partition snapshot structure. */
    public static class PartitionSnapshotJsonConverter {
        public static PartitionSnapshot read(JsonNode _node, short _version) {
            PartitionSnapshot _object = new PartitionSnapshot();
            JsonNode _indexNode = _node.get("index");
            if (_indexNode == null) {
                throw new RuntimeException("PartitionSnapshot: unable to locate field 'index', which is mandatory in version " + _version);
            } else {
                _object.index = MessageUtil.jsonNodeToInt(_indexNode, "PartitionSnapshot");
            }
            JsonNode _errorCodeNode = _node.get("errorCode");
            if (_errorCodeNode == null) {
                throw new RuntimeException("PartitionSnapshot: unable to locate field 'errorCode', which is mandatory in version " + _version);
            } else {
                _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "PartitionSnapshot");
            }
            JsonNode _snapshotIdNode = _node.get("snapshotId");
            if (_snapshotIdNode == null) {
                throw new RuntimeException("PartitionSnapshot: unable to locate field 'snapshotId', which is mandatory in version " + _version);
            } else {
                _object.snapshotId = SnapshotIdJsonConverter.read(_snapshotIdNode, _version);
            }
            // currentLeader is optional in JSON: absence falls back to a
            // default-constructed LeaderIdAndEpoch rather than failing.
            JsonNode _currentLeaderNode = _node.get("currentLeader");
            if (_currentLeaderNode == null) {
                _object.currentLeader = new LeaderIdAndEpoch();
            } else {
                _object.currentLeader = LeaderIdAndEpochJsonConverter.read(_currentLeaderNode, _version);
            }
            JsonNode _sizeNode = _node.get("size");
            if (_sizeNode == null) {
                throw new RuntimeException("PartitionSnapshot: unable to locate field 'size', which is mandatory in version " + _version);
            } else {
                _object.size = MessageUtil.jsonNodeToLong(_sizeNode, "PartitionSnapshot");
            }
            JsonNode _positionNode = _node.get("position");
            if (_positionNode == null) {
                throw new RuntimeException("PartitionSnapshot: unable to locate field 'position', which is mandatory in version " + _version);
            } else {
                _object.position = MessageUtil.jsonNodeToLong(_positionNode, "PartitionSnapshot");
            }
            // The record payload arrives as JSON binary and is wrapped into a
            // readable MemoryRecords view without copying the decoded bytes.
            JsonNode _unalignedRecordsNode = _node.get("unalignedRecords");
            if (_unalignedRecordsNode == null) {
                throw new RuntimeException("PartitionSnapshot: unable to locate field 'unalignedRecords', which is mandatory in version " + _version);
            } else {
                _object.unalignedRecords = MemoryRecords.readableRecords(ByteBuffer.wrap(MessageUtil.jsonNodeToBinary(_unalignedRecordsNode, "PartitionSnapshot")));
            }
            return _object;
        }

        public static JsonNode write(PartitionSnapshot _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("index", new IntNode(_object.index));
            _node.set("errorCode", new ShortNode(_object.errorCode));
            _node.set("snapshotId", SnapshotIdJsonConverter.write(_object.snapshotId, _version, _serializeRecords));
            // currentLeader is emitted only when it differs from the default value.
            if (!_object.currentLeader.equals(new LeaderIdAndEpoch())) {
                _node.set("currentLeader", LeaderIdAndEpochJsonConverter.write(_object.currentLeader, _version, _serializeRecords));
            }
            _node.set("size", new LongNode(_object.size));
            _node.set("position", new LongNode(_object.position));
            // Record bytes are never dumped into JSON: with _serializeRecords an
            // empty binary placeholder is emitted, otherwise only the byte count.
            if (_serializeRecords) {
                _node.set("unalignedRecords", new BinaryNode(new byte[]{}));
            } else {
                _node.set("unalignedRecordsSizeInBytes", new IntNode(_object.unalignedRecords.sizeInBytes()));
            }
            return _node;
        }

        public static JsonNode write(PartitionSnapshot _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** Converter for the nested {@code SnapshotId} structure (endOffset + epoch). */
    public static class SnapshotIdJsonConverter {
        public static SnapshotId read(JsonNode _node, short _version) {
            SnapshotId _object = new SnapshotId();
            JsonNode _endOffsetNode = _node.get("endOffset");
            if (_endOffsetNode == null) {
                throw new RuntimeException("SnapshotId: unable to locate field 'endOffset', which is mandatory in version " + _version);
            } else {
                _object.endOffset = MessageUtil.jsonNodeToLong(_endOffsetNode, "SnapshotId");
            }
            JsonNode _epochNode = _node.get("epoch");
            if (_epochNode == null) {
                throw new RuntimeException("SnapshotId: unable to locate field 'epoch', which is mandatory in version " + _version);
            } else {
                _object.epoch = MessageUtil.jsonNodeToInt(_epochNode, "SnapshotId");
            }
            return _object;
        }

        public static JsonNode write(SnapshotId _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("endOffset", new LongNode(_object.endOffset));
            _node.set("epoch", new IntNode(_object.epoch));
            return _node;
        }

        public static JsonNode write(SnapshotId _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** Converter for the nested per-topic snapshot structure (name + partitions). */
    public static class TopicSnapshotJsonConverter {
        public static TopicSnapshot read(JsonNode _node, short _version) {
            TopicSnapshot _object = new TopicSnapshot();
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("TopicSnapshot: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("TopicSnapshot expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("TopicSnapshot: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    throw new RuntimeException("TopicSnapshot expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<PartitionSnapshot> _collection = new ArrayList<PartitionSnapshot>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(PartitionSnapshotJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        public static JsonNode write(TopicSnapshot _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (PartitionSnapshot _element : _object.partitions) {
                _partitionsArray.add(PartitionSnapshotJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }

        public static JsonNode write(TopicSnapshot _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/FindCoordinatorRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Generated message body for the FindCoordinator request (api key 10,
 * versions 0-4). Version layout visible in the schemas below:
 * v0-3 carry a single {@code key} (v1+ adds {@code keyType}); v4 drops
 * {@code key} and instead carries a batch of {@code coordinatorKeys}.
 * v3+ uses the flexible (compact/tagged-field) wire format.
 * This file is machine-generated from the message schema; regenerate it
 * rather than editing by hand.
 */
public class FindCoordinatorRequestData implements ApiMessage {
    // The coordinator key (v0-3 only).
    String key;
    // The coordinator key type (group, transaction, etc.), v1+.
    byte keyType;
    // The coordinator keys (v4+ batched lookups).
    List<String> coordinatorKeys;
    // Tagged fields read off the wire that this version of the code does not know.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("key", Type.STRING, "The coordinator key.")
        );

    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("key", Type.STRING, "The coordinator key."),
            new Field("key_type", Type.INT8, "The coordinator key type. (Group, transaction, etc.)")
        );

    public static final Schema SCHEMA_2 = SCHEMA_1;

    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("key", Type.COMPACT_STRING, "The coordinator key."),
            new Field("key_type", Type.INT8, "The coordinator key type. (Group, transaction, etc.)"),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema SCHEMA_4 =
        new Schema(
            new Field("key_type", Type.INT8, "The coordinator key type. (Group, transaction, etc.)"),
            new Field("coordinator_keys", new CompactArrayOf(Type.COMPACT_STRING), "The coordinator keys."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3,
        SCHEMA_4
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 4;

    /** Deserializing constructor: reads the message body at the given version. */
    public FindCoordinatorRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: all fields set to their schema defaults. */
    public FindCoordinatorRequestData() {
        this.key = "";
        this.keyType = (byte) 0;
        this.coordinatorKeys = new ArrayList<String>(0);
    }

    @Override
    public short apiKey() {
        return 10;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 4;
    }

    /**
     * Reads this message from the wire. Fields absent in {@code _version} are
     * reset to their defaults. Compact (varint-prefixed) encodings are used
     * for strings/arrays from v3 on, length-prefixed shorts before that.
     */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version <= 3) {
            int length;
            if (_version >= 3) {
                // Compact string: varint length is (actual length + 1); 0 means null.
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readShort();
            }
            if (length < 0) {
                throw new RuntimeException("non-nullable field key was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field key had invalid length " + length);
            } else {
                this.key = _readable.readString(length);
            }
        } else {
            this.key = "";
        }
        if (_version >= 1) {
            this.keyType = _readable.readByte();
        } else {
            this.keyType = (byte) 0;
        }
        if (_version >= 4) {
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field coordinatorKeys was serialized as null");
            } else {
                // Guard against a corrupt length claiming more elements than bytes remain.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<String> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    int length;
                    length = _readable.readUnsignedVarint() - 1;
                    if (length < 0) {
                        throw new RuntimeException("non-nullable field coordinatorKeys element was serialized as null");
                    } else if (length > 0x7fff) {
                        throw new RuntimeException("string field coordinatorKeys element had invalid length " + length);
                    } else {
                        newCollection.add(_readable.readString(length));
                    }
                }
                this.coordinatorKeys = newCollection;
            }
        } else {
            this.coordinatorKeys = new ArrayList<String>(0);
        }
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            // Flexible versions: consume the trailing tagged-field section,
            // preserving unknown tags for round-tripping.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Writes this message at the given version. A field that the version does
     * not support must hold its default value, otherwise an
     * {@link UnsupportedVersionException} is thrown. Relies on string bytes
     * having been cached in {@code _cache} by a prior {@link #addSize} call.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version <= 3) {
            {
                byte[] _stringBytes = _cache.getSerializedValue(key);
                if (_version >= 3) {
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                } else {
                    _writable.writeShort((short) _stringBytes.length);
                }
                _writable.writeByteArray(_stringBytes);
            }
        } else {
            if (!this.key.equals("")) {
                throw new UnsupportedVersionException("Attempted to write a non-default key at version " + _version);
            }
        }
        if (_version >= 1) {
            _writable.writeByte(keyType);
        } else {
            if (this.keyType != (byte) 0) {
                throw new UnsupportedVersionException("Attempted to write a non-default keyType at version " + _version);
            }
        }
        if (_version >= 4) {
            _writable.writeUnsignedVarint(coordinatorKeys.size() + 1);
            for (String coordinatorKeysElement : coordinatorKeys) {
                {
                    byte[] _stringBytes = _cache.getSerializedValue(coordinatorKeysElement);
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                    _writable.writeByteArray(_stringBytes);
                }
            }
        } else {
            if (!this.coordinatorKeys.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default coordinatorKeys at version " + _version);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this message at the given version,
     * caching each string's UTF-8 bytes in {@code _cache} so {@link #write}
     * does not re-encode them.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version <= 3) {
            {
                byte[] _stringBytes = key.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'key' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(key, _stringBytes);
                if (_version >= 3) {
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                } else {
                    _size.addBytes(_stringBytes.length + 2);
                }
            }
        }
        if (_version >= 1) {
            _size.addBytes(1);
        }
        if (_version >= 4) {
            {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(coordinatorKeys.size() + 1));
                for (String coordinatorKeysElement : coordinatorKeys) {
                    byte[] _stringBytes = coordinatorKeysElement.getBytes(StandardCharsets.UTF_8);
                    if (_stringBytes.length > 0x7fff) {
                        throw new RuntimeException("'coordinatorKeysElement' field is too long to be serialized");
                    }
                    _cache.cacheSerializedValue(coordinatorKeysElement, _stringBytes);
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                }
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 3) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof FindCoordinatorRequestData)) return false;
        FindCoordinatorRequestData other = (FindCoordinatorRequestData) obj;
        if (this.key == null) {
            if (other.key != null) return false;
        } else {
            if (!this.key.equals(other.key)) return false;
        }
        if (keyType != other.keyType) return false;
        if (this.coordinatorKeys == null) {
            if (other.coordinatorKeys != null) return false;
        } else {
            if (!this.coordinatorKeys.equals(other.coordinatorKeys)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // NOTE(review): unknown tagged fields participate in equals() above but are
    // deliberately not folded into hashCode() — this matches the generated
    // pattern; equal objects still hash equally.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (key == null ? 0 : key.hashCode());
        hashCode = 31 * hashCode + keyType;
        hashCode = 31 * hashCode + (coordinatorKeys == null ? 0 : coordinatorKeys.hashCode());
        return hashCode;
    }

    /** Returns a copy with the coordinatorKeys list copied element-by-element. */
    @Override
    public FindCoordinatorRequestData duplicate() {
        FindCoordinatorRequestData _duplicate = new FindCoordinatorRequestData();
        _duplicate.key = key;
        _duplicate.keyType = keyType;
        ArrayList<String> newCoordinatorKeys = new ArrayList<String>(coordinatorKeys.size());
        for (String _element : coordinatorKeys) {
            newCoordinatorKeys.add(_element);
        }
        _duplicate.coordinatorKeys = newCoordinatorKeys;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "FindCoordinatorRequestData("
            + "key=" + ((key == null) ? "null" : "'" + key.toString() + "'")
            + ", keyType=" + keyType
            + ", coordinatorKeys=" + MessageUtil.deepToString(coordinatorKeys.iterator())
            + ")";
    }

    public String key() {
        return this.key;
    }

    public byte keyType() {
        return this.keyType;
    }

    public List<String> coordinatorKeys() {
        return this.coordinatorKeys;
    }

    /** Lazily-created live view of unknown tagged fields (never null). */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public FindCoordinatorRequestData setKey(String v) {
        this.key = v;
        return this;
    }

    public FindCoordinatorRequestData setKeyType(byte v) {
        this.keyType = v;
        return this;
    }

    public FindCoordinatorRequestData setCoordinatorKeys(List<String> v) {
        this.coordinatorKeys = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/FindCoordinatorRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.FindCoordinatorRequestData.*;

/**
 * JSON converter for {@link FindCoordinatorRequestData}: translates between
 * the protocol message object and a Jackson {@link JsonNode} tree, enforcing
 * the same version gating as the binary codec ('key' exists in v0-3,
 * 'keyType' from v1, 'coordinatorKeys' from v4). This file is
 * machine-generated from the message schema; regenerate it rather than
 * editing by hand.
 */
public class FindCoordinatorRequestDataJsonConverter {
    /**
     * Builds a {@link FindCoordinatorRequestData} from a JSON object.
     * A field missing from the JSON is an error only in versions where it is
     * mandatory; otherwise the field falls back to its default.
     */
    public static FindCoordinatorRequestData read(JsonNode _node, short _version) {
        FindCoordinatorRequestData _object = new FindCoordinatorRequestData();
        JsonNode _keyNode = _node.get("key");
        if (_keyNode == null) {
            if (_version <= 3) {
                throw new RuntimeException("FindCoordinatorRequestData: unable to locate field 'key', which is mandatory in version " + _version);
            } else {
                _object.key = "";
            }
        } else {
            if (!_keyNode.isTextual()) {
                throw new RuntimeException("FindCoordinatorRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.key = _keyNode.asText();
        }
        JsonNode _keyTypeNode = _node.get("keyType");
        if (_keyTypeNode == null) {
            if (_version >= 1) {
                throw new RuntimeException("FindCoordinatorRequestData: unable to locate field 'keyType', which is mandatory in version " + _version);
            } else {
                _object.keyType = (byte) 0;
            }
        } else {
            _object.keyType = MessageUtil.jsonNodeToByte(_keyTypeNode, "FindCoordinatorRequestData");
        }
        JsonNode _coordinatorKeysNode = _node.get("coordinatorKeys");
        if (_coordinatorKeysNode == null) {
            if (_version >= 4) {
                throw new RuntimeException("FindCoordinatorRequestData: unable to locate field 'coordinatorKeys', which is mandatory in version " + _version);
            } else {
                _object.coordinatorKeys = new ArrayList<String>(0);
            }
        } else {
            if (!_coordinatorKeysNode.isArray()) {
                throw new RuntimeException("FindCoordinatorRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<String> _collection = new ArrayList<String>(_coordinatorKeysNode.size());
            _object.coordinatorKeys = _collection;
            for (JsonNode _element : _coordinatorKeysNode) {
                if (!_element.isTextual()) {
                    // NOTE(review): the message reports the parent node's type
                    // (_node.getNodeType()), not the offending element's —
                    // generated behavior, kept as-is.
                    throw new RuntimeException("FindCoordinatorRequestData element expected a string type, but got " + _node.getNodeType());
                }
                _collection.add(_element.asText());
            }
        }
        return _object;
    }

    /**
     * Serializes a {@link FindCoordinatorRequestData} to a JSON object.
     * A field not supported by {@code _version} must hold its default value,
     * otherwise an {@link UnsupportedVersionException} is thrown.
     */
    public static JsonNode write(FindCoordinatorRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        if (_version <= 3) {
            _node.set("key", new TextNode(_object.key));
        } else {
            if (!_object.key.equals("")) {
                throw new UnsupportedVersionException("Attempted to write a non-default key at version " + _version);
            }
        }
        if (_version >= 1) {
            // The byte-valued keyType is stored in a ShortNode.
            _node.set("keyType", new ShortNode(_object.keyType));
        } else {
            if (_object.keyType != (byte) 0) {
                throw new UnsupportedVersionException("Attempted to write a non-default keyType at version " + _version);
            }
        }
        if (_version >= 4) {
            ArrayNode _coordinatorKeysArray = new ArrayNode(JsonNodeFactory.instance);
            for (String _element : _object.coordinatorKeys) {
                _coordinatorKeysArray.add(new TextNode(_element));
            }
            _node.set("coordinatorKeys", _coordinatorKeysArray);
        } else {
            if (!_object.coordinatorKeys.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default coordinatorKeys at version " + _version);
            }
        }
        return _node;
    }

    /** Convenience overload: serializes with {@code _serializeRecords = true}. */
    public static JsonNode write(FindCoordinatorRequestData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/FindCoordinatorResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class FindCoordinatorResponseData implements ApiMessage { int throttleTimeMs; short 
errorCode; String errorMessage; int nodeId; String host; int port; List<Coordinator> coordinators; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("node_id", Type.INT32, "The node id."), new Field("host", Type.STRING, "The host name."), new Field("port", Type.INT32, "The port.") ); public static final Schema SCHEMA_1 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("error_message", Type.NULLABLE_STRING, "The error message, or null if there was no error."), new Field("node_id", Type.INT32, "The node id."), new Field("host", Type.STRING, "The host name."), new Field("port", Type.INT32, "The port.") ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The error message, or null if there was no error."), new Field("node_id", Type.INT32, "The node id."), new Field("host", Type.COMPACT_STRING, "The host name."), new Field("port", Type.INT32, "The port."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_4 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("coordinators", new CompactArrayOf(Coordinator.SCHEMA_4), "Each coordinator result in the 
response"), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 4; public FindCoordinatorResponseData(Readable _readable, short _version) { read(_readable, _version); } public FindCoordinatorResponseData() { this.throttleTimeMs = 0; this.errorCode = (short) 0; this.errorMessage = ""; this.nodeId = 0; this.host = ""; this.port = 0; this.coordinators = new ArrayList<Coordinator>(0); } @Override public short apiKey() { return 10; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { if (_version >= 1) { this.throttleTimeMs = _readable.readInt(); } else { this.throttleTimeMs = 0; } if (_version <= 3) { this.errorCode = _readable.readShort(); } else { this.errorCode = (short) 0; } if ((_version >= 1) && (_version <= 3)) { int length; if (_version >= 3) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.errorMessage = null; } else if (length > 0x7fff) { throw new RuntimeException("string field errorMessage had invalid length " + length); } else { this.errorMessage = _readable.readString(length); } } else { this.errorMessage = ""; } if (_version <= 3) { this.nodeId = _readable.readInt(); } else { this.nodeId = 0; } if (_version <= 3) { int length; if (_version >= 3) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field host was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field host had invalid length " + length); } else { this.host = _readable.readString(length); } } else { this.host = ""; } if (_version <= 3) { this.port = _readable.readInt(); 
} else { this.port = 0; }
// Tail of read(): the coordinators array exists only in v4+; for v3 and below it is left empty.
// The declared array length is bounds-checked against the remaining buffer before allocation.
if (_version >= 4) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field coordinators was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<Coordinator> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new Coordinator(_readable, _version)); } this.coordinators = newCollection; } } else { this.coordinators = new ArrayList<Coordinator>(0); }
// Tagged fields are only present in flexible versions (v3+); unknown tags are preserved verbatim.
this._unknownTaggedFields = null; if (_version >= 3) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } }
// Serializes this message to the Kafka wire format. Field presence is version-gated
// (throttleTimeMs v1+; errorCode/nodeId/host/port v0-3; errorMessage v1-3; coordinators v4+);
// writing a non-default value for a field the target version lacks throws UnsupportedVersionException.
// Flexible versions (v3+) use compact (unsigned-varint-prefixed) strings and tagged fields.
@Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 1) { _writable.writeInt(throttleTimeMs); } if (_version <= 3) { _writable.writeShort(errorCode); } else { if (this.errorCode != (short) 0) { throw new UnsupportedVersionException("Attempted to write a non-default errorCode at version " + _version); } } if ((_version >= 1) && (_version <= 3)) { if (errorMessage == null) { if (_version >= 3) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(errorMessage); if (_version >= 3) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } if (_version <= 3) { _writable.writeInt(nodeId); } else { if (this.nodeId != 0) { throw new UnsupportedVersionException("Attempted to write a non-default nodeId at version " + _version); } } if (_version <= 3) { { byte[] _stringBytes = _cache.getSerializedValue(host); if (_version >= 3) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } else { if (!this.host.equals("")) { throw new UnsupportedVersionException("Attempted to write a non-default host at version " + _version); } } if (_version <= 3) { _writable.writeInt(port); } else { if (this.port != 0) { throw new UnsupportedVersionException("Attempted to write a non-default port at version " + _version); } } if (_version >= 4) { _writable.writeUnsignedVarint(coordinators.size() + 1); for (Coordinator coordinatorsElement : coordinators) { coordinatorsElement.write(_writable, _cache, _version); } } else { if (!this.coordinators.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default coordinators at version " + _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 3) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } }
// Accumulates the exact serialized size of this message for the given version, mirroring write()
// byte-for-byte, and caches UTF-8 string encodings so write() can reuse them.
@Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 1) { _size.addBytes(4); } if (_version <= 3) { _size.addBytes(2); } if ((_version >= 1) && (_version <= 3)) { if (errorMessage == null) { if (_version >= 3) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'errorMessage' field is too long to be serialized"); } _cache.cacheSerializedValue(errorMessage, _stringBytes); if (_version >= 3) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } if (_version <= 3) { _size.addBytes(4); } if (_version <= 3) { { byte[] _stringBytes = host.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'host' field is too long to be serialized"); } _cache.cacheSerializedValue(host, _stringBytes); if (_version >= 3) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } if (_version <= 3) { _size.addBytes(4); } if (_version >= 4) { { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(coordinators.size() + 1)); for (Coordinator coordinatorsElement : coordinators) { coordinatorsElement.addSize(_size, _cache, _version); } } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 3) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } }
// Field-by-field structural equality, including unknown tagged fields.
@Override public boolean equals(Object obj) { if (!(obj instanceof FindCoordinatorResponseData)) return false; FindCoordinatorResponseData other = (FindCoordinatorResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (errorCode != other.errorCode) return false; if (this.errorMessage == null) { if (other.errorMessage != null) return false; } else { if (!this.errorMessage.equals(other.errorMessage)) return false; } if (nodeId != other.nodeId) return false; if (this.host == null) { if (other.host != null) return false; } else { if (!this.host.equals(other.host)) return false; } if (port != other.port) return false; if (this.coordinators == null) { if (other.coordinators != null) return false; } else { if (!this.coordinators.equals(other.coordinators)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); }
// Standard 31-multiplier hash over the same fields equals() compares (tagged fields excluded here).
@Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode()); hashCode = 31 * hashCode + nodeId; hashCode = 31 * hashCode + (host == null ? 0 : host.hashCode()); hashCode = 31 * hashCode + port; hashCode = 31 * hashCode + (coordinators == null ? 0 : coordinators.hashCode()); return hashCode; }
// Deep copy: coordinators elements are duplicated individually; strings are shared (immutable).
@Override public FindCoordinatorResponseData duplicate() { FindCoordinatorResponseData _duplicate = new FindCoordinatorResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; _duplicate.errorCode = errorCode; if (errorMessage == null) { _duplicate.errorMessage = null; } else { _duplicate.errorMessage = errorMessage; } _duplicate.nodeId = nodeId; _duplicate.host = host; _duplicate.port = port; ArrayList<Coordinator> newCoordinators = new ArrayList<Coordinator>(coordinators.size()); for (Coordinator _element : coordinators) { newCoordinators.add(_element.duplicate()); } _duplicate.coordinators = newCoordinators; return _duplicate; }
// Human-readable rendering of all fields (continues past this block).
@Override public String toString() { return "FindCoordinatorResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", errorCode=" + errorCode + ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'") + ", nodeId=" + nodeId + ", host=" + ((host == null) ? 
"null" : "'" + host.toString() + "'") + ", port=" + port + ", coordinators=" + MessageUtil.deepToString(coordinators.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public short errorCode() { return this.errorCode; } public String errorMessage() { return this.errorMessage; } public int nodeId() { return this.nodeId; } public String host() { return this.host; } public int port() { return this.port; } public List<Coordinator> coordinators() { return this.coordinators; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public FindCoordinatorResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public FindCoordinatorResponseData setErrorCode(short v) { this.errorCode = v; return this; } public FindCoordinatorResponseData setErrorMessage(String v) { this.errorMessage = v; return this; } public FindCoordinatorResponseData setNodeId(int v) { this.nodeId = v; return this; } public FindCoordinatorResponseData setHost(String v) { this.host = v; return this; } public FindCoordinatorResponseData setPort(int v) { this.port = v; return this; } public FindCoordinatorResponseData setCoordinators(List<Coordinator> v) { this.coordinators = v; return this; } public static class Coordinator implements Message { String key; int nodeId; String host; int port; short errorCode; String errorMessage; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_4 = new Schema( new Field("key", Type.COMPACT_STRING, "The coordinator key."), new Field("node_id", Type.INT32, "The node id."), new Field("host", Type.COMPACT_STRING, "The host name."), new Field("port", Type.INT32, "The port."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The error message, or null if there was no error."), 
TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { null, null, null, null, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 4; public static final short HIGHEST_SUPPORTED_VERSION = 4; public Coordinator(Readable _readable, short _version) { read(_readable, _version); } public Coordinator() { this.key = ""; this.nodeId = 0; this.host = ""; this.port = 0; this.errorCode = (short) 0; this.errorMessage = ""; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { if (_version > 4) { throw new UnsupportedVersionException("Can't read version " + _version + " of Coordinator"); } { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field key was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field key had invalid length " + length); } else { this.key = _readable.readString(length); } } this.nodeId = _readable.readInt(); { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field host was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field host had invalid length " + length); } else { this.host = _readable.readString(length); } } this.port = _readable.readInt(); this.errorCode = _readable.readShort(); { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { this.errorMessage = null; } else if (length > 0x7fff) { throw new RuntimeException("string field errorMessage had invalid length " + length); } else { this.errorMessage = _readable.readString(length); } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = 
_readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { if (_version < 4) { throw new UnsupportedVersionException("Can't write version " + _version + " of Coordinator"); } int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(key); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } _writable.writeInt(nodeId); { byte[] _stringBytes = _cache.getSerializedValue(host); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } _writable.writeInt(port); _writable.writeShort(errorCode); if (errorMessage == null) { _writable.writeUnsignedVarint(0); } else { byte[] _stringBytes = _cache.getSerializedValue(errorMessage); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 4) { throw new UnsupportedVersionException("Can't size version " + _version + " of Coordinator"); } { byte[] _stringBytes = key.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'key' field is too long to be serialized"); } _cache.cacheSerializedValue(key, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } _size.addBytes(4); { byte[] _stringBytes = host.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new 
RuntimeException("'host' field is too long to be serialized"); } _cache.cacheSerializedValue(host, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } _size.addBytes(4); _size.addBytes(2); if (errorMessage == null) { _size.addBytes(1); } else { byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'errorMessage' field is too long to be serialized"); } _cache.cacheSerializedValue(errorMessage, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof Coordinator)) return false; Coordinator other = (Coordinator) obj; if (this.key == null) { if (other.key != null) return false; } else { if (!this.key.equals(other.key)) return false; } if (nodeId != other.nodeId) return false; if (this.host == null) { if (other.host != null) return false; } else { if (!this.host.equals(other.host)) return false; } if (port != other.port) return false; if (errorCode != other.errorCode) return false; if (this.errorMessage == null) { if (other.errorMessage != null) return false; } else { if (!this.errorMessage.equals(other.errorMessage)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (key == null ? 0 : key.hashCode()); hashCode = 31 * hashCode + nodeId; hashCode = 31 * hashCode + (host == null ? 
0 : host.hashCode()); hashCode = 31 * hashCode + port; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode()); return hashCode; } @Override public Coordinator duplicate() { Coordinator _duplicate = new Coordinator(); _duplicate.key = key; _duplicate.nodeId = nodeId; _duplicate.host = host; _duplicate.port = port; _duplicate.errorCode = errorCode; if (errorMessage == null) { _duplicate.errorMessage = null; } else { _duplicate.errorMessage = errorMessage; } return _duplicate; } @Override public String toString() { return "Coordinator(" + "key=" + ((key == null) ? "null" : "'" + key.toString() + "'") + ", nodeId=" + nodeId + ", host=" + ((host == null) ? "null" : "'" + host.toString() + "'") + ", port=" + port + ", errorCode=" + errorCode + ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'") + ")"; } public String key() { return this.key; } public int nodeId() { return this.nodeId; } public String host() { return this.host; } public int port() { return this.port; } public short errorCode() { return this.errorCode; } public String errorMessage() { return this.errorMessage; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public Coordinator setKey(String v) { this.key = v; return this; } public Coordinator setNodeId(int v) { this.nodeId = v; return this; } public Coordinator setHost(String v) { this.host = v; return this; } public Coordinator setPort(int v) { this.port = v; return this; } public Coordinator setErrorCode(short v) { this.errorCode = v; return this; } public Coordinator setErrorMessage(String v) { this.errorMessage = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/FindCoordinatorResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;
import static org.apache.kafka.common.message.FindCoordinatorResponseData.*;
// JSON <-> FindCoordinatorResponseData converter. Field presence follows the same version
// gates as the binary codec: throttleTimeMs v1+, errorCode/nodeId/host/port v0-3,
// errorMessage v1-3 (nullable), coordinators v4+.
public class FindCoordinatorResponseDataJsonConverter {
// Parses a JSON object into a FindCoordinatorResponseData. A field missing from the JSON is an
// error when the target version requires it; otherwise its schema default is used.
public static FindCoordinatorResponseData read(JsonNode _node, short _version) { FindCoordinatorResponseData _object = new FindCoordinatorResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { if (_version >= 1) { throw new RuntimeException("FindCoordinatorResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = 0; } } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "FindCoordinatorResponseData"); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { if (_version <= 3) { throw new RuntimeException("FindCoordinatorResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = (short) 0; } } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "FindCoordinatorResponseData"); } JsonNode _errorMessageNode = _node.get("errorMessage"); if (_errorMessageNode == null) { if ((_version >= 1) && (_version <= 3)) { throw new RuntimeException("FindCoordinatorResponseData: unable to locate field 'errorMessage', which is mandatory in version " + _version); } else { _object.errorMessage = ""; } } else { if (_errorMessageNode.isNull()) { _object.errorMessage = null; } else { if (!_errorMessageNode.isTextual()) { throw new RuntimeException("FindCoordinatorResponseData expected a string type, but got " + _node.getNodeType()); } _object.errorMessage = _errorMessageNode.asText(); } } JsonNode _nodeIdNode = _node.get("nodeId"); if (_nodeIdNode == null) { if (_version <= 3) { throw new RuntimeException("FindCoordinatorResponseData: unable to locate field 'nodeId', which is mandatory in version " + _version); } else { _object.nodeId = 0; } } else { _object.nodeId = MessageUtil.jsonNodeToInt(_nodeIdNode, "FindCoordinatorResponseData"); } JsonNode _hostNode = _node.get("host"); if (_hostNode == null) { if (_version <= 3) { throw new RuntimeException("FindCoordinatorResponseData: unable to locate field 'host', which is mandatory in version " + _version); } else { _object.host = ""; } } else { if (!_hostNode.isTextual()) { throw new RuntimeException("FindCoordinatorResponseData expected a string type, but got " + _node.getNodeType()); } _object.host = _hostNode.asText(); } JsonNode _portNode = _node.get("port"); if (_portNode == null) { if (_version <= 3) { throw new RuntimeException("FindCoordinatorResponseData: unable to locate field 'port', which is mandatory in version " + _version); } else { _object.port = 0; } } else { _object.port = MessageUtil.jsonNodeToInt(_portNode, "FindCoordinatorResponseData"); } JsonNode _coordinatorsNode = _node.get("coordinators"); if (_coordinatorsNode == null) { if (_version >= 4) { throw new RuntimeException("FindCoordinatorResponseData: unable to locate field 'coordinators', which is mandatory in version " + _version); } else { _object.coordinators = new ArrayList<Coordinator>(0); } } else { if (!_coordinatorsNode.isArray()) { throw new RuntimeException("FindCoordinatorResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<Coordinator> _collection = new ArrayList<Coordinator>(_coordinatorsNode.size()); _object.coordinators = _collection; for (JsonNode _element : _coordinatorsNode) { _collection.add(CoordinatorJsonConverter.read(_element, _version)); } } return _object; }
// Renders the message as JSON for the given version; setting a non-default value on a field the
// version does not carry throws UnsupportedVersionException, mirroring the binary writer.
public static JsonNode write(FindCoordinatorResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_version >= 1) { _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); } if (_version <= 3) { _node.set("errorCode", new ShortNode(_object.errorCode)); } else { if (_object.errorCode != (short) 0) { throw new UnsupportedVersionException("Attempted to write a non-default errorCode at version " + _version); } } if ((_version >= 1) && (_version <= 3)) { if (_object.errorMessage == null) { _node.set("errorMessage", NullNode.instance); } else { _node.set("errorMessage", new TextNode(_object.errorMessage)); } } if (_version <= 3) { _node.set("nodeId", new IntNode(_object.nodeId)); } else { if (_object.nodeId != 0) { throw new UnsupportedVersionException("Attempted to write a non-default nodeId at version " + _version); } } if (_version <= 3) { _node.set("host", new TextNode(_object.host)); } else { if (!_object.host.equals("")) { throw new UnsupportedVersionException("Attempted to write a non-default host at version " + _version); } } if (_version <= 3) { _node.set("port", new IntNode(_object.port)); } else { if (_object.port != 0) { throw new UnsupportedVersionException("Attempted to write a non-default port at version " + _version); } } if (_version >= 4) { ArrayNode _coordinatorsArray = new ArrayNode(JsonNodeFactory.instance); for (Coordinator _element : _object.coordinators) { _coordinatorsArray.add(CoordinatorJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("coordinators", _coordinatorsArray); } else { if (!_object.coordinators.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default coordinators at version " + _version); } } return _node; }
// Convenience overload: serialize records by default.
public static JsonNode write(FindCoordinatorResponseData _object, short _version) { return write(_object, _version, true); }
// Converter for the nested Coordinator struct, which exists only in versions 4+.
public static class CoordinatorJsonConverter {
// Parses one Coordinator element; all fields are mandatory, errorMessage may be JSON null.
public static Coordinator read(JsonNode _node, short _version) { Coordinator _object = new Coordinator(); if (_version < 4) { throw new UnsupportedVersionException("Can't read version " + _version + " of Coordinator"); } JsonNode _keyNode = _node.get("key"); if (_keyNode == null) { throw new RuntimeException("Coordinator: unable to locate field 'key', which is mandatory in version " + _version); } else { if (!_keyNode.isTextual()) { throw new RuntimeException("Coordinator expected a string type, but got " + _node.getNodeType()); } _object.key = _keyNode.asText(); } JsonNode _nodeIdNode = _node.get("nodeId"); if (_nodeIdNode == null) { throw new RuntimeException("Coordinator: unable to locate field 'nodeId', which is mandatory in version " + _version); } else { _object.nodeId = MessageUtil.jsonNodeToInt(_nodeIdNode, "Coordinator"); } JsonNode _hostNode = _node.get("host"); if (_hostNode == null) { throw new RuntimeException("Coordinator: unable to locate field 'host', which is mandatory in version " + _version); } else { if (!_hostNode.isTextual()) { throw new RuntimeException("Coordinator expected a string type, but got " + _node.getNodeType()); } _object.host = _hostNode.asText(); } JsonNode _portNode = _node.get("port"); if (_portNode == null) { throw new RuntimeException("Coordinator: unable to locate field 'port', which is mandatory in version " + _version); } else { _object.port = MessageUtil.jsonNodeToInt(_portNode, "Coordinator"); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("Coordinator: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "Coordinator"); } JsonNode _errorMessageNode = _node.get("errorMessage"); if (_errorMessageNode == null) { throw new RuntimeException("Coordinator: unable to locate field 'errorMessage', which is mandatory in version " + _version); } else { if (_errorMessageNode.isNull()) { _object.errorMessage = null; } else { if (!_errorMessageNode.isTextual()) { throw new RuntimeException("Coordinator expected a string type, but got " + _node.getNodeType()); } _object.errorMessage = _errorMessageNode.asText(); } } return _object; }
// Renders one Coordinator element as a JSON object.
public static JsonNode write(Coordinator _object, short _version, boolean _serializeRecords) { if (_version < 4) { throw new UnsupportedVersionException("Can't write version " + _version + " of Coordinator"); } ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("key", new TextNode(_object.key)); _node.set("nodeId", new IntNode(_object.nodeId)); _node.set("host", new TextNode(_object.host)); _node.set("port", new IntNode(_object.port)); _node.set("errorCode", new ShortNode(_object.errorCode)); if (_object.errorMessage == null) { _node.set("errorMessage", NullNode.instance); } else { _node.set("errorMessage", new TextNode(_object.errorMessage)); } return _node; }
// Convenience overload: serialize records by default.
public static JsonNode write(Coordinator _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/HeartbeatRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
// Wire-format data class for the Heartbeat request (API key 12, versions 0-4).
// groupInstanceId exists only in v3+; v4 is the first flexible version (compact strings +
// tagged fields).
public class HeartbeatRequestData implements ApiMessage {
String groupId;            // the group id
int generationId;          // the generation of the group
String memberId;           // the member ID
String groupInstanceId;    // nullable; v3+ only (static membership)
private List<RawTaggedField> _unknownTaggedFields;
// Per-version schemas: v0-2 share one layout; v3 adds group_instance_id; v4 switches to
// compact strings and tagged fields.
public static final Schema SCHEMA_0 = new Schema( new Field("group_id", Type.STRING, "The group id."), new Field("generation_id", Type.INT32, "The generation of the group."), new Field("member_id", Type.STRING, "The member ID.") );
public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema SCHEMA_2 = SCHEMA_1;
public static final Schema SCHEMA_3 = new Schema( new Field("group_id", Type.STRING, "The group id."), new Field("generation_id", Type.INT32, "The generation of the group."), new Field("member_id", Type.STRING, "The member ID."), new Field("group_instance_id", Type.NULLABLE_STRING, "The unique identifier of the consumer instance provided by end user.") );
public static final Schema SCHEMA_4 = new Schema( new Field("group_id", Type.COMPACT_STRING, "The group id."), new Field("generation_id", Type.INT32, "The generation of the group."), new Field("member_id", Type.COMPACT_STRING, "The member ID."), new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, "The unique identifier of the consumer instance provided by end user."), TaggedFieldsSection.of( ) );
public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 };
public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 4;
// Deserializing constructor.
public HeartbeatRequestData(Readable _readable, short _version) { read(_readable, _version); }
// Default constructor: schema defaults (empty strings, zero generation, null instance id).
public HeartbeatRequestData() { this.groupId = ""; this.generationId = 0; this.memberId = ""; this.groupInstanceId = null; }
// Heartbeat API key.
@Override public short apiKey() { return 12; }
@Override public short lowestSupportedVersion() { return 0; }
@Override public short highestSupportedVersion() { return 4; }
// Deserializes from the wire. Strings are compact (varint length + 1) in v4+, otherwise
// INT16-length-prefixed; groupInstanceId is read only for v3+ and may be null; tagged fields
// are read only for v4+.
@Override public void read(Readable _readable, short _version) { { int length; if (_version >= 4) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field groupId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field groupId had invalid length " + length); } else { this.groupId = _readable.readString(length); } } this.generationId = _readable.readInt(); { int length; if (_version >= 4) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field memberId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field memberId had invalid length " + length); } else { this.memberId = _readable.readString(length); } } if (_version >= 3) { int length; if (_version >= 4) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.groupInstanceId = null; } else if (length > 0x7fff) { throw new RuntimeException("string field groupInstanceId had invalid length " + length); } else { this.groupInstanceId = _readable.readString(length); } } else { this.groupInstanceId = null; } this._unknownTaggedFields = null; if (_version >= 4) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } }
// Serializes to the wire; writing a non-null groupInstanceId at a version below 3 throws.
@Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(groupId); if (_version >= 4) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } _writable.writeInt(generationId); { byte[] _stringBytes = _cache.getSerializedValue(memberId); if (_version >= 4) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 3) { if (groupInstanceId == null) { if (_version >= 4) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(groupInstanceId); if (_version >= 4) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } else { if (this.groupInstanceId != null) { throw new UnsupportedVersionException("Attempted to write a non-default groupInstanceId at version " + _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 4) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } }
// Accumulates the serialized size, mirroring write() byte-for-byte, and caches the UTF-8
// encodings of the string fields so write() can reuse them.
@Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'groupId' field is too long to be serialized"); } _cache.cacheSerializedValue(groupId, _stringBytes); if (_version >= 4) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } _size.addBytes(4); { byte[] _stringBytes = memberId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'memberId' field is too long to be serialized"); } _cache.cacheSerializedValue(memberId, _stringBytes); if (_version >= 4) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_version >= 3) { if (groupInstanceId == null) { if (_version >= 4) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = groupInstanceId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'groupInstanceId' field is too long to be serialized"); } _cache.cacheSerializedValue(groupInstanceId, _stringBytes); if (_version >= 4) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } }
// Field-by-field structural equality, including unknown tagged fields.
@Override public boolean equals(Object obj) { if (!(obj instanceof HeartbeatRequestData)) return false; HeartbeatRequestData other = (HeartbeatRequestData) obj; if (this.groupId == null) { if (other.groupId != null) return false; } else { if (!this.groupId.equals(other.groupId)) return false; } if (generationId != other.generationId) return false; if (this.memberId == null) { if (other.memberId != null) return false; } else { if (!this.memberId.equals(other.memberId)) return false; } if (this.groupInstanceId == null) { if (other.groupInstanceId != null) return false; } else { if (!this.groupInstanceId.equals(other.groupInstanceId)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); }
// Standard 31-multiplier hash over the declared fields (tagged fields excluded here).
@Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode()); hashCode = 31 * hashCode + generationId; hashCode = 31 * hashCode + (memberId == null ? 0 : memberId.hashCode()); hashCode = 31 * hashCode + (groupInstanceId == null ? 0 : groupInstanceId.hashCode()); return hashCode; }
// Copy; all fields are primitives or immutable strings, so sharing is safe.
@Override public HeartbeatRequestData duplicate() { HeartbeatRequestData _duplicate = new HeartbeatRequestData(); _duplicate.groupId = groupId; _duplicate.generationId = generationId; _duplicate.memberId = memberId; if (groupInstanceId == null) { _duplicate.groupInstanceId = null; } else { _duplicate.groupInstanceId = groupInstanceId; } return _duplicate; }
// Human-readable rendering of all fields.
@Override public String toString() { return "HeartbeatRequestData(" + "groupId=" + ((groupId == null) ? "null" : "'" + groupId.toString() + "'") + ", generationId=" + generationId + ", memberId=" + ((memberId == null) ? "null" : "'" + memberId.toString() + "'") + ", groupInstanceId=" + ((groupInstanceId == null) ? "null" : "'" + groupInstanceId.toString() + "'") + ")"; }
// ---- accessors ----
public String groupId() { return this.groupId; }
public int generationId() { return this.generationId; }
public String memberId() { return this.memberId; }
public String groupInstanceId() { return this.groupInstanceId; }
// Lazily-created list of tagged fields with tags this schema does not know about.
@Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; }
// ---- fluent setters ----
public HeartbeatRequestData setGroupId(String v) { this.groupId = v; return this; }
public HeartbeatRequestData setGenerationId(int v) { this.generationId = v; return this; }
public HeartbeatRequestData setMemberId(String v) { this.memberId = v; return this; }
public HeartbeatRequestData setGroupInstanceId(String v) { this.groupInstanceId = v; return this; }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/HeartbeatRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.HeartbeatRequestData.*;

/**
 * Converts {@link HeartbeatRequestData} to and from Jackson JSON trees,
 * enforcing the same per-version field rules as the binary codec
 * (groupInstanceId is only valid from version 3 onward).
 */
public class HeartbeatRequestDataJsonConverter {
    /**
     * Builds a HeartbeatRequestData from a JSON node, validating that all
     * fields mandatory in {@code _version} are present and correctly typed.
     */
    public static HeartbeatRequestData read(JsonNode _node, short _version) {
        HeartbeatRequestData _object = new HeartbeatRequestData();
        JsonNode _groupIdNode = _node.get("groupId");
        if (_groupIdNode == null) {
            throw new RuntimeException("HeartbeatRequestData: unable to locate field 'groupId', which is mandatory in version " + _version);
        } else {
            if (!_groupIdNode.isTextual()) {
                throw new RuntimeException("HeartbeatRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.groupId = _groupIdNode.asText();
        }
        JsonNode _generationIdNode = _node.get("generationId");
        if (_generationIdNode == null) {
            throw new RuntimeException("HeartbeatRequestData: unable to locate field 'generationId', which is mandatory in version " + _version);
        } else {
            _object.generationId = MessageUtil.jsonNodeToInt(_generationIdNode, "HeartbeatRequestData");
        }
        JsonNode _memberIdNode = _node.get("memberId");
        if (_memberIdNode == null) {
            throw new RuntimeException("HeartbeatRequestData: unable to locate field 'memberId', which is mandatory in version " + _version);
        } else {
            if (!_memberIdNode.isTextual()) {
                throw new RuntimeException("HeartbeatRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.memberId = _memberIdNode.asText();
        }
        JsonNode _groupInstanceIdNode = _node.get("groupInstanceId");
        if (_groupInstanceIdNode == null) {
            // Mandatory from v3; defaults to null for older versions.
            if (_version >= 3) {
                throw new RuntimeException("HeartbeatRequestData: unable to locate field 'groupInstanceId', which is mandatory in version " + _version);
            } else {
                _object.groupInstanceId = null;
            }
        } else {
            if (_groupInstanceIdNode.isNull()) {
                _object.groupInstanceId = null;
            } else {
                if (!_groupInstanceIdNode.isTextual()) {
                    throw new RuntimeException("HeartbeatRequestData expected a string type, but got " + _node.getNodeType());
                }
                _object.groupInstanceId = _groupInstanceIdNode.asText();
            }
        }
        return _object;
    }

    /**
     * Serializes the message to a JSON object for {@code _version}.
     * Throws UnsupportedVersionException if a non-default groupInstanceId is
     * set but the version predates the field.
     */
    public static JsonNode write(HeartbeatRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("groupId", new TextNode(_object.groupId));
        _node.set("generationId", new IntNode(_object.generationId));
        _node.set("memberId", new TextNode(_object.memberId));
        if (_version >= 3) {
            if (_object.groupInstanceId == null) {
                _node.set("groupInstanceId", NullNode.instance);
            } else {
                _node.set("groupInstanceId", new TextNode(_object.groupInstanceId));
            }
        } else {
            if (_object.groupInstanceId != null) {
                throw new UnsupportedVersionException("Attempted to write a non-default groupInstanceId at version " + _version);
            }
        }
        return _node;
    }

    /** Convenience overload; record payloads are serialized by default. */
    public static JsonNode write(HeartbeatRequestData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/HeartbeatResponseData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Heartbeat response message (API key 12), versions 0-4.
 * throttleTimeMs was added in v1; v4 is the first flexible (tagged-field) version.
 */
public class HeartbeatResponseData implements ApiMessage {
    int throttleTimeMs;
    short errorCode;
    private List<RawTaggedField> _unknownTaggedFields;

    // v0: error code only.
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.")
        );

    // v1 adds throttle_time_ms; v2 and v3 are wire-identical to v1.
    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.")
        );

    public static final Schema SCHEMA_2 = SCHEMA_1;

    public static final Schema SCHEMA_3 = SCHEMA_2;

    // v4: flexible version with a (currently empty) tagged-fields section.
    public static final Schema SCHEMA_4 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3,
        SCHEMA_4
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 4;

    /** Deserializing constructor. */
    public HeartbeatResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: all fields at their protocol defaults. */
    public HeartbeatResponseData() {
        this.throttleTimeMs = 0;
        this.errorCode = (short) 0;
    }

    @Override
    public short apiKey() {
        return 12;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 4;
    }

    /** Reads fields in wire order; throttleTimeMs only exists from v1. */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version >= 1) {
            this.throttleTimeMs = _readable.readInt();
        } else {
            this.throttleTimeMs = 0;
        }
        this.errorCode = _readable.readShort();
        this._unknownTaggedFields = null;
        if (_version >= 4) {
            // Flexible versions carry a varint count of tagged fields; none are
            // recognized here, so all are preserved as raw unknown fields.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /** Writes fields in wire order, mirroring read(). */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 1) {
            _writable.writeInt(throttleTimeMs);
        }
        _writable.writeShort(errorCode);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 4) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /** Accumulates the exact serialized size; must stay in sync with write(). */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 1) {
            _size.addBytes(4);
        }
        _size.addBytes(2);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 4) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof HeartbeatResponseData)) return false;
        HeartbeatResponseData other = (HeartbeatResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (errorCode != other.errorCode) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // NOTE: generated hashCode intentionally skips _unknownTaggedFields.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + errorCode;
        return hashCode;
    }

    @Override
    public HeartbeatResponseData duplicate() {
        HeartbeatResponseData _duplicate = new HeartbeatResponseData();
        _duplicate.throttleTimeMs = throttleTimeMs;
        _duplicate.errorCode = errorCode;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "HeartbeatResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", errorCode=" + errorCode
            + ")";
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public short errorCode() {
        return this.errorCode;
    }

    /** Lazily allocates the tagged-field list on first access. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // Fluent setters (return this for chaining).
    public HeartbeatResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public HeartbeatResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/HeartbeatResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.HeartbeatResponseData.*;

/**
 * Converts {@link HeartbeatResponseData} to and from Jackson JSON trees,
 * applying the same per-version rules as the binary codec (throttleTimeMs
 * exists only from version 1).
 */
public class HeartbeatResponseDataJsonConverter {
    /** Builds a HeartbeatResponseData from JSON, validating mandatory fields. */
    public static HeartbeatResponseData read(JsonNode _node, short _version) {
        HeartbeatResponseData _object = new HeartbeatResponseData();
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            // Mandatory from v1; defaults to 0 for v0.
            if (_version >= 1) {
                throw new RuntimeException("HeartbeatResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
            } else {
                _object.throttleTimeMs = 0;
            }
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "HeartbeatResponseData");
        }
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("HeartbeatResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "HeartbeatResponseData");
        }
        return _object;
    }

    /** Serializes the message to a JSON object for the given version. */
    public static JsonNode write(HeartbeatResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        if (_version >= 1) {
            _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        }
        _node.set("errorCode", new ShortNode(_object.errorCode));
        return _node;
    }

    /** Convenience overload; record payloads are serialized by default. */
    public static JsonNode write(HeartbeatResponseData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/IncrementalAlterConfigsRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.ImplicitLinkedHashCollection;
import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * IncrementalAlterConfigs request message (API key 44), versions 0-1.
 * v1 is the first flexible (tagged-field, compact-encoded) version.
 */
public class IncrementalAlterConfigsRequestData implements ApiMessage {
    AlterConfigsResourceCollection resources;
    boolean validateOnly;
    private List<RawTaggedField> _unknownTaggedFields;

    // v0: classic (non-compact) array/string encodings.
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("resources", new ArrayOf(AlterConfigsResource.SCHEMA_0), "The incremental updates for each resource."),
            new Field("validate_only", Type.BOOLEAN, "True if we should validate the request, but not change the configurations.")
        );

    // v1: compact encodings plus a (currently empty) tagged-fields section.
    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("resources", new CompactArrayOf(AlterConfigsResource.SCHEMA_1), "The incremental updates for each resource."),
            new Field("validate_only", Type.BOOLEAN, "True if we should validate the request, but not change the configurations."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 1;

    /** Deserializing constructor. */
    public IncrementalAlterConfigsRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: empty resource collection, validateOnly=false. */
    public IncrementalAlterConfigsRequestData() {
        this.resources = new AlterConfigsResourceCollection(0);
        this.validateOnly = false;
    }

    @Override
    public short apiKey() {
        return 44;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    /**
     * Reads fields in wire order. v1 arrays are compact (varint length + 1);
     * v0 arrays use an int32 length. The remaining-bytes check guards against
     * over-allocation from a corrupt length prefix.
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            if (_version >= 1) {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field resources was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    AlterConfigsResourceCollection newCollection = new AlterConfigsResourceCollection(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new AlterConfigsResource(_readable, _version));
                    }
                    this.resources = newCollection;
                }
            } else {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field resources was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    AlterConfigsResourceCollection newCollection = new AlterConfigsResourceCollection(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new AlterConfigsResource(_readable, _version));
                    }
                    this.resources = newCollection;
                }
            }
        }
        this.validateOnly = _readable.readByte() != 0;
        this._unknownTaggedFields = null;
        if (_version >= 1) {
            // Preserve unrecognized tagged fields for round-tripping.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /** Writes fields in wire order, mirroring read(). */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 1) {
            _writable.writeUnsignedVarint(resources.size() + 1);
            for (AlterConfigsResource resourcesElement : resources) {
                resourcesElement.write(_writable, _cache, _version);
            }
        } else {
            _writable.writeInt(resources.size());
            for (AlterConfigsResource resourcesElement : resources) {
                resourcesElement.write(_writable, _cache, _version);
            }
        }
        _writable.writeByte(validateOnly ? (byte) 1 : (byte) 0);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 1) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /** Accumulates the exact serialized size; must stay in sync with write(). */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            if (_version >= 1) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(resources.size() + 1));
            } else {
                _size.addBytes(4);
            }
            for (AlterConfigsResource resourcesElement : resources) {
                resourcesElement.addSize(_size, _cache, _version);
            }
        }
        _size.addBytes(1);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 1) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof IncrementalAlterConfigsRequestData)) return false;
        IncrementalAlterConfigsRequestData other = (IncrementalAlterConfigsRequestData) obj;
        if (this.resources == null) {
            if (other.resources != null) return false;
        } else {
            if (!this.resources.equals(other.resources)) return false;
        }
        if (validateOnly != other.validateOnly) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (resources == null ? 0 : resources.hashCode());
        hashCode = 31 * hashCode + (validateOnly ? 1231 : 1237);
        return hashCode;
    }

    /** Deep copy: each contained resource is duplicated as well. */
    @Override
    public IncrementalAlterConfigsRequestData duplicate() {
        IncrementalAlterConfigsRequestData _duplicate = new IncrementalAlterConfigsRequestData();
        AlterConfigsResourceCollection newResources = new AlterConfigsResourceCollection(resources.size());
        for (AlterConfigsResource _element : resources) {
            newResources.add(_element.duplicate());
        }
        _duplicate.resources = newResources;
        _duplicate.validateOnly = validateOnly;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "IncrementalAlterConfigsRequestData("
            + "resources=" + MessageUtil.deepToString(resources.iterator())
            + ", validateOnly=" + (validateOnly ? "true" : "false")
            + ")";
    }

    public AlterConfigsResourceCollection resources() {
        return this.resources;
    }

    public boolean validateOnly() {
        return this.validateOnly;
    }

    /** Lazily allocates the tagged-field list on first access. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // Fluent setters (return this for chaining).
    public IncrementalAlterConfigsRequestData setResources(AlterConfigsResourceCollection v) {
        this.resources = v;
        return this;
    }

    public IncrementalAlterConfigsRequestData setValidateOnly(boolean v) {
        this.validateOnly = v;
        return this;
    }

    /**
     * One resource (e.g. topic or broker) plus its incremental config updates.
     * Implements the implicit-linked-hash element protocol (next/prev indices)
     * so it can live inside AlterConfigsResourceCollection keyed on
     * (resourceType, resourceName).
     */
    public static class AlterConfigsResource implements Message, ImplicitLinkedHashMultiCollection.Element {
        byte resourceType;
        String resourceName;
        AlterableConfigCollection configs;
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;
        private int prev;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("resource_type", Type.INT8, "The resource type."),
                new Field("resource_name", Type.STRING, "The resource name."),
                new Field("configs", new ArrayOf(AlterableConfig.SCHEMA_0), "The configurations.")
            );

        public static final Schema SCHEMA_1 =
            new Schema(
                new Field("resource_type", Type.INT8, "The resource type."),
                new Field("resource_name", Type.COMPACT_STRING, "The resource name."),
                new Field("configs", new CompactArrayOf(AlterableConfig.SCHEMA_1), "The configurations."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 1;

        /** Deserializing constructor; linked-list indices start invalid. */
        public AlterConfigsResource(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public AlterConfigsResource() {
            this.resourceType = (byte) 0;
            this.resourceName = "";
            this.configs = new AlterableConfigCollection(0);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        /** Reads fields in wire order; v1 uses compact string/array encodings. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AlterConfigsResource");
            }
            this.resourceType = _readable.readByte();
            {
                int length;
                if (_version >= 1) {
                    length = _readable.readUnsignedVarint() - 1;
                } else {
                    length = _readable.readShort();
                }
                if (length < 0) {
                    throw new RuntimeException("non-nullable field resourceName was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field resourceName had invalid length " + length);
                } else {
                    this.resourceName = _readable.readString(length);
                }
            }
            {
                if (_version >= 1) {
                    int arrayLength;
                    arrayLength = _readable.readUnsignedVarint() - 1;
                    if (arrayLength < 0) {
                        throw new RuntimeException("non-nullable field configs was serialized as null");
                    } else {
                        if (arrayLength > _readable.remaining()) {
                            throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                        }
                        AlterableConfigCollection newCollection = new AlterableConfigCollection(arrayLength);
                        for (int i = 0; i < arrayLength; i++) {
                            newCollection.add(new AlterableConfig(_readable, _version));
                        }
                        this.configs = newCollection;
                    }
                } else {
                    int arrayLength;
                    arrayLength = _readable.readInt();
                    if (arrayLength < 0) {
                        throw new RuntimeException("non-nullable field configs was serialized as null");
                    } else {
                        if (arrayLength > _readable.remaining()) {
                            throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                        }
                        AlterableConfigCollection newCollection = new AlterableConfigCollection(arrayLength);
                        for (int i = 0; i < arrayLength; i++) {
                            newCollection.add(new AlterableConfig(_readable, _version));
                        }
                        this.configs = newCollection;
                    }
                }
            }
            this._unknownTaggedFields = null;
            if (_version >= 1) {
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }

        /** Writes fields in wire order, mirroring read(). */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            _writable.writeByte(resourceType);
            {
                // Reuses the UTF-8 bytes cached by addSize().
                byte[] _stringBytes = _cache.getSerializedValue(resourceName);
                if (_version >= 1) {
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                } else {
                    _writable.writeShort((short) _stringBytes.length);
                }
                _writable.writeByteArray(_stringBytes);
            }
            if (_version >= 1) {
                _writable.writeUnsignedVarint(configs.size() + 1);
                for (AlterableConfig configsElement : configs) {
                    configsElement.write(_writable, _cache, _version);
                }
            } else {
                _writable.writeInt(configs.size());
                for (AlterableConfig configsElement : configs) {
                    configsElement.write(_writable, _cache, _version);
                }
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 1) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        /** Accumulates the serialized size; must stay in sync with write(). */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of AlterConfigsResource");
            }
            _size.addBytes(1);
            {
                byte[] _stringBytes = resourceName.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'resourceName' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(resourceName, _stringBytes);
                if (_version >= 1) {
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                } else {
                    _size.addBytes(_stringBytes.length + 2);
                }
            }
            {
                if (_version >= 1) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(configs.size() + 1));
                } else {
                    _size.addBytes(4);
                }
                for (AlterableConfig configsElement : configs) {
                    configsElement.addSize(_size, _cache, _version);
                }
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_version >= 1) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        /**
         * Collection-key equality: only (resourceType, resourceName), matching
         * the hashCode below; configs and tagged fields are excluded.
         */
        @Override
        public boolean elementKeysAreEqual(Object obj) {
            if (!(obj instanceof AlterConfigsResource)) return false;
            AlterConfigsResource other = (AlterConfigsResource) obj;
            if (resourceType != other.resourceType) return false;
            if (this.resourceName == null) {
                if (other.resourceName != null) return false;
            } else {
                if (!this.resourceName.equals(other.resourceName)) return false;
            }
            return true;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof AlterConfigsResource)) return false;
            AlterConfigsResource other = (AlterConfigsResource) obj;
            if (resourceType != other.resourceType) return false;
            if (this.resourceName == null) {
                if (other.resourceName != null) return false;
            } else {
                if (!this.resourceName.equals(other.resourceName)) return false;
            }
            if (this.configs == null) {
                if (other.configs != null) return false;
            } else {
                if (!this.configs.equals(other.configs)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        // NOTE: hashCode covers only the collection key fields, consistent
        // with elementKeysAreEqual (not with full equals).
        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + resourceType;
            hashCode = 31 * hashCode + (resourceName == null ? 0 : resourceName.hashCode());
            return hashCode;
        }

        /** Deep copy: the configs collection is duplicated element-wise. */
        @Override
        public AlterConfigsResource duplicate() {
            AlterConfigsResource _duplicate = new AlterConfigsResource();
            _duplicate.resourceType = resourceType;
            _duplicate.resourceName = resourceName;
            AlterableConfigCollection newConfigs = new AlterableConfigCollection(configs.size());
            for (AlterableConfig _element : configs) {
                newConfigs.add(_element.duplicate());
            }
            _duplicate.configs = newConfigs;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "AlterConfigsResource("
                + "resourceType=" + resourceType
                + ", resourceName=" + ((resourceName == null) ? "null" : "'" + resourceName.toString() + "'")
                + ", configs=" + MessageUtil.deepToString(configs.iterator())
                + ")";
        }

        public byte resourceType() {
            return this.resourceType;
        }

        public String resourceName() {
            return this.resourceName;
        }

        public AlterableConfigCollection configs() {
            return this.configs;
        }

        // Implicit-linked-hash-collection bookkeeping.
        @Override
        public int next() {
            return this.next;
        }

        @Override
        public int prev() {
            return this.prev;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public AlterConfigsResource setResourceType(byte v) {
            this.resourceType = v;
            return this;
        }

        public AlterConfigsResource setResourceName(String v) {
            this.resourceName = v;
            return this;
        }

        public AlterConfigsResource setConfigs(AlterableConfigCollection v) {
            this.configs = v;
            return this;
        }

        @Override
        public void setNext(int v) {
            this.next = v;
        }

        @Override
        public void setPrev(int v) {
            this.prev = v;
        }
    }

    /**
     * A single config entry: key name, operation (Set/Delete/Append/Subtract
     * as an int8), and a nullable value.
     * (Definition continues beyond this chunk of the file.)
     */
    public static class AlterableConfig implements Message, ImplicitLinkedHashMultiCollection.Element {
        String name;
        byte configOperation;
        String value;
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;
        private int prev;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("name", Type.STRING, "The configuration key name."),
                new Field("config_operation", Type.INT8, "The type (Set, Delete, Append, Subtract) of operation."),
                new Field("value", Type.NULLABLE_STRING, "The value to set for the configuration key.")
            );

        public static final Schema SCHEMA_1 =
            new Schema(
                new Field("name", Type.COMPACT_STRING, "The configuration key name."),
                new Field("config_operation", Type.INT8, "The type (Set, Delete, Append, Subtract) of operation."),
                new Field("value", Type.COMPACT_NULLABLE_STRING, "The value to set for the configuration key."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 1; public AlterableConfig(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public AlterableConfig() { this.name = ""; this.configOperation = (byte) 0; this.value = ""; this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 1; } @Override public void read(Readable _readable, short _version) { if (_version > 1) { throw new UnsupportedVersionException("Can't read version " + _version + " of AlterableConfig"); } { int length; if (_version >= 1) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } this.configOperation = _readable.readByte(); { int length; if (_version >= 1) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.value = null; } else if (length > 0x7fff) { throw new RuntimeException("string field value had invalid length " + length); } else { this.value = _readable.readString(length); } } this._unknownTaggedFields = null; if (_version >= 1) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, 
short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 1) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } _writable.writeByte(configOperation); if (value == null) { if (_version >= 1) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(value); if (_version >= 1) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 1) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 1) { throw new UnsupportedVersionException("Can't size version " + _version + " of AlterableConfig"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); if (_version >= 1) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } _size.addBytes(1); if (value == null) { if (_version >= 1) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = value.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new 
RuntimeException("'value' field is too long to be serialized"); } _cache.cacheSerializedValue(value, _stringBytes); if (_version >= 1) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 1) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof AlterableConfig)) return false; AlterableConfig other = (AlterableConfig) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (configOperation != other.configOperation) return false; return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterableConfig)) return false; AlterableConfig other = (AlterableConfig) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (configOperation != other.configOperation) return false; if (this.value == null) { if (other.value != null) return false; } else { if (!this.value.equals(other.value)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 
0 : name.hashCode()); hashCode = 31 * hashCode + configOperation; return hashCode; } @Override public AlterableConfig duplicate() { AlterableConfig _duplicate = new AlterableConfig(); _duplicate.name = name; _duplicate.configOperation = configOperation; if (value == null) { _duplicate.value = null; } else { _duplicate.value = value; } return _duplicate; } @Override public String toString() { return "AlterableConfig(" + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'") + ", configOperation=" + configOperation + ", value=" + ((value == null) ? "null" : "'" + value.toString() + "'") + ")"; } public String name() { return this.name; } public byte configOperation() { return this.configOperation; } public String value() { return this.value; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterableConfig setName(String v) { this.name = v; return this; } public AlterableConfig setConfigOperation(byte v) { this.configOperation = v; return this; } public AlterableConfig setValue(String v) { this.value = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class AlterableConfigCollection extends ImplicitLinkedHashMultiCollection<AlterableConfig> { public AlterableConfigCollection() { super(); } public AlterableConfigCollection(int expectedNumElements) { super(expectedNumElements); } public AlterableConfigCollection(Iterator<AlterableConfig> iterator) { super(iterator); } public AlterableConfig find(String name, byte configOperation) { AlterableConfig _key = new AlterableConfig(); _key.setName(name); _key.setConfigOperation(configOperation); return find(_key); } public List<AlterableConfig> findAll(String name, byte configOperation) { 
AlterableConfig _key = new AlterableConfig(); _key.setName(name); _key.setConfigOperation(configOperation); return findAll(_key); } public AlterableConfigCollection duplicate() { AlterableConfigCollection _duplicate = new AlterableConfigCollection(size()); for (AlterableConfig _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } public static class AlterConfigsResourceCollection extends ImplicitLinkedHashMultiCollection<AlterConfigsResource> { public AlterConfigsResourceCollection() { super(); } public AlterConfigsResourceCollection(int expectedNumElements) { super(expectedNumElements); } public AlterConfigsResourceCollection(Iterator<AlterConfigsResource> iterator) { super(iterator); } public AlterConfigsResource find(byte resourceType, String resourceName) { AlterConfigsResource _key = new AlterConfigsResource(); _key.setResourceType(resourceType); _key.setResourceName(resourceName); return find(_key); } public List<AlterConfigsResource> findAll(byte resourceType, String resourceName) { AlterConfigsResource _key = new AlterConfigsResource(); _key.setResourceType(resourceType); _key.setResourceName(resourceName); return findAll(_key); } public AlterConfigsResourceCollection duplicate() { AlterConfigsResourceCollection _duplicate = new AlterConfigsResourceCollection(size()); for (AlterConfigsResource _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/IncrementalAlterConfigsRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.BooleanNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.NullNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.*; public class IncrementalAlterConfigsRequestDataJsonConverter { public static IncrementalAlterConfigsRequestData read(JsonNode _node, short _version) { IncrementalAlterConfigsRequestData _object = new IncrementalAlterConfigsRequestData(); JsonNode _resourcesNode = _node.get("resources"); if (_resourcesNode == null) { throw new RuntimeException("IncrementalAlterConfigsRequestData: unable to locate field 'resources', which is mandatory in version " + _version); } else { if (!_resourcesNode.isArray()) { throw new 
RuntimeException("IncrementalAlterConfigsRequestData expected a JSON array, but got " + _node.getNodeType()); } AlterConfigsResourceCollection _collection = new AlterConfigsResourceCollection(_resourcesNode.size()); _object.resources = _collection; for (JsonNode _element : _resourcesNode) { _collection.add(AlterConfigsResourceJsonConverter.read(_element, _version)); } } JsonNode _validateOnlyNode = _node.get("validateOnly"); if (_validateOnlyNode == null) { throw new RuntimeException("IncrementalAlterConfigsRequestData: unable to locate field 'validateOnly', which is mandatory in version " + _version); } else { if (!_validateOnlyNode.isBoolean()) { throw new RuntimeException("IncrementalAlterConfigsRequestData expected Boolean type, but got " + _node.getNodeType()); } _object.validateOnly = _validateOnlyNode.asBoolean(); } return _object; } public static JsonNode write(IncrementalAlterConfigsRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); ArrayNode _resourcesArray = new ArrayNode(JsonNodeFactory.instance); for (AlterConfigsResource _element : _object.resources) { _resourcesArray.add(AlterConfigsResourceJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("resources", _resourcesArray); _node.set("validateOnly", BooleanNode.valueOf(_object.validateOnly)); return _node; } public static JsonNode write(IncrementalAlterConfigsRequestData _object, short _version) { return write(_object, _version, true); } public static class AlterConfigsResourceJsonConverter { public static AlterConfigsResource read(JsonNode _node, short _version) { AlterConfigsResource _object = new AlterConfigsResource(); JsonNode _resourceTypeNode = _node.get("resourceType"); if (_resourceTypeNode == null) { throw new RuntimeException("AlterConfigsResource: unable to locate field 'resourceType', which is mandatory in version " + _version); } else { _object.resourceType = 
MessageUtil.jsonNodeToByte(_resourceTypeNode, "AlterConfigsResource"); } JsonNode _resourceNameNode = _node.get("resourceName"); if (_resourceNameNode == null) { throw new RuntimeException("AlterConfigsResource: unable to locate field 'resourceName', which is mandatory in version " + _version); } else { if (!_resourceNameNode.isTextual()) { throw new RuntimeException("AlterConfigsResource expected a string type, but got " + _node.getNodeType()); } _object.resourceName = _resourceNameNode.asText(); } JsonNode _configsNode = _node.get("configs"); if (_configsNode == null) { throw new RuntimeException("AlterConfigsResource: unable to locate field 'configs', which is mandatory in version " + _version); } else { if (!_configsNode.isArray()) { throw new RuntimeException("AlterConfigsResource expected a JSON array, but got " + _node.getNodeType()); } AlterableConfigCollection _collection = new AlterableConfigCollection(_configsNode.size()); _object.configs = _collection; for (JsonNode _element : _configsNode) { _collection.add(AlterableConfigJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(AlterConfigsResource _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("resourceType", new ShortNode(_object.resourceType)); _node.set("resourceName", new TextNode(_object.resourceName)); ArrayNode _configsArray = new ArrayNode(JsonNodeFactory.instance); for (AlterableConfig _element : _object.configs) { _configsArray.add(AlterableConfigJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("configs", _configsArray); return _node; } public static JsonNode write(AlterConfigsResource _object, short _version) { return write(_object, _version, true); } } public static class AlterableConfigJsonConverter { public static AlterableConfig read(JsonNode _node, short _version) { AlterableConfig _object = new AlterableConfig(); JsonNode _nameNode = 
_node.get("name"); if (_nameNode == null) { throw new RuntimeException("AlterableConfig: unable to locate field 'name', which is mandatory in version " + _version); } else { if (!_nameNode.isTextual()) { throw new RuntimeException("AlterableConfig expected a string type, but got " + _node.getNodeType()); } _object.name = _nameNode.asText(); } JsonNode _configOperationNode = _node.get("configOperation"); if (_configOperationNode == null) { throw new RuntimeException("AlterableConfig: unable to locate field 'configOperation', which is mandatory in version " + _version); } else { _object.configOperation = MessageUtil.jsonNodeToByte(_configOperationNode, "AlterableConfig"); } JsonNode _valueNode = _node.get("value"); if (_valueNode == null) { throw new RuntimeException("AlterableConfig: unable to locate field 'value', which is mandatory in version " + _version); } else { if (_valueNode.isNull()) { _object.value = null; } else { if (!_valueNode.isTextual()) { throw new RuntimeException("AlterableConfig expected a string type, but got " + _node.getNodeType()); } _object.value = _valueNode.asText(); } } return _object; } public static JsonNode write(AlterableConfig _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("name", new TextNode(_object.name)); _node.set("configOperation", new ShortNode(_object.configOperation)); if (_object.value == null) { _node.set("value", NullNode.instance); } else { _node.set("value", new TextNode(_object.value)); } return _node; } public static JsonNode write(AlterableConfig _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/IncrementalAlterConfigsResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class 
IncrementalAlterConfigsResponseData implements ApiMessage { int throttleTimeMs; List<AlterConfigsResourceResponse> responses; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("responses", new ArrayOf(AlterConfigsResourceResponse.SCHEMA_0), "The responses for each resource.") ); public static final Schema SCHEMA_1 = new Schema( new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("responses", new CompactArrayOf(AlterConfigsResourceResponse.SCHEMA_1), "The responses for each resource."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 1; public IncrementalAlterConfigsResponseData(Readable _readable, short _version) { read(_readable, _version); } public IncrementalAlterConfigsResponseData() { this.throttleTimeMs = 0; this.responses = new ArrayList<AlterConfigsResourceResponse>(0); } @Override public short apiKey() { return 44; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 1; } @Override public void read(Readable _readable, short _version) { this.throttleTimeMs = _readable.readInt(); { if (_version >= 1) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field responses was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + 
_readable.remaining() + " bytes remaining."); } ArrayList<AlterConfigsResourceResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterConfigsResourceResponse(_readable, _version)); } this.responses = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field responses was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<AlterConfigsResourceResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterConfigsResourceResponse(_readable, _version)); } this.responses = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 1) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(throttleTimeMs); if (_version >= 1) { _writable.writeUnsignedVarint(responses.size() + 1); for (AlterConfigsResourceResponse responsesElement : responses) { responsesElement.write(_writable, _cache, _version); } } else { _writable.writeInt(responses.size()); for (AlterConfigsResourceResponse responsesElement : responses) { responsesElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 1) { 
_writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); { if (_version >= 1) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(responses.size() + 1)); } else { _size.addBytes(4); } for (AlterConfigsResourceResponse responsesElement : responses) { responsesElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 1) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof IncrementalAlterConfigsResponseData)) return false; IncrementalAlterConfigsResponseData other = (IncrementalAlterConfigsResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (this.responses == null) { if (other.responses != null) return false; } else { if (!this.responses.equals(other.responses)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + (responses == null ? 
0 : responses.hashCode()); return hashCode; } @Override public IncrementalAlterConfigsResponseData duplicate() { IncrementalAlterConfigsResponseData _duplicate = new IncrementalAlterConfigsResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; ArrayList<AlterConfigsResourceResponse> newResponses = new ArrayList<AlterConfigsResourceResponse>(responses.size()); for (AlterConfigsResourceResponse _element : responses) { newResponses.add(_element.duplicate()); } _duplicate.responses = newResponses; return _duplicate; } @Override public String toString() { return "IncrementalAlterConfigsResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", responses=" + MessageUtil.deepToString(responses.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public List<AlterConfigsResourceResponse> responses() { return this.responses; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public IncrementalAlterConfigsResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public IncrementalAlterConfigsResponseData setResponses(List<AlterConfigsResourceResponse> v) { this.responses = v; return this; } public static class AlterConfigsResourceResponse implements Message { short errorCode; String errorMessage; byte resourceType; String resourceName; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("error_code", Type.INT16, "The resource error code."), new Field("error_message", Type.NULLABLE_STRING, "The resource error message, or null if there was no error."), new Field("resource_type", Type.INT8, "The resource type."), new Field("resource_name", Type.STRING, "The resource name.") ); public static final Schema SCHEMA_1 = new Schema( new Field("error_code", Type.INT16, "The resource error code."), new Field("error_message", Type.COMPACT_NULLABLE_STRING, 
"The resource error message, or null if there was no error."), new Field("resource_type", Type.INT8, "The resource type."), new Field("resource_name", Type.COMPACT_STRING, "The resource name."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 1; public AlterConfigsResourceResponse(Readable _readable, short _version) { read(_readable, _version); } public AlterConfigsResourceResponse() { this.errorCode = (short) 0; this.errorMessage = ""; this.resourceType = (byte) 0; this.resourceName = ""; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 1; } @Override public void read(Readable _readable, short _version) { if (_version > 1) { throw new UnsupportedVersionException("Can't read version " + _version + " of AlterConfigsResourceResponse"); } this.errorCode = _readable.readShort(); { int length; if (_version >= 1) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.errorMessage = null; } else if (length > 0x7fff) { throw new RuntimeException("string field errorMessage had invalid length " + length); } else { this.errorMessage = _readable.readString(length); } } this.resourceType = _readable.readByte(); { int length; if (_version >= 1) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field resourceName was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field resourceName had invalid length " + length); } else { this.resourceName = _readable.readString(length); } } this._unknownTaggedFields = null; if (_version >= 1) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = 
_readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeShort(errorCode); if (errorMessage == null) { if (_version >= 1) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(errorMessage); if (_version >= 1) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } _writable.writeByte(resourceType); { byte[] _stringBytes = _cache.getSerializedValue(resourceName); if (_version >= 1) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 1) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 1) { throw new UnsupportedVersionException("Can't size version " + _version + " of AlterConfigsResourceResponse"); } _size.addBytes(2); if (errorMessage == null) { if (_version >= 1) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new 
RuntimeException("'errorMessage' field is too long to be serialized"); } _cache.cacheSerializedValue(errorMessage, _stringBytes); if (_version >= 1) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } _size.addBytes(1); { byte[] _stringBytes = resourceName.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'resourceName' field is too long to be serialized"); } _cache.cacheSerializedValue(resourceName, _stringBytes); if (_version >= 1) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 1) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterConfigsResourceResponse)) return false; AlterConfigsResourceResponse other = (AlterConfigsResourceResponse) obj; if (errorCode != other.errorCode) return false; if (this.errorMessage == null) { if (other.errorMessage != null) return false; } else { if (!this.errorMessage.equals(other.errorMessage)) return false; } if (resourceType != other.resourceType) return false; if (this.resourceName == null) { if (other.resourceName != null) return false; } else { if (!this.resourceName.equals(other.resourceName)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override 
public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode()); hashCode = 31 * hashCode + resourceType; hashCode = 31 * hashCode + (resourceName == null ? 0 : resourceName.hashCode()); return hashCode; } @Override public AlterConfigsResourceResponse duplicate() { AlterConfigsResourceResponse _duplicate = new AlterConfigsResourceResponse(); _duplicate.errorCode = errorCode; if (errorMessage == null) { _duplicate.errorMessage = null; } else { _duplicate.errorMessage = errorMessage; } _duplicate.resourceType = resourceType; _duplicate.resourceName = resourceName; return _duplicate; } @Override public String toString() { return "AlterConfigsResourceResponse(" + "errorCode=" + errorCode + ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'") + ", resourceType=" + resourceType + ", resourceName=" + ((resourceName == null) ? "null" : "'" + resourceName.toString() + "'") + ")"; } public short errorCode() { return this.errorCode; } public String errorMessage() { return this.errorMessage; } public byte resourceType() { return this.resourceType; } public String resourceName() { return this.resourceName; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterConfigsResourceResponse setErrorCode(short v) { this.errorCode = v; return this; } public AlterConfigsResourceResponse setErrorMessage(String v) { this.errorMessage = v; return this; } public AlterConfigsResourceResponse setResourceType(byte v) { this.resourceType = v; return this; } public AlterConfigsResourceResponse setResourceName(String v) { this.resourceName = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/IncrementalAlterConfigsResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.*;

/**
 * Converts {@link IncrementalAlterConfigsResponseData} to and from a Jackson JSON tree.
 *
 * <p>Generated converter: field names in the JSON ("throttleTimeMs", "responses", ...)
 * are fixed by the generator and must not be changed by hand. Missing mandatory fields
 * cause a {@link RuntimeException} on read.
 */
public class IncrementalAlterConfigsResponseDataJsonConverter {
    /**
     * Builds an {@link IncrementalAlterConfigsResponseData} from the given JSON node.
     *
     * @param _node    the JSON object to read from
     * @param _version the message version being deserialized
     * @throws RuntimeException if a mandatory field is absent or has the wrong JSON type
     */
    public static IncrementalAlterConfigsResponseData read(JsonNode _node, short _version) {
        IncrementalAlterConfigsResponseData _object = new IncrementalAlterConfigsResponseData();
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            throw new RuntimeException("IncrementalAlterConfigsResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "IncrementalAlterConfigsResponseData");
        }
        JsonNode _responsesNode = _node.get("responses");
        if (_responsesNode == null) {
            throw new RuntimeException("IncrementalAlterConfigsResponseData: unable to locate field 'responses', which is mandatory in version " + _version);
        } else {
            if (!_responsesNode.isArray()) {
                throw new RuntimeException("IncrementalAlterConfigsResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            // Pre-size the list and delegate each element to the nested per-resource converter.
            ArrayList<AlterConfigsResourceResponse> _collection = new ArrayList<AlterConfigsResourceResponse>(_responsesNode.size());
            _object.responses = _collection;
            for (JsonNode _element : _responsesNode) {
                _collection.add(AlterConfigsResourceResponseJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes the given data object into a JSON tree.
     *
     * @param _serializeRecords passed through to nested converters (no record fields
     *                          exist in this message, so it has no visible effect here)
     */
    public static JsonNode write(IncrementalAlterConfigsResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        ArrayNode _responsesArray = new ArrayNode(JsonNodeFactory.instance);
        for (AlterConfigsResourceResponse _element : _object.responses) {
            _responsesArray.add(AlterConfigsResourceResponseJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("responses", _responsesArray);
        return _node;
    }

    /** Convenience overload that serializes with records included. */
    public static JsonNode write(IncrementalAlterConfigsResponseData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the nested per-resource response element. */
    public static class AlterConfigsResourceResponseJsonConverter {
        /**
         * Builds an {@link AlterConfigsResourceResponse} from the given JSON node.
         * All four fields are mandatory; {@code errorMessage} may be JSON null.
         */
        public static AlterConfigsResourceResponse read(JsonNode _node, short _version) {
            AlterConfigsResourceResponse _object = new AlterConfigsResourceResponse();
            JsonNode _errorCodeNode = _node.get("errorCode");
            if (_errorCodeNode == null) {
                throw new RuntimeException("AlterConfigsResourceResponse: unable to locate field 'errorCode', which is mandatory in version " + _version);
            } else {
                _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "AlterConfigsResourceResponse");
            }
            JsonNode _errorMessageNode = _node.get("errorMessage");
            if (_errorMessageNode == null) {
                throw new RuntimeException("AlterConfigsResourceResponse: unable to locate field 'errorMessage', which is mandatory in version " + _version);
            } else {
                // Nullable string: JSON null maps to a null Java reference.
                if (_errorMessageNode.isNull()) {
                    _object.errorMessage = null;
                } else {
                    if (!_errorMessageNode.isTextual()) {
                        throw new RuntimeException("AlterConfigsResourceResponse expected a string type, but got " + _node.getNodeType());
                    }
                    _object.errorMessage = _errorMessageNode.asText();
                }
            }
            JsonNode _resourceTypeNode = _node.get("resourceType");
            if (_resourceTypeNode == null) {
                throw new RuntimeException("AlterConfigsResourceResponse: unable to locate field 'resourceType', which is mandatory in version " + _version);
            } else {
                _object.resourceType = MessageUtil.jsonNodeToByte(_resourceTypeNode, "AlterConfigsResourceResponse");
            }
            JsonNode _resourceNameNode = _node.get("resourceName");
            if (_resourceNameNode == null) {
                throw new RuntimeException("AlterConfigsResourceResponse: unable to locate field 'resourceName', which is mandatory in version " + _version);
            } else {
                // Non-nullable string: JSON null is not accepted here.
                if (!_resourceNameNode.isTextual()) {
                    throw new RuntimeException("AlterConfigsResourceResponse expected a string type, but got " + _node.getNodeType());
                }
                _object.resourceName = _resourceNameNode.asText();
            }
            return _object;
        }

        /**
         * Serializes the given element into a JSON object.
         * Note: the byte-valued {@code resourceType} is emitted as a ShortNode.
         */
        public static JsonNode write(AlterConfigsResourceResponse _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("errorCode", new ShortNode(_object.errorCode));
            if (_object.errorMessage == null) {
                _node.set("errorMessage", NullNode.instance);
            } else {
                _node.set("errorMessage", new TextNode(_object.errorMessage));
            }
            _node.set("resourceType", new ShortNode(_object.resourceType));
            _node.set("resourceName", new TextNode(_object.resourceName));
            return _node;
        }

        /** Convenience overload that serializes with records included. */
        public static JsonNode write(AlterConfigsResourceResponse _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/InitProducerIdRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Generated message class for the InitProducerId request (api key 22, versions 0-4).
 *
 * <p>Version layout visible in the schemas below: v0-1 use classic (length-prefixed)
 * strings and no tagged fields; v2+ use compact strings and carry a tagged-fields
 * section; v3+ add {@code producerId} and {@code producerEpoch}. The statement order
 * in {@link #read}, {@link #write} and {@link #addSize} defines the wire format and
 * must not be altered.
 */
public class InitProducerIdRequestData implements ApiMessage {
    String transactionalId;          // nullable; "" by default
    int transactionTimeoutMs;
    long producerId;                 // only on the wire for version >= 3; default -1
    short producerEpoch;             // only on the wire for version >= 3; default -1
    private List<RawTaggedField> _unknownTaggedFields;  // lazily created; tags not known to this version

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("transactional_id", Type.NULLABLE_STRING, "The transactional id, or null if the producer is not transactional."),
            new Field("transaction_timeout_ms", Type.INT32, "The time in ms to wait before aborting idle transactions sent by this producer. This is only relevant if a TransactionalId has been defined.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    // v2 switches to the flexible format: compact strings plus a tagged-fields section.
    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("transactional_id", Type.COMPACT_NULLABLE_STRING, "The transactional id, or null if the producer is not transactional."),
            new Field("transaction_timeout_ms", Type.INT32, "The time in ms to wait before aborting idle transactions sent by this producer. This is only relevant if a TransactionalId has been defined."),
            TaggedFieldsSection.of(
            )
        );

    // v3 adds producer_id / producer_epoch for epoch bumping on id reuse.
    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("transactional_id", Type.COMPACT_NULLABLE_STRING, "The transactional id, or null if the producer is not transactional."),
            new Field("transaction_timeout_ms", Type.INT32, "The time in ms to wait before aborting idle transactions sent by this producer. This is only relevant if a TransactionalId has been defined."),
            new Field("producer_id", Type.INT64, "The producer id. This is used to disambiguate requests if a transactional id is reused following its expiration."),
            new Field("producer_epoch", Type.INT16, "The producer's current epoch. This will be checked against the producer epoch on the broker, and the request will return an error if they do not match."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema SCHEMA_4 = SCHEMA_3;

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3,
        SCHEMA_4
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 4;

    /** Deserializing constructor: reads the fields for {@code _version} from {@code _readable}. */
    public InitProducerIdRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: initializes every field to its schema default. */
    public InitProducerIdRequestData() {
        this.transactionalId = "";
        this.transactionTimeoutMs = 0;
        this.producerId = -1L;
        this.producerEpoch = (short) -1;
    }

    @Override
    public short apiKey() {
        return 22;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 4;
    }

    /**
     * Reads this message from the wire. For v2+ the nullable string is compact
     * (unsigned varint length + 1, 0 meaning null); older versions use an INT16
     * length with -1 meaning null. Fields absent in {@code _version} are reset
     * to their defaults. Unknown tagged fields (v2+) are preserved verbatim.
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            if (_version >= 2) {
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readShort();
            }
            if (length < 0) {
                this.transactionalId = null;
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field transactionalId had invalid length " + length);
            } else {
                this.transactionalId = _readable.readString(length);
            }
        }
        this.transactionTimeoutMs = _readable.readInt();
        if (_version >= 3) {
            this.producerId = _readable.readLong();
        } else {
            this.producerId = -1L;
        }
        if (_version >= 3) {
            this.producerEpoch = _readable.readShort();
        } else {
            this.producerEpoch = (short) -1;
        }
        this._unknownTaggedFields = null;
        if (_version >= 2) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        // No tags are defined for this message; keep unknown ones round-trippable.
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Writes this message to the wire in the layout for {@code _version}.
     * Serialized string bytes are fetched from {@code _cache} (populated by a
     * prior {@link #addSize} pass). Writing a non-default producerId/producerEpoch
     * at a version that cannot carry them throws {@link UnsupportedVersionException}.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (transactionalId == null) {
            if (_version >= 2) {
                _writable.writeUnsignedVarint(0);
            } else {
                _writable.writeShort((short) -1);
            }
        } else {
            byte[] _stringBytes = _cache.getSerializedValue(transactionalId);
            if (_version >= 2) {
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
            } else {
                _writable.writeShort((short) _stringBytes.length);
            }
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeInt(transactionTimeoutMs);
        if (_version >= 3) {
            _writable.writeLong(producerId);
        } else {
            if (this.producerId != -1L) {
                throw new UnsupportedVersionException("Attempted to write a non-default producerId at version " + _version);
            }
        }
        if (_version >= 3) {
            _writable.writeShort(producerEpoch);
        } else {
            if (this.producerEpoch != (short) -1) {
                throw new UnsupportedVersionException("Attempted to write a non-default producerEpoch at version " + _version);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 2) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this message for {@code _version} into
     * {@code _size}, caching UTF-8 string bytes in {@code _cache} so that the
     * subsequent {@link #write} pass reuses them.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (transactionalId == null) {
            if (_version >= 2) {
                _size.addBytes(1);
            } else {
                _size.addBytes(2);
            }
        } else {
            byte[] _stringBytes = transactionalId.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'transactionalId' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(transactionalId, _stringBytes);
            if (_version >= 2) {
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            } else {
                _size.addBytes(_stringBytes.length + 2);
            }
        }
        _size.addBytes(4);
        if (_version >= 3) {
            _size.addBytes(8);
        }
        if (_version >= 3) {
            _size.addBytes(2);
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 2) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /** Field-by-field equality, including unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof InitProducerIdRequestData)) return false;
        InitProducerIdRequestData other = (InitProducerIdRequestData) obj;
        if (this.transactionalId == null) {
            if (other.transactionalId != null) return false;
        } else {
            if (!this.transactionalId.equals(other.transactionalId)) return false;
        }
        if (transactionTimeoutMs != other.transactionTimeoutMs) return false;
        if (producerId != other.producerId) return false;
        if (producerEpoch != other.producerEpoch) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    /** Hash over the declared fields (unknown tagged fields are intentionally excluded). */
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (transactionalId == null ? 0 : transactionalId.hashCode());
        hashCode = 31 * hashCode + transactionTimeoutMs;
        hashCode = 31 * hashCode + ((int) (producerId >> 32) ^ (int) producerId);
        hashCode = 31 * hashCode + producerEpoch;
        return hashCode;
    }

    /** Returns a field-by-field copy (Strings are shared, which is safe as they are immutable). */
    @Override
    public InitProducerIdRequestData duplicate() {
        InitProducerIdRequestData _duplicate = new InitProducerIdRequestData();
        if (transactionalId == null) {
            _duplicate.transactionalId = null;
        } else {
            _duplicate.transactionalId = transactionalId;
        }
        _duplicate.transactionTimeoutMs = transactionTimeoutMs;
        _duplicate.producerId = producerId;
        _duplicate.producerEpoch = producerEpoch;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "InitProducerIdRequestData("
            + "transactionalId=" + ((transactionalId == null) ? "null" : "'" + transactionalId.toString() + "'")
            + ", transactionTimeoutMs=" + transactionTimeoutMs
            + ", producerId=" + producerId
            + ", producerEpoch=" + producerEpoch
            + ")";
    }

    public String transactionalId() {
        return this.transactionalId;
    }

    public int transactionTimeoutMs() {
        return this.transactionTimeoutMs;
    }

    public long producerId() {
        return this.producerId;
    }

    public short producerEpoch() {
        return this.producerEpoch;
    }

    /** Lazily creates and returns the mutable list of unknown tagged fields. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public InitProducerIdRequestData setTransactionalId(String v) {
        this.transactionalId = v;
        return this;
    }

    public InitProducerIdRequestData setTransactionTimeoutMs(int v) {
        this.transactionTimeoutMs = v;
        return this;
    }

    public InitProducerIdRequestData setProducerId(long v) {
        this.producerId = v;
        return this;
    }

    public InitProducerIdRequestData setProducerEpoch(short v) {
        this.producerEpoch = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/InitProducerIdRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.InitProducerIdRequestData.*;

/**
 * Converts {@link InitProducerIdRequestData} to and from a Jackson JSON tree.
 *
 * <p>Generated converter: version gating mirrors the binary schemas —
 * {@code producerId} and {@code producerEpoch} only exist for version &gt;= 3 and
 * may be absent (defaulting to -1) when reading older versions.
 */
public class InitProducerIdRequestDataJsonConverter {
    /**
     * Builds an {@link InitProducerIdRequestData} from the given JSON node.
     *
     * @throws RuntimeException if a field that is mandatory in {@code _version}
     *                          is absent or has the wrong JSON type
     */
    public static InitProducerIdRequestData read(JsonNode _node, short _version) {
        InitProducerIdRequestData _object = new InitProducerIdRequestData();
        JsonNode _transactionalIdNode = _node.get("transactionalId");
        if (_transactionalIdNode == null) {
            throw new RuntimeException("InitProducerIdRequestData: unable to locate field 'transactionalId', which is mandatory in version " + _version);
        } else {
            // Nullable string: JSON null maps to a null Java reference.
            if (_transactionalIdNode.isNull()) {
                _object.transactionalId = null;
            } else {
                if (!_transactionalIdNode.isTextual()) {
                    throw new RuntimeException("InitProducerIdRequestData expected a string type, but got " + _node.getNodeType());
                }
                _object.transactionalId = _transactionalIdNode.asText();
            }
        }
        JsonNode _transactionTimeoutMsNode = _node.get("transactionTimeoutMs");
        if (_transactionTimeoutMsNode == null) {
            throw new RuntimeException("InitProducerIdRequestData: unable to locate field 'transactionTimeoutMs', which is mandatory in version " + _version);
        } else {
            _object.transactionTimeoutMs = MessageUtil.jsonNodeToInt(_transactionTimeoutMsNode, "InitProducerIdRequestData");
        }
        JsonNode _producerIdNode = _node.get("producerId");
        if (_producerIdNode == null) {
            // Mandatory only from v3 on; earlier versions fall back to the default.
            if (_version >= 3) {
                throw new RuntimeException("InitProducerIdRequestData: unable to locate field 'producerId', which is mandatory in version " + _version);
            } else {
                _object.producerId = -1L;
            }
        } else {
            _object.producerId = MessageUtil.jsonNodeToLong(_producerIdNode, "InitProducerIdRequestData");
        }
        JsonNode _producerEpochNode = _node.get("producerEpoch");
        if (_producerEpochNode == null) {
            // Mandatory only from v3 on; earlier versions fall back to the default.
            if (_version >= 3) {
                throw new RuntimeException("InitProducerIdRequestData: unable to locate field 'producerEpoch', which is mandatory in version " + _version);
            } else {
                _object.producerEpoch = (short) -1;
            }
        } else {
            _object.producerEpoch = MessageUtil.jsonNodeToShort(_producerEpochNode, "InitProducerIdRequestData");
        }
        return _object;
    }

    /**
     * Serializes the given data object into a JSON tree for {@code _version}.
     * Writing a non-default producerId/producerEpoch at a version that cannot
     * carry them throws {@link UnsupportedVersionException}.
     */
    public static JsonNode write(InitProducerIdRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        if (_object.transactionalId == null) {
            _node.set("transactionalId", NullNode.instance);
        } else {
            _node.set("transactionalId", new TextNode(_object.transactionalId));
        }
        _node.set("transactionTimeoutMs", new IntNode(_object.transactionTimeoutMs));
        if (_version >= 3) {
            _node.set("producerId", new LongNode(_object.producerId));
        } else {
            if (_object.producerId != -1L) {
                throw new UnsupportedVersionException("Attempted to write a non-default producerId at version " + _version);
            }
        }
        if (_version >= 3) {
            _node.set("producerEpoch", new ShortNode(_object.producerEpoch));
        } else {
            if (_object.producerEpoch != (short) -1) {
                throw new UnsupportedVersionException("Attempted to write a non-default producerEpoch at version " + _version);
            }
        }
        return _node;
    }

    /** Convenience overload that serializes with records included. */
    public static JsonNode write(InitProducerIdRequestData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/InitProducerIdResponseData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Generated message class for the InitProducerId response (api key 22, versions 0-4).
 *
 * <p>All four fields are fixed-width and present in every version; the only
 * versioned difference is the tagged-fields section appended for version &gt;= 2.
 * The statement order in {@link #read}, {@link #write} and {@link #addSize}
 * defines the wire format and must not be altered.
 */
public class InitProducerIdResponseData implements ApiMessage {
    int throttleTimeMs;
    short errorCode;
    long producerId;   // default -1
    short producerEpoch;
    private List<RawTaggedField> _unknownTaggedFields;  // lazily created; tags not known to this version

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("producer_id", Type.INT64, "The current producer id."),
            new Field("producer_epoch", Type.INT16, "The current epoch associated with the producer id.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    // v2 switches to the flexible format: same fields plus a tagged-fields section.
    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("producer_id", Type.INT64, "The current producer id."),
            new Field("producer_epoch", Type.INT16, "The current epoch associated with the producer id."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema SCHEMA_3 = SCHEMA_2;

    public static final Schema SCHEMA_4 = SCHEMA_3;

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3,
        SCHEMA_4
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 4;

    /** Deserializing constructor: reads the fields for {@code _version} from {@code _readable}. */
    public InitProducerIdResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: initializes every field to its schema default. */
    public InitProducerIdResponseData() {
        this.throttleTimeMs = 0;
        this.errorCode = (short) 0;
        this.producerId = -1L;
        this.producerEpoch = (short) 0;
    }

    @Override
    public short apiKey() {
        return 22;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 4;
    }

    /**
     * Reads this message from the wire. The four fixed-width fields are read in
     * schema order; for v2+ the trailing tagged-fields section is consumed and
     * unknown tags are preserved verbatim for round-tripping.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        this.errorCode = _readable.readShort();
        this.producerId = _readable.readLong();
        this.producerEpoch = _readable.readShort();
        this._unknownTaggedFields = null;
        if (_version >= 2) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        // No tags are defined for this message; keep unknown ones round-trippable.
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Writes this message to the wire. Tagged fields are only legal for v2+;
     * attempting to write them at an older version throws
     * {@link UnsupportedVersionException}.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        _writable.writeShort(errorCode);
        _writable.writeLong(producerId);
        _writable.writeShort(producerEpoch);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 2) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this message for {@code _version}:
     * 4 + 2 + 8 + 2 fixed bytes plus any tagged fields (v2+ only).
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(4);
        _size.addBytes(2);
        _size.addBytes(8);
        _size.addBytes(2);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 2) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /** Field-by-field equality, including unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof InitProducerIdResponseData)) return false;
        InitProducerIdResponseData other = (InitProducerIdResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (errorCode != other.errorCode) return false;
        if (producerId != other.producerId) return false;
        if (producerEpoch != other.producerEpoch) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    /** Hash over the declared fields (unknown tagged fields are intentionally excluded). */
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + errorCode;
        hashCode = 31 * hashCode + ((int) (producerId >> 32) ^ (int) producerId);
        hashCode = 31 * hashCode + producerEpoch;
        return hashCode;
    }

    /** Returns a field-by-field copy. */
    @Override
    public InitProducerIdResponseData duplicate() {
        InitProducerIdResponseData _duplicate = new InitProducerIdResponseData();
        _duplicate.throttleTimeMs = throttleTimeMs;
        _duplicate.errorCode = errorCode;
        _duplicate.producerId = producerId;
        _duplicate.producerEpoch = producerEpoch;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "InitProducerIdResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", errorCode=" + errorCode
            + ", producerId=" + producerId
            + ", producerEpoch=" + producerEpoch
            + ")";
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public short errorCode() {
        return this.errorCode;
    }

    public long producerId() {
        return this.producerId;
    }

    public short producerEpoch() {
        return this.producerEpoch;
    }

    /** Lazily creates and returns the mutable list of unknown tagged fields. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public InitProducerIdResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public InitProducerIdResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }

    public InitProducerIdResponseData setProducerId(long v) {
        this.producerId = v;
        return this;
    }

    public InitProducerIdResponseData setProducerEpoch(short v) {
        this.producerEpoch = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/InitProducerIdResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.LongNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.InitProducerIdResponseData.*; public class InitProducerIdResponseDataJsonConverter { public static InitProducerIdResponseData read(JsonNode _node, short _version) { InitProducerIdResponseData _object = new InitProducerIdResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { throw new RuntimeException("InitProducerIdResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "InitProducerIdResponseData"); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new 
RuntimeException("InitProducerIdResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "InitProducerIdResponseData"); } JsonNode _producerIdNode = _node.get("producerId"); if (_producerIdNode == null) { throw new RuntimeException("InitProducerIdResponseData: unable to locate field 'producerId', which is mandatory in version " + _version); } else { _object.producerId = MessageUtil.jsonNodeToLong(_producerIdNode, "InitProducerIdResponseData"); } JsonNode _producerEpochNode = _node.get("producerEpoch"); if (_producerEpochNode == null) { throw new RuntimeException("InitProducerIdResponseData: unable to locate field 'producerEpoch', which is mandatory in version " + _version); } else { _object.producerEpoch = MessageUtil.jsonNodeToShort(_producerEpochNode, "InitProducerIdResponseData"); } return _object; } public static JsonNode write(InitProducerIdResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); _node.set("errorCode", new ShortNode(_object.errorCode)); _node.set("producerId", new LongNode(_object.producerId)); _node.set("producerEpoch", new ShortNode(_object.producerEpoch)); return _node; } public static JsonNode write(InitProducerIdResponseData _object, short _version) { return write(_object, _version, true); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/JoinGroupRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.Bytes; import 
org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class JoinGroupRequestData implements ApiMessage { String groupId; int sessionTimeoutMs; int rebalanceTimeoutMs; String memberId; String groupInstanceId; String protocolType; JoinGroupRequestProtocolCollection protocols; String reason; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("group_id", Type.STRING, "The group identifier."), new Field("session_timeout_ms", Type.INT32, "The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds."), new Field("member_id", Type.STRING, "The member id assigned by the group coordinator."), new Field("protocol_type", Type.STRING, "The unique name the for class of protocols implemented by the group we want to join."), new Field("protocols", new ArrayOf(JoinGroupRequestProtocol.SCHEMA_0), "The list of protocols that the member supports.") ); public static final Schema SCHEMA_1 = new Schema( new Field("group_id", Type.STRING, "The group identifier."), new Field("session_timeout_ms", Type.INT32, "The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds."), new Field("rebalance_timeout_ms", Type.INT32, "The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group."), new Field("member_id", Type.STRING, "The member id assigned by the group coordinator."), new Field("protocol_type", Type.STRING, "The unique name the for class of protocols implemented by the group we want to join."), new Field("protocols", new ArrayOf(JoinGroupRequestProtocol.SCHEMA_0), "The list of protocols that the member supports.") ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public 
static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("group_id", Type.STRING, "The group identifier."), new Field("session_timeout_ms", Type.INT32, "The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds."), new Field("rebalance_timeout_ms", Type.INT32, "The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group."), new Field("member_id", Type.STRING, "The member id assigned by the group coordinator."), new Field("group_instance_id", Type.NULLABLE_STRING, "The unique identifier of the consumer instance provided by end user."), new Field("protocol_type", Type.STRING, "The unique name the for class of protocols implemented by the group we want to join."), new Field("protocols", new ArrayOf(JoinGroupRequestProtocol.SCHEMA_0), "The list of protocols that the member supports.") ); public static final Schema SCHEMA_6 = new Schema( new Field("group_id", Type.COMPACT_STRING, "The group identifier."), new Field("session_timeout_ms", Type.INT32, "The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds."), new Field("rebalance_timeout_ms", Type.INT32, "The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group."), new Field("member_id", Type.COMPACT_STRING, "The member id assigned by the group coordinator."), new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, "The unique identifier of the consumer instance provided by end user."), new Field("protocol_type", Type.COMPACT_STRING, "The unique name the for class of protocols implemented by the group we want to join."), new Field("protocols", new CompactArrayOf(JoinGroupRequestProtocol.SCHEMA_6), "The list of protocols that the member supports."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 
= new Schema( new Field("group_id", Type.COMPACT_STRING, "The group identifier."), new Field("session_timeout_ms", Type.INT32, "The coordinator considers the consumer dead if it receives no heartbeat after this timeout in milliseconds."), new Field("rebalance_timeout_ms", Type.INT32, "The maximum time in milliseconds that the coordinator will wait for each member to rejoin when rebalancing the group."), new Field("member_id", Type.COMPACT_STRING, "The member id assigned by the group coordinator."), new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, "The unique identifier of the consumer instance provided by end user."), new Field("protocol_type", Type.COMPACT_STRING, "The unique name the for class of protocols implemented by the group we want to join."), new Field("protocols", new CompactArrayOf(JoinGroupRequestProtocol.SCHEMA_6), "The list of protocols that the member supports."), new Field("reason", Type.COMPACT_NULLABLE_STRING, "The reason why the member (re-)joins the group."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_9 = SCHEMA_8; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 9; public JoinGroupRequestData(Readable _readable, short _version) { read(_readable, _version); } public JoinGroupRequestData() { this.groupId = ""; this.sessionTimeoutMs = 0; this.rebalanceTimeoutMs = -1; this.memberId = ""; this.groupInstanceId = null; this.protocolType = ""; this.protocols = new JoinGroupRequestProtocolCollection(0); this.reason = null; } @Override public short apiKey() { return 11; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 9; } @Override public void read(Readable _readable, short _version) { { int length; if (_version >= 6) { length = 
_readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field groupId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field groupId had invalid length " + length); } else { this.groupId = _readable.readString(length); } } this.sessionTimeoutMs = _readable.readInt(); if (_version >= 1) { this.rebalanceTimeoutMs = _readable.readInt(); } else { this.rebalanceTimeoutMs = -1; } { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field memberId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field memberId had invalid length " + length); } else { this.memberId = _readable.readString(length); } } if (_version >= 5) { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.groupInstanceId = null; } else if (length > 0x7fff) { throw new RuntimeException("string field groupInstanceId had invalid length " + length); } else { this.groupInstanceId = _readable.readString(length); } } else { this.groupInstanceId = null; } { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field protocolType was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field protocolType had invalid length " + length); } else { this.protocolType = _readable.readString(length); } } { if (_version >= 6) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field protocols was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to 
allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } JoinGroupRequestProtocolCollection newCollection = new JoinGroupRequestProtocolCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new JoinGroupRequestProtocol(_readable, _version)); } this.protocols = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field protocols was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } JoinGroupRequestProtocolCollection newCollection = new JoinGroupRequestProtocolCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new JoinGroupRequestProtocol(_readable, _version)); } this.protocols = newCollection; } } } if (_version >= 8) { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { this.reason = null; } else if (length > 0x7fff) { throw new RuntimeException("string field reason had invalid length " + length); } else { this.reason = _readable.readString(length); } } else { this.reason = null; } this._unknownTaggedFields = null; if (_version >= 6) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(groupId); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { 
_writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } _writable.writeInt(sessionTimeoutMs); if (_version >= 1) { _writable.writeInt(rebalanceTimeoutMs); } { byte[] _stringBytes = _cache.getSerializedValue(memberId); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 5) { if (groupInstanceId == null) { if (_version >= 6) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(groupInstanceId); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } else { if (this.groupInstanceId != null) { throw new UnsupportedVersionException("Attempted to write a non-default groupInstanceId at version " + _version); } } { byte[] _stringBytes = _cache.getSerializedValue(protocolType); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 6) { _writable.writeUnsignedVarint(protocols.size() + 1); for (JoinGroupRequestProtocol protocolsElement : protocols) { protocolsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(protocols.size()); for (JoinGroupRequestProtocol protocolsElement : protocols) { protocolsElement.write(_writable, _cache, _version); } } if (_version >= 8) { if (reason == null) { _writable.writeUnsignedVarint(0); } else { byte[] _stringBytes = _cache.getSerializedValue(reason); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version 
>= 6) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'groupId' field is too long to be serialized"); } _cache.cacheSerializedValue(groupId, _stringBytes); if (_version >= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } _size.addBytes(4); if (_version >= 1) { _size.addBytes(4); } { byte[] _stringBytes = memberId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'memberId' field is too long to be serialized"); } _cache.cacheSerializedValue(memberId, _stringBytes); if (_version >= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_version >= 5) { if (groupInstanceId == null) { if (_version >= 6) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = groupInstanceId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'groupInstanceId' field is too long to be serialized"); } _cache.cacheSerializedValue(groupInstanceId, _stringBytes); if (_version >= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } { byte[] _stringBytes = protocolType.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new 
RuntimeException("'protocolType' field is too long to be serialized"); } _cache.cacheSerializedValue(protocolType, _stringBytes); if (_version >= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(protocols.size() + 1)); } else { _size.addBytes(4); } for (JoinGroupRequestProtocol protocolsElement : protocols) { protocolsElement.addSize(_size, _cache, _version); } } if (_version >= 8) { if (reason == null) { _size.addBytes(1); } else { byte[] _stringBytes = reason.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'reason' field is too long to be serialized"); } _cache.cacheSerializedValue(reason, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof JoinGroupRequestData)) return false; JoinGroupRequestData other = (JoinGroupRequestData) obj; if (this.groupId == null) { if (other.groupId != null) return false; } else { if (!this.groupId.equals(other.groupId)) return false; } if (sessionTimeoutMs != other.sessionTimeoutMs) return false; if (rebalanceTimeoutMs != other.rebalanceTimeoutMs) return false; if (this.memberId == null) { if (other.memberId != null) return false; } 
else { if (!this.memberId.equals(other.memberId)) return false; } if (this.groupInstanceId == null) { if (other.groupInstanceId != null) return false; } else { if (!this.groupInstanceId.equals(other.groupInstanceId)) return false; } if (this.protocolType == null) { if (other.protocolType != null) return false; } else { if (!this.protocolType.equals(other.protocolType)) return false; } if (this.protocols == null) { if (other.protocols != null) return false; } else { if (!this.protocols.equals(other.protocols)) return false; } if (this.reason == null) { if (other.reason != null) return false; } else { if (!this.reason.equals(other.reason)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode()); hashCode = 31 * hashCode + sessionTimeoutMs; hashCode = 31 * hashCode + rebalanceTimeoutMs; hashCode = 31 * hashCode + (memberId == null ? 0 : memberId.hashCode()); hashCode = 31 * hashCode + (groupInstanceId == null ? 0 : groupInstanceId.hashCode()); hashCode = 31 * hashCode + (protocolType == null ? 0 : protocolType.hashCode()); hashCode = 31 * hashCode + (protocols == null ? 0 : protocols.hashCode()); hashCode = 31 * hashCode + (reason == null ? 
0 : reason.hashCode()); return hashCode; } @Override public JoinGroupRequestData duplicate() { JoinGroupRequestData _duplicate = new JoinGroupRequestData(); _duplicate.groupId = groupId; _duplicate.sessionTimeoutMs = sessionTimeoutMs; _duplicate.rebalanceTimeoutMs = rebalanceTimeoutMs; _duplicate.memberId = memberId; if (groupInstanceId == null) { _duplicate.groupInstanceId = null; } else { _duplicate.groupInstanceId = groupInstanceId; } _duplicate.protocolType = protocolType; JoinGroupRequestProtocolCollection newProtocols = new JoinGroupRequestProtocolCollection(protocols.size()); for (JoinGroupRequestProtocol _element : protocols) { newProtocols.add(_element.duplicate()); } _duplicate.protocols = newProtocols; if (reason == null) { _duplicate.reason = null; } else { _duplicate.reason = reason; } return _duplicate; } @Override public String toString() { return "JoinGroupRequestData(" + "groupId=" + ((groupId == null) ? "null" : "'" + groupId.toString() + "'") + ", sessionTimeoutMs=" + sessionTimeoutMs + ", rebalanceTimeoutMs=" + rebalanceTimeoutMs + ", memberId=" + ((memberId == null) ? "null" : "'" + memberId.toString() + "'") + ", groupInstanceId=" + ((groupInstanceId == null) ? "null" : "'" + groupInstanceId.toString() + "'") + ", protocolType=" + ((protocolType == null) ? "null" : "'" + protocolType.toString() + "'") + ", protocols=" + MessageUtil.deepToString(protocols.iterator()) + ", reason=" + ((reason == null) ? 
"null" : "'" + reason.toString() + "'") + ")"; } public String groupId() { return this.groupId; } public int sessionTimeoutMs() { return this.sessionTimeoutMs; } public int rebalanceTimeoutMs() { return this.rebalanceTimeoutMs; } public String memberId() { return this.memberId; } public String groupInstanceId() { return this.groupInstanceId; } public String protocolType() { return this.protocolType; } public JoinGroupRequestProtocolCollection protocols() { return this.protocols; } public String reason() { return this.reason; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public JoinGroupRequestData setGroupId(String v) { this.groupId = v; return this; } public JoinGroupRequestData setSessionTimeoutMs(int v) { this.sessionTimeoutMs = v; return this; } public JoinGroupRequestData setRebalanceTimeoutMs(int v) { this.rebalanceTimeoutMs = v; return this; } public JoinGroupRequestData setMemberId(String v) { this.memberId = v; return this; } public JoinGroupRequestData setGroupInstanceId(String v) { this.groupInstanceId = v; return this; } public JoinGroupRequestData setProtocolType(String v) { this.protocolType = v; return this; } public JoinGroupRequestData setProtocols(JoinGroupRequestProtocolCollection v) { this.protocols = v; return this; } public JoinGroupRequestData setReason(String v) { this.reason = v; return this; } public static class JoinGroupRequestProtocol implements Message, ImplicitLinkedHashMultiCollection.Element { String name; byte[] metadata; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The protocol name."), new Field("metadata", Type.BYTES, "The protocol metadata.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = 
SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = new Schema( new Field("name", Type.COMPACT_STRING, "The protocol name."), new Field("metadata", Type.COMPACT_BYTES, "The protocol metadata."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = SCHEMA_8; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 9; public JoinGroupRequestProtocol(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public JoinGroupRequestProtocol() { this.name = ""; this.metadata = Bytes.EMPTY; this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 9; } @Override public void read(Readable _readable, short _version) { if (_version > 9) { throw new UnsupportedVersionException("Can't read version " + _version + " of JoinGroupRequestProtocol"); } { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readInt(); } if (length < 0) { throw new 
RuntimeException("non-nullable field metadata was serialized as null"); } else { byte[] newBytes = _readable.readArray(length); this.metadata = newBytes; } } this._unknownTaggedFields = null; if (_version >= 6) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 6) { _writable.writeUnsignedVarint(metadata.length + 1); } else { _writable.writeInt(metadata.length); } _writable.writeByteArray(metadata); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 6) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 9) { throw new UnsupportedVersionException("Can't size version " + _version + " of JoinGroupRequestProtocol"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); if (_version 
>= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { _size.addBytes(metadata.length); if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(metadata.length + 1)); } else { _size.addBytes(4); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof JoinGroupRequestProtocol)) return false; JoinGroupRequestProtocol other = (JoinGroupRequestProtocol) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof JoinGroupRequestProtocol)) return false; JoinGroupRequestProtocol other = (JoinGroupRequestProtocol) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (!Arrays.equals(this.metadata, other.metadata)) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 
0 : name.hashCode()); return hashCode; } @Override public JoinGroupRequestProtocol duplicate() { JoinGroupRequestProtocol _duplicate = new JoinGroupRequestProtocol(); _duplicate.name = name; _duplicate.metadata = MessageUtil.duplicate(metadata); return _duplicate; } @Override public String toString() { return "JoinGroupRequestProtocol(" + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'") + ", metadata=" + Arrays.toString(metadata) + ")"; } public String name() { return this.name; } public byte[] metadata() { return this.metadata; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public JoinGroupRequestProtocol setName(String v) { this.name = v; return this; } public JoinGroupRequestProtocol setMetadata(byte[] v) { this.metadata = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class JoinGroupRequestProtocolCollection extends ImplicitLinkedHashMultiCollection<JoinGroupRequestProtocol> { public JoinGroupRequestProtocolCollection() { super(); } public JoinGroupRequestProtocolCollection(int expectedNumElements) { super(expectedNumElements); } public JoinGroupRequestProtocolCollection(Iterator<JoinGroupRequestProtocol> iterator) { super(iterator); } public JoinGroupRequestProtocol find(String name) { JoinGroupRequestProtocol _key = new JoinGroupRequestProtocol(); _key.setName(name); return find(_key); } public List<JoinGroupRequestProtocol> findAll(String name) { JoinGroupRequestProtocol _key = new JoinGroupRequestProtocol(); _key.setName(name); return findAll(_key); } public JoinGroupRequestProtocolCollection duplicate() { JoinGroupRequestProtocolCollection _duplicate = new JoinGroupRequestProtocolCollection(size()); for 
(JoinGroupRequestProtocol _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/JoinGroupRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.BinaryNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.NullNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.Arrays; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.JoinGroupRequestData.*; public class JoinGroupRequestDataJsonConverter { public static JoinGroupRequestData read(JsonNode _node, short _version) { JoinGroupRequestData _object = new JoinGroupRequestData(); JsonNode _groupIdNode = _node.get("groupId"); if (_groupIdNode == null) { throw new RuntimeException("JoinGroupRequestData: unable to locate field 'groupId', which is mandatory in version " + _version); } else { if (!_groupIdNode.isTextual()) { throw new RuntimeException("JoinGroupRequestData 
expected a string type, but got " + _node.getNodeType()); } _object.groupId = _groupIdNode.asText(); } JsonNode _sessionTimeoutMsNode = _node.get("sessionTimeoutMs"); if (_sessionTimeoutMsNode == null) { throw new RuntimeException("JoinGroupRequestData: unable to locate field 'sessionTimeoutMs', which is mandatory in version " + _version); } else { _object.sessionTimeoutMs = MessageUtil.jsonNodeToInt(_sessionTimeoutMsNode, "JoinGroupRequestData"); } JsonNode _rebalanceTimeoutMsNode = _node.get("rebalanceTimeoutMs"); if (_rebalanceTimeoutMsNode == null) { if (_version >= 1) { throw new RuntimeException("JoinGroupRequestData: unable to locate field 'rebalanceTimeoutMs', which is mandatory in version " + _version); } else { _object.rebalanceTimeoutMs = -1; } } else { _object.rebalanceTimeoutMs = MessageUtil.jsonNodeToInt(_rebalanceTimeoutMsNode, "JoinGroupRequestData"); } JsonNode _memberIdNode = _node.get("memberId"); if (_memberIdNode == null) { throw new RuntimeException("JoinGroupRequestData: unable to locate field 'memberId', which is mandatory in version " + _version); } else { if (!_memberIdNode.isTextual()) { throw new RuntimeException("JoinGroupRequestData expected a string type, but got " + _node.getNodeType()); } _object.memberId = _memberIdNode.asText(); } JsonNode _groupInstanceIdNode = _node.get("groupInstanceId"); if (_groupInstanceIdNode == null) { if (_version >= 5) { throw new RuntimeException("JoinGroupRequestData: unable to locate field 'groupInstanceId', which is mandatory in version " + _version); } else { _object.groupInstanceId = null; } } else { if (_groupInstanceIdNode.isNull()) { _object.groupInstanceId = null; } else { if (!_groupInstanceIdNode.isTextual()) { throw new RuntimeException("JoinGroupRequestData expected a string type, but got " + _node.getNodeType()); } _object.groupInstanceId = _groupInstanceIdNode.asText(); } } JsonNode _protocolTypeNode = _node.get("protocolType"); if (_protocolTypeNode == null) { throw new 
RuntimeException("JoinGroupRequestData: unable to locate field 'protocolType', which is mandatory in version " + _version); } else { if (!_protocolTypeNode.isTextual()) { throw new RuntimeException("JoinGroupRequestData expected a string type, but got " + _node.getNodeType()); } _object.protocolType = _protocolTypeNode.asText(); } JsonNode _protocolsNode = _node.get("protocols"); if (_protocolsNode == null) { throw new RuntimeException("JoinGroupRequestData: unable to locate field 'protocols', which is mandatory in version " + _version); } else { if (!_protocolsNode.isArray()) { throw new RuntimeException("JoinGroupRequestData expected a JSON array, but got " + _node.getNodeType()); } JoinGroupRequestProtocolCollection _collection = new JoinGroupRequestProtocolCollection(_protocolsNode.size()); _object.protocols = _collection; for (JsonNode _element : _protocolsNode) { _collection.add(JoinGroupRequestProtocolJsonConverter.read(_element, _version)); } } JsonNode _reasonNode = _node.get("reason"); if (_reasonNode == null) { if (_version >= 8) { throw new RuntimeException("JoinGroupRequestData: unable to locate field 'reason', which is mandatory in version " + _version); } else { _object.reason = null; } } else { if (_reasonNode.isNull()) { _object.reason = null; } else { if (!_reasonNode.isTextual()) { throw new RuntimeException("JoinGroupRequestData expected a string type, but got " + _node.getNodeType()); } _object.reason = _reasonNode.asText(); } } return _object; } public static JsonNode write(JoinGroupRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("groupId", new TextNode(_object.groupId)); _node.set("sessionTimeoutMs", new IntNode(_object.sessionTimeoutMs)); if (_version >= 1) { _node.set("rebalanceTimeoutMs", new IntNode(_object.rebalanceTimeoutMs)); } _node.set("memberId", new TextNode(_object.memberId)); if (_version >= 5) { if (_object.groupInstanceId == null) { 
_node.set("groupInstanceId", NullNode.instance); } else { _node.set("groupInstanceId", new TextNode(_object.groupInstanceId)); } } else { if (_object.groupInstanceId != null) { throw new UnsupportedVersionException("Attempted to write a non-default groupInstanceId at version " + _version); } } _node.set("protocolType", new TextNode(_object.protocolType)); ArrayNode _protocolsArray = new ArrayNode(JsonNodeFactory.instance); for (JoinGroupRequestProtocol _element : _object.protocols) { _protocolsArray.add(JoinGroupRequestProtocolJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("protocols", _protocolsArray); if (_version >= 8) { if (_object.reason == null) { _node.set("reason", NullNode.instance); } else { _node.set("reason", new TextNode(_object.reason)); } } return _node; } public static JsonNode write(JoinGroupRequestData _object, short _version) { return write(_object, _version, true); } public static class JoinGroupRequestProtocolJsonConverter { public static JoinGroupRequestProtocol read(JsonNode _node, short _version) { JoinGroupRequestProtocol _object = new JoinGroupRequestProtocol(); JsonNode _nameNode = _node.get("name"); if (_nameNode == null) { throw new RuntimeException("JoinGroupRequestProtocol: unable to locate field 'name', which is mandatory in version " + _version); } else { if (!_nameNode.isTextual()) { throw new RuntimeException("JoinGroupRequestProtocol expected a string type, but got " + _node.getNodeType()); } _object.name = _nameNode.asText(); } JsonNode _metadataNode = _node.get("metadata"); if (_metadataNode == null) { throw new RuntimeException("JoinGroupRequestProtocol: unable to locate field 'metadata', which is mandatory in version " + _version); } else { _object.metadata = MessageUtil.jsonNodeToBinary(_metadataNode, "JoinGroupRequestProtocol"); } return _object; } public static JsonNode write(JoinGroupRequestProtocol _object, short _version, boolean _serializeRecords) { ObjectNode _node = new 
ObjectNode(JsonNodeFactory.instance); _node.set("name", new TextNode(_object.name)); _node.set("metadata", new BinaryNode(Arrays.copyOf(_object.metadata, _object.metadata.length))); return _node; } public static JsonNode write(JoinGroupRequestProtocol _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/JoinGroupResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.Bytes; import static 
org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class JoinGroupResponseData implements ApiMessage { int throttleTimeMs; short errorCode; int generationId; String protocolType; String protocolName; String leader; boolean skipAssignment; String memberId; List<JoinGroupResponseMember> members; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("generation_id", Type.INT32, "The generation ID of the group."), new Field("protocol_name", Type.STRING, "The group protocol selected by the coordinator."), new Field("leader", Type.STRING, "The leader of the group."), new Field("member_id", Type.STRING, "The member ID assigned by the group coordinator."), new Field("members", new ArrayOf(JoinGroupResponseMember.SCHEMA_0), "") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("generation_id", Type.INT32, "The generation ID of the group."), new Field("protocol_name", Type.STRING, "The group protocol selected by the coordinator."), new Field("leader", Type.STRING, "The leader of the group."), new Field("member_id", Type.STRING, "The member ID assigned by the group coordinator."), new Field("members", new ArrayOf(JoinGroupResponseMember.SCHEMA_0), "") ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new 
Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("generation_id", Type.INT32, "The generation ID of the group."), new Field("protocol_name", Type.STRING, "The group protocol selected by the coordinator."), new Field("leader", Type.STRING, "The leader of the group."), new Field("member_id", Type.STRING, "The member ID assigned by the group coordinator."), new Field("members", new ArrayOf(JoinGroupResponseMember.SCHEMA_5), "") ); public static final Schema SCHEMA_6 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("generation_id", Type.INT32, "The generation ID of the group."), new Field("protocol_name", Type.COMPACT_STRING, "The group protocol selected by the coordinator."), new Field("leader", Type.COMPACT_STRING, "The leader of the group."), new Field("member_id", Type.COMPACT_STRING, "The member ID assigned by the group coordinator."), new Field("members", new CompactArrayOf(JoinGroupResponseMember.SCHEMA_6), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("generation_id", Type.INT32, "The generation ID of the group."), new Field("protocol_type", Type.COMPACT_NULLABLE_STRING, "The group protocol name."), new Field("protocol_name", Type.COMPACT_NULLABLE_STRING, "The group protocol selected by the coordinator."), new Field("leader", Type.COMPACT_STRING, "The leader of the group."), new Field("member_id", Type.COMPACT_STRING, "The member ID assigned by the group 
coordinator."), new Field("members", new CompactArrayOf(JoinGroupResponseMember.SCHEMA_6), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("generation_id", Type.INT32, "The generation ID of the group."), new Field("protocol_type", Type.COMPACT_NULLABLE_STRING, "The group protocol name."), new Field("protocol_name", Type.COMPACT_NULLABLE_STRING, "The group protocol selected by the coordinator."), new Field("leader", Type.COMPACT_STRING, "The leader of the group."), new Field("skip_assignment", Type.BOOLEAN, "True if the leader must skip running the assignment."), new Field("member_id", Type.COMPACT_STRING, "The member ID assigned by the group coordinator."), new Field("members", new CompactArrayOf(JoinGroupResponseMember.SCHEMA_6), ""), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 9; public JoinGroupResponseData(Readable _readable, short _version) { read(_readable, _version); } public JoinGroupResponseData() { this.throttleTimeMs = 0; this.errorCode = (short) 0; this.generationId = -1; this.protocolType = null; this.protocolName = ""; this.leader = ""; this.skipAssignment = false; this.memberId = ""; this.members = new ArrayList<JoinGroupResponseMember>(0); } @Override public short apiKey() { return 11; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 9; } @Override public void read(Readable 
_readable, short _version) { if (_version >= 2) { this.throttleTimeMs = _readable.readInt(); } else { this.throttleTimeMs = 0; } this.errorCode = _readable.readShort(); this.generationId = _readable.readInt(); if (_version >= 7) { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { this.protocolType = null; } else if (length > 0x7fff) { throw new RuntimeException("string field protocolType had invalid length " + length); } else { this.protocolType = _readable.readString(length); } } else { this.protocolType = null; } { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { if (_version >= 7) { this.protocolName = null; } else { throw new RuntimeException("non-nullable field protocolName was serialized as null"); } } else if (length > 0x7fff) { throw new RuntimeException("string field protocolName had invalid length " + length); } else { this.protocolName = _readable.readString(length); } } { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field leader was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field leader had invalid length " + length); } else { this.leader = _readable.readString(length); } } if (_version >= 9) { this.skipAssignment = _readable.readByte() != 0; } else { this.skipAssignment = false; } { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field memberId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field memberId had invalid length " + length); } else { this.memberId = _readable.readString(length); } } { if (_version >= 6) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if 
(arrayLength < 0) { throw new RuntimeException("non-nullable field members was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<JoinGroupResponseMember> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new JoinGroupResponseMember(_readable, _version)); } this.members = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field members was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<JoinGroupResponseMember> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new JoinGroupResponseMember(_readable, _version)); } this.members = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 6) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 2) { _writable.writeInt(throttleTimeMs); } _writable.writeShort(errorCode); _writable.writeInt(generationId); if (_version >= 7) { if (protocolType == null) { _writable.writeUnsignedVarint(0); } else { byte[] _stringBytes = _cache.getSerializedValue(protocolType); _writable.writeUnsignedVarint(_stringBytes.length + 1); 
_writable.writeByteArray(_stringBytes); } } if (protocolName == null) { if (_version >= 7) { _writable.writeUnsignedVarint(0); } else { throw new NullPointerException(); } } else { byte[] _stringBytes = _cache.getSerializedValue(protocolName); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } { byte[] _stringBytes = _cache.getSerializedValue(leader); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 9) { _writable.writeByte(skipAssignment ? (byte) 1 : (byte) 0); } else { if (this.skipAssignment) { throw new UnsupportedVersionException("Attempted to write a non-default skipAssignment at version " + _version); } } { byte[] _stringBytes = _cache.getSerializedValue(memberId); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 6) { _writable.writeUnsignedVarint(members.size() + 1); for (JoinGroupResponseMember membersElement : members) { membersElement.write(_writable, _cache, _version); } } else { _writable.writeInt(members.size()); for (JoinGroupResponseMember membersElement : members) { membersElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 6) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache 
_cache, short _version) { int _numTaggedFields = 0; if (_version >= 2) { _size.addBytes(4); } _size.addBytes(2); _size.addBytes(4); if (_version >= 7) { if (protocolType == null) { _size.addBytes(1); } else { byte[] _stringBytes = protocolType.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'protocolType' field is too long to be serialized"); } _cache.cacheSerializedValue(protocolType, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } } if (protocolName == null) { if (_version >= 6) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = protocolName.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'protocolName' field is too long to be serialized"); } _cache.cacheSerializedValue(protocolName, _stringBytes); if (_version >= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { byte[] _stringBytes = leader.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'leader' field is too long to be serialized"); } _cache.cacheSerializedValue(leader, _stringBytes); if (_version >= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_version >= 9) { _size.addBytes(1); } { byte[] _stringBytes = memberId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'memberId' field is too long to be serialized"); } _cache.cacheSerializedValue(memberId, _stringBytes); if (_version >= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 6) { 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(members.size() + 1)); } else { _size.addBytes(4); } for (JoinGroupResponseMember membersElement : members) { membersElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof JoinGroupResponseData)) return false; JoinGroupResponseData other = (JoinGroupResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (errorCode != other.errorCode) return false; if (generationId != other.generationId) return false; if (this.protocolType == null) { if (other.protocolType != null) return false; } else { if (!this.protocolType.equals(other.protocolType)) return false; } if (this.protocolName == null) { if (other.protocolName != null) return false; } else { if (!this.protocolName.equals(other.protocolName)) return false; } if (this.leader == null) { if (other.leader != null) return false; } else { if (!this.leader.equals(other.leader)) return false; } if (skipAssignment != other.skipAssignment) return false; if (this.memberId == null) { if (other.memberId != null) return false; } else { if (!this.memberId.equals(other.memberId)) return false; } if (this.members == null) { if (other.members != null) return false; } else { if (!this.members.equals(other.members)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int 
hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + generationId; hashCode = 31 * hashCode + (protocolType == null ? 0 : protocolType.hashCode()); hashCode = 31 * hashCode + (protocolName == null ? 0 : protocolName.hashCode()); hashCode = 31 * hashCode + (leader == null ? 0 : leader.hashCode()); hashCode = 31 * hashCode + (skipAssignment ? 1231 : 1237); hashCode = 31 * hashCode + (memberId == null ? 0 : memberId.hashCode()); hashCode = 31 * hashCode + (members == null ? 0 : members.hashCode()); return hashCode; } @Override public JoinGroupResponseData duplicate() { JoinGroupResponseData _duplicate = new JoinGroupResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; _duplicate.errorCode = errorCode; _duplicate.generationId = generationId; if (protocolType == null) { _duplicate.protocolType = null; } else { _duplicate.protocolType = protocolType; } if (protocolName == null) { _duplicate.protocolName = null; } else { _duplicate.protocolName = protocolName; } _duplicate.leader = leader; _duplicate.skipAssignment = skipAssignment; _duplicate.memberId = memberId; ArrayList<JoinGroupResponseMember> newMembers = new ArrayList<JoinGroupResponseMember>(members.size()); for (JoinGroupResponseMember _element : members) { newMembers.add(_element.duplicate()); } _duplicate.members = newMembers; return _duplicate; } @Override public String toString() { return "JoinGroupResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", errorCode=" + errorCode + ", generationId=" + generationId + ", protocolType=" + ((protocolType == null) ? "null" : "'" + protocolType.toString() + "'") + ", protocolName=" + ((protocolName == null) ? "null" : "'" + protocolName.toString() + "'") + ", leader=" + ((leader == null) ? "null" : "'" + leader.toString() + "'") + ", skipAssignment=" + (skipAssignment ? "true" : "false") + ", memberId=" + ((memberId == null) ? 
"null" : "'" + memberId.toString() + "'") + ", members=" + MessageUtil.deepToString(members.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public short errorCode() { return this.errorCode; } public int generationId() { return this.generationId; } public String protocolType() { return this.protocolType; } public String protocolName() { return this.protocolName; } public String leader() { return this.leader; } public boolean skipAssignment() { return this.skipAssignment; } public String memberId() { return this.memberId; } public List<JoinGroupResponseMember> members() { return this.members; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public JoinGroupResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public JoinGroupResponseData setErrorCode(short v) { this.errorCode = v; return this; } public JoinGroupResponseData setGenerationId(int v) { this.generationId = v; return this; } public JoinGroupResponseData setProtocolType(String v) { this.protocolType = v; return this; } public JoinGroupResponseData setProtocolName(String v) { this.protocolName = v; return this; } public JoinGroupResponseData setLeader(String v) { this.leader = v; return this; } public JoinGroupResponseData setSkipAssignment(boolean v) { this.skipAssignment = v; return this; } public JoinGroupResponseData setMemberId(String v) { this.memberId = v; return this; } public JoinGroupResponseData setMembers(List<JoinGroupResponseMember> v) { this.members = v; return this; } public static class JoinGroupResponseMember implements Message { String memberId; String groupInstanceId; byte[] metadata; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("member_id", Type.STRING, "The group member ID."), new Field("metadata", Type.BYTES, "The group member metadata.") 
); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("member_id", Type.STRING, "The group member ID."), new Field("group_instance_id", Type.NULLABLE_STRING, "The unique identifier of the consumer instance provided by end user."), new Field("metadata", Type.BYTES, "The group member metadata.") ); public static final Schema SCHEMA_6 = new Schema( new Field("member_id", Type.COMPACT_STRING, "The group member ID."), new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, "The unique identifier of the consumer instance provided by end user."), new Field("metadata", Type.COMPACT_BYTES, "The group member metadata."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = SCHEMA_8; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 9; public JoinGroupResponseMember(Readable _readable, short _version) { read(_readable, _version); } public JoinGroupResponseMember() { this.memberId = ""; this.groupInstanceId = null; this.metadata = Bytes.EMPTY; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 9; } @Override public void read(Readable _readable, short _version) { if (_version > 9) { throw new UnsupportedVersionException("Can't read version " + _version + " of JoinGroupResponseMember"); } { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field memberId was 
serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field memberId had invalid length " + length); } else { this.memberId = _readable.readString(length); } } if (_version >= 5) { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.groupInstanceId = null; } else if (length > 0x7fff) { throw new RuntimeException("string field groupInstanceId had invalid length " + length); } else { this.groupInstanceId = _readable.readString(length); } } else { this.groupInstanceId = null; } { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readInt(); } if (length < 0) { throw new RuntimeException("non-nullable field metadata was serialized as null"); } else { byte[] newBytes = _readable.readArray(length); this.metadata = newBytes; } } this._unknownTaggedFields = null; if (_version >= 6) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(memberId); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 5) { if (groupInstanceId == null) { if (_version >= 6) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(groupInstanceId); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { 
_writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } if (_version >= 6) { _writable.writeUnsignedVarint(metadata.length + 1); } else { _writable.writeInt(metadata.length); } _writable.writeByteArray(metadata); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 6) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 9) { throw new UnsupportedVersionException("Can't size version " + _version + " of JoinGroupResponseMember"); } { byte[] _stringBytes = memberId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'memberId' field is too long to be serialized"); } _cache.cacheSerializedValue(memberId, _stringBytes); if (_version >= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_version >= 5) { if (groupInstanceId == null) { if (_version >= 6) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = groupInstanceId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'groupInstanceId' field is too long to be serialized"); } _cache.cacheSerializedValue(groupInstanceId, _stringBytes); if (_version >= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } { _size.addBytes(metadata.length); if (_version >= 6) { 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(metadata.length + 1)); } else { _size.addBytes(4); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof JoinGroupResponseMember)) return false; JoinGroupResponseMember other = (JoinGroupResponseMember) obj; if (this.memberId == null) { if (other.memberId != null) return false; } else { if (!this.memberId.equals(other.memberId)) return false; } if (this.groupInstanceId == null) { if (other.groupInstanceId != null) return false; } else { if (!this.groupInstanceId.equals(other.groupInstanceId)) return false; } if (!Arrays.equals(this.metadata, other.metadata)) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (memberId == null ? 0 : memberId.hashCode()); hashCode = 31 * hashCode + (groupInstanceId == null ? 
0 : groupInstanceId.hashCode()); hashCode = 31 * hashCode + Arrays.hashCode(metadata); return hashCode; } @Override public JoinGroupResponseMember duplicate() { JoinGroupResponseMember _duplicate = new JoinGroupResponseMember(); _duplicate.memberId = memberId; if (groupInstanceId == null) { _duplicate.groupInstanceId = null; } else { _duplicate.groupInstanceId = groupInstanceId; } _duplicate.metadata = MessageUtil.duplicate(metadata); return _duplicate; } @Override public String toString() { return "JoinGroupResponseMember(" + "memberId=" + ((memberId == null) ? "null" : "'" + memberId.toString() + "'") + ", groupInstanceId=" + ((groupInstanceId == null) ? "null" : "'" + groupInstanceId.toString() + "'") + ", metadata=" + Arrays.toString(metadata) + ")"; } public String memberId() { return this.memberId; } public String groupInstanceId() { return this.groupInstanceId; } public byte[] metadata() { return this.metadata; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public JoinGroupResponseMember setMemberId(String v) { this.memberId = v; return this; } public JoinGroupResponseMember setGroupInstanceId(String v) { this.groupInstanceId = v; return this; } public JoinGroupResponseMember setMetadata(byte[] v) { this.metadata = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/JoinGroupResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BinaryNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import java.util.Arrays;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.JoinGroupResponseData.*;

/**
 * Converts {@link JoinGroupResponseData} to and from a Jackson JSON tree.
 *
 * <p>Generated code: every field access below mirrors the JoinGroupResponse
 * message spec. The {@code _version} parameter selects which fields exist:
 * throttleTimeMs appears at v2+, protocolType at v7+, groupInstanceId at v5+,
 * and skipAssignment at v9+. Fields below a gate are read with their default
 * and refused on write when non-default.
 */
public class JoinGroupResponseDataJsonConverter {
    /**
     * Builds a {@code JoinGroupResponseData} from a JSON object node.
     *
     * @param _node    the JSON object to read; each expected field is fetched by camelCase name
     * @param _version the message version; fields mandatory at this version raise
     *                 {@code RuntimeException} when absent
     * @return the populated data object
     */
    public static JoinGroupResponseData read(JsonNode _node, short _version) {
        JoinGroupResponseData _object = new JoinGroupResponseData();
        // throttleTimeMs: only exists at v2+; older versions default it to 0.
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            if (_version >= 2) {
                throw new RuntimeException("JoinGroupResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
            } else {
                _object.throttleTimeMs = 0;
            }
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "JoinGroupResponseData");
        }
        // errorCode: mandatory in every version.
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("JoinGroupResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "JoinGroupResponseData");
        }
        // generationId: mandatory in every version.
        JsonNode _generationIdNode = _node.get("generationId");
        if (_generationIdNode == null) {
            throw new RuntimeException("JoinGroupResponseData: unable to locate field 'generationId', which is mandatory in version " + _version);
        } else {
            _object.generationId = MessageUtil.jsonNodeToInt(_generationIdNode, "JoinGroupResponseData");
        }
        // protocolType: nullable string, only mandatory at v7+; absent below v7 -> null.
        JsonNode _protocolTypeNode = _node.get("protocolType");
        if (_protocolTypeNode == null) {
            if (_version >= 7) {
                throw new RuntimeException("JoinGroupResponseData: unable to locate field 'protocolType', which is mandatory in version " + _version);
            } else {
                _object.protocolType = null;
            }
        } else {
            if (_protocolTypeNode.isNull()) {
                _object.protocolType = null;
            } else {
                if (!_protocolTypeNode.isTextual()) {
                    // NOTE(review): generator quirk — the message reports _node.getNodeType()
                    // (the enclosing object's type, i.e. OBJECT) rather than the offending
                    // field node's type. Do not hand-edit; this matches the upstream generator.
                    throw new RuntimeException("JoinGroupResponseData expected a string type, but got " + _node.getNodeType());
                }
                _object.protocolType = _protocolTypeNode.asText();
            }
        }
        // protocolName: mandatory key in all versions, but its value may be JSON null.
        JsonNode _protocolNameNode = _node.get("protocolName");
        if (_protocolNameNode == null) {
            throw new RuntimeException("JoinGroupResponseData: unable to locate field 'protocolName', which is mandatory in version " + _version);
        } else {
            if (_protocolNameNode.isNull()) {
                _object.protocolName = null;
            } else {
                if (!_protocolNameNode.isTextual()) {
                    throw new RuntimeException("JoinGroupResponseData expected a string type, but got " + _node.getNodeType());
                }
                _object.protocolName = _protocolNameNode.asText();
            }
        }
        // leader: mandatory non-null string in all versions.
        JsonNode _leaderNode = _node.get("leader");
        if (_leaderNode == null) {
            throw new RuntimeException("JoinGroupResponseData: unable to locate field 'leader', which is mandatory in version " + _version);
        } else {
            if (!_leaderNode.isTextual()) {
                throw new RuntimeException("JoinGroupResponseData expected a string type, but got " + _node.getNodeType());
            }
            _object.leader = _leaderNode.asText();
        }
        // skipAssignment: only exists at v9+; older versions default to false.
        JsonNode _skipAssignmentNode = _node.get("skipAssignment");
        if (_skipAssignmentNode == null) {
            if (_version >= 9) {
                throw new RuntimeException("JoinGroupResponseData: unable to locate field 'skipAssignment', which is mandatory in version " + _version);
            } else {
                _object.skipAssignment = false;
            }
        } else {
            if (!_skipAssignmentNode.isBoolean()) {
                throw new RuntimeException("JoinGroupResponseData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.skipAssignment = _skipAssignmentNode.asBoolean();
        }
        // memberId: mandatory non-null string in all versions.
        JsonNode _memberIdNode = _node.get("memberId");
        if (_memberIdNode == null) {
            throw new RuntimeException("JoinGroupResponseData: unable to locate field 'memberId', which is mandatory in version " + _version);
        } else {
            if (!_memberIdNode.isTextual()) {
                throw new RuntimeException("JoinGroupResponseData expected a string type, but got " + _node.getNodeType());
            }
            _object.memberId = _memberIdNode.asText();
        }
        // members: mandatory JSON array; each element is delegated to the nested member converter.
        JsonNode _membersNode = _node.get("members");
        if (_membersNode == null) {
            throw new RuntimeException("JoinGroupResponseData: unable to locate field 'members', which is mandatory in version " + _version);
        } else {
            if (!_membersNode.isArray()) {
                throw new RuntimeException("JoinGroupResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            // Pre-size the list to the array length to avoid re-allocation.
            ArrayList<JoinGroupResponseMember> _collection = new ArrayList<JoinGroupResponseMember>(_membersNode.size());
            _object.members = _collection;
            for (JsonNode _element : _membersNode) {
                _collection.add(JoinGroupResponseMemberJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes a {@code JoinGroupResponseData} to a JSON object node.
     *
     * <p>Version-gated fields are emitted only at the versions that carry them;
     * writing a non-default {@code skipAssignment} below v9 fails with
     * {@link UnsupportedVersionException} rather than silently dropping data.
     *
     * @param _object           the data to serialize
     * @param _version          the target message version
     * @param _serializeRecords passed through to nested converters (unused for this message's fields)
     * @return the JSON representation
     */
    public static JsonNode write(JoinGroupResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        if (_version >= 2) {
            _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        }
        _node.set("errorCode", new ShortNode(_object.errorCode));
        _node.set("generationId", new IntNode(_object.generationId));
        if (_version >= 7) {
            if (_object.protocolType == null) {
                _node.set("protocolType", NullNode.instance);
            } else {
                _node.set("protocolType", new TextNode(_object.protocolType));
            }
        }
        if (_object.protocolName == null) {
            _node.set("protocolName", NullNode.instance);
        } else {
            _node.set("protocolName", new TextNode(_object.protocolName));
        }
        _node.set("leader", new TextNode(_object.leader));
        if (_version >= 9) {
            _node.set("skipAssignment", BooleanNode.valueOf(_object.skipAssignment));
        } else {
            // Field does not exist below v9: refuse to drop a non-default value.
            if (_object.skipAssignment) {
                throw new UnsupportedVersionException("Attempted to write a non-default skipAssignment at version " + _version);
            }
        }
        _node.set("memberId", new TextNode(_object.memberId));
        ArrayNode _membersArray = new ArrayNode(JsonNodeFactory.instance);
        for (JoinGroupResponseMember _element : _object.members) {
            _membersArray.add(JoinGroupResponseMemberJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("members", _membersArray);
        return _node;
    }

    /** Convenience overload of {@link #write(JoinGroupResponseData, short, boolean)} with record serialization enabled. */
    public static JsonNode write(JoinGroupResponseData _object, short _version) {
        return write(_object, _version, true);
    }

    /**
     * JSON converter for the nested {@link JoinGroupResponseMember} struct
     * (memberId, groupInstanceId at v5+, and the raw metadata bytes).
     */
    public static class JoinGroupResponseMemberJsonConverter {
        /**
         * Builds a {@code JoinGroupResponseMember} from a JSON object node.
         *
         * @param _node    the JSON object for one member entry
         * @param _version the message version; groupInstanceId is mandatory only at v5+
         * @return the populated member struct
         */
        public static JoinGroupResponseMember read(JsonNode _node, short _version) {
            JoinGroupResponseMember _object = new JoinGroupResponseMember();
            // memberId: mandatory non-null string in all versions.
            JsonNode _memberIdNode = _node.get("memberId");
            if (_memberIdNode == null) {
                throw new RuntimeException("JoinGroupResponseMember: unable to locate field 'memberId', which is mandatory in version " + _version);
            } else {
                if (!_memberIdNode.isTextual()) {
                    throw new RuntimeException("JoinGroupResponseMember expected a string type, but got " + _node.getNodeType());
                }
                _object.memberId = _memberIdNode.asText();
            }
            // groupInstanceId: nullable; the key itself is mandatory only at v5+.
            JsonNode _groupInstanceIdNode = _node.get("groupInstanceId");
            if (_groupInstanceIdNode == null) {
                if (_version >= 5) {
                    throw new RuntimeException("JoinGroupResponseMember: unable to locate field 'groupInstanceId', which is mandatory in version " + _version);
                } else {
                    _object.groupInstanceId = null;
                }
            } else {
                if (_groupInstanceIdNode.isNull()) {
                    _object.groupInstanceId = null;
                } else {
                    if (!_groupInstanceIdNode.isTextual()) {
                        throw new RuntimeException("JoinGroupResponseMember expected a string type, but got " + _node.getNodeType());
                    }
                    _object.groupInstanceId = _groupInstanceIdNode.asText();
                }
            }
            // metadata: mandatory binary payload, decoded via MessageUtil.
            JsonNode _metadataNode = _node.get("metadata");
            if (_metadataNode == null) {
                throw new RuntimeException("JoinGroupResponseMember: unable to locate field 'metadata', which is mandatory in version " + _version);
            } else {
                _object.metadata = MessageUtil.jsonNodeToBinary(_metadataNode, "JoinGroupResponseMember");
            }
            return _object;
        }

        /**
         * Serializes one member entry to a JSON object node.
         *
         * @param _object           the member to serialize
         * @param _version          the target message version (groupInstanceId only emitted at v5+)
         * @param _serializeRecords unused for this struct's fields
         * @return the JSON representation
         */
        public static JsonNode write(JoinGroupResponseMember _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("memberId", new TextNode(_object.memberId));
            if (_version >= 5) {
                if (_object.groupInstanceId == null) {
                    _node.set("groupInstanceId", NullNode.instance);
                } else {
                    _node.set("groupInstanceId", new TextNode(_object.groupInstanceId));
                }
            }
            // Defensive copy so the BinaryNode does not alias the struct's mutable byte[].
            _node.set("metadata", new BinaryNode(Arrays.copyOf(_object.metadata, _object.metadata.length)));
            return _node;
        }

        /** Convenience overload of {@link #write(JoinGroupResponseMember, short, boolean)} with record serialization enabled. */
        public static JsonNode write(JoinGroupResponseMember _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/LeaderAndIsrRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public 
class LeaderAndIsrRequestData implements ApiMessage { int controllerId; boolean isKRaftController; int controllerEpoch; long brokerEpoch; byte type; List<LeaderAndIsrPartitionState> ungroupedPartitionStates; List<LeaderAndIsrTopicState> topicStates; List<LeaderAndIsrLiveLeader> liveLeaders; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("controller_id", Type.INT32, "The current controller ID."), new Field("controller_epoch", Type.INT32, "The current controller epoch."), new Field("ungrouped_partition_states", new ArrayOf(LeaderAndIsrPartitionState.SCHEMA_0), "The state of each partition, in a v0 or v1 message."), new Field("live_leaders", new ArrayOf(LeaderAndIsrLiveLeader.SCHEMA_0), "The current live leaders.") ); public static final Schema SCHEMA_1 = new Schema( new Field("controller_id", Type.INT32, "The current controller ID."), new Field("controller_epoch", Type.INT32, "The current controller epoch."), new Field("ungrouped_partition_states", new ArrayOf(LeaderAndIsrPartitionState.SCHEMA_1), "The state of each partition, in a v0 or v1 message."), new Field("live_leaders", new ArrayOf(LeaderAndIsrLiveLeader.SCHEMA_0), "The current live leaders.") ); public static final Schema SCHEMA_2 = new Schema( new Field("controller_id", Type.INT32, "The current controller ID."), new Field("controller_epoch", Type.INT32, "The current controller epoch."), new Field("broker_epoch", Type.INT64, "The current broker epoch."), new Field("topic_states", new ArrayOf(LeaderAndIsrTopicState.SCHEMA_2), "Each topic."), new Field("live_leaders", new ArrayOf(LeaderAndIsrLiveLeader.SCHEMA_0), "The current live leaders.") ); public static final Schema SCHEMA_3 = new Schema( new Field("controller_id", Type.INT32, "The current controller ID."), new Field("controller_epoch", Type.INT32, "The current controller epoch."), new Field("broker_epoch", Type.INT64, "The current broker epoch."), new Field("topic_states", new 
ArrayOf(LeaderAndIsrTopicState.SCHEMA_3), "Each topic."), new Field("live_leaders", new ArrayOf(LeaderAndIsrLiveLeader.SCHEMA_0), "The current live leaders.") ); public static final Schema SCHEMA_4 = new Schema( new Field("controller_id", Type.INT32, "The current controller ID."), new Field("controller_epoch", Type.INT32, "The current controller epoch."), new Field("broker_epoch", Type.INT64, "The current broker epoch."), new Field("topic_states", new CompactArrayOf(LeaderAndIsrTopicState.SCHEMA_4), "Each topic."), new Field("live_leaders", new CompactArrayOf(LeaderAndIsrLiveLeader.SCHEMA_4), "The current live leaders."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_5 = new Schema( new Field("controller_id", Type.INT32, "The current controller ID."), new Field("controller_epoch", Type.INT32, "The current controller epoch."), new Field("broker_epoch", Type.INT64, "The current broker epoch."), new Field("type", Type.INT8, "The type that indicates whether all topics are included in the request"), new Field("topic_states", new CompactArrayOf(LeaderAndIsrTopicState.SCHEMA_5), "Each topic."), new Field("live_leaders", new CompactArrayOf(LeaderAndIsrLiveLeader.SCHEMA_4), "The current live leaders."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_6 = new Schema( new Field("controller_id", Type.INT32, "The current controller ID."), new Field("controller_epoch", Type.INT32, "The current controller epoch."), new Field("broker_epoch", Type.INT64, "The current broker epoch."), new Field("type", Type.INT8, "The type that indicates whether all topics are included in the request"), new Field("topic_states", new CompactArrayOf(LeaderAndIsrTopicState.SCHEMA_6), "Each topic."), new Field("live_leaders", new CompactArrayOf(LeaderAndIsrLiveLeader.SCHEMA_4), "The current live leaders."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = new Schema( new Field("controller_id", Type.INT32, "The current controller ID."), new 
Field("is_kraft_controller", Type.BOOLEAN, "If KRaft controller id is used during migration. See KIP-866"), new Field("controller_epoch", Type.INT32, "The current controller epoch."), new Field("broker_epoch", Type.INT64, "The current broker epoch."), new Field("type", Type.INT8, "The type that indicates whether all topics are included in the request"), new Field("topic_states", new CompactArrayOf(LeaderAndIsrTopicState.SCHEMA_6), "Each topic."), new Field("live_leaders", new CompactArrayOf(LeaderAndIsrLiveLeader.SCHEMA_4), "The current live leaders."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 7; public LeaderAndIsrRequestData(Readable _readable, short _version) { read(_readable, _version); } public LeaderAndIsrRequestData() { this.controllerId = 0; this.isKRaftController = false; this.controllerEpoch = 0; this.brokerEpoch = -1L; this.type = (byte) 0; this.ungroupedPartitionStates = new ArrayList<LeaderAndIsrPartitionState>(0); this.topicStates = new ArrayList<LeaderAndIsrTopicState>(0); this.liveLeaders = new ArrayList<LeaderAndIsrLiveLeader>(0); } @Override public short apiKey() { return 4; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 7; } @Override public void read(Readable _readable, short _version) { this.controllerId = _readable.readInt(); if (_version >= 7) { this.isKRaftController = _readable.readByte() != 0; } else { this.isKRaftController = false; } this.controllerEpoch = _readable.readInt(); if (_version >= 2) { this.brokerEpoch = _readable.readLong(); } else { this.brokerEpoch = -1L; } if (_version >= 5) { this.type = _readable.readByte(); } else { this.type = (byte) 0; } if (_version <= 1) { int arrayLength; arrayLength = _readable.readInt(); if 
/* NOTE(review): auto-generated Kafka message code ("DO NOT EDIT" per the file header).
 * Tokens below are byte-identical to the original; only formatting and comments were
 * added. This span begins mid-way through LeaderAndIsrRequestData.read(Readable, short)
 * whose opening lines precede this chunk. Statement order mirrors the wire format
 * exactly -- do not reorder by hand. */
(arrayLength < 0) {
    throw new RuntimeException("non-nullable field ungroupedPartitionStates was serialized as null");
} else {
    /* Guard: never allocate more elements than bytes remain in the buffer,
     * so a corrupt/hostile length cannot force a huge allocation. */
    if (arrayLength > _readable.remaining()) {
        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
    }
    ArrayList<LeaderAndIsrPartitionState> newCollection = new ArrayList<>(arrayLength);
    for (int i = 0; i < arrayLength; i++) {
        newCollection.add(new LeaderAndIsrPartitionState(_readable, _version));
    }
    this.ungroupedPartitionStates = newCollection;
}
} else {
    this.ungroupedPartitionStates = new ArrayList<LeaderAndIsrPartitionState>(0);
}
/* topicStates: present from v2 on; v4+ uses the compact (unsigned-varint, length+1)
 * array encoding, older versions a fixed 4-byte int length. */
if (_version >= 2) {
    if (_version >= 4) {
        int arrayLength;
        arrayLength = _readable.readUnsignedVarint() - 1;
        if (arrayLength < 0) {
            throw new RuntimeException("non-nullable field topicStates was serialized as null");
        } else {
            if (arrayLength > _readable.remaining()) {
                throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
            }
            ArrayList<LeaderAndIsrTopicState> newCollection = new ArrayList<>(arrayLength);
            for (int i = 0; i < arrayLength; i++) {
                newCollection.add(new LeaderAndIsrTopicState(_readable, _version));
            }
            this.topicStates = newCollection;
        }
    } else {
        int arrayLength;
        arrayLength = _readable.readInt();
        if (arrayLength < 0) {
            throw new RuntimeException("non-nullable field topicStates was serialized as null");
        } else {
            if (arrayLength > _readable.remaining()) {
                throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
            }
            ArrayList<LeaderAndIsrTopicState> newCollection = new ArrayList<>(arrayLength);
            for (int i = 0; i < arrayLength; i++) {
                newCollection.add(new LeaderAndIsrTopicState(_readable, _version));
            }
            this.topicStates = newCollection;
        }
    }
} else {
    this.topicStates = new ArrayList<LeaderAndIsrTopicState>(0);
}
/* liveLeaders: read in every version; compact encoding from v4. */
{
    if (_version >= 4) {
        int arrayLength;
        arrayLength = _readable.readUnsignedVarint() - 1;
        if (arrayLength < 0) {
            throw new RuntimeException("non-nullable field liveLeaders was serialized as null");
        } else {
            if (arrayLength > _readable.remaining()) {
                throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
            }
            ArrayList<LeaderAndIsrLiveLeader> newCollection = new ArrayList<>(arrayLength);
            for (int i = 0; i < arrayLength; i++) {
                newCollection.add(new LeaderAndIsrLiveLeader(_readable, _version));
            }
            this.liveLeaders = newCollection;
        }
    } else {
        int arrayLength;
        arrayLength = _readable.readInt();
        if (arrayLength < 0) {
            throw new RuntimeException("non-nullable field liveLeaders was serialized as null");
        } else {
            if (arrayLength > _readable.remaining()) {
                throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
            }
            ArrayList<LeaderAndIsrLiveLeader> newCollection = new ArrayList<>(arrayLength);
            for (int i = 0; i < arrayLength; i++) {
                newCollection.add(new LeaderAndIsrLiveLeader(_readable, _version));
            }
            this.liveLeaders = newCollection;
        }
    }
}
/* Tagged (KIP-482) trailer: only flexible versions (v4+) carry it; unknown tags are
 * preserved verbatim so they round-trip through re-serialization. */
this._unknownTaggedFields = null;
if (_version >= 4) {
    int _numTaggedFields = _readable.readUnsignedVarint();
    for (int _i = 0; _i < _numTaggedFields; _i++) {
        int _tag = _readable.readUnsignedVarint();
        int _size = _readable.readUnsignedVarint();
        switch (_tag) {
            default:
                this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                break;
        }
    }
}
}

/* Serializes this request for the given protocol version. Fields not supported by the
 * requested version must hold their defaults, otherwise UnsupportedVersionException is
 * thrown rather than silently dropping data. Mirrors read() field-for-field. */
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
    int _numTaggedFields = 0;
    _writable.writeInt(controllerId);
    if (_version >= 7) {
        _writable.writeByte(isKRaftController ? (byte) 1 : (byte) 0);
    } else {
        if (this.isKRaftController) {
            throw new UnsupportedVersionException("Attempted to write a non-default isKRaftController at version " + _version);
        }
    }
    _writable.writeInt(controllerEpoch);
    if (_version >= 2) {
        _writable.writeLong(brokerEpoch);
    }
    if (_version >= 5) {
        _writable.writeByte(type);
    } else {
        if (this.type != (byte) 0) {
            throw new UnsupportedVersionException("Attempted to write a non-default type at version " + _version);
        }
    }
    /* v0-v1 carry partitions ungrouped; v2+ group them per topic instead. */
    if (_version <= 1) {
        _writable.writeInt(ungroupedPartitionStates.size());
        for (LeaderAndIsrPartitionState ungroupedPartitionStatesElement : ungroupedPartitionStates) {
            ungroupedPartitionStatesElement.write(_writable, _cache, _version);
        }
    } else {
        if (!this.ungroupedPartitionStates.isEmpty()) {
            throw new UnsupportedVersionException("Attempted to write a non-default ungroupedPartitionStates at version " + _version);
        }
    }
    if (_version >= 2) {
        if (_version >= 4) {
            /* Compact arrays encode length + 1 so that 0 can mean "null". */
            _writable.writeUnsignedVarint(topicStates.size() + 1);
            for (LeaderAndIsrTopicState topicStatesElement : topicStates) {
                topicStatesElement.write(_writable, _cache, _version);
            }
        } else {
            _writable.writeInt(topicStates.size());
            for (LeaderAndIsrTopicState topicStatesElement : topicStates) {
                topicStatesElement.write(_writable, _cache, _version);
            }
        }
    } else {
        if (!this.topicStates.isEmpty()) {
            throw new UnsupportedVersionException("Attempted to write a non-default topicStates at version " + _version);
        }
    }
    if (_version >= 4) {
        _writable.writeUnsignedVarint(liveLeaders.size() + 1);
        for (LeaderAndIsrLiveLeader liveLeadersElement : liveLeaders) {
            liveLeadersElement.write(_writable, _cache, _version);
        }
    } else {
        _writable.writeInt(liveLeaders.size());
        for (LeaderAndIsrLiveLeader liveLeadersElement : liveLeaders) {
            liveLeadersElement.write(_writable, _cache, _version);
        }
    }
    RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
    _numTaggedFields += _rawWriter.numFields();
    if (_version >= 4) {
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    } else {
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }
}

/* Accumulates the exact serialized size of this message for the given version.
 * Must agree byte-for-byte with write(); string bytes are cached in _cache so
 * write() can reuse them without re-encoding. */
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
    int _numTaggedFields = 0;
    _size.addBytes(4);
    if (_version >= 7) {
        _size.addBytes(1);
    }
    _size.addBytes(4);
    if (_version >= 2) {
        _size.addBytes(8);
    }
    if (_version >= 5) {
        _size.addBytes(1);
    }
    if (_version <= 1) {
        {
            _size.addBytes(4);
            for (LeaderAndIsrPartitionState ungroupedPartitionStatesElement : ungroupedPartitionStates) {
                ungroupedPartitionStatesElement.addSize(_size, _cache, _version);
            }
        }
    }
    if (_version >= 2) {
        {
            if (_version >= 4) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topicStates.size() + 1));
            } else {
                _size.addBytes(4);
            }
            for (LeaderAndIsrTopicState topicStatesElement : topicStates) {
                topicStatesElement.addSize(_size, _cache, _version);
            }
        }
    }
    {
        if (_version >= 4) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(liveLeaders.size() + 1));
        } else {
            _size.addBytes(4);
        }
        for (LeaderAndIsrLiveLeader liveLeadersElement : liveLeaders) {
            liveLeadersElement.addSize(_size, _cache, _version);
        }
    }
    if (_unknownTaggedFields != null) {
        _numTaggedFields += _unknownTaggedFields.size();
        for (RawTaggedField _field : _unknownTaggedFields) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
            _size.addBytes(_field.size());
        }
    }
    if (_version >= 4) {
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    } else {
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }
}

/* Structural equality over every field, including preserved unknown tagged fields. */
@Override
public boolean equals(Object obj) {
    if (!(obj instanceof LeaderAndIsrRequestData)) return false;
    LeaderAndIsrRequestData other = (LeaderAndIsrRequestData) obj;
    if (controllerId != other.controllerId) return false;
    if (isKRaftController != other.isKRaftController) return false;
    if (controllerEpoch != other.controllerEpoch) return false;
    if (brokerEpoch != other.brokerEpoch) return false;
    if (type != other.type) return false;
    if (this.ungroupedPartitionStates == null) {
        if (other.ungroupedPartitionStates != null) return false;
    } else {
        if (!this.ungroupedPartitionStates.equals(other.ungroupedPartitionStates)) return false;
    }
    if (this.topicStates == null) {
        if (other.topicStates != null) return false;
    } else {
        if (!this.topicStates.equals(other.topicStates)) return false;
    }
    if (this.liveLeaders == null) {
        if (other.liveLeaders != null) return false;
    } else {
        if (!this.liveLeaders.equals(other.liveLeaders)) return false;
    }
    return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}

/* Hash over the same fields equals() compares (tagged fields excluded, as generated). */
@Override
public int hashCode() {
    int hashCode = 0;
    hashCode = 31 * hashCode + controllerId;
    hashCode = 31 * hashCode + (isKRaftController ? 1231 : 1237);
    hashCode = 31 * hashCode + controllerEpoch;
    hashCode = 31 * hashCode + ((int) (brokerEpoch >> 32) ^ (int) brokerEpoch);
    hashCode = 31 * hashCode + type;
    hashCode = 31 * hashCode + (ungroupedPartitionStates == null ? 0 : ungroupedPartitionStates.hashCode());
    hashCode = 31 * hashCode + (topicStates == null ? 0 : topicStates.hashCode());
    hashCode = 31 * hashCode + (liveLeaders == null ? 0 : liveLeaders.hashCode());
    return hashCode;
}

/* Deep copy: nested structs are duplicated element-by-element so the copy shares
 * no mutable state with this instance. */
@Override
public LeaderAndIsrRequestData duplicate() {
    LeaderAndIsrRequestData _duplicate = new LeaderAndIsrRequestData();
    _duplicate.controllerId = controllerId;
    _duplicate.isKRaftController = isKRaftController;
    _duplicate.controllerEpoch = controllerEpoch;
    _duplicate.brokerEpoch = brokerEpoch;
    _duplicate.type = type;
    ArrayList<LeaderAndIsrPartitionState> newUngroupedPartitionStates = new ArrayList<LeaderAndIsrPartitionState>(ungroupedPartitionStates.size());
    for (LeaderAndIsrPartitionState _element : ungroupedPartitionStates) {
        newUngroupedPartitionStates.add(_element.duplicate());
    }
    _duplicate.ungroupedPartitionStates = newUngroupedPartitionStates;
    ArrayList<LeaderAndIsrTopicState> newTopicStates = new ArrayList<LeaderAndIsrTopicState>(topicStates.size());
    for (LeaderAndIsrTopicState _element : topicStates) {
        newTopicStates.add(_element.duplicate());
    }
    _duplicate.topicStates = newTopicStates;
    ArrayList<LeaderAndIsrLiveLeader> newLiveLeaders = new ArrayList<LeaderAndIsrLiveLeader>(liveLeaders.size());
    for (LeaderAndIsrLiveLeader _element : liveLeaders) {
        newLiveLeaders.add(_element.duplicate());
    }
    _duplicate.liveLeaders = newLiveLeaders;
    return _duplicate;
}

/* Human-readable rendering for logs; field order matches the declaration order. */
@Override
public String toString() {
    return "LeaderAndIsrRequestData("
        + "controllerId=" + controllerId
        + ", isKRaftController=" + (isKRaftController ? "true" : "false")
        + ", controllerEpoch=" + controllerEpoch
        + ", brokerEpoch=" + brokerEpoch
        + ", type=" + type
        + ", ungroupedPartitionStates=" + MessageUtil.deepToString(ungroupedPartitionStates.iterator())
        + ", topicStates=" + MessageUtil.deepToString(topicStates.iterator())
        + ", liveLeaders=" + MessageUtil.deepToString(liveLeaders.iterator())
        + ")";
}

/* Generated accessors. */
public int controllerId() {
    return this.controllerId;
}

public boolean isKRaftController() {
    return this.isKRaftController;
}

public int controllerEpoch() {
    return this.controllerEpoch;
}

public long brokerEpoch() {
    return this.brokerEpoch;
}

public byte type() {
    return this.type;
}

public List<LeaderAndIsrPartitionState> ungroupedPartitionStates() {
    return this.ungroupedPartitionStates;
}

public List<LeaderAndIsrTopicState> topicStates() {
    return this.topicStates;
}

public List<LeaderAndIsrLiveLeader> liveLeaders() {
    return this.liveLeaders;
}

/* Lazily materializes the unknown-tagged-field list so callers can append to it. */
@Override
public List<RawTaggedField> unknownTaggedFields() {
    if (_unknownTaggedFields == null) {
        _unknownTaggedFields = new ArrayList<>(0);
    }
    return _unknownTaggedFields;
}

/* Generated fluent setters; no validation is performed here. */
public LeaderAndIsrRequestData setControllerId(int v) {
    this.controllerId = v;
    return this;
}

public LeaderAndIsrRequestData setIsKRaftController(boolean v) {
    this.isKRaftController = v;
    return this;
}

public LeaderAndIsrRequestData setControllerEpoch(int v) {
    this.controllerEpoch = v;
    return this;
}

public LeaderAndIsrRequestData setBrokerEpoch(long v) {
    this.brokerEpoch = v;
    return this;
}

public LeaderAndIsrRequestData setType(byte v) {
    this.type = v;
    return this;
}

public LeaderAndIsrRequestData setUngroupedPartitionStates(List<LeaderAndIsrPartitionState> v) {
    this.ungroupedPartitionStates = v;
    return this;
}

public LeaderAndIsrRequestData setTopicStates(List<LeaderAndIsrTopicState> v) {
    this.topicStates = v;
    return this;
}

public LeaderAndIsrRequestData setLiveLeaders(List<LeaderAndIsrLiveLeader> v) {
    this.liveLeaders = v;
    return this;
}

/* Per-topic grouping of partition states; used by message versions 2 and above. */
public static class LeaderAndIsrTopicState implements Message {
/* NOTE(review): generated struct body -- tokens unchanged, only comments/formatting added. */
String topicName;           /* topic name; empty string when absent */
Uuid topicId;               /* unique topic ID; ZERO_UUID before v5 */
List<LeaderAndIsrPartitionState> partitionStates;
private List<RawTaggedField> _unknownTaggedFields;

/* Wire schemas per message version. This struct first appears in v2; v4+ switch to
 * compact encodings and a tagged-fields section, v5 adds topic_id. */
public static final Schema SCHEMA_2 =
    new Schema(
        new Field("topic_name", Type.STRING, "The topic name."),
        new Field("partition_states", new ArrayOf(LeaderAndIsrPartitionState.SCHEMA_2), "The state of each partition")
    );

public static final Schema SCHEMA_3 =
    new Schema(
        new Field("topic_name", Type.STRING, "The topic name."),
        new Field("partition_states", new ArrayOf(LeaderAndIsrPartitionState.SCHEMA_3), "The state of each partition")
    );

public static final Schema SCHEMA_4 =
    new Schema(
        new Field("topic_name", Type.COMPACT_STRING, "The topic name."),
        new Field("partition_states", new CompactArrayOf(LeaderAndIsrPartitionState.SCHEMA_4), "The state of each partition"),
        TaggedFieldsSection.of(
        )
    );

public static final Schema SCHEMA_5 =
    new Schema(
        new Field("topic_name", Type.COMPACT_STRING, "The topic name."),
        new Field("topic_id", Type.UUID, "The unique topic ID."),
        new Field("partition_states", new CompactArrayOf(LeaderAndIsrPartitionState.SCHEMA_4), "The state of each partition"),
        TaggedFieldsSection.of(
        )
    );

public static final Schema SCHEMA_6 =
    new Schema(
        new Field("topic_name", Type.COMPACT_STRING, "The topic name."),
        new Field("topic_id", Type.UUID, "The unique topic ID."),
        new Field("partition_states", new CompactArrayOf(LeaderAndIsrPartitionState.SCHEMA_6), "The state of each partition"),
        TaggedFieldsSection.of(
        )
    );

public static final Schema SCHEMA_7 = SCHEMA_6;

/* Indexed by version; null for v0/v1 where this struct does not exist. */
public static final Schema[] SCHEMAS = new Schema[] {
    null,
    null,
    SCHEMA_2,
    SCHEMA_3,
    SCHEMA_4,
    SCHEMA_5,
    SCHEMA_6,
    SCHEMA_7
};

public static final short LOWEST_SUPPORTED_VERSION = 2;
public static final short HIGHEST_SUPPORTED_VERSION = 7;

public LeaderAndIsrTopicState(Readable _readable, short _version) {
    read(_readable, _version);
}

public LeaderAndIsrTopicState() {
    this.topicName = "";
    this.topicId = Uuid.ZERO_UUID;
    this.partitionStates = new ArrayList<LeaderAndIsrPartitionState>(0);
}

/* NOTE(review): returns 0 although LOWEST_SUPPORTED_VERSION is 2 -- this is how the
 * generator emits nested structs; do not "correct" by hand. */
@Override
public short lowestSupportedVersion() {
    return 0;
}

@Override
public short highestSupportedVersion() {
    return 7;
}

/* Deserializes one topic-state struct; field order mirrors the schema for _version. */
@Override
public void read(Readable _readable, short _version) {
    if (_version > 7) {
        throw new UnsupportedVersionException("Can't read version " + _version + " of LeaderAndIsrTopicState");
    }
    {
        int length;
        if (_version >= 4) {
            /* Compact strings encode length + 1 (0 means null). */
            length = _readable.readUnsignedVarint() - 1;
        } else {
            length = _readable.readShort();
        }
        if (length < 0) {
            throw new RuntimeException("non-nullable field topicName was serialized as null");
        } else if (length > 0x7fff) {
            throw new RuntimeException("string field topicName had invalid length " + length);
        } else {
            this.topicName = _readable.readString(length);
        }
    }
    if (_version >= 5) {
        this.topicId = _readable.readUuid();
    } else {
        this.topicId = Uuid.ZERO_UUID;
    }
    {
        if (_version >= 4) {
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field partitionStates was serialized as null");
            } else {
                /* Guard against corrupt lengths before allocating. */
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<LeaderAndIsrPartitionState> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new LeaderAndIsrPartitionState(_readable, _version));
                }
                this.partitionStates = newCollection;
            }
        } else {
            int arrayLength;
            arrayLength = _readable.readInt();
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field partitionStates was serialized as null");
            } else {
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<LeaderAndIsrPartitionState> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new LeaderAndIsrPartitionState(_readable, _version));
                }
                this.partitionStates = newCollection;
            }
        }
    }
    /* Tagged trailer (v4+): unknown tags are preserved for round-tripping. */
    this._unknownTaggedFields = null;
    if (_version >= 4) {
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }
}

/* Serializes this struct; must mirror read() and addSize() exactly. */
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
    if (_version < 2) {
        throw new UnsupportedVersionException("Can't write version " + _version + " of LeaderAndIsrTopicState");
    }
    int _numTaggedFields = 0;
    {
        /* topicName bytes were pre-encoded and cached by addSize(). */
        byte[] _stringBytes = _cache.getSerializedValue(topicName);
        if (_version >= 4) {
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
        } else {
            _writable.writeShort((short) _stringBytes.length);
        }
        _writable.writeByteArray(_stringBytes);
    }
    if (_version >= 5) {
        _writable.writeUuid(topicId);
    }
    if (_version >= 4) {
        _writable.writeUnsignedVarint(partitionStates.size() + 1);
        for (LeaderAndIsrPartitionState partitionStatesElement : partitionStates) {
            partitionStatesElement.write(_writable, _cache, _version);
        }
    } else {
        _writable.writeInt(partitionStates.size());
        for (LeaderAndIsrPartitionState partitionStatesElement : partitionStates) {
            partitionStatesElement.write(_writable, _cache, _version);
        }
    }
    RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
    _numTaggedFields += _rawWriter.numFields();
    if (_version >= 4) {
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    } else {
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }
}

/* Accumulates serialized size; also caches the UTF-8 topicName bytes for write(). */
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
    int _numTaggedFields = 0;
    if (_version > 7) {
        throw new UnsupportedVersionException("Can't size version " + _version + " of LeaderAndIsrTopicState");
    }
    {
        byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8);
        if (_stringBytes.length > 0x7fff) {
            throw new RuntimeException("'topicName' field is too long to be serialized");
        }
        _cache.cacheSerializedValue(topicName, _stringBytes);
        if (_version >= 4) {
            _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
        } else {
            _size.addBytes(_stringBytes.length + 2);
        }
    }
    if (_version >= 5) {
        _size.addBytes(16);
    }
    {
        if (_version >= 4) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitionStates.size() + 1));
        } else {
            _size.addBytes(4);
        }
        for (LeaderAndIsrPartitionState partitionStatesElement : partitionStates) {
            partitionStatesElement.addSize(_size, _cache, _version);
        }
    }
    if (_unknownTaggedFields != null) {
        _numTaggedFields += _unknownTaggedFields.size();
        for (RawTaggedField _field : _unknownTaggedFields) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
            _size.addBytes(_field.size());
        }
    }
    if (_version >= 4) {
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    } else {
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }
}

/* Structural equality, including preserved unknown tagged fields. */
@Override
public boolean equals(Object obj) {
    if (!(obj instanceof LeaderAndIsrTopicState)) return false;
    LeaderAndIsrTopicState other = (LeaderAndIsrTopicState) obj;
    if (this.topicName == null) {
        if (other.topicName != null) return false;
    } else {
        if (!this.topicName.equals(other.topicName)) return false;
    }
    if (!this.topicId.equals(other.topicId)) return false;
    if (this.partitionStates == null) {
        if (other.partitionStates != null) return false;
    } else {
        if (!this.partitionStates.equals(other.partitionStates)) return false;
    }
    return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}

@Override
public int hashCode() {
    int hashCode = 0;
    hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode());
    hashCode = 31 * hashCode + topicId.hashCode();
    hashCode = 31 * hashCode + (partitionStates == null ? 0 : partitionStates.hashCode());
    return hashCode;
}

/* Deep copy: partitionStates elements are duplicated individually. */
@Override
public LeaderAndIsrTopicState duplicate() {
    LeaderAndIsrTopicState _duplicate = new LeaderAndIsrTopicState();
    _duplicate.topicName = topicName;
    _duplicate.topicId = topicId;
    ArrayList<LeaderAndIsrPartitionState> newPartitionStates = new ArrayList<LeaderAndIsrPartitionState>(partitionStates.size());
    for (LeaderAndIsrPartitionState _element : partitionStates) {
        newPartitionStates.add(_element.duplicate());
    }
    _duplicate.partitionStates = newPartitionStates;
    return _duplicate;
}

@Override
public String toString() {
    return "LeaderAndIsrTopicState("
        + "topicName=" + ((topicName == null) ? "null" : "'" + topicName.toString() + "'")
        + ", topicId=" + topicId.toString()
        + ", partitionStates=" + MessageUtil.deepToString(partitionStates.iterator())
        + ")";
}

/* Generated accessors and fluent setters. */
public String topicName() {
    return this.topicName;
}

public Uuid topicId() {
    return this.topicId;
}

public List<LeaderAndIsrPartitionState> partitionStates() {
    return this.partitionStates;
}

@Override
public List<RawTaggedField> unknownTaggedFields() {
    if (_unknownTaggedFields == null) {
        _unknownTaggedFields = new ArrayList<>(0);
    }
    return _unknownTaggedFields;
}

public LeaderAndIsrTopicState setTopicName(String v) {
    this.topicName = v;
    return this;
}

public LeaderAndIsrTopicState setTopicId(Uuid v) {
    this.topicId = v;
    return this;
}

public LeaderAndIsrTopicState setPartitionStates(List<LeaderAndIsrPartitionState> v) {
    this.partitionStates = v;
    return this;
}
}

/* One live leader (broker id, host, port) advertised with the request. */
public static class LeaderAndIsrLiveLeader implements Message {
    int brokerId;
    String hostName;
    int port;
    private List<RawTaggedField> _unknownTaggedFields;
    /* (declaration continues on the following line of the original file) */
    public static
/* NOTE(review): generated struct body -- tokens unchanged, only comments/formatting
 * added. This span continues the "public static" declaration begun on the previous
 * wrapped line and ends mid-way through toString() (the tail follows this span). */
final Schema SCHEMA_0 =
    new Schema(
        new Field("broker_id", Type.INT32, "The leader's broker ID."),
        new Field("host_name", Type.STRING, "The leader's hostname."),
        new Field("port", Type.INT32, "The leader's port.")
    );

public static final Schema SCHEMA_1 = SCHEMA_0;
public static final Schema SCHEMA_2 = SCHEMA_1;
public static final Schema SCHEMA_3 = SCHEMA_2;

/* v4+ switch host_name to a compact string and add a tagged-fields section. */
public static final Schema SCHEMA_4 =
    new Schema(
        new Field("broker_id", Type.INT32, "The leader's broker ID."),
        new Field("host_name", Type.COMPACT_STRING, "The leader's hostname."),
        new Field("port", Type.INT32, "The leader's port."),
        TaggedFieldsSection.of(
        )
    );

public static final Schema SCHEMA_5 = SCHEMA_4;
public static final Schema SCHEMA_6 = SCHEMA_5;
public static final Schema SCHEMA_7 = SCHEMA_6;

public static final Schema[] SCHEMAS = new Schema[] {
    SCHEMA_0,
    SCHEMA_1,
    SCHEMA_2,
    SCHEMA_3,
    SCHEMA_4,
    SCHEMA_5,
    SCHEMA_6,
    SCHEMA_7
};

public static final short LOWEST_SUPPORTED_VERSION = 0;
public static final short HIGHEST_SUPPORTED_VERSION = 7;

public LeaderAndIsrLiveLeader(Readable _readable, short _version) {
    read(_readable, _version);
}

public LeaderAndIsrLiveLeader() {
    this.brokerId = 0;
    this.hostName = "";
    this.port = 0;
}

@Override
public short lowestSupportedVersion() {
    return 0;
}

@Override
public short highestSupportedVersion() {
    return 7;
}

/* Deserializes one live-leader struct for the given version. */
@Override
public void read(Readable _readable, short _version) {
    if (_version > 7) {
        throw new UnsupportedVersionException("Can't read version " + _version + " of LeaderAndIsrLiveLeader");
    }
    this.brokerId = _readable.readInt();
    {
        int length;
        if (_version >= 4) {
            /* Compact strings encode length + 1 (0 means null). */
            length = _readable.readUnsignedVarint() - 1;
        } else {
            length = _readable.readShort();
        }
        if (length < 0) {
            throw new RuntimeException("non-nullable field hostName was serialized as null");
        } else if (length > 0x7fff) {
            throw new RuntimeException("string field hostName had invalid length " + length);
        } else {
            this.hostName = _readable.readString(length);
        }
    }
    this.port = _readable.readInt();
    /* Tagged trailer (v4+): unknown tags preserved for round-tripping. */
    this._unknownTaggedFields = null;
    if (_version >= 4) {
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }
}

/* Serializes this struct; mirrors read() and addSize() exactly. */
@Override
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
    int _numTaggedFields = 0;
    _writable.writeInt(brokerId);
    {
        /* hostName bytes were pre-encoded and cached by addSize(). */
        byte[] _stringBytes = _cache.getSerializedValue(hostName);
        if (_version >= 4) {
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
        } else {
            _writable.writeShort((short) _stringBytes.length);
        }
        _writable.writeByteArray(_stringBytes);
    }
    _writable.writeInt(port);
    RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
    _numTaggedFields += _rawWriter.numFields();
    if (_version >= 4) {
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    } else {
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }
}

/* Accumulates serialized size; caches the UTF-8 hostName bytes for write(). */
@Override
public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
    int _numTaggedFields = 0;
    if (_version > 7) {
        throw new UnsupportedVersionException("Can't size version " + _version + " of LeaderAndIsrLiveLeader");
    }
    _size.addBytes(4);
    {
        byte[] _stringBytes = hostName.getBytes(StandardCharsets.UTF_8);
        if (_stringBytes.length > 0x7fff) {
            throw new RuntimeException("'hostName' field is too long to be serialized");
        }
        _cache.cacheSerializedValue(hostName, _stringBytes);
        if (_version >= 4) {
            _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
        } else {
            _size.addBytes(_stringBytes.length + 2);
        }
    }
    _size.addBytes(4);
    if (_unknownTaggedFields != null) {
        _numTaggedFields += _unknownTaggedFields.size();
        for (RawTaggedField _field : _unknownTaggedFields) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
            _size.addBytes(_field.size());
        }
    }
    if (_version >= 4) {
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    } else {
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }
}

/* Structural equality, including preserved unknown tagged fields. */
@Override
public boolean equals(Object obj) {
    if (!(obj instanceof LeaderAndIsrLiveLeader)) return false;
    LeaderAndIsrLiveLeader other = (LeaderAndIsrLiveLeader) obj;
    if (brokerId != other.brokerId) return false;
    if (this.hostName == null) {
        if (other.hostName != null) return false;
    } else {
        if (!this.hostName.equals(other.hostName)) return false;
    }
    if (port != other.port) return false;
    return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
}

@Override
public int hashCode() {
    int hashCode = 0;
    hashCode = 31 * hashCode + brokerId;
    hashCode = 31 * hashCode + (hostName == null ? 0 : hostName.hashCode());
    hashCode = 31 * hashCode + port;
    return hashCode;
}

/* Fields are all value types/immutables, so a field-by-field copy is a deep copy. */
@Override
public LeaderAndIsrLiveLeader duplicate() {
    LeaderAndIsrLiveLeader _duplicate = new LeaderAndIsrLiveLeader();
    _duplicate.brokerId = brokerId;
    _duplicate.hostName = hostName;
    _duplicate.port = port;
    return _duplicate;
}

/* toString() continues past this span on the next wrapped line of the original file. */
@Override
public String toString() {
    return "LeaderAndIsrLiveLeader("
        + "brokerId=" + brokerId
        + ", hostName=" + ((hostName == null) ?
"null" : "'" + hostName.toString() + "'") + ", port=" + port + ")"; } public int brokerId() { return this.brokerId; } public String hostName() { return this.hostName; } public int port() { return this.port; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public LeaderAndIsrLiveLeader setBrokerId(int v) { this.brokerId = v; return this; } public LeaderAndIsrLiveLeader setHostName(String v) { this.hostName = v; return this; } public LeaderAndIsrLiveLeader setPort(int v) { this.port = v; return this; } } public static class LeaderAndIsrPartitionState implements Message { String topicName; int partitionIndex; int controllerEpoch; int leader; int leaderEpoch; List<Integer> isr; int partitionEpoch; List<Integer> replicas; List<Integer> addingReplicas; List<Integer> removingReplicas; boolean isNew; byte leaderRecoveryState; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topic_name", Type.STRING, "The topic name. This is only present in v0 or v1."), new Field("partition_index", Type.INT32, "The partition index."), new Field("controller_epoch", Type.INT32, "The controller epoch."), new Field("leader", Type.INT32, "The broker ID of the leader."), new Field("leader_epoch", Type.INT32, "The leader epoch."), new Field("isr", new ArrayOf(Type.INT32), "The in-sync replica IDs."), new Field("partition_epoch", Type.INT32, "The current epoch for the partition. The epoch is a monotonically increasing value which is incremented after every partition change. (Since the LeaderAndIsr request is only used by the legacy controller, this corresponds to the zkVersion)"), new Field("replicas", new ArrayOf(Type.INT32), "The replica IDs.") ); public static final Schema SCHEMA_1 = new Schema( new Field("topic_name", Type.STRING, "The topic name. 
This is only present in v0 or v1."), new Field("partition_index", Type.INT32, "The partition index."), new Field("controller_epoch", Type.INT32, "The controller epoch."), new Field("leader", Type.INT32, "The broker ID of the leader."), new Field("leader_epoch", Type.INT32, "The leader epoch."), new Field("isr", new ArrayOf(Type.INT32), "The in-sync replica IDs."), new Field("partition_epoch", Type.INT32, "The current epoch for the partition. The epoch is a monotonically increasing value which is incremented after every partition change. (Since the LeaderAndIsr request is only used by the legacy controller, this corresponds to the zkVersion)"), new Field("replicas", new ArrayOf(Type.INT32), "The replica IDs."), new Field("is_new", Type.BOOLEAN, "Whether the replica should have existed on the broker or not.") ); public static final Schema SCHEMA_2 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("controller_epoch", Type.INT32, "The controller epoch."), new Field("leader", Type.INT32, "The broker ID of the leader."), new Field("leader_epoch", Type.INT32, "The leader epoch."), new Field("isr", new ArrayOf(Type.INT32), "The in-sync replica IDs."), new Field("partition_epoch", Type.INT32, "The current epoch for the partition. The epoch is a monotonically increasing value which is incremented after every partition change. 
(Since the LeaderAndIsr request is only used by the legacy controller, this corresponds to the zkVersion)"), new Field("replicas", new ArrayOf(Type.INT32), "The replica IDs."), new Field("is_new", Type.BOOLEAN, "Whether the replica should have existed on the broker or not.") ); public static final Schema SCHEMA_3 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("controller_epoch", Type.INT32, "The controller epoch."), new Field("leader", Type.INT32, "The broker ID of the leader."), new Field("leader_epoch", Type.INT32, "The leader epoch."), new Field("isr", new ArrayOf(Type.INT32), "The in-sync replica IDs."), new Field("partition_epoch", Type.INT32, "The current epoch for the partition. The epoch is a monotonically increasing value which is incremented after every partition change. (Since the LeaderAndIsr request is only used by the legacy controller, this corresponds to the zkVersion)"), new Field("replicas", new ArrayOf(Type.INT32), "The replica IDs."), new Field("adding_replicas", new ArrayOf(Type.INT32), "The replica IDs that we are adding this partition to, or null if no replicas are being added."), new Field("removing_replicas", new ArrayOf(Type.INT32), "The replica IDs that we are removing this partition from, or null if no replicas are being removed."), new Field("is_new", Type.BOOLEAN, "Whether the replica should have existed on the broker or not.") ); public static final Schema SCHEMA_4 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("controller_epoch", Type.INT32, "The controller epoch."), new Field("leader", Type.INT32, "The broker ID of the leader."), new Field("leader_epoch", Type.INT32, "The leader epoch."), new Field("isr", new CompactArrayOf(Type.INT32), "The in-sync replica IDs."), new Field("partition_epoch", Type.INT32, "The current epoch for the partition. The epoch is a monotonically increasing value which is incremented after every partition change. 
(Since the LeaderAndIsr request is only used by the legacy controller, this corresponds to the zkVersion)"), new Field("replicas", new CompactArrayOf(Type.INT32), "The replica IDs."), new Field("adding_replicas", new CompactArrayOf(Type.INT32), "The replica IDs that we are adding this partition to, or null if no replicas are being added."), new Field("removing_replicas", new CompactArrayOf(Type.INT32), "The replica IDs that we are removing this partition from, or null if no replicas are being removed."), new Field("is_new", Type.BOOLEAN, "Whether the replica should have existed on the broker or not."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("controller_epoch", Type.INT32, "The controller epoch."), new Field("leader", Type.INT32, "The broker ID of the leader."), new Field("leader_epoch", Type.INT32, "The leader epoch."), new Field("isr", new CompactArrayOf(Type.INT32), "The in-sync replica IDs."), new Field("partition_epoch", Type.INT32, "The current epoch for the partition. The epoch is a monotonically increasing value which is incremented after every partition change. 
(Since the LeaderAndIsr request is only used by the legacy controller, this corresponds to the zkVersion)"), new Field("replicas", new CompactArrayOf(Type.INT32), "The replica IDs."), new Field("adding_replicas", new CompactArrayOf(Type.INT32), "The replica IDs that we are adding this partition to, or null if no replicas are being added."), new Field("removing_replicas", new CompactArrayOf(Type.INT32), "The replica IDs that we are removing this partition from, or null if no replicas are being removed."), new Field("is_new", Type.BOOLEAN, "Whether the replica should have existed on the broker or not."), new Field("leader_recovery_state", Type.INT8, "1 if the partition is recovering from an unclean leader election; 0 otherwise."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 7; public LeaderAndIsrPartitionState(Readable _readable, short _version) { read(_readable, _version); } public LeaderAndIsrPartitionState() { this.topicName = ""; this.partitionIndex = 0; this.controllerEpoch = 0; this.leader = 0; this.leaderEpoch = 0; this.isr = new ArrayList<Integer>(0); this.partitionEpoch = 0; this.replicas = new ArrayList<Integer>(0); this.addingReplicas = new ArrayList<Integer>(0); this.removingReplicas = new ArrayList<Integer>(0); this.isNew = false; this.leaderRecoveryState = (byte) 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 32767; } @Override public void read(Readable _readable, short _version) { if (_version <= 1) { int length; length = _readable.readShort(); if (length < 0) { throw new RuntimeException("non-nullable field topicName was serialized as null"); } else if (length > 0x7fff) { throw new 
RuntimeException("string field topicName had invalid length " + length); } else { this.topicName = _readable.readString(length); } } else { this.topicName = ""; } this.partitionIndex = _readable.readInt(); this.controllerEpoch = _readable.readInt(); this.leader = _readable.readInt(); this.leaderEpoch = _readable.readInt(); { int arrayLength; if (_version >= 4) { arrayLength = _readable.readUnsignedVarint() - 1; } else { arrayLength = _readable.readInt(); } if (arrayLength < 0) { throw new RuntimeException("non-nullable field isr was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<Integer> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(_readable.readInt()); } this.isr = newCollection; } } this.partitionEpoch = _readable.readInt(); { int arrayLength; if (_version >= 4) { arrayLength = _readable.readUnsignedVarint() - 1; } else { arrayLength = _readable.readInt(); } if (arrayLength < 0) { throw new RuntimeException("non-nullable field replicas was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<Integer> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(_readable.readInt()); } this.replicas = newCollection; } } if (_version >= 3) { int arrayLength; if (_version >= 4) { arrayLength = _readable.readUnsignedVarint() - 1; } else { arrayLength = _readable.readInt(); } if (arrayLength < 0) { throw new RuntimeException("non-nullable field addingReplicas was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a 
collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<Integer> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(_readable.readInt()); } this.addingReplicas = newCollection; } } else { this.addingReplicas = new ArrayList<Integer>(0); } if (_version >= 3) { int arrayLength; if (_version >= 4) { arrayLength = _readable.readUnsignedVarint() - 1; } else { arrayLength = _readable.readInt(); } if (arrayLength < 0) { throw new RuntimeException("non-nullable field removingReplicas was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<Integer> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(_readable.readInt()); } this.removingReplicas = newCollection; } } else { this.removingReplicas = new ArrayList<Integer>(0); } if (_version >= 1) { this.isNew = _readable.readByte() != 0; } else { this.isNew = false; } if (_version >= 6) { this.leaderRecoveryState = _readable.readByte(); } else { this.leaderRecoveryState = (byte) 0; } this._unknownTaggedFields = null; if (_version >= 4) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version <= 1) { { byte[] _stringBytes = _cache.getSerializedValue(topicName); _writable.writeShort((short) _stringBytes.length); _writable.writeByteArray(_stringBytes); } } 
_writable.writeInt(partitionIndex); _writable.writeInt(controllerEpoch); _writable.writeInt(leader); _writable.writeInt(leaderEpoch); if (_version >= 4) { _writable.writeUnsignedVarint(isr.size() + 1); } else { _writable.writeInt(isr.size()); } for (Integer isrElement : isr) { _writable.writeInt(isrElement); } _writable.writeInt(partitionEpoch); if (_version >= 4) { _writable.writeUnsignedVarint(replicas.size() + 1); } else { _writable.writeInt(replicas.size()); } for (Integer replicasElement : replicas) { _writable.writeInt(replicasElement); } if (_version >= 3) { if (_version >= 4) { _writable.writeUnsignedVarint(addingReplicas.size() + 1); } else { _writable.writeInt(addingReplicas.size()); } for (Integer addingReplicasElement : addingReplicas) { _writable.writeInt(addingReplicasElement); } } if (_version >= 3) { if (_version >= 4) { _writable.writeUnsignedVarint(removingReplicas.size() + 1); } else { _writable.writeInt(removingReplicas.size()); } for (Integer removingReplicasElement : removingReplicas) { _writable.writeInt(removingReplicasElement); } } if (_version >= 1) { _writable.writeByte(isNew ? 
(byte) 1 : (byte) 0); } if (_version >= 6) { _writable.writeByte(leaderRecoveryState); } else { if (this.leaderRecoveryState != (byte) 0) { throw new UnsupportedVersionException("Attempted to write a non-default leaderRecoveryState at version " + _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 4) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version <= 1) { { byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'topicName' field is too long to be serialized"); } _cache.cacheSerializedValue(topicName, _stringBytes); _size.addBytes(_stringBytes.length + 2); } } _size.addBytes(4); _size.addBytes(4); _size.addBytes(4); _size.addBytes(4); { if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(isr.size() + 1)); } else { _size.addBytes(4); } _size.addBytes(isr.size() * 4); } _size.addBytes(4); { if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(replicas.size() + 1)); } else { _size.addBytes(4); } _size.addBytes(replicas.size() * 4); } if (_version >= 3) { { if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(addingReplicas.size() + 1)); } else { _size.addBytes(4); } _size.addBytes(addingReplicas.size() * 4); } } if (_version >= 3) { { if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(removingReplicas.size() + 1)); } else { _size.addBytes(4); } _size.addBytes(removingReplicas.size() * 4); } } if (_version >= 1) { _size.addBytes(1); } if (_version >= 
6) { _size.addBytes(1); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof LeaderAndIsrPartitionState)) return false; LeaderAndIsrPartitionState other = (LeaderAndIsrPartitionState) obj; if (this.topicName == null) { if (other.topicName != null) return false; } else { if (!this.topicName.equals(other.topicName)) return false; } if (partitionIndex != other.partitionIndex) return false; if (controllerEpoch != other.controllerEpoch) return false; if (leader != other.leader) return false; if (leaderEpoch != other.leaderEpoch) return false; if (this.isr == null) { if (other.isr != null) return false; } else { if (!this.isr.equals(other.isr)) return false; } if (partitionEpoch != other.partitionEpoch) return false; if (this.replicas == null) { if (other.replicas != null) return false; } else { if (!this.replicas.equals(other.replicas)) return false; } if (this.addingReplicas == null) { if (other.addingReplicas != null) return false; } else { if (!this.addingReplicas.equals(other.addingReplicas)) return false; } if (this.removingReplicas == null) { if (other.removingReplicas != null) return false; } else { if (!this.removingReplicas.equals(other.removingReplicas)) return false; } if (isNew != other.isNew) return false; if (leaderRecoveryState != other.leaderRecoveryState) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int 
hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode()); hashCode = 31 * hashCode + partitionIndex; hashCode = 31 * hashCode + controllerEpoch; hashCode = 31 * hashCode + leader; hashCode = 31 * hashCode + leaderEpoch; hashCode = 31 * hashCode + (isr == null ? 0 : isr.hashCode()); hashCode = 31 * hashCode + partitionEpoch; hashCode = 31 * hashCode + (replicas == null ? 0 : replicas.hashCode()); hashCode = 31 * hashCode + (addingReplicas == null ? 0 : addingReplicas.hashCode()); hashCode = 31 * hashCode + (removingReplicas == null ? 0 : removingReplicas.hashCode()); hashCode = 31 * hashCode + (isNew ? 1231 : 1237); hashCode = 31 * hashCode + leaderRecoveryState; return hashCode; } @Override public LeaderAndIsrPartitionState duplicate() { LeaderAndIsrPartitionState _duplicate = new LeaderAndIsrPartitionState(); _duplicate.topicName = topicName; _duplicate.partitionIndex = partitionIndex; _duplicate.controllerEpoch = controllerEpoch; _duplicate.leader = leader; _duplicate.leaderEpoch = leaderEpoch; ArrayList<Integer> newIsr = new ArrayList<Integer>(isr.size()); for (Integer _element : isr) { newIsr.add(_element); } _duplicate.isr = newIsr; _duplicate.partitionEpoch = partitionEpoch; ArrayList<Integer> newReplicas = new ArrayList<Integer>(replicas.size()); for (Integer _element : replicas) { newReplicas.add(_element); } _duplicate.replicas = newReplicas; ArrayList<Integer> newAddingReplicas = new ArrayList<Integer>(addingReplicas.size()); for (Integer _element : addingReplicas) { newAddingReplicas.add(_element); } _duplicate.addingReplicas = newAddingReplicas; ArrayList<Integer> newRemovingReplicas = new ArrayList<Integer>(removingReplicas.size()); for (Integer _element : removingReplicas) { newRemovingReplicas.add(_element); } _duplicate.removingReplicas = newRemovingReplicas; _duplicate.isNew = isNew; _duplicate.leaderRecoveryState = leaderRecoveryState; return _duplicate; } @Override public String toString() { return 
"LeaderAndIsrPartitionState(" + "topicName=" + ((topicName == null) ? "null" : "'" + topicName.toString() + "'") + ", partitionIndex=" + partitionIndex + ", controllerEpoch=" + controllerEpoch + ", leader=" + leader + ", leaderEpoch=" + leaderEpoch + ", isr=" + MessageUtil.deepToString(isr.iterator()) + ", partitionEpoch=" + partitionEpoch + ", replicas=" + MessageUtil.deepToString(replicas.iterator()) + ", addingReplicas=" + MessageUtil.deepToString(addingReplicas.iterator()) + ", removingReplicas=" + MessageUtil.deepToString(removingReplicas.iterator()) + ", isNew=" + (isNew ? "true" : "false") + ", leaderRecoveryState=" + leaderRecoveryState + ")"; } public String topicName() { return this.topicName; } public int partitionIndex() { return this.partitionIndex; } public int controllerEpoch() { return this.controllerEpoch; } public int leader() { return this.leader; } public int leaderEpoch() { return this.leaderEpoch; } public List<Integer> isr() { return this.isr; } public int partitionEpoch() { return this.partitionEpoch; } public List<Integer> replicas() { return this.replicas; } public List<Integer> addingReplicas() { return this.addingReplicas; } public List<Integer> removingReplicas() { return this.removingReplicas; } public boolean isNew() { return this.isNew; } public byte leaderRecoveryState() { return this.leaderRecoveryState; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public LeaderAndIsrPartitionState setTopicName(String v) { this.topicName = v; return this; } public LeaderAndIsrPartitionState setPartitionIndex(int v) { this.partitionIndex = v; return this; } public LeaderAndIsrPartitionState setControllerEpoch(int v) { this.controllerEpoch = v; return this; } public LeaderAndIsrPartitionState setLeader(int v) { this.leader = v; return this; } public LeaderAndIsrPartitionState setLeaderEpoch(int v) { this.leaderEpoch = v; 
return this; } public LeaderAndIsrPartitionState setIsr(List<Integer> v) { this.isr = v; return this; } public LeaderAndIsrPartitionState setPartitionEpoch(int v) { this.partitionEpoch = v; return this; } public LeaderAndIsrPartitionState setReplicas(List<Integer> v) { this.replicas = v; return this; } public LeaderAndIsrPartitionState setAddingReplicas(List<Integer> v) { this.addingReplicas = v; return this; } public LeaderAndIsrPartitionState setRemovingReplicas(List<Integer> v) { this.removingReplicas = v; return this; } public LeaderAndIsrPartitionState setIsNew(boolean v) { this.isNew = v; return this; } public LeaderAndIsrPartitionState setLeaderRecoveryState(byte v) { this.leaderRecoveryState = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/LeaderAndIsrRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.LeaderAndIsrRequestData.*;

/**
 * Converts between {@link LeaderAndIsrRequestData} message objects and their Jackson
 * JSON representation, honoring the per-version field layout of the LeaderAndIsr RPC.
 *
 * <p>Generated code — the version gates below ({@code _version >= N} / {@code <= N})
 * mirror the "versions" ranges declared in the message spec. Reading a JSON document
 * that lacks a field mandatory at the requested version throws {@link RuntimeException};
 * writing a non-default value for a field the version does not support throws
 * {@link UnsupportedVersionException}.
 */
public class LeaderAndIsrRequestDataJsonConverter {
    /**
     * Builds a {@link LeaderAndIsrRequestData} from {@code _node}, validating the fields
     * required at {@code _version}.
     *
     * @param _node    JSON object holding the request fields
     * @param _version RPC version whose field layout is enforced
     * @return the populated message object
     */
    public static LeaderAndIsrRequestData read(JsonNode _node, short _version) {
        LeaderAndIsrRequestData _object = new LeaderAndIsrRequestData();
        // controllerId: mandatory in every version.
        JsonNode _controllerIdNode = _node.get("controllerId");
        if (_controllerIdNode == null) {
            throw new RuntimeException("LeaderAndIsrRequestData: unable to locate field 'controllerId', which is mandatory in version " + _version);
        } else {
            _object.controllerId = MessageUtil.jsonNodeToInt(_controllerIdNode, "LeaderAndIsrRequestData");
        }
        // isKRaftController: exists from version 7 on; older versions default to false.
        JsonNode _isKRaftControllerNode = _node.get("isKRaftController");
        if (_isKRaftControllerNode == null) {
            if (_version >= 7) {
                throw new RuntimeException("LeaderAndIsrRequestData: unable to locate field 'isKRaftController', which is mandatory in version " + _version);
            } else {
                _object.isKRaftController = false;
            }
        } else {
            if (!_isKRaftControllerNode.isBoolean()) {
                throw new RuntimeException("LeaderAndIsrRequestData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.isKRaftController = _isKRaftControllerNode.asBoolean();
        }
        // controllerEpoch: mandatory in every version.
        JsonNode _controllerEpochNode = _node.get("controllerEpoch");
        if (_controllerEpochNode == null) {
            throw new RuntimeException("LeaderAndIsrRequestData: unable to locate field 'controllerEpoch', which is mandatory in version " + _version);
        } else {
            _object.controllerEpoch = MessageUtil.jsonNodeToInt(_controllerEpochNode, "LeaderAndIsrRequestData");
        }
        // brokerEpoch: exists from version 2 on; older versions default to -1.
        JsonNode _brokerEpochNode = _node.get("brokerEpoch");
        if (_brokerEpochNode == null) {
            if (_version >= 2) {
                throw new RuntimeException("LeaderAndIsrRequestData: unable to locate field 'brokerEpoch', which is mandatory in version " + _version);
            } else {
                _object.brokerEpoch = -1L;
            }
        } else {
            _object.brokerEpoch = MessageUtil.jsonNodeToLong(_brokerEpochNode, "LeaderAndIsrRequestData");
        }
        // type: exists from version 5 on; older versions default to 0.
        JsonNode _typeNode = _node.get("type");
        if (_typeNode == null) {
            if (_version >= 5) {
                throw new RuntimeException("LeaderAndIsrRequestData: unable to locate field 'type', which is mandatory in version " + _version);
            } else {
                _object.type = (byte) 0;
            }
        } else {
            _object.type = MessageUtil.jsonNodeToByte(_typeNode, "LeaderAndIsrRequestData");
        }
        // ungroupedPartitionStates: only versions 0-1 carry a flat partition list;
        // later versions group partitions under topicStates instead.
        JsonNode _ungroupedPartitionStatesNode = _node.get("ungroupedPartitionStates");
        if (_ungroupedPartitionStatesNode == null) {
            if (_version <= 1) {
                throw new RuntimeException("LeaderAndIsrRequestData: unable to locate field 'ungroupedPartitionStates', which is mandatory in version " + _version);
            } else {
                _object.ungroupedPartitionStates = new ArrayList<LeaderAndIsrPartitionState>(0);
            }
        } else {
            if (!_ungroupedPartitionStatesNode.isArray()) {
                throw new RuntimeException("LeaderAndIsrRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<LeaderAndIsrPartitionState> _collection = new ArrayList<LeaderAndIsrPartitionState>(_ungroupedPartitionStatesNode.size());
            _object.ungroupedPartitionStates = _collection;
            for (JsonNode _element : _ungroupedPartitionStatesNode) {
                _collection.add(LeaderAndIsrPartitionStateJsonConverter.read(_element, _version));
            }
        }
        // topicStates: exists from version 2 on (replaces the flat list above).
        JsonNode _topicStatesNode = _node.get("topicStates");
        if (_topicStatesNode == null) {
            if (_version >= 2) {
                throw new RuntimeException("LeaderAndIsrRequestData: unable to locate field 'topicStates', which is mandatory in version " + _version);
            } else {
                _object.topicStates = new ArrayList<LeaderAndIsrTopicState>(0);
            }
        } else {
            if (!_topicStatesNode.isArray()) {
                throw new RuntimeException("LeaderAndIsrRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<LeaderAndIsrTopicState> _collection = new ArrayList<LeaderAndIsrTopicState>(_topicStatesNode.size());
            _object.topicStates = _collection;
            for (JsonNode _element : _topicStatesNode) {
                _collection.add(LeaderAndIsrTopicStateJsonConverter.read(_element, _version));
            }
        }
        // liveLeaders: mandatory in every version.
        JsonNode _liveLeadersNode = _node.get("liveLeaders");
        if (_liveLeadersNode == null) {
            throw new RuntimeException("LeaderAndIsrRequestData: unable to locate field 'liveLeaders', which is mandatory in version " + _version);
        } else {
            if (!_liveLeadersNode.isArray()) {
                throw new RuntimeException("LeaderAndIsrRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<LeaderAndIsrLiveLeader> _collection = new ArrayList<LeaderAndIsrLiveLeader>(_liveLeadersNode.size());
            _object.liveLeaders = _collection;
            for (JsonNode _element : _liveLeadersNode) {
                _collection.add(LeaderAndIsrLiveLeaderJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes {@code _object} to a JSON object laid out for {@code _version}.
     * Throws {@link UnsupportedVersionException} if a non-default value is set on a
     * field that {@code _version} cannot represent.
     *
     * @param _serializeRecords propagated to nested converters (unused at this level)
     */
    public static JsonNode write(LeaderAndIsrRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("controllerId", new IntNode(_object.controllerId));
        // isKRaftController is only writable at version 7+; reject non-default earlier.
        if (_version >= 7) {
            _node.set("isKRaftController", BooleanNode.valueOf(_object.isKRaftController));
        } else {
            if (_object.isKRaftController) {
                throw new UnsupportedVersionException("Attempted to write a non-default isKRaftController at version " + _version);
            }
        }
        _node.set("controllerEpoch", new IntNode(_object.controllerEpoch));
        if (_version >= 2) {
            _node.set("brokerEpoch", new LongNode(_object.brokerEpoch));
        }
        // type is only writable at version 5+; reject non-default earlier.
        if (_version >= 5) {
            _node.set("type", new ShortNode(_object.type));
        } else {
            if (_object.type != (byte) 0) {
                throw new UnsupportedVersionException("Attempted to write a non-default type at version " + _version);
            }
        }
        // Versions 0-1 use the flat partition list; versions 2+ must leave it empty.
        if (_version <= 1) {
            ArrayNode _ungroupedPartitionStatesArray = new ArrayNode(JsonNodeFactory.instance);
            for (LeaderAndIsrPartitionState _element : _object.ungroupedPartitionStates) {
                _ungroupedPartitionStatesArray.add(LeaderAndIsrPartitionStateJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("ungroupedPartitionStates", _ungroupedPartitionStatesArray);
        } else {
            if (!_object.ungroupedPartitionStates.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default ungroupedPartitionStates at version " + _version);
            }
        }
        // Versions 2+ use the per-topic grouping; versions 0-1 must leave it empty.
        if (_version >= 2) {
            ArrayNode _topicStatesArray = new ArrayNode(JsonNodeFactory.instance);
            for (LeaderAndIsrTopicState _element : _object.topicStates) {
                _topicStatesArray.add(LeaderAndIsrTopicStateJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("topicStates", _topicStatesArray);
        } else {
            if (!_object.topicStates.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default topicStates at version " + _version);
            }
        }
        ArrayNode _liveLeadersArray = new ArrayNode(JsonNodeFactory.instance);
        for (LeaderAndIsrLiveLeader _element : _object.liveLeaders) {
            _liveLeadersArray.add(LeaderAndIsrLiveLeaderJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("liveLeaders", _liveLeadersArray);
        return _node;
    }

    /** Convenience overload of {@link #write(LeaderAndIsrRequestData, short, boolean)} with record serialization enabled. */
    public static JsonNode write(LeaderAndIsrRequestData _object, short _version) {
        return write(_object, _version, true);
    }

    /**
     * JSON converter for the nested {@code LeaderAndIsrLiveLeader} struct
     * (brokerId / hostName / port — all mandatory in every version).
     */
    public static class LeaderAndIsrLiveLeaderJsonConverter {
        /** Reads a live-leader entry; all three fields are mandatory in every version. */
        public static LeaderAndIsrLiveLeader read(JsonNode _node, short _version) {
            LeaderAndIsrLiveLeader _object = new LeaderAndIsrLiveLeader();
            JsonNode _brokerIdNode = _node.get("brokerId");
            if (_brokerIdNode == null) {
                throw new RuntimeException("LeaderAndIsrLiveLeader: unable to locate field 'brokerId', which is mandatory in version " + _version);
            } else {
                _object.brokerId = MessageUtil.jsonNodeToInt(_brokerIdNode, "LeaderAndIsrLiveLeader");
            }
            JsonNode _hostNameNode = _node.get("hostName");
            if (_hostNameNode == null) {
                throw new RuntimeException("LeaderAndIsrLiveLeader: unable to locate field 'hostName', which is mandatory in version " + _version);
            } else {
                if (!_hostNameNode.isTextual()) {
                    throw new RuntimeException("LeaderAndIsrLiveLeader expected a string type, but got " + _node.getNodeType());
                }
                _object.hostName = _hostNameNode.asText();
            }
            JsonNode _portNode = _node.get("port");
            if (_portNode == null) {
                throw new RuntimeException("LeaderAndIsrLiveLeader: unable to locate field 'port', which is mandatory in version " + _version);
            } else {
                _object.port = MessageUtil.jsonNodeToInt(_portNode, "LeaderAndIsrLiveLeader");
            }
            return _object;
        }

        /** Writes a live-leader entry; the layout is identical across versions. */
        public static JsonNode write(LeaderAndIsrLiveLeader _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("brokerId", new IntNode(_object.brokerId));
            _node.set("hostName", new TextNode(_object.hostName));
            _node.set("port", new IntNode(_object.port));
            return _node;
        }

        /** Convenience overload with record serialization enabled. */
        public static JsonNode write(LeaderAndIsrLiveLeader _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /**
     * JSON converter for the nested {@code LeaderAndIsrPartitionState} struct.
     * Version gates: topicName only in 0-1; adding/removingReplicas from 3;
     * isNew from 1; leaderRecoveryState from 6.
     */
    public static class LeaderAndIsrPartitionStateJsonConverter {
        /** Reads one partition state, enforcing the fields mandatory at {@code _version}. */
        public static LeaderAndIsrPartitionState read(JsonNode _node, short _version) {
            LeaderAndIsrPartitionState _object = new LeaderAndIsrPartitionState();
            // topicName: only present in versions 0-1 (later versions group by topic).
            JsonNode _topicNameNode = _node.get("topicName");
            if (_topicNameNode == null) {
                if (_version <= 1) {
                    throw new RuntimeException("LeaderAndIsrPartitionState: unable to locate field 'topicName', which is mandatory in version " + _version);
                } else {
                    _object.topicName = "";
                }
            } else {
                if (!_topicNameNode.isTextual()) {
                    throw new RuntimeException("LeaderAndIsrPartitionState expected a string type, but got " + _node.getNodeType());
                }
                _object.topicName = _topicNameNode.asText();
            }
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("LeaderAndIsrPartitionState: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "LeaderAndIsrPartitionState");
            }
            JsonNode _controllerEpochNode = _node.get("controllerEpoch");
            if (_controllerEpochNode == null) {
                throw new RuntimeException("LeaderAndIsrPartitionState: unable to locate field 'controllerEpoch', which is mandatory in version " + _version);
            } else {
                _object.controllerEpoch = MessageUtil.jsonNodeToInt(_controllerEpochNode, "LeaderAndIsrPartitionState");
            }
            JsonNode _leaderNode = _node.get("leader");
            if (_leaderNode == null) {
                throw new RuntimeException("LeaderAndIsrPartitionState: unable to locate field 'leader', which is mandatory in version " + _version);
            } else {
                _object.leader = MessageUtil.jsonNodeToInt(_leaderNode, "LeaderAndIsrPartitionState");
            }
            JsonNode _leaderEpochNode = _node.get("leaderEpoch");
            if (_leaderEpochNode == null) {
                throw new RuntimeException("LeaderAndIsrPartitionState: unable to locate field 'leaderEpoch', which is mandatory in version " + _version);
            } else {
                _object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "LeaderAndIsrPartitionState");
            }
            // isr: mandatory int array in every version.
            JsonNode _isrNode = _node.get("isr");
            if (_isrNode == null) {
                throw new RuntimeException("LeaderAndIsrPartitionState: unable to locate field 'isr', which is mandatory in version " + _version);
            } else {
                if (!_isrNode.isArray()) {
                    throw new RuntimeException("LeaderAndIsrPartitionState expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_isrNode.size());
                _object.isr = _collection;
                for (JsonNode _element : _isrNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "LeaderAndIsrPartitionState element"));
                }
            }
            JsonNode _partitionEpochNode = _node.get("partitionEpoch");
            if (_partitionEpochNode == null) {
                throw new RuntimeException("LeaderAndIsrPartitionState: unable to locate field 'partitionEpoch', which is mandatory in version " + _version);
            } else {
                _object.partitionEpoch = MessageUtil.jsonNodeToInt(_partitionEpochNode, "LeaderAndIsrPartitionState");
            }
            // replicas: mandatory int array in every version.
            JsonNode _replicasNode = _node.get("replicas");
            if (_replicasNode == null) {
                throw new RuntimeException("LeaderAndIsrPartitionState: unable to locate field 'replicas', which is mandatory in version " + _version);
            } else {
                if (!_replicasNode.isArray()) {
                    throw new RuntimeException("LeaderAndIsrPartitionState expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_replicasNode.size());
                _object.replicas = _collection;
                for (JsonNode _element : _replicasNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "LeaderAndIsrPartitionState element"));
                }
            }
            // addingReplicas: exists from version 3 on; older versions default to empty.
            JsonNode _addingReplicasNode = _node.get("addingReplicas");
            if (_addingReplicasNode == null) {
                if (_version >= 3) {
                    throw new RuntimeException("LeaderAndIsrPartitionState: unable to locate field 'addingReplicas', which is mandatory in version " + _version);
                } else {
                    _object.addingReplicas = new ArrayList<Integer>(0);
                }
            } else {
                if (!_addingReplicasNode.isArray()) {
                    throw new RuntimeException("LeaderAndIsrPartitionState expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_addingReplicasNode.size());
                _object.addingReplicas = _collection;
                for (JsonNode _element : _addingReplicasNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "LeaderAndIsrPartitionState element"));
                }
            }
            // removingReplicas: exists from version 3 on; older versions default to empty.
            JsonNode _removingReplicasNode = _node.get("removingReplicas");
            if (_removingReplicasNode == null) {
                if (_version >= 3) {
                    throw new RuntimeException("LeaderAndIsrPartitionState: unable to locate field 'removingReplicas', which is mandatory in version " + _version);
                } else {
                    _object.removingReplicas = new ArrayList<Integer>(0);
                }
            } else {
                if (!_removingReplicasNode.isArray()) {
                    throw new RuntimeException("LeaderAndIsrPartitionState expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_removingReplicasNode.size());
                _object.removingReplicas = _collection;
                for (JsonNode _element : _removingReplicasNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "LeaderAndIsrPartitionState element"));
                }
            }
            // isNew: exists from version 1 on; version 0 defaults to false.
            JsonNode _isNewNode = _node.get("isNew");
            if (_isNewNode == null) {
                if (_version >= 1) {
                    throw new RuntimeException("LeaderAndIsrPartitionState: unable to locate field 'isNew', which is mandatory in version " + _version);
                } else {
                    _object.isNew = false;
                }
            } else {
                if (!_isNewNode.isBoolean()) {
                    throw new RuntimeException("LeaderAndIsrPartitionState expected Boolean type, but got " + _node.getNodeType());
                }
                _object.isNew = _isNewNode.asBoolean();
            }
            // leaderRecoveryState: exists from version 6 on; older versions default to 0.
            JsonNode _leaderRecoveryStateNode = _node.get("leaderRecoveryState");
            if (_leaderRecoveryStateNode == null) {
                if (_version >= 6) {
                    throw new RuntimeException("LeaderAndIsrPartitionState: unable to locate field 'leaderRecoveryState', which is mandatory in version " + _version);
                } else {
                    _object.leaderRecoveryState = (byte) 0;
                }
            } else {
                _object.leaderRecoveryState = MessageUtil.jsonNodeToByte(_leaderRecoveryStateNode, "LeaderAndIsrPartitionState");
            }
            return _object;
        }

        /** Writes one partition state for {@code _version}, omitting fields that version lacks. */
        public static JsonNode write(LeaderAndIsrPartitionState _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            // topicName is only serialized in versions 0-1.
            if (_version <= 1) {
                _node.set("topicName", new TextNode(_object.topicName));
            }
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            _node.set("controllerEpoch", new IntNode(_object.controllerEpoch));
            _node.set("leader", new IntNode(_object.leader));
            _node.set("leaderEpoch", new IntNode(_object.leaderEpoch));
            ArrayNode _isrArray = new ArrayNode(JsonNodeFactory.instance);
            for (Integer _element : _object.isr) {
                _isrArray.add(new IntNode(_element));
            }
            _node.set("isr", _isrArray);
            _node.set("partitionEpoch", new IntNode(_object.partitionEpoch));
            ArrayNode _replicasArray = new ArrayNode(JsonNodeFactory.instance);
            for (Integer _element : _object.replicas) {
                _replicasArray.add(new IntNode(_element));
            }
            _node.set("replicas", _replicasArray);
            if (_version >= 3) {
                ArrayNode _addingReplicasArray = new ArrayNode(JsonNodeFactory.instance);
                for (Integer _element : _object.addingReplicas) {
                    _addingReplicasArray.add(new IntNode(_element));
                }
                _node.set("addingReplicas", _addingReplicasArray);
            }
            if (_version >= 3) {
                ArrayNode _removingReplicasArray = new ArrayNode(JsonNodeFactory.instance);
                for (Integer _element : _object.removingReplicas) {
                    _removingReplicasArray.add(new IntNode(_element));
                }
                _node.set("removingReplicas", _removingReplicasArray);
            }
            if (_version >= 1) {
                _node.set("isNew", BooleanNode.valueOf(_object.isNew));
            }
            // leaderRecoveryState is only writable at version 6+; reject non-default earlier.
            if (_version >= 6) {
                _node.set("leaderRecoveryState", new ShortNode(_object.leaderRecoveryState));
            } else {
                if (_object.leaderRecoveryState != (byte) 0) {
                    throw new UnsupportedVersionException("Attempted to write a non-default leaderRecoveryState at version " + _version);
                }
            }
            return _node;
        }

        /** Convenience overload with record serialization enabled. */
        public static JsonNode write(LeaderAndIsrPartitionState _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /**
     * JSON converter for the nested {@code LeaderAndIsrTopicState} struct.
     * The struct only exists at versions 2+; topicId is serialized from version 5 on.
     */
    public static class LeaderAndIsrTopicStateJsonConverter {
        /** Reads one topic state; rejects versions below 2, where the struct does not exist. */
        public static LeaderAndIsrTopicState read(JsonNode _node, short _version) {
            LeaderAndIsrTopicState _object = new LeaderAndIsrTopicState();
            if (_version < 2) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of LeaderAndIsrTopicState");
            }
            JsonNode _topicNameNode = _node.get("topicName");
            if (_topicNameNode == null) {
                throw new RuntimeException("LeaderAndIsrTopicState: unable to locate field 'topicName', which is mandatory in version " + _version);
            } else {
                if (!_topicNameNode.isTextual()) {
                    throw new RuntimeException("LeaderAndIsrTopicState expected a string type, but got " + _node.getNodeType());
                }
                _object.topicName = _topicNameNode.asText();
            }
            // topicId: exists from version 5 on; older versions default to the zero UUID.
            JsonNode _topicIdNode = _node.get("topicId");
            if (_topicIdNode == null) {
                if (_version >= 5) {
                    throw new RuntimeException("LeaderAndIsrTopicState: unable to locate field 'topicId', which is mandatory in version " + _version);
                } else {
                    _object.topicId = Uuid.ZERO_UUID;
                }
            } else {
                if (!_topicIdNode.isTextual()) {
                    throw new RuntimeException("LeaderAndIsrTopicState expected a JSON string type, but got " + _node.getNodeType());
                }
                _object.topicId = Uuid.fromString(_topicIdNode.asText());
            }
            JsonNode _partitionStatesNode = _node.get("partitionStates");
            if (_partitionStatesNode == null) {
                throw new RuntimeException("LeaderAndIsrTopicState: unable to locate field 'partitionStates', which is mandatory in version " + _version);
            } else {
                if (!_partitionStatesNode.isArray()) {
                    throw new RuntimeException("LeaderAndIsrTopicState expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<LeaderAndIsrPartitionState> _collection = new ArrayList<LeaderAndIsrPartitionState>(_partitionStatesNode.size());
                _object.partitionStates = _collection;
                for (JsonNode _element : _partitionStatesNode) {
                    _collection.add(LeaderAndIsrPartitionStateJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        /** Writes one topic state; rejects versions below 2, where the struct does not exist. */
        public static JsonNode write(LeaderAndIsrTopicState _object, short _version, boolean _serializeRecords) {
            if (_version < 2) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of LeaderAndIsrTopicState");
            }
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("topicName", new TextNode(_object.topicName));
            if (_version >= 5) {
                _node.set("topicId", new TextNode(_object.topicId.toString()));
            }
            ArrayNode _partitionStatesArray = new ArrayNode(JsonNodeFactory.instance);
            for (LeaderAndIsrPartitionState _element : _object.partitionStates) {
                _partitionStatesArray.add(LeaderAndIsrPartitionStateJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitionStates", _partitionStatesArray);
            return _node;
        }

        /** Convenience overload with record serialization enabled. */
        public static JsonNode write(LeaderAndIsrTopicState _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/LeaderAndIsrResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import 
org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class LeaderAndIsrResponseData implements ApiMessage { short errorCode; List<LeaderAndIsrPartitionError> partitionErrors; LeaderAndIsrTopicErrorCollection topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("partition_errors", new ArrayOf(LeaderAndIsrPartitionError.SCHEMA_0), "Each partition in v0 to v4 message.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("partition_errors", new CompactArrayOf(LeaderAndIsrPartitionError.SCHEMA_4), "Each partition in v0 to v4 message."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_5 = new Schema( new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("topics", new CompactArrayOf(LeaderAndIsrTopicError.SCHEMA_5), "Each topic"), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 7; public LeaderAndIsrResponseData(Readable _readable, short _version) { read(_readable, _version); } public LeaderAndIsrResponseData() { this.errorCode = (short) 0; this.partitionErrors = new ArrayList<LeaderAndIsrPartitionError>(0); this.topics = new 
LeaderAndIsrTopicErrorCollection(0); } @Override public short apiKey() { return 4; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 7; } @Override public void read(Readable _readable, short _version) { this.errorCode = _readable.readShort(); if (_version <= 4) { if (_version >= 4) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitionErrors was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<LeaderAndIsrPartitionError> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new LeaderAndIsrPartitionError(_readable, _version)); } this.partitionErrors = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitionErrors was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<LeaderAndIsrPartitionError> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new LeaderAndIsrPartitionError(_readable, _version)); } this.partitionErrors = newCollection; } } } else { this.partitionErrors = new ArrayList<LeaderAndIsrPartitionError>(0); } if (_version >= 5) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size 
" + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } LeaderAndIsrTopicErrorCollection newCollection = new LeaderAndIsrTopicErrorCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new LeaderAndIsrTopicError(_readable, _version)); } this.topics = newCollection; } } else { this.topics = new LeaderAndIsrTopicErrorCollection(0); } this._unknownTaggedFields = null; if (_version >= 4) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeShort(errorCode); if (_version <= 4) { if (_version >= 4) { _writable.writeUnsignedVarint(partitionErrors.size() + 1); for (LeaderAndIsrPartitionError partitionErrorsElement : partitionErrors) { partitionErrorsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitionErrors.size()); for (LeaderAndIsrPartitionError partitionErrorsElement : partitionErrors) { partitionErrorsElement.write(_writable, _cache, _version); } } } else { if (!this.partitionErrors.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default partitionErrors at version " + _version); } } if (_version >= 5) { _writable.writeUnsignedVarint(topics.size() + 1); for (LeaderAndIsrTopicError topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } else { if (!this.topics.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default topics at version " + _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if 
(_version >= 4) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(2); if (_version <= 4) { { if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitionErrors.size() + 1)); } else { _size.addBytes(4); } for (LeaderAndIsrPartitionError partitionErrorsElement : partitionErrors) { partitionErrorsElement.addSize(_size, _cache, _version); } } } if (_version >= 5) { { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); for (LeaderAndIsrTopicError topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof LeaderAndIsrResponseData)) return false; LeaderAndIsrResponseData other = (LeaderAndIsrResponseData) obj; if (errorCode != other.errorCode) return false; if (this.partitionErrors == null) { if (other.partitionErrors != null) return false; } else { if (!this.partitionErrors.equals(other.partitionErrors)) return false; } if (this.topics == null) { if (other.topics != null) return false; } else { if 
(!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (partitionErrors == null ? 0 : partitionErrors.hashCode()); hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); return hashCode; } @Override public LeaderAndIsrResponseData duplicate() { LeaderAndIsrResponseData _duplicate = new LeaderAndIsrResponseData(); _duplicate.errorCode = errorCode; ArrayList<LeaderAndIsrPartitionError> newPartitionErrors = new ArrayList<LeaderAndIsrPartitionError>(partitionErrors.size()); for (LeaderAndIsrPartitionError _element : partitionErrors) { newPartitionErrors.add(_element.duplicate()); } _duplicate.partitionErrors = newPartitionErrors; LeaderAndIsrTopicErrorCollection newTopics = new LeaderAndIsrTopicErrorCollection(topics.size()); for (LeaderAndIsrTopicError _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "LeaderAndIsrResponseData(" + "errorCode=" + errorCode + ", partitionErrors=" + MessageUtil.deepToString(partitionErrors.iterator()) + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public short errorCode() { return this.errorCode; } public List<LeaderAndIsrPartitionError> partitionErrors() { return this.partitionErrors; } public LeaderAndIsrTopicErrorCollection topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public LeaderAndIsrResponseData setErrorCode(short v) { this.errorCode = v; return this; } public LeaderAndIsrResponseData setPartitionErrors(List<LeaderAndIsrPartitionError> v) { this.partitionErrors = v; return this; } public LeaderAndIsrResponseData 
setTopics(LeaderAndIsrTopicErrorCollection v) { this.topics = v; return this; } public static class LeaderAndIsrTopicError implements Message, ImplicitLinkedHashMultiCollection.Element { Uuid topicId; List<LeaderAndIsrPartitionError> partitionErrors; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_5 = new Schema( new Field("topic_id", Type.UUID, "The unique topic ID"), new Field("partition_errors", new CompactArrayOf(LeaderAndIsrPartitionError.SCHEMA_5), "Each partition."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema[] SCHEMAS = new Schema[] { null, null, null, null, null, SCHEMA_5, SCHEMA_6, SCHEMA_7 }; public static final short LOWEST_SUPPORTED_VERSION = 5; public static final short HIGHEST_SUPPORTED_VERSION = 7; public LeaderAndIsrTopicError(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public LeaderAndIsrTopicError() { this.topicId = Uuid.ZERO_UUID; this.partitionErrors = new ArrayList<LeaderAndIsrPartitionError>(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 7; } @Override public void read(Readable _readable, short _version) { if (_version > 7) { throw new UnsupportedVersionException("Can't read version " + _version + " of LeaderAndIsrTopicError"); } this.topicId = _readable.readUuid(); { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitionErrors was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a 
collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<LeaderAndIsrPartitionError> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new LeaderAndIsrPartitionError(_readable, _version)); } this.partitionErrors = newCollection; } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { if (_version < 5) { throw new UnsupportedVersionException("Can't write version " + _version + " of LeaderAndIsrTopicError"); } int _numTaggedFields = 0; _writable.writeUuid(topicId); _writable.writeUnsignedVarint(partitionErrors.size() + 1); for (LeaderAndIsrPartitionError partitionErrorsElement : partitionErrors) { partitionErrorsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 7) { throw new UnsupportedVersionException("Can't size version " + _version + " of LeaderAndIsrTopicError"); } _size.addBytes(16); { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitionErrors.size() + 1)); for (LeaderAndIsrPartitionError partitionErrorsElement : partitionErrors) { partitionErrorsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += 
_unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof LeaderAndIsrTopicError)) return false; LeaderAndIsrTopicError other = (LeaderAndIsrTopicError) obj; if (!this.topicId.equals(other.topicId)) return false; return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof LeaderAndIsrTopicError)) return false; LeaderAndIsrTopicError other = (LeaderAndIsrTopicError) obj; if (!this.topicId.equals(other.topicId)) return false; if (this.partitionErrors == null) { if (other.partitionErrors != null) return false; } else { if (!this.partitionErrors.equals(other.partitionErrors)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + topicId.hashCode(); return hashCode; } @Override public LeaderAndIsrTopicError duplicate() { LeaderAndIsrTopicError _duplicate = new LeaderAndIsrTopicError(); _duplicate.topicId = topicId; ArrayList<LeaderAndIsrPartitionError> newPartitionErrors = new ArrayList<LeaderAndIsrPartitionError>(partitionErrors.size()); for (LeaderAndIsrPartitionError _element : partitionErrors) { newPartitionErrors.add(_element.duplicate()); } _duplicate.partitionErrors = newPartitionErrors; return _duplicate; } @Override public String toString() { return "LeaderAndIsrTopicError(" + "topicId=" + topicId.toString() + ", partitionErrors=" + MessageUtil.deepToString(partitionErrors.iterator()) + ")"; } public Uuid topicId() { return this.topicId; } public List<LeaderAndIsrPartitionError> partitionErrors() { return this.partitionErrors; } @Override public int next() { return this.next; } 
@Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public LeaderAndIsrTopicError setTopicId(Uuid v) { this.topicId = v; return this; } public LeaderAndIsrTopicError setPartitionErrors(List<LeaderAndIsrPartitionError> v) { this.partitionErrors = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class LeaderAndIsrTopicErrorCollection extends ImplicitLinkedHashMultiCollection<LeaderAndIsrTopicError> { public LeaderAndIsrTopicErrorCollection() { super(); } public LeaderAndIsrTopicErrorCollection(int expectedNumElements) { super(expectedNumElements); } public LeaderAndIsrTopicErrorCollection(Iterator<LeaderAndIsrTopicError> iterator) { super(iterator); } public LeaderAndIsrTopicError find(Uuid topicId) { LeaderAndIsrTopicError _key = new LeaderAndIsrTopicError(); _key.setTopicId(topicId); return find(_key); } public List<LeaderAndIsrTopicError> findAll(Uuid topicId) { LeaderAndIsrTopicError _key = new LeaderAndIsrTopicError(); _key.setTopicId(topicId); return findAll(_key); } public LeaderAndIsrTopicErrorCollection duplicate() { LeaderAndIsrTopicErrorCollection _duplicate = new LeaderAndIsrTopicErrorCollection(size()); for (LeaderAndIsrTopicError _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } public static class LeaderAndIsrPartitionError implements Message { String topicName; int partitionIndex; short errorCode; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topic_name", Type.STRING, "The topic name."), new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The partition error code, or 0 if there was no error.") ); public static final Schema SCHEMA_1 = 
SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("topic_name", Type.COMPACT_STRING, "The topic name."), new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The partition error code, or 0 if there was no error."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_5 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The partition error code, or 0 if there was no error."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 7; public LeaderAndIsrPartitionError(Readable _readable, short _version) { read(_readable, _version); } public LeaderAndIsrPartitionError() { this.topicName = ""; this.partitionIndex = 0; this.errorCode = (short) 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 32767; } @Override public void read(Readable _readable, short _version) { if (_version <= 4) { int length; if (_version >= 4) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field topicName was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field topicName had invalid length " + length); } else { this.topicName = _readable.readString(length); } } else { this.topicName = ""; } this.partitionIndex = _readable.readInt(); this.errorCode = _readable.readShort(); this._unknownTaggedFields = null; if (_version >= 4) { int 
_numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version <= 4) { { byte[] _stringBytes = _cache.getSerializedValue(topicName); if (_version >= 4) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } _writable.writeInt(partitionIndex); _writable.writeShort(errorCode); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 4) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version <= 4) { { byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'topicName' field is too long to be serialized"); } _cache.cacheSerializedValue(topicName, _stringBytes); if (_version >= 4) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } _size.addBytes(4); _size.addBytes(2); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof LeaderAndIsrPartitionError)) return false; LeaderAndIsrPartitionError other = (LeaderAndIsrPartitionError) obj; if (this.topicName == null) { if (other.topicName != null) return false; } else { if (!this.topicName.equals(other.topicName)) return false; } if (partitionIndex != other.partitionIndex) return false; if (errorCode != other.errorCode) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode()); hashCode = 31 * hashCode + partitionIndex; hashCode = 31 * hashCode + errorCode; return hashCode; } @Override public LeaderAndIsrPartitionError duplicate() { LeaderAndIsrPartitionError _duplicate = new LeaderAndIsrPartitionError(); _duplicate.topicName = topicName; _duplicate.partitionIndex = partitionIndex; _duplicate.errorCode = errorCode; return _duplicate; } @Override public String toString() { return "LeaderAndIsrPartitionError(" + "topicName=" + ((topicName == null) ? 
"null" : "'" + topicName.toString() + "'") + ", partitionIndex=" + partitionIndex + ", errorCode=" + errorCode + ")"; } public String topicName() { return this.topicName; } public int partitionIndex() { return this.partitionIndex; } public short errorCode() { return this.errorCode; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public LeaderAndIsrPartitionError setTopicName(String v) { this.topicName = v; return this; } public LeaderAndIsrPartitionError setPartitionIndex(int v) { this.partitionIndex = v; return this; } public LeaderAndIsrPartitionError setErrorCode(short v) { this.errorCode = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/LeaderAndIsrResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.LeaderAndIsrResponseData.*; public class LeaderAndIsrResponseDataJsonConverter { public static LeaderAndIsrResponseData read(JsonNode _node, short _version) { LeaderAndIsrResponseData _object = new LeaderAndIsrResponseData(); JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("LeaderAndIsrResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, 
"LeaderAndIsrResponseData"); } JsonNode _partitionErrorsNode = _node.get("partitionErrors"); if (_partitionErrorsNode == null) { if (_version <= 4) { throw new RuntimeException("LeaderAndIsrResponseData: unable to locate field 'partitionErrors', which is mandatory in version " + _version); } else { _object.partitionErrors = new ArrayList<LeaderAndIsrPartitionError>(0); } } else { if (!_partitionErrorsNode.isArray()) { throw new RuntimeException("LeaderAndIsrResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<LeaderAndIsrPartitionError> _collection = new ArrayList<LeaderAndIsrPartitionError>(_partitionErrorsNode.size()); _object.partitionErrors = _collection; for (JsonNode _element : _partitionErrorsNode) { _collection.add(LeaderAndIsrPartitionErrorJsonConverter.read(_element, _version)); } } JsonNode _topicsNode = _node.get("topics"); if (_topicsNode == null) { if (_version >= 5) { throw new RuntimeException("LeaderAndIsrResponseData: unable to locate field 'topics', which is mandatory in version " + _version); } else { _object.topics = new LeaderAndIsrTopicErrorCollection(0); } } else { if (!_topicsNode.isArray()) { throw new RuntimeException("LeaderAndIsrResponseData expected a JSON array, but got " + _node.getNodeType()); } LeaderAndIsrTopicErrorCollection _collection = new LeaderAndIsrTopicErrorCollection(_topicsNode.size()); _object.topics = _collection; for (JsonNode _element : _topicsNode) { _collection.add(LeaderAndIsrTopicErrorJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(LeaderAndIsrResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("errorCode", new ShortNode(_object.errorCode)); if (_version <= 4) { ArrayNode _partitionErrorsArray = new ArrayNode(JsonNodeFactory.instance); for (LeaderAndIsrPartitionError _element : _object.partitionErrors) { 
_partitionErrorsArray.add(LeaderAndIsrPartitionErrorJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitionErrors", _partitionErrorsArray); } else { if (!_object.partitionErrors.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default partitionErrors at version " + _version); } } if (_version >= 5) { ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance); for (LeaderAndIsrTopicError _element : _object.topics) { _topicsArray.add(LeaderAndIsrTopicErrorJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topics", _topicsArray); } else { if (!_object.topics.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default topics at version " + _version); } } return _node; } public static JsonNode write(LeaderAndIsrResponseData _object, short _version) { return write(_object, _version, true); } public static class LeaderAndIsrPartitionErrorJsonConverter { public static LeaderAndIsrPartitionError read(JsonNode _node, short _version) { LeaderAndIsrPartitionError _object = new LeaderAndIsrPartitionError(); JsonNode _topicNameNode = _node.get("topicName"); if (_topicNameNode == null) { if (_version <= 4) { throw new RuntimeException("LeaderAndIsrPartitionError: unable to locate field 'topicName', which is mandatory in version " + _version); } else { _object.topicName = ""; } } else { if (!_topicNameNode.isTextual()) { throw new RuntimeException("LeaderAndIsrPartitionError expected a string type, but got " + _node.getNodeType()); } _object.topicName = _topicNameNode.asText(); } JsonNode _partitionIndexNode = _node.get("partitionIndex"); if (_partitionIndexNode == null) { throw new RuntimeException("LeaderAndIsrPartitionError: unable to locate field 'partitionIndex', which is mandatory in version " + _version); } else { _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "LeaderAndIsrPartitionError"); } JsonNode _errorCodeNode = _node.get("errorCode"); 
if (_errorCodeNode == null) { throw new RuntimeException("LeaderAndIsrPartitionError: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "LeaderAndIsrPartitionError"); } return _object; } public static JsonNode write(LeaderAndIsrPartitionError _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_version <= 4) { _node.set("topicName", new TextNode(_object.topicName)); } _node.set("partitionIndex", new IntNode(_object.partitionIndex)); _node.set("errorCode", new ShortNode(_object.errorCode)); return _node; } public static JsonNode write(LeaderAndIsrPartitionError _object, short _version) { return write(_object, _version, true); } } public static class LeaderAndIsrTopicErrorJsonConverter { public static LeaderAndIsrTopicError read(JsonNode _node, short _version) { LeaderAndIsrTopicError _object = new LeaderAndIsrTopicError(); if (_version < 5) { throw new UnsupportedVersionException("Can't read version " + _version + " of LeaderAndIsrTopicError"); } JsonNode _topicIdNode = _node.get("topicId"); if (_topicIdNode == null) { throw new RuntimeException("LeaderAndIsrTopicError: unable to locate field 'topicId', which is mandatory in version " + _version); } else { if (!_topicIdNode.isTextual()) { throw new RuntimeException("LeaderAndIsrTopicError expected a JSON string type, but got " + _node.getNodeType()); } _object.topicId = Uuid.fromString(_topicIdNode.asText()); } JsonNode _partitionErrorsNode = _node.get("partitionErrors"); if (_partitionErrorsNode == null) { throw new RuntimeException("LeaderAndIsrTopicError: unable to locate field 'partitionErrors', which is mandatory in version " + _version); } else { if (!_partitionErrorsNode.isArray()) { throw new RuntimeException("LeaderAndIsrTopicError expected a JSON array, but got " + _node.getNodeType()); } ArrayList<LeaderAndIsrPartitionError> _collection 
= new ArrayList<LeaderAndIsrPartitionError>(_partitionErrorsNode.size()); _object.partitionErrors = _collection; for (JsonNode _element : _partitionErrorsNode) { _collection.add(LeaderAndIsrPartitionErrorJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(LeaderAndIsrTopicError _object, short _version, boolean _serializeRecords) { if (_version < 5) { throw new UnsupportedVersionException("Can't write version " + _version + " of LeaderAndIsrTopicError"); } ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("topicId", new TextNode(_object.topicId.toString())); ArrayNode _partitionErrorsArray = new ArrayNode(JsonNodeFactory.instance); for (LeaderAndIsrPartitionError _element : _object.partitionErrors) { _partitionErrorsArray.add(LeaderAndIsrPartitionErrorJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitionErrors", _partitionErrorsArray); return _node; } public static JsonNode write(LeaderAndIsrTopicError _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/LeaderChangeMessage.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Auto-generated message holding the id of a newly elected leader together with
 * the voter set of the epoch and the voters that granted the election.
 *
 * <p>The wire format (see {@link #SCHEMA_0}) uses "flexible" encodings
 * throughout: compact arrays (length is written as an unsigned varint of
 * {@code size + 1}, with 0 reserved for null) and a trailing tagged-fields
 * section.  Only schema version 0 exists.
 *
 * <p>NOTE(review): this class is generated ("DO NOT EDIT" above) — the exact
 * read/write ordering below IS the wire contract; do not hand-modify it.
 */
public class LeaderChangeMessage implements ApiMessage {
    // All fields are package-private so the matching generated JSON converter
    // (in the same package) can access them directly.
    short version;
    int leaderId;
    List<Voter> voters;
    List<Voter> grantingVoters;
    // Tagged fields read off the wire whose tags this version does not know;
    // preserved so they round-trip through write().
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("version", Type.INT16, "The version of the leader change message"),
            new Field("leader_id", Type.INT32, "The ID of the newly elected leader"),
            new Field("voters", new CompactArrayOf(Voter.SCHEMA_0), "The set of voters in the quorum for this epoch"),
            new Field("granting_voters", new CompactArrayOf(Voter.SCHEMA_0), "The voters who voted for the leader at the time of election"),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;

    /** Deserializing constructor: delegates to {@link #read(Readable, short)}. */
    public LeaderChangeMessage(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: zero leader id and empty (but mutable) voter lists. */
    public LeaderChangeMessage() {
        this.version = (short) 0;
        this.leaderId = 0;
        this.voters = new ArrayList<Voter>(0);
        this.grantingVoters = new ArrayList<Voter>(0);
    }

    /** Returns -1: this message is not tied to an RPC API key. */
    @Override
    public short apiKey() {
        return -1;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /**
     * Populates this object from the wire format.  Field order must mirror
     * {@link #write}: version, leaderId, voters, grantingVoters, tagged fields.
     *
     * @throws RuntimeException if a non-nullable array was encoded as null, or
     *         if an array length exceeds the bytes remaining (corrupt input).
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.version = _readable.readShort();
        this.leaderId = _readable.readInt();
        {
            int arrayLength;
            // Compact array: varint holds size + 1; 0 would mean null.
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field voters was serialized as null");
            } else {
                // Sanity check before allocating to avoid huge allocations from
                // a corrupt or malicious length prefix.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<Voter> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new Voter(_readable, _version));
                }
                this.voters = newCollection;
            }
        }
        {
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field grantingVoters was serialized as null");
            } else {
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<Voter> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new Voter(_readable, _version));
                }
                this.grantingVoters = newCollection;
            }
        }
        // Trailing tagged-fields section: no tags are known at this version,
        // so everything lands in _unknownTaggedFields.
        this._unknownTaggedFields = null;
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /**
     * Serializes this object; must stay in lockstep with {@link #read} and
     * {@link #addSize}.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeShort(version);
        _writable.writeInt(leaderId);
        // Compact arrays: length is encoded as size + 1.
        _writable.writeUnsignedVarint(voters.size() + 1);
        for (Voter votersElement : voters) {
            votersElement.write(_writable, _cache, _version);
        }
        _writable.writeUnsignedVarint(grantingVoters.size() + 1);
        for (Voter grantingVotersElement : grantingVoters) {
            grantingVotersElement.write(_writable, _cache, _version);
        }
        // Re-emit any unknown tagged fields captured during read().
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /**
     * Accumulates the serialized size of this object.  The byte counts here
     * must match exactly what {@link #write} emits.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(2);  // version (INT16)
        _size.addBytes(4);  // leaderId (INT32)
        {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(voters.size() + 1));
            for (Voter votersElement : voters) {
                votersElement.addSize(_size, _cache, _version);
            }
        }
        {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(grantingVoters.size() + 1));
            for (Voter grantingVotersElement : grantingVoters) {
                grantingVotersElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    /** Structural equality over every field, including unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof LeaderChangeMessage)) return false;
        LeaderChangeMessage other = (LeaderChangeMessage) obj;
        if (version != other.version) return false;
        if (leaderId != other.leaderId) return false;
        if (this.voters == null) {
            if (other.voters != null) return false;
        } else {
            if (!this.voters.equals(other.voters)) return false;
        }
        if (this.grantingVoters == null) {
            if (other.grantingVoters != null) return false;
        } else {
            if (!this.grantingVoters.equals(other.grantingVoters)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // NOTE(review): hashCode deliberately excludes _unknownTaggedFields even
    // though equals() compares them — equal objects still hash equally, so the
    // equals/hashCode contract holds.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + version;
        hashCode = 31 * hashCode + leaderId;
        hashCode = 31 * hashCode + (voters == null ? 0 : voters.hashCode());
        hashCode = 31 * hashCode + (grantingVoters == null ? 0 : grantingVoters.hashCode());
        return hashCode;
    }

    /**
     * Deep copy: voter lists and their elements are duplicated.
     * NOTE(review): unknown tagged fields are not copied into the duplicate.
     */
    @Override
    public LeaderChangeMessage duplicate() {
        LeaderChangeMessage _duplicate = new LeaderChangeMessage();
        _duplicate.version = version;
        _duplicate.leaderId = leaderId;
        ArrayList<Voter> newVoters = new ArrayList<Voter>(voters.size());
        for (Voter _element : voters) {
            newVoters.add(_element.duplicate());
        }
        _duplicate.voters = newVoters;
        ArrayList<Voter> newGrantingVoters = new ArrayList<Voter>(grantingVoters.size());
        for (Voter _element : grantingVoters) {
            newGrantingVoters.add(_element.duplicate());
        }
        _duplicate.grantingVoters = newGrantingVoters;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "LeaderChangeMessage("
            + "version=" + version
            + ", leaderId=" + leaderId
            + ", voters=" + MessageUtil.deepToString(voters.iterator())
            + ", grantingVoters=" + MessageUtil.deepToString(grantingVoters.iterator())
            + ")";
    }

    public short version() {
        return this.version;
    }

    public int leaderId() {
        return this.leaderId;
    }

    public List<Voter> voters() {
        return this.voters;
    }

    public List<Voter> grantingVoters() {
        return this.grantingVoters;
    }

    /** Lazily materializes the unknown-tagged-fields list; never returns null. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public LeaderChangeMessage setVersion(short v) {
        this.version = v;
        return this;
    }

    public LeaderChangeMessage setLeaderId(int v) {
        this.leaderId = v;
        return this;
    }

    public LeaderChangeMessage setVoters(List<Voter> v) {
        this.voters = v;
        return this;
    }

    public LeaderChangeMessage setGrantingVoters(List<Voter> v) {
        this.grantingVoters = v;
        return this;
    }

    /**
     * A single voter id, serialized as an INT32 followed by a tagged-fields
     * section (empty at this version).
     */
    public static class Voter implements Message {
        int voterId;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("voter_id", Type.INT32, ""),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 0;

        /** Deserializing constructor: delegates to {@link #read(Readable, short)}. */
        public Voter(Readable _readable, short _version) {
            read(_readable, _version);
        }

        public Voter() {
            this.voterId = 0;
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        // NOTE(review): returns 32767 (Short.MAX_VALUE) rather than
        // HIGHEST_SUPPORTED_VERSION (0) — this is the generator's convention
        // for nested structs, whose effective version is bounded by the parent.
        @Override
        public short highestSupportedVersion() {
            return 32767;
        }

        @Override
        public void read(Readable _readable, short _version) {
            this.voterId = _readable.readInt();
            // Tagged-fields section: no known tags, so capture everything raw.
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            _writable.writeInt(voterId);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        /** Byte counts here must match {@link #write} exactly. */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            _size.addBytes(4);  // voterId (INT32)
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof Voter)) return false;
            Voter other = (Voter) obj;
            if (voterId != other.voterId) return false;
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + voterId;
            return hashCode;
        }

        // NOTE(review): unknown tagged fields are not carried into the copy.
        @Override
        public Voter duplicate() {
            Voter _duplicate = new Voter();
            _duplicate.voterId = voterId;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "Voter("
                + "voterId=" + voterId
                + ")";
        }

        public int voterId() {
            return this.voterId;
        }

        /** Lazily materializes the unknown-tagged-fields list; never returns null. */
        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public Voter setVoterId(int v) {
            this.voterId = v;
            return this;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/LeaderChangeMessageJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.LeaderChangeMessage.*;

/**
 * Auto-generated JSON converter for {@link LeaderChangeMessage}.
 *
 * <p>JSON field names use camelCase ("leaderId", "grantingVoters"), unlike the
 * snake_case names in the binary {@code Schema}.  Every field is mandatory at
 * version 0: a missing field raises a {@link RuntimeException} on read.
 */
public class LeaderChangeMessageJsonConverter {
    /**
     * Builds a {@link LeaderChangeMessage} from a JSON object node.
     *
     * @throws RuntimeException if a mandatory field is absent or an array
     *         field is not a JSON array.
     */
    public static LeaderChangeMessage read(JsonNode _node, short _version) {
        LeaderChangeMessage _object = new LeaderChangeMessage();
        JsonNode _versionNode = _node.get("version");
        if (_versionNode == null) {
            throw new RuntimeException("LeaderChangeMessage: unable to locate field 'version', which is mandatory in version " + _version);
        } else {
            _object.version = MessageUtil.jsonNodeToShort(_versionNode, "LeaderChangeMessage");
        }
        JsonNode _leaderIdNode = _node.get("leaderId");
        if (_leaderIdNode == null) {
            throw new RuntimeException("LeaderChangeMessage: unable to locate field 'leaderId', which is mandatory in version " + _version);
        } else {
            _object.leaderId = MessageUtil.jsonNodeToInt(_leaderIdNode, "LeaderChangeMessage");
        }
        JsonNode _votersNode = _node.get("voters");
        if (_votersNode == null) {
            throw new RuntimeException("LeaderChangeMessage: unable to locate field 'voters', which is mandatory in version " + _version);
        } else {
            if (!_votersNode.isArray()) {
                throw new RuntimeException("LeaderChangeMessage expected a JSON array, but got " + _node.getNodeType());
            }
            // Presize from the JSON array and delegate element parsing to the
            // nested Voter converter.
            ArrayList<Voter> _collection = new ArrayList<Voter>(_votersNode.size());
            _object.voters = _collection;
            for (JsonNode _element : _votersNode) {
                _collection.add(VoterJsonConverter.read(_element, _version));
            }
        }
        JsonNode _grantingVotersNode = _node.get("grantingVoters");
        if (_grantingVotersNode == null) {
            throw new RuntimeException("LeaderChangeMessage: unable to locate field 'grantingVoters', which is mandatory in version " + _version);
        } else {
            if (!_grantingVotersNode.isArray()) {
                throw new RuntimeException("LeaderChangeMessage expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<Voter> _collection = new ArrayList<Voter>(_grantingVotersNode.size());
            _object.grantingVoters = _collection;
            for (JsonNode _element : _grantingVotersNode) {
                _collection.add(VoterJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Renders the message as a JSON object.  {@code _serializeRecords} is
     * threaded through to element converters; it is not consulted here since
     * no field of this message is a record batch.
     */
    public static JsonNode write(LeaderChangeMessage _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("version", new ShortNode(_object.version));
        _node.set("leaderId", new IntNode(_object.leaderId));
        ArrayNode _votersArray = new ArrayNode(JsonNodeFactory.instance);
        for (Voter _element : _object.voters) {
            _votersArray.add(VoterJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("voters", _votersArray);
        ArrayNode _grantingVotersArray = new ArrayNode(JsonNodeFactory.instance);
        for (Voter _element : _object.grantingVoters) {
            _grantingVotersArray.add(VoterJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("grantingVoters", _grantingVotersArray);
        return _node;
    }

    /** Convenience overload: serializes records by default. */
    public static JsonNode write(LeaderChangeMessage _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the nested {@link Voter} struct (single int field). */
    public static class VoterJsonConverter {
        public static Voter read(JsonNode _node, short _version) {
            Voter _object = new Voter();
            JsonNode _voterIdNode = _node.get("voterId");
            if (_voterIdNode == null) {
                throw new RuntimeException("Voter: unable to locate field 'voterId', which is mandatory in version " + _version);
            } else {
                _object.voterId = MessageUtil.jsonNodeToInt(_voterIdNode, "Voter");
            }
            return _object;
        }

        public static JsonNode write(Voter _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("voterId", new IntNode(_object.voterId));
            return _node;
        }

        /** Convenience overload: serializes records by default. */
        public static JsonNode write(Voter _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/LeaveGroupRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class LeaveGroupRequestData implements 
ApiMessage { String groupId; String memberId; List<MemberIdentity> members; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("group_id", Type.STRING, "The ID of the group to leave."), new Field("member_id", Type.STRING, "The member ID to remove from the group.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("group_id", Type.STRING, "The ID of the group to leave."), new Field("members", new ArrayOf(MemberIdentity.SCHEMA_3), "List of leaving member identities.") ); public static final Schema SCHEMA_4 = new Schema( new Field("group_id", Type.COMPACT_STRING, "The ID of the group to leave."), new Field("members", new CompactArrayOf(MemberIdentity.SCHEMA_4), "List of leaving member identities."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_5 = new Schema( new Field("group_id", Type.COMPACT_STRING, "The ID of the group to leave."), new Field("members", new CompactArrayOf(MemberIdentity.SCHEMA_5), "List of leaving member identities."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 5; public LeaveGroupRequestData(Readable _readable, short _version) { read(_readable, _version); } public LeaveGroupRequestData() { this.groupId = ""; this.memberId = ""; this.members = new ArrayList<MemberIdentity>(0); } @Override public short apiKey() { return 13; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 5; } @Override public void read(Readable _readable, short _version) { { int length; if (_version >= 4) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw 
new RuntimeException("non-nullable field groupId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field groupId had invalid length " + length); } else { this.groupId = _readable.readString(length); } } if (_version <= 2) { int length; length = _readable.readShort(); if (length < 0) { throw new RuntimeException("non-nullable field memberId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field memberId had invalid length " + length); } else { this.memberId = _readable.readString(length); } } else { this.memberId = ""; } if (_version >= 3) { if (_version >= 4) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field members was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<MemberIdentity> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new MemberIdentity(_readable, _version)); } this.members = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field members was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<MemberIdentity> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new MemberIdentity(_readable, _version)); } this.members = newCollection; } } } else { this.members = new ArrayList<MemberIdentity>(0); } this._unknownTaggedFields = null; if (_version >= 4) { int _numTaggedFields = _readable.readUnsignedVarint(); for 
(int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(groupId); if (_version >= 4) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version <= 2) { { byte[] _stringBytes = _cache.getSerializedValue(memberId); _writable.writeShort((short) _stringBytes.length); _writable.writeByteArray(_stringBytes); } } else { if (!this.memberId.equals("")) { throw new UnsupportedVersionException("Attempted to write a non-default memberId at version " + _version); } } if (_version >= 3) { if (_version >= 4) { _writable.writeUnsignedVarint(members.size() + 1); for (MemberIdentity membersElement : members) { membersElement.write(_writable, _cache, _version); } } else { _writable.writeInt(members.size()); for (MemberIdentity membersElement : members) { membersElement.write(_writable, _cache, _version); } } } else { if (!this.members.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default members at version " + _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 4) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short 
_version) { int _numTaggedFields = 0; { byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'groupId' field is too long to be serialized"); } _cache.cacheSerializedValue(groupId, _stringBytes); if (_version >= 4) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_version <= 2) { { byte[] _stringBytes = memberId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'memberId' field is too long to be serialized"); } _cache.cacheSerializedValue(memberId, _stringBytes); _size.addBytes(_stringBytes.length + 2); } } if (_version >= 3) { { if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(members.size() + 1)); } else { _size.addBytes(4); } for (MemberIdentity membersElement : members) { membersElement.addSize(_size, _cache, _version); } } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof LeaveGroupRequestData)) return false; LeaveGroupRequestData other = (LeaveGroupRequestData) obj; if (this.groupId == null) { if (other.groupId != null) return false; } else { if (!this.groupId.equals(other.groupId)) return false; } if (this.memberId == null) { if (other.memberId != null) return false; } else { if (!this.memberId.equals(other.memberId)) return false; } if 
(this.members == null) { if (other.members != null) return false; } else { if (!this.members.equals(other.members)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode()); hashCode = 31 * hashCode + (memberId == null ? 0 : memberId.hashCode()); hashCode = 31 * hashCode + (members == null ? 0 : members.hashCode()); return hashCode; } @Override public LeaveGroupRequestData duplicate() { LeaveGroupRequestData _duplicate = new LeaveGroupRequestData(); _duplicate.groupId = groupId; _duplicate.memberId = memberId; ArrayList<MemberIdentity> newMembers = new ArrayList<MemberIdentity>(members.size()); for (MemberIdentity _element : members) { newMembers.add(_element.duplicate()); } _duplicate.members = newMembers; return _duplicate; } @Override public String toString() { return "LeaveGroupRequestData(" + "groupId=" + ((groupId == null) ? "null" : "'" + groupId.toString() + "'") + ", memberId=" + ((memberId == null) ? 
"null" : "'" + memberId.toString() + "'") + ", members=" + MessageUtil.deepToString(members.iterator()) + ")"; } public String groupId() { return this.groupId; } public String memberId() { return this.memberId; } public List<MemberIdentity> members() { return this.members; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public LeaveGroupRequestData setGroupId(String v) { this.groupId = v; return this; } public LeaveGroupRequestData setMemberId(String v) { this.memberId = v; return this; } public LeaveGroupRequestData setMembers(List<MemberIdentity> v) { this.members = v; return this; } public static class MemberIdentity implements Message { String memberId; String groupInstanceId; String reason; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_3 = new Schema( new Field("member_id", Type.STRING, "The member ID to remove from the group."), new Field("group_instance_id", Type.NULLABLE_STRING, "The group instance ID to remove from the group.") ); public static final Schema SCHEMA_4 = new Schema( new Field("member_id", Type.COMPACT_STRING, "The member ID to remove from the group."), new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, "The group instance ID to remove from the group."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_5 = new Schema( new Field("member_id", Type.COMPACT_STRING, "The member ID to remove from the group."), new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, "The group instance ID to remove from the group."), new Field("reason", Type.COMPACT_NULLABLE_STRING, "The reason why the member left the group."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { null, null, null, SCHEMA_3, SCHEMA_4, SCHEMA_5 }; public static final short LOWEST_SUPPORTED_VERSION = 3; public static final short HIGHEST_SUPPORTED_VERSION = 5; 
public MemberIdentity(Readable _readable, short _version) { read(_readable, _version); } public MemberIdentity() { this.memberId = ""; this.groupInstanceId = null; this.reason = null; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 5; } @Override public void read(Readable _readable, short _version) { if (_version > 5) { throw new UnsupportedVersionException("Can't read version " + _version + " of MemberIdentity"); } { int length; if (_version >= 4) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field memberId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field memberId had invalid length " + length); } else { this.memberId = _readable.readString(length); } } { int length; if (_version >= 4) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.groupInstanceId = null; } else if (length > 0x7fff) { throw new RuntimeException("string field groupInstanceId had invalid length " + length); } else { this.groupInstanceId = _readable.readString(length); } } if (_version >= 5) { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { this.reason = null; } else if (length > 0x7fff) { throw new RuntimeException("string field reason had invalid length " + length); } else { this.reason = _readable.readString(length); } } else { this.reason = null; } this._unknownTaggedFields = null; if (_version >= 4) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, 
ObjectSerializationCache _cache, short _version) { if (_version < 3) { throw new UnsupportedVersionException("Can't write version " + _version + " of MemberIdentity"); } int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(memberId); if (_version >= 4) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (groupInstanceId == null) { if (_version >= 4) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(groupInstanceId); if (_version >= 4) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 5) { if (reason == null) { _writable.writeUnsignedVarint(0); } else { byte[] _stringBytes = _cache.getSerializedValue(reason); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 4) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 5) { throw new UnsupportedVersionException("Can't size version " + _version + " of MemberIdentity"); } { byte[] _stringBytes = memberId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'memberId' field is too long to be serialized"); } _cache.cacheSerializedValue(memberId, _stringBytes); if 
(_version >= 4) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (groupInstanceId == null) { if (_version >= 4) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = groupInstanceId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'groupInstanceId' field is too long to be serialized"); } _cache.cacheSerializedValue(groupInstanceId, _stringBytes); if (_version >= 4) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_version >= 5) { if (reason == null) { _size.addBytes(1); } else { byte[] _stringBytes = reason.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'reason' field is too long to be serialized"); } _cache.cacheSerializedValue(reason, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof MemberIdentity)) return false; MemberIdentity other = (MemberIdentity) obj; if (this.memberId == null) { if (other.memberId != null) return false; } else { if (!this.memberId.equals(other.memberId)) return false; } if (this.groupInstanceId == null) { if 
(other.groupInstanceId != null) return false; } else { if (!this.groupInstanceId.equals(other.groupInstanceId)) return false; } if (this.reason == null) { if (other.reason != null) return false; } else { if (!this.reason.equals(other.reason)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (memberId == null ? 0 : memberId.hashCode()); hashCode = 31 * hashCode + (groupInstanceId == null ? 0 : groupInstanceId.hashCode()); hashCode = 31 * hashCode + (reason == null ? 0 : reason.hashCode()); return hashCode; } @Override public MemberIdentity duplicate() { MemberIdentity _duplicate = new MemberIdentity(); _duplicate.memberId = memberId; if (groupInstanceId == null) { _duplicate.groupInstanceId = null; } else { _duplicate.groupInstanceId = groupInstanceId; } if (reason == null) { _duplicate.reason = null; } else { _duplicate.reason = reason; } return _duplicate; } @Override public String toString() { return "MemberIdentity(" + "memberId=" + ((memberId == null) ? "null" : "'" + memberId.toString() + "'") + ", groupInstanceId=" + ((groupInstanceId == null) ? "null" : "'" + groupInstanceId.toString() + "'") + ", reason=" + ((reason == null) ? "null" : "'" + reason.toString() + "'") + ")"; } public String memberId() { return this.memberId; } public String groupInstanceId() { return this.groupInstanceId; } public String reason() { return this.reason; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public MemberIdentity setMemberId(String v) { this.memberId = v; return this; } public MemberIdentity setGroupInstanceId(String v) { this.groupInstanceId = v; return this; } public MemberIdentity setReason(String v) { this.reason = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/LeaveGroupRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.NullNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.errors.UnsupportedVersionException; import static org.apache.kafka.common.message.LeaveGroupRequestData.*; public class LeaveGroupRequestDataJsonConverter { public static LeaveGroupRequestData read(JsonNode _node, short _version) { LeaveGroupRequestData _object = new LeaveGroupRequestData(); JsonNode _groupIdNode = _node.get("groupId"); if (_groupIdNode == null) { throw new RuntimeException("LeaveGroupRequestData: unable to locate field 'groupId', which is mandatory in version " + _version); } else { if (!_groupIdNode.isTextual()) { throw new RuntimeException("LeaveGroupRequestData expected a string type, but got " + _node.getNodeType()); } _object.groupId = _groupIdNode.asText(); } JsonNode _memberIdNode = _node.get("memberId"); 
if (_memberIdNode == null) { if (_version <= 2) { throw new RuntimeException("LeaveGroupRequestData: unable to locate field 'memberId', which is mandatory in version " + _version); } else { _object.memberId = ""; } } else { if (!_memberIdNode.isTextual()) { throw new RuntimeException("LeaveGroupRequestData expected a string type, but got " + _node.getNodeType()); } _object.memberId = _memberIdNode.asText(); } JsonNode _membersNode = _node.get("members"); if (_membersNode == null) { if (_version >= 3) { throw new RuntimeException("LeaveGroupRequestData: unable to locate field 'members', which is mandatory in version " + _version); } else { _object.members = new ArrayList<MemberIdentity>(0); } } else { if (!_membersNode.isArray()) { throw new RuntimeException("LeaveGroupRequestData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<MemberIdentity> _collection = new ArrayList<MemberIdentity>(_membersNode.size()); _object.members = _collection; for (JsonNode _element : _membersNode) { _collection.add(MemberIdentityJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(LeaveGroupRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("groupId", new TextNode(_object.groupId)); if (_version <= 2) { _node.set("memberId", new TextNode(_object.memberId)); } else { if (!_object.memberId.equals("")) { throw new UnsupportedVersionException("Attempted to write a non-default memberId at version " + _version); } } if (_version >= 3) { ArrayNode _membersArray = new ArrayNode(JsonNodeFactory.instance); for (MemberIdentity _element : _object.members) { _membersArray.add(MemberIdentityJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("members", _membersArray); } else { if (!_object.members.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default members at version " + _version); } } return _node; } 
public static JsonNode write(LeaveGroupRequestData _object, short _version) { return write(_object, _version, true); } public static class MemberIdentityJsonConverter { public static MemberIdentity read(JsonNode _node, short _version) { MemberIdentity _object = new MemberIdentity(); if (_version < 3) { throw new UnsupportedVersionException("Can't read version " + _version + " of MemberIdentity"); } JsonNode _memberIdNode = _node.get("memberId"); if (_memberIdNode == null) { throw new RuntimeException("MemberIdentity: unable to locate field 'memberId', which is mandatory in version " + _version); } else { if (!_memberIdNode.isTextual()) { throw new RuntimeException("MemberIdentity expected a string type, but got " + _node.getNodeType()); } _object.memberId = _memberIdNode.asText(); } JsonNode _groupInstanceIdNode = _node.get("groupInstanceId"); if (_groupInstanceIdNode == null) { throw new RuntimeException("MemberIdentity: unable to locate field 'groupInstanceId', which is mandatory in version " + _version); } else { if (_groupInstanceIdNode.isNull()) { _object.groupInstanceId = null; } else { if (!_groupInstanceIdNode.isTextual()) { throw new RuntimeException("MemberIdentity expected a string type, but got " + _node.getNodeType()); } _object.groupInstanceId = _groupInstanceIdNode.asText(); } } JsonNode _reasonNode = _node.get("reason"); if (_reasonNode == null) { if (_version >= 5) { throw new RuntimeException("MemberIdentity: unable to locate field 'reason', which is mandatory in version " + _version); } else { _object.reason = null; } } else { if (_reasonNode.isNull()) { _object.reason = null; } else { if (!_reasonNode.isTextual()) { throw new RuntimeException("MemberIdentity expected a string type, but got " + _node.getNodeType()); } _object.reason = _reasonNode.asText(); } } return _object; } public static JsonNode write(MemberIdentity _object, short _version, boolean _serializeRecords) { if (_version < 3) { throw new UnsupportedVersionException("Can't write 
version " + _version + " of MemberIdentity"); } ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("memberId", new TextNode(_object.memberId)); if (_object.groupInstanceId == null) { _node.set("groupInstanceId", NullNode.instance); } else { _node.set("groupInstanceId", new TextNode(_object.groupInstanceId)); } if (_version >= 5) { if (_object.reason == null) { _node.set("reason", NullNode.instance); } else { _node.set("reason", new TextNode(_object.reason)); } } return _node; } public static JsonNode write(MemberIdentity _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/LeaveGroupResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class LeaveGroupResponseData 
implements ApiMessage { int throttleTimeMs; short errorCode; List<MemberResponse> members; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.") ); public static final Schema SCHEMA_1 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.") ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("members", new ArrayOf(MemberResponse.SCHEMA_3), "List of leaving member responses.") ); public static final Schema SCHEMA_4 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("members", new CompactArrayOf(MemberResponse.SCHEMA_4), "List of leaving member responses."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 5; public LeaveGroupResponseData(Readable _readable, short _version) { read(_readable, _version); } public LeaveGroupResponseData() { this.throttleTimeMs = 0; this.errorCode = (short) 0; this.members = 
new ArrayList<MemberResponse>(0); } @Override public short apiKey() { return 13; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 5; } @Override public void read(Readable _readable, short _version) { if (_version >= 1) { this.throttleTimeMs = _readable.readInt(); } else { this.throttleTimeMs = 0; } this.errorCode = _readable.readShort(); if (_version >= 3) { if (_version >= 4) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field members was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<MemberResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new MemberResponse(_readable, _version)); } this.members = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field members was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<MemberResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new MemberResponse(_readable, _version)); } this.members = newCollection; } } } else { this.members = new ArrayList<MemberResponse>(0); } this._unknownTaggedFields = null; if (_version >= 4) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = 
_readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 1) { _writable.writeInt(throttleTimeMs); } _writable.writeShort(errorCode); if (_version >= 3) { if (_version >= 4) { _writable.writeUnsignedVarint(members.size() + 1); for (MemberResponse membersElement : members) { membersElement.write(_writable, _cache, _version); } } else { _writable.writeInt(members.size()); for (MemberResponse membersElement : members) { membersElement.write(_writable, _cache, _version); } } } else { if (!this.members.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default members at version " + _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 4) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 1) { _size.addBytes(4); } _size.addBytes(2); if (_version >= 3) { { if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(members.size() + 1)); } else { _size.addBytes(4); } for (MemberResponse membersElement : members) { membersElement.addSize(_size, _cache, _version); } } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 4) { 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof LeaveGroupResponseData)) return false; LeaveGroupResponseData other = (LeaveGroupResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (errorCode != other.errorCode) return false; if (this.members == null) { if (other.members != null) return false; } else { if (!this.members.equals(other.members)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (members == null ? 0 : members.hashCode()); return hashCode; } @Override public LeaveGroupResponseData duplicate() { LeaveGroupResponseData _duplicate = new LeaveGroupResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; _duplicate.errorCode = errorCode; ArrayList<MemberResponse> newMembers = new ArrayList<MemberResponse>(members.size()); for (MemberResponse _element : members) { newMembers.add(_element.duplicate()); } _duplicate.members = newMembers; return _duplicate; } @Override public String toString() { return "LeaveGroupResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", errorCode=" + errorCode + ", members=" + MessageUtil.deepToString(members.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public short errorCode() { return this.errorCode; } public List<MemberResponse> members() { return this.members; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public LeaveGroupResponseData setThrottleTimeMs(int v) 
{ this.throttleTimeMs = v; return this; } public LeaveGroupResponseData setErrorCode(short v) { this.errorCode = v; return this; } public LeaveGroupResponseData setMembers(List<MemberResponse> v) { this.members = v; return this; } public static class MemberResponse implements Message { String memberId; String groupInstanceId; short errorCode; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_3 = new Schema( new Field("member_id", Type.STRING, "The member ID to remove from the group."), new Field("group_instance_id", Type.NULLABLE_STRING, "The group instance ID to remove from the group."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.") ); public static final Schema SCHEMA_4 = new Schema( new Field("member_id", Type.COMPACT_STRING, "The member ID to remove from the group."), new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, "The group instance ID to remove from the group."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema[] SCHEMAS = new Schema[] { null, null, null, SCHEMA_3, SCHEMA_4, SCHEMA_5 }; public static final short LOWEST_SUPPORTED_VERSION = 3; public static final short HIGHEST_SUPPORTED_VERSION = 5; public MemberResponse(Readable _readable, short _version) { read(_readable, _version); } public MemberResponse() { this.memberId = ""; this.groupInstanceId = ""; this.errorCode = (short) 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 5; } @Override public void read(Readable _readable, short _version) { if (_version > 5) { throw new UnsupportedVersionException("Can't read version " + _version + " of MemberResponse"); } { int length; if (_version >= 4) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new 
RuntimeException("non-nullable field memberId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field memberId had invalid length " + length); } else { this.memberId = _readable.readString(length); } } { int length; if (_version >= 4) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.groupInstanceId = null; } else if (length > 0x7fff) { throw new RuntimeException("string field groupInstanceId had invalid length " + length); } else { this.groupInstanceId = _readable.readString(length); } } this.errorCode = _readable.readShort(); this._unknownTaggedFields = null; if (_version >= 4) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { if (_version < 3) { throw new UnsupportedVersionException("Can't write version " + _version + " of MemberResponse"); } int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(memberId); if (_version >= 4) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (groupInstanceId == null) { if (_version >= 4) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(groupInstanceId); if (_version >= 4) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } _writable.writeShort(errorCode); RawTaggedFieldWriter _rawWriter = 
RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 4) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 5) { throw new UnsupportedVersionException("Can't size version " + _version + " of MemberResponse"); } { byte[] _stringBytes = memberId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'memberId' field is too long to be serialized"); } _cache.cacheSerializedValue(memberId, _stringBytes); if (_version >= 4) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (groupInstanceId == null) { if (_version >= 4) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = groupInstanceId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'groupInstanceId' field is too long to be serialized"); } _cache.cacheSerializedValue(groupInstanceId, _stringBytes); if (_version >= 4) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } _size.addBytes(2); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } 
else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof MemberResponse)) return false; MemberResponse other = (MemberResponse) obj; if (this.memberId == null) { if (other.memberId != null) return false; } else { if (!this.memberId.equals(other.memberId)) return false; } if (this.groupInstanceId == null) { if (other.groupInstanceId != null) return false; } else { if (!this.groupInstanceId.equals(other.groupInstanceId)) return false; } if (errorCode != other.errorCode) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (memberId == null ? 0 : memberId.hashCode()); hashCode = 31 * hashCode + (groupInstanceId == null ? 0 : groupInstanceId.hashCode()); hashCode = 31 * hashCode + errorCode; return hashCode; } @Override public MemberResponse duplicate() { MemberResponse _duplicate = new MemberResponse(); _duplicate.memberId = memberId; if (groupInstanceId == null) { _duplicate.groupInstanceId = null; } else { _duplicate.groupInstanceId = groupInstanceId; } _duplicate.errorCode = errorCode; return _duplicate; } @Override public String toString() { return "MemberResponse(" + "memberId=" + ((memberId == null) ? "null" : "'" + memberId.toString() + "'") + ", groupInstanceId=" + ((groupInstanceId == null) ? 
"null" : "'" + groupInstanceId.toString() + "'") + ", errorCode=" + errorCode + ")"; } public String memberId() { return this.memberId; } public String groupInstanceId() { return this.groupInstanceId; } public short errorCode() { return this.errorCode; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public MemberResponse setMemberId(String v) { this.memberId = v; return this; } public MemberResponse setGroupInstanceId(String v) { this.groupInstanceId = v; return this; } public MemberResponse setErrorCode(short v) { this.errorCode = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/LeaveGroupResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.LeaveGroupResponseData.*;

/**
 * Converts {@link LeaveGroupResponseData} to and from a Jackson JSON tree,
 * honoring the per-version field layout of the LeaveGroup response schema:
 * {@code throttleTimeMs} exists from v1 on, per-member responses from v3 on.
 */
public class LeaveGroupResponseDataJsonConverter {
    /**
     * Builds a {@link LeaveGroupResponseData} from a JSON object node.
     *
     * @param _node    the JSON object to read fields from
     * @param _version the message version whose schema decides which fields are mandatory
     * @throws RuntimeException if a field that is mandatory at {@code _version} is
     *         absent, or a field has the wrong JSON node type
     */
    public static LeaveGroupResponseData read(JsonNode _node, short _version) {
        LeaveGroupResponseData _object = new LeaveGroupResponseData();
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            // throttleTimeMs was added in version 1; older versions default to 0.
            if (_version >= 1) {
                throw new RuntimeException("LeaveGroupResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
            } else {
                _object.throttleTimeMs = 0;
            }
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "LeaveGroupResponseData");
        }
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            // errorCode is mandatory in every version.
            throw new RuntimeException("LeaveGroupResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "LeaveGroupResponseData");
        }
        JsonNode _membersNode = _node.get("members");
        if (_membersNode == null) {
            // Per-member results were added in version 3; earlier versions get an empty list.
            if (_version >= 3) {
                throw new RuntimeException("LeaveGroupResponseData: unable to locate field 'members', which is mandatory in version " + _version);
            } else {
                _object.members = new ArrayList<MemberResponse>(0);
            }
        } else {
            if (!_membersNode.isArray()) {
                throw new RuntimeException("LeaveGroupResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<MemberResponse> _collection = new ArrayList<MemberResponse>(_membersNode.size());
            _object.members = _collection;
            for (JsonNode _element : _membersNode) {
                _collection.add(MemberResponseJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes {@code _object} to a JSON tree, emitting only fields that exist
     * at {@code _version}.
     *
     * @throws UnsupportedVersionException if {@code members} is non-empty but
     *         {@code _version} predates the members field (v3)
     */
    public static JsonNode write(LeaveGroupResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        if (_version >= 1) {
            _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        }
        _node.set("errorCode", new ShortNode(_object.errorCode));
        if (_version >= 3) {
            ArrayNode _membersArray = new ArrayNode(JsonNodeFactory.instance);
            for (MemberResponse _element : _object.members) {
                _membersArray.add(MemberResponseJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("members", _membersArray);
        } else {
            // Older versions cannot carry member-level results; fail loudly rather than drop data.
            if (!_object.members.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default members at version " + _version);
            }
        }
        return _node;
    }

    /** Convenience overload that always serializes record data. */
    public static JsonNode write(LeaveGroupResponseData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the nested {@link MemberResponse} struct, which exists only in v3+. */
    public static class MemberResponseJsonConverter {
        /**
         * Reads one member's leave result from JSON. {@code groupInstanceId} is a
         * nullable field: the key must be present, but its value may be JSON null.
         */
        public static MemberResponse read(JsonNode _node, short _version) {
            MemberResponse _object = new MemberResponse();
            if (_version < 3) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of MemberResponse");
            }
            JsonNode _memberIdNode = _node.get("memberId");
            if (_memberIdNode == null) {
                throw new RuntimeException("MemberResponse: unable to locate field 'memberId', which is mandatory in version " + _version);
            } else {
                if (!_memberIdNode.isTextual()) {
                    throw new RuntimeException("MemberResponse expected a string type, but got " + _node.getNodeType());
                }
                _object.memberId = _memberIdNode.asText();
            }
            JsonNode _groupInstanceIdNode = _node.get("groupInstanceId");
            if (_groupInstanceIdNode == null) {
                throw new RuntimeException("MemberResponse: unable to locate field 'groupInstanceId', which is mandatory in version " + _version);
            } else {
                if (_groupInstanceIdNode.isNull()) {
                    // Explicit JSON null maps to a null groupInstanceId.
                    _object.groupInstanceId = null;
                } else {
                    if (!_groupInstanceIdNode.isTextual()) {
                        throw new RuntimeException("MemberResponse expected a string type, but got " + _node.getNodeType());
                    }
                    _object.groupInstanceId = _groupInstanceIdNode.asText();
                }
            }
            JsonNode _errorCodeNode = _node.get("errorCode");
            if (_errorCodeNode == null) {
                throw new RuntimeException("MemberResponse: unable to locate field 'errorCode', which is mandatory in version " + _version);
            } else {
                _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "MemberResponse");
            }
            return _object;
        }

        /** Writes one member's leave result; a null groupInstanceId is emitted as JSON null. */
        public static JsonNode write(MemberResponse _object, short _version, boolean _serializeRecords) {
            if (_version < 3) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of MemberResponse");
            }
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("memberId", new TextNode(_object.memberId));
            if (_object.groupInstanceId == null) {
                _node.set("groupInstanceId", NullNode.instance);
            } else {
                _node.set("groupInstanceId", new TextNode(_object.groupInstanceId));
            }
            _node.set("errorCode", new ShortNode(_object.errorCode));
            return _node;
        }

        /** Convenience overload that always serializes record data. */
        public static JsonNode write(MemberResponse _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListGroupsRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Request data for the ListGroups API (api key 16, versions 0-4).
 *
 * Version notes visible in the schemas below: versions 0-2 carry no fields;
 * version 3 switches to the flexible (tagged-field) format; version 4 adds the
 * {@code states_filter} compact string array.
 */
public class ListGroupsRequestData implements ApiMessage {
    // Group states to filter on; empty means "return all groups" (v4+ only on the wire).
    List<String> statesFilter;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema SCHEMA_2 = SCHEMA_1;

    // v3 introduces the flexible format: an (empty) tagged-fields section.
    public static final Schema SCHEMA_3 =
        new Schema(
            TaggedFieldsSection.of(
            )
        );

    // v4 adds the states_filter compact array.
    public static final Schema SCHEMA_4 =
        new Schema(
            new Field("states_filter", new CompactArrayOf(Type.COMPACT_STRING), "The states of the groups we want to list. If empty all groups are returned with their state."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3,
        SCHEMA_4
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 4;

    /** Deserializing constructor: reads the message from {@code _readable} at {@code _version}. */
    public ListGroupsRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: empty states filter (the "list everything" request). */
    public ListGroupsRequestData() {
        this.statesFilter = new ArrayList<String>(0);
    }

    @Override
    public short apiKey() {
        return 16;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 4;
    }

    /**
     * Deserializes this message from the wire format of {@code _version}.
     * Compact arrays/strings (flexible versions) encode length as unsignedVarint(len + 1),
     * hence the {@code - 1} adjustments below.
     */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version >= 4) {
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field statesFilter was serialized as null");
            } else {
                // Sanity bound: a collection can never hold more elements than bytes remain.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<String> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    int length;
                    length = _readable.readUnsignedVarint() - 1;
                    if (length < 0) {
                        throw new RuntimeException("non-nullable field statesFilter element was serialized as null");
                    } else if (length > 0x7fff) {
                        throw new RuntimeException("string field statesFilter element had invalid length " + length);
                    } else {
                        newCollection.add(_readable.readString(length));
                    }
                }
                this.statesFilter = newCollection;
            }
        } else {
            // statesFilter does not exist before v4; fall back to the default empty list.
            this.statesFilter = new ArrayList<String>(0);
        }
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            // Flexible versions carry a trailing tagged-field section; this message
            // declares no known tags, so everything lands in _unknownTaggedFields.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Serializes this message in the wire format of {@code _version}.
     * Must be preceded by {@link #addSize} with the same {@code _cache}, which
     * populates the serialized-string cache consumed here.
     *
     * @throws UnsupportedVersionException if a field set on this object cannot be
     *         represented at {@code _version}
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 4) {
            _writable.writeUnsignedVarint(statesFilter.size() + 1);
            for (String statesFilterElement : statesFilter) {
                {
                    // UTF-8 bytes were cached by addSize(); compact strings store length + 1.
                    byte[] _stringBytes = _cache.getSerializedValue(statesFilterElement);
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                    _writable.writeByteArray(_stringBytes);
                }
            }
        } else {
            if (!this.statesFilter.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default statesFilter at version " + _version);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this message at {@code _version} and
     * caches each element's UTF-8 encoding for the subsequent {@link #write} pass.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 4) {
            {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(statesFilter.size() + 1));
                for (String statesFilterElement : statesFilter) {
                    byte[] _stringBytes = statesFilterElement.getBytes(StandardCharsets.UTF_8);
                    // Protocol strings are capped at Short.MAX_VALUE bytes.
                    if (_stringBytes.length > 0x7fff) {
                        throw new RuntimeException("'statesFilterElement' field is too long to be serialized");
                    }
                    _cache.cacheSerializedValue(statesFilterElement, _stringBytes);
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                }
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 3) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof ListGroupsRequestData)) return false;
        ListGroupsRequestData other = (ListGroupsRequestData) obj;
        if (this.statesFilter == null) {
            if (other.statesFilter != null) return false;
        } else {
            if (!this.statesFilter.equals(other.statesFilter)) return false;
        }
        // Unknown tagged fields participate in equality so round-tripped messages compare equal.
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (statesFilter == null ? 0 : statesFilter.hashCode());
        return hashCode;
    }

    /** Returns a deep copy (the statesFilter list is copied, not shared). */
    @Override
    public ListGroupsRequestData duplicate() {
        ListGroupsRequestData _duplicate = new ListGroupsRequestData();
        ArrayList<String> newStatesFilter = new ArrayList<String>(statesFilter.size());
        for (String _element : statesFilter) {
            newStatesFilter.add(_element);
        }
        _duplicate.statesFilter = newStatesFilter;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "ListGroupsRequestData("
            + "statesFilter=" + MessageUtil.deepToString(statesFilter.iterator())
            + ")";
    }

    public List<String> statesFilter() {
        return this.statesFilter;
    }

    /** Lazily materializes the unknown-tagged-fields list; never returns null. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public ListGroupsRequestData setStatesFilter(List<String> v) {
        this.statesFilter = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListGroupsRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.errors.UnsupportedVersionException; import static org.apache.kafka.common.message.ListGroupsRequestData.*; public class ListGroupsRequestDataJsonConverter { public static ListGroupsRequestData read(JsonNode _node, short _version) { ListGroupsRequestData _object = new ListGroupsRequestData(); JsonNode _statesFilterNode = _node.get("statesFilter"); if (_statesFilterNode == null) { if (_version >= 4) { throw new RuntimeException("ListGroupsRequestData: unable to locate field 'statesFilter', which is mandatory in version " + _version); } else { _object.statesFilter = new ArrayList<String>(0); } } else { if (!_statesFilterNode.isArray()) { throw new RuntimeException("ListGroupsRequestData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<String> _collection = new 
ArrayList<String>(_statesFilterNode.size()); _object.statesFilter = _collection; for (JsonNode _element : _statesFilterNode) { if (!_element.isTextual()) { throw new RuntimeException("ListGroupsRequestData element expected a string type, but got " + _node.getNodeType()); } _collection.add(_element.asText()); } } return _object; } public static JsonNode write(ListGroupsRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_version >= 4) { ArrayNode _statesFilterArray = new ArrayNode(JsonNodeFactory.instance); for (String _element : _object.statesFilter) { _statesFilterArray.add(new TextNode(_element)); } _node.set("statesFilter", _statesFilterArray); } else { if (!_object.statesFilter.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default statesFilter at version " + _version); } } return _node; } public static JsonNode write(ListGroupsRequestData _object, short _version) { return write(_object, _version, true); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListGroupsResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class ListGroupsResponseData 
implements ApiMessage { int throttleTimeMs; short errorCode; List<ListedGroup> groups; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("groups", new ArrayOf(ListedGroup.SCHEMA_0), "Each group in the response.") ); public static final Schema SCHEMA_1 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("groups", new ArrayOf(ListedGroup.SCHEMA_0), "Each group in the response.") ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("groups", new CompactArrayOf(ListedGroup.SCHEMA_3), "Each group in the response."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_4 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("groups", new CompactArrayOf(ListedGroup.SCHEMA_4), "Each group in the response."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 4; public ListGroupsResponseData(Readable _readable, short _version) { 
read(_readable, _version); } public ListGroupsResponseData() { this.throttleTimeMs = 0; this.errorCode = (short) 0; this.groups = new ArrayList<ListedGroup>(0); } @Override public short apiKey() { return 16; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { if (_version >= 1) { this.throttleTimeMs = _readable.readInt(); } else { this.throttleTimeMs = 0; } this.errorCode = _readable.readShort(); { if (_version >= 3) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field groups was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ListedGroup> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ListedGroup(_readable, _version)); } this.groups = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field groups was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ListedGroup> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ListedGroup(_readable, _version)); } this.groups = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 3) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: 
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 1) { _writable.writeInt(throttleTimeMs); } _writable.writeShort(errorCode); if (_version >= 3) { _writable.writeUnsignedVarint(groups.size() + 1); for (ListedGroup groupsElement : groups) { groupsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(groups.size()); for (ListedGroup groupsElement : groups) { groupsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 3) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 1) { _size.addBytes(4); } _size.addBytes(2); { if (_version >= 3) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(groups.size() + 1)); } else { _size.addBytes(4); } for (ListedGroup groupsElement : groups) { groupsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 3) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + 
" of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof ListGroupsResponseData)) return false; ListGroupsResponseData other = (ListGroupsResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (errorCode != other.errorCode) return false; if (this.groups == null) { if (other.groups != null) return false; } else { if (!this.groups.equals(other.groups)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (groups == null ? 0 : groups.hashCode()); return hashCode; } @Override public ListGroupsResponseData duplicate() { ListGroupsResponseData _duplicate = new ListGroupsResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; _duplicate.errorCode = errorCode; ArrayList<ListedGroup> newGroups = new ArrayList<ListedGroup>(groups.size()); for (ListedGroup _element : groups) { newGroups.add(_element.duplicate()); } _duplicate.groups = newGroups; return _duplicate; } @Override public String toString() { return "ListGroupsResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", errorCode=" + errorCode + ", groups=" + MessageUtil.deepToString(groups.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public short errorCode() { return this.errorCode; } public List<ListedGroup> groups() { return this.groups; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public ListGroupsResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public ListGroupsResponseData setErrorCode(short v) { this.errorCode = v; return this; } public ListGroupsResponseData setGroups(List<ListedGroup> v) { this.groups = v; return 
this; } public static class ListedGroup implements Message { String groupId; String protocolType; String groupState; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("group_id", Type.STRING, "The group ID."), new Field("protocol_type", Type.STRING, "The group protocol type.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("group_id", Type.COMPACT_STRING, "The group ID."), new Field("protocol_type", Type.COMPACT_STRING, "The group protocol type."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_4 = new Schema( new Field("group_id", Type.COMPACT_STRING, "The group ID."), new Field("protocol_type", Type.COMPACT_STRING, "The group protocol type."), new Field("group_state", Type.COMPACT_STRING, "The group state name."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 4; public ListedGroup(Readable _readable, short _version) { read(_readable, _version); } public ListedGroup() { this.groupId = ""; this.protocolType = ""; this.groupState = ""; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { if (_version > 4) { throw new UnsupportedVersionException("Can't read version " + _version + " of ListedGroup"); } { int length; if (_version >= 3) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field groupId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field groupId had invalid length " + length); } else { this.groupId 
= _readable.readString(length); } } { int length; if (_version >= 3) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field protocolType was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field protocolType had invalid length " + length); } else { this.protocolType = _readable.readString(length); } } if (_version >= 4) { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field groupState was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field groupState had invalid length " + length); } else { this.groupState = _readable.readString(length); } } else { this.groupState = ""; } this._unknownTaggedFields = null; if (_version >= 3) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(groupId); if (_version >= 3) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } { byte[] _stringBytes = _cache.getSerializedValue(protocolType); if (_version >= 3) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 4) { { byte[] _stringBytes = _cache.getSerializedValue(groupState); _writable.writeUnsignedVarint(_stringBytes.length + 1); 
_writable.writeByteArray(_stringBytes); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 3) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 4) { throw new UnsupportedVersionException("Can't size version " + _version + " of ListedGroup"); } { byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'groupId' field is too long to be serialized"); } _cache.cacheSerializedValue(groupId, _stringBytes); if (_version >= 3) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { byte[] _stringBytes = protocolType.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'protocolType' field is too long to be serialized"); } _cache.cacheSerializedValue(protocolType, _stringBytes); if (_version >= 3) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_version >= 4) { { byte[] _stringBytes = groupState.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'groupState' field is too long to be serialized"); } _cache.cacheSerializedValue(groupState, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for 
(RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 3) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof ListedGroup)) return false; ListedGroup other = (ListedGroup) obj; if (this.groupId == null) { if (other.groupId != null) return false; } else { if (!this.groupId.equals(other.groupId)) return false; } if (this.protocolType == null) { if (other.protocolType != null) return false; } else { if (!this.protocolType.equals(other.protocolType)) return false; } if (this.groupState == null) { if (other.groupState != null) return false; } else { if (!this.groupState.equals(other.groupState)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode()); hashCode = 31 * hashCode + (protocolType == null ? 0 : protocolType.hashCode()); hashCode = 31 * hashCode + (groupState == null ? 0 : groupState.hashCode()); return hashCode; } @Override public ListedGroup duplicate() { ListedGroup _duplicate = new ListedGroup(); _duplicate.groupId = groupId; _duplicate.protocolType = protocolType; _duplicate.groupState = groupState; return _duplicate; } @Override public String toString() { return "ListedGroup(" + "groupId=" + ((groupId == null) ? "null" : "'" + groupId.toString() + "'") + ", protocolType=" + ((protocolType == null) ? "null" : "'" + protocolType.toString() + "'") + ", groupState=" + ((groupState == null) ? 
"null" : "'" + groupState.toString() + "'") + ")"; } public String groupId() { return this.groupId; } public String protocolType() { return this.protocolType; } public String groupState() { return this.groupState; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public ListedGroup setGroupId(String v) { this.groupId = v; return this; } public ListedGroup setProtocolType(String v) { this.protocolType = v; return this; } public ListedGroup setGroupState(String v) { this.groupState = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListGroupsResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.ListGroupsResponseData.*; public class ListGroupsResponseDataJsonConverter { public static ListGroupsResponseData read(JsonNode _node, short _version) { ListGroupsResponseData _object = new ListGroupsResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { if (_version >= 1) { throw new RuntimeException("ListGroupsResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = 0; } } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "ListGroupsResponseData"); } 
JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("ListGroupsResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "ListGroupsResponseData"); } JsonNode _groupsNode = _node.get("groups"); if (_groupsNode == null) { throw new RuntimeException("ListGroupsResponseData: unable to locate field 'groups', which is mandatory in version " + _version); } else { if (!_groupsNode.isArray()) { throw new RuntimeException("ListGroupsResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<ListedGroup> _collection = new ArrayList<ListedGroup>(_groupsNode.size()); _object.groups = _collection; for (JsonNode _element : _groupsNode) { _collection.add(ListedGroupJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(ListGroupsResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_version >= 1) { _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); } _node.set("errorCode", new ShortNode(_object.errorCode)); ArrayNode _groupsArray = new ArrayNode(JsonNodeFactory.instance); for (ListedGroup _element : _object.groups) { _groupsArray.add(ListedGroupJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("groups", _groupsArray); return _node; } public static JsonNode write(ListGroupsResponseData _object, short _version) { return write(_object, _version, true); } public static class ListedGroupJsonConverter { public static ListedGroup read(JsonNode _node, short _version) { ListedGroup _object = new ListedGroup(); JsonNode _groupIdNode = _node.get("groupId"); if (_groupIdNode == null) { throw new RuntimeException("ListedGroup: unable to locate field 'groupId', which is mandatory in version " + _version); } else { if (!_groupIdNode.isTextual()) { throw new 
RuntimeException("ListedGroup expected a string type, but got " + _node.getNodeType()); } _object.groupId = _groupIdNode.asText(); } JsonNode _protocolTypeNode = _node.get("protocolType"); if (_protocolTypeNode == null) { throw new RuntimeException("ListedGroup: unable to locate field 'protocolType', which is mandatory in version " + _version); } else { if (!_protocolTypeNode.isTextual()) { throw new RuntimeException("ListedGroup expected a string type, but got " + _node.getNodeType()); } _object.protocolType = _protocolTypeNode.asText(); } JsonNode _groupStateNode = _node.get("groupState"); if (_groupStateNode == null) { if (_version >= 4) { throw new RuntimeException("ListedGroup: unable to locate field 'groupState', which is mandatory in version " + _version); } else { _object.groupState = ""; } } else { if (!_groupStateNode.isTextual()) { throw new RuntimeException("ListedGroup expected a string type, but got " + _node.getNodeType()); } _object.groupState = _groupStateNode.asText(); } return _object; } public static JsonNode write(ListedGroup _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("groupId", new TextNode(_object.groupId)); _node.set("protocolType", new TextNode(_object.protocolType)); if (_version >= 4) { _node.set("groupState", new TextNode(_object.groupState)); } return _node; } public static JsonNode write(ListedGroup _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListOffsetsRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class ListOffsetsRequestData 
implements ApiMessage { int replicaId; byte isolationLevel; List<ListOffsetsTopic> topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the requestor, or -1 if this request is being made by a normal consumer."), new Field("topics", new ArrayOf(ListOffsetsTopic.SCHEMA_0), "Each topic in the request.") ); public static final Schema SCHEMA_1 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the requestor, or -1 if this request is being made by a normal consumer."), new Field("topics", new ArrayOf(ListOffsetsTopic.SCHEMA_1), "Each topic in the request.") ); public static final Schema SCHEMA_2 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the requestor, or -1 if this request is being made by a normal consumer."), new Field("isolation_level", Type.INT8, "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records"), new Field("topics", new ArrayOf(ListOffsetsTopic.SCHEMA_1), "Each topic in the request.") ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the requestor, or -1 if this request is being made by a normal consumer."), new Field("isolation_level", Type.INT8, "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. 
With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records"), new Field("topics", new ArrayOf(ListOffsetsTopic.SCHEMA_4), "Each topic in the request.") ); public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the requestor, or -1 if this request is being made by a normal consumer."), new Field("isolation_level", Type.INT8, "This setting controls the visibility of transactional records. Using READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED transactional records are visible. To be more concrete, READ_COMMITTED returns all data from offsets smaller than the current LSO (last stable offset), and enables the inclusion of the list of aborted transactions in the result, which allows consumers to discard ABORTED transactional records"), new Field("topics", new CompactArrayOf(ListOffsetsTopic.SCHEMA_6), "Each topic in the request."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 8; public ListOffsetsRequestData(Readable _readable, short _version) { read(_readable, _version); } public ListOffsetsRequestData() { this.replicaId = 0; this.isolationLevel = (byte) 0; this.topics = new ArrayList<ListOffsetsTopic>(0); } @Override public short apiKey() { return 2; 
} @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; } @Override public void read(Readable _readable, short _version) { this.replicaId = _readable.readInt(); if (_version >= 2) { this.isolationLevel = _readable.readByte(); } else { this.isolationLevel = (byte) 0; } { if (_version >= 6) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ListOffsetsTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ListOffsetsTopic(_readable, _version)); } this.topics = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ListOffsetsTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ListOffsetsTopic(_readable, _version)); } this.topics = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 6) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache 
_cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(replicaId); if (_version >= 2) { _writable.writeByte(isolationLevel); } else { if (this.isolationLevel != (byte) 0) { throw new UnsupportedVersionException("Attempted to write a non-default isolationLevel at version " + _version); } } if (_version >= 6) { _writable.writeUnsignedVarint(topics.size() + 1); for (ListOffsetsTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(topics.size()); for (ListOffsetsTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 6) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); if (_version >= 2) { _size.addBytes(1); } { if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); } else { _size.addBytes(4); } for (ListOffsetsTopic topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this 
message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof ListOffsetsRequestData)) return false; ListOffsetsRequestData other = (ListOffsetsRequestData) obj; if (replicaId != other.replicaId) return false; if (isolationLevel != other.isolationLevel) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + replicaId; hashCode = 31 * hashCode + isolationLevel; hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); return hashCode; } @Override public ListOffsetsRequestData duplicate() { ListOffsetsRequestData _duplicate = new ListOffsetsRequestData(); _duplicate.replicaId = replicaId; _duplicate.isolationLevel = isolationLevel; ArrayList<ListOffsetsTopic> newTopics = new ArrayList<ListOffsetsTopic>(topics.size()); for (ListOffsetsTopic _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "ListOffsetsRequestData(" + "replicaId=" + replicaId + ", isolationLevel=" + isolationLevel + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public int replicaId() { return this.replicaId; } public byte isolationLevel() { return this.isolationLevel; } public List<ListOffsetsTopic> topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public ListOffsetsRequestData setReplicaId(int v) { this.replicaId = v; return this; } public ListOffsetsRequestData setIsolationLevel(byte v) { this.isolationLevel = v; return this; } public ListOffsetsRequestData setTopics(List<ListOffsetsTopic> v) { this.topics = 
v; return this; } public static class ListOffsetsTopic implements Message { String name; List<ListOffsetsPartition> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(ListOffsetsPartition.SCHEMA_0), "Each partition in the request.") ); public static final Schema SCHEMA_1 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(ListOffsetsPartition.SCHEMA_1), "Each partition in the request.") ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(ListOffsetsPartition.SCHEMA_4), "Each partition in the request.") ); public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = new Schema( new Field("name", Type.COMPACT_STRING, "The topic name."), new Field("partitions", new CompactArrayOf(ListOffsetsPartition.SCHEMA_6), "Each partition in the request."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 8; public ListOffsetsTopic(Readable _readable, short _version) { read(_readable, _version); } public ListOffsetsTopic() { this.name = ""; this.partitions = new ArrayList<ListOffsetsPartition>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; } @Override public void read(Readable _readable, short _version) { if (_version > 8) { throw new UnsupportedVersionException("Can't read 
version " + _version + " of ListOffsetsTopic"); } { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { if (_version >= 6) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ListOffsetsPartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ListOffsetsPartition(_readable, _version)); } this.partitions = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ListOffsetsPartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ListOffsetsPartition(_readable, _version)); } this.partitions = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 6) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = 
_readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 6) { _writable.writeUnsignedVarint(partitions.size() + 1); for (ListOffsetsPartition partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitions.size()); for (ListOffsetsPartition partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 6) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 8) { throw new UnsupportedVersionException("Can't size version " + _version + " of ListOffsetsTopic"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); if (_version >= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { 
_size.addBytes(4); } for (ListOffsetsPartition partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof ListOffsetsTopic)) return false; ListOffsetsTopic other = (ListOffsetsTopic) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode()); return hashCode; } @Override public ListOffsetsTopic duplicate() { ListOffsetsTopic _duplicate = new ListOffsetsTopic(); _duplicate.name = name; ArrayList<ListOffsetsPartition> newPartitions = new ArrayList<ListOffsetsPartition>(partitions.size()); for (ListOffsetsPartition _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "ListOffsetsTopic(" + "name=" + ((name == null) ? 
"null" : "'" + name.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String name() { return this.name; } public List<ListOffsetsPartition> partitions() { return this.partitions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public ListOffsetsTopic setName(String v) { this.name = v; return this; } public ListOffsetsTopic setPartitions(List<ListOffsetsPartition> v) { this.partitions = v; return this; } } public static class ListOffsetsPartition implements Message { int partitionIndex; int currentLeaderEpoch; long timestamp; int maxNumOffsets; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("timestamp", Type.INT64, "The current timestamp."), new Field("max_num_offsets", Type.INT32, "The maximum number of offsets to report.") ); public static final Schema SCHEMA_1 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("timestamp", Type.INT64, "The current timestamp.") ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("current_leader_epoch", Type.INT32, "The current leader epoch."), new Field("timestamp", Type.INT64, "The current timestamp.") ); public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("current_leader_epoch", Type.INT32, "The current leader epoch."), new Field("timestamp", Type.INT64, "The current timestamp."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = 
SCHEMA_7; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 8; public ListOffsetsPartition(Readable _readable, short _version) { read(_readable, _version); } public ListOffsetsPartition() { this.partitionIndex = 0; this.currentLeaderEpoch = -1; this.timestamp = 0L; this.maxNumOffsets = 1; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; } @Override public void read(Readable _readable, short _version) { if (_version > 8) { throw new UnsupportedVersionException("Can't read version " + _version + " of ListOffsetsPartition"); } this.partitionIndex = _readable.readInt(); if (_version >= 4) { this.currentLeaderEpoch = _readable.readInt(); } else { this.currentLeaderEpoch = -1; } this.timestamp = _readable.readLong(); if (_version <= 0) { this.maxNumOffsets = _readable.readInt(); } else { this.maxNumOffsets = 1; } this._unknownTaggedFields = null; if (_version >= 6) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(partitionIndex); if (_version >= 4) { _writable.writeInt(currentLeaderEpoch); } _writable.writeLong(timestamp); if (_version <= 0) { _writable.writeInt(maxNumOffsets); } else { if (this.maxNumOffsets != 1) { throw new UnsupportedVersionException("Attempted to write a non-default maxNumOffsets at version " + _version); } } RawTaggedFieldWriter _rawWriter = 
RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 6) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 8) { throw new UnsupportedVersionException("Can't size version " + _version + " of ListOffsetsPartition"); } _size.addBytes(4); if (_version >= 4) { _size.addBytes(4); } _size.addBytes(8); if (_version <= 0) { _size.addBytes(4); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof ListOffsetsPartition)) return false; ListOffsetsPartition other = (ListOffsetsPartition) obj; if (partitionIndex != other.partitionIndex) return false; if (currentLeaderEpoch != other.currentLeaderEpoch) return false; if (timestamp != other.timestamp) return false; if (maxNumOffsets != other.maxNumOffsets) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partitionIndex; hashCode = 31 * hashCode + currentLeaderEpoch; hashCode = 31 * hashCode + 
((int) (timestamp >> 32) ^ (int) timestamp); hashCode = 31 * hashCode + maxNumOffsets; return hashCode; } @Override public ListOffsetsPartition duplicate() { ListOffsetsPartition _duplicate = new ListOffsetsPartition(); _duplicate.partitionIndex = partitionIndex; _duplicate.currentLeaderEpoch = currentLeaderEpoch; _duplicate.timestamp = timestamp; _duplicate.maxNumOffsets = maxNumOffsets; return _duplicate; } @Override public String toString() { return "ListOffsetsPartition(" + "partitionIndex=" + partitionIndex + ", currentLeaderEpoch=" + currentLeaderEpoch + ", timestamp=" + timestamp + ", maxNumOffsets=" + maxNumOffsets + ")"; } public int partitionIndex() { return this.partitionIndex; } public int currentLeaderEpoch() { return this.currentLeaderEpoch; } public long timestamp() { return this.timestamp; } public int maxNumOffsets() { return this.maxNumOffsets; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public ListOffsetsPartition setPartitionIndex(int v) { this.partitionIndex = v; return this; } public ListOffsetsPartition setCurrentLeaderEpoch(int v) { this.currentLeaderEpoch = v; return this; } public ListOffsetsPartition setTimestamp(long v) { this.timestamp = v; return this; } public ListOffsetsPartition setMaxNumOffsets(int v) { this.maxNumOffsets = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListOffsetsRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.LongNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.ListOffsetsRequestData.*; public class ListOffsetsRequestDataJsonConverter { public static ListOffsetsRequestData read(JsonNode _node, short _version) { ListOffsetsRequestData _object = new ListOffsetsRequestData(); JsonNode _replicaIdNode = _node.get("replicaId"); if (_replicaIdNode == null) { throw new RuntimeException("ListOffsetsRequestData: unable to locate field 'replicaId', which is mandatory in version " + _version); } else { _object.replicaId = MessageUtil.jsonNodeToInt(_replicaIdNode, 
"ListOffsetsRequestData"); } JsonNode _isolationLevelNode = _node.get("isolationLevel"); if (_isolationLevelNode == null) { if (_version >= 2) { throw new RuntimeException("ListOffsetsRequestData: unable to locate field 'isolationLevel', which is mandatory in version " + _version); } else { _object.isolationLevel = (byte) 0; } } else { _object.isolationLevel = MessageUtil.jsonNodeToByte(_isolationLevelNode, "ListOffsetsRequestData"); } JsonNode _topicsNode = _node.get("topics"); if (_topicsNode == null) { throw new RuntimeException("ListOffsetsRequestData: unable to locate field 'topics', which is mandatory in version " + _version); } else { if (!_topicsNode.isArray()) { throw new RuntimeException("ListOffsetsRequestData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<ListOffsetsTopic> _collection = new ArrayList<ListOffsetsTopic>(_topicsNode.size()); _object.topics = _collection; for (JsonNode _element : _topicsNode) { _collection.add(ListOffsetsTopicJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(ListOffsetsRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("replicaId", new IntNode(_object.replicaId)); if (_version >= 2) { _node.set("isolationLevel", new ShortNode(_object.isolationLevel)); } else { if (_object.isolationLevel != (byte) 0) { throw new UnsupportedVersionException("Attempted to write a non-default isolationLevel at version " + _version); } } ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance); for (ListOffsetsTopic _element : _object.topics) { _topicsArray.add(ListOffsetsTopicJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topics", _topicsArray); return _node; } public static JsonNode write(ListOffsetsRequestData _object, short _version) { return write(_object, _version, true); } public static class ListOffsetsPartitionJsonConverter { public static 
ListOffsetsPartition read(JsonNode _node, short _version) { ListOffsetsPartition _object = new ListOffsetsPartition(); JsonNode _partitionIndexNode = _node.get("partitionIndex"); if (_partitionIndexNode == null) { throw new RuntimeException("ListOffsetsPartition: unable to locate field 'partitionIndex', which is mandatory in version " + _version); } else { _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "ListOffsetsPartition"); } JsonNode _currentLeaderEpochNode = _node.get("currentLeaderEpoch"); if (_currentLeaderEpochNode == null) { if (_version >= 4) { throw new RuntimeException("ListOffsetsPartition: unable to locate field 'currentLeaderEpoch', which is mandatory in version " + _version); } else { _object.currentLeaderEpoch = -1; } } else { _object.currentLeaderEpoch = MessageUtil.jsonNodeToInt(_currentLeaderEpochNode, "ListOffsetsPartition"); } JsonNode _timestampNode = _node.get("timestamp"); if (_timestampNode == null) { throw new RuntimeException("ListOffsetsPartition: unable to locate field 'timestamp', which is mandatory in version " + _version); } else { _object.timestamp = MessageUtil.jsonNodeToLong(_timestampNode, "ListOffsetsPartition"); } JsonNode _maxNumOffsetsNode = _node.get("maxNumOffsets"); if (_maxNumOffsetsNode == null) { if (_version <= 0) { throw new RuntimeException("ListOffsetsPartition: unable to locate field 'maxNumOffsets', which is mandatory in version " + _version); } else { _object.maxNumOffsets = 1; } } else { _object.maxNumOffsets = MessageUtil.jsonNodeToInt(_maxNumOffsetsNode, "ListOffsetsPartition"); } return _object; } public static JsonNode write(ListOffsetsPartition _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("partitionIndex", new IntNode(_object.partitionIndex)); if (_version >= 4) { _node.set("currentLeaderEpoch", new IntNode(_object.currentLeaderEpoch)); } _node.set("timestamp", new LongNode(_object.timestamp)); if 
(_version <= 0) { _node.set("maxNumOffsets", new IntNode(_object.maxNumOffsets)); } else { if (_object.maxNumOffsets != 1) { throw new UnsupportedVersionException("Attempted to write a non-default maxNumOffsets at version " + _version); } } return _node; } public static JsonNode write(ListOffsetsPartition _object, short _version) { return write(_object, _version, true); } } public static class ListOffsetsTopicJsonConverter { public static ListOffsetsTopic read(JsonNode _node, short _version) { ListOffsetsTopic _object = new ListOffsetsTopic(); JsonNode _nameNode = _node.get("name"); if (_nameNode == null) { throw new RuntimeException("ListOffsetsTopic: unable to locate field 'name', which is mandatory in version " + _version); } else { if (!_nameNode.isTextual()) { throw new RuntimeException("ListOffsetsTopic expected a string type, but got " + _node.getNodeType()); } _object.name = _nameNode.asText(); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("ListOffsetsTopic: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new RuntimeException("ListOffsetsTopic expected a JSON array, but got " + _node.getNodeType()); } ArrayList<ListOffsetsPartition> _collection = new ArrayList<ListOffsetsPartition>(_partitionsNode.size()); _object.partitions = _collection; for (JsonNode _element : _partitionsNode) { _collection.add(ListOffsetsPartitionJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(ListOffsetsTopic _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("name", new TextNode(_object.name)); ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (ListOffsetsPartition _element : _object.partitions) { _partitionsArray.add(ListOffsetsPartitionJsonConverter.write(_element, _version, _serializeRecords)); 
} _node.set("partitions", _partitionsArray); return _node; } public static JsonNode write(ListOffsetsTopic _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListOffsetsResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class ListOffsetsResponseData 
implements ApiMessage { int throttleTimeMs; List<ListOffsetsTopicResponse> topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topics", new ArrayOf(ListOffsetsTopicResponse.SCHEMA_0), "Each topic in the response.") ); public static final Schema SCHEMA_1 = new Schema( new Field("topics", new ArrayOf(ListOffsetsTopicResponse.SCHEMA_1), "Each topic in the response.") ); public static final Schema SCHEMA_2 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("topics", new ArrayOf(ListOffsetsTopicResponse.SCHEMA_1), "Each topic in the response.") ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("topics", new ArrayOf(ListOffsetsTopicResponse.SCHEMA_4), "Each topic in the response.") ); public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("topics", new CompactArrayOf(ListOffsetsTopicResponse.SCHEMA_6), "Each topic in the response."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 8; public ListOffsetsResponseData(Readable _readable, short 
_version) { read(_readable, _version); } public ListOffsetsResponseData() { this.throttleTimeMs = 0; this.topics = new ArrayList<ListOffsetsTopicResponse>(0); } @Override public short apiKey() { return 2; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; } @Override public void read(Readable _readable, short _version) { if (_version >= 2) { this.throttleTimeMs = _readable.readInt(); } else { this.throttleTimeMs = 0; } { if (_version >= 6) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ListOffsetsTopicResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ListOffsetsTopicResponse(_readable, _version)); } this.topics = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ListOffsetsTopicResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ListOffsetsTopicResponse(_readable, _version)); } this.topics = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 6) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: 
this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 2) { _writable.writeInt(throttleTimeMs); } if (_version >= 6) { _writable.writeUnsignedVarint(topics.size() + 1); for (ListOffsetsTopicResponse topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(topics.size()); for (ListOffsetsTopicResponse topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 6) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 2) { _size.addBytes(4); } { if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); } else { _size.addBytes(4); } for (ListOffsetsTopicResponse topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this 
message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof ListOffsetsResponseData)) return false; ListOffsetsResponseData other = (ListOffsetsResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); return hashCode; } @Override public ListOffsetsResponseData duplicate() { ListOffsetsResponseData _duplicate = new ListOffsetsResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; ArrayList<ListOffsetsTopicResponse> newTopics = new ArrayList<ListOffsetsTopicResponse>(topics.size()); for (ListOffsetsTopicResponse _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "ListOffsetsResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public List<ListOffsetsTopicResponse> topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public ListOffsetsResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public ListOffsetsResponseData setTopics(List<ListOffsetsTopicResponse> v) { this.topics = v; return this; } public static class ListOffsetsTopicResponse implements Message { String name; List<ListOffsetsPartitionResponse> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new 
Schema( new Field("name", Type.STRING, "The topic name"), new Field("partitions", new ArrayOf(ListOffsetsPartitionResponse.SCHEMA_0), "Each partition in the response.") ); public static final Schema SCHEMA_1 = new Schema( new Field("name", Type.STRING, "The topic name"), new Field("partitions", new ArrayOf(ListOffsetsPartitionResponse.SCHEMA_1), "Each partition in the response.") ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("name", Type.STRING, "The topic name"), new Field("partitions", new ArrayOf(ListOffsetsPartitionResponse.SCHEMA_4), "Each partition in the response.") ); public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = new Schema( new Field("name", Type.COMPACT_STRING, "The topic name"), new Field("partitions", new CompactArrayOf(ListOffsetsPartitionResponse.SCHEMA_6), "Each partition in the response."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 8; public ListOffsetsTopicResponse(Readable _readable, short _version) { read(_readable, _version); } public ListOffsetsTopicResponse() { this.name = ""; this.partitions = new ArrayList<ListOffsetsPartitionResponse>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; } @Override public void read(Readable _readable, short _version) { if (_version > 8) { throw new UnsupportedVersionException("Can't read version " + _version + " of ListOffsetsTopicResponse"); } { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = 
_readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { if (_version >= 6) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ListOffsetsPartitionResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ListOffsetsPartitionResponse(_readable, _version)); } this.partitions = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ListOffsetsPartitionResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ListOffsetsPartitionResponse(_readable, _version)); } this.partitions = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 6) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, 
ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 6) { _writable.writeUnsignedVarint(partitions.size() + 1); for (ListOffsetsPartitionResponse partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitions.size()); for (ListOffsetsPartitionResponse partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 6) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 8) { throw new UnsupportedVersionException("Can't size version " + _version + " of ListOffsetsTopicResponse"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); if (_version >= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { _size.addBytes(4); } for (ListOffsetsPartitionResponse partitionsElement : partitions) { 
partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof ListOffsetsTopicResponse)) return false; ListOffsetsTopicResponse other = (ListOffsetsTopicResponse) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode()); return hashCode; } @Override public ListOffsetsTopicResponse duplicate() { ListOffsetsTopicResponse _duplicate = new ListOffsetsTopicResponse(); _duplicate.name = name; ArrayList<ListOffsetsPartitionResponse> newPartitions = new ArrayList<ListOffsetsPartitionResponse>(partitions.size()); for (ListOffsetsPartitionResponse _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "ListOffsetsTopicResponse(" + "name=" + ((name == null) ? 
"null" : "'" + name.toString() + "'") +
            ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")";
        }

        /** @return the topic name. */
        public String name() {
            return this.name;
        }

        /** @return the per-partition responses for this topic. */
        public List<ListOffsetsPartitionResponse> partitions() {
            return this.partitions;
        }

        /**
         * Returns tagged fields that were present on the wire but not recognized by this
         * schema version; the backing list is allocated lazily on first access.
         */
        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        /** Fluent setter for the topic name. */
        public ListOffsetsTopicResponse setName(String v) {
            this.name = v;
            return this;
        }

        /** Fluent setter for the per-partition responses. */
        public ListOffsetsTopicResponse setPartitions(List<ListOffsetsPartitionResponse> v) {
            this.partitions = v;
            return this;
        }
    }

    /**
     * Per-partition payload of a ListOffsets response, supporting wire versions 0-8.
     * Version 0 carried a list of offsets ({@code oldStyleOffsets}); version 1+ replaced it
     * with a single {@code timestamp}/{@code offset} pair; version 4+ added
     * {@code leaderEpoch}; version 6+ switched to flexible (tagged-field) encoding.
     */
    public static class ListOffsetsPartitionResponse implements Message {
        // Wire fields; defaults (-1 / empty) mark "not present in this version".
        int partitionIndex;
        short errorCode;
        List<Long> oldStyleOffsets;
        long timestamp;
        long offset;
        int leaderEpoch;
        // Tagged fields read from the wire whose tags this schema does not know.
        private List<RawTaggedField> _unknownTaggedFields;

        // v0: old-style result, a plain array of offsets per partition.
        public static final Schema SCHEMA_0 = new Schema(
            new Field("partition_index", Type.INT32, "The partition index."),
            new Field("error_code", Type.INT16, "The partition error code, or 0 if there was no error."),
            new Field("old_style_offsets", new ArrayOf(Type.INT64), "The result offsets.")
        );

        // v1: single timestamp/offset pair replaces the offset array.
        public static final Schema SCHEMA_1 = new Schema(
            new Field("partition_index", Type.INT32, "The partition index."),
            new Field("error_code", Type.INT16, "The partition error code, or 0 if there was no error."),
            new Field("timestamp", Type.INT64, "The timestamp associated with the returned offset."),
            new Field("offset", Type.INT64, "The returned offset.")
        );

        public static final Schema SCHEMA_2 = SCHEMA_1;

        public static final Schema SCHEMA_3 = SCHEMA_2;

        // v4: adds the leader epoch of the partition at the time of the response.
        public static final Schema SCHEMA_4 = new Schema(
            new Field("partition_index", Type.INT32, "The partition index."),
            new Field("error_code", Type.INT16, "The partition error code, or 0 if there was no error."),
            new Field("timestamp", Type.INT64, "The timestamp associated with the returned offset."),
            new Field("offset", Type.INT64, "The returned offset."),
            new Field("leader_epoch", Type.INT32, "")
        );

        public static final Schema SCHEMA_5 = SCHEMA_4;

        // v6: first flexible version - appends the tagged-fields section.
        public static final Schema SCHEMA_6 = new Schema(
            new Field("partition_index", Type.INT32, "The partition index."),
            new Field("error_code", Type.INT16, "The partition error code, or 0 if there was no error."),
            new Field("timestamp", Type.INT64, "The timestamp associated with the returned offset."),
            new Field("offset", Type.INT64, "The returned offset."),
            new Field("leader_epoch", Type.INT32, ""),
            TaggedFieldsSection.of(
            )
        );

        public static final Schema SCHEMA_7 = SCHEMA_6;

        public static final Schema SCHEMA_8 = SCHEMA_7;

        // Indexed by version number.
        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1,
            SCHEMA_2,
            SCHEMA_3,
            SCHEMA_4,
            SCHEMA_5,
            SCHEMA_6,
            SCHEMA_7,
            SCHEMA_8
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 8;

        /** Deserializing constructor: reads the message at the given version from {@code _readable}. */
        public ListOffsetsPartitionResponse(Readable _readable, short _version) {
            read(_readable, _version);
        }

        /** Default constructor: initializes every field to its schema default. */
        public ListOffsetsPartitionResponse() {
            this.partitionIndex = 0;
            this.errorCode = (short) 0;
            this.oldStyleOffsets = new ArrayList<Long>(0);
            this.timestamp = -1L;
            this.offset = -1L;
            this.leaderEpoch = -1;
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 8;
        }

        /**
         * Reads this message from {@code _readable} using the layout of {@code _version}.
         * Fields absent in the requested version are reset to their defaults.
         *
         * @throws UnsupportedVersionException if {@code _version} is newer than 8.
         * @throws RuntimeException on malformed data (null non-nullable array, or a declared
         *         array length larger than the bytes remaining).
         */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 8) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of ListOffsetsPartitionResponse");
            }
            this.partitionIndex = _readable.readInt();
            this.errorCode = _readable.readShort();
            if (_version <= 0) {
                // v0 only: fixed-length (INT32 count) array of INT64 offsets.
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field oldStyleOffsets was serialized as null");
                } else {
                    // Guard against bogus lengths before allocating.
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<Long> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(_readable.readLong());
                    }
                    this.oldStyleOffsets = newCollection;
                }
            } else {
                this.oldStyleOffsets = new ArrayList<Long>(0);
            }
            if (_version >= 1) {
                this.timestamp = _readable.readLong();
            } else {
                this.timestamp = -1L;
            }
            if (_version >= 1) {
                this.offset = _readable.readLong();
            } else {
                this.offset = -1L;
            }
            if (_version >= 4) {
                this.leaderEpoch = _readable.readInt();
            } else {
                this.leaderEpoch = -1;
            }
            this._unknownTaggedFields = null;
            if (_version >= 6) {
                // Flexible versions carry a varint-counted tagged-field section; unknown
                // tags are preserved so they round-trip on re-serialization.
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }

        /**
         * Writes this message to {@code _writable} using the layout of {@code _version}.
         *
         * @throws UnsupportedVersionException if a field holding a non-default value cannot
         *         be represented at {@code _version}.
         */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            _writable.writeInt(partitionIndex);
            _writable.writeShort(errorCode);
            if (_version <= 0) {
                _writable.writeInt(oldStyleOffsets.size());
                for (Long oldStyleOffsetsElement : oldStyleOffsets) {
                    _writable.writeLong(oldStyleOffsetsElement);
                }
            } else {
                // v1+ has no old-style offsets; refuse to drop caller-supplied data silently.
                if (!this.oldStyleOffsets.isEmpty()) {
                    throw new UnsupportedVersionException("Attempted to write a non-default oldStyleOffsets at version " + _version);
                }
            }
            if (_version >= 1) {
                _writable.writeLong(timestamp);
            } else {
                if (this.timestamp != -1L) {
                    throw new UnsupportedVersionException("Attempted to write a non-default timestamp at version " + _version);
                }
            }
            if (_version >= 1) {
                _writable.writeLong(offset);
            } else {
                if (this.offset != -1L) {
                    throw new UnsupportedVersionException("Attempted to write a non-default offset at version " + _version);
                }
            }
            if (_version >= 4) {
                _writable.writeInt(leaderEpoch);
            } else {
                if (this.leaderEpoch != -1) {
                    throw new UnsupportedVersionException("Attempted to write a non-default leaderEpoch at version " + _version);
                }
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 6) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        /**
         * Accumulates into {@code _size} the exact number of bytes {@link #write} would emit
         * for {@code _version}. Must mirror write()'s branching precisely.
         */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 8) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of ListOffsetsPartitionResponse");
            }
            _size.addBytes(4);
            _size.addBytes(2);
            if (_version <= 0) {
                {
                    // INT32 length prefix plus 8 bytes per offset.
                    _size.addBytes(4);
                    _size.addBytes(oldStyleOffsets.size() * 8);
                }
            }
            if (_version >= 1) {
                _size.addBytes(8);
            }
            if (_version >= 1) {
                _size.addBytes(8);
            }
            if (_version >= 4) {
                _size.addBytes(4);
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_version >= 6) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        /** Field-by-field equality, including unknown tagged fields. */
        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof ListOffsetsPartitionResponse)) return false;
            ListOffsetsPartitionResponse other = (ListOffsetsPartitionResponse) obj;
            if (partitionIndex != other.partitionIndex) return false;
            if (errorCode != other.errorCode) return false;
            if (this.oldStyleOffsets == null) {
                if (other.oldStyleOffsets != null) return false;
            } else {
                if (!this.oldStyleOffsets.equals(other.oldStyleOffsets)) return false;
            }
            if (timestamp != other.timestamp) return false;
            if (offset != other.offset) return false;
            if (leaderEpoch != other.leaderEpoch) return false;
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        /** Hash over the same fields equals() compares (longs folded high-word XOR low-word). */
        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + partitionIndex;
            hashCode = 31 * hashCode + errorCode;
            hashCode = 31 * hashCode + (oldStyleOffsets == null ? 0 : oldStyleOffsets.hashCode());
            hashCode = 31 * hashCode + ((int) (timestamp >> 32) ^ (int) timestamp);
            hashCode = 31 * hashCode + ((int) (offset >> 32) ^ (int) offset);
            hashCode = 31 * hashCode + leaderEpoch;
            return hashCode;
        }

        /**
         * Deep copy: the offsets list is duplicated so the copy is independent.
         * NOTE(review): unknown tagged fields are not copied here - matches the generator's output.
         */
        @Override
        public ListOffsetsPartitionResponse duplicate() {
            ListOffsetsPartitionResponse _duplicate = new ListOffsetsPartitionResponse();
            _duplicate.partitionIndex = partitionIndex;
            _duplicate.errorCode = errorCode;
            ArrayList<Long> newOldStyleOffsets = new ArrayList<Long>(oldStyleOffsets.size());
            for (Long _element : oldStyleOffsets) {
                newOldStyleOffsets.add(_element);
            }
            _duplicate.oldStyleOffsets = newOldStyleOffsets;
            _duplicate.timestamp = timestamp;
            _duplicate.offset = offset;
            _duplicate.leaderEpoch = leaderEpoch;
            return _duplicate;
        }

        /** Human-readable dump of all wire fields (debugging aid). */
        @Override
        public String toString() {
            return "ListOffsetsPartitionResponse("
                + "partitionIndex=" + partitionIndex
                + ", errorCode=" + errorCode
                + ", oldStyleOffsets=" + MessageUtil.deepToString(oldStyleOffsets.iterator())
                + ", timestamp=" + timestamp
                + ", offset=" + offset
                + ", leaderEpoch=" + leaderEpoch
                + ")";
        }

        /** @return the partition index. */
        public int partitionIndex() {
            return this.partitionIndex;
        }

        /** @return the partition error code, or 0 if there was no error. */
        public short errorCode() {
            return this.errorCode;
        }

        /** @return the v0-only result offsets (empty for v1+). */
        public List<Long> oldStyleOffsets() {
            return this.oldStyleOffsets;
        }

        /** @return the timestamp associated with the returned offset (-1 if absent). */
        public long timestamp() {
            return this.timestamp;
        }

        /** @return the returned offset (-1 if absent). */
        public long offset() {
            return this.offset;
        }

        /** @return the leader epoch (-1 if absent). */
        public int leaderEpoch() {
            return this.leaderEpoch;
        }

        /** Lazily-allocated list of unrecognized tagged fields. */
        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        /** Fluent setter for the partition index. */
        public ListOffsetsPartitionResponse setPartitionIndex(int v) {
            this.partitionIndex = v;
            return this;
        }

        /** Fluent setter for the error code. */
        public ListOffsetsPartitionResponse setErrorCode(short v) {
            this.errorCode = v;
            return this;
        }

        /** Fluent setter for the v0-only offsets list. */
        public ListOffsetsPartitionResponse setOldStyleOffsets(List<Long> v) {
            this.oldStyleOffsets = v;
            return this;
        }

        /** Fluent setter for the timestamp. */
        public ListOffsetsPartitionResponse setTimestamp(long v) {
            this.timestamp = v;
            return this;
        }

        /** Fluent setter for the offset. */
        public ListOffsetsPartitionResponse setOffset(long v) {
            this.offset = v;
            return this;
        }

        /** Fluent setter for the leader epoch. */
        public ListOffsetsPartitionResponse setLeaderEpoch(int v) {
            this.leaderEpoch = v;
            return this;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListOffsetsResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.ListOffsetsResponseData.*;

/**
 * Converts {@link ListOffsetsResponseData} to and from Jackson JSON trees, applying the
 * same per-version field gating as the binary serializer: a field that is mandatory at
 * {@code _version} must be present when reading, and a non-default value for a field the
 * version does not carry is rejected when writing.
 */
public class ListOffsetsResponseDataJsonConverter {
    /**
     * Builds a {@link ListOffsetsResponseData} from {@code _node}.
     *
     * @throws RuntimeException if a field mandatory at {@code _version} is missing or has
     *         the wrong JSON type.
     */
    public static ListOffsetsResponseData read(JsonNode _node, short _version) {
        ListOffsetsResponseData _object = new ListOffsetsResponseData();
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            // throttleTimeMs only exists from v2; older versions default it to 0.
            if (_version >= 2) {
                throw new RuntimeException("ListOffsetsResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
            } else {
                _object.throttleTimeMs = 0;
            }
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "ListOffsetsResponseData");
        }
        JsonNode _topicsNode = _node.get("topics");
        if (_topicsNode == null) {
            throw new RuntimeException("ListOffsetsResponseData: unable to locate field 'topics', which is mandatory in version " + _version);
        } else {
            if (!_topicsNode.isArray()) {
                throw new RuntimeException("ListOffsetsResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<ListOffsetsTopicResponse> _collection = new ArrayList<ListOffsetsTopicResponse>(_topicsNode.size());
            _object.topics = _collection;
            for (JsonNode _element : _topicsNode) {
                _collection.add(ListOffsetsTopicResponseJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes {@code _object} to a JSON tree using the field layout of {@code _version}.
     * {@code _serializeRecords} is threaded through to nested converters.
     */
    public static JsonNode write(ListOffsetsResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        if (_version >= 2) {
            _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        }
        ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
        for (ListOffsetsTopicResponse _element : _object.topics) {
            _topicsArray.add(ListOffsetsTopicResponseJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("topics", _topicsArray);
        return _node;
    }

    /** Convenience overload with record serialization enabled. */
    public static JsonNode write(ListOffsetsResponseData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the nested {@code ListOffsetsPartitionResponse} struct. */
    public static class ListOffsetsPartitionResponseJsonConverter {
        /**
         * Builds a {@code ListOffsetsPartitionResponse} from {@code _node}, enforcing
         * per-version presence: oldStyleOffsets is mandatory only at v0, timestamp/offset
         * at v1+, leaderEpoch at v4+; absent optional fields take their defaults.
         */
        public static ListOffsetsPartitionResponse read(JsonNode _node, short _version) {
            ListOffsetsPartitionResponse _object = new ListOffsetsPartitionResponse();
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("ListOffsetsPartitionResponse: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "ListOffsetsPartitionResponse");
            }
            JsonNode _errorCodeNode = _node.get("errorCode");
            if (_errorCodeNode == null) {
                throw new RuntimeException("ListOffsetsPartitionResponse: unable to locate field 'errorCode', which is mandatory in version " + _version);
            } else {
                _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "ListOffsetsPartitionResponse");
            }
            JsonNode _oldStyleOffsetsNode = _node.get("oldStyleOffsets");
            if (_oldStyleOffsetsNode == null) {
                if (_version <= 0) {
                    throw new RuntimeException("ListOffsetsPartitionResponse: unable to locate field 'oldStyleOffsets', which is mandatory in version " + _version);
                } else {
                    _object.oldStyleOffsets = new ArrayList<Long>(0);
                }
            } else {
                if (!_oldStyleOffsetsNode.isArray()) {
                    throw new RuntimeException("ListOffsetsPartitionResponse expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<Long> _collection = new ArrayList<Long>(_oldStyleOffsetsNode.size());
                _object.oldStyleOffsets = _collection;
                for (JsonNode _element : _oldStyleOffsetsNode) {
                    _collection.add(MessageUtil.jsonNodeToLong(_element, "ListOffsetsPartitionResponse element"));
                }
            }
            JsonNode _timestampNode = _node.get("timestamp");
            if (_timestampNode == null) {
                if (_version >= 1) {
                    throw new RuntimeException("ListOffsetsPartitionResponse: unable to locate field 'timestamp', which is mandatory in version " + _version);
                } else {
                    _object.timestamp = -1L;
                }
            } else {
                _object.timestamp = MessageUtil.jsonNodeToLong(_timestampNode, "ListOffsetsPartitionResponse");
            }
            JsonNode _offsetNode = _node.get("offset");
            if (_offsetNode == null) {
                if (_version >= 1) {
                    throw new RuntimeException("ListOffsetsPartitionResponse: unable to locate field 'offset', which is mandatory in version " + _version);
                } else {
                    _object.offset = -1L;
                }
            } else {
                _object.offset = MessageUtil.jsonNodeToLong(_offsetNode, "ListOffsetsPartitionResponse");
            }
            JsonNode _leaderEpochNode = _node.get("leaderEpoch");
            if (_leaderEpochNode == null) {
                if (_version >= 4) {
                    throw new RuntimeException("ListOffsetsPartitionResponse: unable to locate field 'leaderEpoch', which is mandatory in version " + _version);
                } else {
                    _object.leaderEpoch = -1;
                }
            } else {
                _object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "ListOffsetsPartitionResponse");
            }
            return _object;
        }

        /**
         * Serializes a partition response, rejecting non-default values for fields the
         * target version cannot carry (mirrors the binary writer's checks).
         */
        public static JsonNode write(ListOffsetsPartitionResponse _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            _node.set("errorCode", new ShortNode(_object.errorCode));
            if (_version <= 0) {
                ArrayNode _oldStyleOffsetsArray = new ArrayNode(JsonNodeFactory.instance);
                for (Long _element : _object.oldStyleOffsets) {
                    _oldStyleOffsetsArray.add(new LongNode(_element));
                }
                _node.set("oldStyleOffsets", _oldStyleOffsetsArray);
            } else {
                if (!_object.oldStyleOffsets.isEmpty()) {
                    throw new UnsupportedVersionException("Attempted to write a non-default oldStyleOffsets at version " + _version);
                }
            }
            if (_version >= 1) {
                _node.set("timestamp", new LongNode(_object.timestamp));
            } else {
                if (_object.timestamp != -1L) {
                    throw new UnsupportedVersionException("Attempted to write a non-default timestamp at version " + _version);
                }
            }
            if (_version >= 1) {
                _node.set("offset", new LongNode(_object.offset));
            } else {
                if (_object.offset != -1L) {
                    throw new UnsupportedVersionException("Attempted to write a non-default offset at version " + _version);
                }
            }
            if (_version >= 4) {
                _node.set("leaderEpoch", new IntNode(_object.leaderEpoch));
            } else {
                if (_object.leaderEpoch != -1) {
                    throw new UnsupportedVersionException("Attempted to write a non-default leaderEpoch at version " + _version);
                }
            }
            return _node;
        }

        /** Convenience overload with record serialization enabled. */
        public static JsonNode write(ListOffsetsPartitionResponse _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** JSON converter for the nested {@code ListOffsetsTopicResponse} struct. */
    public static class ListOffsetsTopicResponseJsonConverter {
        /** Builds a topic response; both {@code name} and {@code partitions} are mandatory in every version. */
        public static ListOffsetsTopicResponse read(JsonNode _node, short _version) {
            ListOffsetsTopicResponse _object = new ListOffsetsTopicResponse();
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("ListOffsetsTopicResponse: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("ListOffsetsTopicResponse expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("ListOffsetsTopicResponse: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    throw new RuntimeException("ListOffsetsTopicResponse expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<ListOffsetsPartitionResponse> _collection = new ArrayList<ListOffsetsPartitionResponse>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(ListOffsetsPartitionResponseJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        /** Serializes a topic response, delegating each partition to the partition converter. */
        public static JsonNode write(ListOffsetsTopicResponse _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (ListOffsetsPartitionResponse _element : _object.partitions) {
                _partitionsArray.add(ListOffsetsPartitionResponseJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }

        /** Convenience overload with record serialization enabled. */
        public static JsonNode write(ListOffsetsTopicResponse _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListPartitionReassignmentsRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Request data for ListPartitionReassignments (API key 46), version 0 only.
 * A flexible-version message: arrays/strings use compact (unsigned varint, length+1)
 * framing, where a stored 0 encodes a null array. {@code topics == null} means
 * "list every in-progress reassignment".
 */
public class ListPartitionReassignmentsRequestData implements ApiMessage {
    // How long the broker may take to answer, in milliseconds.
    int timeoutMs;
    // Topics to query, or null to list everything.
    List<ListPartitionReassignmentsTopics> topics;
    // Tagged fields read from the wire whose tags this schema does not know.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 = new Schema(
        new Field("timeout_ms", Type.INT32, "The time in ms to wait for the request to complete."),
        new Field("topics", CompactArrayOf.nullable(ListPartitionReassignmentsTopics.SCHEMA_0), "The topics to list partition reassignments for, or null to list everything."),
        TaggedFieldsSection.of(
        )
    );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;

    /** Deserializing constructor: reads the message at the given version from {@code _readable}. */
    public ListPartitionReassignmentsRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: 60s timeout, null topics (i.e. list everything). */
    public ListPartitionReassignmentsRequestData() {
        this.timeoutMs = 60000;
        this.topics = null;
    }

    /** @return the ListPartitionReassignments API key (46). */
    @Override
    public short apiKey() {
        return 46;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /**
     * Reads this message from {@code _readable}. The topics array uses compact nullable
     * framing: stored varint 0 decodes to null, otherwise length = varint - 1.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.timeoutMs = _readable.readInt();
        {
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                this.topics = null;
            } else {
                // Guard against bogus lengths before allocating.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<ListPartitionReassignmentsTopics> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new ListPartitionReassignmentsTopics(_readable, _version));
                }
                this.topics = newCollection;
            }
        }
        this._unknownTaggedFields = null;
        // Flexible message: a tagged-field section is always present; unknown tags are kept.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /** Writes this message to {@code _writable}; null topics is encoded as varint 0. */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(timeoutMs);
        if (topics == null) {
            _writable.writeUnsignedVarint(0);
        } else {
            _writable.writeUnsignedVarint(topics.size() + 1);
            for (ListPartitionReassignmentsTopics topicsElement : topics) {
                topicsElement.write(_writable, _cache, _version);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /** Accumulates the exact byte size {@link #write} would emit; must mirror its branching. */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(4);
        if (topics == null) {
            // Null compact array is a single 0x00 varint byte.
            _size.addBytes(1);
        } else {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1));
            for (ListPartitionReassignmentsTopics topicsElement : topics) {
                topicsElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    /** Field-by-field equality, including unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof ListPartitionReassignmentsRequestData)) return false;
        ListPartitionReassignmentsRequestData other = (ListPartitionReassignmentsRequestData) obj;
        if (timeoutMs != other.timeoutMs) return false;
        if (this.topics == null) {
            if (other.topics != null) return false;
        } else {
            if (!this.topics.equals(other.topics)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    /** Hash over the same fields equals() compares. */
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + timeoutMs;
        hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
        return hashCode;
    }

    /** Deep copy: topic entries are duplicated individually; null topics stays null. */
    @Override
    public ListPartitionReassignmentsRequestData duplicate() {
        ListPartitionReassignmentsRequestData _duplicate = new ListPartitionReassignmentsRequestData();
        _duplicate.timeoutMs = timeoutMs;
        if (topics == null) {
            _duplicate.topics = null;
        } else {
            ArrayList<ListPartitionReassignmentsTopics> newTopics = new ArrayList<ListPartitionReassignmentsTopics>(topics.size());
            for (ListPartitionReassignmentsTopics _element : topics) {
                newTopics.add(_element.duplicate());
            }
            _duplicate.topics = newTopics;
        }
        return _duplicate;
    }

    /** Human-readable dump of all wire fields (debugging aid). */
    @Override
    public String toString() {
        return "ListPartitionReassignmentsRequestData("
            + "timeoutMs=" + timeoutMs
            + ", topics=" + ((topics == null) ? "null" : MessageUtil.deepToString(topics.iterator()))
            + ")";
    }

    /** @return the request timeout in milliseconds. */
    public int timeoutMs() {
        return this.timeoutMs;
    }

    /** @return the topics to query, or null to list everything. */
    public List<ListPartitionReassignmentsTopics> topics() {
        return this.topics;
    }

    /** Lazily-allocated list of unrecognized tagged fields. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    /** Fluent setter for the request timeout. */
    public ListPartitionReassignmentsRequestData setTimeoutMs(int v) {
        this.timeoutMs = v;
        return this;
    }

    /** Fluent setter for the topics list (null means list everything). */
    public ListPartitionReassignmentsRequestData setTopics(List<ListPartitionReassignmentsTopics> v) {
        this.topics = v;
        return this;
    }

    /**
     * One topic entry of the request: a topic name plus the partition indexes to query.
     * Also a flexible-version struct (compact string/array framing, tagged-field section).
     */
    public static class ListPartitionReassignmentsTopics implements Message {
        String name;
        List<Integer> partitionIndexes;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 = new Schema(
            new Field("name", Type.COMPACT_STRING, "The topic name"),
            new Field("partition_indexes", new CompactArrayOf(Type.INT32), "The partitions to list partition reassignments for."),
            TaggedFieldsSection.of(
            )
        );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 0;

        /** Deserializing constructor. */
        public ListPartitionReassignmentsTopics(Readable _readable, short _version) {
            read(_readable, _version);
        }

        /** Default constructor: empty name, empty partition list. */
        public ListPartitionReassignmentsTopics() {
            this.name = "";
            this.partitionIndexes = new ArrayList<Integer>(0);
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 0;
        }

        /**
         * Reads this struct: compact string name (non-nullable, max 0x7fff bytes),
         * compact non-nullable INT32 array, then the tagged-field section.
         *
         * @throws UnsupportedVersionException if {@code _version} is newer than 0.
         * @throws RuntimeException on malformed data (null/oversized name, null array,
         *         or a declared array length larger than the bytes remaining).
         */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of ListPartitionReassignmentsTopics");
            }
            {
                int length;
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitionIndexes was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<Integer> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(_readable.readInt());
                    }
                    this.partitionIndexes = newCollection;
                }
            }
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        /**
         * Writes this struct; the name's UTF-8 bytes are fetched from {@code _cache},
         * which addSize() populated earlier in the same serialization pass.
         */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(name);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeUnsignedVarint(partitionIndexes.size() + 1);
            for (Integer partitionIndexesElement : partitionIndexes) {
                _writable.writeInt(partitionIndexesElement);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        /**
         * Accumulates the exact byte size {@link #write} would emit, caching the name's
         * UTF-8 encoding for write() to reuse.
         */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of ListPartitionReassignmentsTopics");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
            {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitionIndexes.size() + 1));
                _size.addBytes(partitionIndexes.size() * 4);
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }

        /** Field-by-field equality, including unknown tagged fields. */
        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof ListPartitionReassignmentsTopics)) return false;
            ListPartitionReassignmentsTopics other = (ListPartitionReassignmentsTopics) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            if (this.partitionIndexes == null) {
                if (other.partitionIndexes != null) return false;
            } else {
                if (!this.partitionIndexes.equals(other.partitionIndexes)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        /** Hash over the same fields equals() compares. */
        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            hashCode = 31 * hashCode + (partitionIndexes == null ? 0 : partitionIndexes.hashCode());
            return hashCode;
        }

        /** Deep copy: the partition-index list is duplicated so the copy is independent. */
        @Override
        public ListPartitionReassignmentsTopics duplicate() {
            ListPartitionReassignmentsTopics _duplicate = new ListPartitionReassignmentsTopics();
            _duplicate.name = name;
            ArrayList<Integer> newPartitionIndexes = new ArrayList<Integer>(partitionIndexes.size());
            for (Integer _element : partitionIndexes) {
                newPartitionIndexes.add(_element);
            }
            _duplicate.partitionIndexes = newPartitionIndexes;
            return _duplicate;
        }

        /** Human-readable dump of all wire fields (debugging aid). */
        @Override
        public String toString() {
            return "ListPartitionReassignmentsTopics("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", partitionIndexes=" + MessageUtil.deepToString(partitionIndexes.iterator())
                + ")";
        }

        /** @return the topic name. */
        public String name() {
            return this.name;
        }

        /** @return the partition indexes to list reassignments for. */
        public List<Integer> partitionIndexes() {
            return this.partitionIndexes;
        }

        /** Lazily-allocated list of unrecognized tagged fields. */
        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        /** Fluent setter for the topic name. */
        public ListPartitionReassignmentsTopics setName(String v) {
            this.name = v;
            return this;
        }

        /** Fluent setter for the partition indexes. */
        public ListPartitionReassignmentsTopics setPartitionIndexes(List<Integer> v) {
            this.partitionIndexes = v;
            return this;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListPartitionReassignmentsRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.ListPartitionReassignmentsRequestData.*;

/**
 * Converts {@link ListPartitionReassignmentsRequestData} to and from a Jackson JSON tree.
 * Generated companion to the binary-protocol message class; field names in the JSON form
 * are camelCase ("timeoutMs", "topics").
 */
public class ListPartitionReassignmentsRequestDataJsonConverter {
    /**
     * Builds a ListPartitionReassignmentsRequestData from a JSON object node.
     * Both "timeoutMs" and "topics" are mandatory; a JSON null for "topics" maps to a
     * null list (meaning "list all reassignments" at the protocol level — TODO confirm
     * against the request's schema documentation).
     *
     * @throws RuntimeException if a mandatory field is missing or has the wrong JSON type.
     */
    public static ListPartitionReassignmentsRequestData read(JsonNode _node, short _version) {
        ListPartitionReassignmentsRequestData _object = new ListPartitionReassignmentsRequestData();
        JsonNode _timeoutMsNode = _node.get("timeoutMs");
        if (_timeoutMsNode == null) {
            throw new RuntimeException("ListPartitionReassignmentsRequestData: unable to locate field 'timeoutMs', which is mandatory in version " + _version);
        } else {
            _object.timeoutMs = MessageUtil.jsonNodeToInt(_timeoutMsNode, "ListPartitionReassignmentsRequestData");
        }
        JsonNode _topicsNode = _node.get("topics");
        if (_topicsNode == null) {
            throw new RuntimeException("ListPartitionReassignmentsRequestData: unable to locate field 'topics', which is mandatory in version " + _version);
        } else {
            if (_topicsNode.isNull()) {
                // Explicit JSON null is distinct from a missing field: it is accepted and
                // stored as a null topics list.
                _object.topics = null;
            } else {
                if (!_topicsNode.isArray()) {
                    // NOTE(review): generated code reports the type of the parent node
                    // (_node), not _topicsNode, in this message.
                    throw new RuntimeException("ListPartitionReassignmentsRequestData expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<ListPartitionReassignmentsTopics> _collection = new ArrayList<ListPartitionReassignmentsTopics>(_topicsNode.size());
                _object.topics = _collection;
                for (JsonNode _element : _topicsNode) {
                    _collection.add(ListPartitionReassignmentsTopicsJsonConverter.read(_element, _version));
                }
            }
        }
        return _object;
    }

    /**
     * Serializes the request to a JSON object node. A null topics list is written as a
     * JSON null; otherwise each topic entry is delegated to the nested converter.
     */
    public static JsonNode write(ListPartitionReassignmentsRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("timeoutMs", new IntNode(_object.timeoutMs));
        if (_object.topics == null) {
            _node.set("topics", NullNode.instance);
        } else {
            ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
            for (ListPartitionReassignmentsTopics _element : _object.topics) {
                _topicsArray.add(ListPartitionReassignmentsTopicsJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("topics", _topicsArray);
        }
        return _node;
    }

    /** Convenience overload: serializes records by default. */
    public static JsonNode write(ListPartitionReassignmentsRequestData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the per-topic entry ("name" + "partitionIndexes"). */
    public static class ListPartitionReassignmentsTopicsJsonConverter {
        /**
         * Reads one topic entry. "name" must be a JSON string and "partitionIndexes" a
         * JSON array of ints; both are mandatory in every supported version.
         *
         * @throws RuntimeException on a missing field or a type mismatch.
         */
        public static ListPartitionReassignmentsTopics read(JsonNode _node, short _version) {
            ListPartitionReassignmentsTopics _object = new ListPartitionReassignmentsTopics();
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("ListPartitionReassignmentsTopics: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("ListPartitionReassignmentsTopics expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _partitionIndexesNode = _node.get("partitionIndexes");
            if (_partitionIndexesNode == null) {
                throw new RuntimeException("ListPartitionReassignmentsTopics: unable to locate field 'partitionIndexes', which is mandatory in version " + _version);
            } else {
                if (!_partitionIndexesNode.isArray()) {
                    throw new RuntimeException("ListPartitionReassignmentsTopics expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_partitionIndexesNode.size());
                _object.partitionIndexes = _collection;
                for (JsonNode _element : _partitionIndexesNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "ListPartitionReassignmentsTopics element"));
                }
            }
            return _object;
        }

        /** Writes one topic entry; partition indexes are emitted as plain JSON ints. */
        public static JsonNode write(ListPartitionReassignmentsTopics _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            ArrayNode _partitionIndexesArray = new ArrayNode(JsonNodeFactory.instance);
            for (Integer _element : _object.partitionIndexes) {
                _partitionIndexesArray.add(new IntNode(_element));
            }
            _node.set("partitionIndexes", _partitionIndexesArray);
            return _node;
        }

        /** Convenience overload: serializes records by default. */
        public static JsonNode write(ListPartitionReassignmentsTopics _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListPartitionReassignmentsResponseData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Wire-format model for the ListPartitionReassignments response (API key 46, version 0).
 * Generated code: the exact read/write/addSize statement order defines the on-the-wire
 * layout (flexible-version encoding: compact strings/arrays use unsigned varint length+1,
 * followed by a tagged-field section).
 */
public class ListPartitionReassignmentsResponseData implements ApiMessage {
    // Top-level response fields; see SCHEMA_0 for wire types and per-field docs.
    int throttleTimeMs;
    short errorCode;
    String errorMessage;
    List<OngoingTopicReassignment> topics;
    // Tagged fields with tags this version does not recognize; preserved for round-tripping.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The top-level error code, or 0 if there was no error"),
            new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The top-level error message, or null if there was no error."),
            new Field("topics", new CompactArrayOf(OngoingTopicReassignment.SCHEMA_0), "The ongoing reassignments for each topic."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;

    /** Deserializing constructor: populates the object from the given readable buffer. */
    public ListPartitionReassignmentsResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: zeroed numerics, empty string message, empty topics list. */
    public ListPartitionReassignmentsResponseData() {
        this.throttleTimeMs = 0;
        this.errorCode = (short) 0;
        this.errorMessage = "";
        this.topics = new ArrayList<OngoingTopicReassignment>(0);
    }

    @Override
    public short apiKey() {
        // 46 = ListPartitionReassignments in the Kafka protocol.
        return 46;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /**
     * Reads the fields in wire order: int32 throttle, int16 error code, compact nullable
     * string message, compact non-nullable topics array, then the tagged-field section.
     * Varint length 0 encodes null (length - 1 == -1); a null topics array is rejected.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        this.errorCode = _readable.readShort();
        {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                this.errorMessage = null;
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field errorMessage had invalid length " + length);
            } else {
                this.errorMessage = _readable.readString(length);
            }
        }
        {
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field topics was serialized as null");
            } else {
                // Guard against over-allocation from a corrupt length prefix.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<OngoingTopicReassignment> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new OngoingTopicReassignment(_readable, _version));
                }
                this.topics = newCollection;
            }
        }
        this._unknownTaggedFields = null;
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    // No known tags in version 0; keep unknown ones for round-tripping.
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /**
     * Writes the fields in the same order read() consumes them. Relies on addSize()
     * having been called first with the same _cache so the UTF-8 bytes of errorMessage
     * are already cached.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        _writable.writeShort(errorCode);
        if (errorMessage == null) {
            // Compact nullable string: varint 0 means null.
            _writable.writeUnsignedVarint(0);
        } else {
            byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeUnsignedVarint(topics.size() + 1);
        for (OngoingTopicReassignment topicsElement : topics) {
            topicsElement.write(_writable, _cache, _version);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /**
     * Accumulates the serialized size, caching the UTF-8 encoding of errorMessage for
     * the subsequent write() pass. Mirrors write() byte-for-byte.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(4);
        _size.addBytes(2);
        if (errorMessage == null) {
            _size.addBytes(1);
        } else {
            byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'errorMessage' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(errorMessage, _stringBytes);
            _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
        }
        {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1));
            for (OngoingTopicReassignment topicsElement : topics) {
                topicsElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    /** Field-wise equality, including a comparison of the unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof ListPartitionReassignmentsResponseData)) return false;
        ListPartitionReassignmentsResponseData other = (ListPartitionReassignmentsResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (errorCode != other.errorCode) return false;
        if (this.errorMessage == null) {
            if (other.errorMessage != null) return false;
        } else {
            if (!this.errorMessage.equals(other.errorMessage)) return false;
        }
        if (this.topics == null) {
            if (other.topics != null) return false;
        } else {
            if (!this.topics.equals(other.topics)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // NOTE(review): hashCode intentionally excludes _unknownTaggedFields even though
    // equals() compares them — consistent across the generated message classes.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + errorCode;
        hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode());
        hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
        return hashCode;
    }

    /** Deep copy: the topics list and its elements are duplicated, not shared. */
    @Override
    public ListPartitionReassignmentsResponseData duplicate() {
        ListPartitionReassignmentsResponseData _duplicate = new ListPartitionReassignmentsResponseData();
        _duplicate.throttleTimeMs = throttleTimeMs;
        _duplicate.errorCode = errorCode;
        if (errorMessage == null) {
            _duplicate.errorMessage = null;
        } else {
            _duplicate.errorMessage = errorMessage;
        }
        ArrayList<OngoingTopicReassignment> newTopics = new ArrayList<OngoingTopicReassignment>(topics.size());
        for (OngoingTopicReassignment _element : topics) {
            newTopics.add(_element.duplicate());
        }
        _duplicate.topics = newTopics;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "ListPartitionReassignmentsResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", errorCode=" + errorCode
            + ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'")
            + ", topics=" + MessageUtil.deepToString(topics.iterator())
            + ")";
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public short errorCode() {
        return this.errorCode;
    }

    public String errorMessage() {
        return this.errorMessage;
    }

    public List<OngoingTopicReassignment> topics() {
        return this.topics;
    }

    /** Lazily initializes and returns the unknown-tagged-fields list (never null). */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public ListPartitionReassignmentsResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public ListPartitionReassignmentsResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }

    public ListPartitionReassignmentsResponseData setErrorMessage(String v) {
        this.errorMessage = v;
        return this;
    }

    public ListPartitionReassignmentsResponseData setTopics(List<OngoingTopicReassignment> v) {
        this.topics = v;
        return this;
    }

    /** One topic's ongoing reassignments: compact topic name plus a partitions array. */
    public static class OngoingTopicReassignment implements Message {
        String name;
        List<OngoingPartitionReassignment> partitions;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("name", Type.COMPACT_STRING, "The topic name."),
                new Field("partitions", new CompactArrayOf(OngoingPartitionReassignment.SCHEMA_0), "The ongoing reassignments for each partition."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 0;

        /** Deserializing constructor. */
        public OngoingTopicReassignment(Readable _readable, short _version) {
            read(_readable, _version);
        }

        /** Default constructor: empty name and empty partitions list. */
        public OngoingTopicReassignment() {
            this.name = "";
            this.partitions = new ArrayList<OngoingPartitionReassignment>(0);
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 0;
        }

        /**
         * Reads a compact non-nullable name string, a compact non-nullable partitions
         * array, and the tagged-field section. Only version 0 is readable.
         */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of OngoingTopicReassignment");
            }
            {
                int length;
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitions was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<OngoingPartitionReassignment> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new OngoingPartitionReassignment(_readable, _version));
                    }
                    this.partitions = newCollection;
                }
            }
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        /** Writes fields in read() order; requires a prior addSize() pass on _cache. */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(name);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeUnsignedVarint(partitions.size() + 1);
            for (OngoingPartitionReassignment partitionsElement : partitions) {
                partitionsElement.write(_writable, _cache, _version);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        /** Sizing pass mirroring write(); caches the UTF-8 bytes of the name. */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of OngoingTopicReassignment");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
            {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1));
                for (OngoingPartitionReassignment partitionsElement : partitions) {
                    partitionsElement.addSize(_size, _cache, _version);
                }
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof OngoingTopicReassignment)) return false;
            OngoingTopicReassignment other = (OngoingTopicReassignment) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            if (this.partitions == null) {
                if (other.partitions != null) return false;
            } else {
                if (!this.partitions.equals(other.partitions)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
            return hashCode;
        }

        /** Deep copy of this topic entry, including its partitions. */
        @Override
        public OngoingTopicReassignment duplicate() {
            OngoingTopicReassignment _duplicate = new OngoingTopicReassignment();
            _duplicate.name = name;
            ArrayList<OngoingPartitionReassignment> newPartitions = new ArrayList<OngoingPartitionReassignment>(partitions.size());
            for (OngoingPartitionReassignment _element : partitions) {
                newPartitions.add(_element.duplicate());
            }
            _duplicate.partitions = newPartitions;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "OngoingTopicReassignment("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", partitions=" + MessageUtil.deepToString(partitions.iterator())
                + ")";
        }

        public String name() {
            return this.name;
        }

        public List<OngoingPartitionReassignment> partitions() {
            return this.partitions;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public OngoingTopicReassignment setName(String v) {
            this.name = v;
            return this;
        }

        public OngoingTopicReassignment setPartitions(List<OngoingPartitionReassignment> v) {
            this.partitions = v;
            return this;
        }
    }

    /**
     * One partition's ongoing reassignment: partition index, current replica set, and
     * the replica sets currently being added and removed.
     */
    public static class OngoingPartitionReassignment implements Message {
        int partitionIndex;
        List<Integer> replicas;
        List<Integer> addingReplicas;
        List<Integer> removingReplicas;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("partition_index", Type.INT32, "The index of the partition."),
                new Field("replicas", new CompactArrayOf(Type.INT32), "The current replica set."),
                new Field("adding_replicas", new CompactArrayOf(Type.INT32), "The set of replicas we are currently adding."),
                new Field("removing_replicas", new CompactArrayOf(Type.INT32), "The set of replicas we are currently removing."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 0;

        /** Deserializing constructor. */
        public OngoingPartitionReassignment(Readable _readable, short _version) {
            read(_readable, _version);
        }

        /** Default constructor: index 0, empty replica lists. */
        public OngoingPartitionReassignment() {
            this.partitionIndex = 0;
            this.replicas = new ArrayList<Integer>(0);
            this.addingReplicas = new ArrayList<Integer>(0);
            this.removingReplicas = new ArrayList<Integer>(0);
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 0;
        }

        /**
         * Reads int32 partitionIndex, then three compact non-nullable int32 arrays
         * (replicas, addingReplicas, removingReplicas), then the tagged-field section.
         */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of OngoingPartitionReassignment");
            }
            this.partitionIndex = _readable.readInt();
            {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field replicas was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<Integer> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(_readable.readInt());
                    }
                    this.replicas = newCollection;
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field addingReplicas was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<Integer> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(_readable.readInt());
                    }
                    this.addingReplicas = newCollection;
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field removingReplicas was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<Integer> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(_readable.readInt());
                    }
                    this.removingReplicas = newCollection;
                }
            }
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        /** Writes fields in read() order; compact arrays use size + 1 varint prefixes. */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            _writable.writeInt(partitionIndex);
            _writable.writeUnsignedVarint(replicas.size() + 1);
            for (Integer replicasElement : replicas) {
                _writable.writeInt(replicasElement);
            }
            _writable.writeUnsignedVarint(addingReplicas.size() + 1);
            for (Integer addingReplicasElement : addingReplicas) {
                _writable.writeInt(addingReplicasElement);
            }
            _writable.writeUnsignedVarint(removingReplicas.size() + 1);
            for (Integer removingReplicasElement : removingReplicas) {
                _writable.writeInt(removingReplicasElement);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        /** Sizing pass mirroring write(): 4 bytes per int32 array element. */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of OngoingPartitionReassignment");
            }
            _size.addBytes(4);
            {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(replicas.size() + 1));
                _size.addBytes(replicas.size() * 4);
            }
            {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(addingReplicas.size() + 1));
                _size.addBytes(addingReplicas.size() * 4);
            }
            {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(removingReplicas.size() + 1));
                _size.addBytes(removingReplicas.size() * 4);
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof OngoingPartitionReassignment)) return false;
            OngoingPartitionReassignment other = (OngoingPartitionReassignment) obj;
            if (partitionIndex != other.partitionIndex) return false;
            if (this.replicas == null) {
                if (other.replicas != null) return false;
            } else {
                if (!this.replicas.equals(other.replicas)) return false;
            }
            if (this.addingReplicas == null) {
                if (other.addingReplicas != null) return false;
            } else {
                if (!this.addingReplicas.equals(other.addingReplicas)) return false;
            }
            if (this.removingReplicas == null) {
                if (other.removingReplicas != null) return false;
            } else {
                if (!this.removingReplicas.equals(other.removingReplicas)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + partitionIndex;
            hashCode = 31 * hashCode + (replicas == null ? 0 : replicas.hashCode());
            hashCode = 31 * hashCode + (addingReplicas == null ? 0 : addingReplicas.hashCode());
            hashCode = 31 * hashCode + (removingReplicas == null ? 0 : removingReplicas.hashCode());
            return hashCode;
        }

        /** Deep copy: all three replica lists are duplicated. */
        @Override
        public OngoingPartitionReassignment duplicate() {
            OngoingPartitionReassignment _duplicate = new OngoingPartitionReassignment();
            _duplicate.partitionIndex = partitionIndex;
            ArrayList<Integer> newReplicas = new ArrayList<Integer>(replicas.size());
            for (Integer _element : replicas) {
                newReplicas.add(_element);
            }
            _duplicate.replicas = newReplicas;
            ArrayList<Integer> newAddingReplicas = new ArrayList<Integer>(addingReplicas.size());
            for (Integer _element : addingReplicas) {
                newAddingReplicas.add(_element);
            }
            _duplicate.addingReplicas = newAddingReplicas;
            ArrayList<Integer> newRemovingReplicas = new ArrayList<Integer>(removingReplicas.size());
            for (Integer _element : removingReplicas) {
                newRemovingReplicas.add(_element);
            }
            _duplicate.removingReplicas = newRemovingReplicas;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "OngoingPartitionReassignment("
                + "partitionIndex=" + partitionIndex
                + ", replicas=" + MessageUtil.deepToString(replicas.iterator())
                + ", addingReplicas=" + MessageUtil.deepToString(addingReplicas.iterator())
                + ", removingReplicas=" + MessageUtil.deepToString(removingReplicas.iterator())
                + ")";
        }

        public int partitionIndex() {
            return this.partitionIndex;
        }

        public List<Integer> replicas() {
            return this.replicas;
        }

        public List<Integer> addingReplicas() {
            return this.addingReplicas;
        }

        public List<Integer> removingReplicas() {
            return this.removingReplicas;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public OngoingPartitionReassignment setPartitionIndex(int v) {
            this.partitionIndex = v;
            return this;
        }

        public OngoingPartitionReassignment setReplicas(List<Integer> v) {
            this.replicas = v;
            return this;
        }

        public OngoingPartitionReassignment setAddingReplicas(List<Integer> v) {
            this.addingReplicas = v;
            return this;
        }

        public OngoingPartitionReassignment setRemovingReplicas(List<Integer> v) {
            this.removingReplicas = v;
            return this;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListPartitionReassignmentsResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.*;

/**
 * Converts {@link ListPartitionReassignmentsResponseData} messages to and from a Jackson
 * JSON tree representation. Missing mandatory fields raise {@link RuntimeException} on read;
 * {@code write} mirrors the message structure field-by-field into an {@link ObjectNode}.
 */
public class ListPartitionReassignmentsResponseDataJsonConverter {
    /**
     * Builds a {@link ListPartitionReassignmentsResponseData} from a JSON node.
     *
     * @param _node    the JSON object to read; every field below is mandatory.
     * @param _version the message version (used only in error messages here).
     */
    public static ListPartitionReassignmentsResponseData read(JsonNode _node, short _version) {
        ListPartitionReassignmentsResponseData _object = new ListPartitionReassignmentsResponseData();
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            throw new RuntimeException("ListPartitionReassignmentsResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "ListPartitionReassignmentsResponseData");
        }
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("ListPartitionReassignmentsResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "ListPartitionReassignmentsResponseData");
        }
        // errorMessage is nullable: a JSON null maps to a Java null, any other value must be a string.
        JsonNode _errorMessageNode = _node.get("errorMessage");
        if (_errorMessageNode == null) {
            throw new RuntimeException("ListPartitionReassignmentsResponseData: unable to locate field 'errorMessage', which is mandatory in version " + _version);
        } else {
            if (_errorMessageNode.isNull()) {
                _object.errorMessage = null;
            } else {
                if (!_errorMessageNode.isTextual()) {
                    throw new RuntimeException("ListPartitionReassignmentsResponseData expected a string type, but got " + _node.getNodeType());
                }
                _object.errorMessage = _errorMessageNode.asText();
            }
        }
        // topics: each element is delegated to the nested OngoingTopicReassignment converter.
        JsonNode _topicsNode = _node.get("topics");
        if (_topicsNode == null) {
            throw new RuntimeException("ListPartitionReassignmentsResponseData: unable to locate field 'topics', which is mandatory in version " + _version);
        } else {
            if (!_topicsNode.isArray()) {
                throw new RuntimeException("ListPartitionReassignmentsResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<OngoingTopicReassignment> _collection = new ArrayList<OngoingTopicReassignment>(_topicsNode.size());
            _object.topics = _collection;
            for (JsonNode _element : _topicsNode) {
                _collection.add(OngoingTopicReassignmentJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes the message to a JSON object node.
     *
     * @param _serializeRecords forwarded to nested converters (no record fields at this level).
     */
    public static JsonNode write(ListPartitionReassignmentsResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        _node.set("errorCode", new ShortNode(_object.errorCode));
        // A null errorMessage is emitted as an explicit JSON null, not omitted.
        if (_object.errorMessage == null) {
            _node.set("errorMessage", NullNode.instance);
        } else {
            _node.set("errorMessage", new TextNode(_object.errorMessage));
        }
        ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
        for (OngoingTopicReassignment _element : _object.topics) {
            _topicsArray.add(OngoingTopicReassignmentJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("topics", _topicsArray);
        return _node;
    }

    /** Convenience overload that always serializes records. */
    public static JsonNode write(ListPartitionReassignmentsResponseData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the nested {@code OngoingPartitionReassignment} struct. */
    public static class OngoingPartitionReassignmentJsonConverter {
        /** Reads partitionIndex plus the replicas / addingReplicas / removingReplicas int arrays. */
        public static OngoingPartitionReassignment read(JsonNode _node, short _version) {
            OngoingPartitionReassignment _object = new OngoingPartitionReassignment();
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("OngoingPartitionReassignment: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "OngoingPartitionReassignment");
            }
            JsonNode _replicasNode = _node.get("replicas");
            if (_replicasNode == null) {
                throw new RuntimeException("OngoingPartitionReassignment: unable to locate field 'replicas', which is mandatory in version " + _version);
            } else {
                if (!_replicasNode.isArray()) {
                    throw new RuntimeException("OngoingPartitionReassignment expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_replicasNode.size());
                _object.replicas = _collection;
                for (JsonNode _element : _replicasNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "OngoingPartitionReassignment element"));
                }
            }
            JsonNode _addingReplicasNode = _node.get("addingReplicas");
            if (_addingReplicasNode == null) {
                throw new RuntimeException("OngoingPartitionReassignment: unable to locate field 'addingReplicas', which is mandatory in version " + _version);
            } else {
                if (!_addingReplicasNode.isArray()) {
                    throw new RuntimeException("OngoingPartitionReassignment expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_addingReplicasNode.size());
                _object.addingReplicas = _collection;
                for (JsonNode _element : _addingReplicasNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "OngoingPartitionReassignment element"));
                }
            }
            JsonNode _removingReplicasNode = _node.get("removingReplicas");
            if (_removingReplicasNode == null) {
                throw new RuntimeException("OngoingPartitionReassignment: unable to locate field 'removingReplicas', which is mandatory in version " + _version);
            } else {
                if (!_removingReplicasNode.isArray()) {
                    throw new RuntimeException("OngoingPartitionReassignment expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_removingReplicasNode.size());
                _object.removingReplicas = _collection;
                for (JsonNode _element : _removingReplicasNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "OngoingPartitionReassignment element"));
                }
            }
            return _object;
        }

        /** Writes all four fields; the int lists become JSON arrays of IntNode. */
        public static JsonNode write(OngoingPartitionReassignment _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            ArrayNode _replicasArray = new ArrayNode(JsonNodeFactory.instance);
            for (Integer _element : _object.replicas) {
                _replicasArray.add(new IntNode(_element));
            }
            _node.set("replicas", _replicasArray);
            ArrayNode _addingReplicasArray = new ArrayNode(JsonNodeFactory.instance);
            for (Integer _element : _object.addingReplicas) {
                _addingReplicasArray.add(new IntNode(_element));
            }
            _node.set("addingReplicas", _addingReplicasArray);
            ArrayNode _removingReplicasArray = new ArrayNode(JsonNodeFactory.instance);
            for (Integer _element : _object.removingReplicas) {
                _removingReplicasArray.add(new IntNode(_element));
            }
            _node.set("removingReplicas", _removingReplicasArray);
            return _node;
        }

        /** Convenience overload that always serializes records. */
        public static JsonNode write(OngoingPartitionReassignment _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** JSON converter for the nested {@code OngoingTopicReassignment} struct. */
    public static class OngoingTopicReassignmentJsonConverter {
        /** Reads the topic name and its list of ongoing partition reassignments. */
        public static OngoingTopicReassignment read(JsonNode _node, short _version) {
            OngoingTopicReassignment _object = new OngoingTopicReassignment();
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("OngoingTopicReassignment: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("OngoingTopicReassignment expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("OngoingTopicReassignment: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    throw new RuntimeException("OngoingTopicReassignment expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<OngoingPartitionReassignment> _collection = new ArrayList<OngoingPartitionReassignment>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(OngoingPartitionReassignmentJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        /** Writes the topic name and delegates each partition to the partition converter. */
        public static JsonNode write(OngoingTopicReassignment _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (OngoingPartitionReassignment _element : _object.partitions) {
                _partitionsArray.add(OngoingPartitionReassignmentJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }

        /** Convenience overload that always serializes records. */
        public static JsonNode write(OngoingTopicReassignment _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListTransactionsRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Request data for the ListTransactions API (api key 66, version 0 only).
 * Carries two filters, both encoded as compact arrays on the wire; unknown tagged
 * fields are preserved round-trip in {@code _unknownTaggedFields}.
 */
public class ListTransactionsRequestData implements ApiMessage {
    // The transaction states to filter by; empty means "return all".
    List<String> stateFilters;
    // The producer ids to filter by; empty means "return all".
    List<Long> producerIdFilters;
    // Tagged fields received on the wire that this schema version does not know about.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("state_filters", new CompactArrayOf(Type.COMPACT_STRING), "The transaction states to filter by: if empty, all transactions are returned; if non-empty, then only transactions matching one of the filtered states will be returned"),
            new Field("producer_id_filters", new CompactArrayOf(Type.INT64), "The producerIds to filter by: if empty, all transactions will be returned; if non-empty, only transactions which match one of the filtered producerIds will be returned"),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;

    /** Deserializing constructor: populates the message from {@code _readable}. */
    public ListTransactionsRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: both filter lists start empty (match everything). */
    public ListTransactionsRequestData() {
        this.stateFilters = new ArrayList<String>(0);
        this.producerIdFilters = new ArrayList<Long>(0);
    }

    @Override
    public short apiKey() {
        return 66;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /**
     * Reads the wire format: two compact arrays (lengths are unsigned varints offset by 1,
     * so 0 encodes null) followed by the tagged-field section.
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            int arrayLength;
            // Compact-array length: varint value minus one; -1 would mean a null array.
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field stateFilters was serialized as null");
            } else {
                // Guard against allocating a huge list from a corrupt length prefix.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<String> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    int length;
                    // Compact-string length: varint minus one; max 0x7fff enforced below.
                    length = _readable.readUnsignedVarint() - 1;
                    if (length < 0) {
                        throw new RuntimeException("non-nullable field stateFilters element was serialized as null");
                    } else if (length > 0x7fff) {
                        throw new RuntimeException("string field stateFilters element had invalid length " + length);
                    } else {
                        newCollection.add(_readable.readString(length));
                    }
                }
                this.stateFilters = newCollection;
            }
        }
        {
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field producerIdFilters was serialized as null");
            } else {
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<Long> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(_readable.readLong());
                }
                this.producerIdFilters = newCollection;
            }
        }
        // Tagged-field section: this version defines no tags, so everything is kept raw.
        this._unknownTaggedFields = null;
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /**
     * Writes the wire format. String bytes must already be in {@code _cache}
     * (populated by a prior {@link #addSize} call).
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeUnsignedVarint(stateFilters.size() + 1);
        for (String stateFiltersElement : stateFilters) {
            {
                byte[] _stringBytes = _cache.getSerializedValue(stateFiltersElement);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
        }
        _writable.writeUnsignedVarint(producerIdFilters.size() + 1);
        for (Long producerIdFiltersElement : producerIdFilters) {
            _writable.writeLong(producerIdFiltersElement);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /**
     * Accumulates the serialized size and caches each string's UTF-8 bytes so that
     * {@link #write} does not re-encode them.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(stateFilters.size() + 1));
            for (String stateFiltersElement : stateFilters) {
                byte[] _stringBytes = stateFiltersElement.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'stateFiltersElement' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(stateFiltersElement, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
        }
        {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(producerIdFilters.size() + 1));
            // Each producer id is a fixed 8-byte INT64.
            _size.addBytes(producerIdFilters.size() * 8);
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    /** Equality includes both filter lists and the raw unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof ListTransactionsRequestData)) return false;
        ListTransactionsRequestData other = (ListTransactionsRequestData) obj;
        if (this.stateFilters == null) {
            if (other.stateFilters != null) return false;
        } else {
            if (!this.stateFilters.equals(other.stateFilters)) return false;
        }
        if (this.producerIdFilters == null) {
            if (other.producerIdFilters != null) return false;
        } else {
            if (!this.producerIdFilters.equals(other.producerIdFilters)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // NOTE(review): hashCode deliberately ignores _unknownTaggedFields even though equals
    // compares them — this matches the generator's output, but means equal hash is not
    // guaranteed to imply tagged-field equality.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (stateFilters == null ? 0 : stateFilters.hashCode());
        hashCode = 31 * hashCode + (producerIdFilters == null ? 0 : producerIdFilters.hashCode());
        return hashCode;
    }

    /** Deep copy of both filter lists; unknown tagged fields are not copied. */
    @Override
    public ListTransactionsRequestData duplicate() {
        ListTransactionsRequestData _duplicate = new ListTransactionsRequestData();
        ArrayList<String> newStateFilters = new ArrayList<String>(stateFilters.size());
        for (String _element : stateFilters) {
            newStateFilters.add(_element);
        }
        _duplicate.stateFilters = newStateFilters;
        ArrayList<Long> newProducerIdFilters = new ArrayList<Long>(producerIdFilters.size());
        for (Long _element : producerIdFilters) {
            newProducerIdFilters.add(_element);
        }
        _duplicate.producerIdFilters = newProducerIdFilters;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "ListTransactionsRequestData("
            + "stateFilters=" + MessageUtil.deepToString(stateFilters.iterator())
            + ", producerIdFilters=" + MessageUtil.deepToString(producerIdFilters.iterator())
            + ")";
    }

    public List<String> stateFilters() {
        return this.stateFilters;
    }

    public List<Long> producerIdFilters() {
        return this.producerIdFilters;
    }

    /** Lazily creates the unknown-tagged-fields list; never returns null. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public ListTransactionsRequestData setStateFilters(List<String> v) {
        this.stateFilters = v;
        return this;
    }

    public ListTransactionsRequestData setProducerIdFilters(List<Long> v) {
        this.producerIdFilters = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListTransactionsRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.ListTransactionsRequestData.*;

/**
 * Converts {@link ListTransactionsRequestData} to and from a Jackson JSON tree.
 * Both fields are mandatory on read; validation failures raise {@link RuntimeException}.
 */
public class ListTransactionsRequestDataJsonConverter {
    /**
     * Builds a {@link ListTransactionsRequestData} from a JSON object.
     *
     * @param _node    JSON object containing "stateFilters" (array of strings) and
     *                 "producerIdFilters" (array of longs); both must be present.
     * @param _version message version, used only for error messages here.
     */
    public static ListTransactionsRequestData read(JsonNode _node, short _version) {
        ListTransactionsRequestData result = new ListTransactionsRequestData();

        // stateFilters: mandatory array of strings.
        JsonNode stateFiltersJson = _node.get("stateFilters");
        if (stateFiltersJson == null) {
            throw new RuntimeException("ListTransactionsRequestData: unable to locate field 'stateFilters', which is mandatory in version " + _version);
        }
        if (!stateFiltersJson.isArray()) {
            throw new RuntimeException("ListTransactionsRequestData expected a JSON array, but got " + _node.getNodeType());
        }
        ArrayList<String> states = new ArrayList<String>(stateFiltersJson.size());
        result.stateFilters = states;
        for (JsonNode item : stateFiltersJson) {
            if (!item.isTextual()) {
                // NOTE: the error reports the outer node's type, mirroring the generator's output.
                throw new RuntimeException("ListTransactionsRequestData element expected a string type, but got " + _node.getNodeType());
            }
            states.add(item.asText());
        }

        // producerIdFilters: mandatory array of INT64 values.
        JsonNode producerIdFiltersJson = _node.get("producerIdFilters");
        if (producerIdFiltersJson == null) {
            throw new RuntimeException("ListTransactionsRequestData: unable to locate field 'producerIdFilters', which is mandatory in version " + _version);
        }
        if (!producerIdFiltersJson.isArray()) {
            throw new RuntimeException("ListTransactionsRequestData expected a JSON array, but got " + _node.getNodeType());
        }
        ArrayList<Long> producerIds = new ArrayList<Long>(producerIdFiltersJson.size());
        result.producerIdFilters = producerIds;
        for (JsonNode item : producerIdFiltersJson) {
            producerIds.add(MessageUtil.jsonNodeToLong(item, "ListTransactionsRequestData element"));
        }

        return result;
    }

    /**
     * Serializes the message to a JSON object node; both lists become JSON arrays.
     *
     * @param _serializeRecords unused here (no record fields in this message).
     */
    public static JsonNode write(ListTransactionsRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode out = new ObjectNode(JsonNodeFactory.instance);

        ArrayNode statesOut = new ArrayNode(JsonNodeFactory.instance);
        for (String state : _object.stateFilters) {
            statesOut.add(new TextNode(state));
        }
        out.set("stateFilters", statesOut);

        ArrayNode producerIdsOut = new ArrayNode(JsonNodeFactory.instance);
        for (Long producerId : _object.producerIdFilters) {
            producerIdsOut.add(new LongNode(producerId));
        }
        out.set("producerIdFilters", producerIdsOut);

        return out;
    }

    /** Convenience overload that always serializes records. */
    public static JsonNode write(ListTransactionsRequestData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListTransactionsResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class ListTransactionsResponseData implements ApiMessage { int throttleTimeMs; short 
errorCode; List<String> unknownStateFilters; List<TransactionState> transactionStates; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, ""), new Field("unknown_state_filters", new CompactArrayOf(Type.COMPACT_STRING), "Set of state filters provided in the request which were unknown to the transaction coordinator"), new Field("transaction_states", new CompactArrayOf(TransactionState.SCHEMA_0), ""), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public ListTransactionsResponseData(Readable _readable, short _version) { read(_readable, _version); } public ListTransactionsResponseData() { this.throttleTimeMs = 0; this.errorCode = (short) 0; this.unknownStateFilters = new ArrayList<String>(0); this.transactionStates = new ArrayList<TransactionState>(0); } @Override public short apiKey() { return 66; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { this.throttleTimeMs = _readable.readInt(); this.errorCode = _readable.readShort(); { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field unknownStateFilters was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<String> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < 
arrayLength; i++) { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field unknownStateFilters element was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field unknownStateFilters element had invalid length " + length); } else { newCollection.add(_readable.readString(length)); } } this.unknownStateFilters = newCollection; } } { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field transactionStates was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<TransactionState> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new TransactionState(_readable, _version)); } this.transactionStates = newCollection; } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(throttleTimeMs); _writable.writeShort(errorCode); _writable.writeUnsignedVarint(unknownStateFilters.size() + 1); for (String unknownStateFiltersElement : unknownStateFilters) { { byte[] _stringBytes = _cache.getSerializedValue(unknownStateFiltersElement); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } } _writable.writeUnsignedVarint(transactionStates.size() + 1); for (TransactionState 
transactionStatesElement : transactionStates) { transactionStatesElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); _size.addBytes(2); { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(unknownStateFilters.size() + 1)); for (String unknownStateFiltersElement : unknownStateFilters) { byte[] _stringBytes = unknownStateFiltersElement.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'unknownStateFiltersElement' field is too long to be serialized"); } _cache.cacheSerializedValue(unknownStateFiltersElement, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } } { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(transactionStates.size() + 1)); for (TransactionState transactionStatesElement : transactionStates) { transactionStatesElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof ListTransactionsResponseData)) return false; ListTransactionsResponseData other = (ListTransactionsResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (errorCode != other.errorCode) return false; if (this.unknownStateFilters == null) { if (other.unknownStateFilters != 
null) return false; } else { if (!this.unknownStateFilters.equals(other.unknownStateFilters)) return false; } if (this.transactionStates == null) { if (other.transactionStates != null) return false; } else { if (!this.transactionStates.equals(other.transactionStates)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); }
// NOTE(review): generated code (per the file header, "DO NOT EDIT") -- comments added for review only; regenerate from the message spec rather than hand-editing.
// hashCode folds the same named fields that equals() compares (throttleTimeMs, errorCode, unknownStateFilters, transactionStates); unknown tagged fields are not hashed, although equals() above does compare them.
@Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (unknownStateFilters == null ? 0 : unknownStateFilters.hashCode()); hashCode = 31 * hashCode + (transactionStates == null ? 0 : transactionStates.hashCode()); return hashCode; }
// Deep copy: new lists are allocated and each TransactionState element is duplicated. Unlike equals()/hashCode(), this assumes both list fields are non-null (NPE otherwise).
@Override public ListTransactionsResponseData duplicate() { ListTransactionsResponseData _duplicate = new ListTransactionsResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; _duplicate.errorCode = errorCode; ArrayList<String> newUnknownStateFilters = new ArrayList<String>(unknownStateFilters.size()); for (String _element : unknownStateFilters) { newUnknownStateFilters.add(_element); } _duplicate.unknownStateFilters = newUnknownStateFilters; ArrayList<TransactionState> newTransactionStates = new ArrayList<TransactionState>(transactionStates.size()); for (TransactionState _element : transactionStates) { newTransactionStates.add(_element.duplicate()); } _duplicate.transactionStates = newTransactionStates; return _duplicate; }
// Debug representation; also assumes the two list fields are non-null.
@Override public String toString() { return "ListTransactionsResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", errorCode=" + errorCode + ", unknownStateFilters=" + MessageUtil.deepToString(unknownStateFilters.iterator()) + ", transactionStates=" + MessageUtil.deepToString(transactionStates.iterator()) + ")"; }
// Plain accessors for the generated fields.
public int throttleTimeMs() { return this.throttleTimeMs; } public short errorCode() { return this.errorCode; } public List<String> unknownStateFilters() { return this.unknownStateFilters; } public List<TransactionState> transactionStates() { return this.transactionStates; }
// Lazily materializes the unknown-tagged-fields list so callers can append to it.
@Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; }
// Fluent setters; each returns this for chaining. List setters store the caller's list without copying.
public ListTransactionsResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public ListTransactionsResponseData setErrorCode(short v) { this.errorCode = v; return this; } public ListTransactionsResponseData setUnknownStateFilters(List<String> v) { this.unknownStateFilters = v; return this; } public ListTransactionsResponseData setTransactionStates(List<TransactionState> v) { this.transactionStates = v; return this; }
// Nested generated message: one entry of the ListTransactions response. Only schema version 0 exists;
// both string fields are COMPACT_STRING (varint length+1 framing) and the message carries a tagged-fields section.
public static class TransactionState implements Message { String transactionalId; long producerId; String transactionState; private List<RawTaggedField> _unknownTaggedFields;
public static final Schema SCHEMA_0 = new Schema( new Field("transactional_id", Type.COMPACT_STRING, ""), new Field("producer_id", Type.INT64, ""), new Field("transaction_state", Type.COMPACT_STRING, "The current transaction state of the producer"), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0;
public TransactionState(Readable _readable, short _version) { read(_readable, _version); } public TransactionState() { this.transactionalId = ""; this.producerId = 0L; this.transactionState = ""; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; }
// Wire-format decode. Compact strings are framed as readUnsignedVarint() - 1 (so 0 encodes null, which is rejected
// here because both fields are non-nullable); lengths above 0x7fff are rejected. Unrecognized tagged fields are
// preserved verbatim in _unknownTaggedFields so they round-trip through write().
@Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of TransactionState"); } { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field transactionalId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field transactionalId had invalid length " + length); } else { this.transactionalId = _readable.readString(length); } } this.producerId = _readable.readLong(); { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field transactionState was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field transactionState had invalid length " + length); } else { this.transactionState = _readable.readString(length); } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } }
// Wire-format encode. Relies on addSize() having run first with the same _cache: getSerializedValue() returns the
// UTF-8 bytes cached there. Field order (transactionalId, producerId, transactionState, tagged fields) is the protocol.
@Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(transactionalId); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } _writable.writeLong(producerId); { byte[] _stringBytes = _cache.getSerializedValue(transactionState); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); }
// Pre-computes the serialized size and caches the UTF-8 encodings for write(); mirrors write()'s layout exactly
// (varint-framed strings, 8 bytes for the long, then the tagged-field section).
@Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of TransactionState"); } { byte[] _stringBytes = transactionalId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'transactionalId' field is too long to be serialized"); } _cache.cacheSerializedValue(transactionalId, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } _size.addBytes(8); { byte[] _stringBytes = transactionState.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'transactionState' field is too long to be serialized"); } _cache.cacheSerializedValue(transactionState, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); }
// equals compares all three fields plus any raw tagged fields.
@Override public boolean equals(Object obj) { if (!(obj instanceof TransactionState)) return false; TransactionState other = (TransactionState) obj; if (this.transactionalId == null) { if (other.transactionalId != null) return false; } else { if (!this.transactionalId.equals(other.transactionalId)) return false; } if (producerId != other.producerId) return false; if (this.transactionState == null) { if (other.transactionState != null) return false; } else { if (!this.transactionState.equals(other.transactionState)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); }
// producerId is folded into the hash by XOR-ing its high and low 32 bits.
@Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (transactionalId == null ? 0 : transactionalId.hashCode()); hashCode = 31 * hashCode + ((int) (producerId >> 32) ^ (int) producerId); hashCode = 31 * hashCode + (transactionState == null ? 0 : transactionState.hashCode()); return hashCode; }
// Field-wise copy; Strings are immutable so sharing references is safe. _unknownTaggedFields is not copied.
@Override public TransactionState duplicate() { TransactionState _duplicate = new TransactionState(); _duplicate.transactionalId = transactionalId; _duplicate.producerId = producerId; _duplicate.transactionState = transactionState; return _duplicate; }
@Override public String toString() { return "TransactionState(" + "transactionalId=" + ((transactionalId == null) ? "null" : "'" + transactionalId.toString() + "'") + ", producerId=" + producerId + ", transactionState=" + ((transactionState == null) ? "null" : "'" + transactionState.toString() + "'") + ")"; }
// Accessors, lazy tagged-field list, and fluent setters, as in the enclosing class.
public String transactionalId() { return this.transactionalId; } public long producerId() { return this.producerId; } public String transactionState() { return this.transactionState; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public TransactionState setTransactionalId(String v) { this.transactionalId = v; return this; } public TransactionState setProducerId(long v) { this.producerId = v; return this; } public TransactionState setTransactionState(String v) { this.transactionState = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ListTransactionsResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.LongNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.ListTransactionsResponseData.*;
// Generated JSON <-> ListTransactionsResponseData converter (comments added for review; regenerate rather than hand-edit).
// read() treats every field as mandatory and throws RuntimeException when a field is missing or has the wrong JSON type.
public class ListTransactionsResponseDataJsonConverter { public static ListTransactionsResponseData read(JsonNode _node, short _version) { ListTransactionsResponseData _object = new ListTransactionsResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { throw new RuntimeException("ListTransactionsResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "ListTransactionsResponseData"); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("ListTransactionsResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "ListTransactionsResponseData"); } JsonNode _unknownStateFiltersNode = _node.get("unknownStateFilters"); if (_unknownStateFiltersNode == null) { throw new RuntimeException("ListTransactionsResponseData: unable to locate field 'unknownStateFilters', which is mandatory in version " + _version); } else { if (!_unknownStateFiltersNode.isArray()) { throw new RuntimeException("ListTransactionsResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<String> _collection = new ArrayList<String>(_unknownStateFiltersNode.size()); _object.unknownStateFilters = _collection; for (JsonNode _element : _unknownStateFiltersNode) { if (!_element.isTextual()) { throw new RuntimeException("ListTransactionsResponseData element expected a string type, but got " + _node.getNodeType()); } _collection.add(_element.asText()); } } JsonNode _transactionStatesNode = _node.get("transactionStates"); if (_transactionStatesNode == null) { throw new RuntimeException("ListTransactionsResponseData: unable to locate field 'transactionStates', which is mandatory in version " + _version); } else { if (!_transactionStatesNode.isArray()) { throw new RuntimeException("ListTransactionsResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<TransactionState> _collection = new ArrayList<TransactionState>(_transactionStatesNode.size()); _object.transactionStates = _collection; for (JsonNode _element : _transactionStatesNode) { _collection.add(TransactionStateJsonConverter.read(_element, _version)); } } return _object; }
// write() emits fields in declaration order; the two-arg overload defaults _serializeRecords to true.
public static JsonNode write(ListTransactionsResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); _node.set("errorCode", new ShortNode(_object.errorCode)); ArrayNode _unknownStateFiltersArray = new ArrayNode(JsonNodeFactory.instance); for (String _element : _object.unknownStateFilters) { _unknownStateFiltersArray.add(new TextNode(_element)); } _node.set("unknownStateFilters", _unknownStateFiltersArray); ArrayNode _transactionStatesArray = new ArrayNode(JsonNodeFactory.instance); for (TransactionState _element : _object.transactionStates) { _transactionStatesArray.add(TransactionStateJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("transactionStates", _transactionStatesArray); return _node; } public static JsonNode write(ListTransactionsResponseData _object, short _version) { return write(_object, _version, true); }
// Converter for the nested TransactionState message; same mandatory-field / type-check conventions as the parent.
public static class TransactionStateJsonConverter { public static TransactionState read(JsonNode _node, short _version) { TransactionState _object = new TransactionState(); JsonNode _transactionalIdNode = _node.get("transactionalId"); if (_transactionalIdNode == null) { throw new RuntimeException("TransactionState: unable to locate field 'transactionalId', which is mandatory in version " + _version); } else { if (!_transactionalIdNode.isTextual()) { throw new RuntimeException("TransactionState expected a string type, but got " + _node.getNodeType()); } _object.transactionalId = _transactionalIdNode.asText(); } JsonNode _producerIdNode = _node.get("producerId"); if (_producerIdNode == null) { throw new RuntimeException("TransactionState: unable to locate field 'producerId', which is mandatory in version " + _version); } else { _object.producerId = MessageUtil.jsonNodeToLong(_producerIdNode, "TransactionState"); } JsonNode _transactionStateNode = _node.get("transactionState"); if (_transactionStateNode == null) { throw new RuntimeException("TransactionState: unable to locate field 'transactionState', which is mandatory in version " + _version); } else { if (!_transactionStateNode.isTextual()) { throw new RuntimeException("TransactionState expected a string type, but got " + _node.getNodeType()); } _object.transactionState = _transactionStateNode.asText(); } return _object; } public static JsonNode write(TransactionState _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("transactionalId", new TextNode(_object.transactionalId)); _node.set("producerId", new LongNode(_object.producerId)); _node.set("transactionState", new TextNode(_object.transactionState)); return _node; } public static JsonNode write(TransactionState _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/MetadataRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;
// Generated request data for the Metadata API (apiKey 3), versions 0-12 (comments added for review; regenerate rather than hand-edit).
// Version gates visible in the schemas below: topics nullable from v1; allow_auto_topic_creation from v4;
// include_cluster_authorized_operations only v8-10; include_topic_authorized_operations from v8;
// compact arrays + tagged fields from v9; per-topic topic_id and nullable name from v10.
public class MetadataRequestData implements ApiMessage { List<MetadataRequestTopic> topics; boolean allowAutoTopicCreation; boolean includeClusterAuthorizedOperations; boolean includeTopicAuthorizedOperations; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topics", new ArrayOf(MetadataRequestTopic.SCHEMA_0), "The topics to fetch metadata for.") ); public static final Schema SCHEMA_1 = new Schema( new Field("topics", ArrayOf.nullable(MetadataRequestTopic.SCHEMA_0), "The topics to fetch metadata for.") ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("topics", ArrayOf.nullable(MetadataRequestTopic.SCHEMA_0), "The topics to fetch metadata for."), new Field("allow_auto_topic_creation", Type.BOOLEAN, "If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so.") ); public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = new Schema( new Field("topics", ArrayOf.nullable(MetadataRequestTopic.SCHEMA_0), "The topics to fetch metadata for."), new Field("allow_auto_topic_creation", Type.BOOLEAN, "If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so."), new Field("include_cluster_authorized_operations", Type.BOOLEAN, "Whether to include cluster authorized operations."), new Field("include_topic_authorized_operations", Type.BOOLEAN, "Whether to include topic authorized operations.") ); public static final Schema SCHEMA_9 = new Schema( new Field("topics", CompactArrayOf.nullable(MetadataRequestTopic.SCHEMA_9), "The topics to fetch metadata for."), new Field("allow_auto_topic_creation", Type.BOOLEAN, "If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so."), new Field("include_cluster_authorized_operations", Type.BOOLEAN, "Whether to include cluster authorized operations."), new Field("include_topic_authorized_operations", Type.BOOLEAN, "Whether to include topic authorized operations."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_10 = new Schema( new Field("topics", CompactArrayOf.nullable(MetadataRequestTopic.SCHEMA_10), "The topics to fetch metadata for."), new Field("allow_auto_topic_creation", Type.BOOLEAN, "If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so."), new Field("include_cluster_authorized_operations", Type.BOOLEAN, "Whether to include cluster authorized operations."), new Field("include_topic_authorized_operations", Type.BOOLEAN, "Whether to include topic authorized operations."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_11 = new Schema( new Field("topics", CompactArrayOf.nullable(MetadataRequestTopic.SCHEMA_10), "The topics to fetch metadata for."), new Field("allow_auto_topic_creation", Type.BOOLEAN, "If this is true, the broker may auto-create topics that we requested which do not already exist, if it is configured to do so."), new Field("include_topic_authorized_operations", Type.BOOLEAN, "Whether to include topic authorized operations."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_12 = SCHEMA_11; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 12; public MetadataRequestData(Readable _readable, short _version) { read(_readable, _version); }
// Default constructor matches the message spec defaults: empty (non-null) topic list, allowAutoTopicCreation=true.
public MetadataRequestData() { this.topics = new ArrayList<MetadataRequestTopic>(0); this.allowAutoTopicCreation = true; this.includeClusterAuthorizedOperations = false; this.includeTopicAuthorizedOperations = false; } @Override public short apiKey() { return 3; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 12; }
// Wire-format decode. v9+ uses a compact array (varint length+1, 0 = null); older versions use a 4-byte length where
// -1 means null (only legal from v1). The arrayLength > remaining() check guards against huge allocations from
// corrupt input. Fields absent in a given version are reset to their defaults rather than left stale.
@Override public void read(Readable _readable, short _version) { { if (_version >= 9) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { this.topics = null; } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<MetadataRequestTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new MetadataRequestTopic(_readable, _version)); } this.topics = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { if (_version >= 1) { this.topics = null; } else { throw new RuntimeException("non-nullable field topics was serialized as null"); } } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<MetadataRequestTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new MetadataRequestTopic(_readable, _version)); } this.topics = newCollection; } } } if (_version >= 4) { this.allowAutoTopicCreation = _readable.readByte() != 0; } else { this.allowAutoTopicCreation = true; } if ((_version >= 8) && (_version <= 10)) { this.includeClusterAuthorizedOperations = _readable.readByte() != 0; } else { this.includeClusterAuthorizedOperations = false; } if (_version >= 8) { this.includeTopicAuthorizedOperations = _readable.readByte() != 0; } else { this.includeTopicAuthorizedOperations = false; } this._unknownTaggedFields = null; if (_version >= 9) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } }
// Wire-format encode, mirror of read(). Writing a non-default value of a field into a version that cannot carry it
// raises UnsupportedVersionException (or NullPointerException for a null topics list at v0) instead of silently dropping it.
@Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 9) { if (topics == null) { _writable.writeUnsignedVarint(0); } else { _writable.writeUnsignedVarint(topics.size() + 1); for (MetadataRequestTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } } else { if (topics == null) { if (_version >= 1) { _writable.writeInt(-1); } else { throw new NullPointerException(); } } else { _writable.writeInt(topics.size()); for (MetadataRequestTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } } if (_version >= 4) { _writable.writeByte(allowAutoTopicCreation ? (byte) 1 : (byte) 0); } else { if (!this.allowAutoTopicCreation) { throw new UnsupportedVersionException("Attempted to write a non-default allowAutoTopicCreation at version " + _version); } } if ((_version >= 8) && (_version <= 10)) { _writable.writeByte(includeClusterAuthorizedOperations ? (byte) 1 : (byte) 0); } else { if (this.includeClusterAuthorizedOperations) { throw new UnsupportedVersionException("Attempted to write a non-default includeClusterAuthorizedOperations at version " + _version); } } if (_version >= 8) { _writable.writeByte(includeTopicAuthorizedOperations ? (byte) 1 : (byte) 0); } else { if (this.includeTopicAuthorizedOperations) { throw new UnsupportedVersionException("Attempted to write a non-default includeTopicAuthorizedOperations at version " + _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 9) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } }
// Size pre-pass matching write(): 1-byte booleans only in the versions that carry them, varint vs. 4-byte array framing.
@Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (topics == null) { if (_version >= 9) { _size.addBytes(1); } else { _size.addBytes(4); } } else { if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); } else { _size.addBytes(4); } for (MetadataRequestTopic topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_version >= 4) { _size.addBytes(1); } if ((_version >= 8) && (_version <= 10)) { _size.addBytes(1); } if (_version >= 8) { _size.addBytes(1); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } }
// Standard generated value semantics: equals/hashCode over all fields (equals also compares raw tagged fields).
@Override public boolean equals(Object obj) { if (!(obj instanceof MetadataRequestData)) return false; MetadataRequestData other = (MetadataRequestData) obj; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } if (allowAutoTopicCreation != other.allowAutoTopicCreation) return false; if (includeClusterAuthorizedOperations != other.includeClusterAuthorizedOperations) return false; if (includeTopicAuthorizedOperations != other.includeTopicAuthorizedOperations) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); hashCode = 31 * hashCode + (allowAutoTopicCreation ? 1231 : 1237); hashCode = 31 * hashCode + (includeClusterAuthorizedOperations ? 1231 : 1237); hashCode = 31 * hashCode + (includeTopicAuthorizedOperations ? 1231 : 1237); return hashCode; }
// Deep copy; preserves a null topics list (valid from v1 onward, meaning "all topics").
@Override public MetadataRequestData duplicate() { MetadataRequestData _duplicate = new MetadataRequestData(); if (topics == null) { _duplicate.topics = null; } else { ArrayList<MetadataRequestTopic> newTopics = new ArrayList<MetadataRequestTopic>(topics.size()); for (MetadataRequestTopic _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; } _duplicate.allowAutoTopicCreation = allowAutoTopicCreation; _duplicate.includeClusterAuthorizedOperations = includeClusterAuthorizedOperations; _duplicate.includeTopicAuthorizedOperations = includeTopicAuthorizedOperations; return _duplicate; } @Override public String toString() { return "MetadataRequestData(" + "topics=" + ((topics == null) ? "null" : MessageUtil.deepToString(topics.iterator())) + ", allowAutoTopicCreation=" + (allowAutoTopicCreation ? "true" : "false") + ", includeClusterAuthorizedOperations=" + (includeClusterAuthorizedOperations ? "true" : "false") + ", includeTopicAuthorizedOperations=" + (includeTopicAuthorizedOperations ? "true" : "false") + ")"; }
// Accessors, lazy tagged-field list, and fluent setters.
public List<MetadataRequestTopic> topics() { return this.topics; } public boolean allowAutoTopicCreation() { return this.allowAutoTopicCreation; } public boolean includeClusterAuthorizedOperations() { return this.includeClusterAuthorizedOperations; } public boolean includeTopicAuthorizedOperations() { return this.includeTopicAuthorizedOperations; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public MetadataRequestData setTopics(List<MetadataRequestTopic> v) { this.topics = v; return this; } public MetadataRequestData setAllowAutoTopicCreation(boolean v) { this.allowAutoTopicCreation = v; return this; } public MetadataRequestData setIncludeClusterAuthorizedOperations(boolean v) { this.includeClusterAuthorizedOperations = v; return this; } public MetadataRequestData setIncludeTopicAuthorizedOperations(boolean v) { this.includeTopicAuthorizedOperations = v; return this; }
// Nested generated message: one requested topic. name is a plain STRING through v8, COMPACT_STRING at v9,
// and COMPACT_NULLABLE_STRING from v10 (where topic_id is also added, so a topic may be addressed by id with name=null).
public static class MetadataRequestTopic implements Message { Uuid topicId; String name; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The topic name.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = new Schema( new Field("name", Type.COMPACT_STRING, "The topic name."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_10 = new Schema( new Field("topic_id", Type.UUID, "The topic id."), new Field("name", Type.COMPACT_NULLABLE_STRING, "The topic name."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_11 = SCHEMA_10; public static final Schema SCHEMA_12 = SCHEMA_11; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 12; public MetadataRequestTopic(Readable _readable, short _version) { read(_readable, _version); } public MetadataRequestTopic() { this.topicId = Uuid.ZERO_UUID; this.name = ""; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 12; }
// Decode: topicId only exists at v10+ (defaults to ZERO_UUID otherwise); name length is varint-1 at v9+ vs. 2-byte
// short before, and a negative length (null) is accepted only at v10+.
@Override public void read(Readable _readable, short _version) { if (_version > 12) { throw new UnsupportedVersionException("Can't read version " + _version + " of MetadataRequestTopic"); } if (_version >= 10) { this.topicId = _readable.readUuid(); } else { this.topicId = Uuid.ZERO_UUID; } { int length; if (_version >= 9) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { if (_version >= 10) { this.name = null; } else { throw new RuntimeException("non-nullable field name was serialized as null"); } } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } this._unknownTaggedFields = null; if (_version >= 9) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } }
// Encode, mirror of read(); relies on addSize() having cached the UTF-8 bytes of name in _cache.
@Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 10) { _writable.writeUuid(topicId); } if (name == null) { if (_version >= 10) { _writable.writeUnsignedVarint(0); } else { throw new NullPointerException(); } } else { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 9) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 9) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } }
// Size pre-pass: 16 bytes for the UUID at v10+, varint vs. 2-byte framing for name, then tagged-field overhead.
@Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 12) { throw new UnsupportedVersionException("Can't size version " + _version + " of MetadataRequestTopic"); } if (_version >= 10) { _size.addBytes(16); } if (name == null) { if (_version >= 9) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); if (_version >= 9) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } }
// Value semantics, copy, debug string, accessors, and fluent setters for the nested topic entry.
@Override public boolean equals(Object obj) { if (!(obj instanceof MetadataRequestTopic)) return false; MetadataRequestTopic other = (MetadataRequestTopic) obj; if (!this.topicId.equals(other.topicId)) return false; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + topicId.hashCode(); hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); return hashCode; } @Override public MetadataRequestTopic duplicate() { MetadataRequestTopic _duplicate = new MetadataRequestTopic(); _duplicate.topicId = topicId; if (name == null) { _duplicate.name = null; } else { _duplicate.name = name; } return _duplicate; } @Override public String toString() { return "MetadataRequestTopic(" + "topicId=" + topicId.toString() + ", name=" + ((name == null) ? "null" : "'" + name.toString() + "'") + ")"; } public Uuid topicId() { return this.topicId; } public String name() { return this.name; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public MetadataRequestTopic setTopicId(Uuid v) { this.topicId = v; return this; } public MetadataRequestTopic setName(String v) { this.name = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/MetadataRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.UnsupportedVersionException;

import static org.apache.kafka.common.message.MetadataRequestData.*;

/**
 * Converts {@link MetadataRequestData} to and from a Jackson JSON tree.
 *
 * <p>Field presence is version-gated to mirror the wire protocol:
 * {@code allowAutoTopicCreation} exists in versions 4+,
 * {@code includeClusterAuthorizedOperations} only in versions 8-10, and
 * {@code includeTopicAuthorizedOperations} in versions 8+.  Generated code;
 * edit the message JSON spec, not this file.
 */
public class MetadataRequestDataJsonConverter {
    /**
     * Deserializes a {@code MetadataRequestData} from {@code _node}.
     *
     * @param _node    the JSON object to read from
     * @param _version the protocol version governing which fields are mandatory
     * @return the populated message object
     * @throws RuntimeException if a version-mandatory field is missing or a field
     *         has the wrong JSON type
     */
    public static MetadataRequestData read(JsonNode _node, short _version) {
        MetadataRequestData _object = new MetadataRequestData();
        // "topics" is mandatory in every version, but may be an explicit JSON null
        // (null topics means "fetch metadata for all topics").
        JsonNode _topicsNode = _node.get("topics");
        if (_topicsNode == null) {
            throw new RuntimeException("MetadataRequestData: unable to locate field 'topics', which is mandatory in version " + _version);
        } else {
            if (_topicsNode.isNull()) {
                _object.topics = null;
            } else {
                if (!_topicsNode.isArray()) {
                    // NOTE(review): the message embeds the parent _node's type, not
                    // _topicsNode's type — a quirk of the generator, kept as-is.
                    throw new RuntimeException("MetadataRequestData expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<MetadataRequestTopic> _collection = new ArrayList<MetadataRequestTopic>(_topicsNode.size());
                _object.topics = _collection;
                for (JsonNode _element : _topicsNode) {
                    _collection.add(MetadataRequestTopicJsonConverter.read(_element, _version));
                }
            }
        }
        // "allowAutoTopicCreation": mandatory from version 4; earlier versions
        // default to true.
        JsonNode _allowAutoTopicCreationNode = _node.get("allowAutoTopicCreation");
        if (_allowAutoTopicCreationNode == null) {
            if (_version >= 4) {
                throw new RuntimeException("MetadataRequestData: unable to locate field 'allowAutoTopicCreation', which is mandatory in version " + _version);
            } else {
                _object.allowAutoTopicCreation = true;
            }
        } else {
            if (!_allowAutoTopicCreationNode.isBoolean()) {
                throw new RuntimeException("MetadataRequestData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.allowAutoTopicCreation = _allowAutoTopicCreationNode.asBoolean();
        }
        // "includeClusterAuthorizedOperations": only exists in versions 8-10
        // (removed in 11); defaults to false outside that range.
        JsonNode _includeClusterAuthorizedOperationsNode = _node.get("includeClusterAuthorizedOperations");
        if (_includeClusterAuthorizedOperationsNode == null) {
            if ((_version >= 8) && (_version <= 10)) {
                throw new RuntimeException("MetadataRequestData: unable to locate field 'includeClusterAuthorizedOperations', which is mandatory in version " + _version);
            } else {
                _object.includeClusterAuthorizedOperations = false;
            }
        } else {
            if (!_includeClusterAuthorizedOperationsNode.isBoolean()) {
                throw new RuntimeException("MetadataRequestData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.includeClusterAuthorizedOperations = _includeClusterAuthorizedOperationsNode.asBoolean();
        }
        // "includeTopicAuthorizedOperations": mandatory from version 8; defaults
        // to false before that.
        JsonNode _includeTopicAuthorizedOperationsNode = _node.get("includeTopicAuthorizedOperations");
        if (_includeTopicAuthorizedOperationsNode == null) {
            if (_version >= 8) {
                throw new RuntimeException("MetadataRequestData: unable to locate field 'includeTopicAuthorizedOperations', which is mandatory in version " + _version);
            } else {
                _object.includeTopicAuthorizedOperations = false;
            }
        } else {
            if (!_includeTopicAuthorizedOperationsNode.isBoolean()) {
                throw new RuntimeException("MetadataRequestData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.includeTopicAuthorizedOperations = _includeTopicAuthorizedOperationsNode.asBoolean();
        }
        return _object;
    }

    /**
     * Serializes {@code _object} into a JSON object node.
     *
     * @param _object           the message to serialize
     * @param _version          the protocol version; fields outside their version
     *                          range must hold their default value
     * @param _serializeRecords forwarded to nested converters (unused by the
     *                          fields of this message itself)
     * @return the resulting JSON tree
     * @throws UnsupportedVersionException if a non-default value is set on a field
     *         that does not exist in {@code _version}
     */
    public static JsonNode write(MetadataRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        // A null topics list is serialized as an explicit JSON null.
        if (_object.topics == null) {
            _node.set("topics", NullNode.instance);
        } else {
            ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
            for (MetadataRequestTopic _element : _object.topics) {
                _topicsArray.add(MetadataRequestTopicJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("topics", _topicsArray);
        }
        if (_version >= 4) {
            _node.set("allowAutoTopicCreation", BooleanNode.valueOf(_object.allowAutoTopicCreation));
        } else {
            // Pre-v4 has no such field on the wire, so any non-default value
            // would be silently lost — fail loudly instead.
            if (!_object.allowAutoTopicCreation) {
                throw new UnsupportedVersionException("Attempted to write a non-default allowAutoTopicCreation at version " + _version);
            }
        }
        if ((_version >= 8) && (_version <= 10)) {
            _node.set("includeClusterAuthorizedOperations", BooleanNode.valueOf(_object.includeClusterAuthorizedOperations));
        } else {
            if (_object.includeClusterAuthorizedOperations) {
                throw new UnsupportedVersionException("Attempted to write a non-default includeClusterAuthorizedOperations at version " + _version);
            }
        }
        if (_version >= 8) {
            _node.set("includeTopicAuthorizedOperations", BooleanNode.valueOf(_object.includeTopicAuthorizedOperations));
        } else {
            if (_object.includeTopicAuthorizedOperations) {
                throw new UnsupportedVersionException("Attempted to write a non-default includeTopicAuthorizedOperations at version " + _version);
            }
        }
        return _node;
    }

    /** Convenience overload of {@link #write(MetadataRequestData, short, boolean)} with record serialization enabled. */
    public static JsonNode write(MetadataRequestData _object, short _version) {
        return write(_object, _version, true);
    }

    /**
     * JSON converter for the nested {@link MetadataRequestTopic} struct.
     * {@code topicId} only exists in versions 10+; {@code name} is nullable.
     */
    public static class MetadataRequestTopicJsonConverter {
        /**
         * Deserializes one topic entry.
         *
         * @throws RuntimeException if a version-mandatory field is missing or
         *         mistyped
         */
        public static MetadataRequestTopic read(JsonNode _node, short _version) {
            MetadataRequestTopic _object = new MetadataRequestTopic();
            // "topicId": mandatory from version 10; earlier versions default to
            // the all-zero UUID.
            JsonNode _topicIdNode = _node.get("topicId");
            if (_topicIdNode == null) {
                if (_version >= 10) {
                    throw new RuntimeException("MetadataRequestTopic: unable to locate field 'topicId', which is mandatory in version " + _version);
                } else {
                    _object.topicId = Uuid.ZERO_UUID;
                }
            } else {
                if (!_topicIdNode.isTextual()) {
                    throw new RuntimeException("MetadataRequestTopic expected a JSON string type, but got " + _node.getNodeType());
                }
                _object.topicId = Uuid.fromString(_topicIdNode.asText());
            }
            // "name" is mandatory in every version but may be an explicit null
            // (topic identified by topicId only).
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("MetadataRequestTopic: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (_nameNode.isNull()) {
                    _object.name = null;
                } else {
                    if (!_nameNode.isTextual()) {
                        throw new RuntimeException("MetadataRequestTopic expected a string type, but got " + _node.getNodeType());
                    }
                    _object.name = _nameNode.asText();
                }
            }
            return _object;
        }

        /**
         * Serializes one topic entry; {@code topicId} is emitted only for
         * versions 10+, and a null {@code name} becomes a JSON null.
         */
        public static JsonNode write(MetadataRequestTopic _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            if (_version >= 10) {
                _node.set("topicId", new TextNode(_object.topicId.toString()));
            }
            if (_object.name == null) {
                _node.set("name", NullNode.instance);
            } else {
                _node.set("name", new TextNode(_object.name));
            }
            return _node;
        }

        /** Convenience overload with record serialization enabled. */
        public static JsonNode write(MetadataRequestTopic _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/MetadataResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import 
org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class MetadataResponseData implements ApiMessage { int throttleTimeMs; MetadataResponseBrokerCollection brokers; String clusterId; int controllerId; MetadataResponseTopicCollection topics; int clusterAuthorizedOperations; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("brokers", new ArrayOf(MetadataResponseBroker.SCHEMA_0), "Each broker in the response."), new Field("topics", new ArrayOf(MetadataResponseTopic.SCHEMA_0), "Each topic in the response.") ); public static final Schema SCHEMA_1 = new Schema( new Field("brokers", new ArrayOf(MetadataResponseBroker.SCHEMA_1), "Each broker in the response."), new Field("controller_id", Type.INT32, "The ID of the controller broker."), new Field("topics", new ArrayOf(MetadataResponseTopic.SCHEMA_1), "Each topic in the response.") ); public static final Schema SCHEMA_2 = new Schema( new Field("brokers", new ArrayOf(MetadataResponseBroker.SCHEMA_1), "Each broker in the response."), new Field("cluster_id", Type.NULLABLE_STRING, "The cluster ID that responding broker belongs to."), new Field("controller_id", Type.INT32, "The ID of the controller broker."), new Field("topics", new ArrayOf(MetadataResponseTopic.SCHEMA_1), "Each topic in the response.") ); public static final Schema SCHEMA_3 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("brokers", new ArrayOf(MetadataResponseBroker.SCHEMA_1), "Each broker in the response."), new Field("cluster_id", Type.NULLABLE_STRING, "The cluster ID that responding broker belongs to."), new Field("controller_id", Type.INT32, "The ID of the controller 
broker."), new Field("topics", new ArrayOf(MetadataResponseTopic.SCHEMA_1), "Each topic in the response.") ); public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("brokers", new ArrayOf(MetadataResponseBroker.SCHEMA_1), "Each broker in the response."), new Field("cluster_id", Type.NULLABLE_STRING, "The cluster ID that responding broker belongs to."), new Field("controller_id", Type.INT32, "The ID of the controller broker."), new Field("topics", new ArrayOf(MetadataResponseTopic.SCHEMA_5), "Each topic in the response.") ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("brokers", new ArrayOf(MetadataResponseBroker.SCHEMA_1), "Each broker in the response."), new Field("cluster_id", Type.NULLABLE_STRING, "The cluster ID that responding broker belongs to."), new Field("controller_id", Type.INT32, "The ID of the controller broker."), new Field("topics", new ArrayOf(MetadataResponseTopic.SCHEMA_7), "Each topic in the response.") ); public static final Schema SCHEMA_8 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("brokers", new ArrayOf(MetadataResponseBroker.SCHEMA_1), "Each broker in the response."), new Field("cluster_id", Type.NULLABLE_STRING, "The cluster ID that responding broker belongs to."), new Field("controller_id", Type.INT32, "The ID of the controller broker."), new Field("topics", new 
ArrayOf(MetadataResponseTopic.SCHEMA_8), "Each topic in the response."), new Field("cluster_authorized_operations", Type.INT32, "32-bit bitfield to represent authorized operations for this cluster.") ); public static final Schema SCHEMA_9 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("brokers", new CompactArrayOf(MetadataResponseBroker.SCHEMA_9), "Each broker in the response."), new Field("cluster_id", Type.COMPACT_NULLABLE_STRING, "The cluster ID that responding broker belongs to."), new Field("controller_id", Type.INT32, "The ID of the controller broker."), new Field("topics", new CompactArrayOf(MetadataResponseTopic.SCHEMA_9), "Each topic in the response."), new Field("cluster_authorized_operations", Type.INT32, "32-bit bitfield to represent authorized operations for this cluster."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_10 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("brokers", new CompactArrayOf(MetadataResponseBroker.SCHEMA_9), "Each broker in the response."), new Field("cluster_id", Type.COMPACT_NULLABLE_STRING, "The cluster ID that responding broker belongs to."), new Field("controller_id", Type.INT32, "The ID of the controller broker."), new Field("topics", new CompactArrayOf(MetadataResponseTopic.SCHEMA_10), "Each topic in the response."), new Field("cluster_authorized_operations", Type.INT32, "32-bit bitfield to represent authorized operations for this cluster."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_11 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not 
violate any quota."), new Field("brokers", new CompactArrayOf(MetadataResponseBroker.SCHEMA_9), "Each broker in the response."), new Field("cluster_id", Type.COMPACT_NULLABLE_STRING, "The cluster ID that responding broker belongs to."), new Field("controller_id", Type.INT32, "The ID of the controller broker."), new Field("topics", new CompactArrayOf(MetadataResponseTopic.SCHEMA_10), "Each topic in the response."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_12 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("brokers", new CompactArrayOf(MetadataResponseBroker.SCHEMA_9), "Each broker in the response."), new Field("cluster_id", Type.COMPACT_NULLABLE_STRING, "The cluster ID that responding broker belongs to."), new Field("controller_id", Type.INT32, "The ID of the controller broker."), new Field("topics", new CompactArrayOf(MetadataResponseTopic.SCHEMA_12), "Each topic in the response."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 12; public MetadataResponseData(Readable _readable, short _version) { read(_readable, _version); } public MetadataResponseData() { this.throttleTimeMs = 0; this.brokers = new MetadataResponseBrokerCollection(0); this.clusterId = null; this.controllerId = -1; this.topics = new MetadataResponseTopicCollection(0); this.clusterAuthorizedOperations = -2147483648; } @Override public short apiKey() { return 3; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 12; } @Override public void read(Readable _readable, short 
_version) { if (_version >= 3) { this.throttleTimeMs = _readable.readInt(); } else { this.throttleTimeMs = 0; } { if (_version >= 9) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field brokers was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } MetadataResponseBrokerCollection newCollection = new MetadataResponseBrokerCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new MetadataResponseBroker(_readable, _version)); } this.brokers = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field brokers was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } MetadataResponseBrokerCollection newCollection = new MetadataResponseBrokerCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new MetadataResponseBroker(_readable, _version)); } this.brokers = newCollection; } } } if (_version >= 2) { int length; if (_version >= 9) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.clusterId = null; } else if (length > 0x7fff) { throw new RuntimeException("string field clusterId had invalid length " + length); } else { this.clusterId = _readable.readString(length); } } else { this.clusterId = null; } if (_version >= 1) { this.controllerId = _readable.readInt(); } else { this.controllerId = -1; } { if (_version >= 9) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new 
RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } MetadataResponseTopicCollection newCollection = new MetadataResponseTopicCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new MetadataResponseTopic(_readable, _version)); } this.topics = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } MetadataResponseTopicCollection newCollection = new MetadataResponseTopicCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new MetadataResponseTopic(_readable, _version)); } this.topics = newCollection; } } } if ((_version >= 8) && (_version <= 10)) { this.clusterAuthorizedOperations = _readable.readInt(); } else { this.clusterAuthorizedOperations = -2147483648; } this._unknownTaggedFields = null; if (_version >= 9) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 3) { _writable.writeInt(throttleTimeMs); } if (_version >= 9) { _writable.writeUnsignedVarint(brokers.size() + 1); for (MetadataResponseBroker brokersElement : brokers) { 
brokersElement.write(_writable, _cache, _version); } } else { _writable.writeInt(brokers.size()); for (MetadataResponseBroker brokersElement : brokers) { brokersElement.write(_writable, _cache, _version); } } if (_version >= 2) { if (clusterId == null) { if (_version >= 9) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(clusterId); if (_version >= 9) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } if (_version >= 1) { _writable.writeInt(controllerId); } if (_version >= 9) { _writable.writeUnsignedVarint(topics.size() + 1); for (MetadataResponseTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(topics.size()); for (MetadataResponseTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } if ((_version >= 8) && (_version <= 10)) { _writable.writeInt(clusterAuthorizedOperations); } else { if (this.clusterAuthorizedOperations != -2147483648) { throw new UnsupportedVersionException("Attempted to write a non-default clusterAuthorizedOperations at version " + _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 9) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 3) { _size.addBytes(4); } { if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(brokers.size() + 1)); } else { 
_size.addBytes(4); } for (MetadataResponseBroker brokersElement : brokers) { brokersElement.addSize(_size, _cache, _version); } } if (_version >= 2) { if (clusterId == null) { if (_version >= 9) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = clusterId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'clusterId' field is too long to be serialized"); } _cache.cacheSerializedValue(clusterId, _stringBytes); if (_version >= 9) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } if (_version >= 1) { _size.addBytes(4); } { if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); } else { _size.addBytes(4); } for (MetadataResponseTopic topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if ((_version >= 8) && (_version <= 10)) { _size.addBytes(4); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof MetadataResponseData)) return false; MetadataResponseData other = (MetadataResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (this.brokers == null) { if (other.brokers != null) return false; } else { if (!this.brokers.equals(other.brokers)) return false; } if (this.clusterId == null) { if (other.clusterId != null) return false; } else { if 
(!this.clusterId.equals(other.clusterId)) return false; } if (controllerId != other.controllerId) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } if (clusterAuthorizedOperations != other.clusterAuthorizedOperations) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + (brokers == null ? 0 : brokers.hashCode()); hashCode = 31 * hashCode + (clusterId == null ? 0 : clusterId.hashCode()); hashCode = 31 * hashCode + controllerId; hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); hashCode = 31 * hashCode + clusterAuthorizedOperations; return hashCode; } @Override public MetadataResponseData duplicate() { MetadataResponseData _duplicate = new MetadataResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; MetadataResponseBrokerCollection newBrokers = new MetadataResponseBrokerCollection(brokers.size()); for (MetadataResponseBroker _element : brokers) { newBrokers.add(_element.duplicate()); } _duplicate.brokers = newBrokers; if (clusterId == null) { _duplicate.clusterId = null; } else { _duplicate.clusterId = clusterId; } _duplicate.controllerId = controllerId; MetadataResponseTopicCollection newTopics = new MetadataResponseTopicCollection(topics.size()); for (MetadataResponseTopic _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; _duplicate.clusterAuthorizedOperations = clusterAuthorizedOperations; return _duplicate; } @Override public String toString() { return "MetadataResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", brokers=" + MessageUtil.deepToString(brokers.iterator()) + ", clusterId=" + ((clusterId == null) ? 
"null" : "'" + clusterId.toString() + "'") + ", controllerId=" + controllerId + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ", clusterAuthorizedOperations=" + clusterAuthorizedOperations + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public MetadataResponseBrokerCollection brokers() { return this.brokers; } public String clusterId() { return this.clusterId; } public int controllerId() { return this.controllerId; } public MetadataResponseTopicCollection topics() { return this.topics; } public int clusterAuthorizedOperations() { return this.clusterAuthorizedOperations; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public MetadataResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public MetadataResponseData setBrokers(MetadataResponseBrokerCollection v) { this.brokers = v; return this; } public MetadataResponseData setClusterId(String v) { this.clusterId = v; return this; } public MetadataResponseData setControllerId(int v) { this.controllerId = v; return this; } public MetadataResponseData setTopics(MetadataResponseTopicCollection v) { this.topics = v; return this; } public MetadataResponseData setClusterAuthorizedOperations(int v) { this.clusterAuthorizedOperations = v; return this; } public static class MetadataResponseBroker implements Message, ImplicitLinkedHashMultiCollection.Element { int nodeId; String host; int port; String rack; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("node_id", Type.INT32, "The broker ID."), new Field("host", Type.STRING, "The broker hostname."), new Field("port", Type.INT32, "The broker port.") ); public static final Schema SCHEMA_1 = new Schema( new Field("node_id", Type.INT32, "The broker ID."), new Field("host", Type.STRING, "The broker 
hostname."), new Field("port", Type.INT32, "The broker port."), new Field("rack", Type.NULLABLE_STRING, "The rack of the broker, or null if it has not been assigned to a rack.") ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = new Schema( new Field("node_id", Type.INT32, "The broker ID."), new Field("host", Type.COMPACT_STRING, "The broker hostname."), new Field("port", Type.INT32, "The broker port."), new Field("rack", Type.COMPACT_NULLABLE_STRING, "The rack of the broker, or null if it has not been assigned to a rack."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_10 = SCHEMA_9; public static final Schema SCHEMA_11 = SCHEMA_10; public static final Schema SCHEMA_12 = SCHEMA_11; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 12; public MetadataResponseBroker(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public MetadataResponseBroker() { this.nodeId = 0; this.host = ""; this.port = 0; this.rack = null; this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 12; } @Override public void read(Readable _readable, short _version) { if (_version > 12) { throw new 
// NOTE(review): auto-generated Kafka protocol code (MetadataResponseData.MetadataResponseBroker).
// The class header and the start of read() lie above this chunk. Only comments are added here;
// every code token is unchanged. This region finishes read() — decoding nodeId, host (compact
// varint-length string for version >= 9, int16 length before; protocol strings are capped at
// 0x7fff bytes), port, the nullable rack field (version >= 1), and the tagged-field section
// (version >= 9) — then defines write(), which serializes the same fields with the mirrored
// version-dependent layout.
UnsupportedVersionException("Can't read version " + _version + " of MetadataResponseBroker"); } this.nodeId = _readable.readInt(); { int length; if (_version >= 9) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field host was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field host had invalid length " + length); } else { this.host = _readable.readString(length); } } this.port = _readable.readInt(); if (_version >= 1) { int length; if (_version >= 9) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.rack = null; } else if (length > 0x7fff) { throw new RuntimeException("string field rack had invalid length " + length); } else { this.rack = _readable.readString(length); } } else { this.rack = null; } this._unknownTaggedFields = null; if (_version >= 9) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(nodeId); { byte[] _stringBytes = _cache.getSerializedValue(host); if (_version >= 9) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } _writable.writeInt(port); if (_version >= 1) { if (rack == null) { if (_version >= 9) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(rack); if (_version >= 9) { _writable.writeUnsignedVarint(_stringBytes.length + 1);
// (write() continues: non-compact length for rack below; then tagged fields, which are only
// legal to emit for version >= 9 — older versions throw if any tagged field was set.)
// addSize() follows: it pre-computes the serialized byte count with the same version-dependent
// layout as write(), caching UTF-8 encodings of host/rack in the ObjectSerializationCache so
// write() can reuse them via getSerializedValue().
} else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 9) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 12) { throw new UnsupportedVersionException("Can't size version " + _version + " of MetadataResponseBroker"); } _size.addBytes(4); { byte[] _stringBytes = host.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'host' field is too long to be serialized"); } _cache.cacheSerializedValue(host, _stringBytes); if (_version >= 9) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } _size.addBytes(4); if (_version >= 1) { if (rack == null) { if (_version >= 9) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = rack.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'rack' field is too long to be serialized"); } _cache.cacheSerializedValue(rack, _stringBytes); if (_version >= 9) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
// (tagged-field sizing continues.) After addSize(): elementKeysAreEqual() compares only the
// collection key (nodeId) — this is the lookup semantics used by the enclosing
// ImplicitLinkedHashMultiCollection — while equals() compares every field including unknown
// tagged fields; hashCode() likewise hashes only nodeId, matching the key-only semantics.
// duplicate() is a shallow per-field copy (String fields are immutable, so sharing is safe).
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof MetadataResponseBroker)) return false; MetadataResponseBroker other = (MetadataResponseBroker) obj; if (nodeId != other.nodeId) return false; return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof MetadataResponseBroker)) return false; MetadataResponseBroker other = (MetadataResponseBroker) obj; if (nodeId != other.nodeId) return false; if (this.host == null) { if (other.host != null) return false; } else { if (!this.host.equals(other.host)) return false; } if (port != other.port) return false; if (this.rack == null) { if (other.rack != null) return false; } else { if (!this.rack.equals(other.rack)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + nodeId; return hashCode; } @Override public MetadataResponseBroker duplicate() { MetadataResponseBroker _duplicate = new MetadataResponseBroker(); _duplicate.nodeId = nodeId; _duplicate.host = host; _duplicate.port = port; if (rack == null) { _duplicate.rack = null; } else { _duplicate.rack = rack; } return _duplicate; } @Override public String toString() { return "MetadataResponseBroker(" + "nodeId=" + nodeId + ", host=" + ((host == null) ? "null" : "'" + host.toString() + "'") + ", port=" + port + ", rack=" + ((rack == null) ?
// NOTE(review): auto-generated Kafka protocol code. This region closes out
// MetadataResponseBroker (toString tail, accessors, fluent setters, linked-collection
// next/prev hooks), defines MetadataResponseBrokerCollection (an
// ImplicitLinkedHashMultiCollection keyed by nodeId via find/findAll), and begins
// MetadataResponseTopic. Only comments are added; every code token is unchanged.
"null" : "'" + rack.toString() + "'") + ")"; } public int nodeId() { return this.nodeId; } public String host() { return this.host; } public int port() { return this.port; } public String rack() { return this.rack; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public MetadataResponseBroker setNodeId(int v) { this.nodeId = v; return this; } public MetadataResponseBroker setHost(String v) { this.host = v; return this; } public MetadataResponseBroker setPort(int v) { this.port = v; return this; } public MetadataResponseBroker setRack(String v) { this.rack = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class MetadataResponseBrokerCollection extends ImplicitLinkedHashMultiCollection<MetadataResponseBroker> { public MetadataResponseBrokerCollection() { super(); } public MetadataResponseBrokerCollection(int expectedNumElements) { super(expectedNumElements); } public MetadataResponseBrokerCollection(Iterator<MetadataResponseBroker> iterator) { super(iterator); } public MetadataResponseBroker find(int nodeId) { MetadataResponseBroker _key = new MetadataResponseBroker(); _key.setNodeId(nodeId); return find(_key); } public List<MetadataResponseBroker> findAll(int nodeId) { MetadataResponseBroker _key = new MetadataResponseBroker(); _key.setNodeId(nodeId); return findAll(_key); } public MetadataResponseBrokerCollection duplicate() { MetadataResponseBrokerCollection _duplicate = new MetadataResponseBrokerCollection(size()); for (MetadataResponseBroker _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } public static class MetadataResponseTopic implements Message, ImplicitLinkedHashMultiCollection.Element { short errorCode;
// MetadataResponseTopic fields and per-version schemas. Fields evolve with the protocol:
// is_internal appears at v1, topic_authorized_operations at v8, compact (varint) encodings
// and a tagged-fields section at v9, topic_id (UUID) at v10, and a nullable name at v12
// (COMPACT_NULLABLE_STRING in SCHEMA_12 below).
String name; Uuid topicId; boolean isInternal; List<MetadataResponsePartition> partitions; int topicAuthorizedOperations; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("error_code", Type.INT16, "The topic error, or 0 if there was no error."), new Field("name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(MetadataResponsePartition.SCHEMA_0), "Each partition in the topic.") ); public static final Schema SCHEMA_1 = new Schema( new Field("error_code", Type.INT16, "The topic error, or 0 if there was no error."), new Field("name", Type.STRING, "The topic name."), new Field("is_internal", Type.BOOLEAN, "True if the topic is internal."), new Field("partitions", new ArrayOf(MetadataResponsePartition.SCHEMA_0), "Each partition in the topic.") ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("error_code", Type.INT16, "The topic error, or 0 if there was no error."), new Field("name", Type.STRING, "The topic name."), new Field("is_internal", Type.BOOLEAN, "True if the topic is internal."), new Field("partitions", new ArrayOf(MetadataResponsePartition.SCHEMA_5), "Each partition in the topic.") ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = new Schema( new Field("error_code", Type.INT16, "The topic error, or 0 if there was no error."), new Field("name", Type.STRING, "The topic name."), new Field("is_internal", Type.BOOLEAN, "True if the topic is internal."), new Field("partitions", new ArrayOf(MetadataResponsePartition.SCHEMA_7), "Each partition in the topic.") ); public static final Schema SCHEMA_8 = new Schema( new Field("error_code", Type.INT16, "The topic error, or 0 if there was no error."), new Field("name", Type.STRING, "The topic name."), new Field("is_internal", Type.BOOLEAN, "True if the topic is internal."), new Field("partitions", new ArrayOf(MetadataResponsePartition.SCHEMA_7), "Each partition in the topic."), new Field("topic_authorized_operations", Type.INT32, "32-bit bitfield to represent authorized operations for this topic.") ); public static final Schema SCHEMA_9 = new Schema( new Field("error_code", Type.INT16, "The topic error, or 0 if there was no error."), new Field("name", Type.COMPACT_STRING, "The topic name."), new Field("is_internal", Type.BOOLEAN, "True if the topic is internal."), new Field("partitions", new CompactArrayOf(MetadataResponsePartition.SCHEMA_9), "Each partition in the topic."), new Field("topic_authorized_operations", Type.INT32, "32-bit bitfield to represent authorized operations for this topic."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_10 = new Schema( new Field("error_code", Type.INT16, "The topic error, or 0 if there was no error."), new Field("name", Type.COMPACT_STRING, "The topic name."), new Field("topic_id", Type.UUID, "The topic id."), new Field("is_internal", Type.BOOLEAN, "True if the topic is internal."), new Field("partitions", new CompactArrayOf(MetadataResponsePartition.SCHEMA_9), "Each partition in the topic."), new Field("topic_authorized_operations", Type.INT32, "32-bit bitfield to represent authorized operations for this topic."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_11 = SCHEMA_10; public static final Schema SCHEMA_12 = new Schema( new Field("error_code", Type.INT16, "The topic error, or 0 if there was no error."), new Field("name", Type.COMPACT_NULLABLE_STRING, "The topic name."), new Field("topic_id", Type.UUID, "The topic id."), new Field("is_internal", Type.BOOLEAN, "True if the topic is internal."), new Field("partitions", new CompactArrayOf(MetadataResponsePartition.SCHEMA_9), "Each partition in the topic."), new Field("topic_authorized_operations", Type.INT32, "32-bit bitfield to represent authorized operations for this topic."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 12; public MetadataResponseTopic(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public MetadataResponseTopic() { this.errorCode = (short) 0; this.name = ""; this.topicId = Uuid.ZERO_UUID; this.isInternal = false; this.partitions = new ArrayList<MetadataResponsePartition>(0); this.topicAuthorizedOperations = -2147483648; this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 12; } @Override public void read(Readable _readable, short _version) { if (_version > 12) { throw new UnsupportedVersionException("Can't read version " + _version + " of MetadataResponseTopic"); } this.errorCode = _readable.readShort(); { int length; if (_version >= 9) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { if (_version >= 12) { this.name = null; } else { throw new RuntimeException("non-nullable field name was serialized as null"); } } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } if (_version >= 10) { this.topicId = _readable.readUuid(); } else { this.topicId = Uuid.ZERO_UUID; } if (_version >= 1) { this.isInternal = _readable.readByte() != 0; } else { this.isInternal = false; } { if (_version >= 9) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<MetadataResponsePartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new MetadataResponsePartition(_readable, _version)); } this.partitions = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<MetadataResponsePartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new MetadataResponsePartition(_readable, _version)); } this.partitions = newCollection; } } } if (_version >= 8) { this.topicAuthorizedOperations = _readable.readInt(); } else { this.topicAuthorizedOperations = -2147483648; } this._unknownTaggedFields = null; if (_version >= 9) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeShort(errorCode); if (name == null) { if (_version >= 12) { _writable.writeUnsignedVarint(0); } else { throw new NullPointerException(); } }
// write() continues: UTF-8 name bytes (cached by addSize), topicId for v10+, is_internal for
// v1+, the partitions array (compact length for v9+), topicAuthorizedOperations for v8+
// (writing a non-default value at older versions is an error), then tagged fields (v9+ only).
else { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 9) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 10) { _writable.writeUuid(topicId); } if (_version >= 1) { _writable.writeByte(isInternal ? (byte) 1 : (byte) 0); } if (_version >= 9) { _writable.writeUnsignedVarint(partitions.size() + 1); for (MetadataResponsePartition partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitions.size()); for (MetadataResponsePartition partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } if (_version >= 8) { _writable.writeInt(topicAuthorizedOperations); } else { if (this.topicAuthorizedOperations != -2147483648) { throw new UnsupportedVersionException("Attempted to write a non-default topicAuthorizedOperations at version " + _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 9) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 12) { throw new UnsupportedVersionException("Can't size version " + _version + " of MetadataResponseTopic"); } _size.addBytes(2); if (name == null) { if (_version >= 9) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); }
// addSize() continues, mirroring write()'s layout; then the identity boilerplate:
// elementKeysAreEqual()/hashCode() use only the collection key (name), equals() compares
// every field including unknown tagged fields, and duplicate() deep-copies partitions.
_cache.cacheSerializedValue(name, _stringBytes); if (_version >= 9) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_version >= 10) { _size.addBytes(16); } if (_version >= 1) { _size.addBytes(1); } { if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { _size.addBytes(4); } for (MetadataResponsePartition partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_version >= 8) { _size.addBytes(4); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof MetadataResponseTopic)) return false; MetadataResponseTopic other = (MetadataResponseTopic) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof MetadataResponseTopic)) return false; MetadataResponseTopic other = (MetadataResponseTopic) obj; if (errorCode != other.errorCode) return false; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (!this.topicId.equals(other.topicId)) return false; if (isInternal != other.isInternal) return false; if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } if (topicAuthorizedOperations != other.topicAuthorizedOperations) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); return hashCode; } @Override public MetadataResponseTopic duplicate() { MetadataResponseTopic _duplicate = new MetadataResponseTopic(); _duplicate.errorCode = errorCode; if (name == null) { _duplicate.name = null; } else { _duplicate.name = name; } _duplicate.topicId = topicId; _duplicate.isInternal = isInternal; ArrayList<MetadataResponsePartition> newPartitions = new ArrayList<MetadataResponsePartition>(partitions.size()); for (MetadataResponsePartition _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; _duplicate.topicAuthorizedOperations = topicAuthorizedOperations; return _duplicate; } @Override public String toString() { return "MetadataResponseTopic(" + "errorCode=" + errorCode + ", name=" + ((name == null) ? "null" : "'" + name.toString() + "'") + ", topicId=" + topicId.toString() + ", isInternal=" + (isInternal ?
// NOTE(review): auto-generated Kafka protocol code. This region closes out
// MetadataResponseTopic (toString tail, accessors, fluent setters, next/prev hooks), defines
// MetadataResponsePartition in full, and begins MetadataResponseTopicCollection (whose body
// continues past this chunk). Only comments are added; every code token is unchanged.
"true" : "false") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ", topicAuthorizedOperations=" + topicAuthorizedOperations + ")"; } public short errorCode() { return this.errorCode; } public String name() { return this.name; } public Uuid topicId() { return this.topicId; } public boolean isInternal() { return this.isInternal; } public List<MetadataResponsePartition> partitions() { return this.partitions; } public int topicAuthorizedOperations() { return this.topicAuthorizedOperations; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public MetadataResponseTopic setErrorCode(short v) { this.errorCode = v; return this; } public MetadataResponseTopic setName(String v) { this.name = v; return this; } public MetadataResponseTopic setTopicId(Uuid v) { this.topicId = v; return this; } public MetadataResponseTopic setIsInternal(boolean v) { this.isInternal = v; return this; } public MetadataResponseTopic setPartitions(List<MetadataResponsePartition> v) { this.partitions = v; return this; } public MetadataResponseTopic setTopicAuthorizedOperations(int v) { this.topicAuthorizedOperations = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class MetadataResponsePartition implements Message { short errorCode; int partitionIndex; int leaderId; int leaderEpoch; List<Integer> replicaNodes; List<Integer> isrNodes; List<Integer> offlineReplicas; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("error_code", Type.INT16, "The partition error, or 0 if there was no error."), new Field("partition_index", Type.INT32, "The partition index."), new Field("leader_id", Type.INT32, "The ID of the leader broker."), new Field("replica_nodes", new ArrayOf(Type.INT32), "The set of all nodes that host this partition."), new Field("isr_nodes", new ArrayOf(Type.INT32), "The set of nodes that are in sync with the leader for this partition.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("error_code", Type.INT16, "The partition error, or 0 if there was no error."), new Field("partition_index", Type.INT32, "The partition index."), new Field("leader_id", Type.INT32, "The ID of the leader broker."), new Field("replica_nodes", new ArrayOf(Type.INT32), "The set of all nodes that host this partition."), new Field("isr_nodes", new ArrayOf(Type.INT32), "The set of nodes that are in sync with the leader for this partition."), new Field("offline_replicas", new ArrayOf(Type.INT32), "The set of offline replicas of this partition.") ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = new Schema( new Field("error_code", Type.INT16, "The partition error, or 0 if there was no error."), new Field("partition_index", Type.INT32, "The partition index."), new Field("leader_id", Type.INT32, "The ID of the leader broker."), new Field("leader_epoch", Type.INT32, "The leader epoch of this partition."), new Field("replica_nodes", new ArrayOf(Type.INT32), "The set of all nodes that host this partition."), new Field("isr_nodes", new ArrayOf(Type.INT32), "The set of nodes that are in sync with the leader for this partition."), new Field("offline_replicas", new ArrayOf(Type.INT32), "The set of offline replicas of this partition.") ); public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = new Schema( new Field("error_code", Type.INT16, "The partition error, or 0 if there was no error."), new Field("partition_index", Type.INT32, "The partition index."), new Field("leader_id", Type.INT32, "The ID of the leader broker."), new Field("leader_epoch", Type.INT32, "The leader epoch of this partition."), new Field("replica_nodes", new CompactArrayOf(Type.INT32), "The set of all nodes that host this partition."), new Field("isr_nodes", new CompactArrayOf(Type.INT32), "The set of nodes that are in sync with the leader for this partition."), new Field("offline_replicas", new CompactArrayOf(Type.INT32), "The set of offline replicas of this partition."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_10 = SCHEMA_9; public static final Schema SCHEMA_11 = SCHEMA_10; public static final Schema SCHEMA_12 = SCHEMA_11; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9, SCHEMA_10, SCHEMA_11, SCHEMA_12 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 12; public MetadataResponsePartition(Readable _readable, short _version) { read(_readable, _version); } public MetadataResponsePartition() { this.errorCode = (short) 0; this.partitionIndex = 0; this.leaderId = 0; this.leaderEpoch = -1; this.replicaNodes = new ArrayList<Integer>(0); this.isrNodes = new ArrayList<Integer>(0); this.offlineReplicas = new ArrayList<Integer>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 12; } @Override public void read(Readable _readable, short _version) { if (_version > 12) { throw new UnsupportedVersionException("Can't read version " + _version + " of MetadataResponsePartition"); } this.errorCode = _readable.readShort(); this.partitionIndex = _readable.readInt(); this.leaderId = _readable.readInt(); if (_version >= 7) { this.leaderEpoch = _readable.readInt(); } else { this.leaderEpoch = -1; } { int arrayLength; if (_version >= 9) { arrayLength =
// read() continues: the three int32 arrays (replicaNodes, isrNodes, offlineReplicas for v5+)
// use compact varint lengths for v9+ and int32 lengths before; each length is bounds-checked
// against the remaining bytes before allocation. Tagged fields are consumed for v9+.
_readable.readUnsignedVarint() - 1; } else { arrayLength = _readable.readInt(); } if (arrayLength < 0) { throw new RuntimeException("non-nullable field replicaNodes was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<Integer> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(_readable.readInt()); } this.replicaNodes = newCollection; } } { int arrayLength; if (_version >= 9) { arrayLength = _readable.readUnsignedVarint() - 1; } else { arrayLength = _readable.readInt(); } if (arrayLength < 0) { throw new RuntimeException("non-nullable field isrNodes was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<Integer> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(_readable.readInt()); } this.isrNodes = newCollection; } } if (_version >= 5) { int arrayLength; if (_version >= 9) { arrayLength = _readable.readUnsignedVarint() - 1; } else { arrayLength = _readable.readInt(); } if (arrayLength < 0) { throw new RuntimeException("non-nullable field offlineReplicas was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<Integer> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(_readable.readInt()); } this.offlineReplicas = newCollection; } } else { this.offlineReplicas = new ArrayList<Integer>(0); } this._unknownTaggedFields = null; if (_version >= 9) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeShort(errorCode); _writable.writeInt(partitionIndex); _writable.writeInt(leaderId); if (_version >= 7) { _writable.writeInt(leaderEpoch); } if (_version >= 9) { _writable.writeUnsignedVarint(replicaNodes.size() + 1); } else { _writable.writeInt(replicaNodes.size()); } for (Integer replicaNodesElement : replicaNodes) { _writable.writeInt(replicaNodesElement); } if (_version >= 9) { _writable.writeUnsignedVarint(isrNodes.size() + 1); } else { _writable.writeInt(isrNodes.size()); } for (Integer isrNodesElement : isrNodes) { _writable.writeInt(isrNodesElement); } if (_version >= 5) { if (_version >= 9) { _writable.writeUnsignedVarint(offlineReplicas.size() + 1); } else { _writable.writeInt(offlineReplicas.size()); } for (Integer offlineReplicasElement : offlineReplicas) { _writable.writeInt(offlineReplicasElement); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 9) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 12) { throw new UnsupportedVersionException("Can't size version " + _version + " of MetadataResponsePartition"); }
// addSize() mirrors write(): fixed-width primitives, then 4 bytes per int32 array element
// plus the version-dependent length prefix; tagged-field bytes are added only for v9+.
_size.addBytes(2); _size.addBytes(4); _size.addBytes(4); if (_version >= 7) { _size.addBytes(4); } { if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(replicaNodes.size() + 1)); } else { _size.addBytes(4); } _size.addBytes(replicaNodes.size() * 4); } { if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(isrNodes.size() + 1)); } else { _size.addBytes(4); } _size.addBytes(isrNodes.size() * 4); } if (_version >= 5) { { if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(offlineReplicas.size() + 1)); } else { _size.addBytes(4); } _size.addBytes(offlineReplicas.size() * 4); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof MetadataResponsePartition)) return false; MetadataResponsePartition other = (MetadataResponsePartition) obj; if (errorCode != other.errorCode) return false; if (partitionIndex != other.partitionIndex) return false; if (leaderId != other.leaderId) return false; if (leaderEpoch != other.leaderEpoch) return false; if (this.replicaNodes == null) { if (other.replicaNodes != null) return false; } else { if (!this.replicaNodes.equals(other.replicaNodes)) return false; } if (this.isrNodes == null) { if (other.isrNodes != null) return false; } else { if (!this.isrNodes.equals(other.isrNodes)) return false; } if (this.offlineReplicas == null) { if (other.offlineReplicas != null) return false; } else { if (!this.offlineReplicas.equals(other.offlineReplicas)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + partitionIndex; hashCode = 31 * hashCode + leaderId; hashCode = 31 * hashCode + leaderEpoch; hashCode = 31 * hashCode + (replicaNodes == null ? 0 : replicaNodes.hashCode()); hashCode = 31 * hashCode + (isrNodes == null ? 0 : isrNodes.hashCode()); hashCode = 31 * hashCode + (offlineReplicas == null ? 0 : offlineReplicas.hashCode()); return hashCode; } @Override public MetadataResponsePartition duplicate() { MetadataResponsePartition _duplicate = new MetadataResponsePartition(); _duplicate.errorCode = errorCode; _duplicate.partitionIndex = partitionIndex; _duplicate.leaderId = leaderId; _duplicate.leaderEpoch = leaderEpoch; ArrayList<Integer> newReplicaNodes = new ArrayList<Integer>(replicaNodes.size()); for (Integer _element : replicaNodes) { newReplicaNodes.add(_element); } _duplicate.replicaNodes = newReplicaNodes; ArrayList<Integer> newIsrNodes = new ArrayList<Integer>(isrNodes.size()); for (Integer _element : isrNodes) { newIsrNodes.add(_element); } _duplicate.isrNodes = newIsrNodes; ArrayList<Integer> newOfflineReplicas = new ArrayList<Integer>(offlineReplicas.size()); for (Integer _element : offlineReplicas) { newOfflineReplicas.add(_element); } _duplicate.offlineReplicas = newOfflineReplicas; return _duplicate; } @Override public String toString() { return "MetadataResponsePartition(" + "errorCode=" + errorCode + ", partitionIndex=" + partitionIndex + ", leaderId=" + leaderId + ", leaderEpoch=" + leaderEpoch + ", replicaNodes=" + MessageUtil.deepToString(replicaNodes.iterator()) + ", isrNodes=" + MessageUtil.deepToString(isrNodes.iterator()) + ", offlineReplicas=" + MessageUtil.deepToString(offlineReplicas.iterator()) + ")"; } public short errorCode() { return this.errorCode; }
// Remaining MetadataResponsePartition accessors and fluent setters, followed by the start of
// MetadataResponseTopicCollection (keyed by topic name); its duplicate() continues past this
// chunk and is left untouched.
public int partitionIndex() { return this.partitionIndex; } public int leaderId() { return this.leaderId; } public int leaderEpoch() { return this.leaderEpoch; } public List<Integer> replicaNodes() { return this.replicaNodes; } public List<Integer> isrNodes() { return this.isrNodes; } public List<Integer> offlineReplicas() { return this.offlineReplicas; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public MetadataResponsePartition setErrorCode(short v) { this.errorCode = v; return this; } public MetadataResponsePartition setPartitionIndex(int v) { this.partitionIndex = v; return this; } public MetadataResponsePartition setLeaderId(int v) { this.leaderId = v; return this; } public MetadataResponsePartition setLeaderEpoch(int v) { this.leaderEpoch = v; return this; } public MetadataResponsePartition setReplicaNodes(List<Integer> v) { this.replicaNodes = v; return this; } public MetadataResponsePartition setIsrNodes(List<Integer> v) { this.isrNodes = v; return this; } public MetadataResponsePartition setOfflineReplicas(List<Integer> v) { this.offlineReplicas = v; return this; } } public static class MetadataResponseTopicCollection extends ImplicitLinkedHashMultiCollection<MetadataResponseTopic> { public MetadataResponseTopicCollection() { super(); } public MetadataResponseTopicCollection(int expectedNumElements) { super(expectedNumElements); } public MetadataResponseTopicCollection(Iterator<MetadataResponseTopic> iterator) { super(iterator); } public MetadataResponseTopic find(String name) { MetadataResponseTopic _key = new MetadataResponseTopic(); _key.setName(name); return find(_key); } public List<MetadataResponseTopic> findAll(String name) { MetadataResponseTopic _key = new MetadataResponseTopic(); _key.setName(name); return findAll(_key); } public MetadataResponseTopicCollection duplicate() { MetadataResponseTopicCollection _duplicate
= new MetadataResponseTopicCollection(size()); for (MetadataResponseTopic _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/MetadataResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.MetadataResponseData.*;

/**
 * Converts {@link MetadataResponseData} to and from Jackson JSON trees, honoring
 * the per-version field set of the Metadata response schema.
 *
 * FIX(review): type-mismatch error messages previously reported the PARENT object's
 * node type ({@code _node.getNodeType()}) instead of the offending field node's type,
 * which made diagnostics misleading. Each such message now reports the field node's
 * type. NOTE(review): this file is generator output — the same fix should be applied
 * to the message generator so it survives regeneration.
 */
public class MetadataResponseDataJsonConverter {
    /**
     * Parse a MetadataResponseData from a JSON object.
     *
     * @param _node    the JSON object to read.
     * @param _version the schema version being parsed; fields absent from this
     *                 version fall back to their defaults, fields mandatory in
     *                 this version raise RuntimeException when missing.
     */
    public static MetadataResponseData read(JsonNode _node, short _version) {
        MetadataResponseData _object = new MetadataResponseData();
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            // throttleTimeMs exists only from version 3 on; earlier versions default to 0.
            if (_version >= 3) {
                throw new RuntimeException("MetadataResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
            } else {
                _object.throttleTimeMs = 0;
            }
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "MetadataResponseData");
        }
        JsonNode _brokersNode = _node.get("brokers");
        if (_brokersNode == null) {
            throw new RuntimeException("MetadataResponseData: unable to locate field 'brokers', which is mandatory in version " + _version);
        } else {
            if (!_brokersNode.isArray()) {
                // report the field node's type, not the parent object's
                throw new RuntimeException("MetadataResponseData expected a JSON array, but got " + _brokersNode.getNodeType());
            }
            MetadataResponseBrokerCollection _collection = new MetadataResponseBrokerCollection(_brokersNode.size());
            _object.brokers = _collection;
            for (JsonNode _element : _brokersNode) {
                _collection.add(MetadataResponseBrokerJsonConverter.read(_element, _version));
            }
        }
        JsonNode _clusterIdNode = _node.get("clusterId");
        if (_clusterIdNode == null) {
            // clusterId exists only from version 2 on; earlier versions default to null.
            if (_version >= 2) {
                throw new RuntimeException("MetadataResponseData: unable to locate field 'clusterId', which is mandatory in version " + _version);
            } else {
                _object.clusterId = null;
            }
        } else {
            if (_clusterIdNode.isNull()) {
                _object.clusterId = null;
            } else {
                if (!_clusterIdNode.isTextual()) {
                    throw new RuntimeException("MetadataResponseData expected a string type, but got " + _clusterIdNode.getNodeType());
                }
                _object.clusterId = _clusterIdNode.asText();
            }
        }
        JsonNode _controllerIdNode = _node.get("controllerId");
        if (_controllerIdNode == null) {
            // controllerId exists only from version 1 on; earlier versions default to -1.
            if (_version >= 1) {
                throw new RuntimeException("MetadataResponseData: unable to locate field 'controllerId', which is mandatory in version " + _version);
            } else {
                _object.controllerId = -1;
            }
        } else {
            _object.controllerId = MessageUtil.jsonNodeToInt(_controllerIdNode, "MetadataResponseData");
        }
        JsonNode _topicsNode = _node.get("topics");
        if (_topicsNode == null) {
            throw new RuntimeException("MetadataResponseData: unable to locate field 'topics', which is mandatory in version " + _version);
        } else {
            if (!_topicsNode.isArray()) {
                throw new RuntimeException("MetadataResponseData expected a JSON array, but got " + _topicsNode.getNodeType());
            }
            MetadataResponseTopicCollection _collection = new MetadataResponseTopicCollection(_topicsNode.size());
            _object.topics = _collection;
            for (JsonNode _element : _topicsNode) {
                _collection.add(MetadataResponseTopicJsonConverter.read(_element, _version));
            }
        }
        JsonNode _clusterAuthorizedOperationsNode = _node.get("clusterAuthorizedOperations");
        if (_clusterAuthorizedOperationsNode == null) {
            // Only versions 8-10 carry this field; others use Integer.MIN_VALUE as "unset".
            if ((_version >= 8) && (_version <= 10)) {
                throw new RuntimeException("MetadataResponseData: unable to locate field 'clusterAuthorizedOperations', which is mandatory in version " + _version);
            } else {
                _object.clusterAuthorizedOperations = -2147483648;
            }
        } else {
            _object.clusterAuthorizedOperations = MessageUtil.jsonNodeToInt(_clusterAuthorizedOperationsNode, "MetadataResponseData");
        }
        return _object;
    }

    /**
     * Render a MetadataResponseData as a JSON object, emitting only the fields
     * present in the given version and rejecting non-default values for fields
     * the version cannot carry.
     */
    public static JsonNode write(MetadataResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        if (_version >= 3) {
            _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        }
        ArrayNode _brokersArray = new ArrayNode(JsonNodeFactory.instance);
        for (MetadataResponseBroker _element : _object.brokers) {
            _brokersArray.add(MetadataResponseBrokerJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("brokers", _brokersArray);
        if (_version >= 2) {
            if (_object.clusterId == null) {
                _node.set("clusterId", NullNode.instance);
            } else {
                _node.set("clusterId", new TextNode(_object.clusterId));
            }
        }
        if (_version >= 1) {
            _node.set("controllerId", new IntNode(_object.controllerId));
        }
        ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
        for (MetadataResponseTopic _element : _object.topics) {
            _topicsArray.add(MetadataResponseTopicJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("topics", _topicsArray);
        if ((_version >= 8) && (_version <= 10)) {
            _node.set("clusterAuthorizedOperations", new IntNode(_object.clusterAuthorizedOperations));
        } else {
            // Refuse to silently drop a meaningful value at an incompatible version.
            if (_object.clusterAuthorizedOperations != -2147483648) {
                throw new UnsupportedVersionException("Attempted to write a non-default clusterAuthorizedOperations at version " + _version);
            }
        }
        return _node;
    }

    /** Convenience overload: serialize records by default. */
    public static JsonNode write(MetadataResponseData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the nested MetadataResponseBroker struct. */
    public static class MetadataResponseBrokerJsonConverter {
        public static MetadataResponseBroker read(JsonNode _node, short _version) {
            MetadataResponseBroker _object = new MetadataResponseBroker();
            JsonNode _nodeIdNode = _node.get("nodeId");
            if (_nodeIdNode == null) {
                throw new RuntimeException("MetadataResponseBroker: unable to locate field 'nodeId', which is mandatory in version " + _version);
            } else {
                _object.nodeId = MessageUtil.jsonNodeToInt(_nodeIdNode, "MetadataResponseBroker");
            }
            JsonNode _hostNode = _node.get("host");
            if (_hostNode == null) {
                throw new RuntimeException("MetadataResponseBroker: unable to locate field 'host', which is mandatory in version " + _version);
            } else {
                if (!_hostNode.isTextual()) {
                    throw new RuntimeException("MetadataResponseBroker expected a string type, but got " + _hostNode.getNodeType());
                }
                _object.host = _hostNode.asText();
            }
            JsonNode _portNode = _node.get("port");
            if (_portNode == null) {
                throw new RuntimeException("MetadataResponseBroker: unable to locate field 'port', which is mandatory in version " + _version);
            } else {
                _object.port = MessageUtil.jsonNodeToInt(_portNode, "MetadataResponseBroker");
            }
            JsonNode _rackNode = _node.get("rack");
            if (_rackNode == null) {
                // rack exists only from version 1 on; earlier versions default to null.
                if (_version >= 1) {
                    throw new RuntimeException("MetadataResponseBroker: unable to locate field 'rack', which is mandatory in version " + _version);
                } else {
                    _object.rack = null;
                }
            } else {
                if (_rackNode.isNull()) {
                    _object.rack = null;
                } else {
                    if (!_rackNode.isTextual()) {
                        throw new RuntimeException("MetadataResponseBroker expected a string type, but got " + _rackNode.getNodeType());
                    }
                    _object.rack = _rackNode.asText();
                }
            }
            return _object;
        }

        public static JsonNode write(MetadataResponseBroker _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("nodeId", new IntNode(_object.nodeId));
            _node.set("host", new TextNode(_object.host));
            _node.set("port", new IntNode(_object.port));
            if (_version >= 1) {
                if (_object.rack == null) {
                    _node.set("rack", NullNode.instance);
                } else {
                    _node.set("rack", new TextNode(_object.rack));
                }
            }
            return _node;
        }

        public static JsonNode write(MetadataResponseBroker _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** JSON converter for the nested MetadataResponsePartition struct. */
    public static class MetadataResponsePartitionJsonConverter {
        public static MetadataResponsePartition read(JsonNode _node, short _version) {
            MetadataResponsePartition _object = new MetadataResponsePartition();
            JsonNode _errorCodeNode = _node.get("errorCode");
            if (_errorCodeNode == null) {
                throw new RuntimeException("MetadataResponsePartition: unable to locate field 'errorCode', which is mandatory in version " + _version);
            } else {
                _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "MetadataResponsePartition");
            }
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("MetadataResponsePartition: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "MetadataResponsePartition");
            }
            JsonNode _leaderIdNode = _node.get("leaderId");
            if (_leaderIdNode == null) {
                throw new RuntimeException("MetadataResponsePartition: unable to locate field 'leaderId', which is mandatory in version " + _version);
            } else {
                _object.leaderId = MessageUtil.jsonNodeToInt(_leaderIdNode, "MetadataResponsePartition");
            }
            JsonNode _leaderEpochNode = _node.get("leaderEpoch");
            if (_leaderEpochNode == null) {
                // leaderEpoch exists only from version 7 on; earlier versions default to -1.
                if (_version >= 7) {
                    throw new RuntimeException("MetadataResponsePartition: unable to locate field 'leaderEpoch', which is mandatory in version " + _version);
                } else {
                    _object.leaderEpoch = -1;
                }
            } else {
                _object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "MetadataResponsePartition");
            }
            JsonNode _replicaNodesNode = _node.get("replicaNodes");
            if (_replicaNodesNode == null) {
                throw new RuntimeException("MetadataResponsePartition: unable to locate field 'replicaNodes', which is mandatory in version " + _version);
            } else {
                if (!_replicaNodesNode.isArray()) {
                    throw new RuntimeException("MetadataResponsePartition expected a JSON array, but got " + _replicaNodesNode.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_replicaNodesNode.size());
                _object.replicaNodes = _collection;
                for (JsonNode _element : _replicaNodesNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "MetadataResponsePartition element"));
                }
            }
            JsonNode _isrNodesNode = _node.get("isrNodes");
            if (_isrNodesNode == null) {
                throw new RuntimeException("MetadataResponsePartition: unable to locate field 'isrNodes', which is mandatory in version " + _version);
            } else {
                if (!_isrNodesNode.isArray()) {
                    throw new RuntimeException("MetadataResponsePartition expected a JSON array, but got " + _isrNodesNode.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_isrNodesNode.size());
                _object.isrNodes = _collection;
                for (JsonNode _element : _isrNodesNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "MetadataResponsePartition element"));
                }
            }
            JsonNode _offlineReplicasNode = _node.get("offlineReplicas");
            if (_offlineReplicasNode == null) {
                // offlineReplicas exists only from version 5 on; earlier versions default to empty.
                if (_version >= 5) {
                    throw new RuntimeException("MetadataResponsePartition: unable to locate field 'offlineReplicas', which is mandatory in version " + _version);
                } else {
                    _object.offlineReplicas = new ArrayList<Integer>(0);
                }
            } else {
                if (!_offlineReplicasNode.isArray()) {
                    throw new RuntimeException("MetadataResponsePartition expected a JSON array, but got " + _offlineReplicasNode.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_offlineReplicasNode.size());
                _object.offlineReplicas = _collection;
                for (JsonNode _element : _offlineReplicasNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "MetadataResponsePartition element"));
                }
            }
            return _object;
        }

        public static JsonNode write(MetadataResponsePartition _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("errorCode", new ShortNode(_object.errorCode));
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            _node.set("leaderId", new IntNode(_object.leaderId));
            if (_version >= 7) {
                _node.set("leaderEpoch", new IntNode(_object.leaderEpoch));
            }
            ArrayNode _replicaNodesArray = new ArrayNode(JsonNodeFactory.instance);
            for (Integer _element : _object.replicaNodes) {
                _replicaNodesArray.add(new IntNode(_element));
            }
            _node.set("replicaNodes", _replicaNodesArray);
            ArrayNode _isrNodesArray = new ArrayNode(JsonNodeFactory.instance);
            for (Integer _element : _object.isrNodes) {
                _isrNodesArray.add(new IntNode(_element));
            }
            _node.set("isrNodes", _isrNodesArray);
            if (_version >= 5) {
                ArrayNode _offlineReplicasArray = new ArrayNode(JsonNodeFactory.instance);
                for (Integer _element : _object.offlineReplicas) {
                    _offlineReplicasArray.add(new IntNode(_element));
                }
                _node.set("offlineReplicas", _offlineReplicasArray);
            }
            return _node;
        }

        public static JsonNode write(MetadataResponsePartition _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** JSON converter for the nested MetadataResponseTopic struct. */
    public static class MetadataResponseTopicJsonConverter {
        public static MetadataResponseTopic read(JsonNode _node, short _version) {
            MetadataResponseTopic _object = new MetadataResponseTopic();
            JsonNode _errorCodeNode = _node.get("errorCode");
            if (_errorCodeNode == null) {
                throw new RuntimeException("MetadataResponseTopic: unable to locate field 'errorCode', which is mandatory in version " + _version);
            } else {
                _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "MetadataResponseTopic");
            }
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("MetadataResponseTopic: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (_nameNode.isNull()) {
                    _object.name = null;
                } else {
                    if (!_nameNode.isTextual()) {
                        throw new RuntimeException("MetadataResponseTopic expected a string type, but got " + _nameNode.getNodeType());
                    }
                    _object.name = _nameNode.asText();
                }
            }
            JsonNode _topicIdNode = _node.get("topicId");
            if (_topicIdNode == null) {
                // topicId exists only from version 10 on; earlier versions default to the zero UUID.
                if (_version >= 10) {
                    throw new RuntimeException("MetadataResponseTopic: unable to locate field 'topicId', which is mandatory in version " + _version);
                } else {
                    _object.topicId = Uuid.ZERO_UUID;
                }
            } else {
                if (!_topicIdNode.isTextual()) {
                    throw new RuntimeException("MetadataResponseTopic expected a JSON string type, but got " + _topicIdNode.getNodeType());
                }
                _object.topicId = Uuid.fromString(_topicIdNode.asText());
            }
            JsonNode _isInternalNode = _node.get("isInternal");
            if (_isInternalNode == null) {
                // isInternal exists only from version 1 on; earlier versions default to false.
                if (_version >= 1) {
                    throw new RuntimeException("MetadataResponseTopic: unable to locate field 'isInternal', which is mandatory in version " + _version);
                } else {
                    _object.isInternal = false;
                }
            } else {
                if (!_isInternalNode.isBoolean()) {
                    throw new RuntimeException("MetadataResponseTopic expected Boolean type, but got " + _isInternalNode.getNodeType());
                }
                _object.isInternal = _isInternalNode.asBoolean();
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("MetadataResponseTopic: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    throw new RuntimeException("MetadataResponseTopic expected a JSON array, but got " + _partitionsNode.getNodeType());
                }
                ArrayList<MetadataResponsePartition> _collection = new ArrayList<MetadataResponsePartition>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(MetadataResponsePartitionJsonConverter.read(_element, _version));
                }
            }
            JsonNode _topicAuthorizedOperationsNode = _node.get("topicAuthorizedOperations");
            if (_topicAuthorizedOperationsNode == null) {
                // topicAuthorizedOperations exists only from version 8 on; Integer.MIN_VALUE means "unset".
                if (_version >= 8) {
                    throw new RuntimeException("MetadataResponseTopic: unable to locate field 'topicAuthorizedOperations', which is mandatory in version " + _version);
                } else {
                    _object.topicAuthorizedOperations = -2147483648;
                }
            } else {
                _object.topicAuthorizedOperations = MessageUtil.jsonNodeToInt(_topicAuthorizedOperationsNode, "MetadataResponseTopic");
            }
            return _object;
        }

        public static JsonNode write(MetadataResponseTopic _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("errorCode", new ShortNode(_object.errorCode));
            if (_object.name == null) {
                _node.set("name", NullNode.instance);
            } else {
                _node.set("name", new TextNode(_object.name));
            }
            if (_version >= 10) {
                _node.set("topicId", new TextNode(_object.topicId.toString()));
            }
            if (_version >= 1) {
                _node.set("isInternal", BooleanNode.valueOf(_object.isInternal));
            }
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (MetadataResponsePartition _element : _object.partitions) {
                _partitionsArray.add(MetadataResponsePartitionJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitions", _partitionsArray);
            if (_version >= 8) {
                _node.set("topicAuthorizedOperations", new IntNode(_object.topicAuthorizedOperations));
            } else {
                // Refuse to silently drop a meaningful value at an incompatible version.
                if (_object.topicAuthorizedOperations != -2147483648) {
                    throw new UnsupportedVersionException("Attempted to write a non-default topicAuthorizedOperations at version " + _version);
                }
            }
            return _node;
        }

        public static JsonNode write(MetadataResponseTopic _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetCommitRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class OffsetCommitRequestData 
implements ApiMessage { String groupId; int generationId; String memberId; String groupInstanceId; long retentionTimeMs; List<OffsetCommitRequestTopic> topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("group_id", Type.STRING, "The unique group identifier."), new Field("topics", new ArrayOf(OffsetCommitRequestTopic.SCHEMA_0), "The topics to commit offsets for.") ); public static final Schema SCHEMA_1 = new Schema( new Field("group_id", Type.STRING, "The unique group identifier."), new Field("generation_id", Type.INT32, "The generation of the group."), new Field("member_id", Type.STRING, "The member ID assigned by the group coordinator."), new Field("topics", new ArrayOf(OffsetCommitRequestTopic.SCHEMA_1), "The topics to commit offsets for.") ); public static final Schema SCHEMA_2 = new Schema( new Field("group_id", Type.STRING, "The unique group identifier."), new Field("generation_id", Type.INT32, "The generation of the group."), new Field("member_id", Type.STRING, "The member ID assigned by the group coordinator."), new Field("retention_time_ms", Type.INT64, "The time period in ms to retain the offset."), new Field("topics", new ArrayOf(OffsetCommitRequestTopic.SCHEMA_2), "The topics to commit offsets for.") ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("group_id", Type.STRING, "The unique group identifier."), new Field("generation_id", Type.INT32, "The generation of the group."), new Field("member_id", Type.STRING, "The member ID assigned by the group coordinator."), new Field("topics", new ArrayOf(OffsetCommitRequestTopic.SCHEMA_2), "The topics to commit offsets for.") ); public static final Schema SCHEMA_6 = new Schema( new Field("group_id", Type.STRING, "The unique group identifier."), new Field("generation_id", Type.INT32, "The generation of the group."), new Field("member_id", 
Type.STRING, "The member ID assigned by the group coordinator."), new Field("topics", new ArrayOf(OffsetCommitRequestTopic.SCHEMA_6), "The topics to commit offsets for.") ); public static final Schema SCHEMA_7 = new Schema( new Field("group_id", Type.STRING, "The unique group identifier."), new Field("generation_id", Type.INT32, "The generation of the group."), new Field("member_id", Type.STRING, "The member ID assigned by the group coordinator."), new Field("group_instance_id", Type.NULLABLE_STRING, "The unique identifier of the consumer instance provided by end user."), new Field("topics", new ArrayOf(OffsetCommitRequestTopic.SCHEMA_6), "The topics to commit offsets for.") ); public static final Schema SCHEMA_8 = new Schema( new Field("group_id", Type.COMPACT_STRING, "The unique group identifier."), new Field("generation_id", Type.INT32, "The generation of the group."), new Field("member_id", Type.COMPACT_STRING, "The member ID assigned by the group coordinator."), new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, "The unique identifier of the consumer instance provided by end user."), new Field("topics", new CompactArrayOf(OffsetCommitRequestTopic.SCHEMA_8), "The topics to commit offsets for."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 8; public OffsetCommitRequestData(Readable _readable, short _version) { read(_readable, _version); } public OffsetCommitRequestData() { this.groupId = ""; this.generationId = -1; this.memberId = ""; this.groupInstanceId = null; this.retentionTimeMs = -1L; this.topics = new ArrayList<OffsetCommitRequestTopic>(0); } @Override public short apiKey() { return 8; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; 
} @Override public void read(Readable _readable, short _version) { { int length; if (_version >= 8) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field groupId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field groupId had invalid length " + length); } else { this.groupId = _readable.readString(length); } } if (_version >= 1) { this.generationId = _readable.readInt(); } else { this.generationId = -1; } if (_version >= 1) { int length; if (_version >= 8) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field memberId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field memberId had invalid length " + length); } else { this.memberId = _readable.readString(length); } } else { this.memberId = ""; } if (_version >= 7) { int length; if (_version >= 8) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.groupInstanceId = null; } else if (length > 0x7fff) { throw new RuntimeException("string field groupInstanceId had invalid length " + length); } else { this.groupInstanceId = _readable.readString(length); } } else { this.groupInstanceId = null; } if ((_version >= 2) && (_version <= 4)) { this.retentionTimeMs = _readable.readLong(); } else { this.retentionTimeMs = -1L; } { if (_version >= 8) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetCommitRequestTopic> newCollection = new 
ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetCommitRequestTopic(_readable, _version)); } this.topics = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetCommitRequestTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetCommitRequestTopic(_readable, _version)); } this.topics = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 8) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(groupId); if (_version >= 8) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 1) { _writable.writeInt(generationId); } if (_version >= 1) { { byte[] _stringBytes = _cache.getSerializedValue(memberId); if (_version >= 8) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } if (_version >= 7) { if (groupInstanceId == null) { if (_version >= 8) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } 
} else { byte[] _stringBytes = _cache.getSerializedValue(groupInstanceId); if (_version >= 8) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } else { if (this.groupInstanceId != null) { throw new UnsupportedVersionException("Attempted to write a non-default groupInstanceId at version " + _version); } } if ((_version >= 2) && (_version <= 4)) { _writable.writeLong(retentionTimeMs); } if (_version >= 8) { _writable.writeUnsignedVarint(topics.size() + 1); for (OffsetCommitRequestTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(topics.size()); for (OffsetCommitRequestTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 8) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'groupId' field is too long to be serialized"); } _cache.cacheSerializedValue(groupId, _stringBytes); if (_version >= 8) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_version >= 1) { _size.addBytes(4); } if (_version >= 1) { { byte[] _stringBytes = memberId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new 
RuntimeException("'memberId' field is too long to be serialized"); } _cache.cacheSerializedValue(memberId, _stringBytes); if (_version >= 8) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } if (_version >= 7) { if (groupInstanceId == null) { if (_version >= 8) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = groupInstanceId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'groupInstanceId' field is too long to be serialized"); } _cache.cacheSerializedValue(groupInstanceId, _stringBytes); if (_version >= 8) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } if ((_version >= 2) && (_version <= 4)) { _size.addBytes(8); } { if (_version >= 8) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); } else { _size.addBytes(4); } for (OffsetCommitRequestTopic topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 8) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetCommitRequestData)) return false; OffsetCommitRequestData other = (OffsetCommitRequestData) obj; if (this.groupId == null) { if (other.groupId != null) return false; } else { if (!this.groupId.equals(other.groupId)) return 
false; } if (generationId != other.generationId) return false; if (this.memberId == null) { if (other.memberId != null) return false; } else { if (!this.memberId.equals(other.memberId)) return false; } if (this.groupInstanceId == null) { if (other.groupInstanceId != null) return false; } else { if (!this.groupInstanceId.equals(other.groupInstanceId)) return false; } if (retentionTimeMs != other.retentionTimeMs) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode()); hashCode = 31 * hashCode + generationId; hashCode = 31 * hashCode + (memberId == null ? 0 : memberId.hashCode()); hashCode = 31 * hashCode + (groupInstanceId == null ? 0 : groupInstanceId.hashCode()); hashCode = 31 * hashCode + ((int) (retentionTimeMs >> 32) ^ (int) retentionTimeMs); hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); return hashCode; } @Override public OffsetCommitRequestData duplicate() { OffsetCommitRequestData _duplicate = new OffsetCommitRequestData(); _duplicate.groupId = groupId; _duplicate.generationId = generationId; _duplicate.memberId = memberId; if (groupInstanceId == null) { _duplicate.groupInstanceId = null; } else { _duplicate.groupInstanceId = groupInstanceId; } _duplicate.retentionTimeMs = retentionTimeMs; ArrayList<OffsetCommitRequestTopic> newTopics = new ArrayList<OffsetCommitRequestTopic>(topics.size()); for (OffsetCommitRequestTopic _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "OffsetCommitRequestData(" + "groupId=" + ((groupId == null) ? 
"null" : "'" + groupId.toString() + "'") + ", generationId=" + generationId + ", memberId=" + ((memberId == null) ? "null" : "'" + memberId.toString() + "'") + ", groupInstanceId=" + ((groupInstanceId == null) ? "null" : "'" + groupInstanceId.toString() + "'") + ", retentionTimeMs=" + retentionTimeMs + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public String groupId() { return this.groupId; } public int generationId() { return this.generationId; } public String memberId() { return this.memberId; } public String groupInstanceId() { return this.groupInstanceId; } public long retentionTimeMs() { return this.retentionTimeMs; } public List<OffsetCommitRequestTopic> topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetCommitRequestData setGroupId(String v) { this.groupId = v; return this; } public OffsetCommitRequestData setGenerationId(int v) { this.generationId = v; return this; } public OffsetCommitRequestData setMemberId(String v) { this.memberId = v; return this; } public OffsetCommitRequestData setGroupInstanceId(String v) { this.groupInstanceId = v; return this; } public OffsetCommitRequestData setRetentionTimeMs(long v) { this.retentionTimeMs = v; return this; } public OffsetCommitRequestData setTopics(List<OffsetCommitRequestTopic> v) { this.topics = v; return this; } public static class OffsetCommitRequestTopic implements Message { String name; List<OffsetCommitRequestPartition> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(OffsetCommitRequestPartition.SCHEMA_0), "Each partition to commit offsets for.") ); public static final Schema SCHEMA_1 = new Schema( new Field("name", Type.STRING, "The topic name."), new 
Field("partitions", new ArrayOf(OffsetCommitRequestPartition.SCHEMA_1), "Each partition to commit offsets for.") ); public static final Schema SCHEMA_2 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(OffsetCommitRequestPartition.SCHEMA_2), "Each partition to commit offsets for.") ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(OffsetCommitRequestPartition.SCHEMA_6), "Each partition to commit offsets for.") ); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = new Schema( new Field("name", Type.COMPACT_STRING, "The topic name."), new Field("partitions", new CompactArrayOf(OffsetCommitRequestPartition.SCHEMA_8), "Each partition to commit offsets for."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 8; public OffsetCommitRequestTopic(Readable _readable, short _version) { read(_readable, _version); } public OffsetCommitRequestTopic() { this.name = ""; this.partitions = new ArrayList<OffsetCommitRequestPartition>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; } @Override public void read(Readable _readable, short _version) { if (_version > 8) { throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetCommitRequestTopic"); } { int length; if (_version >= 8) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field 
name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { if (_version >= 8) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetCommitRequestPartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetCommitRequestPartition(_readable, _version)); } this.partitions = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetCommitRequestPartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetCommitRequestPartition(_readable, _version)); } this.partitions = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 8) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = 
_cache.getSerializedValue(name); if (_version >= 8) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 8) { _writable.writeUnsignedVarint(partitions.size() + 1); for (OffsetCommitRequestPartition partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitions.size()); for (OffsetCommitRequestPartition partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 8) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 8) { throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetCommitRequestTopic"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); if (_version >= 8) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 8) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { _size.addBytes(4); } for (OffsetCommitRequestPartition partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += 
_unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 8) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetCommitRequestTopic)) return false; OffsetCommitRequestTopic other = (OffsetCommitRequestTopic) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode()); return hashCode; } @Override public OffsetCommitRequestTopic duplicate() { OffsetCommitRequestTopic _duplicate = new OffsetCommitRequestTopic(); _duplicate.name = name; ArrayList<OffsetCommitRequestPartition> newPartitions = new ArrayList<OffsetCommitRequestPartition>(partitions.size()); for (OffsetCommitRequestPartition _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "OffsetCommitRequestTopic(" + "name=" + ((name == null) ? 
"null" : "'" + name.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String name() { return this.name; } public List<OffsetCommitRequestPartition> partitions() { return this.partitions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetCommitRequestTopic setName(String v) { this.name = v; return this; } public OffsetCommitRequestTopic setPartitions(List<OffsetCommitRequestPartition> v) { this.partitions = v; return this; } } public static class OffsetCommitRequestPartition implements Message { int partitionIndex; long committedOffset; int committedLeaderEpoch; long commitTimestamp; String committedMetadata; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("committed_offset", Type.INT64, "The message offset to be committed."), new Field("committed_metadata", Type.NULLABLE_STRING, "Any associated metadata the client wants to keep.") ); public static final Schema SCHEMA_1 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("committed_offset", Type.INT64, "The message offset to be committed."), new Field("commit_timestamp", Type.INT64, "The timestamp of the commit."), new Field("committed_metadata", Type.NULLABLE_STRING, "Any associated metadata the client wants to keep.") ); public static final Schema SCHEMA_2 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("committed_offset", Type.INT64, "The message offset to be committed."), new Field("committed_metadata", Type.NULLABLE_STRING, "Any associated metadata the client wants to keep.") ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = 
SCHEMA_4; public static final Schema SCHEMA_6 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("committed_offset", Type.INT64, "The message offset to be committed."), new Field("committed_leader_epoch", Type.INT32, "The leader epoch of this partition."), new Field("committed_metadata", Type.NULLABLE_STRING, "Any associated metadata the client wants to keep.") ); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("committed_offset", Type.INT64, "The message offset to be committed."), new Field("committed_leader_epoch", Type.INT32, "The leader epoch of this partition."), new Field("committed_metadata", Type.COMPACT_NULLABLE_STRING, "Any associated metadata the client wants to keep."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 8; public OffsetCommitRequestPartition(Readable _readable, short _version) { read(_readable, _version); } public OffsetCommitRequestPartition() { this.partitionIndex = 0; this.committedOffset = 0L; this.committedLeaderEpoch = -1; this.commitTimestamp = -1L; this.committedMetadata = ""; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; } @Override public void read(Readable _readable, short _version) { if (_version > 8) { throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetCommitRequestPartition"); } this.partitionIndex = _readable.readInt(); this.committedOffset = _readable.readLong(); if (_version >= 6) { this.committedLeaderEpoch = _readable.readInt(); } else { this.committedLeaderEpoch = -1; } if ((_version >= 1) && (_version <= 1)) { 
this.commitTimestamp = _readable.readLong(); } else { this.commitTimestamp = -1L; } { int length; if (_version >= 8) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.committedMetadata = null; } else if (length > 0x7fff) { throw new RuntimeException("string field committedMetadata had invalid length " + length); } else { this.committedMetadata = _readable.readString(length); } } this._unknownTaggedFields = null; if (_version >= 8) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(partitionIndex); _writable.writeLong(committedOffset); if (_version >= 6) { _writable.writeInt(committedLeaderEpoch); } if ((_version >= 1) && (_version <= 1)) { _writable.writeLong(commitTimestamp); } else { if (this.commitTimestamp != -1L) { throw new UnsupportedVersionException("Attempted to write a non-default commitTimestamp at version " + _version); } } if (committedMetadata == null) { if (_version >= 8) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(committedMetadata); if (_version >= 8) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 8) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if 
(_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 8) { throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetCommitRequestPartition"); } _size.addBytes(4); _size.addBytes(8); if (_version >= 6) { _size.addBytes(4); } if ((_version >= 1) && (_version <= 1)) { _size.addBytes(8); } if (committedMetadata == null) { if (_version >= 8) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = committedMetadata.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'committedMetadata' field is too long to be serialized"); } _cache.cacheSerializedValue(committedMetadata, _stringBytes); if (_version >= 8) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 8) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetCommitRequestPartition)) return false; OffsetCommitRequestPartition other = (OffsetCommitRequestPartition) obj; if (partitionIndex != other.partitionIndex) return false; if (committedOffset != other.committedOffset) return false; if 
(committedLeaderEpoch != other.committedLeaderEpoch) return false; if (commitTimestamp != other.commitTimestamp) return false; if (this.committedMetadata == null) { if (other.committedMetadata != null) return false; } else { if (!this.committedMetadata.equals(other.committedMetadata)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partitionIndex; hashCode = 31 * hashCode + ((int) (committedOffset >> 32) ^ (int) committedOffset); hashCode = 31 * hashCode + committedLeaderEpoch; hashCode = 31 * hashCode + ((int) (commitTimestamp >> 32) ^ (int) commitTimestamp); hashCode = 31 * hashCode + (committedMetadata == null ? 0 : committedMetadata.hashCode()); return hashCode; } @Override public OffsetCommitRequestPartition duplicate() { OffsetCommitRequestPartition _duplicate = new OffsetCommitRequestPartition(); _duplicate.partitionIndex = partitionIndex; _duplicate.committedOffset = committedOffset; _duplicate.committedLeaderEpoch = committedLeaderEpoch; _duplicate.commitTimestamp = commitTimestamp; if (committedMetadata == null) { _duplicate.committedMetadata = null; } else { _duplicate.committedMetadata = committedMetadata; } return _duplicate; } @Override public String toString() { return "OffsetCommitRequestPartition(" + "partitionIndex=" + partitionIndex + ", committedOffset=" + committedOffset + ", committedLeaderEpoch=" + committedLeaderEpoch + ", commitTimestamp=" + commitTimestamp + ", committedMetadata=" + ((committedMetadata == null) ? 
"null" : "'" + committedMetadata.toString() + "'") + ")"; } public int partitionIndex() { return this.partitionIndex; } public long committedOffset() { return this.committedOffset; } public int committedLeaderEpoch() { return this.committedLeaderEpoch; } public long commitTimestamp() { return this.commitTimestamp; } public String committedMetadata() { return this.committedMetadata; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetCommitRequestPartition setPartitionIndex(int v) { this.partitionIndex = v; return this; } public OffsetCommitRequestPartition setCommittedOffset(long v) { this.committedOffset = v; return this; } public OffsetCommitRequestPartition setCommittedLeaderEpoch(int v) { this.committedLeaderEpoch = v; return this; } public OffsetCommitRequestPartition setCommitTimestamp(long v) { this.commitTimestamp = v; return this; } public OffsetCommitRequestPartition setCommittedMetadata(String v) { this.committedMetadata = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetCommitRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.OffsetCommitRequestData.*;

/**
 * Converts {@link OffsetCommitRequestData} to and from a Jackson JSON tree.
 *
 * Generated by the Kafka message generator — do not hand-edit; regenerate from the
 * OffsetCommitRequest message spec instead. The version checks below mirror the
 * per-version field presence of the wire schema: missing JSON fields are fatal when
 * the field is mandatory at {@code _version}, and otherwise fall back to the
 * generated default value.
 */
public class OffsetCommitRequestDataJsonConverter {
    /**
     * Builds an {@link OffsetCommitRequestData} from {@code _node}, validating field
     * presence and JSON types against message version {@code _version}.
     *
     * Throws RuntimeException when a mandatory field is absent or has the wrong JSON
     * type, matching the generator's error-message convention.
     */
    public static OffsetCommitRequestData read(JsonNode _node, short _version) {
        OffsetCommitRequestData _object = new OffsetCommitRequestData();
        // groupId: mandatory in every version; must be a JSON string.
        JsonNode _groupIdNode = _node.get("groupId");
        if (_groupIdNode == null) {
            throw new RuntimeException("OffsetCommitRequestData: unable to locate field 'groupId', which is mandatory in version " + _version);
        } else {
            if (!_groupIdNode.isTextual()) {
                throw new RuntimeException("OffsetCommitRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.groupId = _groupIdNode.asText();
        }
        // generationId: mandatory from v1; defaults to -1 for v0 input without it.
        JsonNode _generationIdNode = _node.get("generationId");
        if (_generationIdNode == null) {
            if (_version >= 1) {
                throw new RuntimeException("OffsetCommitRequestData: unable to locate field 'generationId', which is mandatory in version " + _version);
            } else {
                _object.generationId = -1;
            }
        } else {
            _object.generationId = MessageUtil.jsonNodeToInt(_generationIdNode, "OffsetCommitRequestData");
        }
        // memberId: mandatory from v1; defaults to "" for v0.
        JsonNode _memberIdNode = _node.get("memberId");
        if (_memberIdNode == null) {
            if (_version >= 1) {
                throw new RuntimeException("OffsetCommitRequestData: unable to locate field 'memberId', which is mandatory in version " + _version);
            } else {
                _object.memberId = "";
            }
        } else {
            if (!_memberIdNode.isTextual()) {
                throw new RuntimeException("OffsetCommitRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.memberId = _memberIdNode.asText();
        }
        // groupInstanceId: mandatory from v7, and nullable (JSON null maps to Java null).
        JsonNode _groupInstanceIdNode = _node.get("groupInstanceId");
        if (_groupInstanceIdNode == null) {
            if (_version >= 7) {
                throw new RuntimeException("OffsetCommitRequestData: unable to locate field 'groupInstanceId', which is mandatory in version " + _version);
            } else {
                _object.groupInstanceId = null;
            }
        } else {
            if (_groupInstanceIdNode.isNull()) {
                _object.groupInstanceId = null;
            } else {
                if (!_groupInstanceIdNode.isTextual()) {
                    throw new RuntimeException("OffsetCommitRequestData expected a string type, but got " + _node.getNodeType());
                }
                _object.groupInstanceId = _groupInstanceIdNode.asText();
            }
        }
        // retentionTimeMs: only exists in versions 2-4; defaults to -1L elsewhere.
        JsonNode _retentionTimeMsNode = _node.get("retentionTimeMs");
        if (_retentionTimeMsNode == null) {
            if ((_version >= 2) && (_version <= 4)) {
                throw new RuntimeException("OffsetCommitRequestData: unable to locate field 'retentionTimeMs', which is mandatory in version " + _version);
            } else {
                _object.retentionTimeMs = -1L;
            }
        } else {
            _object.retentionTimeMs = MessageUtil.jsonNodeToLong(_retentionTimeMsNode, "OffsetCommitRequestData");
        }
        // topics: mandatory array; each element delegated to the nested topic converter.
        JsonNode _topicsNode = _node.get("topics");
        if (_topicsNode == null) {
            throw new RuntimeException("OffsetCommitRequestData: unable to locate field 'topics', which is mandatory in version " + _version);
        } else {
            if (!_topicsNode.isArray()) {
                throw new RuntimeException("OffsetCommitRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<OffsetCommitRequestTopic> _collection = new ArrayList<OffsetCommitRequestTopic>(_topicsNode.size());
            _object.topics = _collection;
            for (JsonNode _element : _topicsNode) {
                _collection.add(OffsetCommitRequestTopicJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes {@code _object} to a JSON tree for message version {@code _version}.
     * Version-gated fields are emitted only in the versions that define them; writing a
     * non-default groupInstanceId at a version that lacks the field is an error.
     * {@code _serializeRecords} is part of the generated converter signature and is
     * only forwarded to nested converters here.
     */
    public static JsonNode write(OffsetCommitRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("groupId", new TextNode(_object.groupId));
        if (_version >= 1) {
            _node.set("generationId", new IntNode(_object.generationId));
        }
        if (_version >= 1) {
            _node.set("memberId", new TextNode(_object.memberId));
        }
        if (_version >= 7) {
            if (_object.groupInstanceId == null) {
                _node.set("groupInstanceId", NullNode.instance);
            } else {
                _node.set("groupInstanceId", new TextNode(_object.groupInstanceId));
            }
        } else {
            // Pre-v7 versions cannot represent this field; refuse to drop a set value silently.
            if (_object.groupInstanceId != null) {
                throw new UnsupportedVersionException("Attempted to write a non-default groupInstanceId at version " + _version);
            }
        }
        if ((_version >= 2) && (_version <= 4)) {
            _node.set("retentionTimeMs", new LongNode(_object.retentionTimeMs));
        }
        ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
        for (OffsetCommitRequestTopic _element : _object.topics) {
            _topicsArray.add(OffsetCommitRequestTopicJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("topics", _topicsArray);
        return _node;
    }

    /** Convenience overload: serializes with {@code _serializeRecords = true}. */
    public static JsonNode write(OffsetCommitRequestData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the nested {@link OffsetCommitRequestPartition} struct. */
    public static class OffsetCommitRequestPartitionJsonConverter {
        /**
         * Builds an {@link OffsetCommitRequestPartition} from {@code _node},
         * enforcing per-version field presence.
         */
        public static OffsetCommitRequestPartition read(JsonNode _node, short _version) {
            OffsetCommitRequestPartition _object = new OffsetCommitRequestPartition();
            // partitionIndex: mandatory in every version.
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("OffsetCommitRequestPartition: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "OffsetCommitRequestPartition");
            }
            // committedOffset: mandatory in every version.
            JsonNode _committedOffsetNode = _node.get("committedOffset");
            if (_committedOffsetNode == null) {
                throw new RuntimeException("OffsetCommitRequestPartition: unable to locate field 'committedOffset', which is mandatory in version " + _version);
            } else {
                _object.committedOffset = MessageUtil.jsonNodeToLong(_committedOffsetNode, "OffsetCommitRequestPartition");
            }
            // committedLeaderEpoch: mandatory from v6; defaults to -1 before that.
            JsonNode _committedLeaderEpochNode = _node.get("committedLeaderEpoch");
            if (_committedLeaderEpochNode == null) {
                if (_version >= 6) {
                    throw new RuntimeException("OffsetCommitRequestPartition: unable to locate field 'committedLeaderEpoch', which is mandatory in version " + _version);
                } else {
                    _object.committedLeaderEpoch = -1;
                }
            } else {
                _object.committedLeaderEpoch = MessageUtil.jsonNodeToInt(_committedLeaderEpochNode, "OffsetCommitRequestPartition");
            }
            // commitTimestamp: only exists in version 1; defaults to -1L elsewhere.
            JsonNode _commitTimestampNode = _node.get("commitTimestamp");
            if (_commitTimestampNode == null) {
                if ((_version >= 1) && (_version <= 1)) {
                    throw new RuntimeException("OffsetCommitRequestPartition: unable to locate field 'commitTimestamp', which is mandatory in version " + _version);
                } else {
                    _object.commitTimestamp = -1L;
                }
            } else {
                _object.commitTimestamp = MessageUtil.jsonNodeToLong(_commitTimestampNode, "OffsetCommitRequestPartition");
            }
            // committedMetadata: mandatory but nullable (JSON null maps to Java null).
            JsonNode _committedMetadataNode = _node.get("committedMetadata");
            if (_committedMetadataNode == null) {
                throw new RuntimeException("OffsetCommitRequestPartition: unable to locate field 'committedMetadata', which is mandatory in version " + _version);
            } else {
                if (_committedMetadataNode.isNull()) {
                    _object.committedMetadata = null;
                } else {
                    if (!_committedMetadataNode.isTextual()) {
                        throw new RuntimeException("OffsetCommitRequestPartition expected a string type, but got " + _node.getNodeType());
                    }
                    _object.committedMetadata = _committedMetadataNode.asText();
                }
            }
            return _object;
        }

        /**
         * Serializes {@code _object} for {@code _version}; writing a non-default
         * commitTimestamp outside version 1 is an error.
         */
        public static JsonNode write(OffsetCommitRequestPartition _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            _node.set("committedOffset", new LongNode(_object.committedOffset));
            if (_version >= 6) {
                _node.set("committedLeaderEpoch", new IntNode(_object.committedLeaderEpoch));
            }
            if ((_version >= 1) && (_version <= 1)) {
                _node.set("commitTimestamp", new LongNode(_object.commitTimestamp));
            } else {
                // Field absent in this version; a set value would be silently lost, so fail.
                if (_object.commitTimestamp != -1L) {
                    throw new UnsupportedVersionException("Attempted to write a non-default commitTimestamp at version " + _version);
                }
            }
            if (_object.committedMetadata == null) {
                _node.set("committedMetadata", NullNode.instance);
            } else {
                _node.set("committedMetadata", new TextNode(_object.committedMetadata));
            }
            return _node;
        }

        /** Convenience overload: serializes with {@code _serializeRecords = true}. */
        public static JsonNode write(OffsetCommitRequestPartition _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** JSON converter for the nested {@link OffsetCommitRequestTopic} struct. */
    public static class OffsetCommitRequestTopicJsonConverter {
        /**
         * Builds an {@link OffsetCommitRequestTopic} from {@code _node}; both fields
         * are mandatory in every version.
         */
        public static OffsetCommitRequestTopic read(JsonNode _node, short _version) {
            OffsetCommitRequestTopic _object = new OffsetCommitRequestTopic();
            // name: mandatory; must be a JSON string.
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("OffsetCommitRequestTopic: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("OffsetCommitRequestTopic expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            // partitions: mandatory array; elements delegated to the partition converter.
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("OffsetCommitRequestTopic: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    throw new RuntimeException("OffsetCommitRequestTopic expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<OffsetCommitRequestPartition> _collection = new ArrayList<OffsetCommitRequestPartition>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(OffsetCommitRequestPartitionJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        /** Serializes {@code _object}, delegating each partition to the partition converter. */
        public static JsonNode write(OffsetCommitRequestTopic _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (OffsetCommitRequestPartition _element : _object.partitions) {
                _partitionsArray.add(OffsetCommitRequestPartitionJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }

        /** Convenience overload: serializes with {@code _serializeRecords = true}. */
        public static JsonNode write(OffsetCommitRequestTopic _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetCommitResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class OffsetCommitResponseData 
implements ApiMessage { int throttleTimeMs; List<OffsetCommitResponseTopic> topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topics", new ArrayOf(OffsetCommitResponseTopic.SCHEMA_0), "The responses for each topic.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("topics", new ArrayOf(OffsetCommitResponseTopic.SCHEMA_0), "The responses for each topic.") ); public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("topics", new CompactArrayOf(OffsetCommitResponseTopic.SCHEMA_8), "The responses for each topic."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 8; public OffsetCommitResponseData(Readable _readable, short _version) { read(_readable, _version); } public OffsetCommitResponseData() { this.throttleTimeMs = 0; this.topics = new ArrayList<OffsetCommitResponseTopic>(0); } @Override public short apiKey() { return 8; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; } @Override public void read(Readable _readable, short 
_version) { if (_version >= 3) { this.throttleTimeMs = _readable.readInt(); } else { this.throttleTimeMs = 0; } { if (_version >= 8) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetCommitResponseTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetCommitResponseTopic(_readable, _version)); } this.topics = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetCommitResponseTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetCommitResponseTopic(_readable, _version)); } this.topics = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 8) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 3) { _writable.writeInt(throttleTimeMs); } if (_version >= 8) { _writable.writeUnsignedVarint(topics.size() + 1); for 
(OffsetCommitResponseTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(topics.size()); for (OffsetCommitResponseTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 8) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 3) { _size.addBytes(4); } { if (_version >= 8) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); } else { _size.addBytes(4); } for (OffsetCommitResponseTopic topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 8) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetCommitResponseData)) return false; OffsetCommitResponseData other = (OffsetCommitResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if 
(!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); return hashCode; } @Override public OffsetCommitResponseData duplicate() { OffsetCommitResponseData _duplicate = new OffsetCommitResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; ArrayList<OffsetCommitResponseTopic> newTopics = new ArrayList<OffsetCommitResponseTopic>(topics.size()); for (OffsetCommitResponseTopic _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "OffsetCommitResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public List<OffsetCommitResponseTopic> topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetCommitResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public OffsetCommitResponseData setTopics(List<OffsetCommitResponseTopic> v) { this.topics = v; return this; } public static class OffsetCommitResponseTopic implements Message { String name; List<OffsetCommitResponsePartition> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(OffsetCommitResponsePartition.SCHEMA_0), "The responses for each partition in the topic.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; 
public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = new Schema( new Field("name", Type.COMPACT_STRING, "The topic name."), new Field("partitions", new CompactArrayOf(OffsetCommitResponsePartition.SCHEMA_8), "The responses for each partition in the topic."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 8; public OffsetCommitResponseTopic(Readable _readable, short _version) { read(_readable, _version); } public OffsetCommitResponseTopic() { this.name = ""; this.partitions = new ArrayList<OffsetCommitResponsePartition>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; } @Override public void read(Readable _readable, short _version) { if (_version > 8) { throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetCommitResponseTopic"); } { int length; if (_version >= 8) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { if (_version >= 8) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are 
only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetCommitResponsePartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetCommitResponsePartition(_readable, _version)); } this.partitions = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetCommitResponsePartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetCommitResponsePartition(_readable, _version)); } this.partitions = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 8) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 8) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 8) { _writable.writeUnsignedVarint(partitions.size() + 1); for (OffsetCommitResponsePartition partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitions.size()); for (OffsetCommitResponsePartition partitionsElement : partitions) { 
partitionsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 8) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 8) { throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetCommitResponseTopic"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); if (_version >= 8) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 8) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { _size.addBytes(4); } for (OffsetCommitResponsePartition partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 8) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean 
equals(Object obj) { if (!(obj instanceof OffsetCommitResponseTopic)) return false; OffsetCommitResponseTopic other = (OffsetCommitResponseTopic) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode()); return hashCode; } @Override public OffsetCommitResponseTopic duplicate() { OffsetCommitResponseTopic _duplicate = new OffsetCommitResponseTopic(); _duplicate.name = name; ArrayList<OffsetCommitResponsePartition> newPartitions = new ArrayList<OffsetCommitResponsePartition>(partitions.size()); for (OffsetCommitResponsePartition _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "OffsetCommitResponseTopic(" + "name=" + ((name == null) ? 
"null" : "'" + name.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String name() { return this.name; } public List<OffsetCommitResponsePartition> partitions() { return this.partitions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetCommitResponseTopic setName(String v) { this.name = v; return this; } public OffsetCommitResponseTopic setPartitions(List<OffsetCommitResponsePartition> v) { this.partitions = v; return this; } } public static class OffsetCommitResponsePartition implements Message { int partitionIndex; short errorCode; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 8; public OffsetCommitResponsePartition(Readable _readable, short _version) { read(_readable, _version); } public OffsetCommitResponsePartition() { this.partitionIndex = 0; this.errorCode = 
(short) 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; } @Override public void read(Readable _readable, short _version) { if (_version > 8) { throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetCommitResponsePartition"); } this.partitionIndex = _readable.readInt(); this.errorCode = _readable.readShort(); this._unknownTaggedFields = null; if (_version >= 8) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(partitionIndex); _writable.writeShort(errorCode); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 8) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 8) { throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetCommitResponsePartition"); } _size.addBytes(4); _size.addBytes(2); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 8) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetCommitResponsePartition)) return false; OffsetCommitResponsePartition other = (OffsetCommitResponsePartition) obj; if (partitionIndex != other.partitionIndex) return false; if (errorCode != other.errorCode) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partitionIndex; hashCode = 31 * hashCode + errorCode; return hashCode; } @Override public OffsetCommitResponsePartition duplicate() { OffsetCommitResponsePartition _duplicate = new OffsetCommitResponsePartition(); _duplicate.partitionIndex = partitionIndex; _duplicate.errorCode = errorCode; return _duplicate; } @Override public String toString() { return "OffsetCommitResponsePartition(" + "partitionIndex=" + partitionIndex + ", errorCode=" + errorCode + ")"; } public int partitionIndex() { return this.partitionIndex; } public short errorCode() { return this.errorCode; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetCommitResponsePartition setPartitionIndex(int v) { this.partitionIndex = v; return this; } public OffsetCommitResponsePartition setErrorCode(short v) { this.errorCode = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetCommitResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.OffsetCommitResponseData.*; public class OffsetCommitResponseDataJsonConverter { public static OffsetCommitResponseData read(JsonNode _node, short _version) { OffsetCommitResponseData _object = new OffsetCommitResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { if (_version >= 3) { throw new RuntimeException("OffsetCommitResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = 0; } } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, 
"OffsetCommitResponseData"); } JsonNode _topicsNode = _node.get("topics"); if (_topicsNode == null) { throw new RuntimeException("OffsetCommitResponseData: unable to locate field 'topics', which is mandatory in version " + _version); } else { if (!_topicsNode.isArray()) { throw new RuntimeException("OffsetCommitResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<OffsetCommitResponseTopic> _collection = new ArrayList<OffsetCommitResponseTopic>(_topicsNode.size()); _object.topics = _collection; for (JsonNode _element : _topicsNode) { _collection.add(OffsetCommitResponseTopicJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(OffsetCommitResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_version >= 3) { _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); } ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance); for (OffsetCommitResponseTopic _element : _object.topics) { _topicsArray.add(OffsetCommitResponseTopicJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topics", _topicsArray); return _node; } public static JsonNode write(OffsetCommitResponseData _object, short _version) { return write(_object, _version, true); } public static class OffsetCommitResponsePartitionJsonConverter { public static OffsetCommitResponsePartition read(JsonNode _node, short _version) { OffsetCommitResponsePartition _object = new OffsetCommitResponsePartition(); JsonNode _partitionIndexNode = _node.get("partitionIndex"); if (_partitionIndexNode == null) { throw new RuntimeException("OffsetCommitResponsePartition: unable to locate field 'partitionIndex', which is mandatory in version " + _version); } else { _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "OffsetCommitResponsePartition"); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { 
throw new RuntimeException("OffsetCommitResponsePartition: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "OffsetCommitResponsePartition"); } return _object; } public static JsonNode write(OffsetCommitResponsePartition _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("partitionIndex", new IntNode(_object.partitionIndex)); _node.set("errorCode", new ShortNode(_object.errorCode)); return _node; } public static JsonNode write(OffsetCommitResponsePartition _object, short _version) { return write(_object, _version, true); } } public static class OffsetCommitResponseTopicJsonConverter { public static OffsetCommitResponseTopic read(JsonNode _node, short _version) { OffsetCommitResponseTopic _object = new OffsetCommitResponseTopic(); JsonNode _nameNode = _node.get("name"); if (_nameNode == null) { throw new RuntimeException("OffsetCommitResponseTopic: unable to locate field 'name', which is mandatory in version " + _version); } else { if (!_nameNode.isTextual()) { throw new RuntimeException("OffsetCommitResponseTopic expected a string type, but got " + _node.getNodeType()); } _object.name = _nameNode.asText(); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("OffsetCommitResponseTopic: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new RuntimeException("OffsetCommitResponseTopic expected a JSON array, but got " + _node.getNodeType()); } ArrayList<OffsetCommitResponsePartition> _collection = new ArrayList<OffsetCommitResponsePartition>(_partitionsNode.size()); _object.partitions = _collection; for (JsonNode _element : _partitionsNode) { _collection.add(OffsetCommitResponsePartitionJsonConverter.read(_element, _version)); } } return _object; } public 
static JsonNode write(OffsetCommitResponseTopic _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("name", new TextNode(_object.name)); ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (OffsetCommitResponsePartition _element : _object.partitions) { _partitionsArray.add(OffsetCommitResponsePartitionJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitions", _partitionsArray); return _node; } public static JsonNode write(OffsetCommitResponseTopic _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetDeleteRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; public class 
OffsetDeleteRequestData implements ApiMessage { String groupId; OffsetDeleteRequestTopicCollection topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("group_id", Type.STRING, "The unique group identifier."), new Field("topics", new ArrayOf(OffsetDeleteRequestTopic.SCHEMA_0), "The topics to delete offsets for") ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public OffsetDeleteRequestData(Readable _readable, short _version) { read(_readable, _version); } public OffsetDeleteRequestData() { this.groupId = ""; this.topics = new OffsetDeleteRequestTopicCollection(0); } @Override public short apiKey() { return 47; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { { int length; length = _readable.readShort(); if (length < 0) { throw new RuntimeException("non-nullable field groupId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field groupId had invalid length " + length); } else { this.groupId = _readable.readString(length); } } { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } OffsetDeleteRequestTopicCollection newCollection = new OffsetDeleteRequestTopicCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetDeleteRequestTopic(_readable, _version)); } this.topics = newCollection; } } this._unknownTaggedFields = null; } @Override public void 
write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(groupId); _writable.writeShort((short) _stringBytes.length); _writable.writeByteArray(_stringBytes); } _writable.writeInt(topics.size()); for (OffsetDeleteRequestTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'groupId' field is too long to be serialized"); } _cache.cacheSerializedValue(groupId, _stringBytes); _size.addBytes(_stringBytes.length + 2); } { _size.addBytes(4); for (OffsetDeleteRequestTopic topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetDeleteRequestData)) return false; OffsetDeleteRequestData other = (OffsetDeleteRequestData) obj; if (this.groupId == null) { if (other.groupId != null) return false; } else { if (!this.groupId.equals(other.groupId)) return 
false; } if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode()); hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); return hashCode; } @Override public OffsetDeleteRequestData duplicate() { OffsetDeleteRequestData _duplicate = new OffsetDeleteRequestData(); _duplicate.groupId = groupId; OffsetDeleteRequestTopicCollection newTopics = new OffsetDeleteRequestTopicCollection(topics.size()); for (OffsetDeleteRequestTopic _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "OffsetDeleteRequestData(" + "groupId=" + ((groupId == null) ? "null" : "'" + groupId.toString() + "'") + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public String groupId() { return this.groupId; } public OffsetDeleteRequestTopicCollection topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetDeleteRequestData setGroupId(String v) { this.groupId = v; return this; } public OffsetDeleteRequestData setTopics(OffsetDeleteRequestTopicCollection v) { this.topics = v; return this; } public static class OffsetDeleteRequestTopic implements Message, ImplicitLinkedHashMultiCollection.Element { String name; List<OffsetDeleteRequestPartition> partitions; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(OffsetDeleteRequestPartition.SCHEMA_0), "Each 
partition to delete offsets for.") ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public OffsetDeleteRequestTopic(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public OffsetDeleteRequestTopic() { this.name = ""; this.partitions = new ArrayList<OffsetDeleteRequestPartition>(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetDeleteRequestTopic"); } { int length; length = _readable.readShort(); if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetDeleteRequestPartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetDeleteRequestPartition(_readable, _version)); } this.partitions = newCollection; } } this._unknownTaggedFields = null; } @Override public void write(Writable _writable, 
ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); _writable.writeShort((short) _stringBytes.length); _writable.writeByteArray(_stringBytes); } _writable.writeInt(partitions.size()); for (OffsetDeleteRequestPartition partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetDeleteRequestTopic"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); _size.addBytes(_stringBytes.length + 2); } { _size.addBytes(4); for (OffsetDeleteRequestPartition partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof OffsetDeleteRequestTopic)) return false; OffsetDeleteRequestTopic other = 
(OffsetDeleteRequestTopic) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetDeleteRequestTopic)) return false; OffsetDeleteRequestTopic other = (OffsetDeleteRequestTopic) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); return hashCode; } @Override public OffsetDeleteRequestTopic duplicate() { OffsetDeleteRequestTopic _duplicate = new OffsetDeleteRequestTopic(); _duplicate.name = name; ArrayList<OffsetDeleteRequestPartition> newPartitions = new ArrayList<OffsetDeleteRequestPartition>(partitions.size()); for (OffsetDeleteRequestPartition _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "OffsetDeleteRequestTopic(" + "name=" + ((name == null) ? 
"null" : "'" + name.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String name() { return this.name; } public List<OffsetDeleteRequestPartition> partitions() { return this.partitions; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetDeleteRequestTopic setName(String v) { this.name = v; return this; } public OffsetDeleteRequestTopic setPartitions(List<OffsetDeleteRequestPartition> v) { this.partitions = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class OffsetDeleteRequestPartition implements Message { int partitionIndex; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("partition_index", Type.INT32, "The partition index.") ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public OffsetDeleteRequestPartition(Readable _readable, short _version) { read(_readable, _version); } public OffsetDeleteRequestPartition() { this.partitionIndex = 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetDeleteRequestPartition"); } this.partitionIndex = _readable.readInt(); this._unknownTaggedFields = null; } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; 
_writable.writeInt(partitionIndex); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetDeleteRequestPartition"); } _size.addBytes(4); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetDeleteRequestPartition)) return false; OffsetDeleteRequestPartition other = (OffsetDeleteRequestPartition) obj; if (partitionIndex != other.partitionIndex) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partitionIndex; return hashCode; } @Override public OffsetDeleteRequestPartition duplicate() { OffsetDeleteRequestPartition _duplicate = new OffsetDeleteRequestPartition(); _duplicate.partitionIndex = partitionIndex; return _duplicate; } @Override public String toString() { return "OffsetDeleteRequestPartition(" + "partitionIndex=" + partitionIndex + ")"; } public int partitionIndex() { return this.partitionIndex; } @Override public List<RawTaggedField> unknownTaggedFields() { if 
(_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetDeleteRequestPartition setPartitionIndex(int v) { this.partitionIndex = v; return this; } } public static class OffsetDeleteRequestTopicCollection extends ImplicitLinkedHashMultiCollection<OffsetDeleteRequestTopic> { public OffsetDeleteRequestTopicCollection() { super(); } public OffsetDeleteRequestTopicCollection(int expectedNumElements) { super(expectedNumElements); } public OffsetDeleteRequestTopicCollection(Iterator<OffsetDeleteRequestTopic> iterator) { super(iterator); } public OffsetDeleteRequestTopic find(String name) { OffsetDeleteRequestTopic _key = new OffsetDeleteRequestTopic(); _key.setName(name); return find(_key); } public List<OffsetDeleteRequestTopic> findAll(String name) { OffsetDeleteRequestTopic _key = new OffsetDeleteRequestTopic(); _key.setName(name); return findAll(_key); } public OffsetDeleteRequestTopicCollection duplicate() { OffsetDeleteRequestTopicCollection _duplicate = new OffsetDeleteRequestTopicCollection(size()); for (OffsetDeleteRequestTopic _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetDeleteRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.OffsetDeleteRequestData.*; public class OffsetDeleteRequestDataJsonConverter { public static OffsetDeleteRequestData read(JsonNode _node, short _version) { OffsetDeleteRequestData _object = new OffsetDeleteRequestData(); JsonNode _groupIdNode = _node.get("groupId"); if (_groupIdNode == null) { throw new RuntimeException("OffsetDeleteRequestData: unable to locate field 'groupId', which is mandatory in version " + _version); } else { if (!_groupIdNode.isTextual()) { throw new RuntimeException("OffsetDeleteRequestData expected a string type, but got " + _node.getNodeType()); } _object.groupId = _groupIdNode.asText(); } JsonNode _topicsNode = _node.get("topics"); if 
(_topicsNode == null) { throw new RuntimeException("OffsetDeleteRequestData: unable to locate field 'topics', which is mandatory in version " + _version); } else { if (!_topicsNode.isArray()) { throw new RuntimeException("OffsetDeleteRequestData expected a JSON array, but got " + _node.getNodeType()); } OffsetDeleteRequestTopicCollection _collection = new OffsetDeleteRequestTopicCollection(_topicsNode.size()); _object.topics = _collection; for (JsonNode _element : _topicsNode) { _collection.add(OffsetDeleteRequestTopicJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(OffsetDeleteRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("groupId", new TextNode(_object.groupId)); ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance); for (OffsetDeleteRequestTopic _element : _object.topics) { _topicsArray.add(OffsetDeleteRequestTopicJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topics", _topicsArray); return _node; } public static JsonNode write(OffsetDeleteRequestData _object, short _version) { return write(_object, _version, true); } public static class OffsetDeleteRequestPartitionJsonConverter { public static OffsetDeleteRequestPartition read(JsonNode _node, short _version) { OffsetDeleteRequestPartition _object = new OffsetDeleteRequestPartition(); JsonNode _partitionIndexNode = _node.get("partitionIndex"); if (_partitionIndexNode == null) { throw new RuntimeException("OffsetDeleteRequestPartition: unable to locate field 'partitionIndex', which is mandatory in version " + _version); } else { _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "OffsetDeleteRequestPartition"); } return _object; } public static JsonNode write(OffsetDeleteRequestPartition _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); 
_node.set("partitionIndex", new IntNode(_object.partitionIndex)); return _node; } public static JsonNode write(OffsetDeleteRequestPartition _object, short _version) { return write(_object, _version, true); } } public static class OffsetDeleteRequestTopicJsonConverter { public static OffsetDeleteRequestTopic read(JsonNode _node, short _version) { OffsetDeleteRequestTopic _object = new OffsetDeleteRequestTopic(); JsonNode _nameNode = _node.get("name"); if (_nameNode == null) { throw new RuntimeException("OffsetDeleteRequestTopic: unable to locate field 'name', which is mandatory in version " + _version); } else { if (!_nameNode.isTextual()) { throw new RuntimeException("OffsetDeleteRequestTopic expected a string type, but got " + _node.getNodeType()); } _object.name = _nameNode.asText(); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("OffsetDeleteRequestTopic: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new RuntimeException("OffsetDeleteRequestTopic expected a JSON array, but got " + _node.getNodeType()); } ArrayList<OffsetDeleteRequestPartition> _collection = new ArrayList<OffsetDeleteRequestPartition>(_partitionsNode.size()); _object.partitions = _collection; for (JsonNode _element : _partitionsNode) { _collection.add(OffsetDeleteRequestPartitionJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(OffsetDeleteRequestTopic _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("name", new TextNode(_object.name)); ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (OffsetDeleteRequestPartition _element : _object.partitions) { _partitionsArray.add(OffsetDeleteRequestPartitionJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitions", _partitionsArray); return 
_node; } public static JsonNode write(OffsetDeleteRequestTopic _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetDeleteResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; public class 
OffsetDeleteResponseData implements ApiMessage { short errorCode; int throttleTimeMs; OffsetDeleteResponseTopicCollection topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("error_code", Type.INT16, "The top-level error code, or 0 if there was no error."), new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("topics", new ArrayOf(OffsetDeleteResponseTopic.SCHEMA_0), "The responses for each topic.") ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public OffsetDeleteResponseData(Readable _readable, short _version) { read(_readable, _version); } public OffsetDeleteResponseData() { this.errorCode = (short) 0; this.throttleTimeMs = 0; this.topics = new OffsetDeleteResponseTopicCollection(0); } @Override public short apiKey() { return 47; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { this.errorCode = _readable.readShort(); this.throttleTimeMs = _readable.readInt(); { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } OffsetDeleteResponseTopicCollection newCollection = new OffsetDeleteResponseTopicCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetDeleteResponseTopic(_readable, _version)); } this.topics = newCollection; } } 
// ---------------------------------------------------------------------------
// NOTE(review): machine-generated Apache Kafka protocol code (the file header
// states "THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT."). Only
// explanatory comments are added below; every code token is byte-identical.
// This span is the tail of OffsetDeleteResponseData plus its nested
// topic/partition element classes and their collections.
// ---------------------------------------------------------------------------
// Tail of read(); write() serializes errorCode, throttleTimeMs and the topics
// array, and throws if any tagged fields were set (no version of this message
// supports them); addSize() mirrors write() for size accounting (2 bytes for
// the short, 4 for the int, 4 for the array length); equals() compares every
// field including unknown tagged fields; hashCode() starts at the end.
this._unknownTaggedFields = null; } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeShort(errorCode); _writable.writeInt(throttleTimeMs); _writable.writeInt(topics.size()); for (OffsetDeleteResponseTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(2); _size.addBytes(4); { _size.addBytes(4); for (OffsetDeleteResponseTopic topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetDeleteResponseData)) return false; OffsetDeleteResponseData other = (OffsetDeleteResponseData) obj; if (errorCode != other.errorCode) return false; if (throttleTimeMs != other.throttleTimeMs) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; 
// hashCode() mixes errorCode, throttleTimeMs and topics; duplicate() deep-
// copies the topics collection; toString(); plain accessors and fluent
// setters follow. Then the nested OffsetDeleteResponseTopic element class
// begins: name + partitions fields, element links for the implicit linked-
// hash collection (next/prev), and its version-0 schema.
hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); return hashCode; } @Override public OffsetDeleteResponseData duplicate() { OffsetDeleteResponseData _duplicate = new OffsetDeleteResponseData(); _duplicate.errorCode = errorCode; _duplicate.throttleTimeMs = throttleTimeMs; OffsetDeleteResponseTopicCollection newTopics = new OffsetDeleteResponseTopicCollection(topics.size()); for (OffsetDeleteResponseTopic _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "OffsetDeleteResponseData(" + "errorCode=" + errorCode + ", throttleTimeMs=" + throttleTimeMs + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public short errorCode() { return this.errorCode; } public int throttleTimeMs() { return this.throttleTimeMs; } public OffsetDeleteResponseTopicCollection topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetDeleteResponseData setErrorCode(short v) { this.errorCode = v; return this; } public OffsetDeleteResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public OffsetDeleteResponseData setTopics(OffsetDeleteResponseTopicCollection v) { this.topics = v; return this; } public static class OffsetDeleteResponseTopic implements Message, ImplicitLinkedHashMultiCollection.Element { String name; OffsetDeleteResponsePartitionCollection partitions; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(OffsetDeleteResponsePartition.SCHEMA_0), "The responses for each partition in the topic.") ); public 
// OffsetDeleteResponseTopic supports version 0 only. The deserializing
// constructor delegates to read(), which decodes a short-length-prefixed
// non-nullable name string and an int-length-prefixed partitions array,
// guarding the collection pre-allocation against lengths larger than the
// remaining readable bytes.
static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public OffsetDeleteResponseTopic(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public OffsetDeleteResponseTopic() { this.name = ""; this.partitions = new OffsetDeleteResponsePartitionCollection(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetDeleteResponseTopic"); } { int length; length = _readable.readShort(); if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } OffsetDeleteResponsePartitionCollection newCollection = new OffsetDeleteResponsePartitionCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetDeleteResponsePartition(_readable, _version)); } this.partitions = newCollection; } } this._unknownTaggedFields = null; } @Override public void write(Writable _writable, ObjectSerializationCache _cache, 
// write() emits the cached UTF-8 bytes of name behind a short length prefix
// and then the partitions array; addSize() computes and caches the UTF-8
// encoding of name, rejecting names longer than 0x7fff bytes, and rejects
// tagged fields. elementKeysAreEqual() begins at the end of this line.
short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); _writable.writeShort((short) _stringBytes.length); _writable.writeByteArray(_stringBytes); } _writable.writeInt(partitions.size()); for (OffsetDeleteResponsePartition partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetDeleteResponseTopic"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); _size.addBytes(_stringBytes.length + 2); } { _size.addBytes(4); for (OffsetDeleteResponsePartition partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof OffsetDeleteResponseTopic)) return false; OffsetDeleteResponseTopic other = (OffsetDeleteResponseTopic) obj; if 
// elementKeysAreEqual() compares only the collection key (name), while
// equals() also compares partitions and unknown tagged fields; hashCode()
// likewise hashes only name, matching the key-based element semantics of the
// multi-collection; duplicate() deep-copies the partitions collection.
(this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetDeleteResponseTopic)) return false; OffsetDeleteResponseTopic other = (OffsetDeleteResponseTopic) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); return hashCode; } @Override public OffsetDeleteResponseTopic duplicate() { OffsetDeleteResponseTopic _duplicate = new OffsetDeleteResponseTopic(); _duplicate.name = name; OffsetDeleteResponsePartitionCollection newPartitions = new OffsetDeleteResponsePartitionCollection(partitions.size()); for (OffsetDeleteResponsePartition _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "OffsetDeleteResponseTopic(" + "name=" + ((name == null) ? 
// Tail of the Topic class: toString(), accessors, fluent setters, and the
// next/prev element-link hooks required by ImplicitLinkedHashCollection.
// Then the nested OffsetDeleteResponsePartition element class begins:
// partitionIndex + errorCode fields with their version-0 schema.
"null" : "'" + name.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String name() { return this.name; } public OffsetDeleteResponsePartitionCollection partitions() { return this.partitions; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetDeleteResponseTopic setName(String v) { this.name = v; return this; } public OffsetDeleteResponseTopic setPartitions(OffsetDeleteResponsePartitionCollection v) { this.partitions = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class OffsetDeleteResponsePartition implements Message, ImplicitLinkedHashMultiCollection.Element { int partitionIndex; short errorCode; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.") ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public OffsetDeleteResponsePartition(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public OffsetDeleteResponsePartition() { this.partitionIndex = 0; this.errorCode = (short) 0; this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() 
// Partition read()/write()/addSize() handle a fixed-width payload: an int32
// partitionIndex followed by an int16 errorCode (hence addBytes(4) and
// addBytes(2)); tagged fields are rejected since version 0 does not support
// them. elementKeysAreEqual() keys on partitionIndex only.
{ return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetDeleteResponsePartition"); } this.partitionIndex = _readable.readInt(); this.errorCode = _readable.readShort(); this._unknownTaggedFields = null; } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(partitionIndex); _writable.writeShort(errorCode); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetDeleteResponsePartition"); } _size.addBytes(4); _size.addBytes(2); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof OffsetDeleteResponsePartition)) return false; OffsetDeleteResponsePartition other = (OffsetDeleteResponsePartition) obj; if (partitionIndex != other.partitionIndex) return false; return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetDeleteResponsePartition)) return false; 
// Partition equals() compares partitionIndex, errorCode and unknown tagged
// fields, but hashCode() hashes only partitionIndex (the collection key);
// duplicate(), toString(), accessors, setters and element-link hooks follow.
// The OffsetDeleteResponsePartitionCollection begins at the end of this line
// with a find() helper that builds a key element from a partitionIndex.
OffsetDeleteResponsePartition other = (OffsetDeleteResponsePartition) obj; if (partitionIndex != other.partitionIndex) return false; if (errorCode != other.errorCode) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partitionIndex; return hashCode; } @Override public OffsetDeleteResponsePartition duplicate() { OffsetDeleteResponsePartition _duplicate = new OffsetDeleteResponsePartition(); _duplicate.partitionIndex = partitionIndex; _duplicate.errorCode = errorCode; return _duplicate; } @Override public String toString() { return "OffsetDeleteResponsePartition(" + "partitionIndex=" + partitionIndex + ", errorCode=" + errorCode + ")"; } public int partitionIndex() { return this.partitionIndex; } public short errorCode() { return this.errorCode; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetDeleteResponsePartition setPartitionIndex(int v) { this.partitionIndex = v; return this; } public OffsetDeleteResponsePartition setErrorCode(short v) { this.errorCode = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class OffsetDeleteResponsePartitionCollection extends ImplicitLinkedHashMultiCollection<OffsetDeleteResponsePartition> { public OffsetDeleteResponsePartitionCollection() { super(); } public OffsetDeleteResponsePartitionCollection(int expectedNumElements) { super(expectedNumElements); } public OffsetDeleteResponsePartitionCollection(Iterator<OffsetDeleteResponsePartition> iterator) { super(iterator); } public OffsetDeleteResponsePartition find(int partitionIndex) { OffsetDeleteResponsePartition _key 
// Collection classes: partitions are looked up via find()/findAll() keyed by
// partitionIndex, topics via find()/findAll() keyed by name; each collection
// offers a deep-copying duplicate(). The final brace closes the outer
// OffsetDeleteResponseData class.
= new OffsetDeleteResponsePartition(); _key.setPartitionIndex(partitionIndex); return find(_key); } public List<OffsetDeleteResponsePartition> findAll(int partitionIndex) { OffsetDeleteResponsePartition _key = new OffsetDeleteResponsePartition(); _key.setPartitionIndex(partitionIndex); return findAll(_key); } public OffsetDeleteResponsePartitionCollection duplicate() { OffsetDeleteResponsePartitionCollection _duplicate = new OffsetDeleteResponsePartitionCollection(size()); for (OffsetDeleteResponsePartition _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } public static class OffsetDeleteResponseTopicCollection extends ImplicitLinkedHashMultiCollection<OffsetDeleteResponseTopic> { public OffsetDeleteResponseTopicCollection() { super(); } public OffsetDeleteResponseTopicCollection(int expectedNumElements) { super(expectedNumElements); } public OffsetDeleteResponseTopicCollection(Iterator<OffsetDeleteResponseTopic> iterator) { super(iterator); } public OffsetDeleteResponseTopic find(String name) { OffsetDeleteResponseTopic _key = new OffsetDeleteResponseTopic(); _key.setName(name); return find(_key); } public List<OffsetDeleteResponseTopic> findAll(String name) { OffsetDeleteResponseTopic _key = new OffsetDeleteResponseTopic(); _key.setName(name); return findAll(_key); } public OffsetDeleteResponseTopicCollection duplicate() { OffsetDeleteResponseTopicCollection _duplicate = new OffsetDeleteResponseTopicCollection(size()); for (OffsetDeleteResponseTopic _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetDeleteResponseDataJsonConverter.java
// ---------------------------------------------------------------------------
// NOTE(review): machine-generated JSON converter for OffsetDeleteResponseData
// (the header below states "THIS CODE IS AUTOMATICALLY GENERATED. DO NOT
// EDIT."). Only explanatory comments are added; code tokens are unchanged.
// read() maps a JsonNode tree to the message object, throwing RuntimeException
// for missing mandatory fields; write() produces the mirror-image JsonNode.
// ---------------------------------------------------------------------------
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.OffsetDeleteResponseData.*; public class OffsetDeleteResponseDataJsonConverter { public static OffsetDeleteResponseData read(JsonNode _node, short _version) { OffsetDeleteResponseData _object = new OffsetDeleteResponseData(); JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("OffsetDeleteResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "OffsetDeleteResponseData"); } JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { throw 
// Top-level read() continues: throttleTimeMs is mandatory; topics must be a
// JSON array, collected element-by-element into an
// OffsetDeleteResponseTopicCollection. write() serializes errorCode,
// throttleTimeMs and topics; the two-arg write() overload defaults
// _serializeRecords to true. The nested partition converter follows the same
// read/write pattern for partitionIndex and errorCode. (Note: this is a dump
// artifact rejoin point — the RuntimeException message string continues
// across what was a wrapped line in the dump.)
new RuntimeException("OffsetDeleteResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "OffsetDeleteResponseData"); } JsonNode _topicsNode = _node.get("topics"); if (_topicsNode == null) { throw new RuntimeException("OffsetDeleteResponseData: unable to locate field 'topics', which is mandatory in version " + _version); } else { if (!_topicsNode.isArray()) { throw new RuntimeException("OffsetDeleteResponseData expected a JSON array, but got " + _node.getNodeType()); } OffsetDeleteResponseTopicCollection _collection = new OffsetDeleteResponseTopicCollection(_topicsNode.size()); _object.topics = _collection; for (JsonNode _element : _topicsNode) { _collection.add(OffsetDeleteResponseTopicJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(OffsetDeleteResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("errorCode", new ShortNode(_object.errorCode)); _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance); for (OffsetDeleteResponseTopic _element : _object.topics) { _topicsArray.add(OffsetDeleteResponseTopicJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topics", _topicsArray); return _node; } public static JsonNode write(OffsetDeleteResponseData _object, short _version) { return write(_object, _version, true); } public static class OffsetDeleteResponsePartitionJsonConverter { public static OffsetDeleteResponsePartition read(JsonNode _node, short _version) { OffsetDeleteResponsePartition _object = new OffsetDeleteResponsePartition(); JsonNode _partitionIndexNode = _node.get("partitionIndex"); if (_partitionIndexNode == null) { throw new RuntimeException("OffsetDeleteResponsePartition: unable to locate field 'partitionIndex', which is mandatory in version " + _version); } else { _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "OffsetDeleteResponsePartition"); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("OffsetDeleteResponsePartition: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "OffsetDeleteResponsePartition"); } return _object; } public static JsonNode write(OffsetDeleteResponsePartition _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("partitionIndex", new IntNode(_object.partitionIndex)); _node.set("errorCode", new ShortNode(_object.errorCode)); return _node; } public static JsonNode write(OffsetDeleteResponsePartition _object, short _version) { return write(_object, _version, true); } } public static class OffsetDeleteResponseTopicJsonConverter { public static OffsetDeleteResponseTopic read(JsonNode _node, short _version) { OffsetDeleteResponseTopic _object = new OffsetDeleteResponseTopic(); JsonNode _nameNode = _node.get("name"); if (_nameNode == null) { throw new RuntimeException("OffsetDeleteResponseTopic: unable to locate field 'name', which is mandatory in version " + _version); } else { if (!_nameNode.isTextual()) { throw new RuntimeException("OffsetDeleteResponseTopic expected a string type, but got " + _node.getNodeType()); } _object.name = _nameNode.asText(); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("OffsetDeleteResponseTopic: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new RuntimeException("OffsetDeleteResponseTopic expected a JSON array, but got " + _node.getNodeType()); } OffsetDeleteResponsePartitionCollection _collection = new 
// Topic converter tail: partitions array is read into an
// OffsetDeleteResponsePartitionCollection via the partition converter;
// write() emits name as a TextNode plus the partitions ArrayNode.
OffsetDeleteResponsePartitionCollection(_partitionsNode.size()); _object.partitions = _collection; for (JsonNode _element : _partitionsNode) { _collection.add(OffsetDeleteResponsePartitionJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(OffsetDeleteResponseTopic _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("name", new TextNode(_object.name)); ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (OffsetDeleteResponsePartition _element : _object.partitions) { _partitionsArray.add(OffsetDeleteResponsePartitionJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitions", _partitionsArray); return _node; } public static JsonNode write(OffsetDeleteResponseTopic _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetFetchRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class OffsetFetchRequestData 
implements ApiMessage { String groupId; List<OffsetFetchRequestTopic> topics; List<OffsetFetchRequestGroup> groups; boolean requireStable; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("group_id", Type.STRING, "The group to fetch offsets for."), new Field("topics", new ArrayOf(OffsetFetchRequestTopic.SCHEMA_0), "Each topic we would like to fetch offsets for, or null to fetch offsets for all topics.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("group_id", Type.STRING, "The group to fetch offsets for."), new Field("topics", ArrayOf.nullable(OffsetFetchRequestTopic.SCHEMA_0), "Each topic we would like to fetch offsets for, or null to fetch offsets for all topics.") ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = new Schema( new Field("group_id", Type.COMPACT_STRING, "The group to fetch offsets for."), new Field("topics", CompactArrayOf.nullable(OffsetFetchRequestTopic.SCHEMA_6), "Each topic we would like to fetch offsets for, or null to fetch offsets for all topics."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = new Schema( new Field("group_id", Type.COMPACT_STRING, "The group to fetch offsets for."), new Field("topics", CompactArrayOf.nullable(OffsetFetchRequestTopic.SCHEMA_6), "Each topic we would like to fetch offsets for, or null to fetch offsets for all topics."), new Field("require_stable", Type.BOOLEAN, "Whether broker should hold on returning unstable offsets but set a retriable error code for the partitions."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_8 = new Schema( new Field("groups", new CompactArrayOf(OffsetFetchRequestGroup.SCHEMA_8), "Each group we would like to fetch offsets for"), new Field("require_stable", Type.BOOLEAN, "Whether broker 
should hold on returning unstable offsets but set a retriable error code for the partitions."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 8; public OffsetFetchRequestData(Readable _readable, short _version) { read(_readable, _version); } public OffsetFetchRequestData() { this.groupId = ""; this.topics = new ArrayList<OffsetFetchRequestTopic>(0); this.groups = new ArrayList<OffsetFetchRequestGroup>(0); this.requireStable = false; } @Override public short apiKey() { return 9; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; } @Override public void read(Readable _readable, short _version) { if (_version <= 7) { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field groupId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field groupId had invalid length " + length); } else { this.groupId = _readable.readString(length); } } else { this.groupId = ""; } if (_version <= 7) { if (_version >= 6) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { this.topics = null; } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetFetchRequestTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetFetchRequestTopic(_readable, _version)); } this.topics = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); 
if (arrayLength < 0) { if (_version >= 2) { this.topics = null; } else { throw new RuntimeException("non-nullable field topics was serialized as null"); } } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetFetchRequestTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetFetchRequestTopic(_readable, _version)); } this.topics = newCollection; } } } else { this.topics = new ArrayList<OffsetFetchRequestTopic>(0); } if (_version >= 8) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field groups was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetFetchRequestGroup> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetFetchRequestGroup(_readable, _version)); } this.groups = newCollection; } } else { this.groups = new ArrayList<OffsetFetchRequestGroup>(0); } if (_version >= 7) { this.requireStable = _readable.readByte() != 0; } else { this.requireStable = false; } this._unknownTaggedFields = null; if (_version >= 6) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version <= 7) { { 
byte[] _stringBytes = _cache.getSerializedValue(groupId); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } else { if (!this.groupId.equals("")) { throw new UnsupportedVersionException("Attempted to write a non-default groupId at version " + _version); } } if (_version <= 7) { if (_version >= 6) { if (topics == null) { _writable.writeUnsignedVarint(0); } else { _writable.writeUnsignedVarint(topics.size() + 1); for (OffsetFetchRequestTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } } else { if (topics == null) { if (_version >= 2) { _writable.writeInt(-1); } else { throw new NullPointerException(); } } else { _writable.writeInt(topics.size()); for (OffsetFetchRequestTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } } } else { if (this.topics == null || !this.topics.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default topics at version " + _version); } } if (_version >= 8) { _writable.writeUnsignedVarint(groups.size() + 1); for (OffsetFetchRequestGroup groupsElement : groups) { groupsElement.write(_writable, _cache, _version); } } else { if (!this.groups.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default groups at version " + _version); } } if (_version >= 7) { _writable.writeByte(requireStable ? 
(byte) 1 : (byte) 0);
        } else {
            // requireStable is only on the wire from v7 on; refuse to silently drop a set flag.
            if (this.requireStable) {
                throw new UnsupportedVersionException("Attempted to write a non-default requireStable at version " + _version);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 6) {
            // Flexible versions (6+) end with a tagged-field section.
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this message for the given version.
     * Also caches the UTF-8 bytes of string fields in {@code _cache} so a
     * subsequent write() pass can reuse them.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version <= 7) {
            {
                byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'groupId' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(groupId, _stringBytes);
                if (_version >= 6) {
                    // Compact string: varint-encoded (length + 1) prefix.
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                } else {
                    // Classic string: 2-byte length prefix.
                    _size.addBytes(_stringBytes.length + 2);
                }
            }
        }
        if (_version <= 7) {
            if (topics == null) {
                if (_version >= 6) {
                    _size.addBytes(1);
                } else {
                    _size.addBytes(4);
                }
            } else {
                if (_version >= 6) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1));
                } else {
                    _size.addBytes(4);
                }
                for (OffsetFetchRequestTopic topicsElement : topics) {
                    topicsElement.addSize(_size, _cache, _version);
                }
            }
        }
        if (_version >= 8) {
            // v8 replaced the flat topics list with per-group requests.
            {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(groups.size() + 1));
                for (OffsetFetchRequestGroup groupsElement : groups) {
                    groupsElement.addSize(_size, _cache, _version);
                }
            }
        }
        if (_version >= 7) {
            // requireStable boolean.
            _size.addBytes(1);
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 6) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Structural equality over groupId, topics, groups, requireStable and any
     * unknown tagged fields.
     */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof OffsetFetchRequestData)) return false;
        OffsetFetchRequestData other = (OffsetFetchRequestData) obj;
        if (this.groupId == null) {
            if (other.groupId != null) return false;
        } else {
            if (!this.groupId.equals(other.groupId)) return false;
        }
        if (this.topics == null) {
            if (other.topics != null) return false;
        } else {
            if (!this.topics.equals(other.topics)) return false;
        }
        if (this.groups == null) {
            if (other.groups != null) return false;
        } else {
            if (!this.groups.equals(other.groups)) return false;
        }
        if (requireStable != other.requireStable) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // Note: unlike equals(), the hash does not mix in _unknownTaggedFields;
    // the nested message classes below follow the same convention.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode());
        hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
        hashCode = 31 * hashCode + (groups == null ? 0 : groups.hashCode());
        hashCode = 31 * hashCode + (requireStable ? 1231 : 1237);
        return hashCode;
    }

    /**
     * Deep copy: the element lists are rebuilt with duplicated elements;
     * groupId (an immutable String) is shared.
     */
    @Override
    public OffsetFetchRequestData duplicate() {
        OffsetFetchRequestData _duplicate = new OffsetFetchRequestData();
        _duplicate.groupId = groupId;
        if (topics == null) {
            _duplicate.topics = null;
        } else {
            ArrayList<OffsetFetchRequestTopic> newTopics = new ArrayList<OffsetFetchRequestTopic>(topics.size());
            for (OffsetFetchRequestTopic _element : topics) {
                newTopics.add(_element.duplicate());
            }
            _duplicate.topics = newTopics;
        }
        ArrayList<OffsetFetchRequestGroup> newGroups = new ArrayList<OffsetFetchRequestGroup>(groups.size());
        for (OffsetFetchRequestGroup _element : groups) {
            newGroups.add(_element.duplicate());
        }
        _duplicate.groups = newGroups;
        _duplicate.requireStable = requireStable;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "OffsetFetchRequestData("
            + "groupId=" + ((groupId == null) ? "null" : "'" + groupId.toString() + "'")
            + ", topics=" + ((topics == null) ? "null" : MessageUtil.deepToString(topics.iterator()))
            + ", groups=" + MessageUtil.deepToString(groups.iterator())
            + ", requireStable=" + (requireStable ? "true" : "false")
            + ")";
    }

    // Field accessors.
    public String groupId() {
        return this.groupId;
    }

    public List<OffsetFetchRequestTopic> topics() {
        return this.topics;
    }

    public List<OffsetFetchRequestGroup> groups() {
        return this.groups;
    }

    public boolean requireStable() {
        return this.requireStable;
    }

    // Lazily creates the list so callers can always append.
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // Fluent setters.
    public OffsetFetchRequestData setGroupId(String v) {
        this.groupId = v;
        return this;
    }

    public OffsetFetchRequestData setTopics(List<OffsetFetchRequestTopic> v) {
        this.topics = v;
        return this;
    }

    public OffsetFetchRequestData setGroups(List<OffsetFetchRequestGroup> v) {
        this.groups = v;
        return this;
    }

    public OffsetFetchRequestData setRequireStable(boolean v) {
        this.requireStable = v;
        return this;
    }

    /**
     * One requested topic in the pre-v8 (flat) request layout: a topic name
     * plus the partition indexes to fetch committed offsets for.
     */
    public static class OffsetFetchRequestTopic implements Message {
        String name;
        List<Integer> partitionIndexes;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("name", Type.STRING, "The topic name."),
                new Field("partition_indexes", new ArrayOf(Type.INT32), "The partition indexes we would like to fetch offsets for.")
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;
        public static final Schema SCHEMA_2 = SCHEMA_1;
        public static final Schema SCHEMA_3 = SCHEMA_2;
        public static final Schema SCHEMA_4 = SCHEMA_3;
        public static final Schema SCHEMA_5 = SCHEMA_4;

        // v6+ is "flexible": compact strings/arrays plus a tagged-field section.
        public static final Schema SCHEMA_6 =
            new Schema(
                new Field("name", Type.COMPACT_STRING, "The topic name."),
                new Field("partition_indexes", new CompactArrayOf(Type.INT32), "The partition indexes we would like to fetch offsets for."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema SCHEMA_7 = SCHEMA_6;

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1,
            SCHEMA_2,
            SCHEMA_3,
            SCHEMA_4,
            SCHEMA_5,
            SCHEMA_6,
            SCHEMA_7
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 7;

        public OffsetFetchRequestTopic(Readable _readable, short _version) {
            read(_readable, _version);
        }

        public OffsetFetchRequestTopic() {
            this.name = "";
            this.partitionIndexes = new ArrayList<Integer>(0);
        }

        // NOTE(review): these report the enclosing message's version range (0..8),
        // not this struct's own LOWEST/HIGHEST_SUPPORTED_VERSION constants (0..7).
        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 8;
        }

        @Override
        public void read(Readable _readable, short _version) {
            {
                int length;
                if (_version >= 6) {
                    // Compact string: varint length is (actual length + 1).
                    length = _readable.readUnsignedVarint() - 1;
                } else {
                    length = _readable.readShort();
                }
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                if (_version >= 6) {
                    arrayLength = _readable.readUnsignedVarint() - 1;
                } else {
                    arrayLength = _readable.readInt();
                }
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitionIndexes was serialized as null");
                } else {
                    // Sanity-check the claimed length before allocating.
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<Integer> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(_readable.readInt());
                    }
                    this.partitionIndexes = newCollection;
                }
            }
            this._unknownTaggedFields = null;
            if (_version >= 6) {
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            // No known tags for this struct; retain unknown ones for round-tripping.
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version > 7) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of OffsetFetchRequestTopic");
            }
            int _numTaggedFields = 0;
            {
                // Reuses the UTF-8 bytes cached by a prior addSize() pass.
                byte[] _stringBytes = _cache.getSerializedValue(name);
                if (_version >= 6) {
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                } else {
                    _writable.writeShort((short) _stringBytes.length);
                }
                _writable.writeByteArray(_stringBytes);
            }
            if (_version >= 6) {
                _writable.writeUnsignedVarint(partitionIndexes.size() + 1);
            } else {
                _writable.writeInt(partitionIndexes.size());
            }
            for (Integer partitionIndexesElement : partitionIndexes) {
                _writable.writeInt(partitionIndexesElement);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 6) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                if (_version >= 6) {
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                } else {
                    _size.addBytes(_stringBytes.length + 2);
                }
            }
            {
                if (_version >= 6) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitionIndexes.size() + 1));
                } else {
                    _size.addBytes(4);
                }
                // 4 bytes per INT32 element.
                _size.addBytes(partitionIndexes.size() * 4);
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_version >= 6) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof OffsetFetchRequestTopic)) return false;
            OffsetFetchRequestTopic other = (OffsetFetchRequestTopic) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            if (this.partitionIndexes == null) {
                if (other.partitionIndexes != null) return false;
            } else {
                if (!this.partitionIndexes.equals(other.partitionIndexes)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            hashCode = 31 * hashCode + (partitionIndexes == null ? 0 : partitionIndexes.hashCode());
            return hashCode;
        }

        @Override
        public OffsetFetchRequestTopic duplicate() {
            OffsetFetchRequestTopic _duplicate = new OffsetFetchRequestTopic();
            _duplicate.name = name;
            ArrayList<Integer> newPartitionIndexes = new ArrayList<Integer>(partitionIndexes.size());
            for (Integer _element : partitionIndexes) {
                newPartitionIndexes.add(_element);
            }
            _duplicate.partitionIndexes = newPartitionIndexes;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "OffsetFetchRequestTopic("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", partitionIndexes=" + MessageUtil.deepToString(partitionIndexes.iterator())
                + ")";
        }

        public String name() {
            return this.name;
        }

        public List<Integer> partitionIndexes() {
            return this.partitionIndexes;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public OffsetFetchRequestTopic setName(String v) {
            this.name = v;
            return this;
        }

        public OffsetFetchRequestTopic setPartitionIndexes(List<Integer> v) {
            this.partitionIndexes = v;
            return this;
        }
    }

    /**
     * One consumer group's request in the v8+ layout: a group id plus its
     * requested topics (null topics means "all topics").
     */
    public static class OffsetFetchRequestGroup implements Message {
        String groupId;
        List<OffsetFetchRequestTopics> topics;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_8 =
            new Schema(
                new Field("group_id", Type.COMPACT_STRING, "The group ID."),
                new Field("topics", CompactArrayOf.nullable(OffsetFetchRequestTopics.SCHEMA_8), "Each topic we would like to fetch offsets for, or null to fetch offsets for all topics."),
                TaggedFieldsSection.of(
                )
            );

        // Only exists from v8 on; earlier slots are null.
        public static final Schema[] SCHEMAS = new Schema[] {
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            SCHEMA_8
        };

        public static final short LOWEST_SUPPORTED_VERSION = 8;
        public static final short HIGHEST_SUPPORTED_VERSION = 8;

        public OffsetFetchRequestGroup(Readable _readable, short _version) {
            read(_readable, _version);
        }

        public OffsetFetchRequestGroup() {
            this.groupId = "";
            this.topics = new ArrayList<OffsetFetchRequestTopics>(0);
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 8;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 8) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetFetchRequestGroup");
            }
            {
                int length;
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    throw new RuntimeException("non-nullable field groupId was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field groupId had invalid length " + length);
                } else {
                    this.groupId = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    // topics is nullable: null means "fetch offsets for all topics".
                    this.topics = null;
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<OffsetFetchRequestTopics> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new OffsetFetchRequestTopics(_readable, _version));
                    }
                    this.topics = newCollection;
                }
            }
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version < 8) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of OffsetFetchRequestGroup");
            }
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(groupId);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            if (topics == null) {
                // Null compact array is encoded as varint 0.
                _writable.writeUnsignedVarint(0);
            } else {
                _writable.writeUnsignedVarint(topics.size() + 1);
                for (OffsetFetchRequestTopics topicsElement : topics) {
                    topicsElement.write(_writable, _cache, _version);
                }
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 8) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetFetchRequestGroup");
            }
            {
                byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'groupId' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(groupId, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
            if (topics == null) {
                _size.addBytes(1);
            } else {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1));
                for (OffsetFetchRequestTopics topicsElement : topics) {
                    topicsElement.addSize(_size, _cache, _version);
                }
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof OffsetFetchRequestGroup)) return false;
            OffsetFetchRequestGroup other = (OffsetFetchRequestGroup) obj;
            if (this.groupId == null) {
                if (other.groupId != null) return false;
            } else {
                if (!this.groupId.equals(other.groupId)) return false;
            }
            if (this.topics == null) {
                if (other.topics != null) return false;
            } else {
                if (!this.topics.equals(other.topics)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode());
            hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
            return hashCode;
        }

        @Override
        public OffsetFetchRequestGroup duplicate() {
            OffsetFetchRequestGroup _duplicate = new OffsetFetchRequestGroup();
            _duplicate.groupId = groupId;
            if (topics == null) {
                _duplicate.topics = null;
            } else {
                ArrayList<OffsetFetchRequestTopics> newTopics = new ArrayList<OffsetFetchRequestTopics>(topics.size());
                for (OffsetFetchRequestTopics _element : topics) {
                    newTopics.add(_element.duplicate());
                }
                _duplicate.topics = newTopics;
            }
            return _duplicate;
        }

        @Override
        public String toString() {
            return "OffsetFetchRequestGroup("
                + "groupId=" + ((groupId == null) ? "null" : "'" + groupId.toString() + "'")
                + ", topics=" + ((topics == null) ? "null" : MessageUtil.deepToString(topics.iterator()))
                + ")";
        }

        public String groupId() {
            return this.groupId;
        }

        public List<OffsetFetchRequestTopics> topics() {
            return this.topics;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public OffsetFetchRequestGroup setGroupId(String v) {
            this.groupId = v;
            return this;
        }

        public OffsetFetchRequestGroup setTopics(List<OffsetFetchRequestTopics> v) {
            this.topics = v;
            return this;
        }
    }

    /**
     * One requested topic inside a v8+ group entry. Distinct from
     * OffsetFetchRequestTopic, which is the pre-v8 flat layout.
     */
    public static class OffsetFetchRequestTopics implements Message {
        String name;
        List<Integer> partitionIndexes;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_8 =
            new Schema(
                new Field("name", Type.COMPACT_STRING, "The topic name."),
                new Field("partition_indexes", new CompactArrayOf(Type.INT32), "The partition indexes we would like to fetch offsets for."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            SCHEMA_8
        };

        public static final short LOWEST_SUPPORTED_VERSION = 8;
        public static final short HIGHEST_SUPPORTED_VERSION = 8;

        public OffsetFetchRequestTopics(Readable _readable, short _version) {
            read(_readable, _version);
        }

        public OffsetFetchRequestTopics() {
            this.name = "";
            this.partitionIndexes = new ArrayList<Integer>(0);
        }

        @Override
        public short lowestSupportedVersion() {
            return 8;
        }

        @Override
        public short highestSupportedVersion() {
            return 8;
        }

        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 8) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetFetchRequestTopics");
            }
            {
                int length;
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitionIndexes was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<Integer> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(_readable.readInt());
                    }
                    this.partitionIndexes = newCollection;
                }
            }
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        // NOTE(review): unlike read()/addSize(), write() has no version guard here;
        // callers reach it only via OffsetFetchRequestGroup.write, which checks _version >= 8.
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(name);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeUnsignedVarint(partitionIndexes.size() + 1);
            for (Integer partitionIndexesElement : partitionIndexes) {
                _writable.writeInt(partitionIndexesElement);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 8) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetFetchRequestTopics");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
            {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitionIndexes.size() + 1));
                _size.addBytes(partitionIndexes.size() * 4);
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof OffsetFetchRequestTopics)) return false;
            OffsetFetchRequestTopics other = (OffsetFetchRequestTopics) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            if (this.partitionIndexes == null) {
                if (other.partitionIndexes != null) return false;
            } else {
                if (!this.partitionIndexes.equals(other.partitionIndexes)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            hashCode = 31 * hashCode + (partitionIndexes == null ? 0 : partitionIndexes.hashCode());
            return hashCode;
        }

        @Override
        public OffsetFetchRequestTopics duplicate() {
            OffsetFetchRequestTopics _duplicate = new OffsetFetchRequestTopics();
            _duplicate.name = name;
            ArrayList<Integer> newPartitionIndexes = new ArrayList<Integer>(partitionIndexes.size());
            for (Integer _element : partitionIndexes) {
                newPartitionIndexes.add(_element);
            }
            _duplicate.partitionIndexes = newPartitionIndexes;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "OffsetFetchRequestTopics("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", partitionIndexes=" + MessageUtil.deepToString(partitionIndexes.iterator())
                + ")";
        }

        public String name() {
            return this.name;
        }

        public List<Integer> partitionIndexes() {
            return this.partitionIndexes;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public OffsetFetchRequestTopics setName(String v) {
            this.name = v;
            return this;
        }

        public OffsetFetchRequestTopics setPartitionIndexes(List<Integer> v) {
            this.partitionIndexes = v;
            return this;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetFetchRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.BooleanNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.NullNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.OffsetFetchRequestData.*; public class OffsetFetchRequestDataJsonConverter { public static OffsetFetchRequestData read(JsonNode _node, short _version) { OffsetFetchRequestData _object = new OffsetFetchRequestData(); JsonNode _groupIdNode = _node.get("groupId"); if (_groupIdNode == null) { if (_version <= 7) { throw new RuntimeException("OffsetFetchRequestData: unable to locate field 'groupId', which is mandatory in version " + _version); } else { _object.groupId = ""; } } else { if 
(!_groupIdNode.isTextual()) { throw new RuntimeException("OffsetFetchRequestData expected a string type, but got " + _node.getNodeType()); } _object.groupId = _groupIdNode.asText(); } JsonNode _topicsNode = _node.get("topics"); if (_topicsNode == null) { if (_version <= 7) { throw new RuntimeException("OffsetFetchRequestData: unable to locate field 'topics', which is mandatory in version " + _version); } else { _object.topics = new ArrayList<OffsetFetchRequestTopic>(0); } } else { if (_topicsNode.isNull()) { _object.topics = null; } else { if (!_topicsNode.isArray()) { throw new RuntimeException("OffsetFetchRequestData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<OffsetFetchRequestTopic> _collection = new ArrayList<OffsetFetchRequestTopic>(_topicsNode.size()); _object.topics = _collection; for (JsonNode _element : _topicsNode) { _collection.add(OffsetFetchRequestTopicJsonConverter.read(_element, _version)); } } } JsonNode _groupsNode = _node.get("groups"); if (_groupsNode == null) { if (_version >= 8) { throw new RuntimeException("OffsetFetchRequestData: unable to locate field 'groups', which is mandatory in version " + _version); } else { _object.groups = new ArrayList<OffsetFetchRequestGroup>(0); } } else { if (!_groupsNode.isArray()) { throw new RuntimeException("OffsetFetchRequestData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<OffsetFetchRequestGroup> _collection = new ArrayList<OffsetFetchRequestGroup>(_groupsNode.size()); _object.groups = _collection; for (JsonNode _element : _groupsNode) { _collection.add(OffsetFetchRequestGroupJsonConverter.read(_element, _version)); } } JsonNode _requireStableNode = _node.get("requireStable"); if (_requireStableNode == null) { if (_version >= 7) { throw new RuntimeException("OffsetFetchRequestData: unable to locate field 'requireStable', which is mandatory in version " + _version); } else { _object.requireStable = false; } } else { if (!_requireStableNode.isBoolean()) { 
throw new RuntimeException("OffsetFetchRequestData expected Boolean type, but got " + _node.getNodeType()); } _object.requireStable = _requireStableNode.asBoolean(); } return _object; } public static JsonNode write(OffsetFetchRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_version <= 7) { _node.set("groupId", new TextNode(_object.groupId)); } else { if (!_object.groupId.equals("")) { throw new UnsupportedVersionException("Attempted to write a non-default groupId at version " + _version); } } if (_version <= 7) { if (_object.topics == null) { _node.set("topics", NullNode.instance); } else { ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance); for (OffsetFetchRequestTopic _element : _object.topics) { _topicsArray.add(OffsetFetchRequestTopicJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topics", _topicsArray); } } else { if (_object.topics == null || !_object.topics.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default topics at version " + _version); } } if (_version >= 8) { ArrayNode _groupsArray = new ArrayNode(JsonNodeFactory.instance); for (OffsetFetchRequestGroup _element : _object.groups) { _groupsArray.add(OffsetFetchRequestGroupJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("groups", _groupsArray); } else { if (!_object.groups.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default groups at version " + _version); } } if (_version >= 7) { _node.set("requireStable", BooleanNode.valueOf(_object.requireStable)); } else { if (_object.requireStable) { throw new UnsupportedVersionException("Attempted to write a non-default requireStable at version " + _version); } } return _node; } public static JsonNode write(OffsetFetchRequestData _object, short _version) { return write(_object, _version, true); } public static class OffsetFetchRequestGroupJsonConverter { 
/**
 * Deserializes an {@code OffsetFetchRequestGroup} from its JSON representation.
 *
 * @param _node    the JSON object holding the group fields.
 * @param _version the OffsetFetch API version being decoded.
 * @return the populated {@code OffsetFetchRequestGroup}.
 * @throws UnsupportedVersionException if {@code _version} is below 8 — per-group
 *         request entries only exist in the wire format from version 8 onward.
 * @throws RuntimeException if a mandatory field is missing or has the wrong JSON type.
 */
public static OffsetFetchRequestGroup read(JsonNode _node, short _version) {
    OffsetFetchRequestGroup _object = new OffsetFetchRequestGroup();
    if (_version < 8) {
        throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetFetchRequestGroup");
    }
    // 'groupId' is mandatory in every version this struct exists in.
    JsonNode _groupIdNode = _node.get("groupId");
    if (_groupIdNode == null) {
        throw new RuntimeException("OffsetFetchRequestGroup: unable to locate field 'groupId', which is mandatory in version " + _version);
    } else {
        if (!_groupIdNode.isTextual()) {
            throw new RuntimeException("OffsetFetchRequestGroup expected a string type, but got " + _node.getNodeType());
        }
        _object.groupId = _groupIdNode.asText();
    }
    // 'topics' must be present, but unlike most array fields it is nullable:
    // a JSON null is preserved as a null list (presumably meaning "all topics"
    // — NOTE(review): confirm against the OffsetFetchRequest schema).
    JsonNode _topicsNode = _node.get("topics");
    if (_topicsNode == null) {
        throw new RuntimeException("OffsetFetchRequestGroup: unable to locate field 'topics', which is mandatory in version " + _version);
    } else {
        if (_topicsNode.isNull()) {
            _object.topics = null;
        } else {
            if (!_topicsNode.isArray()) {
                throw new RuntimeException("OffsetFetchRequestGroup expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<OffsetFetchRequestTopics> _collection = new ArrayList<OffsetFetchRequestTopics>(_topicsNode.size());
            _object.topics = _collection;
            for (JsonNode _element : _topicsNode) {
                // Delegate per-topic decoding to the nested-struct converter.
                _collection.add(OffsetFetchRequestTopicsJsonConverter.read(_element, _version));
            }
        }
    }
    return _object;
}

/**
 * Serializes an {@code OffsetFetchRequestGroup} to JSON.
 *
 * @param _object           the group to serialize.
 * @param _version          the OffsetFetch API version being encoded.
 * @param _serializeRecords forwarded to nested converters; unused directly here.
 * @return the resulting JSON object node.
 * @throws UnsupportedVersionException if {@code _version} is below 8.
 */
public static JsonNode write(OffsetFetchRequestGroup _object, short _version, boolean _serializeRecords) {
    if (_version < 8) {
        throw new UnsupportedVersionException("Can't write version " + _version + " of OffsetFetchRequestGroup");
    }
    ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
    _node.set("groupId", new TextNode(_object.groupId));
    // A null topics list round-trips as an explicit JSON null (see read()).
    if (_object.topics == null) {
        _node.set("topics", NullNode.instance);
    } else {
        ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
        for (OffsetFetchRequestTopics _element : _object.topics) {
            _topicsArray.add(OffsetFetchRequestTopicsJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("topics", _topicsArray);
    }
    return _node;
}

/** Convenience overload: serializes with {@code _serializeRecords} defaulted to {@code true}. */
public static JsonNode write(OffsetFetchRequestGroup _object, short _version) {
    return write(_object, _version, true);
}
} // end OffsetFetchRequestGroupJsonConverter

/**
 * JSON converter for {@code OffsetFetchRequestTopic}, the pre-v8 per-topic
 * request entry (versions 0-7 only — the inverse gate of
 * {@code OffsetFetchRequestGroupJsonConverter} above).
 */
public static class OffsetFetchRequestTopicJsonConverter {
    /**
     * Deserializes an {@code OffsetFetchRequestTopic} from JSON.
     *
     * @throws UnsupportedVersionException if {@code _version} is above 7.
     * @throws RuntimeException if a mandatory field is missing or mistyped.
     */
    public static OffsetFetchRequestTopic read(JsonNode _node, short _version) {
        OffsetFetchRequestTopic _object = new OffsetFetchRequestTopic();
        if (_version > 7) {
            throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetFetchRequestTopic");
        }
        JsonNode _nameNode = _node.get("name");
        if (_nameNode == null) {
            throw new RuntimeException("OffsetFetchRequestTopic: unable to locate field 'name', which is mandatory in version " + _version);
        } else {
            if (!_nameNode.isTextual()) {
                throw new RuntimeException("OffsetFetchRequestTopic expected a string type, but got " + _node.getNodeType());
            }
            _object.name = _nameNode.asText();
        }
        // 'partitionIndexes' is a mandatory, non-nullable int array here
        // (no null branch, unlike the group-level 'topics' field).
        JsonNode _partitionIndexesNode = _node.get("partitionIndexes");
        if (_partitionIndexesNode == null) {
            throw new RuntimeException("OffsetFetchRequestTopic: unable to locate field 'partitionIndexes', which is mandatory in version " + _version);
        } else {
            if (!_partitionIndexesNode.isArray()) {
                throw new RuntimeException("OffsetFetchRequestTopic expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<Integer> _collection = new ArrayList<Integer>(_partitionIndexesNode.size());
            _object.partitionIndexes = _collection;
            for (JsonNode _element : _partitionIndexesNode) {
                _collection.add(MessageUtil.jsonNodeToInt(_element, "OffsetFetchRequestTopic element"));
            }
        }
        return _object;
    }

    /**
     * Serializes an {@code OffsetFetchRequestTopic} to JSON.
     *
     * @throws UnsupportedVersionException if {@code _version} is above 7.
     */
    public static JsonNode write(OffsetFetchRequestTopic _object, short _version, boolean _serializeRecords) {
        if (_version > 7) {
            throw new UnsupportedVersionException("Can't write version " + _version + " of OffsetFetchRequestTopic");
        }
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("name", new TextNode(_object.name));
        ArrayNode _partitionIndexesArray = new ArrayNode(JsonNodeFactory.instance);
        for (Integer _element : _object.partitionIndexes) {
            _partitionIndexesArray.add(new IntNode(_element));
        }
        _node.set("partitionIndexes", _partitionIndexesArray);
        return _node;
    }

    /** Convenience overload: serializes with {@code _serializeRecords} defaulted to {@code true}. */
    public static JsonNode write(OffsetFetchRequestTopic _object, short _version) {
        return write(_object, _version, true);
    }
}

/**
 * JSON converter for {@code OffsetFetchRequestTopics}, the v8+ per-topic entry
 * nested inside a request group. Note: no version gate here — version
 * enforcement happens in the enclosing group converter.
 */
public static class OffsetFetchRequestTopicsJsonConverter {
    /**
     * Deserializes an {@code OffsetFetchRequestTopics} from JSON.
     *
     * @throws RuntimeException if a mandatory field is missing or mistyped.
     */
    public static OffsetFetchRequestTopics read(JsonNode _node, short _version) {
        OffsetFetchRequestTopics _object = new OffsetFetchRequestTopics();
        JsonNode _nameNode = _node.get("name");
        if (_nameNode == null) {
            throw new RuntimeException("OffsetFetchRequestTopics: unable to locate field 'name', which is mandatory in version " + _version);
        } else {
            if (!_nameNode.isTextual()) {
                throw new RuntimeException("OffsetFetchRequestTopics expected a string type, but got " + _node.getNodeType());
            }
            _object.name = _nameNode.asText();
        }
        JsonNode _partitionIndexesNode = _node.get("partitionIndexes");
        if (_partitionIndexesNode == null) {
            throw new RuntimeException("OffsetFetchRequestTopics: unable to locate field 'partitionIndexes', which is mandatory in version " + _version);
        } else {
            if (!_partitionIndexesNode.isArray()) {
                throw new RuntimeException("OffsetFetchRequestTopics expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<Integer> _collection = new ArrayList<Integer>(_partitionIndexesNode.size());
            _object.partitionIndexes = _collection;
            for (JsonNode _element : _partitionIndexesNode) {
                _collection.add(MessageUtil.jsonNodeToInt(_element, "OffsetFetchRequestTopics element"));
            }
        }
        return _object;
    }

    /** Serializes an {@code OffsetFetchRequestTopics} to JSON. */
    public static JsonNode write(OffsetFetchRequestTopics _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("name", new TextNode(_object.name));
        ArrayNode _partitionIndexesArray = new ArrayNode(JsonNodeFactory.instance);
        for (Integer _element : _object.partitionIndexes) {
            _partitionIndexesArray.add(new IntNode(_element));
        }
        _node.set("partitionIndexes", _partitionIndexesArray);
        return _node;
    }

    /** Convenience overload: serializes with {@code _serializeRecords} defaulted to {@code true}. */
    public static JsonNode write(OffsetFetchRequestTopics _object, short _version) {
        return write(_object, _version, true);
    }
} // end OffsetFetchRequestTopicsJsonConverter
} // end OffsetFetchRequestDataJsonConverter
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetFetchResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class OffsetFetchResponseData 
implements ApiMessage { int throttleTimeMs; List<OffsetFetchResponseTopic> topics; short errorCode; List<OffsetFetchResponseGroup> groups; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topics", new ArrayOf(OffsetFetchResponseTopic.SCHEMA_0), "The responses per topic.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("topics", new ArrayOf(OffsetFetchResponseTopic.SCHEMA_0), "The responses per topic."), new Field("error_code", Type.INT16, "The top-level error code, or 0 if there was no error.") ); public static final Schema SCHEMA_3 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("topics", new ArrayOf(OffsetFetchResponseTopic.SCHEMA_0), "The responses per topic."), new Field("error_code", Type.INT16, "The top-level error code, or 0 if there was no error.") ); public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("topics", new ArrayOf(OffsetFetchResponseTopic.SCHEMA_5), "The responses per topic."), new Field("error_code", Type.INT16, "The top-level error code, or 0 if there was no error.") ); public static final Schema SCHEMA_6 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("topics", new CompactArrayOf(OffsetFetchResponseTopic.SCHEMA_6), "The responses per topic."), new Field("error_code", Type.INT16, "The top-level error code, or 0 if there was no error."), TaggedFieldsSection.of( ) 
); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("groups", new CompactArrayOf(OffsetFetchResponseGroup.SCHEMA_8), "The responses per group id."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 8; public OffsetFetchResponseData(Readable _readable, short _version) { read(_readable, _version); } public OffsetFetchResponseData() { this.throttleTimeMs = 0; this.topics = new ArrayList<OffsetFetchResponseTopic>(0); this.errorCode = (short) 0; this.groups = new ArrayList<OffsetFetchResponseGroup>(0); } @Override public short apiKey() { return 9; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; } @Override public void read(Readable _readable, short _version) { if (_version >= 3) { this.throttleTimeMs = _readable.readInt(); } else { this.throttleTimeMs = 0; } if (_version <= 7) { if (_version >= 6) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetFetchResponseTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetFetchResponseTopic(_readable, _version)); } this.topics = newCollection; } } else { int arrayLength; 
arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetFetchResponseTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetFetchResponseTopic(_readable, _version)); } this.topics = newCollection; } } } else { this.topics = new ArrayList<OffsetFetchResponseTopic>(0); } if ((_version >= 2) && (_version <= 7)) { this.errorCode = _readable.readShort(); } else { this.errorCode = (short) 0; } if (_version >= 8) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field groups was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetFetchResponseGroup> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetFetchResponseGroup(_readable, _version)); } this.groups = newCollection; } } else { this.groups = new ArrayList<OffsetFetchResponseGroup>(0); } this._unknownTaggedFields = null; if (_version >= 6) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 3) { 
_writable.writeInt(throttleTimeMs); } if (_version <= 7) { if (_version >= 6) { _writable.writeUnsignedVarint(topics.size() + 1); for (OffsetFetchResponseTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(topics.size()); for (OffsetFetchResponseTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } } else { if (!this.topics.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default topics at version " + _version); } } if ((_version >= 2) && (_version <= 7)) { _writable.writeShort(errorCode); } if (_version >= 8) { _writable.writeUnsignedVarint(groups.size() + 1); for (OffsetFetchResponseGroup groupsElement : groups) { groupsElement.write(_writable, _cache, _version); } } else { if (!this.groups.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default groups at version " + _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 6) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 3) { _size.addBytes(4); } if (_version <= 7) { { if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); } else { _size.addBytes(4); } for (OffsetFetchResponseTopic topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } } if ((_version >= 2) && (_version <= 7)) { _size.addBytes(2); } if (_version >= 8) { { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(groups.size() + 1)); for (OffsetFetchResponseGroup 
groupsElement : groups) { groupsElement.addSize(_size, _cache, _version); } } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetFetchResponseData)) return false; OffsetFetchResponseData other = (OffsetFetchResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } if (errorCode != other.errorCode) return false; if (this.groups == null) { if (other.groups != null) return false; } else { if (!this.groups.equals(other.groups)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (groups == null ? 
0 : groups.hashCode()); return hashCode; } @Override public OffsetFetchResponseData duplicate() { OffsetFetchResponseData _duplicate = new OffsetFetchResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; ArrayList<OffsetFetchResponseTopic> newTopics = new ArrayList<OffsetFetchResponseTopic>(topics.size()); for (OffsetFetchResponseTopic _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; _duplicate.errorCode = errorCode; ArrayList<OffsetFetchResponseGroup> newGroups = new ArrayList<OffsetFetchResponseGroup>(groups.size()); for (OffsetFetchResponseGroup _element : groups) { newGroups.add(_element.duplicate()); } _duplicate.groups = newGroups; return _duplicate; } @Override public String toString() { return "OffsetFetchResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ", errorCode=" + errorCode + ", groups=" + MessageUtil.deepToString(groups.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public List<OffsetFetchResponseTopic> topics() { return this.topics; } public short errorCode() { return this.errorCode; } public List<OffsetFetchResponseGroup> groups() { return this.groups; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetFetchResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public OffsetFetchResponseData setTopics(List<OffsetFetchResponseTopic> v) { this.topics = v; return this; } public OffsetFetchResponseData setErrorCode(short v) { this.errorCode = v; return this; } public OffsetFetchResponseData setGroups(List<OffsetFetchResponseGroup> v) { this.groups = v; return this; } public static class OffsetFetchResponseTopic implements Message { String name; List<OffsetFetchResponsePartition> partitions; private List<RawTaggedField> _unknownTaggedFields; public 
static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(OffsetFetchResponsePartition.SCHEMA_0), "The responses per partition") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(OffsetFetchResponsePartition.SCHEMA_5), "The responses per partition") ); public static final Schema SCHEMA_6 = new Schema( new Field("name", Type.COMPACT_STRING, "The topic name."), new Field("partitions", new CompactArrayOf(OffsetFetchResponsePartition.SCHEMA_6), "The responses per partition"), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 7; public OffsetFetchResponseTopic(Readable _readable, short _version) { read(_readable, _version); } public OffsetFetchResponseTopic() { this.name = ""; this.partitions = new ArrayList<OffsetFetchResponsePartition>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 8; } @Override public void read(Readable _readable, short _version) { { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { if (_version >= 6) { int arrayLength; arrayLength 
= _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetFetchResponsePartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetFetchResponsePartition(_readable, _version)); } this.partitions = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetFetchResponsePartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetFetchResponsePartition(_readable, _version)); } this.partitions = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 6) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { if (_version > 7) { throw new UnsupportedVersionException("Can't write version " + _version + " of OffsetFetchResponseTopic"); } int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { 
_writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 6) { _writable.writeUnsignedVarint(partitions.size() + 1); for (OffsetFetchResponsePartition partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitions.size()); for (OffsetFetchResponsePartition partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 6) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); if (_version >= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { _size.addBytes(4); } for (OffsetFetchResponsePartition partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 6) { 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetFetchResponseTopic)) return false; OffsetFetchResponseTopic other = (OffsetFetchResponseTopic) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode()); return hashCode; } @Override public OffsetFetchResponseTopic duplicate() { OffsetFetchResponseTopic _duplicate = new OffsetFetchResponseTopic(); _duplicate.name = name; ArrayList<OffsetFetchResponsePartition> newPartitions = new ArrayList<OffsetFetchResponsePartition>(partitions.size()); for (OffsetFetchResponsePartition _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "OffsetFetchResponseTopic(" + "name=" + ((name == null) ? 
"null" : "'" + name.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String name() { return this.name; } public List<OffsetFetchResponsePartition> partitions() { return this.partitions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetFetchResponseTopic setName(String v) { this.name = v; return this; } public OffsetFetchResponseTopic setPartitions(List<OffsetFetchResponsePartition> v) { this.partitions = v; return this; } } public static class OffsetFetchResponsePartition implements Message { int partitionIndex; long committedOffset; int committedLeaderEpoch; String metadata; short errorCode; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("committed_offset", Type.INT64, "The committed message offset."), new Field("metadata", Type.NULLABLE_STRING, "The partition metadata."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("committed_offset", Type.INT64, "The committed message offset."), new Field("committed_leader_epoch", Type.INT32, "The leader epoch."), new Field("metadata", Type.NULLABLE_STRING, "The partition metadata."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.") ); public static final Schema SCHEMA_6 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("committed_offset", Type.INT64, "The committed message offset."), 
new Field("committed_leader_epoch", Type.INT32, "The leader epoch."), new Field("metadata", Type.COMPACT_NULLABLE_STRING, "The partition metadata."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 7; public OffsetFetchResponsePartition(Readable _readable, short _version) { read(_readable, _version); } public OffsetFetchResponsePartition() { this.partitionIndex = 0; this.committedOffset = 0L; this.committedLeaderEpoch = -1; this.metadata = ""; this.errorCode = (short) 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 7; } @Override public void read(Readable _readable, short _version) { this.partitionIndex = _readable.readInt(); this.committedOffset = _readable.readLong(); if (_version >= 5) { this.committedLeaderEpoch = _readable.readInt(); } else { this.committedLeaderEpoch = -1; } { int length; if (_version >= 6) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.metadata = null; } else if (length > 0x7fff) { throw new RuntimeException("string field metadata had invalid length " + length); } else { this.metadata = _readable.readString(length); } } this.errorCode = _readable.readShort(); this._unknownTaggedFields = null; if (_version >= 6) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override 
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(partitionIndex); _writable.writeLong(committedOffset); if (_version >= 5) { _writable.writeInt(committedLeaderEpoch); } if (metadata == null) { if (_version >= 6) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(metadata); if (_version >= 6) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } _writable.writeShort(errorCode); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 6) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); _size.addBytes(8); if (_version >= 5) { _size.addBytes(4); } if (metadata == null) { if (_version >= 6) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = metadata.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'metadata' field is too long to be serialized"); } _cache.cacheSerializedValue(metadata, _stringBytes); if (_version >= 6) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } _size.addBytes(2); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 6) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetFetchResponsePartition)) return false; OffsetFetchResponsePartition other = (OffsetFetchResponsePartition) obj; if (partitionIndex != other.partitionIndex) return false; if (committedOffset != other.committedOffset) return false; if (committedLeaderEpoch != other.committedLeaderEpoch) return false; if (this.metadata == null) { if (other.metadata != null) return false; } else { if (!this.metadata.equals(other.metadata)) return false; } if (errorCode != other.errorCode) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partitionIndex; hashCode = 31 * hashCode + ((int) (committedOffset >> 32) ^ (int) committedOffset); hashCode = 31 * hashCode + committedLeaderEpoch; hashCode = 31 * hashCode + (metadata == null ? 
// (continuation of OffsetFetchResponsePartition.hashCode(): fold in nullable metadata, then errorCode)
0 : metadata.hashCode());
        hashCode = 31 * hashCode + errorCode;
        return hashCode;
    }

    /** Field-by-field copy; metadata (immutable String) is shared rather than cloned. */
    @Override
    public OffsetFetchResponsePartition duplicate() {
        OffsetFetchResponsePartition _duplicate = new OffsetFetchResponsePartition();
        _duplicate.partitionIndex = partitionIndex;
        _duplicate.committedOffset = committedOffset;
        _duplicate.committedLeaderEpoch = committedLeaderEpoch;
        if (metadata == null) {
            _duplicate.metadata = null;
        } else {
            _duplicate.metadata = metadata;
        }
        _duplicate.errorCode = errorCode;
        return _duplicate;
    }

    /** Debug rendering; metadata is single-quoted, or printed as null when absent. */
    @Override
    public String toString() {
        return "OffsetFetchResponsePartition("
            + "partitionIndex=" + partitionIndex
            + ", committedOffset=" + committedOffset
            + ", committedLeaderEpoch=" + committedLeaderEpoch
            + ", metadata=" + ((metadata == null) ? "null" : "'" + metadata.toString() + "'")
            + ", errorCode=" + errorCode
            + ")";
    }

    // --- plain accessors ---
    public int partitionIndex() {
        return this.partitionIndex;
    }

    public long committedOffset() {
        return this.committedOffset;
    }

    public int committedLeaderEpoch() {
        return this.committedLeaderEpoch;
    }

    public String metadata() {
        return this.metadata;
    }

    public short errorCode() {
        return this.errorCode;
    }

    /** Lazily materializes the unknown-tagged-field list so callers can append to it. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // --- fluent setters (return this for chaining) ---
    public OffsetFetchResponsePartition setPartitionIndex(int v) {
        this.partitionIndex = v;
        return this;
    }

    public OffsetFetchResponsePartition setCommittedOffset(long v) {
        this.committedOffset = v;
        return this;
    }

    public OffsetFetchResponsePartition setCommittedLeaderEpoch(int v) {
        this.committedLeaderEpoch = v;
        return this;
    }

    public OffsetFetchResponsePartition setMetadata(String v) {
        this.metadata = v;
        return this;
    }

    public OffsetFetchResponsePartition setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }
}

/**
 * Per-group result for OffsetFetch response versions 8+ (the multi-group form of the
 * response). Auto-generated wire-format code; comments added for review only.
 */
public static class OffsetFetchResponseGroup implements Message {
    String groupId;                                   // non-nullable compact string on the wire
    List<OffsetFetchResponseTopics> topics;           // per-topic results, compact array
    short errorCode;                                  // group-level error code
    private List<RawTaggedField>
_unknownTaggedFields;

    // Schema only exists for version 8 (earlier versions use the flat topic list instead).
    public static final Schema SCHEMA_8 =
        new Schema(
            new Field("group_id", Type.COMPACT_STRING, "The group ID."),
            new Field("topics", new CompactArrayOf(OffsetFetchResponseTopics.SCHEMA_8), "The responses per topic."),
            new Field("error_code", Type.INT16, "The group-level error code, or 0 if there was no error."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        SCHEMA_8
    };

    public static final short LOWEST_SUPPORTED_VERSION = 8;
    public static final short HIGHEST_SUPPORTED_VERSION = 8;

    public OffsetFetchResponseGroup(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public OffsetFetchResponseGroup() {
        this.groupId = "";
        this.topics = new ArrayList<OffsetFetchResponseTopics>(0);
        this.errorCode = (short) 0;
    }

    // NOTE(review): returns 0 although LOWEST_SUPPORTED_VERSION is 8 (sibling struct
    // OffsetFetchResponseTopics returns 8 here) — confirm against the message generator
    // before treating this as intentional.
    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 8;
    }

    /**
     * Deserializes this struct from the flexible (v8) wire format: compact string groupId,
     * compact array of topics, int16 errorCode, then the tagged-field section.
     */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version > 8) {
            throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetFetchResponseGroup");
        }
        {
            // Compact string: varint length is (actual length + 1); 0 encodes null.
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                throw new RuntimeException("non-nullable field groupId was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field groupId had invalid length " + length);
            } else {
                this.groupId = _readable.readString(length);
            }
        }
        {
            // Compact array: varint length is (element count + 1); 0 encodes null.
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field topics was serialized as null");
            } else {
                // Guard against absurd counts from a corrupt buffer before allocating.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<OffsetFetchResponseTopics> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
newCollection.add(new OffsetFetchResponseTopics(_readable, _version));
                }
                this.topics = newCollection;
            }
        }
        this.errorCode = _readable.readShort();
        this._unknownTaggedFields = null;
        // Tagged-field section: no known tags for this struct, so everything is retained raw.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /**
     * Serializes this struct; requires the groupId bytes to have been cached by a prior
     * addSize() pass on the same ObjectSerializationCache.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        if (_version < 8) {
            throw new UnsupportedVersionException("Can't write version " + _version + " of OffsetFetchResponseGroup");
        }
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = _cache.getSerializedValue(groupId);
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeUnsignedVarint(topics.size() + 1);
        for (OffsetFetchResponseTopics topicsElement : topics) {
            topicsElement.write(_writable, _cache, _version);
        }
        _writable.writeShort(errorCode);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /**
     * Accumulates this struct's serialized size and caches the UTF-8 groupId bytes for the
     * subsequent write() pass.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version > 8) {
            throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetFetchResponseGroup");
        }
        {
            byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'groupId' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(groupId, _stringBytes);
            _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
        }
        {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1));
            for (OffsetFetchResponseTopics topicsElement : topics) {
                topicsElement.addSize(_size, _cache, _version);
            }
        }
        _size.addBytes(2);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    /** Structural equality over all fields including raw unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof OffsetFetchResponseGroup)) return false;
        OffsetFetchResponseGroup other = (OffsetFetchResponseGroup) obj;
        if (this.groupId == null) {
            if (other.groupId != null) return false;
        } else {
            if (!this.groupId.equals(other.groupId)) return false;
        }
        if (this.topics == null) {
            if (other.topics != null) return false;
        } else {
            if (!this.topics.equals(other.topics)) return false;
        }
        if (errorCode != other.errorCode) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode());
        hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
        hashCode = 31 * hashCode + errorCode;
        return hashCode;
    }

    /** Deep copy: topics elements are duplicated; groupId (immutable) is shared. */
    @Override
    public OffsetFetchResponseGroup duplicate() {
        OffsetFetchResponseGroup _duplicate = new OffsetFetchResponseGroup();
        _duplicate.groupId = groupId;
        ArrayList<OffsetFetchResponseTopics> newTopics = new ArrayList<OffsetFetchResponseTopics>(topics.size());
        for (OffsetFetchResponseTopics _element : topics) {
            newTopics.add(_element.duplicate());
        }
        _duplicate.topics = newTopics;
        _duplicate.errorCode = errorCode;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "OffsetFetchResponseGroup("
            + "groupId=" + ((groupId == null) ?
"null" : "'" + groupId.toString() + "'")
            + ", topics=" + MessageUtil.deepToString(topics.iterator())
            + ", errorCode=" + errorCode
            + ")";
    }

    // --- plain accessors ---
    public String groupId() {
        return this.groupId;
    }

    public List<OffsetFetchResponseTopics> topics() {
        return this.topics;
    }

    public short errorCode() {
        return this.errorCode;
    }

    /** Lazily materializes the unknown-tagged-field list so callers can append to it. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // --- fluent setters (return this for chaining) ---
    public OffsetFetchResponseGroup setGroupId(String v) {
        this.groupId = v;
        return this;
    }

    public OffsetFetchResponseGroup setTopics(List<OffsetFetchResponseTopics> v) {
        this.topics = v;
        return this;
    }

    public OffsetFetchResponseGroup setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }
}

/**
 * Per-topic result nested inside OffsetFetchResponseGroup (response versions 8+ only).
 * Auto-generated wire-format code; comments added for review only.
 */
public static class OffsetFetchResponseTopics implements Message {
    String name;                                      // topic name, non-nullable compact string
    List<OffsetFetchResponsePartitions> partitions;   // per-partition results, compact array
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_8 =
        new Schema(
            new Field("name", Type.COMPACT_STRING, "The topic name."),
            new Field("partitions", new CompactArrayOf(OffsetFetchResponsePartitions.SCHEMA_8), "The responses per partition"),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        SCHEMA_8
    };

    public static final short LOWEST_SUPPORTED_VERSION = 8;
    public static final short HIGHEST_SUPPORTED_VERSION = 8;

    public OffsetFetchResponseTopics(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public OffsetFetchResponseTopics() {
        this.name = "";
        this.partitions = new ArrayList<OffsetFetchResponsePartitions>(0);
    }

    @Override
    public short lowestSupportedVersion() {
        return 8;
    }

    @Override
    public short highestSupportedVersion() {
        return 8;
    }

    /**
     * Deserializes from the flexible (v8) wire format: compact string name, compact array
     * of partitions, then the tagged-field section.
     */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version > 8) {
            throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetFetchResponseTopics");
        }
        {
            // Compact string: varint length is (actual length + 1); 0 encodes null.
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                throw new RuntimeException("non-nullable field name was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field name had invalid length " + length);
            } else {
                this.name = _readable.readString(length);
            }
        }
        {
            // Compact array: varint length is (element count + 1); 0 encodes null.
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field partitions was serialized as null");
            } else {
                // Guard against absurd counts from a corrupt buffer before allocating.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<OffsetFetchResponsePartitions> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new OffsetFetchResponsePartitions(_readable, _version));
                }
                this.partitions = newCollection;
            }
        }
        this._unknownTaggedFields = null;
        // Tagged-field section: no known tags for this struct, so everything is retained raw.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /**
     * Serializes this struct; requires the name bytes to have been cached by a prior
     * addSize() pass. NOTE(review): unlike OffsetFetchResponseGroup.write, there is no
     * explicit version guard here — confirm against the generator whether that is intended.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = _cache.getSerializedValue(name);
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeUnsignedVarint(partitions.size() + 1);
        for (OffsetFetchResponsePartitions partitionsElement : partitions) {
            partitionsElement.write(_writable, _cache, _version);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /**
     * Accumulates this struct's serialized size and caches the UTF-8 name bytes for the
     * subsequent write() pass.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version > 8) {
            throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetFetchResponseTopics");
        }
        {
            byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'name' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(name, _stringBytes);
            _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
        }
        {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1));
            for (OffsetFetchResponsePartitions partitionsElement : partitions) {
                partitionsElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    /** Structural equality over all fields including raw unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof OffsetFetchResponseTopics)) return false;
        OffsetFetchResponseTopics other = (OffsetFetchResponseTopics) obj;
        if (this.name == null) {
            if (other.name != null) return false;
        } else {
            if (!this.name.equals(other.name)) return false;
        }
        if (this.partitions == null) {
            if (other.partitions != null) return false;
        } else {
            if (!this.partitions.equals(other.partitions)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
        hashCode = 31 * hashCode + (partitions == null ?
// (continuation of OffsetFetchResponseTopics.hashCode(): fold in nullable partitions term)
0 : partitions.hashCode());
        return hashCode;
    }

    /** Deep copy: partitions elements are duplicated; name (immutable) is shared. */
    @Override
    public OffsetFetchResponseTopics duplicate() {
        OffsetFetchResponseTopics _duplicate = new OffsetFetchResponseTopics();
        _duplicate.name = name;
        ArrayList<OffsetFetchResponsePartitions> newPartitions = new ArrayList<OffsetFetchResponsePartitions>(partitions.size());
        for (OffsetFetchResponsePartitions _element : partitions) {
            newPartitions.add(_element.duplicate());
        }
        _duplicate.partitions = newPartitions;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "OffsetFetchResponseTopics("
            + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
            + ", partitions=" + MessageUtil.deepToString(partitions.iterator())
            + ")";
    }

    // --- plain accessors ---
    public String name() {
        return this.name;
    }

    public List<OffsetFetchResponsePartitions> partitions() {
        return this.partitions;
    }

    /** Lazily materializes the unknown-tagged-field list so callers can append to it. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // --- fluent setters (return this for chaining) ---
    public OffsetFetchResponseTopics setName(String v) {
        this.name = v;
        return this;
    }

    public OffsetFetchResponseTopics setPartitions(List<OffsetFetchResponsePartitions> v) {
        this.partitions = v;
        return this;
    }
}

/**
 * Per-partition result nested inside OffsetFetchResponseTopics (response versions 8+ only).
 * committedLeaderEpoch defaults to -1 (unknown epoch); metadata is nullable on the wire.
 * Auto-generated wire-format code; comments added for review only.
 */
public static class OffsetFetchResponsePartitions implements Message {
    int partitionIndex;
    long committedOffset;
    int committedLeaderEpoch;
    String metadata;                 // nullable compact string
    short errorCode;                 // partition-level error code
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_8 =
        new Schema(
            new Field("partition_index", Type.INT32, "The partition index."),
            new Field("committed_offset", Type.INT64, "The committed message offset."),
            new Field("committed_leader_epoch", Type.INT32, "The leader epoch."),
            new Field("metadata", Type.COMPACT_NULLABLE_STRING, "The partition metadata."),
            new Field("error_code", Type.INT16, "The partition-level error code, or 0 if there was no error."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        null,
        SCHEMA_8
    };

    public static final short LOWEST_SUPPORTED_VERSION = 8;
    public static final short HIGHEST_SUPPORTED_VERSION = 8;

    public OffsetFetchResponsePartitions(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public OffsetFetchResponsePartitions() {
        this.partitionIndex = 0;
        this.committedOffset = 0L;
        this.committedLeaderEpoch = -1;
        this.metadata = "";
        this.errorCode = (short) 0;
    }

    @Override
    public short lowestSupportedVersion() {
        return 8;
    }

    @Override
    public short highestSupportedVersion() {
        return 8;
    }

    /**
     * Deserializes from the flexible (v8) wire format: int32, int64, int32, compact
     * nullable string, int16, then the tagged-field section.
     */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version > 8) {
            throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetFetchResponsePartitions");
        }
        this.partitionIndex = _readable.readInt();
        this.committedOffset = _readable.readLong();
        this.committedLeaderEpoch = _readable.readInt();
        {
            // Compact nullable string: varint length is (actual length + 1); 0 encodes null.
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                this.metadata = null;
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field metadata had invalid length " + length);
            } else {
                this.metadata = _readable.readString(length);
            }
        }
        this.errorCode = _readable.readShort();
        this._unknownTaggedFields = null;
        // Tagged-field section: no known tags for this struct, so everything is retained raw.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /**
     * Serializes this struct; requires the metadata bytes (when non-null) to have been
     * cached by a prior addSize() pass on the same ObjectSerializationCache.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(partitionIndex);
        _writable.writeLong(committedOffset);
        _writable.writeInt(committedLeaderEpoch);
        if (metadata == null) {
            // Compact nullable string: 0 encodes null.
            _writable.writeUnsignedVarint(0);
        } else {
            byte[] _stringBytes = _cache.getSerializedValue(metadata);
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeShort(errorCode);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /**
     * Accumulates this struct's serialized size and caches the UTF-8 metadata bytes for
     * the subsequent write() pass.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version > 8) {
            throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetFetchResponsePartitions");
        }
        _size.addBytes(4);   // partitionIndex
        _size.addBytes(8);   // committedOffset
        _size.addBytes(4);   // committedLeaderEpoch
        if (metadata == null) {
            _size.addBytes(1);
        } else {
            byte[] _stringBytes = metadata.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'metadata' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(metadata, _stringBytes);
            _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
        }
        _size.addBytes(2);   // errorCode
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    /** Structural equality over all fields including raw unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof OffsetFetchResponsePartitions)) return false;
        OffsetFetchResponsePartitions other = (OffsetFetchResponsePartitions) obj;
        if (partitionIndex != other.partitionIndex) return false;
        if (committedOffset != other.committedOffset) return false;
        if (committedLeaderEpoch != other.committedLeaderEpoch) return false;
        if (this.metadata == null) {
            if (other.metadata != null) return false;
        } else {
            if (!this.metadata.equals(other.metadata)) return false;
        }
        if (errorCode != other.errorCode) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + partitionIndex;
        // Mix both halves of the 64-bit offset into the 32-bit hash.
        hashCode = 31 * hashCode + ((int) (committedOffset >> 32) ^ (int) committedOffset);
        hashCode = 31 * hashCode + committedLeaderEpoch;
        hashCode = 31 * hashCode + (metadata == null ? 0 : metadata.hashCode());
        hashCode = 31 * hashCode + errorCode;
        return hashCode;
    }

    /** Field-by-field copy; metadata (immutable String) is shared, not cloned. */
    @Override
    public OffsetFetchResponsePartitions duplicate() {
        OffsetFetchResponsePartitions _duplicate = new OffsetFetchResponsePartitions();
        _duplicate.partitionIndex = partitionIndex;
        _duplicate.committedOffset = committedOffset;
        _duplicate.committedLeaderEpoch = committedLeaderEpoch;
        if (metadata == null) {
            _duplicate.metadata = null;
        } else {
            _duplicate.metadata = metadata;
        }
        _duplicate.errorCode = errorCode;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "OffsetFetchResponsePartitions("
            + "partitionIndex=" + partitionIndex
            + ", committedOffset=" + committedOffset
            + ", committedLeaderEpoch=" + committedLeaderEpoch
            + ", metadata=" + ((metadata == null) ? "null" : "'" + metadata.toString() + "'")
            + ", errorCode=" + errorCode
            + ")";
    }

    // --- plain accessors ---
    public int partitionIndex() {
        return this.partitionIndex;
    }

    public long committedOffset() {
        return this.committedOffset;
    }

    public int committedLeaderEpoch() {
        return this.committedLeaderEpoch;
    }

    public String metadata() {
        return this.metadata;
    }

    public short errorCode() {
        return this.errorCode;
    }

    /** Lazily materializes the unknown-tagged-field list so callers can append to it. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // --- fluent setters (return this for chaining) ---
    public OffsetFetchResponsePartitions setPartitionIndex(int v) {
        this.partitionIndex = v;
        return this;
    }

    public OffsetFetchResponsePartitions setCommittedOffset(long v) {
        this.committedOffset = v;
        return this;
    }

    public OffsetFetchResponsePartitions setCommittedLeaderEpoch(int v) {
        this.committedLeaderEpoch = v;
        return this;
    }

    public OffsetFetchResponsePartitions setMetadata(String v) {
        this.metadata = v;
        return this;
    }

    public OffsetFetchResponsePartitions setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }
}
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetFetchResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.LongNode; import com.fasterxml.jackson.databind.node.NullNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.OffsetFetchResponseData.*; public class OffsetFetchResponseDataJsonConverter { public static OffsetFetchResponseData read(JsonNode _node, short _version) { OffsetFetchResponseData _object = new OffsetFetchResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { if (_version >= 3) { throw new RuntimeException("OffsetFetchResponseData: unable to locate field 'throttleTimeMs', which is 
mandatory in version " + _version); } else { _object.throttleTimeMs = 0; } } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "OffsetFetchResponseData"); } JsonNode _topicsNode = _node.get("topics"); if (_topicsNode == null) { if (_version <= 7) { throw new RuntimeException("OffsetFetchResponseData: unable to locate field 'topics', which is mandatory in version " + _version); } else { _object.topics = new ArrayList<OffsetFetchResponseTopic>(0); } } else { if (!_topicsNode.isArray()) { throw new RuntimeException("OffsetFetchResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<OffsetFetchResponseTopic> _collection = new ArrayList<OffsetFetchResponseTopic>(_topicsNode.size()); _object.topics = _collection; for (JsonNode _element : _topicsNode) { _collection.add(OffsetFetchResponseTopicJsonConverter.read(_element, _version)); } } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { if ((_version >= 2) && (_version <= 7)) { throw new RuntimeException("OffsetFetchResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = (short) 0; } } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "OffsetFetchResponseData"); } JsonNode _groupsNode = _node.get("groups"); if (_groupsNode == null) { if (_version >= 8) { throw new RuntimeException("OffsetFetchResponseData: unable to locate field 'groups', which is mandatory in version " + _version); } else { _object.groups = new ArrayList<OffsetFetchResponseGroup>(0); } } else { if (!_groupsNode.isArray()) { throw new RuntimeException("OffsetFetchResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<OffsetFetchResponseGroup> _collection = new ArrayList<OffsetFetchResponseGroup>(_groupsNode.size()); _object.groups = _collection; for (JsonNode _element : _groupsNode) { _collection.add(OffsetFetchResponseGroupJsonConverter.read(_element, 
_version)); } } return _object; } public static JsonNode write(OffsetFetchResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_version >= 3) { _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); } if (_version <= 7) { ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance); for (OffsetFetchResponseTopic _element : _object.topics) { _topicsArray.add(OffsetFetchResponseTopicJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topics", _topicsArray); } else { if (!_object.topics.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default topics at version " + _version); } } if ((_version >= 2) && (_version <= 7)) { _node.set("errorCode", new ShortNode(_object.errorCode)); } if (_version >= 8) { ArrayNode _groupsArray = new ArrayNode(JsonNodeFactory.instance); for (OffsetFetchResponseGroup _element : _object.groups) { _groupsArray.add(OffsetFetchResponseGroupJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("groups", _groupsArray); } else { if (!_object.groups.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default groups at version " + _version); } } return _node; } public static JsonNode write(OffsetFetchResponseData _object, short _version) { return write(_object, _version, true); } public static class OffsetFetchResponseGroupJsonConverter { public static OffsetFetchResponseGroup read(JsonNode _node, short _version) { OffsetFetchResponseGroup _object = new OffsetFetchResponseGroup(); if (_version < 8) { throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetFetchResponseGroup"); } JsonNode _groupIdNode = _node.get("groupId"); if (_groupIdNode == null) { throw new RuntimeException("OffsetFetchResponseGroup: unable to locate field 'groupId', which is mandatory in version " + _version); } else { if (!_groupIdNode.isTextual()) { throw new 
RuntimeException("OffsetFetchResponseGroup expected a string type, but got " + _node.getNodeType()); } _object.groupId = _groupIdNode.asText(); } JsonNode _topicsNode = _node.get("topics"); if (_topicsNode == null) { throw new RuntimeException("OffsetFetchResponseGroup: unable to locate field 'topics', which is mandatory in version " + _version); } else { if (!_topicsNode.isArray()) { throw new RuntimeException("OffsetFetchResponseGroup expected a JSON array, but got " + _node.getNodeType()); } ArrayList<OffsetFetchResponseTopics> _collection = new ArrayList<OffsetFetchResponseTopics>(_topicsNode.size()); _object.topics = _collection; for (JsonNode _element : _topicsNode) { _collection.add(OffsetFetchResponseTopicsJsonConverter.read(_element, _version)); } } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("OffsetFetchResponseGroup: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "OffsetFetchResponseGroup"); } return _object; } public static JsonNode write(OffsetFetchResponseGroup _object, short _version, boolean _serializeRecords) { if (_version < 8) { throw new UnsupportedVersionException("Can't write version " + _version + " of OffsetFetchResponseGroup"); } ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("groupId", new TextNode(_object.groupId)); ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance); for (OffsetFetchResponseTopics _element : _object.topics) { _topicsArray.add(OffsetFetchResponseTopicsJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topics", _topicsArray); _node.set("errorCode", new ShortNode(_object.errorCode)); return _node; } public static JsonNode write(OffsetFetchResponseGroup _object, short _version) { return write(_object, _version, true); } } public static class OffsetFetchResponsePartitionJsonConverter { public static 
OffsetFetchResponsePartition read(JsonNode _node, short _version) { OffsetFetchResponsePartition _object = new OffsetFetchResponsePartition(); JsonNode _partitionIndexNode = _node.get("partitionIndex"); if (_partitionIndexNode == null) { throw new RuntimeException("OffsetFetchResponsePartition: unable to locate field 'partitionIndex', which is mandatory in version " + _version); } else { _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "OffsetFetchResponsePartition"); } JsonNode _committedOffsetNode = _node.get("committedOffset"); if (_committedOffsetNode == null) { throw new RuntimeException("OffsetFetchResponsePartition: unable to locate field 'committedOffset', which is mandatory in version " + _version); } else { _object.committedOffset = MessageUtil.jsonNodeToLong(_committedOffsetNode, "OffsetFetchResponsePartition"); } JsonNode _committedLeaderEpochNode = _node.get("committedLeaderEpoch"); if (_committedLeaderEpochNode == null) { if (_version >= 5) { throw new RuntimeException("OffsetFetchResponsePartition: unable to locate field 'committedLeaderEpoch', which is mandatory in version " + _version); } else { _object.committedLeaderEpoch = -1; } } else { _object.committedLeaderEpoch = MessageUtil.jsonNodeToInt(_committedLeaderEpochNode, "OffsetFetchResponsePartition"); } JsonNode _metadataNode = _node.get("metadata"); if (_metadataNode == null) { throw new RuntimeException("OffsetFetchResponsePartition: unable to locate field 'metadata', which is mandatory in version " + _version); } else { if (_metadataNode.isNull()) { _object.metadata = null; } else { if (!_metadataNode.isTextual()) { throw new RuntimeException("OffsetFetchResponsePartition expected a string type, but got " + _node.getNodeType()); } _object.metadata = _metadataNode.asText(); } } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("OffsetFetchResponsePartition: unable to locate field 'errorCode', which is mandatory in 
version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "OffsetFetchResponsePartition"); } return _object; } public static JsonNode write(OffsetFetchResponsePartition _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("partitionIndex", new IntNode(_object.partitionIndex)); _node.set("committedOffset", new LongNode(_object.committedOffset)); if (_version >= 5) { _node.set("committedLeaderEpoch", new IntNode(_object.committedLeaderEpoch)); } if (_object.metadata == null) { _node.set("metadata", NullNode.instance); } else { _node.set("metadata", new TextNode(_object.metadata)); } _node.set("errorCode", new ShortNode(_object.errorCode)); return _node; } public static JsonNode write(OffsetFetchResponsePartition _object, short _version) { return write(_object, _version, true); } } public static class OffsetFetchResponsePartitionsJsonConverter { public static OffsetFetchResponsePartitions read(JsonNode _node, short _version) { OffsetFetchResponsePartitions _object = new OffsetFetchResponsePartitions(); JsonNode _partitionIndexNode = _node.get("partitionIndex"); if (_partitionIndexNode == null) { throw new RuntimeException("OffsetFetchResponsePartitions: unable to locate field 'partitionIndex', which is mandatory in version " + _version); } else { _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "OffsetFetchResponsePartitions"); } JsonNode _committedOffsetNode = _node.get("committedOffset"); if (_committedOffsetNode == null) { throw new RuntimeException("OffsetFetchResponsePartitions: unable to locate field 'committedOffset', which is mandatory in version " + _version); } else { _object.committedOffset = MessageUtil.jsonNodeToLong(_committedOffsetNode, "OffsetFetchResponsePartitions"); } JsonNode _committedLeaderEpochNode = _node.get("committedLeaderEpoch"); if (_committedLeaderEpochNode == null) { throw new 
RuntimeException("OffsetFetchResponsePartitions: unable to locate field 'committedLeaderEpoch', which is mandatory in version " + _version); } else { _object.committedLeaderEpoch = MessageUtil.jsonNodeToInt(_committedLeaderEpochNode, "OffsetFetchResponsePartitions"); } JsonNode _metadataNode = _node.get("metadata"); if (_metadataNode == null) { throw new RuntimeException("OffsetFetchResponsePartitions: unable to locate field 'metadata', which is mandatory in version " + _version); } else { if (_metadataNode.isNull()) { _object.metadata = null; } else { if (!_metadataNode.isTextual()) { throw new RuntimeException("OffsetFetchResponsePartitions expected a string type, but got " + _node.getNodeType()); } _object.metadata = _metadataNode.asText(); } } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("OffsetFetchResponsePartitions: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "OffsetFetchResponsePartitions"); } return _object; } public static JsonNode write(OffsetFetchResponsePartitions _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("partitionIndex", new IntNode(_object.partitionIndex)); _node.set("committedOffset", new LongNode(_object.committedOffset)); _node.set("committedLeaderEpoch", new IntNode(_object.committedLeaderEpoch)); if (_object.metadata == null) { _node.set("metadata", NullNode.instance); } else { _node.set("metadata", new TextNode(_object.metadata)); } _node.set("errorCode", new ShortNode(_object.errorCode)); return _node; } public static JsonNode write(OffsetFetchResponsePartitions _object, short _version) { return write(_object, _version, true); } } public static class OffsetFetchResponseTopicJsonConverter { public static OffsetFetchResponseTopic read(JsonNode _node, short _version) { OffsetFetchResponseTopic 
_object = new OffsetFetchResponseTopic(); if (_version > 7) { throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetFetchResponseTopic"); } JsonNode _nameNode = _node.get("name"); if (_nameNode == null) { throw new RuntimeException("OffsetFetchResponseTopic: unable to locate field 'name', which is mandatory in version " + _version); } else { if (!_nameNode.isTextual()) { throw new RuntimeException("OffsetFetchResponseTopic expected a string type, but got " + _node.getNodeType()); } _object.name = _nameNode.asText(); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("OffsetFetchResponseTopic: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new RuntimeException("OffsetFetchResponseTopic expected a JSON array, but got " + _node.getNodeType()); } ArrayList<OffsetFetchResponsePartition> _collection = new ArrayList<OffsetFetchResponsePartition>(_partitionsNode.size()); _object.partitions = _collection; for (JsonNode _element : _partitionsNode) { _collection.add(OffsetFetchResponsePartitionJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(OffsetFetchResponseTopic _object, short _version, boolean _serializeRecords) { if (_version > 7) { throw new UnsupportedVersionException("Can't write version " + _version + " of OffsetFetchResponseTopic"); } ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("name", new TextNode(_object.name)); ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (OffsetFetchResponsePartition _element : _object.partitions) { _partitionsArray.add(OffsetFetchResponsePartitionJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitions", _partitionsArray); return _node; } public static JsonNode write(OffsetFetchResponseTopic _object, short _version) { return write(_object, 
_version, true); } } public static class OffsetFetchResponseTopicsJsonConverter { public static OffsetFetchResponseTopics read(JsonNode _node, short _version) { OffsetFetchResponseTopics _object = new OffsetFetchResponseTopics(); JsonNode _nameNode = _node.get("name"); if (_nameNode == null) { throw new RuntimeException("OffsetFetchResponseTopics: unable to locate field 'name', which is mandatory in version " + _version); } else { if (!_nameNode.isTextual()) { throw new RuntimeException("OffsetFetchResponseTopics expected a string type, but got " + _node.getNodeType()); } _object.name = _nameNode.asText(); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("OffsetFetchResponseTopics: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new RuntimeException("OffsetFetchResponseTopics expected a JSON array, but got " + _node.getNodeType()); } ArrayList<OffsetFetchResponsePartitions> _collection = new ArrayList<OffsetFetchResponsePartitions>(_partitionsNode.size()); _object.partitions = _collection; for (JsonNode _element : _partitionsNode) { _collection.add(OffsetFetchResponsePartitionsJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(OffsetFetchResponseTopics _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("name", new TextNode(_object.name)); ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (OffsetFetchResponsePartitions _element : _object.partitions) { _partitionsArray.add(OffsetFetchResponsePartitionsJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitions", _partitionsArray); return _node; } public static JsonNode write(OffsetFetchResponseTopics _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetForLeaderEpochRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import 
org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class OffsetForLeaderEpochRequestData implements ApiMessage { int replicaId; OffsetForLeaderTopicCollection topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topics", new ArrayOf(OffsetForLeaderTopic.SCHEMA_0), "Each topic to get offsets for.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("topics", new ArrayOf(OffsetForLeaderTopic.SCHEMA_2), "Each topic to get offsets for.") ); public static final Schema SCHEMA_3 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the follower, of -1 if this request is from a consumer."), new Field("topics", new ArrayOf(OffsetForLeaderTopic.SCHEMA_2), "Each topic to get offsets for.") ); public static final Schema SCHEMA_4 = new Schema( new Field("replica_id", Type.INT32, "The broker ID of the follower, of -1 if this request is from a consumer."), new Field("topics", new CompactArrayOf(OffsetForLeaderTopic.SCHEMA_4), "Each topic to get offsets for."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 4; public OffsetForLeaderEpochRequestData(Readable _readable, short _version) { read(_readable, _version); } public OffsetForLeaderEpochRequestData() { this.replicaId = -2; this.topics = new OffsetForLeaderTopicCollection(0); } @Override public short apiKey() { return 23; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { if (_version >= 3) { this.replicaId = _readable.readInt(); } else { this.replicaId = 
-2; } { if (_version >= 4) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } OffsetForLeaderTopicCollection newCollection = new OffsetForLeaderTopicCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetForLeaderTopic(_readable, _version)); } this.topics = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } OffsetForLeaderTopicCollection newCollection = new OffsetForLeaderTopicCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetForLeaderTopic(_readable, _version)); } this.topics = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 4) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 3) { _writable.writeInt(replicaId); } if (_version >= 4) { _writable.writeUnsignedVarint(topics.size() + 1); for (OffsetForLeaderTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } 
} else { _writable.writeInt(topics.size()); for (OffsetForLeaderTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 4) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 3) { _size.addBytes(4); } { if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); } else { _size.addBytes(4); } for (OffsetForLeaderTopic topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetForLeaderEpochRequestData)) return false; OffsetForLeaderEpochRequestData other = (OffsetForLeaderEpochRequestData) obj; if (replicaId != other.replicaId) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, 
other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + replicaId; hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); return hashCode; } @Override public OffsetForLeaderEpochRequestData duplicate() { OffsetForLeaderEpochRequestData _duplicate = new OffsetForLeaderEpochRequestData(); _duplicate.replicaId = replicaId; OffsetForLeaderTopicCollection newTopics = new OffsetForLeaderTopicCollection(topics.size()); for (OffsetForLeaderTopic _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "OffsetForLeaderEpochRequestData(" + "replicaId=" + replicaId + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public int replicaId() { return this.replicaId; } public OffsetForLeaderTopicCollection topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetForLeaderEpochRequestData setReplicaId(int v) { this.replicaId = v; return this; } public OffsetForLeaderEpochRequestData setTopics(OffsetForLeaderTopicCollection v) { this.topics = v; return this; } public static class OffsetForLeaderTopic implements Message, ImplicitLinkedHashMultiCollection.Element { String topic; List<OffsetForLeaderPartition> partitions; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("topic", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(OffsetForLeaderPartition.SCHEMA_0), "Each partition to get offsets for.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("topic", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(OffsetForLeaderPartition.SCHEMA_2), "Each 
partition to get offsets for.") ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("topic", Type.COMPACT_STRING, "The topic name."), new Field("partitions", new CompactArrayOf(OffsetForLeaderPartition.SCHEMA_4), "Each partition to get offsets for."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 4; public OffsetForLeaderTopic(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public OffsetForLeaderTopic() { this.topic = ""; this.partitions = new ArrayList<OffsetForLeaderPartition>(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { if (_version > 4) { throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetForLeaderTopic"); } { int length; if (_version >= 4) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field topic was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field topic had invalid length " + length); } else { this.topic = _readable.readString(length); } } { if (_version >= 4) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection 
of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetForLeaderPartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetForLeaderPartition(_readable, _version)); } this.partitions = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OffsetForLeaderPartition> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetForLeaderPartition(_readable, _version)); } this.partitions = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 4) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(topic); if (_version >= 4) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 4) { _writable.writeUnsignedVarint(partitions.size() + 1); for (OffsetForLeaderPartition partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitions.size()); for (OffsetForLeaderPartition partitionsElement : partitions) { 
partitionsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 4) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 4) { throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetForLeaderTopic"); } { byte[] _stringBytes = topic.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'topic' field is too long to be serialized"); } _cache.cacheSerializedValue(topic, _stringBytes); if (_version >= 4) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { _size.addBytes(4); } for (OffsetForLeaderPartition partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean 
elementKeysAreEqual(Object obj) { if (!(obj instanceof OffsetForLeaderTopic)) return false; OffsetForLeaderTopic other = (OffsetForLeaderTopic) obj; if (this.topic == null) { if (other.topic != null) return false; } else { if (!this.topic.equals(other.topic)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetForLeaderTopic)) return false; OffsetForLeaderTopic other = (OffsetForLeaderTopic) obj; if (this.topic == null) { if (other.topic != null) return false; } else { if (!this.topic.equals(other.topic)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (topic == null ? 0 : topic.hashCode()); return hashCode; } @Override public OffsetForLeaderTopic duplicate() { OffsetForLeaderTopic _duplicate = new OffsetForLeaderTopic(); _duplicate.topic = topic; ArrayList<OffsetForLeaderPartition> newPartitions = new ArrayList<OffsetForLeaderPartition>(partitions.size()); for (OffsetForLeaderPartition _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "OffsetForLeaderTopic(" + "topic=" + ((topic == null) ? 
"null" : "'" + topic.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String topic() { return this.topic; } public List<OffsetForLeaderPartition> partitions() { return this.partitions; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetForLeaderTopic setTopic(String v) { this.topic = v; return this; } public OffsetForLeaderTopic setPartitions(List<OffsetForLeaderPartition> v) { this.partitions = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class OffsetForLeaderPartition implements Message { int partition; int currentLeaderEpoch; int leaderEpoch; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("partition", Type.INT32, "The partition index."), new Field("leader_epoch", Type.INT32, "The epoch to look up an offset for.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("partition", Type.INT32, "The partition index."), new Field("current_leader_epoch", Type.INT32, "An epoch used to fence consumers/replicas with old metadata. If the epoch provided by the client is larger than the current epoch known to the broker, then the UNKNOWN_LEADER_EPOCH error code will be returned. 
If the provided epoch is smaller, then the FENCED_LEADER_EPOCH error code will be returned."), new Field("leader_epoch", Type.INT32, "The epoch to look up an offset for.") ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("partition", Type.INT32, "The partition index."), new Field("current_leader_epoch", Type.INT32, "An epoch used to fence consumers/replicas with old metadata. If the epoch provided by the client is larger than the current epoch known to the broker, then the UNKNOWN_LEADER_EPOCH error code will be returned. If the provided epoch is smaller, then the FENCED_LEADER_EPOCH error code will be returned."), new Field("leader_epoch", Type.INT32, "The epoch to look up an offset for."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 4; public OffsetForLeaderPartition(Readable _readable, short _version) { read(_readable, _version); } public OffsetForLeaderPartition() { this.partition = 0; this.currentLeaderEpoch = -1; this.leaderEpoch = 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { if (_version > 4) { throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetForLeaderPartition"); } this.partition = _readable.readInt(); if (_version >= 2) { this.currentLeaderEpoch = _readable.readInt(); } else { this.currentLeaderEpoch = -1; } this.leaderEpoch = _readable.readInt(); this._unknownTaggedFields = null; if (_version >= 4) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: 
// NOTE(review): this span begins mid-way through OffsetForLeaderPartition.read() --
// the statements below, up to the first method-closing brace, are the tail of the
// unknown-tagged-field loop of read(), preserved verbatim. The enclosing generated
// class (OffsetForLeaderEpochRequestData) opens before this view.
                        // Unknown tag: buffer the raw bytes so they round-trip untouched on re-serialization.
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Serializes this partition entry to the Kafka binary wire format.
     * Field order and version gates mirror the schemas: partition, then
     * currentLeaderEpoch (v2+), then leaderEpoch; tagged fields only exist in v4+
     * (the flexible-version encoding).
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(partition);
        if (_version >= 2) {
            _writable.writeInt(currentLeaderEpoch);
        }
        _writable.writeInt(leaderEpoch);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 4) {
            // Flexible versions carry a varint count followed by the raw tagged fields.
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this entry; must mirror write() exactly
     * (three 4-byte ints, the middle one only in v2+, plus tagged-field overhead in v4+).
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version > 4) {
            throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetForLeaderPartition");
        }
        _size.addBytes(4);
        if (_version >= 2) {
            _size.addBytes(4);
        }
        _size.addBytes(4);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 4) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /** Value equality over all three fields plus any buffered unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof OffsetForLeaderPartition)) return false;
        OffsetForLeaderPartition other = (OffsetForLeaderPartition) obj;
        if (partition != other.partition) return false;
        if (currentLeaderEpoch != other.currentLeaderEpoch) return false;
        if (leaderEpoch != other.leaderEpoch) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // Hash excludes _unknownTaggedFields (generated-code convention: equals compares
    // them, hashCode does not -- still consistent, since equal objects hash equal).
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + partition;
        hashCode = 31 * hashCode + currentLeaderEpoch;
        hashCode = 31 * hashCode + leaderEpoch;
        return hashCode;
    }

    // Shallow field copy; note the unknown tagged fields are NOT carried over.
    @Override
    public OffsetForLeaderPartition duplicate() {
        OffsetForLeaderPartition _duplicate = new OffsetForLeaderPartition();
        _duplicate.partition = partition;
        _duplicate.currentLeaderEpoch = currentLeaderEpoch;
        _duplicate.leaderEpoch = leaderEpoch;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "OffsetForLeaderPartition("
            + "partition=" + partition
            + ", currentLeaderEpoch=" + currentLeaderEpoch
            + ", leaderEpoch=" + leaderEpoch
            + ")";
    }

    public int partition() {
        return this.partition;
    }

    public int currentLeaderEpoch() {
        return this.currentLeaderEpoch;
    }

    public int leaderEpoch() {
        return this.leaderEpoch;
    }

    // Lazily materializes the tagged-field list so callers can append to it.
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // Fluent setters (return this) as produced by the message generator.
    public OffsetForLeaderPartition setPartition(int v) {
        this.partition = v;
        return this;
    }

    public OffsetForLeaderPartition setCurrentLeaderEpoch(int v) {
        this.currentLeaderEpoch = v;
        return this;
    }

    public OffsetForLeaderPartition setLeaderEpoch(int v) {
        this.leaderEpoch = v;
        return this;
    }
}

/**
 * Multiset of {@link OffsetForLeaderTopic} keyed by topic name, backed by the
 * implicit linked-hash collection so elements double as hash-table entries.
 */
public static class OffsetForLeaderTopicCollection extends ImplicitLinkedHashMultiCollection<OffsetForLeaderTopic> {
    public OffsetForLeaderTopicCollection() {
        super();
    }

    public OffsetForLeaderTopicCollection(int expectedNumElements) {
        super(expectedNumElements);
    }

    public OffsetForLeaderTopicCollection(Iterator<OffsetForLeaderTopic> iterator) {
        super(iterator);
    }

    /** Finds one element whose key (topic name) matches, or null. */
    public OffsetForLeaderTopic find(String topic) {
        OffsetForLeaderTopic _key = new OffsetForLeaderTopic();
        _key.setTopic(topic);
        return find(_key);
    }

    /** Finds every element whose key (topic name) matches. */
    public List<OffsetForLeaderTopic> findAll(String topic) {
        OffsetForLeaderTopic _key = new OffsetForLeaderTopic();
        _key.setTopic(topic);
        return findAll(_key);
    }

    /** Deep copy: duplicates every element into a fresh collection. */
    public OffsetForLeaderTopicCollection duplicate() {
        OffsetForLeaderTopicCollection _duplicate = new OffsetForLeaderTopicCollection(size());
        for (OffsetForLeaderTopic _element : this) {
            _duplicate.add(_element.duplicate());
        }
        return _duplicate;
    }
}
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetForLeaderEpochRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.*;

/**
 * Converts {@link OffsetForLeaderEpochRequestData} to and from its Jackson JSON
 * representation. Field presence requirements depend on the message version:
 * "replicaId" is mandatory from v3 (defaulting to -2 below), "topics" always.
 */
public class OffsetForLeaderEpochRequestDataJsonConverter {
    /** Builds a request object from JSON, enforcing version-dependent mandatory fields. */
    public static OffsetForLeaderEpochRequestData read(JsonNode _node, short _version) {
        OffsetForLeaderEpochRequestData _object = new OffsetForLeaderEpochRequestData();
        JsonNode replicaIdJson = _node.get("replicaId");
        if (replicaIdJson != null) {
            _object.replicaId = MessageUtil.jsonNodeToInt(replicaIdJson, "OffsetForLeaderEpochRequestData");
        } else if (_version >= 3) {
            throw new RuntimeException("OffsetForLeaderEpochRequestData: unable to locate field 'replicaId', which is mandatory in version " + _version);
        } else {
            // Pre-v3 requests carried no replica id; -2 is the generated default.
            _object.replicaId = -2;
        }
        JsonNode topicsJson = _node.get("topics");
        if (topicsJson == null) {
            throw new RuntimeException("OffsetForLeaderEpochRequestData: unable to locate field 'topics', which is mandatory in version " + _version);
        }
        if (!topicsJson.isArray()) {
            // NOTE(review): message reports the parent _node's type, not topicsJson's --
            // kept as-is to match the generator's output.
            throw new RuntimeException("OffsetForLeaderEpochRequestData expected a JSON array, but got " + _node.getNodeType());
        }
        OffsetForLeaderTopicCollection topics = new OffsetForLeaderTopicCollection(topicsJson.size());
        _object.topics = topics;
        for (JsonNode topicJson : topicsJson) {
            topics.add(OffsetForLeaderTopicJsonConverter.read(topicJson, _version));
        }
        return _object;
    }

    /** Serializes a request to JSON; "replicaId" is emitted only for v3+. */
    public static JsonNode write(OffsetForLeaderEpochRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode json = new ObjectNode(JsonNodeFactory.instance);
        if (_version >= 3) {
            json.set("replicaId", new IntNode(_object.replicaId));
        }
        ArrayNode topicsJson = new ArrayNode(JsonNodeFactory.instance);
        for (OffsetForLeaderTopic topic : _object.topics) {
            topicsJson.add(OffsetForLeaderTopicJsonConverter.write(topic, _version, _serializeRecords));
        }
        json.set("topics", topicsJson);
        return json;
    }

    /** Convenience overload: serializes records by default. */
    public static JsonNode write(OffsetForLeaderEpochRequestData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for one partition entry inside a topic. */
    public static class OffsetForLeaderPartitionJsonConverter {
        public static OffsetForLeaderPartition read(JsonNode _node, short _version) {
            OffsetForLeaderPartition _object = new OffsetForLeaderPartition();
            JsonNode partitionJson = _node.get("partition");
            if (partitionJson == null) {
                throw new RuntimeException("OffsetForLeaderPartition: unable to locate field 'partition', which is mandatory in version " + _version);
            }
            _object.partition = MessageUtil.jsonNodeToInt(partitionJson, "OffsetForLeaderPartition");
            JsonNode currentEpochJson = _node.get("currentLeaderEpoch");
            if (currentEpochJson != null) {
                _object.currentLeaderEpoch = MessageUtil.jsonNodeToInt(currentEpochJson, "OffsetForLeaderPartition");
            } else if (_version >= 2) {
                throw new RuntimeException("OffsetForLeaderPartition: unable to locate field 'currentLeaderEpoch', which is mandatory in version " + _version);
            } else {
                // Field only exists from v2; -1 is the generated default below that.
                _object.currentLeaderEpoch = -1;
            }
            JsonNode leaderEpochJson = _node.get("leaderEpoch");
            if (leaderEpochJson == null) {
                throw new RuntimeException("OffsetForLeaderPartition: unable to locate field 'leaderEpoch', which is mandatory in version " + _version);
            }
            _object.leaderEpoch = MessageUtil.jsonNodeToInt(leaderEpochJson, "OffsetForLeaderPartition");
            return _object;
        }

        public static JsonNode write(OffsetForLeaderPartition _object, short _version, boolean _serializeRecords) {
            ObjectNode json = new ObjectNode(JsonNodeFactory.instance);
            json.set("partition", new IntNode(_object.partition));
            if (_version >= 2) {
                json.set("currentLeaderEpoch", new IntNode(_object.currentLeaderEpoch));
            }
            json.set("leaderEpoch", new IntNode(_object.leaderEpoch));
            return json;
        }

        public static JsonNode write(OffsetForLeaderPartition _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** JSON converter for one topic entry (name plus its partition list). */
    public static class OffsetForLeaderTopicJsonConverter {
        public static OffsetForLeaderTopic read(JsonNode _node, short _version) {
            OffsetForLeaderTopic _object = new OffsetForLeaderTopic();
            JsonNode topicJson = _node.get("topic");
            if (topicJson == null) {
                throw new RuntimeException("OffsetForLeaderTopic: unable to locate field 'topic', which is mandatory in version " + _version);
            }
            if (!topicJson.isTextual()) {
                throw new RuntimeException("OffsetForLeaderTopic expected a string type, but got " + _node.getNodeType());
            }
            _object.topic = topicJson.asText();
            JsonNode partitionsJson = _node.get("partitions");
            if (partitionsJson == null) {
                throw new RuntimeException("OffsetForLeaderTopic: unable to locate field 'partitions', which is mandatory in version " + _version);
            }
            if (!partitionsJson.isArray()) {
                throw new RuntimeException("OffsetForLeaderTopic expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<OffsetForLeaderPartition> partitions = new ArrayList<OffsetForLeaderPartition>(partitionsJson.size());
            _object.partitions = partitions;
            for (JsonNode partitionJson : partitionsJson) {
                partitions.add(OffsetForLeaderPartitionJsonConverter.read(partitionJson, _version));
            }
            return _object;
        }

        public static JsonNode write(OffsetForLeaderTopic _object, short _version, boolean _serializeRecords) {
            ObjectNode json = new ObjectNode(JsonNodeFactory.instance);
            json.set("topic", new TextNode(_object.topic));
            ArrayNode partitionsJson = new ArrayNode(JsonNodeFactory.instance);
            for (OffsetForLeaderPartition partition : _object.partitions) {
                partitionsJson.add(OffsetForLeaderPartitionJsonConverter.write(partition, _version, _serializeRecords));
            }
            json.set("partitions", partitionsJson);
            return json;
        }

        public static JsonNode write(OffsetForLeaderTopic _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetForLeaderEpochResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import 
org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class OffsetForLeaderEpochResponseData implements ApiMessage { int throttleTimeMs; OffsetForLeaderTopicResultCollection topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topics", new ArrayOf(OffsetForLeaderTopicResult.SCHEMA_0), "Each topic we fetched offsets for.") ); public static final Schema SCHEMA_1 = new Schema( new Field("topics", new ArrayOf(OffsetForLeaderTopicResult.SCHEMA_1), "Each topic we fetched offsets for.") ); public static final Schema SCHEMA_2 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("topics", new ArrayOf(OffsetForLeaderTopicResult.SCHEMA_1), "Each topic we fetched offsets for.") ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("topics", new CompactArrayOf(OffsetForLeaderTopicResult.SCHEMA_4), "Each topic we fetched offsets for."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 4; public OffsetForLeaderEpochResponseData(Readable _readable, short _version) { read(_readable, _version); } public OffsetForLeaderEpochResponseData() { this.throttleTimeMs = 0; this.topics = new OffsetForLeaderTopicResultCollection(0); } @Override public short apiKey() { return 23; } @Override public short lowestSupportedVersion() { return 0; } 
@Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { if (_version >= 2) { this.throttleTimeMs = _readable.readInt(); } else { this.throttleTimeMs = 0; } { if (_version >= 4) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } OffsetForLeaderTopicResultCollection newCollection = new OffsetForLeaderTopicResultCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetForLeaderTopicResult(_readable, _version)); } this.topics = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } OffsetForLeaderTopicResultCollection newCollection = new OffsetForLeaderTopicResultCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OffsetForLeaderTopicResult(_readable, _version)); } this.topics = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 4) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short 
_version) { int _numTaggedFields = 0; if (_version >= 2) { _writable.writeInt(throttleTimeMs); } if (_version >= 4) { _writable.writeUnsignedVarint(topics.size() + 1); for (OffsetForLeaderTopicResult topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(topics.size()); for (OffsetForLeaderTopicResult topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 4) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 2) { _size.addBytes(4); } { if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); } else { _size.addBytes(4); } for (OffsetForLeaderTopicResult topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetForLeaderEpochResponseData)) return false; OffsetForLeaderEpochResponseData other = 
(OffsetForLeaderEpochResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); return hashCode; } @Override public OffsetForLeaderEpochResponseData duplicate() { OffsetForLeaderEpochResponseData _duplicate = new OffsetForLeaderEpochResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; OffsetForLeaderTopicResultCollection newTopics = new OffsetForLeaderTopicResultCollection(topics.size()); for (OffsetForLeaderTopicResult _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "OffsetForLeaderEpochResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public OffsetForLeaderTopicResultCollection topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetForLeaderEpochResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public OffsetForLeaderEpochResponseData setTopics(OffsetForLeaderTopicResultCollection v) { this.topics = v; return this; } public static class OffsetForLeaderTopicResult implements Message, ImplicitLinkedHashMultiCollection.Element { String topic; List<EpochEndOffset> partitions; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("topic", 
Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(EpochEndOffset.SCHEMA_0), "Each partition in the topic we fetched offsets for.") ); public static final Schema SCHEMA_1 = new Schema( new Field("topic", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(EpochEndOffset.SCHEMA_1), "Each partition in the topic we fetched offsets for.") ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("topic", Type.COMPACT_STRING, "The topic name."), new Field("partitions", new CompactArrayOf(EpochEndOffset.SCHEMA_4), "Each partition in the topic we fetched offsets for."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 4; public OffsetForLeaderTopicResult(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public OffsetForLeaderTopicResult() { this.topic = ""; this.partitions = new ArrayList<EpochEndOffset>(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { if (_version > 4) { throw new UnsupportedVersionException("Can't read version " + _version + " of OffsetForLeaderTopicResult"); } { int length; if (_version >= 4) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field topic was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field 
topic had invalid length " + length); } else { this.topic = _readable.readString(length); } } { if (_version >= 4) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<EpochEndOffset> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new EpochEndOffset(_readable, _version)); } this.partitions = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<EpochEndOffset> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new EpochEndOffset(_readable, _version)); } this.partitions = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 4) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(topic); if (_version >= 4) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) 
_stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 4) { _writable.writeUnsignedVarint(partitions.size() + 1); for (EpochEndOffset partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitions.size()); for (EpochEndOffset partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 4) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 4) { throw new UnsupportedVersionException("Can't size version " + _version + " of OffsetForLeaderTopicResult"); } { byte[] _stringBytes = topic.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'topic' field is too long to be serialized"); } _cache.cacheSerializedValue(topic, _stringBytes); if (_version >= 4) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { _size.addBytes(4); } for (EpochEndOffset partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof OffsetForLeaderTopicResult)) return false; OffsetForLeaderTopicResult other = (OffsetForLeaderTopicResult) obj; if (this.topic == null) { if (other.topic != null) return false; } else { if (!this.topic.equals(other.topic)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof OffsetForLeaderTopicResult)) return false; OffsetForLeaderTopicResult other = (OffsetForLeaderTopicResult) obj; if (this.topic == null) { if (other.topic != null) return false; } else { if (!this.topic.equals(other.topic)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (topic == null ? 0 : topic.hashCode()); return hashCode; } @Override public OffsetForLeaderTopicResult duplicate() { OffsetForLeaderTopicResult _duplicate = new OffsetForLeaderTopicResult(); _duplicate.topic = topic; ArrayList<EpochEndOffset> newPartitions = new ArrayList<EpochEndOffset>(partitions.size()); for (EpochEndOffset _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "OffsetForLeaderTopicResult(" + "topic=" + ((topic == null) ? 
"null" : "'" + topic.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String topic() { return this.topic; } public List<EpochEndOffset> partitions() { return this.partitions; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OffsetForLeaderTopicResult setTopic(String v) { this.topic = v; return this; } public OffsetForLeaderTopicResult setPartitions(List<EpochEndOffset> v) { this.partitions = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class EpochEndOffset implements Message { short errorCode; int partition; int leaderEpoch; long endOffset; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("error_code", Type.INT16, "The error code 0, or if there was no error."), new Field("partition", Type.INT32, "The partition index."), new Field("end_offset", Type.INT64, "The end offset of the epoch.") ); public static final Schema SCHEMA_1 = new Schema( new Field("error_code", Type.INT16, "The error code 0, or if there was no error."), new Field("partition", Type.INT32, "The partition index."), new Field("leader_epoch", Type.INT32, "The leader epoch of the partition."), new Field("end_offset", Type.INT64, "The end offset of the epoch.") ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = new Schema( new Field("error_code", Type.INT16, "The error code 0, or if there was no error."), new Field("partition", Type.INT32, "The partition index."), new Field("leader_epoch", Type.INT32, "The leader epoch of the partition."), new Field("end_offset", Type.INT64, 
"The end offset of the epoch."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 4; public EpochEndOffset(Readable _readable, short _version) { read(_readable, _version); } public EpochEndOffset() { this.errorCode = (short) 0; this.partition = 0; this.leaderEpoch = -1; this.endOffset = -1L; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { if (_version > 4) { throw new UnsupportedVersionException("Can't read version " + _version + " of EpochEndOffset"); } this.errorCode = _readable.readShort(); this.partition = _readable.readInt(); if (_version >= 1) { this.leaderEpoch = _readable.readInt(); } else { this.leaderEpoch = -1; } this.endOffset = _readable.readLong(); this._unknownTaggedFields = null; if (_version >= 4) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeShort(errorCode); _writable.writeInt(partition); if (_version >= 1) { _writable.writeInt(leaderEpoch); } _writable.writeLong(endOffset); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 4) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new 
UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 4) { throw new UnsupportedVersionException("Can't size version " + _version + " of EpochEndOffset"); } _size.addBytes(2); _size.addBytes(4); if (_version >= 1) { _size.addBytes(4); } _size.addBytes(8); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 4) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof EpochEndOffset)) return false; EpochEndOffset other = (EpochEndOffset) obj; if (errorCode != other.errorCode) return false; if (partition != other.partition) return false; if (leaderEpoch != other.leaderEpoch) return false; if (endOffset != other.endOffset) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + partition; hashCode = 31 * hashCode + leaderEpoch; hashCode = 31 * hashCode + ((int) (endOffset >> 32) ^ (int) endOffset); return hashCode; } @Override public EpochEndOffset duplicate() { EpochEndOffset _duplicate = new EpochEndOffset(); _duplicate.errorCode = errorCode; _duplicate.partition = partition; _duplicate.leaderEpoch = leaderEpoch; _duplicate.endOffset = endOffset; return _duplicate; } 
// ---- EpochEndOffset (continued): string form, accessors and fluent setters ----

// Human-readable rendering of all four fields; used for logging/debugging.
@Override
public String toString() {
    return "EpochEndOffset(" + "errorCode=" + errorCode + ", partition=" + partition + ", leaderEpoch=" + leaderEpoch + ", endOffset=" + endOffset + ")";
}

public short errorCode() {
    return this.errorCode;
}

public int partition() {
    return this.partition;
}

public int leaderEpoch() {
    return this.leaderEpoch;
}

public long endOffset() {
    return this.endOffset;
}

// Lazily allocates the backing list on first access so that messages with no
// unknown tagged fields never pay for an empty ArrayList.
@Override
public List<RawTaggedField> unknownTaggedFields() {
    if (_unknownTaggedFields == null) {
        _unknownTaggedFields = new ArrayList<>(0);
    }
    return _unknownTaggedFields;
}

// Fluent setters: each returns this to allow call chaining.
public EpochEndOffset setErrorCode(short v) {
    this.errorCode = v;
    return this;
}

public EpochEndOffset setPartition(int v) {
    this.partition = v;
    return this;
}

public EpochEndOffset setLeaderEpoch(int v) {
    this.leaderEpoch = v;
    return this;
}

public EpochEndOffset setEndOffset(long v) {
    this.endOffset = v;
    return this;
}
// end of EpochEndOffset
}

/**
 * A multi-collection of OffsetForLeaderTopicResult elements keyed by topic name.
 * Lookup is delegated to ImplicitLinkedHashMultiCollection using a probe element
 * that carries only the key field (topic).
 */
public static class OffsetForLeaderTopicResultCollection extends ImplicitLinkedHashMultiCollection<OffsetForLeaderTopicResult> {
    public OffsetForLeaderTopicResultCollection() {
        super();
    }

    public OffsetForLeaderTopicResultCollection(int expectedNumElements) {
        super(expectedNumElements);
    }

    public OffsetForLeaderTopicResultCollection(Iterator<OffsetForLeaderTopicResult> iterator) {
        super(iterator);
    }

    // Returns the first element whose topic equals the given name, or null per
    // the superclass find() contract.
    public OffsetForLeaderTopicResult find(String topic) {
        OffsetForLeaderTopicResult _key = new OffsetForLeaderTopicResult();
        _key.setTopic(topic);
        return find(_key);
    }

    // Returns every element whose topic equals the given name (this is a
    // multi-collection, so duplicates of the key are allowed).
    public List<OffsetForLeaderTopicResult> findAll(String topic) {
        OffsetForLeaderTopicResult _key = new OffsetForLeaderTopicResult();
        _key.setTopic(topic);
        return findAll(_key);
    }

    // Deep copy: duplicates every contained element into a new collection.
    public OffsetForLeaderTopicResultCollection duplicate() {
        OffsetForLeaderTopicResultCollection _duplicate = new OffsetForLeaderTopicResultCollection(size());
        for (OffsetForLeaderTopicResult _element : this) {
            _duplicate.add(_element.duplicate());
        }
        return _duplicate;
    }
}
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/OffsetForLeaderEpochResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.*;

/**
 * Converts OffsetForLeaderEpochResponseData to and from a Jackson JSON tree.
 *
 * NOTE(review): the type-mismatch error messages previously reported
 * {@code _node.getNodeType()} (the enclosing object node, always OBJECT at that
 * point) instead of the offending child node's type. They now report the child
 * node's actual type so the diagnostic names the value that failed validation.
 */
public class OffsetForLeaderEpochResponseDataJsonConverter {
    /**
     * Deserializes an OffsetForLeaderEpochResponseData from a JSON node.
     *
     * @param _node    the JSON object to read from
     * @param _version the message version being parsed; governs which fields
     *                 are mandatory (throttleTimeMs is mandatory from v2)
     * @throws RuntimeException if a mandatory field is missing or a field has
     *                          the wrong JSON type
     */
    public static OffsetForLeaderEpochResponseData read(JsonNode _node, short _version) {
        OffsetForLeaderEpochResponseData _object = new OffsetForLeaderEpochResponseData();
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            if (_version >= 2) {
                throw new RuntimeException("OffsetForLeaderEpochResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
            } else {
                // Field did not exist before v2; fall back to the default.
                _object.throttleTimeMs = 0;
            }
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "OffsetForLeaderEpochResponseData");
        }
        JsonNode _topicsNode = _node.get("topics");
        if (_topicsNode == null) {
            throw new RuntimeException("OffsetForLeaderEpochResponseData: unable to locate field 'topics', which is mandatory in version " + _version);
        } else {
            if (!_topicsNode.isArray()) {
                // Report the offending node's type, not the parent object's.
                throw new RuntimeException("OffsetForLeaderEpochResponseData expected a JSON array, but got " + _topicsNode.getNodeType());
            }
            OffsetForLeaderTopicResultCollection _collection = new OffsetForLeaderTopicResultCollection(_topicsNode.size());
            _object.topics = _collection;
            for (JsonNode _element : _topicsNode) {
                _collection.add(OffsetForLeaderTopicResultJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes an OffsetForLeaderEpochResponseData to a JSON object.
     * throttleTimeMs is only emitted for versions that carry it (v2+).
     *
     * @param _serializeRecords passed through to nested converters
     */
    public static JsonNode write(OffsetForLeaderEpochResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        if (_version >= 2) {
            _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        }
        ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
        for (OffsetForLeaderTopicResult _element : _object.topics) {
            _topicsArray.add(OffsetForLeaderTopicResultJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("topics", _topicsArray);
        return _node;
    }

    /** Convenience overload that always serializes records. */
    public static JsonNode write(OffsetForLeaderEpochResponseData _object, short _version) {
        return write(_object, _version, true);
    }

    /** Converts the nested EpochEndOffset struct to and from JSON. */
    public static class EpochEndOffsetJsonConverter {
        /**
         * Deserializes an EpochEndOffset. errorCode, partition and endOffset
         * are mandatory in every version; leaderEpoch is mandatory from v1 and
         * defaults to -1 below that.
         */
        public static EpochEndOffset read(JsonNode _node, short _version) {
            EpochEndOffset _object = new EpochEndOffset();
            JsonNode _errorCodeNode = _node.get("errorCode");
            if (_errorCodeNode == null) {
                throw new RuntimeException("EpochEndOffset: unable to locate field 'errorCode', which is mandatory in version " + _version);
            } else {
                _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "EpochEndOffset");
            }
            JsonNode _partitionNode = _node.get("partition");
            if (_partitionNode == null) {
                throw new RuntimeException("EpochEndOffset: unable to locate field 'partition', which is mandatory in version " + _version);
            } else {
                _object.partition = MessageUtil.jsonNodeToInt(_partitionNode, "EpochEndOffset");
            }
            JsonNode _leaderEpochNode = _node.get("leaderEpoch");
            if (_leaderEpochNode == null) {
                if (_version >= 1) {
                    throw new RuntimeException("EpochEndOffset: unable to locate field 'leaderEpoch', which is mandatory in version " + _version);
                } else {
                    _object.leaderEpoch = -1;
                }
            } else {
                _object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "EpochEndOffset");
            }
            JsonNode _endOffsetNode = _node.get("endOffset");
            if (_endOffsetNode == null) {
                throw new RuntimeException("EpochEndOffset: unable to locate field 'endOffset', which is mandatory in version " + _version);
            } else {
                _object.endOffset = MessageUtil.jsonNodeToLong(_endOffsetNode, "EpochEndOffset");
            }
            return _object;
        }

        /**
         * Serializes an EpochEndOffset; leaderEpoch is only written for
         * versions that carry it (v1+).
         */
        public static JsonNode write(EpochEndOffset _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("errorCode", new ShortNode(_object.errorCode));
            _node.set("partition", new IntNode(_object.partition));
            if (_version >= 1) {
                _node.set("leaderEpoch", new IntNode(_object.leaderEpoch));
            }
            _node.set("endOffset", new LongNode(_object.endOffset));
            return _node;
        }

        /** Convenience overload that always serializes records. */
        public static JsonNode write(EpochEndOffset _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** Converts the nested OffsetForLeaderTopicResult struct to and from JSON. */
    public static class OffsetForLeaderTopicResultJsonConverter {
        /**
         * Deserializes an OffsetForLeaderTopicResult. Both 'topic' (string)
         * and 'partitions' (array of EpochEndOffset) are mandatory.
         */
        public static OffsetForLeaderTopicResult read(JsonNode _node, short _version) {
            OffsetForLeaderTopicResult _object = new OffsetForLeaderTopicResult();
            JsonNode _topicNode = _node.get("topic");
            if (_topicNode == null) {
                throw new RuntimeException("OffsetForLeaderTopicResult: unable to locate field 'topic', which is mandatory in version " + _version);
            } else {
                if (!_topicNode.isTextual()) {
                    // Report the offending node's type, not the parent object's.
                    throw new RuntimeException("OffsetForLeaderTopicResult expected a string type, but got " + _topicNode.getNodeType());
                }
                _object.topic = _topicNode.asText();
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("OffsetForLeaderTopicResult: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    // Report the offending node's type, not the parent object's.
                    throw new RuntimeException("OffsetForLeaderTopicResult expected a JSON array, but got " + _partitionsNode.getNodeType());
                }
                ArrayList<EpochEndOffset> _collection = new ArrayList<EpochEndOffset>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(EpochEndOffsetJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        /** Serializes an OffsetForLeaderTopicResult to a JSON object. */
        public static JsonNode write(OffsetForLeaderTopicResult _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("topic", new TextNode(_object.topic));
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (EpochEndOffset _element : _object.partitions) {
                _partitionsArray.add(EpochEndOffsetJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }

        /** Convenience overload that always serializes records. */
        public static JsonNode write(OffsetForLeaderTopicResult _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ProduceRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Objects; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.record.BaseRecords; import org.apache.kafka.common.record.MemoryRecords; import 
org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class ProduceRequestData implements ApiMessage { String transactionalId; short acks; int timeoutMs; TopicProduceDataCollection topicData; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("acks", Type.INT16, "The number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR."), new Field("timeout_ms", Type.INT32, "The timeout to await a response in milliseconds."), new Field("topic_data", new ArrayOf(TopicProduceData.SCHEMA_0), "Each topic to produce to.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("transactional_id", Type.NULLABLE_STRING, "The transactional ID, or null if the producer is not transactional."), new Field("acks", Type.INT16, "The number of acknowledgments the producer requires the leader to have received before considering a request complete. 
Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR."), new Field("timeout_ms", Type.INT32, "The timeout to await a response in milliseconds."), new Field("topic_data", new ArrayOf(TopicProduceData.SCHEMA_0), "Each topic to produce to.") ); public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = new Schema( new Field("transactional_id", Type.COMPACT_NULLABLE_STRING, "The transactional ID, or null if the producer is not transactional."), new Field("acks", Type.INT16, "The number of acknowledgments the producer requires the leader to have received before considering a request complete. Allowed values: 0 for no acknowledgments, 1 for only the leader and -1 for the full ISR."), new Field("timeout_ms", Type.INT32, "The timeout to await a response in milliseconds."), new Field("topic_data", new CompactArrayOf(TopicProduceData.SCHEMA_9), "Each topic to produce to."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 9; public ProduceRequestData(Readable _readable, short _version) { read(_readable, _version); } public ProduceRequestData() { this.transactionalId = null; this.acks = (short) 0; this.timeoutMs = 0; this.topicData = new TopicProduceDataCollection(0); } @Override public short apiKey() { return 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 9; } @Override public void read(Readable _readable, short _version) { if (_version >= 3) { int length; if (_version >= 9) { length = 
_readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.transactionalId = null; } else if (length > 0x7fff) { throw new RuntimeException("string field transactionalId had invalid length " + length); } else { this.transactionalId = _readable.readString(length); } } else { this.transactionalId = null; } this.acks = _readable.readShort(); this.timeoutMs = _readable.readInt(); { if (_version >= 9) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topicData was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } TopicProduceDataCollection newCollection = new TopicProduceDataCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new TopicProduceData(_readable, _version)); } this.topicData = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topicData was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } TopicProduceDataCollection newCollection = new TopicProduceDataCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new TopicProduceData(_readable, _version)); } this.topicData = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 9) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = 
_readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 3) { if (transactionalId == null) { if (_version >= 9) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(transactionalId); if (_version >= 9) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } else { if (this.transactionalId != null) { throw new UnsupportedVersionException("Attempted to write a non-default transactionalId at version " + _version); } } _writable.writeShort(acks); _writable.writeInt(timeoutMs); if (_version >= 9) { _writable.writeUnsignedVarint(topicData.size() + 1); for (TopicProduceData topicDataElement : topicData) { topicDataElement.write(_writable, _cache, _version); } } else { _writable.writeInt(topicData.size()); for (TopicProduceData topicDataElement : topicData) { topicDataElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 9) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 3) { if (transactionalId == null) { if (_version >= 9) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = transactionalId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { 
throw new RuntimeException("'transactionalId' field is too long to be serialized"); } _cache.cacheSerializedValue(transactionalId, _stringBytes); if (_version >= 9) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } _size.addBytes(2); _size.addBytes(4); { if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topicData.size() + 1)); } else { _size.addBytes(4); } for (TopicProduceData topicDataElement : topicData) { topicDataElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof ProduceRequestData)) return false; ProduceRequestData other = (ProduceRequestData) obj; if (this.transactionalId == null) { if (other.transactionalId != null) return false; } else { if (!this.transactionalId.equals(other.transactionalId)) return false; } if (acks != other.acks) return false; if (timeoutMs != other.timeoutMs) return false; if (this.topicData == null) { if (other.topicData != null) return false; } else { if (!this.topicData.equals(other.topicData)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (transactionalId == null ? 
0 : transactionalId.hashCode()); hashCode = 31 * hashCode + acks; hashCode = 31 * hashCode + timeoutMs; hashCode = 31 * hashCode + (topicData == null ? 0 : topicData.hashCode()); return hashCode; } @Override public ProduceRequestData duplicate() { ProduceRequestData _duplicate = new ProduceRequestData(); if (transactionalId == null) { _duplicate.transactionalId = null; } else { _duplicate.transactionalId = transactionalId; } _duplicate.acks = acks; _duplicate.timeoutMs = timeoutMs; TopicProduceDataCollection newTopicData = new TopicProduceDataCollection(topicData.size()); for (TopicProduceData _element : topicData) { newTopicData.add(_element.duplicate()); } _duplicate.topicData = newTopicData; return _duplicate; } @Override public String toString() { return "ProduceRequestData(" + "transactionalId=" + ((transactionalId == null) ? "null" : "'" + transactionalId.toString() + "'") + ", acks=" + acks + ", timeoutMs=" + timeoutMs + ", topicData=" + MessageUtil.deepToString(topicData.iterator()) + ")"; } public String transactionalId() { return this.transactionalId; } public short acks() { return this.acks; } public int timeoutMs() { return this.timeoutMs; } public TopicProduceDataCollection topicData() { return this.topicData; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public ProduceRequestData setTransactionalId(String v) { this.transactionalId = v; return this; } public ProduceRequestData setAcks(short v) { this.acks = v; return this; } public ProduceRequestData setTimeoutMs(int v) { this.timeoutMs = v; return this; } public ProduceRequestData setTopicData(TopicProduceDataCollection v) { this.topicData = v; return this; } public static class TopicProduceData implements Message, ImplicitLinkedHashMultiCollection.Element { String name; List<PartitionProduceData> partitionData; private List<RawTaggedField> _unknownTaggedFields; private int 
next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("partition_data", new ArrayOf(PartitionProduceData.SCHEMA_0), "Each partition to produce to.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = new Schema( new Field("name", Type.COMPACT_STRING, "The topic name."), new Field("partition_data", new CompactArrayOf(PartitionProduceData.SCHEMA_9), "Each partition to produce to."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 9; public TopicProduceData(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public TopicProduceData() { this.name = ""; this.partitionData = new ArrayList<PartitionProduceData>(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 9; } @Override public void read(Readable _readable, short _version) { if (_version > 9) { throw new UnsupportedVersionException("Can't read version " + _version + " of TopicProduceData"); } { int length; if (_version >= 9) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if 
(length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { if (_version >= 9) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitionData was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<PartitionProduceData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new PartitionProduceData(_readable, _version)); } this.partitionData = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitionData was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<PartitionProduceData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new PartitionProduceData(_readable, _version)); } this.partitionData = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 9) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int 
_numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 9) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 9) { _writable.writeUnsignedVarint(partitionData.size() + 1); for (PartitionProduceData partitionDataElement : partitionData) { partitionDataElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitionData.size()); for (PartitionProduceData partitionDataElement : partitionData) { partitionDataElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 9) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 9) { throw new UnsupportedVersionException("Can't size version " + _version + " of TopicProduceData"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); if (_version >= 9) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitionData.size() + 1)); } else { _size.addBytes(4); } for (PartitionProduceData partitionDataElement : partitionData) { partitionDataElement.addSize(_size, _cache, _version); } } if 
(_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof TopicProduceData)) return false; TopicProduceData other = (TopicProduceData) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof TopicProduceData)) return false; TopicProduceData other = (TopicProduceData) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.partitionData == null) { if (other.partitionData != null) return false; } else { if (!this.partitionData.equals(other.partitionData)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 
0 : name.hashCode()); return hashCode; } @Override public TopicProduceData duplicate() { TopicProduceData _duplicate = new TopicProduceData(); _duplicate.name = name; ArrayList<PartitionProduceData> newPartitionData = new ArrayList<PartitionProduceData>(partitionData.size()); for (PartitionProduceData _element : partitionData) { newPartitionData.add(_element.duplicate()); } _duplicate.partitionData = newPartitionData; return _duplicate; } @Override public String toString() { return "TopicProduceData(" + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'") + ", partitionData=" + MessageUtil.deepToString(partitionData.iterator()) + ")"; } public String name() { return this.name; } public List<PartitionProduceData> partitionData() { return this.partitionData; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public TopicProduceData setName(String v) { this.name = v; return this; } public TopicProduceData setPartitionData(List<PartitionProduceData> v) { this.partitionData = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class PartitionProduceData implements Message { int index; BaseRecords records; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("index", Type.INT32, "The partition index."), new Field("records", Type.RECORDS, "The record data to be produced.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = SCHEMA_4; public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema 
SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = SCHEMA_7; public static final Schema SCHEMA_9 = new Schema( new Field("index", Type.INT32, "The partition index."), new Field("records", Type.COMPACT_RECORDS, "The record data to be produced."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 9; public PartitionProduceData(Readable _readable, short _version) { read(_readable, _version); } public PartitionProduceData() { this.index = 0; this.records = null; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 9; } @Override public void read(Readable _readable, short _version) { if (_version > 9) { throw new UnsupportedVersionException("Can't read version " + _version + " of PartitionProduceData"); } this.index = _readable.readInt(); { int length; if (_version >= 9) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readInt(); } if (length < 0) { this.records = null; } else { this.records = _readable.readRecords(length); } } this._unknownTaggedFields = null; if (_version >= 9) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(index); if (records == null) { if (_version >= 9) { _writable.writeUnsignedVarint(0); } else { _writable.writeInt(-1); } } else { if (_version >= 9) { 
_writable.writeUnsignedVarint(records.sizeInBytes() + 1); } else { _writable.writeInt(records.sizeInBytes()); } _writable.writeRecords(records); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 9) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 9) { throw new UnsupportedVersionException("Can't size version " + _version + " of PartitionProduceData"); } _size.addBytes(4); if (records == null) { if (_version >= 9) { _size.addBytes(1); } else { _size.addBytes(4); } } else { _size.addZeroCopyBytes(records.sizeInBytes()); if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(records.sizeInBytes() + 1)); } else { _size.addBytes(4); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof PartitionProduceData)) return false; PartitionProduceData other = (PartitionProduceData) obj; if (index != other.index) return false; if (!Objects.equals(this.records, other.records)) return false; return 
MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + index; hashCode = 31 * hashCode + Objects.hashCode(records); return hashCode; } @Override public PartitionProduceData duplicate() { PartitionProduceData _duplicate = new PartitionProduceData(); _duplicate.index = index; if (records == null) { _duplicate.records = null; } else { _duplicate.records = MemoryRecords.readableRecords(((MemoryRecords) records).buffer().duplicate()); } return _duplicate; } @Override public String toString() { return "PartitionProduceData(" + "index=" + index + ", records=" + records + ")"; } public int index() { return this.index; } public BaseRecords records() { return this.records; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public PartitionProduceData setIndex(int v) { this.index = v; return this; } public PartitionProduceData setRecords(BaseRecords v) { this.records = v; return this; } } public static class TopicProduceDataCollection extends ImplicitLinkedHashMultiCollection<TopicProduceData> { public TopicProduceDataCollection() { super(); } public TopicProduceDataCollection(int expectedNumElements) { super(expectedNumElements); } public TopicProduceDataCollection(Iterator<TopicProduceData> iterator) { super(iterator); } public TopicProduceData find(String name) { TopicProduceData _key = new TopicProduceData(); _key.setName(name); return find(_key); } public List<TopicProduceData> findAll(String name) { TopicProduceData _key = new TopicProduceData(); _key.setName(name); return findAll(_key); } public TopicProduceDataCollection duplicate() { TopicProduceDataCollection _duplicate = new TopicProduceDataCollection(size()); for (TopicProduceData _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ProduceRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BinaryNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.record.MemoryRecords;

import static org.apache.kafka.common.message.ProduceRequestData.*;

/**
 * Converts {@link ProduceRequestData} to and from Jackson JSON trees.
 *
 * Generated code: the read/write pair validates field presence and JSON node
 * types against the requested message {@code _version}, throwing
 * {@link RuntimeException} for missing mandatory fields or type mismatches.
 */
public class ProduceRequestDataJsonConverter {
    /**
     * Builds a {@link ProduceRequestData} from the given JSON object node.
     *
     * @param _node    the JSON object to read from
     * @param _version the message version the JSON is expected to conform to
     * @throws RuntimeException if a mandatory field is absent or has the wrong JSON type
     */
    public static ProduceRequestData read(JsonNode _node, short _version) {
        ProduceRequestData _object = new ProduceRequestData();
        JsonNode _transactionalIdNode = _node.get("transactionalId");
        if (_transactionalIdNode == null) {
            // transactionalId only exists on the wire from version 3 onward;
            // older versions silently default it to null.
            if (_version >= 3) {
                throw new RuntimeException("ProduceRequestData: unable to locate field 'transactionalId', which is mandatory in version " + _version);
            } else {
                _object.transactionalId = null;
            }
        } else {
            // Field is nullable: an explicit JSON null maps to a null transactional id.
            if (_transactionalIdNode.isNull()) {
                _object.transactionalId = null;
            } else {
                if (!_transactionalIdNode.isTextual()) {
                    // NOTE(review): the generator reports the parent node's type here
                    // (_node.getNodeType()), not the mismatching field's — quirk of the
                    // code generator, kept as-is.
                    throw new RuntimeException("ProduceRequestData expected a string type, but got " + _node.getNodeType());
                }
                _object.transactionalId = _transactionalIdNode.asText();
            }
        }
        JsonNode _acksNode = _node.get("acks");
        if (_acksNode == null) {
            throw new RuntimeException("ProduceRequestData: unable to locate field 'acks', which is mandatory in version " + _version);
        } else {
            _object.acks = MessageUtil.jsonNodeToShort(_acksNode, "ProduceRequestData");
        }
        JsonNode _timeoutMsNode = _node.get("timeoutMs");
        if (_timeoutMsNode == null) {
            throw new RuntimeException("ProduceRequestData: unable to locate field 'timeoutMs', which is mandatory in version " + _version);
        } else {
            _object.timeoutMs = MessageUtil.jsonNodeToInt(_timeoutMsNode, "ProduceRequestData");
        }
        JsonNode _topicDataNode = _node.get("topicData");
        if (_topicDataNode == null) {
            throw new RuntimeException("ProduceRequestData: unable to locate field 'topicData', which is mandatory in version " + _version);
        } else {
            if (!_topicDataNode.isArray()) {
                throw new RuntimeException("ProduceRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            // Keyed multi-collection (declared in ProduceRequestData), pre-sized
            // to the JSON array length.
            TopicProduceDataCollection _collection = new TopicProduceDataCollection(_topicDataNode.size());
            _object.topicData = _collection;
            for (JsonNode _element : _topicDataNode) {
                _collection.add(TopicProduceDataJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes a {@link ProduceRequestData} to a JSON object node.
     *
     * @param _serializeRecords whether nested record payloads are emitted as
     *                          binary nodes (true) or summarized by size (false);
     *                          threaded down to the partition-level converter
     * @throws UnsupportedVersionException if a non-default transactionalId is set
     *                                     at a version (&lt; 3) that cannot carry it
     */
    public static JsonNode write(ProduceRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        if (_version >= 3) {
            if (_object.transactionalId == null) {
                _node.set("transactionalId", NullNode.instance);
            } else {
                _node.set("transactionalId", new TextNode(_object.transactionalId));
            }
        } else {
            // Versions before 3 have no transactionalId field; refusing to drop a
            // set value avoids silent data loss.
            if (_object.transactionalId != null) {
                throw new UnsupportedVersionException("Attempted to write a non-default transactionalId at version " + _version);
            }
        }
        _node.set("acks", new ShortNode(_object.acks));
        _node.set("timeoutMs", new IntNode(_object.timeoutMs));
        ArrayNode _topicDataArray = new ArrayNode(JsonNodeFactory.instance);
        for (TopicProduceData _element : _object.topicData) {
            _topicDataArray.add(TopicProduceDataJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("topicData", _topicDataArray);
        return _node;
    }

    /** Convenience overload: serializes with record payloads included. */
    public static JsonNode write(ProduceRequestData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the nested {@code PartitionProduceData} struct. */
    public static class PartitionProduceDataJsonConverter {
        /**
         * Reads one partition entry: a mandatory {@code index} plus a nullable
         * {@code records} field decoded from its JSON binary representation.
         */
        public static PartitionProduceData read(JsonNode _node, short _version) {
            PartitionProduceData _object = new PartitionProduceData();
            JsonNode _indexNode = _node.get("index");
            if (_indexNode == null) {
                throw new RuntimeException("PartitionProduceData: unable to locate field 'index', which is mandatory in version " + _version);
            } else {
                _object.index = MessageUtil.jsonNodeToInt(_indexNode, "PartitionProduceData");
            }
            JsonNode _recordsNode = _node.get("records");
            if (_recordsNode == null) {
                throw new RuntimeException("PartitionProduceData: unable to locate field 'records', which is mandatory in version " + _version);
            } else {
                if (_recordsNode.isNull()) {
                    _object.records = null;
                } else {
                    // Binary JSON content is wrapped into a readable MemoryRecords view.
                    _object.records = MemoryRecords.readableRecords(ByteBuffer.wrap(MessageUtil.jsonNodeToBinary(_recordsNode, "PartitionProduceData")));
                }
            }
            return _object;
        }

        /**
         * Writes one partition entry. When {@code _serializeRecords} is false the
         * record payload is replaced by a {@code recordsSizeInBytes} integer —
         * useful for logging without dumping raw record bytes.
         */
        public static JsonNode write(PartitionProduceData _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("index", new IntNode(_object.index));
            if (_object.records == null) {
                _node.set("records", NullNode.instance);
            } else {
                if (_serializeRecords) {
                    // NOTE(review): generated code emits an empty byte array as a
                    // placeholder rather than the actual record bytes; kept as-is.
                    _node.set("records", new BinaryNode(new byte[]{}));
                } else {
                    _node.set("recordsSizeInBytes", new IntNode(_object.records.sizeInBytes()));
                }
            }
            return _node;
        }

        /** Convenience overload: serializes with record payloads included. */
        public static JsonNode write(PartitionProduceData _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** JSON converter for the nested {@code TopicProduceData} struct. */
    public static class TopicProduceDataJsonConverter {
        /**
         * Reads one topic entry: a mandatory string {@code name} and the
         * {@code partitionData} array, each element delegated to
         * {@link PartitionProduceDataJsonConverter}.
         */
        public static TopicProduceData read(JsonNode _node, short _version) {
            TopicProduceData _object = new TopicProduceData();
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("TopicProduceData: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("TopicProduceData expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _partitionDataNode = _node.get("partitionData");
            if (_partitionDataNode == null) {
                throw new RuntimeException("TopicProduceData: unable to locate field 'partitionData', which is mandatory in version " + _version);
            } else {
                if (!_partitionDataNode.isArray()) {
                    throw new RuntimeException("TopicProduceData expected a JSON array, but got " + _node.getNodeType());
                }
                // Pre-sized to the JSON array length to avoid resizes.
                ArrayList<PartitionProduceData> _collection = new ArrayList<PartitionProduceData>(_partitionDataNode.size());
                _object.partitionData = _collection;
                for (JsonNode _element : _partitionDataNode) {
                    _collection.add(PartitionProduceDataJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        /** Writes one topic entry; record handling is delegated per-partition. */
        public static JsonNode write(TopicProduceData _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            ArrayNode _partitionDataArray = new ArrayNode(JsonNodeFactory.instance);
            for (PartitionProduceData _element : _object.partitionData) {
                _partitionDataArray.add(PartitionProduceDataJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitionData", _partitionDataArray);
            return _node;
        }

        /** Convenience overload: serializes with record payloads included. */
        public static JsonNode write(TopicProduceData _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ProduceResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import 
org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class ProduceResponseData implements ApiMessage { TopicProduceResponseCollection responses; int throttleTimeMs; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("responses", new ArrayOf(TopicProduceResponse.SCHEMA_0), "Each produce response") ); public static final Schema SCHEMA_1 = new Schema( new Field("responses", new ArrayOf(TopicProduceResponse.SCHEMA_0), "Each produce response"), new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.") ); public static final Schema SCHEMA_2 = new Schema( new Field("responses", new ArrayOf(TopicProduceResponse.SCHEMA_2), "Each produce response"), new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.") ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("responses", new ArrayOf(TopicProduceResponse.SCHEMA_5), "Each produce response"), new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.") ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = new Schema( new Field("responses", new ArrayOf(TopicProduceResponse.SCHEMA_8), "Each produce response"), new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.") ); public static final 
Schema SCHEMA_9 = new Schema( new Field("responses", new CompactArrayOf(TopicProduceResponse.SCHEMA_9), "Each produce response"), new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 9; public ProduceResponseData(Readable _readable, short _version) { read(_readable, _version); } public ProduceResponseData() { this.responses = new TopicProduceResponseCollection(0); this.throttleTimeMs = 0; } @Override public short apiKey() { return 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 9; } @Override public void read(Readable _readable, short _version) { { if (_version >= 9) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field responses was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } TopicProduceResponseCollection newCollection = new TopicProduceResponseCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new TopicProduceResponse(_readable, _version)); } this.responses = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field responses was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + 
arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } TopicProduceResponseCollection newCollection = new TopicProduceResponseCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new TopicProduceResponse(_readable, _version)); } this.responses = newCollection; } } } if (_version >= 1) { this.throttleTimeMs = _readable.readInt(); } else { this.throttleTimeMs = 0; } this._unknownTaggedFields = null; if (_version >= 9) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 9) { _writable.writeUnsignedVarint(responses.size() + 1); for (TopicProduceResponse responsesElement : responses) { responsesElement.write(_writable, _cache, _version); } } else { _writable.writeInt(responses.size()); for (TopicProduceResponse responsesElement : responses) { responsesElement.write(_writable, _cache, _version); } } if (_version >= 1) { _writable.writeInt(throttleTimeMs); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 9) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { if (_version >= 9) { 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(responses.size() + 1)); } else { _size.addBytes(4); } for (TopicProduceResponse responsesElement : responses) { responsesElement.addSize(_size, _cache, _version); } } if (_version >= 1) { _size.addBytes(4); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof ProduceResponseData)) return false; ProduceResponseData other = (ProduceResponseData) obj; if (this.responses == null) { if (other.responses != null) return false; } else { if (!this.responses.equals(other.responses)) return false; } if (throttleTimeMs != other.throttleTimeMs) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (responses == null ? 
0 : responses.hashCode()); hashCode = 31 * hashCode + throttleTimeMs; return hashCode; } @Override public ProduceResponseData duplicate() { ProduceResponseData _duplicate = new ProduceResponseData(); TopicProduceResponseCollection newResponses = new TopicProduceResponseCollection(responses.size()); for (TopicProduceResponse _element : responses) { newResponses.add(_element.duplicate()); } _duplicate.responses = newResponses; _duplicate.throttleTimeMs = throttleTimeMs; return _duplicate; } @Override public String toString() { return "ProduceResponseData(" + "responses=" + MessageUtil.deepToString(responses.iterator()) + ", throttleTimeMs=" + throttleTimeMs + ")"; } public TopicProduceResponseCollection responses() { return this.responses; } public int throttleTimeMs() { return this.throttleTimeMs; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public ProduceResponseData setResponses(TopicProduceResponseCollection v) { this.responses = v; return this; } public ProduceResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public static class TopicProduceResponse implements Message, ImplicitLinkedHashMultiCollection.Element { String name; List<PartitionProduceResponse> partitionResponses; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The topic name"), new Field("partition_responses", new ArrayOf(PartitionProduceResponse.SCHEMA_0), "Each partition that we produced to within the topic.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("name", Type.STRING, "The topic name"), new Field("partition_responses", new ArrayOf(PartitionProduceResponse.SCHEMA_2), "Each partition that we produced to within the topic.") ); public static final Schema 
SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("name", Type.STRING, "The topic name"), new Field("partition_responses", new ArrayOf(PartitionProduceResponse.SCHEMA_5), "Each partition that we produced to within the topic.") ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = new Schema( new Field("name", Type.STRING, "The topic name"), new Field("partition_responses", new ArrayOf(PartitionProduceResponse.SCHEMA_8), "Each partition that we produced to within the topic.") ); public static final Schema SCHEMA_9 = new Schema( new Field("name", Type.COMPACT_STRING, "The topic name"), new Field("partition_responses", new CompactArrayOf(PartitionProduceResponse.SCHEMA_9), "Each partition that we produced to within the topic."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 9; public TopicProduceResponse(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public TopicProduceResponse() { this.name = ""; this.partitionResponses = new ArrayList<PartitionProduceResponse>(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 9; } @Override public void read(Readable _readable, short _version) { if (_version > 9) { throw new UnsupportedVersionException("Can't read version " + _version + " of TopicProduceResponse"); } { int length; if (_version >= 9) { length = 
_readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { if (_version >= 9) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitionResponses was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<PartitionProduceResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new PartitionProduceResponse(_readable, _version)); } this.partitionResponses = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitionResponses was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<PartitionProduceResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new PartitionProduceResponse(_readable, _version)); } this.partitionResponses = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 9) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } 
} } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 9) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 9) { _writable.writeUnsignedVarint(partitionResponses.size() + 1); for (PartitionProduceResponse partitionResponsesElement : partitionResponses) { partitionResponsesElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitionResponses.size()); for (PartitionProduceResponse partitionResponsesElement : partitionResponses) { partitionResponsesElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 9) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 9) { throw new UnsupportedVersionException("Can't size version " + _version + " of TopicProduceResponse"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); if (_version >= 9) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitionResponses.size() + 1)); } else { 
_size.addBytes(4); } for (PartitionProduceResponse partitionResponsesElement : partitionResponses) { partitionResponsesElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof TopicProduceResponse)) return false; TopicProduceResponse other = (TopicProduceResponse) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof TopicProduceResponse)) return false; TopicProduceResponse other = (TopicProduceResponse) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.partitionResponses == null) { if (other.partitionResponses != null) return false; } else { if (!this.partitionResponses.equals(other.partitionResponses)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 
0 : name.hashCode()); return hashCode; } @Override public TopicProduceResponse duplicate() { TopicProduceResponse _duplicate = new TopicProduceResponse(); _duplicate.name = name; ArrayList<PartitionProduceResponse> newPartitionResponses = new ArrayList<PartitionProduceResponse>(partitionResponses.size()); for (PartitionProduceResponse _element : partitionResponses) { newPartitionResponses.add(_element.duplicate()); } _duplicate.partitionResponses = newPartitionResponses; return _duplicate; } @Override public String toString() { return "TopicProduceResponse(" + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'") + ", partitionResponses=" + MessageUtil.deepToString(partitionResponses.iterator()) + ")"; } public String name() { return this.name; } public List<PartitionProduceResponse> partitionResponses() { return this.partitionResponses; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public TopicProduceResponse setName(String v) { this.name = v; return this; } public TopicProduceResponse setPartitionResponses(List<PartitionProduceResponse> v) { this.partitionResponses = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class PartitionProduceResponse implements Message { int index; short errorCode; long baseOffset; long logAppendTimeMs; long logStartOffset; List<BatchIndexAndErrorMessage> recordErrors; String errorMessage; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("base_offset", Type.INT64, "The base offset.") ); public static 
final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("base_offset", Type.INT64, "The base offset."), new Field("log_append_time_ms", Type.INT64, "The timestamp returned by broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended.") ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema SCHEMA_5 = new Schema( new Field("index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("base_offset", Type.INT64, "The base offset."), new Field("log_append_time_ms", Type.INT64, "The timestamp returned by broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended."), new Field("log_start_offset", Type.INT64, "The log start offset.") ); public static final Schema SCHEMA_6 = SCHEMA_5; public static final Schema SCHEMA_7 = SCHEMA_6; public static final Schema SCHEMA_8 = new Schema( new Field("index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("base_offset", Type.INT64, "The base offset."), new Field("log_append_time_ms", Type.INT64, "The timestamp returned by broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. 
If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended."), new Field("log_start_offset", Type.INT64, "The log start offset."), new Field("record_errors", new ArrayOf(BatchIndexAndErrorMessage.SCHEMA_8), "The batch indices of records that caused the batch to be dropped"), new Field("error_message", Type.NULLABLE_STRING, "The global error message summarizing the common root cause of the records that caused the batch to be dropped") ); public static final Schema SCHEMA_9 = new Schema( new Field("index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), new Field("base_offset", Type.INT64, "The base offset."), new Field("log_append_time_ms", Type.INT64, "The timestamp returned by broker after appending the messages. If CreateTime is used for the topic, the timestamp will be -1. If LogAppendTime is used for the topic, the timestamp will be the broker local time when the messages are appended."), new Field("log_start_offset", Type.INT64, "The log start offset."), new Field("record_errors", new CompactArrayOf(BatchIndexAndErrorMessage.SCHEMA_9), "The batch indices of records that caused the batch to be dropped"), new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The global error message summarizing the common root cause of the records that caused the batch to be dropped"), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4, SCHEMA_5, SCHEMA_6, SCHEMA_7, SCHEMA_8, SCHEMA_9 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 9; public PartitionProduceResponse(Readable _readable, short _version) { read(_readable, _version); } public PartitionProduceResponse() { this.index = 0; this.errorCode = (short) 0; this.baseOffset = 0L; this.logAppendTimeMs = -1L; this.logStartOffset = -1L; this.recordErrors = 
new ArrayList<BatchIndexAndErrorMessage>(0); this.errorMessage = null; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 9; } @Override public void read(Readable _readable, short _version) { if (_version > 9) { throw new UnsupportedVersionException("Can't read version " + _version + " of PartitionProduceResponse"); } this.index = _readable.readInt(); this.errorCode = _readable.readShort(); this.baseOffset = _readable.readLong(); if (_version >= 2) { this.logAppendTimeMs = _readable.readLong(); } else { this.logAppendTimeMs = -1L; } if (_version >= 5) { this.logStartOffset = _readable.readLong(); } else { this.logStartOffset = -1L; } if (_version >= 8) { if (_version >= 9) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field recordErrors was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<BatchIndexAndErrorMessage> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new BatchIndexAndErrorMessage(_readable, _version)); } this.recordErrors = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field recordErrors was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<BatchIndexAndErrorMessage> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new BatchIndexAndErrorMessage(_readable, _version)); } this.recordErrors = newCollection; } } } else { 
this.recordErrors = new ArrayList<BatchIndexAndErrorMessage>(0); } if (_version >= 8) { int length; if (_version >= 9) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.errorMessage = null; } else if (length > 0x7fff) { throw new RuntimeException("string field errorMessage had invalid length " + length); } else { this.errorMessage = _readable.readString(length); } } else { this.errorMessage = null; } this._unknownTaggedFields = null; if (_version >= 9) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(index); _writable.writeShort(errorCode); _writable.writeLong(baseOffset); if (_version >= 2) { _writable.writeLong(logAppendTimeMs); } if (_version >= 5) { _writable.writeLong(logStartOffset); } if (_version >= 8) { if (_version >= 9) { _writable.writeUnsignedVarint(recordErrors.size() + 1); for (BatchIndexAndErrorMessage recordErrorsElement : recordErrors) { recordErrorsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(recordErrors.size()); for (BatchIndexAndErrorMessage recordErrorsElement : recordErrors) { recordErrorsElement.write(_writable, _cache, _version); } } } if (_version >= 8) { if (errorMessage == null) { if (_version >= 9) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(errorMessage); if (_version >= 9) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); 
} } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 9) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 9) { throw new UnsupportedVersionException("Can't size version " + _version + " of PartitionProduceResponse"); } _size.addBytes(4); _size.addBytes(2); _size.addBytes(8); if (_version >= 2) { _size.addBytes(8); } if (_version >= 5) { _size.addBytes(8); } if (_version >= 8) { { if (_version >= 9) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(recordErrors.size() + 1)); } else { _size.addBytes(4); } for (BatchIndexAndErrorMessage recordErrorsElement : recordErrors) { recordErrorsElement.addSize(_size, _cache, _version); } } } if (_version >= 8) { if (errorMessage == null) { if (_version >= 9) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'errorMessage' field is too long to be serialized"); } _cache.cacheSerializedValue(errorMessage, _stringBytes); if (_version >= 9) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 9) { 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof PartitionProduceResponse)) return false; PartitionProduceResponse other = (PartitionProduceResponse) obj; if (index != other.index) return false; if (errorCode != other.errorCode) return false; if (baseOffset != other.baseOffset) return false; if (logAppendTimeMs != other.logAppendTimeMs) return false; if (logStartOffset != other.logStartOffset) return false; if (this.recordErrors == null) { if (other.recordErrors != null) return false; } else { if (!this.recordErrors.equals(other.recordErrors)) return false; } if (this.errorMessage == null) { if (other.errorMessage != null) return false; } else { if (!this.errorMessage.equals(other.errorMessage)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + index; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + ((int) (baseOffset >> 32) ^ (int) baseOffset); hashCode = 31 * hashCode + ((int) (logAppendTimeMs >> 32) ^ (int) logAppendTimeMs); hashCode = 31 * hashCode + ((int) (logStartOffset >> 32) ^ (int) logStartOffset); hashCode = 31 * hashCode + (recordErrors == null ? 0 : recordErrors.hashCode()); hashCode = 31 * hashCode + (errorMessage == null ? 
0 : errorMessage.hashCode()); return hashCode; } @Override public PartitionProduceResponse duplicate() { PartitionProduceResponse _duplicate = new PartitionProduceResponse(); _duplicate.index = index; _duplicate.errorCode = errorCode; _duplicate.baseOffset = baseOffset; _duplicate.logAppendTimeMs = logAppendTimeMs; _duplicate.logStartOffset = logStartOffset; ArrayList<BatchIndexAndErrorMessage> newRecordErrors = new ArrayList<BatchIndexAndErrorMessage>(recordErrors.size()); for (BatchIndexAndErrorMessage _element : recordErrors) { newRecordErrors.add(_element.duplicate()); } _duplicate.recordErrors = newRecordErrors; if (errorMessage == null) { _duplicate.errorMessage = null; } else { _duplicate.errorMessage = errorMessage; } return _duplicate; } @Override public String toString() { return "PartitionProduceResponse(" + "index=" + index + ", errorCode=" + errorCode + ", baseOffset=" + baseOffset + ", logAppendTimeMs=" + logAppendTimeMs + ", logStartOffset=" + logStartOffset + ", recordErrors=" + MessageUtil.deepToString(recordErrors.iterator()) + ", errorMessage=" + ((errorMessage == null) ? 
"null" : "'" + errorMessage.toString() + "'") + ")"; } public int index() { return this.index; } public short errorCode() { return this.errorCode; } public long baseOffset() { return this.baseOffset; } public long logAppendTimeMs() { return this.logAppendTimeMs; } public long logStartOffset() { return this.logStartOffset; } public List<BatchIndexAndErrorMessage> recordErrors() { return this.recordErrors; } public String errorMessage() { return this.errorMessage; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public PartitionProduceResponse setIndex(int v) { this.index = v; return this; } public PartitionProduceResponse setErrorCode(short v) { this.errorCode = v; return this; } public PartitionProduceResponse setBaseOffset(long v) { this.baseOffset = v; return this; } public PartitionProduceResponse setLogAppendTimeMs(long v) { this.logAppendTimeMs = v; return this; } public PartitionProduceResponse setLogStartOffset(long v) { this.logStartOffset = v; return this; } public PartitionProduceResponse setRecordErrors(List<BatchIndexAndErrorMessage> v) { this.recordErrors = v; return this; } public PartitionProduceResponse setErrorMessage(String v) { this.errorMessage = v; return this; } } public static class BatchIndexAndErrorMessage implements Message { int batchIndex; String batchIndexErrorMessage; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_8 = new Schema( new Field("batch_index", Type.INT32, "The batch index of the record that cause the batch to be dropped"), new Field("batch_index_error_message", Type.NULLABLE_STRING, "The error message of the record that caused the batch to be dropped") ); public static final Schema SCHEMA_9 = new Schema( new Field("batch_index", Type.INT32, "The batch index of the record that cause the batch to be dropped"), new Field("batch_index_error_message", 
Type.COMPACT_NULLABLE_STRING, "The error message of the record that caused the batch to be dropped"), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { null, null, null, null, null, null, null, null, SCHEMA_8, SCHEMA_9 }; public static final short LOWEST_SUPPORTED_VERSION = 8; public static final short HIGHEST_SUPPORTED_VERSION = 9; public BatchIndexAndErrorMessage(Readable _readable, short _version) { read(_readable, _version); } public BatchIndexAndErrorMessage() { this.batchIndex = 0; this.batchIndexErrorMessage = null; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 9; } @Override public void read(Readable _readable, short _version) { if (_version > 9) { throw new UnsupportedVersionException("Can't read version " + _version + " of BatchIndexAndErrorMessage"); } this.batchIndex = _readable.readInt(); { int length; if (_version >= 9) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.batchIndexErrorMessage = null; } else if (length > 0x7fff) { throw new RuntimeException("string field batchIndexErrorMessage had invalid length " + length); } else { this.batchIndexErrorMessage = _readable.readString(length); } } this._unknownTaggedFields = null; if (_version >= 9) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { if (_version < 8) { throw new UnsupportedVersionException("Can't write version " + _version + " of BatchIndexAndErrorMessage"); } int _numTaggedFields = 0; _writable.writeInt(batchIndex); if (batchIndexErrorMessage 
== null) { if (_version >= 9) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(batchIndexErrorMessage); if (_version >= 9) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 9) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 9) { throw new UnsupportedVersionException("Can't size version " + _version + " of BatchIndexAndErrorMessage"); } _size.addBytes(4); if (batchIndexErrorMessage == null) { if (_version >= 9) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = batchIndexErrorMessage.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'batchIndexErrorMessage' field is too long to be serialized"); } _cache.cacheSerializedValue(batchIndexErrorMessage, _stringBytes); if (_version >= 9) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 9) { 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof BatchIndexAndErrorMessage)) return false; BatchIndexAndErrorMessage other = (BatchIndexAndErrorMessage) obj; if (batchIndex != other.batchIndex) return false; if (this.batchIndexErrorMessage == null) { if (other.batchIndexErrorMessage != null) return false; } else { if (!this.batchIndexErrorMessage.equals(other.batchIndexErrorMessage)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + batchIndex; hashCode = 31 * hashCode + (batchIndexErrorMessage == null ? 0 : batchIndexErrorMessage.hashCode()); return hashCode; } @Override public BatchIndexAndErrorMessage duplicate() { BatchIndexAndErrorMessage _duplicate = new BatchIndexAndErrorMessage(); _duplicate.batchIndex = batchIndex; if (batchIndexErrorMessage == null) { _duplicate.batchIndexErrorMessage = null; } else { _duplicate.batchIndexErrorMessage = batchIndexErrorMessage; } return _duplicate; } @Override public String toString() { return "BatchIndexAndErrorMessage(" + "batchIndex=" + batchIndex + ", batchIndexErrorMessage=" + ((batchIndexErrorMessage == null) ? 
"null" : "'" + batchIndexErrorMessage.toString() + "'") + ")"; } public int batchIndex() { return this.batchIndex; } public String batchIndexErrorMessage() { return this.batchIndexErrorMessage; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public BatchIndexAndErrorMessage setBatchIndex(int v) { this.batchIndex = v; return this; } public BatchIndexAndErrorMessage setBatchIndexErrorMessage(String v) { this.batchIndexErrorMessage = v; return this; } } public static class TopicProduceResponseCollection extends ImplicitLinkedHashMultiCollection<TopicProduceResponse> { public TopicProduceResponseCollection() { super(); } public TopicProduceResponseCollection(int expectedNumElements) { super(expectedNumElements); } public TopicProduceResponseCollection(Iterator<TopicProduceResponse> iterator) { super(iterator); } public TopicProduceResponse find(String name) { TopicProduceResponse _key = new TopicProduceResponse(); _key.setName(name); return find(_key); } public List<TopicProduceResponse> findAll(String name) { TopicProduceResponse _key = new TopicProduceResponse(); _key.setName(name); return findAll(_key); } public TopicProduceResponseCollection duplicate() { TopicProduceResponseCollection _duplicate = new TopicProduceResponseCollection(size()); for (TopicProduceResponse _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ProduceResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.LongNode; import com.fasterxml.jackson.databind.node.NullNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.ProduceResponseData.*; public class ProduceResponseDataJsonConverter { public static ProduceResponseData read(JsonNode _node, short _version) { ProduceResponseData _object = new ProduceResponseData(); JsonNode _responsesNode = _node.get("responses"); if (_responsesNode == null) { throw new RuntimeException("ProduceResponseData: unable to locate field 'responses', which is mandatory in version " + _version); } else { if 
(!_responsesNode.isArray()) { throw new RuntimeException("ProduceResponseData expected a JSON array, but got " + _node.getNodeType()); } TopicProduceResponseCollection _collection = new TopicProduceResponseCollection(_responsesNode.size()); _object.responses = _collection; for (JsonNode _element : _responsesNode) { _collection.add(TopicProduceResponseJsonConverter.read(_element, _version)); } } JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { if (_version >= 1) { throw new RuntimeException("ProduceResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = 0; } } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "ProduceResponseData"); } return _object; } public static JsonNode write(ProduceResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); ArrayNode _responsesArray = new ArrayNode(JsonNodeFactory.instance); for (TopicProduceResponse _element : _object.responses) { _responsesArray.add(TopicProduceResponseJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("responses", _responsesArray); if (_version >= 1) { _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); } return _node; } public static JsonNode write(ProduceResponseData _object, short _version) { return write(_object, _version, true); } public static class BatchIndexAndErrorMessageJsonConverter { public static BatchIndexAndErrorMessage read(JsonNode _node, short _version) { BatchIndexAndErrorMessage _object = new BatchIndexAndErrorMessage(); if (_version < 8) { throw new UnsupportedVersionException("Can't read version " + _version + " of BatchIndexAndErrorMessage"); } JsonNode _batchIndexNode = _node.get("batchIndex"); if (_batchIndexNode == null) { throw new RuntimeException("BatchIndexAndErrorMessage: unable to locate field 'batchIndex', which is 
mandatory in version " + _version); } else { _object.batchIndex = MessageUtil.jsonNodeToInt(_batchIndexNode, "BatchIndexAndErrorMessage"); } JsonNode _batchIndexErrorMessageNode = _node.get("batchIndexErrorMessage"); if (_batchIndexErrorMessageNode == null) { throw new RuntimeException("BatchIndexAndErrorMessage: unable to locate field 'batchIndexErrorMessage', which is mandatory in version " + _version); } else { if (_batchIndexErrorMessageNode.isNull()) { _object.batchIndexErrorMessage = null; } else { if (!_batchIndexErrorMessageNode.isTextual()) { throw new RuntimeException("BatchIndexAndErrorMessage expected a string type, but got " + _node.getNodeType()); } _object.batchIndexErrorMessage = _batchIndexErrorMessageNode.asText(); } } return _object; } public static JsonNode write(BatchIndexAndErrorMessage _object, short _version, boolean _serializeRecords) { if (_version < 8) { throw new UnsupportedVersionException("Can't write version " + _version + " of BatchIndexAndErrorMessage"); } ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("batchIndex", new IntNode(_object.batchIndex)); if (_object.batchIndexErrorMessage == null) { _node.set("batchIndexErrorMessage", NullNode.instance); } else { _node.set("batchIndexErrorMessage", new TextNode(_object.batchIndexErrorMessage)); } return _node; } public static JsonNode write(BatchIndexAndErrorMessage _object, short _version) { return write(_object, _version, true); } } public static class PartitionProduceResponseJsonConverter { public static PartitionProduceResponse read(JsonNode _node, short _version) { PartitionProduceResponse _object = new PartitionProduceResponse(); JsonNode _indexNode = _node.get("index"); if (_indexNode == null) { throw new RuntimeException("PartitionProduceResponse: unable to locate field 'index', which is mandatory in version " + _version); } else { _object.index = MessageUtil.jsonNodeToInt(_indexNode, "PartitionProduceResponse"); } JsonNode _errorCodeNode = 
_node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("PartitionProduceResponse: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "PartitionProduceResponse"); } JsonNode _baseOffsetNode = _node.get("baseOffset"); if (_baseOffsetNode == null) { throw new RuntimeException("PartitionProduceResponse: unable to locate field 'baseOffset', which is mandatory in version " + _version); } else { _object.baseOffset = MessageUtil.jsonNodeToLong(_baseOffsetNode, "PartitionProduceResponse"); } JsonNode _logAppendTimeMsNode = _node.get("logAppendTimeMs"); if (_logAppendTimeMsNode == null) { if (_version >= 2) { throw new RuntimeException("PartitionProduceResponse: unable to locate field 'logAppendTimeMs', which is mandatory in version " + _version); } else { _object.logAppendTimeMs = -1L; } } else { _object.logAppendTimeMs = MessageUtil.jsonNodeToLong(_logAppendTimeMsNode, "PartitionProduceResponse"); } JsonNode _logStartOffsetNode = _node.get("logStartOffset"); if (_logStartOffsetNode == null) { if (_version >= 5) { throw new RuntimeException("PartitionProduceResponse: unable to locate field 'logStartOffset', which is mandatory in version " + _version); } else { _object.logStartOffset = -1L; } } else { _object.logStartOffset = MessageUtil.jsonNodeToLong(_logStartOffsetNode, "PartitionProduceResponse"); } JsonNode _recordErrorsNode = _node.get("recordErrors"); if (_recordErrorsNode == null) { if (_version >= 8) { throw new RuntimeException("PartitionProduceResponse: unable to locate field 'recordErrors', which is mandatory in version " + _version); } else { _object.recordErrors = new ArrayList<BatchIndexAndErrorMessage>(0); } } else { if (!_recordErrorsNode.isArray()) { throw new RuntimeException("PartitionProduceResponse expected a JSON array, but got " + _node.getNodeType()); } ArrayList<BatchIndexAndErrorMessage> _collection = new 
ArrayList<BatchIndexAndErrorMessage>(_recordErrorsNode.size()); _object.recordErrors = _collection; for (JsonNode _element : _recordErrorsNode) { _collection.add(BatchIndexAndErrorMessageJsonConverter.read(_element, _version)); } } JsonNode _errorMessageNode = _node.get("errorMessage"); if (_errorMessageNode == null) { if (_version >= 8) { throw new RuntimeException("PartitionProduceResponse: unable to locate field 'errorMessage', which is mandatory in version " + _version); } else { _object.errorMessage = null; } } else { if (_errorMessageNode.isNull()) { _object.errorMessage = null; } else { if (!_errorMessageNode.isTextual()) { throw new RuntimeException("PartitionProduceResponse expected a string type, but got " + _node.getNodeType()); } _object.errorMessage = _errorMessageNode.asText(); } } return _object; } public static JsonNode write(PartitionProduceResponse _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("index", new IntNode(_object.index)); _node.set("errorCode", new ShortNode(_object.errorCode)); _node.set("baseOffset", new LongNode(_object.baseOffset)); if (_version >= 2) { _node.set("logAppendTimeMs", new LongNode(_object.logAppendTimeMs)); } if (_version >= 5) { _node.set("logStartOffset", new LongNode(_object.logStartOffset)); } if (_version >= 8) { ArrayNode _recordErrorsArray = new ArrayNode(JsonNodeFactory.instance); for (BatchIndexAndErrorMessage _element : _object.recordErrors) { _recordErrorsArray.add(BatchIndexAndErrorMessageJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("recordErrors", _recordErrorsArray); } if (_version >= 8) { if (_object.errorMessage == null) { _node.set("errorMessage", NullNode.instance); } else { _node.set("errorMessage", new TextNode(_object.errorMessage)); } } return _node; } public static JsonNode write(PartitionProduceResponse _object, short _version) { return write(_object, _version, true); } } public static class 
TopicProduceResponseJsonConverter {
    /**
     * Deserializes a TopicProduceResponse from its JSON representation.
     * Both 'name' and 'partitionResponses' are mandatory in every version.
     */
    public static TopicProduceResponse read(JsonNode _node, short _version) {
        TopicProduceResponse result = new TopicProduceResponse();
        JsonNode nameJson = _node.get("name");
        if (nameJson == null) {
            throw new RuntimeException("TopicProduceResponse: unable to locate field 'name', which is mandatory in version " + _version);
        }
        if (!nameJson.isTextual()) {
            throw new RuntimeException("TopicProduceResponse expected a string type, but got " + _node.getNodeType());
        }
        result.name = nameJson.asText();
        JsonNode partitionsJson = _node.get("partitionResponses");
        if (partitionsJson == null) {
            throw new RuntimeException("TopicProduceResponse: unable to locate field 'partitionResponses', which is mandatory in version " + _version);
        }
        if (!partitionsJson.isArray()) {
            throw new RuntimeException("TopicProduceResponse expected a JSON array, but got " + _node.getNodeType());
        }
        ArrayList<PartitionProduceResponse> partitions = new ArrayList<PartitionProduceResponse>(partitionsJson.size());
        result.partitionResponses = partitions;
        for (JsonNode partitionJson : partitionsJson) {
            partitions.add(PartitionProduceResponseJsonConverter.read(partitionJson, _version));
        }
        return result;
    }

    /**
     * Serializes a TopicProduceResponse to JSON, delegating each partition
     * entry to PartitionProduceResponseJsonConverter.
     */
    public static JsonNode write(TopicProduceResponse _object, short _version, boolean _serializeRecords) {
        ObjectNode out = new ObjectNode(JsonNodeFactory.instance);
        out.set("name", new TextNode(_object.name));
        ArrayNode partitionsOut = new ArrayNode(JsonNodeFactory.instance);
        for (PartitionProduceResponse partition : _object.partitionResponses) {
            partitionsOut.add(PartitionProduceResponseJsonConverter.write(partition, _version, _serializeRecords));
        }
        out.set("partitionResponses", partitionsOut);
        return out;
    }

    // Convenience overload: record payloads are serialized by default.
    public static JsonNode write(TopicProduceResponse _object, short _version) {
        return write(_object, _version, true);
    }
}
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/RenewDelegationTokenRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.Bytes;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Request payload for the RenewDelegationToken API (api key 39), versions 0-2.
 * Versions 0-1 use fixed-width encodings; version 2 is flexible: it uses the
 * compact (varint-prefixed) bytes encoding and carries a tagged-fields section.
 */
public class RenewDelegationTokenRequestData implements ApiMessage {
    // The HMAC of the delegation token to be renewed.
    byte[] hmac;
    // The renewal time period in milliseconds.
    long renewPeriodMs;
    // Tagged fields read off the wire whose tags this code does not recognize;
    // lazily allocated, preserved so they can be re-serialized verbatim.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("hmac", Type.BYTES, "The HMAC of the delegation token to be renewed."),
            new Field("renew_period_ms", Type.INT64, "The renewal time period in milliseconds.")
        );

    // Version 1 is wire-identical to version 0.
    public static final Schema SCHEMA_1 = SCHEMA_0;

    // Version 2 switches hmac to the compact encoding and adds tagged fields.
    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("hmac", Type.COMPACT_BYTES, "The HMAC of the delegation token to be renewed."),
            new Field("renew_period_ms", Type.INT64, "The renewal time period in milliseconds."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 2;

    /** Constructs the message by immediately deserializing it from {@code _readable}. */
    public RenewDelegationTokenRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Constructs the message with default field values (empty hmac, renewPeriodMs of 0). */
    public RenewDelegationTokenRequestData() {
        this.hmac = Bytes.EMPTY;
        this.renewPeriodMs = 0L;
    }

    @Override
    public short apiKey() {
        return 39;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 2;
    }

    /**
     * Deserializes this message from {@code _readable} using the wire format of
     * {@code _version}. Unknown tagged fields (flexible versions only) are retained.
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            if (_version >= 2) {
                // Compact bytes: unsigned varint holds length + 1, so 0 means null.
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readInt();
            }
            if (length < 0) {
                // hmac is non-nullable in every supported version.
                throw new RuntimeException("non-nullable field hmac was serialized as null");
            } else {
                byte[] newBytes = _readable.readArray(length);
                this.hmac = newBytes;
            }
        }
        this.renewPeriodMs = _readable.readLong();
        this._unknownTaggedFields = null;
        if (_version >= 2) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        // No known tags are declared for this message; buffer everything.
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Serializes this message to {@code _writable} using the wire format of
     * {@code _version}. Throws UnsupportedVersionException if tagged fields are
     * present but the version is not flexible.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 2) {
            // Compact bytes encoding: length + 1 as an unsigned varint.
            _writable.writeUnsignedVarint(hmac.length + 1);
        } else {
            _writable.writeInt(hmac.length);
        }
        _writable.writeByteArray(hmac);
        _writable.writeLong(renewPeriodMs);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 2) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this message for {@code _version} into
     * {@code _size}. Must mirror the encoding decisions made by write().
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            _size.addBytes(hmac.length);
            if (_version >= 2) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(hmac.length + 1));
            } else {
                // Fixed 4-byte length prefix in non-flexible versions.
                _size.addBytes(4);
            }
        }
        // renewPeriodMs: INT64.
        _size.addBytes(8);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 2) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof RenewDelegationTokenRequestData)) return false;
        RenewDelegationTokenRequestData other = (RenewDelegationTokenRequestData) obj;
        if (!Arrays.equals(this.hmac, other.hmac)) return false;
        if (renewPeriodMs != other.renewPeriodMs) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + Arrays.hashCode(hmac);
        // Fold both halves of the long, same mixing as Long.hashCode.
        hashCode = 31 * hashCode + ((int) (renewPeriodMs >> 32) ^ (int) renewPeriodMs);
        return hashCode;
    }

    /** Returns a deep copy (the hmac array is duplicated, not shared). */
    @Override
    public RenewDelegationTokenRequestData duplicate() {
        RenewDelegationTokenRequestData _duplicate = new RenewDelegationTokenRequestData();
        _duplicate.hmac = MessageUtil.duplicate(hmac);
        _duplicate.renewPeriodMs = renewPeriodMs;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "RenewDelegationTokenRequestData("
            + "hmac=" + Arrays.toString(hmac)
            + ", renewPeriodMs=" + renewPeriodMs
            + ")";
    }

    public byte[] hmac() {
        return this.hmac;
    }

    public long renewPeriodMs() {
        return this.renewPeriodMs;
    }

    /** Returns the unknown-tagged-fields list, allocating an empty one on first use. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public RenewDelegationTokenRequestData setHmac(byte[] v) {
        this.hmac = v;
        return this;
    }

    public RenewDelegationTokenRequestData setRenewPeriodMs(long v) {
        this.renewPeriodMs = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/RenewDelegationTokenRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.BinaryNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.LongNode; import com.fasterxml.jackson.databind.node.ObjectNode; import java.util.Arrays; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.RenewDelegationTokenRequestData.*; public class RenewDelegationTokenRequestDataJsonConverter { public static RenewDelegationTokenRequestData read(JsonNode _node, short _version) { RenewDelegationTokenRequestData _object = new RenewDelegationTokenRequestData(); JsonNode _hmacNode = _node.get("hmac"); if (_hmacNode == null) { throw new RuntimeException("RenewDelegationTokenRequestData: unable to locate field 'hmac', which is mandatory in version " + _version); } else { _object.hmac = MessageUtil.jsonNodeToBinary(_hmacNode, "RenewDelegationTokenRequestData"); } JsonNode _renewPeriodMsNode = _node.get("renewPeriodMs"); if (_renewPeriodMsNode == null) { throw new RuntimeException("RenewDelegationTokenRequestData: unable to 
locate field 'renewPeriodMs', which is mandatory in version " + _version); } else { _object.renewPeriodMs = MessageUtil.jsonNodeToLong(_renewPeriodMsNode, "RenewDelegationTokenRequestData"); } return _object; } public static JsonNode write(RenewDelegationTokenRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("hmac", new BinaryNode(Arrays.copyOf(_object.hmac, _object.hmac.length))); _node.set("renewPeriodMs", new LongNode(_object.renewPeriodMs)); return _node; } public static JsonNode write(RenewDelegationTokenRequestData _object, short _version) { return write(_object, _version, true); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/RenewDelegationTokenResponseData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Response payload for the RenewDelegationToken API (api key 39), versions 0-2.
 * All three fields are fixed-width in every version; version 2 additionally
 * carries a tagged-fields section (flexible version).
 */
public class RenewDelegationTokenResponseData implements ApiMessage {
    // The error code, or 0 if there was no error.
    short errorCode;
    // The timestamp in milliseconds at which this token expires.
    long expiryTimestampMs;
    // Throttle duration in milliseconds due to quota violation, or zero.
    int throttleTimeMs;
    // Tagged fields read off the wire whose tags this code does not recognize;
    // lazily allocated, preserved so they can be re-serialized verbatim.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("expiry_timestamp_ms", Type.INT64, "The timestamp in milliseconds at which this token expires."),
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.")
        );

    // Version 1 is wire-identical to version 0.
    public static final Schema SCHEMA_1 = SCHEMA_0;

    // Version 2 adds the tagged-fields section.
    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("expiry_timestamp_ms", Type.INT64, "The timestamp in milliseconds at which this token expires."),
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 2;

    /** Constructs the message by immediately deserializing it from {@code _readable}. */
    public RenewDelegationTokenResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Constructs the message with all fields zeroed. */
    public RenewDelegationTokenResponseData() {
        this.errorCode = (short) 0;
        this.expiryTimestampMs = 0L;
        this.throttleTimeMs = 0;
    }

    @Override
    public short apiKey() {
        return 39;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 2;
    }

    /**
     * Deserializes this message from {@code _readable} using the wire format of
     * {@code _version}. Unknown tagged fields (flexible versions only) are retained.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.errorCode = _readable.readShort();
        this.expiryTimestampMs = _readable.readLong();
        this.throttleTimeMs = _readable.readInt();
        this._unknownTaggedFields = null;
        if (_version >= 2) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        // No known tags are declared for this message; buffer everything.
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Serializes this message to {@code _writable} using the wire format of
     * {@code _version}. Throws UnsupportedVersionException if tagged fields are
     * present but the version is not flexible.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeShort(errorCode);
        _writable.writeLong(expiryTimestampMs);
        _writable.writeInt(throttleTimeMs);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 2) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this message for {@code _version} into
     * {@code _size}. Must mirror the encoding decisions made by write().
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        // errorCode: INT16, expiryTimestampMs: INT64, throttleTimeMs: INT32.
        _size.addBytes(2);
        _size.addBytes(8);
        _size.addBytes(4);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 2) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof RenewDelegationTokenResponseData)) return false;
        RenewDelegationTokenResponseData other = (RenewDelegationTokenResponseData) obj;
        if (errorCode != other.errorCode) return false;
        if (expiryTimestampMs != other.expiryTimestampMs) return false;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + errorCode;
        // Fold both halves of the long, same mixing as Long.hashCode.
        hashCode = 31 * hashCode + ((int) (expiryTimestampMs >> 32) ^ (int) expiryTimestampMs);
        hashCode = 31 * hashCode + throttleTimeMs;
        return hashCode;
    }

    /** Returns a field-by-field copy of this message. */
    @Override
    public RenewDelegationTokenResponseData duplicate() {
        RenewDelegationTokenResponseData _duplicate = new RenewDelegationTokenResponseData();
        _duplicate.errorCode = errorCode;
        _duplicate.expiryTimestampMs = expiryTimestampMs;
        _duplicate.throttleTimeMs = throttleTimeMs;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "RenewDelegationTokenResponseData("
            + "errorCode=" + errorCode
            + ", expiryTimestampMs=" + expiryTimestampMs
            + ", throttleTimeMs=" + throttleTimeMs
            + ")";
    }

    public short errorCode() {
        return this.errorCode;
    }

    public long expiryTimestampMs() {
        return this.expiryTimestampMs;
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    /** Returns the unknown-tagged-fields list, allocating an empty one on first use. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public RenewDelegationTokenResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }

    public RenewDelegationTokenResponseData setExpiryTimestampMs(long v) {
        this.expiryTimestampMs = v;
        return this;
    }

    public RenewDelegationTokenResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/RenewDelegationTokenResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.LongNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.RenewDelegationTokenResponseData.*; public class RenewDelegationTokenResponseDataJsonConverter { public static RenewDelegationTokenResponseData read(JsonNode _node, short _version) { RenewDelegationTokenResponseData _object = new RenewDelegationTokenResponseData(); JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("RenewDelegationTokenResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "RenewDelegationTokenResponseData"); } JsonNode _expiryTimestampMsNode = _node.get("expiryTimestampMs"); if (_expiryTimestampMsNode == null) 
{ throw new RuntimeException("RenewDelegationTokenResponseData: unable to locate field 'expiryTimestampMs', which is mandatory in version " + _version); } else { _object.expiryTimestampMs = MessageUtil.jsonNodeToLong(_expiryTimestampMsNode, "RenewDelegationTokenResponseData"); } JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { throw new RuntimeException("RenewDelegationTokenResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "RenewDelegationTokenResponseData"); } return _object; } public static JsonNode write(RenewDelegationTokenResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("errorCode", new ShortNode(_object.errorCode)); _node.set("expiryTimestampMs", new LongNode(_object.expiryTimestampMs)); _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); return _node; } public static JsonNode write(RenewDelegationTokenResponseData _object, short _version) { return write(_object, _version, true); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/RequestHeaderData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * The common request header, versions 0-2. Version 0 has no client id; version 1
 * adds a nullable client-id string; version 2 adds a tagged-fields section.
 * apiKey() returns -1 because this is a header, not an API message in its own right.
 */
public class RequestHeaderData implements ApiMessage {
    // The API key of this request.
    short requestApiKey;
    // The API version of this request.
    short requestApiVersion;
    // The correlation ID of this request.
    int correlationId;
    // The client ID string; nullable on the wire in versions 1+.
    String clientId;
    // Tagged fields read off the wire whose tags this code does not recognize;
    // lazily allocated, preserved so they can be re-serialized verbatim.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("request_api_key", Type.INT16, "The API key of this request."),
            new Field("request_api_version", Type.INT16, "The API version of this request."),
            new Field("correlation_id", Type.INT32, "The correlation ID of this request.")
        );

    // Version 1 adds the nullable client_id string.
    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("request_api_key", Type.INT16, "The API key of this request."),
            new Field("request_api_version", Type.INT16, "The API version of this request."),
            new Field("correlation_id", Type.INT32, "The correlation ID of this request."),
            new Field("client_id", Type.NULLABLE_STRING, "The client ID string.")
        );

    // Version 2 adds the tagged-fields section; note client_id stays a
    // non-compact NULLABLE_STRING even in this flexible version.
    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("request_api_key", Type.INT16, "The API key of this request."),
            new Field("request_api_version", Type.INT16, "The API version of this request."),
            new Field("correlation_id", Type.INT32, "The correlation ID of this request."),
            new Field("client_id", Type.NULLABLE_STRING, "The client ID string."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 2;

    /** Constructs the header by immediately deserializing it from {@code _readable}. */
    public RequestHeaderData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Constructs the header with zeroed numeric fields and an empty client id. */
    public RequestHeaderData() {
        this.requestApiKey = (short) 0;
        this.requestApiVersion = (short) 0;
        this.correlationId = 0;
        this.clientId = "";
    }

    @Override
    public short apiKey() {
        // Headers have no API key of their own.
        return -1;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 2;
    }

    /**
     * Deserializes this header from {@code _readable} using the wire format of
     * {@code _version}. A negative clientId length means null; lengths above
     * 0x7fff are rejected.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.requestApiKey = _readable.readShort();
        this.requestApiVersion = _readable.readShort();
        this.correlationId = _readable.readInt();
        if (_version >= 1) {
            int length;
            length = _readable.readShort();
            if (length < 0) {
                this.clientId = null;
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field clientId had invalid length " + length);
            } else {
                this.clientId = _readable.readString(length);
            }
        } else {
            // client_id does not exist in version 0.
            this.clientId = "";
        }
        this._unknownTaggedFields = null;
        if (_version >= 2) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        // No known tags are declared for this header; buffer everything.
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Serializes this header to {@code _writable}. Relies on addSize() having
     * already cached the UTF-8 bytes of clientId in {@code _cache}.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeShort(requestApiKey);
        _writable.writeShort(requestApiVersion);
        _writable.writeInt(correlationId);
        if (_version >= 1) {
            if (clientId == null) {
                // Null string is encoded as length -1.
                _writable.writeShort((short) -1);
            } else {
                byte[] _stringBytes = _cache.getSerializedValue(clientId);
                _writable.writeShort((short) _stringBytes.length);
                _writable.writeByteArray(_stringBytes);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 2) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this header into {@code _size} and
     * caches the UTF-8 encoding of clientId for the subsequent write() call.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        // requestApiKey: INT16, requestApiVersion: INT16, correlationId: INT32.
        _size.addBytes(2);
        _size.addBytes(2);
        _size.addBytes(4);
        if (_version >= 1) {
            if (clientId == null) {
                _size.addBytes(2);
            } else {
                byte[] _stringBytes = clientId.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'clientId' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(clientId, _stringBytes);
                _size.addBytes(_stringBytes.length + 2);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 2) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof RequestHeaderData)) return false;
        RequestHeaderData other = (RequestHeaderData) obj;
        if (requestApiKey != other.requestApiKey) return false;
        if (requestApiVersion != other.requestApiVersion) return false;
        if (correlationId != other.correlationId) return false;
        if (this.clientId == null) {
            if (other.clientId != null) return false;
        } else {
            if (!this.clientId.equals(other.clientId)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + requestApiKey;
        hashCode = 31 * hashCode + requestApiVersion;
        hashCode = 31 * hashCode + correlationId;
        hashCode = 31 * hashCode + (clientId == null ? 0 : clientId.hashCode());
        return hashCode;
    }

    /** Returns a field-by-field copy (Strings are immutable, so sharing is safe). */
    @Override
    public RequestHeaderData duplicate() {
        RequestHeaderData _duplicate = new RequestHeaderData();
        _duplicate.requestApiKey = requestApiKey;
        _duplicate.requestApiVersion = requestApiVersion;
        _duplicate.correlationId = correlationId;
        if (clientId == null) {
            _duplicate.clientId = null;
        } else {
            _duplicate.clientId = clientId;
        }
        return _duplicate;
    }

    @Override
    public String toString() {
        return "RequestHeaderData("
            + "requestApiKey=" + requestApiKey
            + ", requestApiVersion=" + requestApiVersion
            + ", correlationId=" + correlationId
            + ", clientId=" + ((clientId == null) ? "null" : "'" + clientId.toString() + "'")
            + ")";
    }

    public short requestApiKey() {
        return this.requestApiKey;
    }

    public short requestApiVersion() {
        return this.requestApiVersion;
    }

    public int correlationId() {
        return this.correlationId;
    }

    public String clientId() {
        return this.clientId;
    }

    /** Returns the unknown-tagged-fields list, allocating an empty one on first use. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public RequestHeaderData setRequestApiKey(short v) {
        this.requestApiKey = v;
        return this;
    }

    public RequestHeaderData setRequestApiVersion(short v) {
        this.requestApiVersion = v;
        return this;
    }

    public RequestHeaderData setCorrelationId(int v) {
        this.correlationId = v;
        return this;
    }

    public RequestHeaderData setClientId(String v) {
        this.clientId = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/RequestHeaderDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.NullNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.RequestHeaderData.*; public class RequestHeaderDataJsonConverter { public static RequestHeaderData read(JsonNode _node, short _version) { RequestHeaderData _object = new RequestHeaderData(); JsonNode _requestApiKeyNode = _node.get("requestApiKey"); if (_requestApiKeyNode == null) { throw new RuntimeException("RequestHeaderData: unable to locate field 'requestApiKey', which is mandatory in version " + _version); } else { _object.requestApiKey = MessageUtil.jsonNodeToShort(_requestApiKeyNode, "RequestHeaderData"); } JsonNode _requestApiVersionNode = _node.get("requestApiVersion"); if (_requestApiVersionNode == null) { throw new 
RuntimeException("RequestHeaderData: unable to locate field 'requestApiVersion', which is mandatory in version " + _version); } else { _object.requestApiVersion = MessageUtil.jsonNodeToShort(_requestApiVersionNode, "RequestHeaderData"); } JsonNode _correlationIdNode = _node.get("correlationId"); if (_correlationIdNode == null) { throw new RuntimeException("RequestHeaderData: unable to locate field 'correlationId', which is mandatory in version " + _version); } else { _object.correlationId = MessageUtil.jsonNodeToInt(_correlationIdNode, "RequestHeaderData"); } JsonNode _clientIdNode = _node.get("clientId"); if (_clientIdNode == null) { if (_version >= 1) { throw new RuntimeException("RequestHeaderData: unable to locate field 'clientId', which is mandatory in version " + _version); } else { _object.clientId = ""; } } else { if (_clientIdNode.isNull()) { _object.clientId = null; } else { if (!_clientIdNode.isTextual()) { throw new RuntimeException("RequestHeaderData expected a string type, but got " + _node.getNodeType()); } _object.clientId = _clientIdNode.asText(); } } return _object; } public static JsonNode write(RequestHeaderData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("requestApiKey", new ShortNode(_object.requestApiKey)); _node.set("requestApiVersion", new ShortNode(_object.requestApiVersion)); _node.set("correlationId", new IntNode(_object.correlationId)); if (_version >= 1) { if (_object.clientId == null) { _node.set("clientId", NullNode.instance); } else { _node.set("clientId", new TextNode(_object.clientId)); } } return _node; } public static JsonNode write(RequestHeaderData _object, short _version) { return write(_object, _version, true); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ResponseHeaderData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * The response header that precedes every Kafka response body on the wire.
 *
 * <p>Version 0 carries only the correlation id; version 1 additionally
 * supports a tagged-fields section (flexible versions). The read/write/addSize
 * methods below encode the exact wire layout, so their statement order is
 * significant and must not be changed.
 */
public class ResponseHeaderData implements ApiMessage {
    // Echoes the correlation id the client sent in the matching request.
    int correlationId;
    // Tagged fields with tags this version of the code does not recognize;
    // preserved so they round-trip through read/write unchanged.
    private List<RawTaggedField> _unknownTaggedFields;

    // Version 0: fixed-layout header, no tagged fields.
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("correlation_id", Type.INT32, "The correlation ID of this response.")
        );

    // Version 1: same field plus an (empty) tagged-fields section.
    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("correlation_id", Type.INT32, "The correlation ID of this response."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 1;

    /** Deserializing constructor: reads the header from {@code _readable}. */
    public ResponseHeaderData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: correlation id 0, no tagged fields. */
    public ResponseHeaderData() {
        this.correlationId = 0;
    }

    // Headers are not request/response messages, so they have no API key.
    @Override
    public short apiKey() {
        return -1;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    /**
     * Reads this header from the wire.
     *
     * <p>Layout: int32 correlation id, then (version >= 1 only) an unsigned
     * varint count of tagged fields followed by tag/size/body triples. All
     * tags are unknown to this schema and are retained verbatim.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.correlationId = _readable.readInt();
        this._unknownTaggedFields = null;
        if (_version >= 1) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        // No known tags in this schema; keep the raw bytes.
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Writes this header to the wire in the same layout {@link #read} parses.
     *
     * @throws UnsupportedVersionException if tagged fields are present but
     *         {@code _version} predates tagged-field support (version 0)
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(correlationId);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 1) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this header into {@code _size};
     * must mirror {@link #write} byte for byte.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        // int32 correlation id.
        _size.addBytes(4);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 1) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    // Equality covers the declared field plus any unknown tagged fields.
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof ResponseHeaderData)) return false;
        ResponseHeaderData other = (ResponseHeaderData) obj;
        if (correlationId != other.correlationId) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // NOTE: unknown tagged fields are intentionally excluded from the hash
    // (generated-code convention); equal objects still hash equally.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + correlationId;
        return hashCode;
    }

    /** Returns a copy of this header (tagged fields are not copied here). */
    @Override
    public ResponseHeaderData duplicate() {
        ResponseHeaderData _duplicate = new ResponseHeaderData();
        _duplicate.correlationId = correlationId;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "ResponseHeaderData("
            + "correlationId=" + correlationId
            + ")";
    }

    public int correlationId() {
        return this.correlationId;
    }

    /** Lazily-created, mutable view of the unknown tagged fields. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    /** Fluent setter for the correlation id. */
    public ResponseHeaderData setCorrelationId(int v) {
        this.correlationId = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ResponseHeaderDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.ResponseHeaderData.*; public class ResponseHeaderDataJsonConverter { public static ResponseHeaderData read(JsonNode _node, short _version) { ResponseHeaderData _object = new ResponseHeaderData(); JsonNode _correlationIdNode = _node.get("correlationId"); if (_correlationIdNode == null) { throw new RuntimeException("ResponseHeaderData: unable to locate field 'correlationId', which is mandatory in version " + _version); } else { _object.correlationId = MessageUtil.jsonNodeToInt(_correlationIdNode, "ResponseHeaderData"); } return _object; } public static JsonNode write(ResponseHeaderData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("correlationId", new IntNode(_object.correlationId)); return _node; } public static JsonNode 
write(ResponseHeaderData _object, short _version) { return write(_object, _version, true); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/SaslAuthenticateRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;
import org.apache.kafka.common.utils.Bytes;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Request body for the SaslAuthenticate API (key 36): carries opaque SASL
 * authentication bytes from the client to the broker.
 *
 * <p>Versions 0-1 use a fixed int32 length prefix for the bytes; version 2
 * switches to the flexible (compact) encoding — an unsigned varint of
 * length+1 — and adds a tagged-fields section. The read/write/addSize
 * methods encode this wire layout exactly; statement order is significant.
 */
public class SaslAuthenticateRequestData implements ApiMessage {
    // Opaque SASL mechanism payload; non-nullable on the wire.
    byte[] authBytes;
    // Tagged fields with tags this schema does not recognize; round-tripped.
    private List<RawTaggedField> _unknownTaggedFields;

    // Versions 0-1: length-prefixed BYTES.
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("auth_bytes", Type.BYTES, "The SASL authentication bytes from the client, as defined by the SASL mechanism.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    // Version 2: compact (flexible) encoding plus tagged fields.
    public static final Schema SCHEMA_2 =
        new Schema(
            new Field("auth_bytes", Type.COMPACT_BYTES, "The SASL authentication bytes from the client, as defined by the SASL mechanism."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 2;

    /** Deserializing constructor: reads the request from {@code _readable}. */
    public SaslAuthenticateRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: empty auth bytes. */
    public SaslAuthenticateRequestData() {
        this.authBytes = Bytes.EMPTY;
    }

    // 36 is the SaslAuthenticate API key.
    @Override
    public short apiKey() {
        return 36;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 2;
    }

    /**
     * Reads this request from the wire.
     *
     * <p>Length prefix: unsigned varint minus one for version >= 2 (compact
     * encoding, where 0 denotes null), plain int32 otherwise. A negative
     * decoded length means a null value, which this non-nullable field
     * rejects. Version >= 2 then reads the tagged-fields section.
     */
    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            if (_version >= 2) {
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readInt();
            }
            if (length < 0) {
                throw new RuntimeException("non-nullable field authBytes was serialized as null");
            } else {
                byte[] newBytes = _readable.readArray(length);
                this.authBytes = newBytes;
            }
        }
        this._unknownTaggedFields = null;
        if (_version >= 2) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        // No known tags in this schema; keep the raw bytes.
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Writes this request to the wire in the same layout {@link #read} parses.
     *
     * @throws UnsupportedVersionException if tagged fields are present but
     *         {@code _version} predates tagged-field support (< 2)
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 2) {
            // Compact encoding stores length + 1 so that 0 can mean null.
            _writable.writeUnsignedVarint(authBytes.length + 1);
        } else {
            _writable.writeInt(authBytes.length);
        }
        _writable.writeByteArray(authBytes);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 2) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this request into {@code _size};
     * must mirror {@link #write} byte for byte.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            _size.addBytes(authBytes.length);
            if (_version >= 2) {
                // Varint-encoded (length + 1) prefix.
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(authBytes.length + 1));
            } else {
                // Fixed int32 length prefix.
                _size.addBytes(4);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 2) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    // Equality compares the byte array contents plus unknown tagged fields.
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof SaslAuthenticateRequestData)) return false;
        SaslAuthenticateRequestData other = (SaslAuthenticateRequestData) obj;
        if (!Arrays.equals(this.authBytes, other.authBytes)) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // NOTE: unknown tagged fields are intentionally excluded from the hash
    // (generated-code convention); equal objects still hash equally.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + Arrays.hashCode(authBytes);
        return hashCode;
    }

    /** Returns a deep copy (the byte array is duplicated, not shared). */
    @Override
    public SaslAuthenticateRequestData duplicate() {
        SaslAuthenticateRequestData _duplicate = new SaslAuthenticateRequestData();
        _duplicate.authBytes = MessageUtil.duplicate(authBytes);
        return _duplicate;
    }

    @Override
    public String toString() {
        return "SaslAuthenticateRequestData("
            + "authBytes=" + Arrays.toString(authBytes)
            + ")";
    }

    // Exposes the internal array directly (no defensive copy) — generated
    // accessors trade safety for allocation-free access.
    public byte[] authBytes() {
        return this.authBytes;
    }

    /** Lazily-created, mutable view of the unknown tagged fields. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    /** Fluent setter for the auth bytes (array is stored, not copied). */
    public SaslAuthenticateRequestData setAuthBytes(byte[] v) {
        this.authBytes = v;
        return this;
    }
}