index
int64
repo_id
string
file_path
string
content
string
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/SecurityDisabledException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; /** * An error indicating that security is disabled on the broker. */ public class SecurityDisabledException extends ApiException { private static final long serialVersionUID = 1L; public SecurityDisabledException(String message) { super(message); } public SecurityDisabledException(String message, Throwable cause) { super(message, cause); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/SerializationException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; import org.apache.kafka.common.KafkaException; /** * Any exception during serialization in the producer */ public class SerializationException extends KafkaException { private static final long serialVersionUID = 1L; public SerializationException(String message, Throwable cause) { super(message, cause); } public SerializationException(String message) { super(message); } public SerializationException(Throwable cause) { super(cause); } public SerializationException() { super(); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/SnapshotNotFoundException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; public class SnapshotNotFoundException extends ApiException { private static final long serialVersionUID = 1; public SnapshotNotFoundException(String s) { super(s); } public SnapshotNotFoundException(String message, Throwable cause) { super(message, cause); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/SslAuthenticationException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; import javax.net.ssl.SSLException; /** * This exception indicates that SSL handshake has failed. See {@link #getCause()} * for the {@link SSLException} that caused this failure. * <p> * SSL handshake failures in clients may indicate client authentication * failure due to untrusted certificates if server is configured to request * client certificates. Handshake failures could also indicate misconfigured * security including protocol/cipher suite mismatch, server certificate * authentication failure or server host name verification failure. * </p> */ public class SslAuthenticationException extends AuthenticationException { private static final long serialVersionUID = 1L; public SslAuthenticationException(String message) { super(message); } public SslAuthenticationException(String message, Throwable cause) { super(message, cause); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/StaleBrokerEpochException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; public class StaleBrokerEpochException extends ApiException { private static final long serialVersionUID = 1L; public StaleBrokerEpochException(String message) { super(message); } public StaleBrokerEpochException(String message, Throwable cause) { super(message, cause); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/ThrottlingQuotaExceededException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; /** * Exception thrown if an operation on a resource exceeds the throttling quota. */ public class ThrottlingQuotaExceededException extends RetriableException { private int throttleTimeMs = 0; public ThrottlingQuotaExceededException(String message) { super(message); } public ThrottlingQuotaExceededException(int throttleTimeMs, String message) { super(message); this.throttleTimeMs = throttleTimeMs; } public int throttleTimeMs() { return this.throttleTimeMs; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/TimeoutException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; /** * Indicates that a request timed out. */ public class TimeoutException extends RetriableException { private static final long serialVersionUID = 1L; public TimeoutException() { super(); } public TimeoutException(String message, Throwable cause) { super(message, cause); } public TimeoutException(String message) { super(message); } public TimeoutException(Throwable cause) { super(cause); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/TopicAuthorizationException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; import java.util.Collections; import java.util.Set; public class TopicAuthorizationException extends AuthorizationException { private final Set<String> unauthorizedTopics; public TopicAuthorizationException(String message, Set<String> unauthorizedTopics) { super(message); this.unauthorizedTopics = unauthorizedTopics; } public TopicAuthorizationException(Set<String> unauthorizedTopics) { this("Not authorized to access topics: " + unauthorizedTopics, unauthorizedTopics); } public TopicAuthorizationException(String message) { this(message, Collections.emptySet()); } /** * Get the set of topics which failed authorization. May be empty if the set is not known * in the context the exception was raised in. * * @return possibly empty set of unauthorized topics */ public Set<String> unauthorizedTopics() { return unauthorizedTopics; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/TopicDeletionDisabledException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; public class TopicDeletionDisabledException extends ApiException { private static final long serialVersionUID = 1L; public TopicDeletionDisabledException() { } public TopicDeletionDisabledException(String message) { super(message); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/TopicExistsException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; public class TopicExistsException extends ApiException { private static final long serialVersionUID = 1L; public TopicExistsException(String message) { super(message); } public TopicExistsException(String message, Throwable cause) { super(message, cause); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/TransactionAbortedException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; /** * This is the Exception thrown when we are aborting any undrained batches during * a transaction which is aborted without any underlying cause - which likely means that the user chose to abort. */ public class TransactionAbortedException extends ApiException { private final static long serialVersionUID = 1L; public TransactionAbortedException(String message, Throwable cause) { super(message, cause); } public TransactionAbortedException(String message) { super(message); } public TransactionAbortedException() { super("Failing batch since transaction was aborted"); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/TransactionCoordinatorFencedException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; public class TransactionCoordinatorFencedException extends ApiException { private static final long serialVersionUID = 1L; public TransactionCoordinatorFencedException(String message) { super(message); } public TransactionCoordinatorFencedException(String message, Throwable cause) { super(message, cause); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/TransactionalIdAuthorizationException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; public class TransactionalIdAuthorizationException extends AuthorizationException { public TransactionalIdAuthorizationException(final String message) { super(message); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/TransactionalIdNotFoundException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; public class TransactionalIdNotFoundException extends ApiException { public TransactionalIdNotFoundException(String message) { super(message); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnacceptableCredentialException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; /** * Exception thrown when attempting to define a credential that does not meet the criteria for acceptability * (for example, attempting to create a SCRAM credential with an empty username or password or too few/many iterations). */ public class UnacceptableCredentialException extends ApiException { private static final long serialVersionUID = 1L; /** * Constructor * * @param message the exception's message */ public UnacceptableCredentialException(String message) { super(message); } /** * * @param message the exception's message * @param cause the exception's cause */ public UnacceptableCredentialException(String message, Throwable cause) { super(message, cause); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnknownLeaderEpochException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; /** * The request contained a leader epoch which is larger than that on the broker that received the * request. This can happen if the client observes a metadata update before it has been propagated * to all brokers. Clients need not refresh metadata before retrying. */ public class UnknownLeaderEpochException extends RetriableException { private static final long serialVersionUID = 1L; public UnknownLeaderEpochException(String message) { super(message); } public UnknownLeaderEpochException(String message, Throwable cause) { super(message, cause); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnknownMemberIdException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; public class UnknownMemberIdException extends ApiException { private static final long serialVersionUID = 1L; public UnknownMemberIdException() { super(); } public UnknownMemberIdException(String message, Throwable cause) { super(message, cause); } public UnknownMemberIdException(String message) { super(message); } public UnknownMemberIdException(Throwable cause) { super(cause); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnknownProducerIdException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; /** * This exception is raised by the broker if it could not locate the producer metadata associated with the producerId * in question. This could happen if, for instance, the producer's records were deleted because their retention time * had elapsed. Once the last records of the producerId are removed, the producer's metadata is removed from the broker, * and future appends by the producer will return this exception. */ public class UnknownProducerIdException extends OutOfOrderSequenceException { public UnknownProducerIdException(String message) { super(message); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnknownServerException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; /** * An error occurred on the server for which the client doesn't have a corresponding error code. This is generally an * unexpected error. * */ public class UnknownServerException extends ApiException { private static final long serialVersionUID = 1L; public UnknownServerException() { } public UnknownServerException(String message) { super(message); } public UnknownServerException(Throwable cause) { super(cause); } public UnknownServerException(String message, Throwable cause) { super(message, cause); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnknownTopicIdException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; public class UnknownTopicIdException extends InvalidMetadataException { private static final long serialVersionUID = 1L; public UnknownTopicIdException(String message) { super(message); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnknownTopicOrPartitionException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; /** * This topic/partition doesn't exist. * This exception is used in contexts where a topic doesn't seem to exist based on possibly stale metadata. * This exception is retriable because the topic or partition might subsequently be created. * * @see InvalidTopicException */ public class UnknownTopicOrPartitionException extends InvalidMetadataException { private static final long serialVersionUID = 1L; public UnknownTopicOrPartitionException() { } public UnknownTopicOrPartitionException(String message) { super(message); } public UnknownTopicOrPartitionException(Throwable throwable) { super(throwable); } public UnknownTopicOrPartitionException(String message, Throwable throwable) { super(message, throwable); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnreleasedInstanceIdException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; import org.apache.kafka.common.annotation.InterfaceStability; @InterfaceStability.Evolving public class UnreleasedInstanceIdException extends ApiException { public UnreleasedInstanceIdException(String message) { super(message); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnstableOffsetCommitException.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.errors; /** * Exception thrown when there are unstable offsets for the requested topic partitions. */ public class UnstableOffsetCommitException extends RetriableException { private static final long serialVersionUID = 1L; public UnstableOffsetCommitException(String message) { super(message); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnsupportedAssignorException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.errors;

import org.apache.kafka.common.annotation.InterfaceStability;

/**
 * Indicates that the requested assignor is not supported.
 * Marked {@link InterfaceStability.Evolving}, so the class may change in a future release.
 */
@InterfaceStability.Evolving
public class UnsupportedAssignorException extends ApiException {

    // Added for consistency with the other Serializable exception classes in this package.
    private static final long serialVersionUID = 1L;

    public UnsupportedAssignorException(String message) {
        super(message);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnsupportedByAuthenticationException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.errors;

/**
 * Indicates that the requested function is not supported by the
 * authentication mechanism in use.
 */
public class UnsupportedByAuthenticationException extends ApiException {

    private static final long serialVersionUID = 1L;

    public UnsupportedByAuthenticationException(String message, Throwable cause) {
        super(message, cause);
    }

    public UnsupportedByAuthenticationException(String message) {
        super(message);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnsupportedCompressionTypeException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.errors;

/**
 * Indicates that the client making the request does not support the
 * compression type used by the given partition.
 */
public class UnsupportedCompressionTypeException extends ApiException {

    private static final long serialVersionUID = 1L;

    public UnsupportedCompressionTypeException(String message, Throwable cause) {
        super(message, cause);
    }

    public UnsupportedCompressionTypeException(String message) {
        super(message);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnsupportedForMessageFormatException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.errors;

/**
 * Indicates that the requested function is not supported by the message format
 * version in use. For example, requesting idempotence on a topic whose message
 * format predates 0.11.0.0 results in this error.
 */
public class UnsupportedForMessageFormatException extends ApiException {

    private static final long serialVersionUID = 1L;

    public UnsupportedForMessageFormatException(String message, Throwable cause) {
        super(message, cause);
    }

    public UnsupportedForMessageFormatException(String message) {
        super(message);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnsupportedSaslMechanismException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.errors;

/**
 * Indicates that the SASL mechanism requested by the client is not
 * enabled on the broker. This is an {@link AuthenticationException},
 * so it is treated as an authentication failure by the client.
 */
public class UnsupportedSaslMechanismException extends AuthenticationException {

    private static final long serialVersionUID = 1L;

    public UnsupportedSaslMechanismException(String message, Throwable cause) {
        super(message, cause);
    }

    public UnsupportedSaslMechanismException(String message) {
        super(message);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/UnsupportedVersionException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.errors;

import java.util.Map;

/**
 * Indicates that a request API or version needed by the client is not supported by the broker.
 * <p>
 * This is typically fatal: Kafka clients downgrade request versions where possible, so reaching
 * this error usually means a required feature is simply absent in the broker's version. Fatal
 * errors can generally only be handled by closing the client instance, although in some cases
 * the caller can proceed without the underlying feature. For example, with idempotence enabled
 * the producer cannot revert to weaker semantics and this error is fatal; by contrast, if it is
 * raised from {@link org.apache.kafka.clients.consumer.KafkaConsumer#offsetsForTimes(Map)}, the
 * caller can fall back to alternative logic to set the consumer's position.
 */
public class UnsupportedVersionException extends ApiException {

    private static final long serialVersionUID = 1L;

    public UnsupportedVersionException(String message) {
        super(message);
    }

    public UnsupportedVersionException(String message, Throwable cause) {
        super(message, cause);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/WakeupException.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.errors;

import org.apache.kafka.common.KafkaException;

/**
 * Signals that a blocking operation was preempted by another thread.
 * For example, {@link org.apache.kafka.clients.consumer.KafkaConsumer#wakeup}
 * can break out of an active
 * {@link org.apache.kafka.clients.consumer.KafkaConsumer#poll(java.time.Duration)},
 * which then raises an instance of this exception.
 */
public class WakeupException extends KafkaException {

    private static final long serialVersionUID = 1L;
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/errors/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides common exception classes. */ package org.apache.kafka.common.errors;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/feature/BaseVersionRange.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.feature;

import static java.util.stream.Collectors.joining;

import java.util.Map;
import java.util.Objects;

import org.apache.kafka.common.utils.Utils;

/**
 * Represents an immutable basic version range using 2 attributes: min and max, each of type short.
 * The min and max attributes need to satisfy 2 rules:
 *  - they are each expected to be >= 1, as we only consider positive version values to be valid.
 *  - max should be >= min.
 *
 * The class also provides an API to convert the version range to a map.
 * The class allows for configurable labels for the min/max attributes, which can be specialized by
 * sub-classes (if needed).
 */
class BaseVersionRange {
    // Non-empty label for the min version key, that's used only to convert to/from a map.
    private final String minKeyLabel;

    // The value of the minimum version.
    private final short minValue;

    // Non-empty label for the max version key, that's used only to convert to/from a map.
    private final String maxKeyLabel;

    // The value of the maximum version.
    private final short maxValue;

    /**
     * Raises an exception unless the following condition is met:
     * minValue >= 1 and maxValue >= 1 and maxValue >= minValue.
     *
     * @param minKeyLabel   Label for the min version key, that's used only to convert to/from a map.
     * @param minValue      The minimum version value.
     * @param maxKeyLabel   Label for the max version key, that's used only to convert to/from a map.
     * @param maxValue      The maximum version value.
     *
     * @throws IllegalArgumentException If any of the following conditions are true:
     *                                  - (minValue < 1) OR (maxValue < 1) OR (maxValue < minValue).
     *                                  - minKeyLabel is empty, OR, maxKeyLabel is empty.
     */
    protected BaseVersionRange(String minKeyLabel, short minValue, String maxKeyLabel, short maxValue) {
        if (minValue < 1 || maxValue < 1 || maxValue < minValue) {
            throw new IllegalArgumentException(
                String.format(
                    "Expected minValue >= 1, maxValue >= 1 and maxValue >= minValue, but received" +
                    " minValue: %d, maxValue: %d", minValue, maxValue));
        }
        if (minKeyLabel.isEmpty()) {
            throw new IllegalArgumentException("Expected minKeyLabel to be non-empty.");
        }
        if (maxKeyLabel.isEmpty()) {
            throw new IllegalArgumentException("Expected maxKeyLabel to be non-empty.");
        }
        this.minKeyLabel = minKeyLabel;
        this.minValue = minValue;
        this.maxKeyLabel = maxKeyLabel;
        this.maxValue = maxValue;
    }

    public short min() {
        return minValue;
    }

    public short max() {
        return maxValue;
    }

    @Override
    public String toString() {
        return String.format(
            "%s[%s]",
            this.getClass().getSimpleName(),
            mapToString(toMap()));
    }

    /**
     * @return a 2-entry map representation of this range, keyed by the configured labels.
     */
    public Map<String, Short> toMap() {
        return Utils.mkMap(Utils.mkEntry(minKeyLabel, min()), Utils.mkEntry(maxKeyLabel, max()));
    }

    // Renders a map as "key:value, key:value" for error messages and toString().
    private static String mapToString(final Map<String, Short> map) {
        return map
            .entrySet()
            .stream()
            .map(entry -> String.format("%s:%d", entry.getKey(), entry.getValue()))
            .collect(joining(", "));
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (other == null || getClass() != other.getClass()) {
            return false;
        }
        final BaseVersionRange that = (BaseVersionRange) other;
        return Objects.equals(this.minKeyLabel, that.minKeyLabel) &&
            this.minValue == that.minValue &&
            Objects.equals(this.maxKeyLabel, that.maxKeyLabel) &&
            this.maxValue == that.maxValue;
    }

    @Override
    public int hashCode() {
        return Objects.hash(minKeyLabel, minValue, maxKeyLabel, maxValue);
    }

    /**
     * Looks up {@code key} in the provided map.
     *
     * @param key             the label to look up
     * @param versionRangeMap the map representation of a version range
     *
     * @return the value mapped to the key
     *
     * @throws IllegalArgumentException if the key is absent from the map
     */
    public static short valueOrThrow(String key, Map<String, Short> versionRangeMap) {
        final Short value = versionRangeMap.get(key);
        if (value == null) {
            throw new IllegalArgumentException(String.format("%s absent in [%s]", key, mapToString(versionRangeMap)));
        }
        return value;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/feature/Features.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.feature;

import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;
import java.util.Objects;

import static java.util.stream.Collectors.joining;

/**
 * Represents an immutable dictionary with key being feature name, and value being <VersionRangeType>.
 * Also provides an API to convert the features and their version ranges to/from a map.
 *
 * This class can be instantiated only using its factory functions, with the important ones being:
 * Features.supportedFeatures(...) and Features.emptySupportedFeatures().
 *
 * @param <VersionRangeType> is the type of version range.
 * @see SupportedVersionRange
 */
public class Features<VersionRangeType extends BaseVersionRange> {
    private final Map<String, VersionRangeType> features;

    /**
     * Constructor is made private, as for readability it is preferred the caller uses one of the
     * static factory functions for instantiation (see below).
     *
     * @param features Map of feature name to a type of VersionRange.
     */
    private Features(Map<String, VersionRangeType> features) {
        Objects.requireNonNull(features, "Provided features can not be null.");
        this.features = features;
    }

    /**
     * @param features Map of feature name to SupportedVersionRange.
     *
     * @return Returns a new Features object representing supported features.
     */
    public static Features<SupportedVersionRange> supportedFeatures(Map<String, SupportedVersionRange> features) {
        return new Features<>(features);
    }

    /**
     * @return a Features object with no supported features.
     */
    public static Features<SupportedVersionRange> emptySupportedFeatures() {
        return new Features<>(new HashMap<>());
    }

    public Map<String, VersionRangeType> features() {
        return features;
    }

    public boolean empty() {
        return features.isEmpty();
    }

    /**
     * @param feature name of the feature
     *
     * @return the VersionRangeType corresponding to the feature name, or null if the
     *         feature is absent
     */
    public VersionRangeType get(String feature) {
        return features.get(feature);
    }

    @Override
    public String toString() {
        return String.format(
            "Features{%s}",
            features
                .entrySet()
                .stream()
                .map(entry -> String.format("(%s -> %s)", entry.getKey(), entry.getValue()))
                .collect(joining(", "))
        );
    }

    /**
     * @return A map representation of the underlying features. The returned value can be converted
     *         back to Features using one of the from*FeaturesMap() APIs of this class.
     */
    public Map<String, Map<String, Short>> toMap() {
        return features.entrySet().stream().collect(
            Collectors.toMap(
                Map.Entry::getKey,
                entry -> entry.getValue().toMap()));
    }

    /**
     * An interface that defines behavior to convert from a Map to an object of type BaseVersionRange.
     */
    @FunctionalInterface
    private interface MapToBaseVersionRangeConverter<V extends BaseVersionRange> {

        /**
         * Convert the map representation of an object of type <V>, to an object of type <V>.
         *
         * @param baseVersionRangeMap the map representation of a BaseVersionRange object.
         *
         * @return the object of type <V>
         */
        V fromMap(Map<String, Short> baseVersionRangeMap);
    }

    // Shared implementation behind the public from*FeaturesMap() factories.
    private static <V extends BaseVersionRange> Features<V> fromFeaturesMap(
        Map<String, Map<String, Short>> featuresMap, MapToBaseVersionRangeConverter<V> converter) {
        return new Features<>(featuresMap.entrySet().stream().collect(
            Collectors.toMap(
                Map.Entry::getKey,
                entry -> converter.fromMap(entry.getValue()))));
    }

    /**
     * Converts from a map to Features<SupportedVersionRange>.
     *
     * @param featuresMap the map representation of a Features<SupportedVersionRange> object,
     *                    generated using the toMap() API.
     *
     * @return the Features<SupportedVersionRange> object
     */
    public static Features<SupportedVersionRange> fromSupportedFeaturesMap(
        Map<String, Map<String, Short>> featuresMap) {
        return fromFeaturesMap(featuresMap, SupportedVersionRange::fromMap);
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (!(other instanceof Features)) {
            return false;
        }
        // Use a bounded wildcard instead of the raw type to avoid unchecked raw-type usage.
        final Features<?> that = (Features<?>) other;
        return Objects.equals(this.features, that.features);
    }

    @Override
    public int hashCode() {
        return Objects.hash(features);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/feature/SupportedVersionRange.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.feature;

import java.util.Map;

/**
 * A {@link BaseVersionRange} specialization that describes the min/max versions of a
 * supported feature.
 */
public class SupportedVersionRange extends BaseVersionRange {
    // Map key under which the minimum version is stored, used only to convert to/from a map.
    private static final String MIN_VERSION_KEY_LABEL = "min_version";

    // Map key under which the maximum version is stored, used only to convert to/from a map.
    private static final String MAX_VERSION_KEY_LABEL = "max_version";

    public SupportedVersionRange(short minVersion, short maxVersion) {
        super(MIN_VERSION_KEY_LABEL, minVersion, MAX_VERSION_KEY_LABEL, maxVersion);
    }

    // Convenience constructor: the minimum version defaults to 1.
    public SupportedVersionRange(short maxVersion) {
        this((short) 1, maxVersion);
    }

    /**
     * Builds a SupportedVersionRange from its map representation.
     *
     * @throws IllegalArgumentException if either label is absent from the map
     */
    public static SupportedVersionRange fromMap(Map<String, Short> versionRangeMap) {
        final short minVersion = BaseVersionRange.valueOrThrow(MIN_VERSION_KEY_LABEL, versionRangeMap);
        final short maxVersion = BaseVersionRange.valueOrThrow(MAX_VERSION_KEY_LABEL, versionRangeMap);
        return new SupportedVersionRange(minVersion, maxVersion);
    }

    /**
     * Checks if the given version level does *NOT* fall within the closed range [min, max]
     * of this SupportedVersionRange.
     *
     * @param version the version to be checked
     *
     * @return true if the version is incompatible (outside the range), false otherwise
     */
    public boolean isIncompatibleWith(short version) {
        return version < min() || version > max();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/feature/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides classes for programmatically communicating support for logical features. * <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong> */ package org.apache.kafka.common.feature;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/header/Header.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.common.header; public interface Header { String key(); byte[] value(); }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/header/Headers.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.header;

/**
 * An ordered collection of {@link Header} entries attached to a Kafka record.
 * The collection may be placed in a read-only state, after which mutating
 * operations throw {@link IllegalStateException}.
 */
public interface Headers extends Iterable<Header> {

    /**
     * Adds the given header to the end of the collection.
     *
     * @param header the Header to be added.
     * @return this instance of the Headers, once the header is added.
     * @throws IllegalStateException if headers are in a read-only state.
     */
    Headers add(Header header) throws IllegalStateException;

    /**
     * Creates a header from the given key and value and adds it to the end of the collection.
     *
     * @param key of the header to be added.
     * @param value of the header to be added.
     * @return this instance of the Headers, once the header is added.
     * @throws IllegalStateException if headers are in a read-only state.
     */
    Headers add(String key, byte[] value) throws IllegalStateException;

    /**
     * Removes all headers with the given key.
     *
     * @param key to remove all headers for.
     * @return this instance of the Headers, once the headers are removed.
     * @throws IllegalStateException if headers are in a read-only state.
     */
    Headers remove(String key) throws IllegalStateException;

    /**
     * Returns only the very last header for the given key, if present.
     *
     * @param key to get the last header for.
     * @return the last header matching the given key; null if no such header is present.
     */
    Header lastHeader(String key);

    /**
     * Returns all headers for the given key, in the order they were added, if present.
     *
     * @param key to return the headers for.
     * @return all headers for the given key, in the order they were added; if NO headers
     *         are present an empty iterable is returned.
     */
    Iterable<Header> headers(String key);

    /**
     * Returns all headers as an array, in the order they were added.
     *
     * @return the headers as a Header[]; mutating this array will not affect the Headers.
     *         If NO headers are present an empty array is returned.
     */
    Header[] toArray();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/header/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides API for application-defined metadata attached to Kafka records. */ package org.apache.kafka.common.header;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/header
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/header/internals/RecordHeader.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.header.internals;

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Objects;

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.utils.Utils;

/**
 * {@link Header} implementation that can be built either from already-materialized
 * key/value data, or from raw {@link ByteBuffer}s that are decoded lazily on first
 * access (and then released to allow garbage collection).
 * <p>
 * NOTE(review): the lazy decoding in {@link #key()} and {@link #value()} is not
 * synchronized — presumably a header instance is confined to a single thread; confirm
 * before sharing instances across threads.
 */
public class RecordHeader implements Header {
    // Raw UTF-8 key bytes; decoded lazily into 'key' and then set to null.
    private ByteBuffer keyBuffer;
    private String key;
    // Raw value bytes; copied lazily into 'value' and then set to null.
    private ByteBuffer valueBuffer;
    private byte[] value;

    public RecordHeader(String key, byte[] value) {
        Objects.requireNonNull(key, "Null header keys are not permitted");
        this.key = key;
        this.value = value;
    }

    public RecordHeader(ByteBuffer keyBuffer, ByteBuffer valueBuffer) {
        this.keyBuffer = Objects.requireNonNull(keyBuffer, "Null header keys are not permitted");
        this.valueBuffer = valueBuffer;
    }

    /**
     * Returns the header key, decoding it from the raw buffer on first access.
     * The buffer reference is dropped after decoding so it can be collected.
     */
    public String key() {
        if (key == null) {
            key = Utils.utf8(keyBuffer, keyBuffer.remaining());
            keyBuffer = null;
        }
        return key;
    }

    /**
     * Returns the header value (may be null), copying it out of the raw buffer
     * on first access. The buffer reference is dropped after the copy.
     */
    public byte[] value() {
        if (value == null && valueBuffer != null) {
            value = Utils.toArray(valueBuffer);
            valueBuffer = null;
        }
        return value;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;

        // Compare via the accessors so lazily-buffered and materialized headers
        // with the same content are equal.
        RecordHeader header = (RecordHeader) o;
        return Objects.equals(key(), header.key()) &&
            Arrays.equals(value(), header.value());
    }

    @Override
    public int hashCode() {
        int result = key().hashCode();
        // Arrays.hashCode(null) is 0, so a null value is safe here.
        result = 31 * result + Arrays.hashCode(value());
        return result;
    }

    @Override
    public String toString() {
        return "RecordHeader(key = " + key() + ", value = " + Arrays.toString(value()) + ")";
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/header
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/header/internals/RecordHeaders.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.header.internals;

import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.utils.AbstractIterator;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;

/**
 * Mutable {@link Headers} implementation backed by an {@link ArrayList}. Insertion order is
 * preserved and multiple headers with the same key are allowed. After {@link #setReadOnly()}
 * is called, every mutating operation throws {@link IllegalStateException}.
 */
public class RecordHeaders implements Headers {

    private final List<Header> headers;
    // volatile so a setReadOnly() call is visible to subsequent canWrite() checks on other threads.
    private volatile boolean isReadOnly;

    public RecordHeaders() {
        this((Iterable<Header>) null);
    }

    public RecordHeaders(Header[] headers) {
        this(headers == null ? null : Arrays.asList(headers));
    }

    public RecordHeaders(Iterable<Header> headers) {
        //Use efficient copy constructor if possible, fallback to iteration otherwise
        if (headers == null) {
            this.headers = new ArrayList<>();
        } else if (headers instanceof RecordHeaders) {
            this.headers = new ArrayList<>(((RecordHeaders) headers).headers);
        } else {
            this.headers = new ArrayList<>();
            for (Header header : headers) {
                Objects.requireNonNull(header, "Header cannot be null.");
                this.headers.add(header);
            }
        }
    }

    /**
     * Appends the given header. Throws IllegalStateException if this instance is read-only.
     */
    @Override
    public Headers add(Header header) throws IllegalStateException {
        Objects.requireNonNull(header, "Header cannot be null.");
        canWrite();
        headers.add(header);
        return this;
    }

    @Override
    public Headers add(String key, byte[] value) throws IllegalStateException {
        return add(new RecordHeader(key, value));
    }

    /**
     * Removes every header whose key equals {@code key}.
     * Goes through the close-aware iterator so each removal re-checks the read-only flag.
     */
    @Override
    public Headers remove(String key) throws IllegalStateException {
        canWrite();
        checkKey(key);
        Iterator<Header> iterator = iterator();
        while (iterator.hasNext()) {
            if (iterator.next().key().equals(key)) {
                iterator.remove();
            }
        }
        return this;
    }

    /**
     * Returns the most recently added header with the given key, or null if there is none.
     * Scans backwards so the latest insertion wins among duplicates.
     */
    @Override
    public Header lastHeader(String key) {
        checkKey(key);
        for (int i = headers.size() - 1; i >= 0; i--) {
            Header header = headers.get(i);
            if (header.key().equals(key)) {
                return header;
            }
        }
        return null;
    }

    /**
     * Returns a lazy view of all headers matching {@code key}; a fresh filtering iterator is
     * created on each iteration.
     */
    @Override
    public Iterable<Header> headers(final String key) {
        checkKey(key);
        return () -> new FilterByKeyIterator(headers.iterator(), key);
    }

    @Override
    public Iterator<Header> iterator() {
        // Wrapped so that remove() honours the read-only flag.
        return closeAware(headers.iterator());
    }

    /** Marks this instance as read-only; all future mutations will throw. */
    public void setReadOnly() {
        this.isReadOnly = true;
    }

    /** Snapshot of the current headers as an array (shared empty constant when empty). */
    public Header[] toArray() {
        return headers.isEmpty() ? Record.EMPTY_HEADERS : headers.toArray(new Header[0]);
    }

    private void checkKey(String key) {
        if (key == null)
            throw new IllegalArgumentException("key cannot be null.");
    }

    // Fails fast if setReadOnly() has been called.
    private void canWrite() {
        if (isReadOnly)
            throw new IllegalStateException("RecordHeaders has been closed.");
    }

    // Delegating iterator whose remove() re-validates the read-only flag.
    private Iterator<Header> closeAware(final Iterator<Header> original) {
        return new Iterator<Header>() {
            @Override
            public boolean hasNext() {
                return original.hasNext();
            }

            public Header next() {
                return original.next();
            }

            @Override
            public void remove() {
                canWrite();
                original.remove();
            }
        };
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        RecordHeaders headers1 = (RecordHeaders) o;
        // Note: equality ignores the isReadOnly flag; only the header list matters.
        return Objects.equals(headers, headers1.headers);
    }

    @Override
    public int hashCode() {
        return headers != null ? headers.hashCode() : 0;
    }

    @Override
    public String toString() {
        return "RecordHeaders(" +
                "headers = " + headers +
                ", isReadOnly = " + isReadOnly +
                ')';
    }

    // Iterator that yields only the headers whose key equals the given key.
    private static final class FilterByKeyIterator extends AbstractIterator<Header> {

        private final Iterator<Header> original;
        private final String key;

        private FilterByKeyIterator(Iterator<Header> original, String key) {
            this.original = original;
            this.key = key;
        }

        protected Header makeNext() {
            while (true) {
                if (original.hasNext()) {
                    Header header = original.next();
                    if (!header.key().equals(key))
                        continue;
                    return header;
                }
                return this.allDone();
            }
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/internals/ClusterResourceListeners.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.internals;

import org.apache.kafka.common.ClusterResource;
import org.apache.kafka.common.ClusterResourceListener;

import java.util.ArrayList;
import java.util.List;

/**
 * Registry of {@link ClusterResourceListener}s. Arbitrary objects may be offered via
 * {@link #maybeAdd(Object)}; only those implementing the listener interface are retained,
 * and all retained listeners are notified on {@link #onUpdate(ClusterResource)}.
 */
public class ClusterResourceListeners {

    private final List<ClusterResourceListener> clusterResourceListeners = new ArrayList<>();

    /**
     * Registers the candidate only if it implements {@link ClusterResourceListener};
     * any other object is silently ignored.
     *
     * @param candidate Object which might implement {@link ClusterResourceListener}
     */
    public void maybeAdd(Object candidate) {
        if (candidate instanceof ClusterResourceListener)
            clusterResourceListeners.add((ClusterResourceListener) candidate);
    }

    /**
     * Offers every element of the list to {@link #maybeAdd(Object)}.
     *
     * @param candidateList List of objects which might implement {@link ClusterResourceListener}
     */
    public void maybeAddAll(List<?> candidateList) {
        candidateList.forEach(this::maybeAdd);
    }

    /**
     * Sends the updated cluster metadata to every registered {@link ClusterResourceListener}.
     *
     * @param cluster Cluster metadata
     */
    public void onUpdate(ClusterResource cluster) {
        clusterResourceListeners.forEach(listener -> listener.onUpdate(cluster));
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/internals/FatalExitError.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.internals;

import org.apache.kafka.common.utils.Exit;

/**
 * An error that indicates the need to exit the JVM process. This should only be used by the
 * server or command-line tools; clients must never shut down the JVM process.
 *
 * This exception is expected to be caught at the highest level of the thread so that no shared
 * lock is held by the thread when it calls {@link Exit#exit(int)}.
 */
public class FatalExitError extends Error {

    private static final long serialVersionUID = 1L;

    private final int statusCode;

    /** Equivalent to {@code FatalExitError(1)}. */
    public FatalExitError() {
        this(1);
    }

    /**
     * @param statusCode the process exit status; must be non-zero because 0 denotes success,
     *                   which is not an error
     */
    public FatalExitError(int statusCode) {
        if (statusCode == 0) {
            throw new IllegalArgumentException("statusCode must not be 0");
        }
        this.statusCode = statusCode;
    }

    /** The exit status to pass to {@link Exit#exit(int)}. */
    public int statusCode() {
        return statusCode;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/internals/KafkaCompletableFuture.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.internals;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

/**
 * This internal class exists because {@link CompletableFuture} exposes {@code complete()},
 * {@code completeExceptionally()} and other methods which would allow erroneous completion by
 * user code of a KafkaFuture returned from a Kafka API to a client application. All such public
 * completion paths throw {@link UnsupportedOperationException}; the Kafka clients complete the
 * future through the package-private {@code kafkaComplete}/{@code kafkaCompleteExceptionally}.
 *
 * @param <T> The type of the future value.
 */
public class KafkaCompletableFuture<T> extends CompletableFuture<T> {

    // Single construction point for the rejection thrown by every disallowed completion path.
    private static UnsupportedOperationException erroneousCompletionException() {
        return new UnsupportedOperationException("User code should not complete futures returned from Kafka clients");
    }

    /**
     * Completes this future normally. For internal use by the Kafka clients, not by user code.
     *
     * @param value the result value
     * @return {@code true} if this invocation caused this CompletableFuture
     *         to transition to a completed state, else {@code false}
     */
    boolean kafkaComplete(T value) {
        return super.complete(value);
    }

    /**
     * Completes this future exceptionally. For internal use by the Kafka clients, not by user code.
     *
     * @param throwable the exception.
     * @return {@code true} if this invocation caused this CompletableFuture
     *         to transition to a completed state, else {@code false}
     */
    boolean kafkaCompleteExceptionally(Throwable throwable) {
        return super.completeExceptionally(throwable);
    }

    @Override
    public boolean complete(T value) {
        throw erroneousCompletionException();
    }

    @Override
    public boolean completeExceptionally(Throwable ex) {
        throw erroneousCompletionException();
    }

    @Override
    public void obtrudeValue(T value) {
        throw erroneousCompletionException();
    }

    @Override
    public void obtrudeException(Throwable ex) {
        throw erroneousCompletionException();
    }

    //@Override // enable once Kafka no longer supports Java 8
    public <U> CompletableFuture<U> newIncompleteFuture() {
        // Dependent stages must also be completion-protected.
        return new KafkaCompletableFuture<>();
    }

    //@Override // enable once Kafka no longer supports Java 8
    public CompletableFuture<T> completeAsync(Supplier<? extends T> supplier, Executor executor) {
        throw erroneousCompletionException();
    }

    //@Override // enable once Kafka no longer supports Java 8
    public CompletableFuture<T> completeAsync(Supplier<? extends T> supplier) {
        throw erroneousCompletionException();
    }

    //@Override // enable once Kafka no longer supports Java 8
    public CompletableFuture<T> completeOnTimeout(T value, long timeout, TimeUnit unit) {
        throw erroneousCompletionException();
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/internals/KafkaFutureImpl.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.internals;

import java.util.concurrent.CancellationException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.concurrent.CompletionStage;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.apache.kafka.common.KafkaFuture;

/**
 * A flexible future which supports call chaining and other asynchronous programming patterns.
 *
 * Implemented as an adapter over {@link KafkaCompletableFuture}, translating between
 * CompletableFuture's exception-wrapping conventions and KafkaFuture's historical ones
 * (see the comments on the individual methods).
 */
public class KafkaFutureImpl<T> extends KafkaFuture<T> {

    // The underlying completion machinery; completion goes through its kafkaComplete* methods.
    private final KafkaCompletableFuture<T> completableFuture;

    // True when this future was produced by chaining (thenApply/whenComplete); affects
    // how isCancelled() must be computed, see below.
    private final boolean isDependant;

    public KafkaFutureImpl() {
        this(false, new KafkaCompletableFuture<>());
    }

    private KafkaFutureImpl(boolean isDependant, KafkaCompletableFuture<T> completableFuture) {
        this.isDependant = isDependant;
        this.completableFuture = completableFuture;
    }

    @Override
    public CompletionStage<T> toCompletionStage() {
        return completableFuture;
    }

    /**
     * Returns a new KafkaFuture that, when this future completes normally, is executed with this
     * future's result as the argument to the supplied function.
     */
    @Override
    public <R> KafkaFuture<R> thenApply(BaseFunction<T, R> function) {
        CompletableFuture<R> appliedFuture = completableFuture.thenApply(value -> {
            try {
                return function.apply(value);
            } catch (Throwable t) {
                if (t instanceof CompletionException) {
                    // KafkaFuture#thenApply, when the function threw CompletionException should return
                    // an ExecutionException wrapping a CompletionException wrapping the exception thrown by the
                    // function. CompletableFuture#thenApply will just return ExecutionException wrapping the
                    // exception thrown by the function, so we add an extra CompletionException here to
                    // maintain the KafkaFuture behaviour.
                    throw new CompletionException(t);
                } else {
                    throw t;
                }
            }
        });
        return new KafkaFutureImpl<>(true, toKafkaCompletableFuture(appliedFuture));
    }

    // Adapts an arbitrary CompletableFuture into a KafkaCompletableFuture, either by downcast
    // (already the right type) or by mirroring its completion into a fresh instance.
    private static <U> KafkaCompletableFuture<U> toKafkaCompletableFuture(CompletableFuture<U> completableFuture) {
        if (completableFuture instanceof KafkaCompletableFuture) {
            return (KafkaCompletableFuture<U>) completableFuture;
        } else {
            final KafkaCompletableFuture<U> result = new KafkaCompletableFuture<>();
            completableFuture.whenComplete((x, y) -> {
                if (y != null) {
                    result.kafkaCompleteExceptionally(y);
                } else {
                    result.kafkaComplete(x);
                }
            });
            return result;
        }
    }

    /**
     * @see KafkaFutureImpl#thenApply(BaseFunction)
     * @deprecated Since Kafka 3.0.
     */
    @Deprecated
    @Override
    public <R> KafkaFuture<R> thenApply(Function<T, R> function) {
        return thenApply((BaseFunction<T, R>) function);
    }

    /**
     * Returns a new (dependent) KafkaFuture completed after both this future and the given
     * action have completed; exceptions from the action are wrapped per KafkaFuture convention.
     */
    @Override
    public KafkaFuture<T> whenComplete(final BiConsumer<? super T, ? super Throwable> biConsumer) {
        CompletableFuture<T> tCompletableFuture = completableFuture.whenComplete(
            (java.util.function.BiConsumer<? super T, ? super Throwable>) (a, b) -> {
                try {
                    biConsumer.accept(a, b);
                } catch (Throwable t) {
                    if (t instanceof CompletionException) {
                        // Same extra-wrap rationale as in thenApply: keep KafkaFuture's
                        // historical ExecutionException(CompletionException(cause)) shape.
                        throw new CompletionException(t);
                    } else {
                        throw t;
                    }
                }
            });
        return new KafkaFutureImpl<>(true, toKafkaCompletableFuture(tCompletableFuture));
    }

    @Override
    public boolean complete(T newValue) {
        return completableFuture.kafkaComplete(newValue);
    }

    @Override
    public boolean completeExceptionally(Throwable newException) {
        // CompletableFuture#get() always wraps the _cause_ of a CompletionException in ExecutionException
        // (which KafkaFuture does not) so wrap CompletionException in an extra one to avoid losing the
        // first CompletionException in the exception chain.
        return completableFuture.kafkaCompleteExceptionally(
            newException instanceof CompletionException ? new CompletionException(newException) : newException);
    }

    /**
     * If not already completed, completes this future with a CancellationException. Dependent
     * futures that have not already completed will also complete exceptionally, with a
     * CompletionException caused by this CancellationException.
     */
    @Override
    public boolean cancel(boolean mayInterruptIfRunning) {
        return completableFuture.cancel(mayInterruptIfRunning);
    }

    /**
     * We need to deal with differences between KafkaFuture's historic API and the API of CompletableFuture:
     * CompletableFuture#get() does not wrap CancellationException in ExecutionException (nor does KafkaFuture).
     * CompletableFuture#get() always wraps the _cause_ of a CompletionException in ExecutionException
     * (which KafkaFuture does not).
     *
     * The semantics for KafkaFuture are that all exceptional completions of the future (via #completeExceptionally()
     * or exceptions from dependants) manifest as ExecutionException, as observed via both get() and getNow().
     */
    private void maybeThrowCancellationException(Throwable cause) {
        if (cause instanceof CancellationException) {
            throw (CancellationException) cause;
        }
    }

    /**
     * Waits if necessary for this future to complete, and then returns its result.
     */
    @Override
    public T get() throws InterruptedException, ExecutionException {
        try {
            return completableFuture.get();
        } catch (ExecutionException e) {
            // Cancellation must surface as an (unchecked) CancellationException, not ExecutionException.
            maybeThrowCancellationException(e.getCause());
            throw e;
        }
    }

    /**
     * Waits if necessary for at most the given time for this future to complete, and then returns
     * its result, if available.
     */
    @Override
    public T get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException,
            TimeoutException {
        try {
            return completableFuture.get(timeout, unit);
        } catch (ExecutionException e) {
            maybeThrowCancellationException(e.getCause());
            throw e;
        }
    }

    /**
     * Returns the result value (or throws any encountered exception) if completed, else returns
     * the given valueIfAbsent.
     */
    @Override
    public T getNow(T valueIfAbsent) throws ExecutionException {
        try {
            return completableFuture.getNow(valueIfAbsent);
        } catch (CompletionException e) {
            maybeThrowCancellationException(e.getCause());
            // Note, unlike CompletableFuture#get() which throws ExecutionException, CompletableFuture#getNow()
            // throws CompletionException, thus needs rewrapping to conform to KafkaFuture API,
            // where KafkaFuture#getNow() throws ExecutionException.
            throw new ExecutionException(e.getCause());
        }
    }

    /**
     * Returns true if this CompletableFuture was cancelled before it completed normally.
     */
    @Override
    public boolean isCancelled() {
        if (isDependant) {
            // Having isCancelled() for a dependent future just return
            // CompletableFuture.isCancelled() would break the historical KafkaFuture behaviour because
            // CompletableFuture#isCancelled() just checks for the exception being CancellationException
            // whereas it will be a CompletionException wrapping a CancellationException
            // due needing to compensate for CompletableFuture's CompletionException unwrapping
            // shenanigans in other methods.
            try {
                completableFuture.getNow(null);
                return false;
            } catch (Exception e) {
                return e instanceof CompletionException
                        && e.getCause() instanceof CancellationException;
            }
        } else {
            return completableFuture.isCancelled();
        }
    }

    /**
     * Returns true if this CompletableFuture completed exceptionally, in any way.
     */
    @Override
    public boolean isCompletedExceptionally() {
        return completableFuture.isCompletedExceptionally();
    }

    /**
     * Returns true if completed in any fashion: normally, exceptionally, or via cancellation.
     */
    @Override
    public boolean isDone() {
        return completableFuture.isDone();
    }

    @Override
    public String toString() {
        T value = null;
        Throwable exception = null;
        try {
            value = completableFuture.getNow(null);
        } catch (CompletionException e) {
            exception = e.getCause();
        } catch (Exception e) {
            exception = e;
        }
        return String.format("KafkaFuture{value=%s,exception=%s,done=%b}", value, exception,
                exception != null || value != null);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/internals/PartitionStates.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.internals;

import org.apache.kafka.common.TopicPartition;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.BiConsumer;

/**
 * This class is a useful building block for doing fetch requests where topic partitions have to be rotated via
 * round-robin to ensure fairness and some level of determinism given the existence of a limit on the fetch response
 * size. Because the serialization of fetch requests is more efficient if all partitions for the same topic are grouped
 * together, we do such grouping in the method `set`.
 *
 * As partitions are moved to the end, the same topic may be repeated more than once. In the optimal case, a single
 * topic would "wrap around" and appear twice. However, as partitions are fetched in different orders and partition
 * leadership changes, we will deviate from the optimal. If this turns out to be an issue in practice, we can improve
 * it by tracking the partitions per node or calling `set` every so often.
 *
 * Note that this class is not thread-safe with the exception of {@link #size()} which returns the number of
 * partitions currently tracked.
 */
public class PartitionStates<S> {

    // LinkedHashMap: iteration order is insertion order, which encodes the round-robin rotation.
    private final LinkedHashMap<TopicPartition, S> map = new LinkedHashMap<>();
    // Live unmodifiable view over the key set; reflects subsequent mutations of `map`.
    private final Set<TopicPartition> partitionSetView = Collections.unmodifiableSet(map.keySet());

    /* the number of partitions currently tracked, published via a volatile for thread-safe reads */
    private volatile int size = 0;

    public PartitionStates() {}

    /**
     * Moves the partition to the end of the iteration order if present; no-op otherwise.
     * Does not call updateSize() because the number of entries is unchanged either way.
     */
    public void moveToEnd(TopicPartition topicPartition) {
        S state = map.remove(topicPartition);
        if (state != null)
            map.put(topicPartition, state);
    }

    /**
     * Inserts/replaces the state and moves the partition to the end of the iteration order.
     * The explicit remove-then-put is needed because put() alone would keep the old position.
     */
    public void updateAndMoveToEnd(TopicPartition topicPartition, S state) {
        map.remove(topicPartition);
        map.put(topicPartition, state);
        updateSize();
    }

    /** Inserts/replaces the state, keeping the partition's current iteration position. */
    public void update(TopicPartition topicPartition, S state) {
        map.put(topicPartition, state);
        updateSize();
    }

    public void remove(TopicPartition topicPartition) {
        map.remove(topicPartition);
        updateSize();
    }

    /**
     * Returns an unmodifiable view of the partitions in current iteration order;
     * changes to this PartitionStates instance will be reflected in this view.
     */
    public Set<TopicPartition> partitionSet() {
        return partitionSetView;
    }

    public void clear() {
        map.clear();
        updateSize();
    }

    public boolean contains(TopicPartition topicPartition) {
        return map.containsKey(topicPartition);
    }

    /** Iterator over the state values in current iteration order (backed by the map). */
    public Iterator<S> stateIterator() {
        return map.values().iterator();
    }

    public void forEach(BiConsumer<TopicPartition, S> biConsumer) {
        map.forEach(biConsumer);
    }

    /** Unmodifiable view of the partition-to-state mapping. */
    public Map<TopicPartition, S> partitionStateMap() {
        return Collections.unmodifiableMap(map);
    }

    /**
     * Returns the partition state values in order (copied into a new list).
     */
    public List<S> partitionStateValues() {
        return new ArrayList<>(map.values());
    }

    public S stateValue(TopicPartition topicPartition) {
        return map.get(topicPartition);
    }

    /**
     * Get the number of partitions that are currently being tracked. This is thread-safe.
     */
    public int size() {
        return size;
    }

    /**
     * Update the builder to have the received map as its state (i.e. the previous state is cleared). The builder will
     * "batch by topic", so if we have a, b and c, each with two partitions, we may end up with something like the
     * following (the order of topics and partitions within topics is dependent on the iteration order of the received
     * map): a0, a1, b1, b0, c0, c1.
     */
    public void set(Map<TopicPartition, S> partitionToState) {
        map.clear();
        update(partitionToState);
        updateSize();
    }

    // Refreshes the volatile size counter after any structural mutation.
    private void updateSize() {
        size = map.size();
    }

    // Copies the entries into `map`, grouping all partitions of the same topic together
    // (first-seen topic order, then first-seen partition order within each topic).
    private void update(Map<TopicPartition, S> partitionToState) {
        LinkedHashMap<String, List<TopicPartition>> topicToPartitions = new LinkedHashMap<>();
        for (TopicPartition tp : partitionToState.keySet()) {
            List<TopicPartition> partitions = topicToPartitions.computeIfAbsent(tp.topic(), k -> new ArrayList<>());
            partitions.add(tp);
        }
        for (Map.Entry<String, List<TopicPartition>> entry : topicToPartitions.entrySet()) {
            for (TopicPartition tp : entry.getValue()) {
                S state = partitionToState.get(tp);
                map.put(tp, state);
            }
        }
    }

    /**
     * Immutable (topicPartition, value) pair; both components must be non-null.
     */
    public static class PartitionState<S> {
        private final TopicPartition topicPartition;
        private final S value;

        public PartitionState(TopicPartition topicPartition, S state) {
            this.topicPartition = Objects.requireNonNull(topicPartition);
            this.value = Objects.requireNonNull(state);
        }

        public S value() {
            return value;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o)
                return true;
            if (o == null || getClass() != o.getClass())
                return false;

            PartitionState<?> that = (PartitionState<?>) o;

            return topicPartition.equals(that.topicPartition) && value.equals(that.value);
        }

        @Override
        public int hashCode() {
            int result = topicPartition.hashCode();
            result = 31 * result + value.hashCode();
            return result;
        }

        public TopicPartition topicPartition() {
            return topicPartition;
        }

        @Override
        public String toString() {
            return "PartitionState(" + topicPartition + "=" + value + ')';
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/internals/Topic.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.internals;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.utils.Utils;

import java.util.Collections;
import java.util.Set;
import java.util.function.Consumer;

/**
 * Utility holder for well-known topic names plus topic-name validation and
 * metric-name-collision helpers.
 */
public class Topic {

    public static final String GROUP_METADATA_TOPIC_NAME = "__consumer_offsets";
    public static final String TRANSACTION_STATE_TOPIC_NAME = "__transaction_state";
    public static final String CLUSTER_METADATA_TOPIC_NAME = "__cluster_metadata";
    public static final TopicPartition CLUSTER_METADATA_TOPIC_PARTITION = new TopicPartition(
        CLUSTER_METADATA_TOPIC_NAME,
        0
    );
    public static final String LEGAL_CHARS = "[a-zA-Z0-9._-]";

    private static final Set<String> INTERNAL_TOPICS = Collections.unmodifiableSet(
            Utils.mkSet(GROUP_METADATA_TOPIC_NAME, TRANSACTION_STATE_TOPIC_NAME));

    private static final int MAX_NAME_LENGTH = 249;

    /**
     * Validates the topic name, throwing {@link InvalidTopicException} if it is invalid.
     */
    public static void validate(String topic) {
        validate(topic, "Topic name", message -> {
            throw new InvalidTopicException(message);
        });
    }

    /**
     * Returns a human-readable reason the name is invalid, or {@code null} when valid.
     * Checks are ordered so the most specific message wins.
     */
    private static String detectInvalidTopic(String name) {
        if (name.isEmpty())
            return "the empty string is not allowed";
        if (".".equals(name))
            return "'.' is not allowed";
        if ("..".equals(name))
            return "'..' is not allowed";
        if (name.length() > MAX_NAME_LENGTH)
            return "the length of '" + name + "' is longer than the max allowed length " + MAX_NAME_LENGTH;
        if (!containsValidPattern(name))
            return "'" + name + "' contains one or more characters other than " +
                    "ASCII alphanumerics, '.', '_' and '-'";
        return null;
    }

    /** True when the name passes all topic-name validation rules. */
    public static boolean isValid(String name) {
        return detectInvalidTopic(name) == null;
    }

    /**
     * Validates the name; on failure, hands "&lt;logPrefix&gt; is invalid: &lt;reason&gt;"
     * to the supplied consumer (which typically throws).
     */
    public static void validate(String name, String logPrefix, Consumer<String> throwableConsumer) {
        String reason = detectInvalidTopic(name);
        if (reason != null)
            throwableConsumer.accept(logPrefix + " is invalid: " + reason);
    }

    /** True for Kafka's internal topics (consumer offsets, transaction state). */
    public static boolean isInternal(String topic) {
        return INTERNAL_TOPICS.contains(topic);
    }

    /**
     * Due to limitations in metric names, topics with a period ('.') or underscore ('_') could collide.
     *
     * @param topic The topic to check for colliding character
     * @return true if the topic has collision characters
     */
    public static boolean hasCollisionChars(String topic) {
        return topic.contains("_") || topic.contains(".");
    }

    /**
     * Unify topic name with a period ('.') or underscore ('_'); this is only used to check collision and will not
     * be used to really change the topic name.
     *
     * @param topic A topic to unify
     * @return A unified topic name
     */
    public static String unifyCollisionChars(String topic) {
        return topic.replace('.', '_');
    }

    /**
     * Returns true if the topic names collide due to a period ('.') or underscore ('_') in the same position.
     *
     * @param topicA A topic to check for collision
     * @param topicB A topic to check for collision
     * @return true if the topics collide
     */
    public static boolean hasCollision(String topicA, String topicB) {
        return unifyCollisionChars(topicA).equals(unifyCollisionChars(topicB));
    }

    /**
     * Valid characters for Kafka topics are the ASCII alphanumerics, '.', '_', and '-'
     */
    static boolean containsValidPattern(String topic) {
        for (int i = 0; i < topic.length(); ++i) {
            char c = topic.charAt(i);

            // We don't use Character.isLetterOrDigit(c) because it's slower
            boolean valid = (c >= 'a' && c <= 'z')
                    || (c >= 'A' && c <= 'Z')
                    || (c >= '0' && c <= '9')
                    || c == '.' || c == '_' || c == '-';
            if (!valid)
                return false;
        }
        return true;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/memory/GarbageCollectedMemoryPool.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.memory;

import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.utils.Utils;

import java.lang.ref.ReferenceQueue;
import java.lang.ref.WeakReference;
import java.nio.ByteBuffer;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/**
 * An extension of SimpleMemoryPool that tracks allocated buffers and logs an error when they "leak"
 * (when they are garbage-collected without having been release()ed).
 * THIS IMPLEMENTATION IS A DEVELOPMENT/DEBUGGING AID AND IS NOT MEANT FOR PRODUCTION USE.
 */
public class GarbageCollectedMemoryPool extends SimpleMemoryPool implements AutoCloseable {

    // Queue onto which the JVM enqueues BufferReferences once their referent buffer is GC'ed.
    private final ReferenceQueue<ByteBuffer> garbageCollectedBuffers = new ReferenceQueue<>();
    //serves 2 purposes - 1st it maintains the ref objects reachable (which is a requirement for them
    //to ever be enqueued), 2nd keeps some (small) metadata for every buffer allocated
    private final Map<BufferReference, BufferMetadata> buffersInFlight = new ConcurrentHashMap<>();
    private final GarbageCollectionListener gcListener = new GarbageCollectionListener();
    private final Thread gcListenerThread;
    private volatile boolean alive = true;

    /**
     * @param sizeBytes total pool size, in bytes
     * @param maxSingleAllocationSize largest single allocation allowed, in bytes
     * @param strict if true, allocation only succeeds when the full requested size is available
     * @param oomPeriodSensor optional sensor that records how long allocation was refused (may be null)
     */
    public GarbageCollectedMemoryPool(long sizeBytes, int maxSingleAllocationSize, boolean strict, Sensor oomPeriodSensor) {
        super(sizeBytes, maxSingleAllocationSize, strict, oomPeriodSensor);
        this.alive = true;
        this.gcListenerThread = new Thread(gcListener, "memory pool GC listener");
        this.gcListenerThread.setDaemon(true); //so we dont need to worry about shutdown
        this.gcListenerThread.start();
    }

    @Override
    protected void bufferToBeReturned(ByteBuffer justAllocated) {
        BufferReference ref = new BufferReference(justAllocated, garbageCollectedBuffers);
        BufferMetadata metadata = new BufferMetadata(justAllocated.capacity());
        if (buffersInFlight.put(ref, metadata) != null)
            //this is a bug. it means either 2 different co-existing buffers got
            //the same identity or we failed to register a released/GC'ed buffer
            throw new IllegalStateException("allocated buffer identity " + ref.hashCode + " already registered as in use?!");
        // BUG FIX: previously logged the inherited field sizeBytes (the pool's TOTAL size)
        // while claiming to report the allocated buffer's size; log the buffer's size instead.
        log.trace("allocated buffer of size {} and identity {}", metadata.sizeBytes, ref.hashCode);
    }

    @Override
    protected void bufferToBeReleased(ByteBuffer justReleased) {
        BufferReference ref = new BufferReference(justReleased); //used for lookup only
        BufferMetadata metadata = buffersInFlight.remove(ref);
        if (metadata == null)
            //its impossible for the buffer to have already been GC'ed (because we have a hard ref to it
            //in the function arg) so this means either a double free or not our buffer.
            throw new IllegalArgumentException("returned buffer " + ref.hashCode + " was never allocated by this pool");
        if (metadata.sizeBytes != justReleased.capacity()) {
            //this is a bug
            throw new IllegalStateException("buffer " + ref.hashCode + " has capacity " + justReleased.capacity()
                + " but recorded as " + metadata.sizeBytes);
        }
        log.trace("released buffer of size {} and identity {}", metadata.sizeBytes, ref.hashCode);
    }

    @Override
    public void close() {
        alive = false;
        gcListenerThread.interrupt();
    }

    /**
     * Background task that reclaims the accounting for buffers that were GC'ed without being
     * release()ed, returning their size to availableMemory and logging the leak.
     */
    private class GarbageCollectionListener implements Runnable {
        @Override
        public void run() {
            while (alive) {
                try {
                    BufferReference ref = (BufferReference) garbageCollectedBuffers.remove(); //blocks
                    ref.clear();
                    //this cannot race with a release() call because an object is either reachable or not,
                    //release() can only happen before its GC'ed, and enqueue can only happen after.
                    //if the ref was enqueued it must then not have been released
                    BufferMetadata metadata = buffersInFlight.remove(ref);
                    if (metadata == null) {
                        //it can happen rarely that the buffer was release()ed properly (so no metadata) and yet
                        //the reference object to it remains reachable for a short period of time after release()
                        //and hence gets enqueued. this is because we keep refs in a ConcurrentHashMap which cleans
                        //up keys lazily.
                        continue;
                    }
                    availableMemory.addAndGet(metadata.sizeBytes);
                    log.error("Reclaimed buffer of size {} and identity {} that was not properly release()ed. This is a bug.", metadata.sizeBytes, ref.hashCode);
                } catch (InterruptedException e) {
                    log.debug("interrupted", e);
                    //ignore, we're a daemon thread
                }
            }
            log.info("GC listener shutting down");
        }
    }

    // Small per-buffer record kept in buffersInFlight; only the capacity is needed to reclaim accounting.
    private static final class BufferMetadata {
        private final int sizeBytes;

        private BufferMetadata(int sizeBytes) {
            this.sizeBytes = sizeBytes;
        }
    }

    // WeakReference keyed by the referent's identity hash so lookups survive the referent being GC'ed.
    private static final class BufferReference extends WeakReference<ByteBuffer> {
        private final int hashCode;

        private BufferReference(ByteBuffer referent) { //used for lookup purposes only - no queue required.
            this(referent, null);
        }

        private BufferReference(ByteBuffer referent, ReferenceQueue<? super ByteBuffer> q) {
            super(referent, q);
            hashCode = System.identityHashCode(referent);
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) { //this is important to find leaked buffers (by ref identity)
                return true;
            }
            if (o == null || getClass() != o.getClass()) {
                return false;
            }
            BufferReference that = (BufferReference) o;
            if (hashCode != that.hashCode) {
                return false;
            }
            ByteBuffer thisBuf = get();
            if (thisBuf == null) {
                //our buffer has already been GC'ed, yet "that" is not us. so not same buffer
                return false;
            }
            ByteBuffer thatBuf = that.get();
            return thisBuf == thatBuf;
        }

        @Override
        public int hashCode() {
            return hashCode;
        }
    }

    @Override
    public String toString() {
        long allocated = sizeBytes - availableMemory.get();
        return "GarbageCollectedMemoryPool{" + Utils.formatBytes(allocated) + "/" + Utils.formatBytes(sizeBytes)
            + " used in " + buffersInFlight.size() + " buffers}";
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/memory/MemoryPool.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.memory;

import java.nio.ByteBuffer;

/**
 * A common memory pool interface for non-blocking pools.
 * Every buffer returned from {@link #tryAllocate(int)} must always be {@link #release(ByteBuffer) released}.
 */
public interface MemoryPool {
    /**
     * A pass-through pool that does no accounting at all: every allocation is served directly
     * by {@code ByteBuffer.allocate}, releases are no-ops, and the pool reports itself as
     * unbounded ({@code Long.MAX_VALUE}) and never out of memory.
     */
    MemoryPool NONE = new MemoryPool() {
        @Override
        public ByteBuffer tryAllocate(int sizeBytes) {
            return ByteBuffer.allocate(sizeBytes);
        }

        @Override
        public void release(ByteBuffer previouslyAllocated) {
            // deliberately a no-op: nothing was tracked at allocation time
        }

        @Override
        public long size() {
            return Long.MAX_VALUE;
        }

        @Override
        public long availableMemory() {
            return Long.MAX_VALUE;
        }

        @Override
        public boolean isOutOfMemory() {
            return false;
        }

        @Override
        public String toString() {
            return "NONE";
        }
    };

    /**
     * Attempts to obtain a ByteBuffer of the given size without blocking.
     * The returned buffer is exactly {@code sizeBytes} long (even if the pool backs it with a
     * larger chunk of memory) and must eventually be handed back via {@link #release(ByteBuffer)}.
     *
     * @param sizeBytes the number of bytes required
     * @return a buffer of the requested size, or null if the pool has no memory to give
     */
    ByteBuffer tryAllocate(int sizeBytes);

    /**
     * Hands a buffer back to the pool so its memory can be reused.
     *
     * @param previouslyAllocated a buffer previously returned from tryAllocate()
     */
    void release(ByteBuffer previouslyAllocated);

    /**
     * Reports the total capacity of this pool.
     *
     * @return total size, in bytes
     */
    long size();

    /**
     * Reports how much memory this pool can still hand out.
     * NOTE: the result may be negative, since pools are allowed to over-allocate
     * to avoid starvation issues.
     *
     * @return bytes available
     */
    long availableMemory();

    /**
     * Indicates whether the pool is currently unable to serve any allocation -
     * i.e. outstanding buffers meet or exceed the pool size and some must be
     * released before further allocations can succeed.
     *
     * Equivalent to {@code availableMemory() <= 0}.
     *
     * @return true if out of memory
     */
    boolean isOutOfMemory();
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/memory/SimpleMemoryPool.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.common.memory;

import java.nio.ByteBuffer;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * a simple pool implementation. this implementation just provides a limit on the total outstanding memory.
 * any buffer allocated must be release()ed always otherwise memory is not marked as reclaimed (and "leak"s)
 */
public class SimpleMemoryPool implements MemoryPool {
    protected final Logger log = LoggerFactory.getLogger(getClass()); //subclass-friendly

    protected final long sizeBytes;
    protected final boolean strict;
    protected final AtomicLong availableMemory;
    protected final int maxSingleAllocationSize;
    protected final AtomicLong startOfNoMemPeriod = new AtomicLong(); //nanoseconds
    protected volatile Sensor oomTimeSensor;

    /**
     * @param sizeInBytes total pool size, in bytes; must be positive
     * @param maxSingleAllocationBytes largest single allocation allowed; must be positive and no larger than the pool
     * @param strict if true, only allocate when the full requested size is available;
     *               if false, allocate whenever any memory is available (available memory may go negative)
     * @param oomPeriodSensor optional sensor that records (in millis) how long allocation was refused; may be null
     * @throws IllegalArgumentException if the size arguments are out of range
     */
    public SimpleMemoryPool(long sizeInBytes, int maxSingleAllocationBytes, boolean strict, Sensor oomPeriodSensor) {
        if (sizeInBytes <= 0 || maxSingleAllocationBytes <= 0 || maxSingleAllocationBytes > sizeInBytes)
            // BUG FIX: message previously concatenated to "...smaller than size.provided ..." (missing
            // separator) and claimed "smaller than" although equality is accepted by the check above.
            throw new IllegalArgumentException("must provide a positive size and max single allocation size no larger than size. " +
                "Provided " + sizeInBytes + " and " + maxSingleAllocationBytes + " respectively");
        this.sizeBytes = sizeInBytes;
        this.strict = strict;
        this.availableMemory = new AtomicLong(sizeInBytes);
        this.maxSingleAllocationSize = maxSingleAllocationBytes;
        this.oomTimeSensor = oomPeriodSensor;
    }

    @Override
    public ByteBuffer tryAllocate(int sizeBytes) {
        if (sizeBytes < 1)
            throw new IllegalArgumentException("requested size " + sizeBytes + " <= 0"); // BUG FIX: message read e.g. "0<=0"
        if (sizeBytes > maxSingleAllocationSize)
            throw new IllegalArgumentException("requested size " + sizeBytes + " is larger than maxSingleAllocationSize " + maxSingleAllocationSize);

        long available;
        boolean success = false;
        //in strict mode we will only allocate memory if we have at least the size required.
        //in non-strict mode we will allocate memory if we have _any_ memory available (so available memory
        //can dip into the negative and max allocated memory would be sizeBytes + maxSingleAllocationSize)
        long threshold = strict ? sizeBytes : 1;
        while ((available = availableMemory.get()) >= threshold) {
            success = availableMemory.compareAndSet(available, available - sizeBytes);
            if (success)
                break;
        }

        if (success) {
            maybeRecordEndOfDrySpell();
        } else {
            if (oomTimeSensor != null) {
                //remember when the current dry spell started (only the first refusal wins the CAS)
                startOfNoMemPeriod.compareAndSet(0, System.nanoTime());
            }
            log.trace("refused to allocate buffer of size {}", sizeBytes);
            return null;
        }

        ByteBuffer allocated = ByteBuffer.allocate(sizeBytes);
        bufferToBeReturned(allocated);
        return allocated;
    }

    @Override
    public void release(ByteBuffer previouslyAllocated) {
        if (previouslyAllocated == null)
            throw new IllegalArgumentException("provided null buffer");

        bufferToBeReleased(previouslyAllocated);
        availableMemory.addAndGet(previouslyAllocated.capacity());
        maybeRecordEndOfDrySpell();
    }

    @Override
    public long size() {
        return sizeBytes;
    }

    @Override
    public long availableMemory() {
        return availableMemory.get();
    }

    @Override
    public boolean isOutOfMemory() {
        return availableMemory.get() <= 0;
    }

    //allows subclasses to do their own bookkeeping (and validation) _before_ memory is returned to client code.
    protected void bufferToBeReturned(ByteBuffer justAllocated) {
        log.trace("allocated buffer of size {} ", justAllocated.capacity());
    }

    //allows subclasses to do their own bookkeeping (and validation) _before_ memory is marked as reclaimed.
    protected void bufferToBeReleased(ByteBuffer justReleased) {
        log.trace("released buffer of size {}", justReleased.capacity());
    }

    @Override
    public String toString() {
        long allocated = sizeBytes - availableMemory.get();
        return "SimpleMemoryPool{" + Utils.formatBytes(allocated) + "/" + Utils.formatBytes(sizeBytes) + " used}";
    }

    // If a "dry spell" (period during which allocations were refused) was in progress, record its
    // duration on the sensor and reset the start marker.
    protected void maybeRecordEndOfDrySpell() {
        if (oomTimeSensor != null) {
            long startOfDrySpell = startOfNoMemPeriod.getAndSet(0);
            if (startOfDrySpell != 0) {
                //how long were we refusing allocation requests for
                oomTimeSensor.record((System.nanoTime() - startOfDrySpell) / 1000000.0); //fractional (double) millis
            }
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/memory/package-info.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Provides mechanisms for explicitly managing memory used by an application. * <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong> */ package org.apache.kafka.common.memory;
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AddOffsetsToTxnRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Wire-format data for the AddOffsetsToTxn request (api key 25), versions 0-3.
 * Versions 0-2 use fixed-length encodings; version 3 switches to the flexible
 * format (compact, varint-length strings plus a tagged-fields section).
 * Read/write/addSize process fields in exact schema order - do not hand-edit.
 */
public class AddOffsetsToTxnRequestData implements ApiMessage {
    String transactionalId;
    long producerId;
    short producerEpoch;
    String groupId;
    // Tagged fields received on the wire that this (older) schema does not know about.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("transactional_id", Type.STRING, "The transactional id corresponding to the transaction."),
            new Field("producer_id", Type.INT64, "Current producer id in use by the transactional id."),
            new Field("producer_epoch", Type.INT16, "Current epoch associated with the producer id."),
            new Field("group_id", Type.STRING, "The unique group identifier.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema SCHEMA_2 = SCHEMA_1;

    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("transactional_id", Type.COMPACT_STRING, "The transactional id corresponding to the transaction."),
            new Field("producer_id", Type.INT64, "Current producer id in use by the transactional id."),
            new Field("producer_epoch", Type.INT16, "Current epoch associated with the producer id."),
            new Field("group_id", Type.COMPACT_STRING, "The unique group identifier."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 3;

    public AddOffsetsToTxnRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public AddOffsetsToTxnRequestData() {
        this.transactionalId = "";
        this.producerId = 0L;
        this.producerEpoch = (short) 0;
        this.groupId = "";
    }

    @Override
    public short apiKey() {
        return 25;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 3;
    }

    // Deserializes fields in schema order; v3+ strings are compact (varint length + 1).
    @Override
    public void read(Readable _readable, short _version) {
        {
            int length;
            if (_version >= 3) {
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readShort();
            }
            if (length < 0) {
                throw new RuntimeException("non-nullable field transactionalId was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field transactionalId had invalid length " + length);
            } else {
                this.transactionalId = _readable.readString(length);
            }
        }
        this.producerId = _readable.readLong();
        this.producerEpoch = _readable.readShort();
        {
            int length;
            if (_version >= 3) {
                length = _readable.readUnsignedVarint() - 1;
            } else {
                length = _readable.readShort();
            }
            if (length < 0) {
                throw new RuntimeException("non-nullable field groupId was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field groupId had invalid length " + length);
            } else {
                this.groupId = _readable.readString(length);
            }
        }
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            // Flexible versions carry a trailing tagged-fields section; unknown tags are preserved.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    // Serializes fields in schema order; addSize() must have been called first so the
    // cache holds the UTF-8 bytes of each string.
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = _cache.getSerializedValue(transactionalId);
            if (_version >= 3) {
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
            } else {
                _writable.writeShort((short) _stringBytes.length);
            }
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeLong(producerId);
        _writable.writeShort(producerEpoch);
        {
            byte[] _stringBytes = _cache.getSerializedValue(groupId);
            if (_version >= 3) {
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
            } else {
                _writable.writeShort((short) _stringBytes.length);
            }
            _writable.writeByteArray(_stringBytes);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    // Accumulates the serialized size and caches string bytes for the subsequent write().
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = transactionalId.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'transactionalId' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(transactionalId, _stringBytes);
            if (_version >= 3) {
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            } else {
                _size.addBytes(_stringBytes.length + 2);
            }
        }
        _size.addBytes(8);
        _size.addBytes(2);
        {
            byte[] _stringBytes = groupId.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'groupId' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(groupId, _stringBytes);
            if (_version >= 3) {
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            } else {
                _size.addBytes(_stringBytes.length + 2);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 3) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AddOffsetsToTxnRequestData)) return false;
        AddOffsetsToTxnRequestData other = (AddOffsetsToTxnRequestData) obj;
        if (this.transactionalId == null) {
            if (other.transactionalId != null) return false;
        } else {
            if (!this.transactionalId.equals(other.transactionalId)) return false;
        }
        if (producerId != other.producerId) return false;
        if (producerEpoch != other.producerEpoch) return false;
        if (this.groupId == null) {
            if (other.groupId != null) return false;
        } else {
            if (!this.groupId.equals(other.groupId)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (transactionalId == null ? 0 : transactionalId.hashCode());
        hashCode = 31 * hashCode + ((int) (producerId >> 32) ^ (int) producerId);
        hashCode = 31 * hashCode + producerEpoch;
        hashCode = 31 * hashCode + (groupId == null ? 0 : groupId.hashCode());
        return hashCode;
    }

    // Shallow copy is sufficient here: all fields are primitives or immutable Strings.
    @Override
    public AddOffsetsToTxnRequestData duplicate() {
        AddOffsetsToTxnRequestData _duplicate = new AddOffsetsToTxnRequestData();
        _duplicate.transactionalId = transactionalId;
        _duplicate.producerId = producerId;
        _duplicate.producerEpoch = producerEpoch;
        _duplicate.groupId = groupId;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "AddOffsetsToTxnRequestData("
            + "transactionalId=" + ((transactionalId == null) ? "null" : "'" + transactionalId.toString() + "'")
            + ", producerId=" + producerId
            + ", producerEpoch=" + producerEpoch
            + ", groupId=" + ((groupId == null) ? "null" : "'" + groupId.toString() + "'")
            + ")";
    }

    public String transactionalId() {
        return this.transactionalId;
    }

    public long producerId() {
        return this.producerId;
    }

    public short producerEpoch() {
        return this.producerEpoch;
    }

    public String groupId() {
        return this.groupId;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public AddOffsetsToTxnRequestData setTransactionalId(String v) {
        this.transactionalId = v;
        return this;
    }

    public AddOffsetsToTxnRequestData setProducerId(long v) {
        this.producerId = v;
        return this;
    }

    public AddOffsetsToTxnRequestData setProducerEpoch(short v) {
        this.producerEpoch = v;
        return this;
    }

    public AddOffsetsToTxnRequestData setGroupId(String v) {
        this.groupId = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AddOffsetsToTxnRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.AddOffsetsToTxnRequestData.*;

/**
 * Converts {@link AddOffsetsToTxnRequestData} to and from a Jackson JSON tree.
 * All four fields exist in every supported version, so neither method branches on _version.
 */
public class AddOffsetsToTxnRequestDataJsonConverter {
    // Builds a message object from a JSON node, failing fast on any missing or mistyped field.
    public static AddOffsetsToTxnRequestData read(JsonNode _node, short _version) {
        AddOffsetsToTxnRequestData _object = new AddOffsetsToTxnRequestData();
        JsonNode _transactionalIdNode = _node.get("transactionalId");
        if (_transactionalIdNode == null) {
            throw new RuntimeException("AddOffsetsToTxnRequestData: unable to locate field 'transactionalId', which is mandatory in version " + _version);
        } else {
            if (!_transactionalIdNode.isTextual()) {
                throw new RuntimeException("AddOffsetsToTxnRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.transactionalId = _transactionalIdNode.asText();
        }
        JsonNode _producerIdNode = _node.get("producerId");
        if (_producerIdNode == null) {
            throw new RuntimeException("AddOffsetsToTxnRequestData: unable to locate field 'producerId', which is mandatory in version " + _version);
        } else {
            _object.producerId = MessageUtil.jsonNodeToLong(_producerIdNode, "AddOffsetsToTxnRequestData");
        }
        JsonNode _producerEpochNode = _node.get("producerEpoch");
        if (_producerEpochNode == null) {
            throw new RuntimeException("AddOffsetsToTxnRequestData: unable to locate field 'producerEpoch', which is mandatory in version " + _version);
        } else {
            _object.producerEpoch = MessageUtil.jsonNodeToShort(_producerEpochNode, "AddOffsetsToTxnRequestData");
        }
        JsonNode _groupIdNode = _node.get("groupId");
        if (_groupIdNode == null) {
            throw new RuntimeException("AddOffsetsToTxnRequestData: unable to locate field 'groupId', which is mandatory in version " + _version);
        } else {
            if (!_groupIdNode.isTextual()) {
                throw new RuntimeException("AddOffsetsToTxnRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.groupId = _groupIdNode.asText();
        }
        return _object;
    }

    // Renders the message as a JSON object; _serializeRecords is unused since this
    // message type carries no record data.
    public static JsonNode write(AddOffsetsToTxnRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("transactionalId", new TextNode(_object.transactionalId));
        _node.set("producerId", new LongNode(_object.producerId));
        _node.set("producerEpoch", new ShortNode(_object.producerEpoch));
        _node.set("groupId", new TextNode(_object.groupId));
        return _node;
    }

    public static JsonNode write(AddOffsetsToTxnRequestData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AddOffsetsToTxnResponseData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Wire-format data for the AddOffsetsToTxn response (api key 25), versions 0-3.
 * Versions 0-2 are fixed-format; version 3 adds a flexible tagged-fields section.
 * Read/write/addSize process fields in exact schema order - do not hand-edit.
 */
public class AddOffsetsToTxnResponseData implements ApiMessage {
    int throttleTimeMs;
    short errorCode;
    // Tagged fields received on the wire that this (older) schema does not know about.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The response error code, or 0 if there was no error.")
        );

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema SCHEMA_2 = SCHEMA_1;

    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The response error code, or 0 if there was no error."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 3;

    public AddOffsetsToTxnResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    public AddOffsetsToTxnResponseData() {
        this.throttleTimeMs = 0;
        this.errorCode = (short) 0;
    }

    @Override
    public short apiKey() {
        return 25;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 3;
    }

    // Deserializes fields in schema order; v3+ additionally consumes the tagged-fields section.
    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        this.errorCode = _readable.readShort();
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    // Serializes fields in schema order; tagged fields are only legal for v3+.
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        _writable.writeShort(errorCode);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    // Accumulates the serialized size: 4 bytes throttleTimeMs + 2 bytes errorCode + tagged fields.
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(4);
        _size.addBytes(2);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 3) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AddOffsetsToTxnResponseData)) return false;
        AddOffsetsToTxnResponseData other = (AddOffsetsToTxnResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (errorCode != other.errorCode) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + errorCode;
        return hashCode;
    }

    // Shallow copy is sufficient here: both fields are primitives.
    @Override
    public AddOffsetsToTxnResponseData duplicate() {
        AddOffsetsToTxnResponseData _duplicate = new AddOffsetsToTxnResponseData();
        _duplicate.throttleTimeMs = throttleTimeMs;
        _duplicate.errorCode = errorCode;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "AddOffsetsToTxnResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", errorCode=" + errorCode
            + ")";
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public short errorCode() {
        return this.errorCode;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public AddOffsetsToTxnResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public AddOffsetsToTxnResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AddOffsetsToTxnResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.AddOffsetsToTxnResponseData.*; public class AddOffsetsToTxnResponseDataJsonConverter { public static AddOffsetsToTxnResponseData read(JsonNode _node, short _version) { AddOffsetsToTxnResponseData _object = new AddOffsetsToTxnResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { throw new RuntimeException("AddOffsetsToTxnResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "AddOffsetsToTxnResponseData"); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("AddOffsetsToTxnResponseData: unable to locate field 
'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "AddOffsetsToTxnResponseData"); } return _object; } public static JsonNode write(AddOffsetsToTxnResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); _node.set("errorCode", new ShortNode(_object.errorCode)); return _node; } public static JsonNode write(AddOffsetsToTxnResponseData _object, short _version) { return write(_object, _version, true); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AddPartitionsToTxnRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import 
org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class AddPartitionsToTxnRequestData implements ApiMessage { AddPartitionsToTxnTransactionCollection transactions; String v3AndBelowTransactionalId; long v3AndBelowProducerId; short v3AndBelowProducerEpoch; AddPartitionsToTxnTopicCollection v3AndBelowTopics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("v3_and_below_transactional_id", Type.STRING, "The transactional id corresponding to the transaction."), new Field("v3_and_below_producer_id", Type.INT64, "Current producer id in use by the transactional id."), new Field("v3_and_below_producer_epoch", Type.INT16, "Current epoch associated with the producer id."), new Field("v3_and_below_topics", new ArrayOf(AddPartitionsToTxnTopic.SCHEMA_0), "The partitions to add to the transaction.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("v3_and_below_transactional_id", Type.COMPACT_STRING, "The transactional id corresponding to the transaction."), new Field("v3_and_below_producer_id", Type.INT64, "Current producer id in use by the transactional id."), new Field("v3_and_below_producer_epoch", Type.INT16, "Current epoch associated with the producer id."), new Field("v3_and_below_topics", new CompactArrayOf(AddPartitionsToTxnTopic.SCHEMA_3), "The partitions to add to the transaction."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_4 = new Schema( new Field("transactions", new CompactArrayOf(AddPartitionsToTxnTransaction.SCHEMA_4), "List of transactions to add partitions to."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final 
short HIGHEST_SUPPORTED_VERSION = 4; public AddPartitionsToTxnRequestData(Readable _readable, short _version) { read(_readable, _version); } public AddPartitionsToTxnRequestData() { this.transactions = new AddPartitionsToTxnTransactionCollection(0); this.v3AndBelowTransactionalId = ""; this.v3AndBelowProducerId = 0L; this.v3AndBelowProducerEpoch = (short) 0; this.v3AndBelowTopics = new AddPartitionsToTxnTopicCollection(0); } @Override public short apiKey() { return 24; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { if (_version >= 4) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field transactions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AddPartitionsToTxnTransactionCollection newCollection = new AddPartitionsToTxnTransactionCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AddPartitionsToTxnTransaction(_readable, _version)); } this.transactions = newCollection; } } else { this.transactions = new AddPartitionsToTxnTransactionCollection(0); } if (_version <= 3) { int length; if (_version >= 3) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field v3AndBelowTransactionalId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field v3AndBelowTransactionalId had invalid length " + length); } else { this.v3AndBelowTransactionalId = _readable.readString(length); } } else { this.v3AndBelowTransactionalId = ""; } if (_version <= 3) { this.v3AndBelowProducerId = 
_readable.readLong(); } else { this.v3AndBelowProducerId = 0L; } if (_version <= 3) { this.v3AndBelowProducerEpoch = _readable.readShort(); } else { this.v3AndBelowProducerEpoch = (short) 0; } if (_version <= 3) { if (_version >= 3) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field v3AndBelowTopics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AddPartitionsToTxnTopicCollection newCollection = new AddPartitionsToTxnTopicCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AddPartitionsToTxnTopic(_readable, _version)); } this.v3AndBelowTopics = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field v3AndBelowTopics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AddPartitionsToTxnTopicCollection newCollection = new AddPartitionsToTxnTopicCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AddPartitionsToTxnTopic(_readable, _version)); } this.v3AndBelowTopics = newCollection; } } } else { this.v3AndBelowTopics = new AddPartitionsToTxnTopicCollection(0); } this._unknownTaggedFields = null; if (_version >= 3) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override 
public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 4) { _writable.writeUnsignedVarint(transactions.size() + 1); for (AddPartitionsToTxnTransaction transactionsElement : transactions) { transactionsElement.write(_writable, _cache, _version); } } else { if (!this.transactions.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default transactions at version " + _version); } } if (_version <= 3) { { byte[] _stringBytes = _cache.getSerializedValue(v3AndBelowTransactionalId); if (_version >= 3) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } } else { if (!this.v3AndBelowTransactionalId.equals("")) { throw new UnsupportedVersionException("Attempted to write a non-default v3AndBelowTransactionalId at version " + _version); } } if (_version <= 3) { _writable.writeLong(v3AndBelowProducerId); } else { if (this.v3AndBelowProducerId != 0L) { throw new UnsupportedVersionException("Attempted to write a non-default v3AndBelowProducerId at version " + _version); } } if (_version <= 3) { _writable.writeShort(v3AndBelowProducerEpoch); } else { if (this.v3AndBelowProducerEpoch != (short) 0) { throw new UnsupportedVersionException("Attempted to write a non-default v3AndBelowProducerEpoch at version " + _version); } } if (_version <= 3) { if (_version >= 3) { _writable.writeUnsignedVarint(v3AndBelowTopics.size() + 1); for (AddPartitionsToTxnTopic v3AndBelowTopicsElement : v3AndBelowTopics) { v3AndBelowTopicsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(v3AndBelowTopics.size()); for (AddPartitionsToTxnTopic v3AndBelowTopicsElement : v3AndBelowTopics) { v3AndBelowTopicsElement.write(_writable, _cache, _version); } } } else { if (!this.v3AndBelowTopics.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default 
v3AndBelowTopics at version " + _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 3) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 4) { { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(transactions.size() + 1)); for (AddPartitionsToTxnTransaction transactionsElement : transactions) { transactionsElement.addSize(_size, _cache, _version); } } } if (_version <= 3) { { byte[] _stringBytes = v3AndBelowTransactionalId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'v3AndBelowTransactionalId' field is too long to be serialized"); } _cache.cacheSerializedValue(v3AndBelowTransactionalId, _stringBytes); if (_version >= 3) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } } if (_version <= 3) { _size.addBytes(8); } if (_version <= 3) { _size.addBytes(2); } if (_version <= 3) { { if (_version >= 3) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(v3AndBelowTopics.size() + 1)); } else { _size.addBytes(4); } for (AddPartitionsToTxnTopic v3AndBelowTopicsElement : v3AndBelowTopics) { v3AndBelowTopicsElement.addSize(_size, _cache, _version); } } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } 
if (_version >= 3) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof AddPartitionsToTxnRequestData)) return false; AddPartitionsToTxnRequestData other = (AddPartitionsToTxnRequestData) obj; if (this.transactions == null) { if (other.transactions != null) return false; } else { if (!this.transactions.equals(other.transactions)) return false; } if (this.v3AndBelowTransactionalId == null) { if (other.v3AndBelowTransactionalId != null) return false; } else { if (!this.v3AndBelowTransactionalId.equals(other.v3AndBelowTransactionalId)) return false; } if (v3AndBelowProducerId != other.v3AndBelowProducerId) return false; if (v3AndBelowProducerEpoch != other.v3AndBelowProducerEpoch) return false; if (this.v3AndBelowTopics == null) { if (other.v3AndBelowTopics != null) return false; } else { if (!this.v3AndBelowTopics.equals(other.v3AndBelowTopics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (transactions == null ? 0 : transactions.hashCode()); hashCode = 31 * hashCode + (v3AndBelowTransactionalId == null ? 0 : v3AndBelowTransactionalId.hashCode()); hashCode = 31 * hashCode + ((int) (v3AndBelowProducerId >> 32) ^ (int) v3AndBelowProducerId); hashCode = 31 * hashCode + v3AndBelowProducerEpoch; hashCode = 31 * hashCode + (v3AndBelowTopics == null ? 
0 : v3AndBelowTopics.hashCode()); return hashCode; } @Override public AddPartitionsToTxnRequestData duplicate() { AddPartitionsToTxnRequestData _duplicate = new AddPartitionsToTxnRequestData(); AddPartitionsToTxnTransactionCollection newTransactions = new AddPartitionsToTxnTransactionCollection(transactions.size()); for (AddPartitionsToTxnTransaction _element : transactions) { newTransactions.add(_element.duplicate()); } _duplicate.transactions = newTransactions; _duplicate.v3AndBelowTransactionalId = v3AndBelowTransactionalId; _duplicate.v3AndBelowProducerId = v3AndBelowProducerId; _duplicate.v3AndBelowProducerEpoch = v3AndBelowProducerEpoch; AddPartitionsToTxnTopicCollection newV3AndBelowTopics = new AddPartitionsToTxnTopicCollection(v3AndBelowTopics.size()); for (AddPartitionsToTxnTopic _element : v3AndBelowTopics) { newV3AndBelowTopics.add(_element.duplicate()); } _duplicate.v3AndBelowTopics = newV3AndBelowTopics; return _duplicate; } @Override public String toString() { return "AddPartitionsToTxnRequestData(" + "transactions=" + MessageUtil.deepToString(transactions.iterator()) + ", v3AndBelowTransactionalId=" + ((v3AndBelowTransactionalId == null) ? 
"null" : "'" + v3AndBelowTransactionalId.toString() + "'") + ", v3AndBelowProducerId=" + v3AndBelowProducerId + ", v3AndBelowProducerEpoch=" + v3AndBelowProducerEpoch + ", v3AndBelowTopics=" + MessageUtil.deepToString(v3AndBelowTopics.iterator()) + ")"; } public AddPartitionsToTxnTransactionCollection transactions() { return this.transactions; } public String v3AndBelowTransactionalId() { return this.v3AndBelowTransactionalId; } public long v3AndBelowProducerId() { return this.v3AndBelowProducerId; } public short v3AndBelowProducerEpoch() { return this.v3AndBelowProducerEpoch; } public AddPartitionsToTxnTopicCollection v3AndBelowTopics() { return this.v3AndBelowTopics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AddPartitionsToTxnRequestData setTransactions(AddPartitionsToTxnTransactionCollection v) { this.transactions = v; return this; } public AddPartitionsToTxnRequestData setV3AndBelowTransactionalId(String v) { this.v3AndBelowTransactionalId = v; return this; } public AddPartitionsToTxnRequestData setV3AndBelowProducerId(long v) { this.v3AndBelowProducerId = v; return this; } public AddPartitionsToTxnRequestData setV3AndBelowProducerEpoch(short v) { this.v3AndBelowProducerEpoch = v; return this; } public AddPartitionsToTxnRequestData setV3AndBelowTopics(AddPartitionsToTxnTopicCollection v) { this.v3AndBelowTopics = v; return this; } public static class AddPartitionsToTxnTransaction implements Message, ImplicitLinkedHashMultiCollection.Element { String transactionalId; long producerId; short producerEpoch; boolean verifyOnly; AddPartitionsToTxnTopicCollection topics; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_4 = new Schema( new Field("transactional_id", Type.COMPACT_STRING, "The transactional id corresponding to the transaction."), new 
Field("producer_id", Type.INT64, "Current producer id in use by the transactional id."), new Field("producer_epoch", Type.INT16, "Current epoch associated with the producer id."), new Field("verify_only", Type.BOOLEAN, "Boolean to signify if we want to check if the partition is in the transaction rather than add it."), new Field("topics", new CompactArrayOf(AddPartitionsToTxnTopic.SCHEMA_3), "The partitions to add to the transaction."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { null, null, null, null, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 4; public static final short HIGHEST_SUPPORTED_VERSION = 4; public AddPartitionsToTxnTransaction(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public AddPartitionsToTxnTransaction() { this.transactionalId = ""; this.producerId = 0L; this.producerEpoch = (short) 0; this.verifyOnly = false; this.topics = new AddPartitionsToTxnTopicCollection(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { if (_version > 4) { throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnTransaction"); } { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field transactionalId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field transactionalId had invalid length " + length); } else { this.transactionalId = _readable.readString(length); } } this.producerId = _readable.readLong(); this.producerEpoch = _readable.readShort(); this.verifyOnly = 
_readable.readByte() != 0; { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AddPartitionsToTxnTopicCollection newCollection = new AddPartitionsToTxnTopicCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AddPartitionsToTxnTopic(_readable, _version)); } this.topics = newCollection; } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { if (_version < 4) { throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnTransaction"); } int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(transactionalId); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } _writable.writeLong(producerId); _writable.writeShort(producerEpoch); _writable.writeByte(verifyOnly ? 
(byte) 1 : (byte) 0); _writable.writeUnsignedVarint(topics.size() + 1); for (AddPartitionsToTxnTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 4) { throw new UnsupportedVersionException("Can't size version " + _version + " of AddPartitionsToTxnTransaction"); } { byte[] _stringBytes = transactionalId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'transactionalId' field is too long to be serialized"); } _cache.cacheSerializedValue(transactionalId, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } _size.addBytes(8); _size.addBytes(2); _size.addBytes(1); { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); for (AddPartitionsToTxnTopic topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof AddPartitionsToTxnTransaction)) return false; AddPartitionsToTxnTransaction other = (AddPartitionsToTxnTransaction) obj; if (this.transactionalId == null) { if (other.transactionalId != null) return false; } else { if (!this.transactionalId.equals(other.transactionalId)) return 
false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof AddPartitionsToTxnTransaction)) return false; AddPartitionsToTxnTransaction other = (AddPartitionsToTxnTransaction) obj; if (this.transactionalId == null) { if (other.transactionalId != null) return false; } else { if (!this.transactionalId.equals(other.transactionalId)) return false; } if (producerId != other.producerId) return false; if (producerEpoch != other.producerEpoch) return false; if (verifyOnly != other.verifyOnly) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (transactionalId == null ? 0 : transactionalId.hashCode()); return hashCode; } @Override public AddPartitionsToTxnTransaction duplicate() { AddPartitionsToTxnTransaction _duplicate = new AddPartitionsToTxnTransaction(); _duplicate.transactionalId = transactionalId; _duplicate.producerId = producerId; _duplicate.producerEpoch = producerEpoch; _duplicate.verifyOnly = verifyOnly; AddPartitionsToTxnTopicCollection newTopics = new AddPartitionsToTxnTopicCollection(topics.size()); for (AddPartitionsToTxnTopic _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "AddPartitionsToTxnTransaction(" + "transactionalId=" + ((transactionalId == null) ? "null" : "'" + transactionalId.toString() + "'") + ", producerId=" + producerId + ", producerEpoch=" + producerEpoch + ", verifyOnly=" + (verifyOnly ? 
"true" : "false") + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public String transactionalId() { return this.transactionalId; } public long producerId() { return this.producerId; } public short producerEpoch() { return this.producerEpoch; } public boolean verifyOnly() { return this.verifyOnly; } public AddPartitionsToTxnTopicCollection topics() { return this.topics; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AddPartitionsToTxnTransaction setTransactionalId(String v) { this.transactionalId = v; return this; } public AddPartitionsToTxnTransaction setProducerId(long v) { this.producerId = v; return this; } public AddPartitionsToTxnTransaction setProducerEpoch(short v) { this.producerEpoch = v; return this; } public AddPartitionsToTxnTransaction setVerifyOnly(boolean v) { this.verifyOnly = v; return this; } public AddPartitionsToTxnTransaction setTopics(AddPartitionsToTxnTopicCollection v) { this.topics = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class AddPartitionsToTxnTransactionCollection extends ImplicitLinkedHashMultiCollection<AddPartitionsToTxnTransaction> { public AddPartitionsToTxnTransactionCollection() { super(); } public AddPartitionsToTxnTransactionCollection(int expectedNumElements) { super(expectedNumElements); } public AddPartitionsToTxnTransactionCollection(Iterator<AddPartitionsToTxnTransaction> iterator) { super(iterator); } public AddPartitionsToTxnTransaction find(String transactionalId) { AddPartitionsToTxnTransaction _key = new AddPartitionsToTxnTransaction(); _key.setTransactionalId(transactionalId); return find(_key); } public List<AddPartitionsToTxnTransaction> findAll(String 
transactionalId) { AddPartitionsToTxnTransaction _key = new AddPartitionsToTxnTransaction(); _key.setTransactionalId(transactionalId); return findAll(_key); } public AddPartitionsToTxnTransactionCollection duplicate() { AddPartitionsToTxnTransactionCollection _duplicate = new AddPartitionsToTxnTransactionCollection(size()); for (AddPartitionsToTxnTransaction _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } public static class AddPartitionsToTxnTopic implements Message, ImplicitLinkedHashMultiCollection.Element { String name; List<Integer> partitions; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The name of the topic."), new Field("partitions", new ArrayOf(Type.INT32), "The partition indexes to add to the transaction") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("name", Type.COMPACT_STRING, "The name of the topic."), new Field("partitions", new CompactArrayOf(Type.INT32), "The partition indexes to add to the transaction"), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 4; public AddPartitionsToTxnTopic(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public AddPartitionsToTxnTopic() { this.name = ""; this.partitions = new ArrayList<Integer>(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override 
public short highestSupportedVersion() { return 32767; } @Override public void read(Readable _readable, short _version) { { int length; if (_version >= 3) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { int arrayLength; if (_version >= 3) { arrayLength = _readable.readUnsignedVarint() - 1; } else { arrayLength = _readable.readInt(); } if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<Integer> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(_readable.readInt()); } this.partitions = newCollection; } } this._unknownTaggedFields = null; if (_version >= 3) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 3) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 3) { _writable.writeUnsignedVarint(partitions.size() + 1); } else { 
_writable.writeInt(partitions.size()); } for (Integer partitionsElement : partitions) { _writable.writeInt(partitionsElement); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 3) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); if (_version >= 3) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 3) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { _size.addBytes(4); } _size.addBytes(partitions.size() * 4); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 3) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof AddPartitionsToTxnTopic)) return false; AddPartitionsToTxnTopic other = (AddPartitionsToTxnTopic) 
obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof AddPartitionsToTxnTopic)) return false; AddPartitionsToTxnTopic other = (AddPartitionsToTxnTopic) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); return hashCode; } @Override public AddPartitionsToTxnTopic duplicate() { AddPartitionsToTxnTopic _duplicate = new AddPartitionsToTxnTopic(); _duplicate.name = name; ArrayList<Integer> newPartitions = new ArrayList<Integer>(partitions.size()); for (Integer _element : partitions) { newPartitions.add(_element); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "AddPartitionsToTxnTopic(" + "name=" + ((name == null) ? 
"null" : "'" + name.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String name() { return this.name; } public List<Integer> partitions() { return this.partitions; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AddPartitionsToTxnTopic setName(String v) { this.name = v; return this; } public AddPartitionsToTxnTopic setPartitions(List<Integer> v) { this.partitions = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class AddPartitionsToTxnTopicCollection extends ImplicitLinkedHashMultiCollection<AddPartitionsToTxnTopic> { public AddPartitionsToTxnTopicCollection() { super(); } public AddPartitionsToTxnTopicCollection(int expectedNumElements) { super(expectedNumElements); } public AddPartitionsToTxnTopicCollection(Iterator<AddPartitionsToTxnTopic> iterator) { super(iterator); } public AddPartitionsToTxnTopic find(String name) { AddPartitionsToTxnTopic _key = new AddPartitionsToTxnTopic(); _key.setName(name); return find(_key); } public List<AddPartitionsToTxnTopic> findAll(String name) { AddPartitionsToTxnTopic _key = new AddPartitionsToTxnTopic(); _key.setName(name); return findAll(_key); } public AddPartitionsToTxnTopicCollection duplicate() { AddPartitionsToTxnTopicCollection _duplicate = new AddPartitionsToTxnTopicCollection(size()); for (AddPartitionsToTxnTopic _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AddPartitionsToTxnRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.AddPartitionsToTxnRequestData.*;

/**
 * Generated JSON converter for {@code AddPartitionsToTxnRequestData}.
 *
 * <p>Converts between the message object and a Jackson {@link JsonNode} tree for a given
 * protocol version. The request changed shape at version 4: versions 0-3 carry a single
 * transaction via the {@code v3AndBelow*} fields, while version 4+ carries a batch of
 * transactions in the {@code transactions} collection. Both {@code read} and {@code write}
 * enforce that version split: a field that is mandatory in the requested version must be
 * present on read, and a field outside the requested version must hold its default on write.
 */
public class AddPartitionsToTxnRequestDataJsonConverter {
    /**
     * Deserializes an {@code AddPartitionsToTxnRequestData} from a JSON object node.
     *
     * @param _node    the JSON object to read fields from
     * @param _version the protocol version governing which fields are mandatory
     * @return the populated message object
     * @throws RuntimeException if a field mandatory in {@code _version} is missing,
     *         or a present field has the wrong JSON node type
     */
    public static AddPartitionsToTxnRequestData read(JsonNode _node, short _version) {
        AddPartitionsToTxnRequestData _object = new AddPartitionsToTxnRequestData();
        // "transactions": the v4+ batch form; mandatory at version >= 4, defaulted to an
        // empty collection when absent at older versions.
        JsonNode _transactionsNode = _node.get("transactions");
        if (_transactionsNode == null) {
            if (_version >= 4) {
                throw new RuntimeException("AddPartitionsToTxnRequestData: unable to locate field 'transactions', which is mandatory in version " + _version);
            } else {
                _object.transactions = new AddPartitionsToTxnTransactionCollection(0);
            }
        } else {
            if (!_transactionsNode.isArray()) {
                throw new RuntimeException("AddPartitionsToTxnRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            AddPartitionsToTxnTransactionCollection _collection = new AddPartitionsToTxnTransactionCollection(_transactionsNode.size());
            _object.transactions = _collection;
            for (JsonNode _element : _transactionsNode) {
                _collection.add(AddPartitionsToTxnTransactionJsonConverter.read(_element, _version));
            }
        }
        // The remaining fields are the single-transaction form used by versions <= 3;
        // each is mandatory at version <= 3 and defaulted when absent at version 4+.
        JsonNode _v3AndBelowTransactionalIdNode = _node.get("v3AndBelowTransactionalId");
        if (_v3AndBelowTransactionalIdNode == null) {
            if (_version <= 3) {
                throw new RuntimeException("AddPartitionsToTxnRequestData: unable to locate field 'v3AndBelowTransactionalId', which is mandatory in version " + _version);
            } else {
                _object.v3AndBelowTransactionalId = "";
            }
        } else {
            if (!_v3AndBelowTransactionalIdNode.isTextual()) {
                throw new RuntimeException("AddPartitionsToTxnRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.v3AndBelowTransactionalId = _v3AndBelowTransactionalIdNode.asText();
        }
        JsonNode _v3AndBelowProducerIdNode = _node.get("v3AndBelowProducerId");
        if (_v3AndBelowProducerIdNode == null) {
            if (_version <= 3) {
                throw new RuntimeException("AddPartitionsToTxnRequestData: unable to locate field 'v3AndBelowProducerId', which is mandatory in version " + _version);
            } else {
                _object.v3AndBelowProducerId = 0L;
            }
        } else {
            // MessageUtil helper validates the node is an integral value that fits a long.
            _object.v3AndBelowProducerId = MessageUtil.jsonNodeToLong(_v3AndBelowProducerIdNode, "AddPartitionsToTxnRequestData");
        }
        JsonNode _v3AndBelowProducerEpochNode = _node.get("v3AndBelowProducerEpoch");
        if (_v3AndBelowProducerEpochNode == null) {
            if (_version <= 3) {
                throw new RuntimeException("AddPartitionsToTxnRequestData: unable to locate field 'v3AndBelowProducerEpoch', which is mandatory in version " + _version);
            } else {
                _object.v3AndBelowProducerEpoch = (short) 0;
            }
        } else {
            _object.v3AndBelowProducerEpoch = MessageUtil.jsonNodeToShort(_v3AndBelowProducerEpochNode, "AddPartitionsToTxnRequestData");
        }
        JsonNode _v3AndBelowTopicsNode = _node.get("v3AndBelowTopics");
        if (_v3AndBelowTopicsNode == null) {
            if (_version <= 3) {
                throw new RuntimeException("AddPartitionsToTxnRequestData: unable to locate field 'v3AndBelowTopics', which is mandatory in version " + _version);
            } else {
                _object.v3AndBelowTopics = new AddPartitionsToTxnTopicCollection(0);
            }
        } else {
            if (!_v3AndBelowTopicsNode.isArray()) {
                throw new RuntimeException("AddPartitionsToTxnRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            AddPartitionsToTxnTopicCollection _collection = new AddPartitionsToTxnTopicCollection(_v3AndBelowTopicsNode.size());
            _object.v3AndBelowTopics = _collection;
            for (JsonNode _element : _v3AndBelowTopicsNode) {
                _collection.add(AddPartitionsToTxnTopicJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes an {@code AddPartitionsToTxnRequestData} to a JSON object node.
     *
     * <p>Only the fields belonging to {@code _version} are emitted. A field outside the
     * version range must still hold its default value; otherwise serialization fails,
     * since the value could not be represented at that version.
     *
     * @param _node              (implicit) result object node
     * @param _version           the protocol version to serialize for
     * @param _serializeRecords  forwarded to nested converters (unused for this message's
     *                           fields, which contain no record data)
     * @return the populated JSON object node
     * @throws UnsupportedVersionException if a non-default field cannot be expressed in
     *         {@code _version}
     */
    public static JsonNode write(AddPartitionsToTxnRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        if (_version >= 4) {
            ArrayNode _transactionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (AddPartitionsToTxnTransaction _element : _object.transactions) {
                _transactionsArray.add(AddPartitionsToTxnTransactionJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("transactions", _transactionsArray);
        } else {
            // transactions only exists at v4+; at older versions it must be empty.
            if (!_object.transactions.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default transactions at version " + _version);
            }
        }
        if (_version <= 3) {
            _node.set("v3AndBelowTransactionalId", new TextNode(_object.v3AndBelowTransactionalId));
        } else {
            if (!_object.v3AndBelowTransactionalId.equals("")) {
                throw new UnsupportedVersionException("Attempted to write a non-default v3AndBelowTransactionalId at version " + _version);
            }
        }
        if (_version <= 3) {
            _node.set("v3AndBelowProducerId", new LongNode(_object.v3AndBelowProducerId));
        } else {
            if (_object.v3AndBelowProducerId != 0L) {
                throw new UnsupportedVersionException("Attempted to write a non-default v3AndBelowProducerId at version " + _version);
            }
        }
        if (_version <= 3) {
            _node.set("v3AndBelowProducerEpoch", new ShortNode(_object.v3AndBelowProducerEpoch));
        } else {
            if (_object.v3AndBelowProducerEpoch != (short) 0) {
                throw new UnsupportedVersionException("Attempted to write a non-default v3AndBelowProducerEpoch at version " + _version);
            }
        }
        if (_version <= 3) {
            ArrayNode _v3AndBelowTopicsArray = new ArrayNode(JsonNodeFactory.instance);
            for (AddPartitionsToTxnTopic _element : _object.v3AndBelowTopics) {
                _v3AndBelowTopicsArray.add(AddPartitionsToTxnTopicJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("v3AndBelowTopics", _v3AndBelowTopicsArray);
        } else {
            if (!_object.v3AndBelowTopics.isEmpty()) {
                throw new UnsupportedVersionException("Attempted to write a non-default v3AndBelowTopics at version " + _version);
            }
        }
        return _node;
    }

    /** Convenience overload of {@link #write(AddPartitionsToTxnRequestData, short, boolean)} with record serialization enabled. */
    public static JsonNode write(AddPartitionsToTxnRequestData _object, short _version) {
        return write(_object, _version, true);
    }

    /**
     * Generated JSON converter for the nested {@code AddPartitionsToTxnTopic} struct
     * (a topic name plus the partition indexes to add). Both fields exist in every
     * version, so no version gating is applied here.
     */
    public static class AddPartitionsToTxnTopicJsonConverter {
        /**
         * Deserializes an {@code AddPartitionsToTxnTopic} from a JSON object node.
         *
         * @throws RuntimeException if 'name' or 'partitions' is missing or mistyped
         */
        public static AddPartitionsToTxnTopic read(JsonNode _node, short _version) {
            AddPartitionsToTxnTopic _object = new AddPartitionsToTxnTopic();
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("AddPartitionsToTxnTopic: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("AddPartitionsToTxnTopic expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("AddPartitionsToTxnTopic: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    throw new RuntimeException("AddPartitionsToTxnTopic expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "AddPartitionsToTxnTopic element"));
                }
            }
            return _object;
        }

        /** Serializes an {@code AddPartitionsToTxnTopic} to a JSON object node. */
        public static JsonNode write(AddPartitionsToTxnTopic _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (Integer _element : _object.partitions) {
                _partitionsArray.add(new IntNode(_element));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }

        /** Convenience overload of {@link #write(AddPartitionsToTxnTopic, short, boolean)} with record serialization enabled. */
        public static JsonNode write(AddPartitionsToTxnTopic _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /**
     * Generated JSON converter for the nested {@code AddPartitionsToTxnTransaction} struct
     * (one transactional id with its producer id/epoch, verify-only flag, and topics).
     * This struct exists only from version 4 onward; both directions reject older versions.
     */
    public static class AddPartitionsToTxnTransactionJsonConverter {
        /**
         * Deserializes an {@code AddPartitionsToTxnTransaction} from a JSON object node.
         *
         * @throws UnsupportedVersionException if {@code _version} is below 4
         * @throws RuntimeException if a mandatory field is missing or mistyped
         */
        public static AddPartitionsToTxnTransaction read(JsonNode _node, short _version) {
            AddPartitionsToTxnTransaction _object = new AddPartitionsToTxnTransaction();
            if (_version < 4) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnTransaction");
            }
            JsonNode _transactionalIdNode = _node.get("transactionalId");
            if (_transactionalIdNode == null) {
                throw new RuntimeException("AddPartitionsToTxnTransaction: unable to locate field 'transactionalId', which is mandatory in version " + _version);
            } else {
                if (!_transactionalIdNode.isTextual()) {
                    throw new RuntimeException("AddPartitionsToTxnTransaction expected a string type, but got " + _node.getNodeType());
                }
                _object.transactionalId = _transactionalIdNode.asText();
            }
            JsonNode _producerIdNode = _node.get("producerId");
            if (_producerIdNode == null) {
                throw new RuntimeException("AddPartitionsToTxnTransaction: unable to locate field 'producerId', which is mandatory in version " + _version);
            } else {
                _object.producerId = MessageUtil.jsonNodeToLong(_producerIdNode, "AddPartitionsToTxnTransaction");
            }
            JsonNode _producerEpochNode = _node.get("producerEpoch");
            if (_producerEpochNode == null) {
                throw new RuntimeException("AddPartitionsToTxnTransaction: unable to locate field 'producerEpoch', which is mandatory in version " + _version);
            } else {
                _object.producerEpoch = MessageUtil.jsonNodeToShort(_producerEpochNode, "AddPartitionsToTxnTransaction");
            }
            JsonNode _verifyOnlyNode = _node.get("verifyOnly");
            if (_verifyOnlyNode == null) {
                throw new RuntimeException("AddPartitionsToTxnTransaction: unable to locate field 'verifyOnly', which is mandatory in version " + _version);
            } else {
                if (!_verifyOnlyNode.isBoolean()) {
                    throw new RuntimeException("AddPartitionsToTxnTransaction expected Boolean type, but got " + _node.getNodeType());
                }
                _object.verifyOnly = _verifyOnlyNode.asBoolean();
            }
            JsonNode _topicsNode = _node.get("topics");
            if (_topicsNode == null) {
                throw new RuntimeException("AddPartitionsToTxnTransaction: unable to locate field 'topics', which is mandatory in version " + _version);
            } else {
                if (!_topicsNode.isArray()) {
                    throw new RuntimeException("AddPartitionsToTxnTransaction expected a JSON array, but got " + _node.getNodeType());
                }
                AddPartitionsToTxnTopicCollection _collection = new AddPartitionsToTxnTopicCollection(_topicsNode.size());
                _object.topics = _collection;
                for (JsonNode _element : _topicsNode) {
                    _collection.add(AddPartitionsToTxnTopicJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        /**
         * Serializes an {@code AddPartitionsToTxnTransaction} to a JSON object node.
         *
         * @throws UnsupportedVersionException if {@code _version} is below 4
         */
        public static JsonNode write(AddPartitionsToTxnTransaction _object, short _version, boolean _serializeRecords) {
            if (_version < 4) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnTransaction");
            }
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("transactionalId", new TextNode(_object.transactionalId));
            _node.set("producerId", new LongNode(_object.producerId));
            _node.set("producerEpoch", new ShortNode(_object.producerEpoch));
            _node.set("verifyOnly", BooleanNode.valueOf(_object.verifyOnly));
            ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
            for (AddPartitionsToTxnTopic _element : _object.topics) {
                _topicsArray.add(AddPartitionsToTxnTopicJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("topics", _topicsArray);
            return _node;
        }

        /** Convenience overload of {@link #write(AddPartitionsToTxnTransaction, short, boolean)} with record serialization enabled. */
        public static JsonNode write(AddPartitionsToTxnTransaction _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AddPartitionsToTxnResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import 
org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class AddPartitionsToTxnResponseData implements ApiMessage { int throttleTimeMs; short errorCode; AddPartitionsToTxnResultCollection resultsByTransaction; AddPartitionsToTxnTopicResultCollection resultsByTopicV3AndBelow; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("results_by_topic_v3_and_below", new ArrayOf(AddPartitionsToTxnTopicResult.SCHEMA_0), "The results for each topic.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("results_by_topic_v3_and_below", new CompactArrayOf(AddPartitionsToTxnTopicResult.SCHEMA_3), "The results for each topic."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_4 = new Schema( new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The response top level error code."), new Field("results_by_transaction", new CompactArrayOf(AddPartitionsToTxnResult.SCHEMA_4), "Results categorized by transactional ID."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 4; public 
AddPartitionsToTxnResponseData(Readable _readable, short _version) { read(_readable, _version); } public AddPartitionsToTxnResponseData() { this.throttleTimeMs = 0; this.errorCode = (short) 0; this.resultsByTransaction = new AddPartitionsToTxnResultCollection(0); this.resultsByTopicV3AndBelow = new AddPartitionsToTxnTopicResultCollection(0); } @Override public short apiKey() { return 24; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { this.throttleTimeMs = _readable.readInt(); if (_version >= 4) { this.errorCode = _readable.readShort(); } else { this.errorCode = (short) 0; } if (_version >= 4) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field resultsByTransaction was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AddPartitionsToTxnResultCollection newCollection = new AddPartitionsToTxnResultCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AddPartitionsToTxnResult(_readable, _version)); } this.resultsByTransaction = newCollection; } } else { this.resultsByTransaction = new AddPartitionsToTxnResultCollection(0); } if (_version <= 3) { if (_version >= 3) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field resultsByTopicV3AndBelow was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AddPartitionsToTxnTopicResultCollection newCollection = new 
AddPartitionsToTxnTopicResultCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AddPartitionsToTxnTopicResult(_readable, _version)); } this.resultsByTopicV3AndBelow = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field resultsByTopicV3AndBelow was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AddPartitionsToTxnTopicResultCollection newCollection = new AddPartitionsToTxnTopicResultCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AddPartitionsToTxnTopicResult(_readable, _version)); } this.resultsByTopicV3AndBelow = newCollection; } } } else { this.resultsByTopicV3AndBelow = new AddPartitionsToTxnTopicResultCollection(0); } this._unknownTaggedFields = null; if (_version >= 3) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(throttleTimeMs); if (_version >= 4) { _writable.writeShort(errorCode); } if (_version >= 4) { _writable.writeUnsignedVarint(resultsByTransaction.size() + 1); for (AddPartitionsToTxnResult resultsByTransactionElement : resultsByTransaction) { resultsByTransactionElement.write(_writable, _cache, _version); } } else { if (!this.resultsByTransaction.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default resultsByTransaction at version " + _version); } 
} if (_version <= 3) { if (_version >= 3) { _writable.writeUnsignedVarint(resultsByTopicV3AndBelow.size() + 1); for (AddPartitionsToTxnTopicResult resultsByTopicV3AndBelowElement : resultsByTopicV3AndBelow) { resultsByTopicV3AndBelowElement.write(_writable, _cache, _version); } } else { _writable.writeInt(resultsByTopicV3AndBelow.size()); for (AddPartitionsToTxnTopicResult resultsByTopicV3AndBelowElement : resultsByTopicV3AndBelow) { resultsByTopicV3AndBelowElement.write(_writable, _cache, _version); } } } else { if (!this.resultsByTopicV3AndBelow.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default resultsByTopicV3AndBelow at version " + _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 3) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); if (_version >= 4) { _size.addBytes(2); } if (_version >= 4) { { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(resultsByTransaction.size() + 1)); for (AddPartitionsToTxnResult resultsByTransactionElement : resultsByTransaction) { resultsByTransactionElement.addSize(_size, _cache, _version); } } } if (_version <= 3) { { if (_version >= 3) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(resultsByTopicV3AndBelow.size() + 1)); } else { _size.addBytes(4); } for (AddPartitionsToTxnTopicResult resultsByTopicV3AndBelowElement : resultsByTopicV3AndBelow) { resultsByTopicV3AndBelowElement.addSize(_size, _cache, _version); } } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for 
(RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 3) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof AddPartitionsToTxnResponseData)) return false; AddPartitionsToTxnResponseData other = (AddPartitionsToTxnResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (errorCode != other.errorCode) return false; if (this.resultsByTransaction == null) { if (other.resultsByTransaction != null) return false; } else { if (!this.resultsByTransaction.equals(other.resultsByTransaction)) return false; } if (this.resultsByTopicV3AndBelow == null) { if (other.resultsByTopicV3AndBelow != null) return false; } else { if (!this.resultsByTopicV3AndBelow.equals(other.resultsByTopicV3AndBelow)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (resultsByTransaction == null ? 0 : resultsByTransaction.hashCode()); hashCode = 31 * hashCode + (resultsByTopicV3AndBelow == null ? 
0 : resultsByTopicV3AndBelow.hashCode()); return hashCode; } @Override public AddPartitionsToTxnResponseData duplicate() { AddPartitionsToTxnResponseData _duplicate = new AddPartitionsToTxnResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; _duplicate.errorCode = errorCode; AddPartitionsToTxnResultCollection newResultsByTransaction = new AddPartitionsToTxnResultCollection(resultsByTransaction.size()); for (AddPartitionsToTxnResult _element : resultsByTransaction) { newResultsByTransaction.add(_element.duplicate()); } _duplicate.resultsByTransaction = newResultsByTransaction; AddPartitionsToTxnTopicResultCollection newResultsByTopicV3AndBelow = new AddPartitionsToTxnTopicResultCollection(resultsByTopicV3AndBelow.size()); for (AddPartitionsToTxnTopicResult _element : resultsByTopicV3AndBelow) { newResultsByTopicV3AndBelow.add(_element.duplicate()); } _duplicate.resultsByTopicV3AndBelow = newResultsByTopicV3AndBelow; return _duplicate; } @Override public String toString() { return "AddPartitionsToTxnResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", errorCode=" + errorCode + ", resultsByTransaction=" + MessageUtil.deepToString(resultsByTransaction.iterator()) + ", resultsByTopicV3AndBelow=" + MessageUtil.deepToString(resultsByTopicV3AndBelow.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public short errorCode() { return this.errorCode; } public AddPartitionsToTxnResultCollection resultsByTransaction() { return this.resultsByTransaction; } public AddPartitionsToTxnTopicResultCollection resultsByTopicV3AndBelow() { return this.resultsByTopicV3AndBelow; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AddPartitionsToTxnResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public AddPartitionsToTxnResponseData setErrorCode(short v) { this.errorCode = v; return this; 
} public AddPartitionsToTxnResponseData setResultsByTransaction(AddPartitionsToTxnResultCollection v) { this.resultsByTransaction = v; return this; } public AddPartitionsToTxnResponseData setResultsByTopicV3AndBelow(AddPartitionsToTxnTopicResultCollection v) { this.resultsByTopicV3AndBelow = v; return this; } public static class AddPartitionsToTxnResult implements Message, ImplicitLinkedHashMultiCollection.Element { String transactionalId; AddPartitionsToTxnTopicResultCollection topicResults; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_4 = new Schema( new Field("transactional_id", Type.COMPACT_STRING, "The transactional id corresponding to the transaction."), new Field("topic_results", new CompactArrayOf(AddPartitionsToTxnTopicResult.SCHEMA_3), "The results for each topic."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { null, null, null, null, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 4; public static final short HIGHEST_SUPPORTED_VERSION = 4; public AddPartitionsToTxnResult(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public AddPartitionsToTxnResult() { this.transactionalId = ""; this.topicResults = new AddPartitionsToTxnTopicResultCollection(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 4; } @Override public void read(Readable _readable, short _version) { if (_version > 4) { throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnResult"); } { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field 
transactionalId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field transactionalId had invalid length " + length); } else { this.transactionalId = _readable.readString(length); } } { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topicResults was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AddPartitionsToTxnTopicResultCollection newCollection = new AddPartitionsToTxnTopicResultCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AddPartitionsToTxnTopicResult(_readable, _version)); } this.topicResults = newCollection; } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { if (_version < 4) { throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnResult"); } int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(transactionalId); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } _writable.writeUnsignedVarint(topicResults.size() + 1); for (AddPartitionsToTxnTopicResult topicResultsElement : topicResults) { topicResultsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); 
_writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 4) { throw new UnsupportedVersionException("Can't size version " + _version + " of AddPartitionsToTxnResult"); } { byte[] _stringBytes = transactionalId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'transactionalId' field is too long to be serialized"); } _cache.cacheSerializedValue(transactionalId, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topicResults.size() + 1)); for (AddPartitionsToTxnTopicResult topicResultsElement : topicResults) { topicResultsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof AddPartitionsToTxnResult)) return false; AddPartitionsToTxnResult other = (AddPartitionsToTxnResult) obj; if (this.transactionalId == null) { if (other.transactionalId != null) return false; } else { if (!this.transactionalId.equals(other.transactionalId)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof AddPartitionsToTxnResult)) return false; AddPartitionsToTxnResult other = (AddPartitionsToTxnResult) obj; if (this.transactionalId == null) { if (other.transactionalId != null) return false; } else { if (!this.transactionalId.equals(other.transactionalId)) return 
false; } if (this.topicResults == null) { if (other.topicResults != null) return false; } else { if (!this.topicResults.equals(other.topicResults)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (transactionalId == null ? 0 : transactionalId.hashCode()); return hashCode; } @Override public AddPartitionsToTxnResult duplicate() { AddPartitionsToTxnResult _duplicate = new AddPartitionsToTxnResult(); _duplicate.transactionalId = transactionalId; AddPartitionsToTxnTopicResultCollection newTopicResults = new AddPartitionsToTxnTopicResultCollection(topicResults.size()); for (AddPartitionsToTxnTopicResult _element : topicResults) { newTopicResults.add(_element.duplicate()); } _duplicate.topicResults = newTopicResults; return _duplicate; } @Override public String toString() { return "AddPartitionsToTxnResult(" + "transactionalId=" + ((transactionalId == null) ? 
"null" : "'" + transactionalId.toString() + "'") + ", topicResults=" + MessageUtil.deepToString(topicResults.iterator()) + ")"; } public String transactionalId() { return this.transactionalId; } public AddPartitionsToTxnTopicResultCollection topicResults() { return this.topicResults; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AddPartitionsToTxnResult setTransactionalId(String v) { this.transactionalId = v; return this; } public AddPartitionsToTxnResult setTopicResults(AddPartitionsToTxnTopicResultCollection v) { this.topicResults = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class AddPartitionsToTxnResultCollection extends ImplicitLinkedHashMultiCollection<AddPartitionsToTxnResult> { public AddPartitionsToTxnResultCollection() { super(); } public AddPartitionsToTxnResultCollection(int expectedNumElements) { super(expectedNumElements); } public AddPartitionsToTxnResultCollection(Iterator<AddPartitionsToTxnResult> iterator) { super(iterator); } public AddPartitionsToTxnResult find(String transactionalId) { AddPartitionsToTxnResult _key = new AddPartitionsToTxnResult(); _key.setTransactionalId(transactionalId); return find(_key); } public List<AddPartitionsToTxnResult> findAll(String transactionalId) { AddPartitionsToTxnResult _key = new AddPartitionsToTxnResult(); _key.setTransactionalId(transactionalId); return findAll(_key); } public AddPartitionsToTxnResultCollection duplicate() { AddPartitionsToTxnResultCollection _duplicate = new AddPartitionsToTxnResultCollection(size()); for (AddPartitionsToTxnResult _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } public static class 
AddPartitionsToTxnPartitionResult implements Message, ImplicitLinkedHashMultiCollection.Element { int partitionIndex; short partitionErrorCode; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("partition_index", Type.INT32, "The partition indexes."), new Field("partition_error_code", Type.INT16, "The response error code.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("partition_index", Type.INT32, "The partition indexes."), new Field("partition_error_code", Type.INT16, "The response error code."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 4; public AddPartitionsToTxnPartitionResult(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public AddPartitionsToTxnPartitionResult() { this.partitionIndex = 0; this.partitionErrorCode = (short) 0; this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 32767; } @Override public void read(Readable _readable, short _version) { this.partitionIndex = _readable.readInt(); this.partitionErrorCode = _readable.readShort(); this._unknownTaggedFields = null; if (_version >= 3) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { 
default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(partitionIndex); _writable.writeShort(partitionErrorCode); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 3) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); _size.addBytes(2); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 3) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof AddPartitionsToTxnPartitionResult)) return false; AddPartitionsToTxnPartitionResult other = (AddPartitionsToTxnPartitionResult) obj; if (partitionIndex != other.partitionIndex) return false; return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof AddPartitionsToTxnPartitionResult)) return false; AddPartitionsToTxnPartitionResult other = (AddPartitionsToTxnPartitionResult) obj; if 
(partitionIndex != other.partitionIndex) return false; if (partitionErrorCode != other.partitionErrorCode) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partitionIndex; return hashCode; } @Override public AddPartitionsToTxnPartitionResult duplicate() { AddPartitionsToTxnPartitionResult _duplicate = new AddPartitionsToTxnPartitionResult(); _duplicate.partitionIndex = partitionIndex; _duplicate.partitionErrorCode = partitionErrorCode; return _duplicate; } @Override public String toString() { return "AddPartitionsToTxnPartitionResult(" + "partitionIndex=" + partitionIndex + ", partitionErrorCode=" + partitionErrorCode + ")"; } public int partitionIndex() { return this.partitionIndex; } public short partitionErrorCode() { return this.partitionErrorCode; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AddPartitionsToTxnPartitionResult setPartitionIndex(int v) { this.partitionIndex = v; return this; } public AddPartitionsToTxnPartitionResult setPartitionErrorCode(short v) { this.partitionErrorCode = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class AddPartitionsToTxnPartitionResultCollection extends ImplicitLinkedHashMultiCollection<AddPartitionsToTxnPartitionResult> { public AddPartitionsToTxnPartitionResultCollection() { super(); } public AddPartitionsToTxnPartitionResultCollection(int expectedNumElements) { super(expectedNumElements); } public AddPartitionsToTxnPartitionResultCollection(Iterator<AddPartitionsToTxnPartitionResult> iterator) { super(iterator); } public 
AddPartitionsToTxnPartitionResult find(int partitionIndex) { AddPartitionsToTxnPartitionResult _key = new AddPartitionsToTxnPartitionResult(); _key.setPartitionIndex(partitionIndex); return find(_key); } public List<AddPartitionsToTxnPartitionResult> findAll(int partitionIndex) { AddPartitionsToTxnPartitionResult _key = new AddPartitionsToTxnPartitionResult(); _key.setPartitionIndex(partitionIndex); return findAll(_key); } public AddPartitionsToTxnPartitionResultCollection duplicate() { AddPartitionsToTxnPartitionResultCollection _duplicate = new AddPartitionsToTxnPartitionResultCollection(size()); for (AddPartitionsToTxnPartitionResult _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } public static class AddPartitionsToTxnTopicResult implements Message, ImplicitLinkedHashMultiCollection.Element { String name; AddPartitionsToTxnPartitionResultCollection resultsByPartition; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("results_by_partition", new ArrayOf(AddPartitionsToTxnPartitionResult.SCHEMA_0), "The results for each partition") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("name", Type.COMPACT_STRING, "The topic name."), new Field("results_by_partition", new CompactArrayOf(AddPartitionsToTxnPartitionResult.SCHEMA_3), "The results for each partition"), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_4 = SCHEMA_3; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3, SCHEMA_4 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 4; public AddPartitionsToTxnTopicResult(Readable _readable, short _version) { read(_readable, _version); this.prev = 
ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public AddPartitionsToTxnTopicResult() { this.name = ""; this.resultsByPartition = new AddPartitionsToTxnPartitionResultCollection(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 32767; } @Override public void read(Readable _readable, short _version) { { int length; if (_version >= 3) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { if (_version >= 3) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field resultsByPartition was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AddPartitionsToTxnPartitionResultCollection newCollection = new AddPartitionsToTxnPartitionResultCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AddPartitionsToTxnPartitionResult(_readable, _version)); } this.resultsByPartition = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field resultsByPartition was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes 
remaining."); } AddPartitionsToTxnPartitionResultCollection newCollection = new AddPartitionsToTxnPartitionResultCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AddPartitionsToTxnPartitionResult(_readable, _version)); } this.resultsByPartition = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 3) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 3) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 3) { _writable.writeUnsignedVarint(resultsByPartition.size() + 1); for (AddPartitionsToTxnPartitionResult resultsByPartitionElement : resultsByPartition) { resultsByPartitionElement.write(_writable, _cache, _version); } } else { _writable.writeInt(resultsByPartition.size()); for (AddPartitionsToTxnPartitionResult resultsByPartitionElement : resultsByPartition) { resultsByPartitionElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 3) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, 
ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); if (_version >= 3) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 3) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(resultsByPartition.size() + 1)); } else { _size.addBytes(4); } for (AddPartitionsToTxnPartitionResult resultsByPartitionElement : resultsByPartition) { resultsByPartitionElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 3) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof AddPartitionsToTxnTopicResult)) return false; AddPartitionsToTxnTopicResult other = (AddPartitionsToTxnTopicResult) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof AddPartitionsToTxnTopicResult)) return false; AddPartitionsToTxnTopicResult other = (AddPartitionsToTxnTopicResult) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.resultsByPartition == 
null) { if (other.resultsByPartition != null) return false; } else { if (!this.resultsByPartition.equals(other.resultsByPartition)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); return hashCode; } @Override public AddPartitionsToTxnTopicResult duplicate() { AddPartitionsToTxnTopicResult _duplicate = new AddPartitionsToTxnTopicResult(); _duplicate.name = name; AddPartitionsToTxnPartitionResultCollection newResultsByPartition = new AddPartitionsToTxnPartitionResultCollection(resultsByPartition.size()); for (AddPartitionsToTxnPartitionResult _element : resultsByPartition) { newResultsByPartition.add(_element.duplicate()); } _duplicate.resultsByPartition = newResultsByPartition; return _duplicate; } @Override public String toString() { return "AddPartitionsToTxnTopicResult(" + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'") + ", resultsByPartition=" + MessageUtil.deepToString(resultsByPartition.iterator()) + ")"; } public String name() { return this.name; } public AddPartitionsToTxnPartitionResultCollection resultsByPartition() { return this.resultsByPartition; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AddPartitionsToTxnTopicResult setName(String v) { this.name = v; return this; } public AddPartitionsToTxnTopicResult setResultsByPartition(AddPartitionsToTxnPartitionResultCollection v) { this.resultsByPartition = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class AddPartitionsToTxnTopicResultCollection extends 
ImplicitLinkedHashMultiCollection<AddPartitionsToTxnTopicResult> { public AddPartitionsToTxnTopicResultCollection() { super(); } public AddPartitionsToTxnTopicResultCollection(int expectedNumElements) { super(expectedNumElements); } public AddPartitionsToTxnTopicResultCollection(Iterator<AddPartitionsToTxnTopicResult> iterator) { super(iterator); } public AddPartitionsToTxnTopicResult find(String name) { AddPartitionsToTxnTopicResult _key = new AddPartitionsToTxnTopicResult(); _key.setName(name); return find(_key); } public List<AddPartitionsToTxnTopicResult> findAll(String name) { AddPartitionsToTxnTopicResult _key = new AddPartitionsToTxnTopicResult(); _key.setName(name); return findAll(_key); } public AddPartitionsToTxnTopicResultCollection duplicate() { AddPartitionsToTxnTopicResultCollection _duplicate = new AddPartitionsToTxnTopicResultCollection(size()); for (AddPartitionsToTxnTopicResult _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AddPartitionsToTxnResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.AddPartitionsToTxnResponseData.*; public class AddPartitionsToTxnResponseDataJsonConverter { public static AddPartitionsToTxnResponseData read(JsonNode _node, short _version) { AddPartitionsToTxnResponseData _object = new AddPartitionsToTxnResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { throw new RuntimeException("AddPartitionsToTxnResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, 
"AddPartitionsToTxnResponseData"); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { if (_version >= 4) { throw new RuntimeException("AddPartitionsToTxnResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = (short) 0; } } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "AddPartitionsToTxnResponseData"); } JsonNode _resultsByTransactionNode = _node.get("resultsByTransaction"); if (_resultsByTransactionNode == null) { if (_version >= 4) { throw new RuntimeException("AddPartitionsToTxnResponseData: unable to locate field 'resultsByTransaction', which is mandatory in version " + _version); } else { _object.resultsByTransaction = new AddPartitionsToTxnResultCollection(0); } } else { if (!_resultsByTransactionNode.isArray()) { throw new RuntimeException("AddPartitionsToTxnResponseData expected a JSON array, but got " + _node.getNodeType()); } AddPartitionsToTxnResultCollection _collection = new AddPartitionsToTxnResultCollection(_resultsByTransactionNode.size()); _object.resultsByTransaction = _collection; for (JsonNode _element : _resultsByTransactionNode) { _collection.add(AddPartitionsToTxnResultJsonConverter.read(_element, _version)); } } JsonNode _resultsByTopicV3AndBelowNode = _node.get("resultsByTopicV3AndBelow"); if (_resultsByTopicV3AndBelowNode == null) { if (_version <= 3) { throw new RuntimeException("AddPartitionsToTxnResponseData: unable to locate field 'resultsByTopicV3AndBelow', which is mandatory in version " + _version); } else { _object.resultsByTopicV3AndBelow = new AddPartitionsToTxnTopicResultCollection(0); } } else { if (!_resultsByTopicV3AndBelowNode.isArray()) { throw new RuntimeException("AddPartitionsToTxnResponseData expected a JSON array, but got " + _node.getNodeType()); } AddPartitionsToTxnTopicResultCollection _collection = new AddPartitionsToTxnTopicResultCollection(_resultsByTopicV3AndBelowNode.size()); 
_object.resultsByTopicV3AndBelow = _collection; for (JsonNode _element : _resultsByTopicV3AndBelowNode) { _collection.add(AddPartitionsToTxnTopicResultJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(AddPartitionsToTxnResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); if (_version >= 4) { _node.set("errorCode", new ShortNode(_object.errorCode)); } if (_version >= 4) { ArrayNode _resultsByTransactionArray = new ArrayNode(JsonNodeFactory.instance); for (AddPartitionsToTxnResult _element : _object.resultsByTransaction) { _resultsByTransactionArray.add(AddPartitionsToTxnResultJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("resultsByTransaction", _resultsByTransactionArray); } else { if (!_object.resultsByTransaction.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default resultsByTransaction at version " + _version); } } if (_version <= 3) { ArrayNode _resultsByTopicV3AndBelowArray = new ArrayNode(JsonNodeFactory.instance); for (AddPartitionsToTxnTopicResult _element : _object.resultsByTopicV3AndBelow) { _resultsByTopicV3AndBelowArray.add(AddPartitionsToTxnTopicResultJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("resultsByTopicV3AndBelow", _resultsByTopicV3AndBelowArray); } else { if (!_object.resultsByTopicV3AndBelow.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default resultsByTopicV3AndBelow at version " + _version); } } return _node; } public static JsonNode write(AddPartitionsToTxnResponseData _object, short _version) { return write(_object, _version, true); } public static class AddPartitionsToTxnPartitionResultJsonConverter { public static AddPartitionsToTxnPartitionResult read(JsonNode _node, short _version) { AddPartitionsToTxnPartitionResult _object = new 
AddPartitionsToTxnPartitionResult(); JsonNode _partitionIndexNode = _node.get("partitionIndex"); if (_partitionIndexNode == null) { throw new RuntimeException("AddPartitionsToTxnPartitionResult: unable to locate field 'partitionIndex', which is mandatory in version " + _version); } else { _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "AddPartitionsToTxnPartitionResult"); } JsonNode _partitionErrorCodeNode = _node.get("partitionErrorCode"); if (_partitionErrorCodeNode == null) { throw new RuntimeException("AddPartitionsToTxnPartitionResult: unable to locate field 'partitionErrorCode', which is mandatory in version " + _version); } else { _object.partitionErrorCode = MessageUtil.jsonNodeToShort(_partitionErrorCodeNode, "AddPartitionsToTxnPartitionResult"); } return _object; } public static JsonNode write(AddPartitionsToTxnPartitionResult _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("partitionIndex", new IntNode(_object.partitionIndex)); _node.set("partitionErrorCode", new ShortNode(_object.partitionErrorCode)); return _node; } public static JsonNode write(AddPartitionsToTxnPartitionResult _object, short _version) { return write(_object, _version, true); } } public static class AddPartitionsToTxnResultJsonConverter { public static AddPartitionsToTxnResult read(JsonNode _node, short _version) { AddPartitionsToTxnResult _object = new AddPartitionsToTxnResult(); if (_version < 4) { throw new UnsupportedVersionException("Can't read version " + _version + " of AddPartitionsToTxnResult"); } JsonNode _transactionalIdNode = _node.get("transactionalId"); if (_transactionalIdNode == null) { throw new RuntimeException("AddPartitionsToTxnResult: unable to locate field 'transactionalId', which is mandatory in version " + _version); } else { if (!_transactionalIdNode.isTextual()) { throw new RuntimeException("AddPartitionsToTxnResult expected a string type, but got " + 
_node.getNodeType()); } _object.transactionalId = _transactionalIdNode.asText(); } JsonNode _topicResultsNode = _node.get("topicResults"); if (_topicResultsNode == null) { throw new RuntimeException("AddPartitionsToTxnResult: unable to locate field 'topicResults', which is mandatory in version " + _version); } else { if (!_topicResultsNode.isArray()) { throw new RuntimeException("AddPartitionsToTxnResult expected a JSON array, but got " + _node.getNodeType()); } AddPartitionsToTxnTopicResultCollection _collection = new AddPartitionsToTxnTopicResultCollection(_topicResultsNode.size()); _object.topicResults = _collection; for (JsonNode _element : _topicResultsNode) { _collection.add(AddPartitionsToTxnTopicResultJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(AddPartitionsToTxnResult _object, short _version, boolean _serializeRecords) { if (_version < 4) { throw new UnsupportedVersionException("Can't write version " + _version + " of AddPartitionsToTxnResult"); } ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("transactionalId", new TextNode(_object.transactionalId)); ArrayNode _topicResultsArray = new ArrayNode(JsonNodeFactory.instance); for (AddPartitionsToTxnTopicResult _element : _object.topicResults) { _topicResultsArray.add(AddPartitionsToTxnTopicResultJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topicResults", _topicResultsArray); return _node; } public static JsonNode write(AddPartitionsToTxnResult _object, short _version) { return write(_object, _version, true); } } public static class AddPartitionsToTxnTopicResultJsonConverter { public static AddPartitionsToTxnTopicResult read(JsonNode _node, short _version) { AddPartitionsToTxnTopicResult _object = new AddPartitionsToTxnTopicResult(); JsonNode _nameNode = _node.get("name"); if (_nameNode == null) { throw new RuntimeException("AddPartitionsToTxnTopicResult: unable to locate field 'name', which is mandatory in 
version " + _version); } else { if (!_nameNode.isTextual()) { throw new RuntimeException("AddPartitionsToTxnTopicResult expected a string type, but got " + _node.getNodeType()); } _object.name = _nameNode.asText(); } JsonNode _resultsByPartitionNode = _node.get("resultsByPartition"); if (_resultsByPartitionNode == null) { throw new RuntimeException("AddPartitionsToTxnTopicResult: unable to locate field 'resultsByPartition', which is mandatory in version " + _version); } else { if (!_resultsByPartitionNode.isArray()) { throw new RuntimeException("AddPartitionsToTxnTopicResult expected a JSON array, but got " + _node.getNodeType()); } AddPartitionsToTxnPartitionResultCollection _collection = new AddPartitionsToTxnPartitionResultCollection(_resultsByPartitionNode.size()); _object.resultsByPartition = _collection; for (JsonNode _element : _resultsByPartitionNode) { _collection.add(AddPartitionsToTxnPartitionResultJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(AddPartitionsToTxnTopicResult _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("name", new TextNode(_object.name)); ArrayNode _resultsByPartitionArray = new ArrayNode(JsonNodeFactory.instance); for (AddPartitionsToTxnPartitionResult _element : _object.resultsByPartition) { _resultsByPartitionArray.add(AddPartitionsToTxnPartitionResultJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("resultsByPartition", _resultsByPartitionArray); return _node; } public static JsonNode write(AddPartitionsToTxnTopicResult _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AllocateProducerIdsRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Generated message body for the AllocateProducerIds request (API key 67).
 * Carries the identity of the broker asking the controller for a block of
 * producer IDs. Only version 0 of this message exists; it is a flexible
 * (compact/tagged-field) format, as shown by the varint tagged-field section
 * in {@link #read} and {@link #write}.
 */
public class AllocateProducerIdsRequestData implements ApiMessage {
    // The ID of the requesting broker (wire field "broker_id", INT32).
    int brokerId;
    // The epoch of the requesting broker (wire field "broker_epoch", INT64).
    long brokerEpoch;
    // Tagged fields this code does not recognize; preserved so they can be
    // round-tripped through write() unchanged.
    private List<RawTaggedField> _unknownTaggedFields;

    /** Wire schema for version 0: broker_id, broker_epoch, then tagged fields. */
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("broker_id", Type.INT32, "The ID of the requesting broker"),
            new Field("broker_epoch", Type.INT64, "The epoch of the requesting broker"),
            TaggedFieldsSection.of(
            )
        );

    /** All schemas, indexed by version. */
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;

    /**
     * Deserializing constructor: populates this instance from {@code _readable}.
     *
     * @param _readable source of the serialized bytes.
     * @param _version  the message version to decode (only 0 is supported).
     */
    public AllocateProducerIdsRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Creates an instance with default field values (brokerId=0, brokerEpoch=-1). */
    public AllocateProducerIdsRequestData() {
        this.brokerId = 0;
        this.brokerEpoch = -1L;
    }

    @Override
    public short apiKey() {
        // 67 = ALLOCATE_PRODUCER_IDS in the Kafka protocol.
        return 67;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /**
     * Reads this message from {@code _readable}. Field order must match
     * SCHEMA_0 exactly: broker_id, broker_epoch, then the tagged-field section.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.brokerId = _readable.readInt();
        this.brokerEpoch = _readable.readLong();
        this._unknownTaggedFields = null;
        // Tagged-field section: a varint count followed by (tag, size, bytes)
        // triples. Unknown tags are retained rather than dropped.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /**
     * Serializes this message to {@code _writable} in SCHEMA_0 order,
     * re-emitting any unknown tagged fields captured by {@link #read}.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(brokerId);
        _writable.writeLong(brokerEpoch);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /**
     * Accumulates the serialized size of this message into {@code _size};
     * must mirror {@link #write} byte-for-byte (4 for INT32, 8 for INT64,
     * plus the varint-encoded tagged-field section).
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(4);
        _size.addBytes(8);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AllocateProducerIdsRequestData)) return false;
        AllocateProducerIdsRequestData other = (AllocateProducerIdsRequestData) obj;
        if (brokerId != other.brokerId) return false;
        if (brokerEpoch != other.brokerEpoch) return false;
        // Unknown tagged fields participate in equality as well.
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + brokerId;
        // Fold both halves of the long into the hash.
        hashCode = 31 * hashCode + ((int) (brokerEpoch >> 32) ^ (int) brokerEpoch);
        return hashCode;
    }

    /**
     * Returns a copy of this message. Note: unknown tagged fields are NOT
     * copied into the duplicate (consistent with the generated code).
     */
    @Override
    public AllocateProducerIdsRequestData duplicate() {
        AllocateProducerIdsRequestData _duplicate = new AllocateProducerIdsRequestData();
        _duplicate.brokerId = brokerId;
        _duplicate.brokerEpoch = brokerEpoch;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "AllocateProducerIdsRequestData("
            + "brokerId=" + brokerId
            + ", brokerEpoch=" + brokerEpoch
            + ")";
    }

    /** @return the ID of the requesting broker. */
    public int brokerId() {
        return this.brokerId;
    }

    /** @return the epoch of the requesting broker. */
    public long brokerEpoch() {
        return this.brokerEpoch;
    }

    /**
     * @return the list of unknown tagged fields, creating an empty mutable
     *         list on first access.
     */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    /** Sets the broker ID; returns {@code this} for chaining. */
    public AllocateProducerIdsRequestData setBrokerId(int v) {
        this.brokerId = v;
        return this;
    }

    /** Sets the broker epoch; returns {@code this} for chaining. */
    public AllocateProducerIdsRequestData setBrokerEpoch(long v) {
        this.brokerEpoch = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AllocateProducerIdsRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.LongNode; import com.fasterxml.jackson.databind.node.ObjectNode; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.AllocateProducerIdsRequestData.*; public class AllocateProducerIdsRequestDataJsonConverter { public static AllocateProducerIdsRequestData read(JsonNode _node, short _version) { AllocateProducerIdsRequestData _object = new AllocateProducerIdsRequestData(); JsonNode _brokerIdNode = _node.get("brokerId"); if (_brokerIdNode == null) { throw new RuntimeException("AllocateProducerIdsRequestData: unable to locate field 'brokerId', which is mandatory in version " + _version); } else { _object.brokerId = MessageUtil.jsonNodeToInt(_brokerIdNode, "AllocateProducerIdsRequestData"); } JsonNode _brokerEpochNode = _node.get("brokerEpoch"); if (_brokerEpochNode == null) { throw new RuntimeException("AllocateProducerIdsRequestData: unable to locate field 
'brokerEpoch', which is mandatory in version " + _version); } else { _object.brokerEpoch = MessageUtil.jsonNodeToLong(_brokerEpochNode, "AllocateProducerIdsRequestData"); } return _object; } public static JsonNode write(AllocateProducerIdsRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("brokerId", new IntNode(_object.brokerId)); _node.set("brokerEpoch", new LongNode(_object.brokerEpoch)); return _node; } public static JsonNode write(AllocateProducerIdsRequestData _object, short _version) { return write(_object, _version, true); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AllocateProducerIdsResponseData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Generated message body for the AllocateProducerIds response (API key 67).
 * Describes the contiguous block of producer IDs granted to the requesting
 * broker: [producerIdStart, producerIdStart + producerIdLen). Only version 0
 * exists; it is a flexible (compact/tagged-field) format.
 */
public class AllocateProducerIdsResponseData implements ApiMessage {
    // Throttle duration in ms due to quota violation; zero if not throttled.
    int throttleTimeMs;
    // Top-level response error code (wire field "error_code", INT16).
    short errorCode;
    // First producer ID in the granted range, inclusive.
    long producerIdStart;
    // Number of producer IDs in the granted range.
    int producerIdLen;
    // Tagged fields this code does not recognize; preserved for round-tripping.
    private List<RawTaggedField> _unknownTaggedFields;

    /** Wire schema for version 0, in serialization order. */
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The top level response error code"),
            new Field("producer_id_start", Type.INT64, "The first producer ID in this range, inclusive"),
            new Field("producer_id_len", Type.INT32, "The number of producer IDs in this range"),
            TaggedFieldsSection.of(
            )
        );

    /** All schemas, indexed by version. */
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;

    /**
     * Deserializing constructor: populates this instance from {@code _readable}.
     *
     * @param _readable source of the serialized bytes.
     * @param _version  the message version to decode (only 0 is supported).
     */
    public AllocateProducerIdsResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Creates an instance with all fields at their zero defaults. */
    public AllocateProducerIdsResponseData() {
        this.throttleTimeMs = 0;
        this.errorCode = (short) 0;
        this.producerIdStart = 0L;
        this.producerIdLen = 0;
    }

    @Override
    public short apiKey() {
        // 67 = ALLOCATE_PRODUCER_IDS in the Kafka protocol.
        return 67;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /**
     * Reads this message from {@code _readable}. Field order must match
     * SCHEMA_0 exactly, followed by the varint tagged-field section.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        this.errorCode = _readable.readShort();
        this.producerIdStart = _readable.readLong();
        this.producerIdLen = _readable.readInt();
        this._unknownTaggedFields = null;
        // Tagged-field section: varint count, then (tag, size, bytes) triples.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /**
     * Serializes this message to {@code _writable} in SCHEMA_0 order,
     * re-emitting any unknown tagged fields captured by {@link #read}.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        _writable.writeShort(errorCode);
        _writable.writeLong(producerIdStart);
        _writable.writeInt(producerIdLen);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /**
     * Accumulates the serialized size into {@code _size}; mirrors
     * {@link #write} byte-for-byte (4+2+8+4 fixed bytes plus the
     * varint-encoded tagged-field section).
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(4);
        _size.addBytes(2);
        _size.addBytes(8);
        _size.addBytes(4);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AllocateProducerIdsResponseData)) return false;
        AllocateProducerIdsResponseData other = (AllocateProducerIdsResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (errorCode != other.errorCode) return false;
        if (producerIdStart != other.producerIdStart) return false;
        if (producerIdLen != other.producerIdLen) return false;
        // Unknown tagged fields participate in equality as well.
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + errorCode;
        // Fold both halves of the long into the hash.
        hashCode = 31 * hashCode + ((int) (producerIdStart >> 32) ^ (int) producerIdStart);
        hashCode = 31 * hashCode + producerIdLen;
        return hashCode;
    }

    /**
     * Returns a copy of this message. Note: unknown tagged fields are NOT
     * copied into the duplicate (consistent with the generated code).
     */
    @Override
    public AllocateProducerIdsResponseData duplicate() {
        AllocateProducerIdsResponseData _duplicate = new AllocateProducerIdsResponseData();
        _duplicate.throttleTimeMs = throttleTimeMs;
        _duplicate.errorCode = errorCode;
        _duplicate.producerIdStart = producerIdStart;
        _duplicate.producerIdLen = producerIdLen;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "AllocateProducerIdsResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", errorCode=" + errorCode
            + ", producerIdStart=" + producerIdStart
            + ", producerIdLen=" + producerIdLen
            + ")";
    }

    /** @return throttle duration in ms, or zero if the request was not throttled. */
    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    /** @return the top-level response error code. */
    public short errorCode() {
        return this.errorCode;
    }

    /** @return the first producer ID in the granted range, inclusive. */
    public long producerIdStart() {
        return this.producerIdStart;
    }

    /** @return the number of producer IDs in the granted range. */
    public int producerIdLen() {
        return this.producerIdLen;
    }

    /**
     * @return the list of unknown tagged fields, creating an empty mutable
     *         list on first access.
     */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    /** Sets the throttle time; returns {@code this} for chaining. */
    public AllocateProducerIdsResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    /** Sets the error code; returns {@code this} for chaining. */
    public AllocateProducerIdsResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }

    /** Sets the first producer ID of the range; returns {@code this} for chaining. */
    public AllocateProducerIdsResponseData setProducerIdStart(long v) {
        this.producerIdStart = v;
        return this;
    }

    /** Sets the range length; returns {@code this} for chaining. */
    public AllocateProducerIdsResponseData setProducerIdLen(int v) {
        this.producerIdLen = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AllocateProducerIdsResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.LongNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.AllocateProducerIdsResponseData.*; public class AllocateProducerIdsResponseDataJsonConverter { public static AllocateProducerIdsResponseData read(JsonNode _node, short _version) { AllocateProducerIdsResponseData _object = new AllocateProducerIdsResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { throw new RuntimeException("AllocateProducerIdsResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "AllocateProducerIdsResponseData"); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { 
throw new RuntimeException("AllocateProducerIdsResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "AllocateProducerIdsResponseData"); } JsonNode _producerIdStartNode = _node.get("producerIdStart"); if (_producerIdStartNode == null) { throw new RuntimeException("AllocateProducerIdsResponseData: unable to locate field 'producerIdStart', which is mandatory in version " + _version); } else { _object.producerIdStart = MessageUtil.jsonNodeToLong(_producerIdStartNode, "AllocateProducerIdsResponseData"); } JsonNode _producerIdLenNode = _node.get("producerIdLen"); if (_producerIdLenNode == null) { throw new RuntimeException("AllocateProducerIdsResponseData: unable to locate field 'producerIdLen', which is mandatory in version " + _version); } else { _object.producerIdLen = MessageUtil.jsonNodeToInt(_producerIdLenNode, "AllocateProducerIdsResponseData"); } return _object; } public static JsonNode write(AllocateProducerIdsResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); _node.set("errorCode", new ShortNode(_object.errorCode)); _node.set("producerIdStart", new LongNode(_object.producerIdStart)); _node.set("producerIdLen", new IntNode(_object.producerIdLen)); return _node; } public static JsonNode write(AllocateProducerIdsResponseData _object, short _version) { return write(_object, _version, true); } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterClientQuotasRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class AlterClientQuotasRequestData 
implements ApiMessage { List<EntryData> entries; boolean validateOnly; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("entries", new ArrayOf(EntryData.SCHEMA_0), "The quota configuration entries to alter."), new Field("validate_only", Type.BOOLEAN, "Whether the alteration should be validated, but not performed.") ); public static final Schema SCHEMA_1 = new Schema( new Field("entries", new CompactArrayOf(EntryData.SCHEMA_1), "The quota configuration entries to alter."), new Field("validate_only", Type.BOOLEAN, "Whether the alteration should be validated, but not performed."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 1; public AlterClientQuotasRequestData(Readable _readable, short _version) { read(_readable, _version); } public AlterClientQuotasRequestData() { this.entries = new ArrayList<EntryData>(0); this.validateOnly = false; } @Override public short apiKey() { return 49; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 1; } @Override public void read(Readable _readable, short _version) { { if (_version >= 1) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field entries was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<EntryData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new EntryData(_readable, _version)); } this.entries = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 
0) { throw new RuntimeException("non-nullable field entries was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<EntryData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new EntryData(_readable, _version)); } this.entries = newCollection; } } } this.validateOnly = _readable.readByte() != 0; this._unknownTaggedFields = null; if (_version >= 1) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 1) { _writable.writeUnsignedVarint(entries.size() + 1); for (EntryData entriesElement : entries) { entriesElement.write(_writable, _cache, _version); } } else { _writable.writeInt(entries.size()); for (EntryData entriesElement : entries) { entriesElement.write(_writable, _cache, _version); } } _writable.writeByte(validateOnly ? 
(byte) 1 : (byte) 0); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 1) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { if (_version >= 1) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(entries.size() + 1)); } else { _size.addBytes(4); } for (EntryData entriesElement : entries) { entriesElement.addSize(_size, _cache, _version); } } _size.addBytes(1); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 1) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterClientQuotasRequestData)) return false; AlterClientQuotasRequestData other = (AlterClientQuotasRequestData) obj; if (this.entries == null) { if (other.entries != null) return false; } else { if (!this.entries.equals(other.entries)) return false; } if (validateOnly != other.validateOnly) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (entries == null ? 
0 : entries.hashCode()); hashCode = 31 * hashCode + (validateOnly ? 1231 : 1237); return hashCode; } @Override public AlterClientQuotasRequestData duplicate() { AlterClientQuotasRequestData _duplicate = new AlterClientQuotasRequestData(); ArrayList<EntryData> newEntries = new ArrayList<EntryData>(entries.size()); for (EntryData _element : entries) { newEntries.add(_element.duplicate()); } _duplicate.entries = newEntries; _duplicate.validateOnly = validateOnly; return _duplicate; } @Override public String toString() { return "AlterClientQuotasRequestData(" + "entries=" + MessageUtil.deepToString(entries.iterator()) + ", validateOnly=" + (validateOnly ? "true" : "false") + ")"; } public List<EntryData> entries() { return this.entries; } public boolean validateOnly() { return this.validateOnly; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterClientQuotasRequestData setEntries(List<EntryData> v) { this.entries = v; return this; } public AlterClientQuotasRequestData setValidateOnly(boolean v) { this.validateOnly = v; return this; } public static class EntryData implements Message { List<EntityData> entity; List<OpData> ops; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("entity", new ArrayOf(EntityData.SCHEMA_0), "The quota entity to alter."), new Field("ops", new ArrayOf(OpData.SCHEMA_0), "An individual quota configuration entry to alter.") ); public static final Schema SCHEMA_1 = new Schema( new Field("entity", new CompactArrayOf(EntityData.SCHEMA_1), "The quota entity to alter."), new Field("ops", new CompactArrayOf(OpData.SCHEMA_1), "An individual quota configuration entry to alter."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final 
short HIGHEST_SUPPORTED_VERSION = 1; public EntryData(Readable _readable, short _version) { read(_readable, _version); } public EntryData() { this.entity = new ArrayList<EntityData>(0); this.ops = new ArrayList<OpData>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 1; } @Override public void read(Readable _readable, short _version) { if (_version > 1) { throw new UnsupportedVersionException("Can't read version " + _version + " of EntryData"); } { if (_version >= 1) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field entity was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<EntityData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new EntityData(_readable, _version)); } this.entity = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field entity was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<EntityData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new EntityData(_readable, _version)); } this.entity = newCollection; } } } { if (_version >= 1) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field ops was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a 
collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OpData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OpData(_readable, _version)); } this.ops = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field ops was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<OpData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new OpData(_readable, _version)); } this.ops = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 1) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 1) { _writable.writeUnsignedVarint(entity.size() + 1); for (EntityData entityElement : entity) { entityElement.write(_writable, _cache, _version); } } else { _writable.writeInt(entity.size()); for (EntityData entityElement : entity) { entityElement.write(_writable, _cache, _version); } } if (_version >= 1) { _writable.writeUnsignedVarint(ops.size() + 1); for (OpData opsElement : ops) { opsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(ops.size()); for (OpData opsElement : ops) { opsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = 
RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 1) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 1) { throw new UnsupportedVersionException("Can't size version " + _version + " of EntryData"); } { if (_version >= 1) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(entity.size() + 1)); } else { _size.addBytes(4); } for (EntityData entityElement : entity) { entityElement.addSize(_size, _cache, _version); } } { if (_version >= 1) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(ops.size() + 1)); } else { _size.addBytes(4); } for (OpData opsElement : ops) { opsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 1) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof EntryData)) return false; EntryData other = (EntryData) obj; if (this.entity == null) { if (other.entity != null) return false; } else { if (!this.entity.equals(other.entity)) return false; } if (this.ops == null) { if (other.ops != null) return false; } else { if (!this.ops.equals(other.ops)) return false; 
} return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (entity == null ? 0 : entity.hashCode()); hashCode = 31 * hashCode + (ops == null ? 0 : ops.hashCode()); return hashCode; } @Override public EntryData duplicate() { EntryData _duplicate = new EntryData(); ArrayList<EntityData> newEntity = new ArrayList<EntityData>(entity.size()); for (EntityData _element : entity) { newEntity.add(_element.duplicate()); } _duplicate.entity = newEntity; ArrayList<OpData> newOps = new ArrayList<OpData>(ops.size()); for (OpData _element : ops) { newOps.add(_element.duplicate()); } _duplicate.ops = newOps; return _duplicate; } @Override public String toString() { return "EntryData(" + "entity=" + MessageUtil.deepToString(entity.iterator()) + ", ops=" + MessageUtil.deepToString(ops.iterator()) + ")"; } public List<EntityData> entity() { return this.entity; } public List<OpData> ops() { return this.ops; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public EntryData setEntity(List<EntityData> v) { this.entity = v; return this; } public EntryData setOps(List<OpData> v) { this.ops = v; return this; } } public static class EntityData implements Message { String entityType; String entityName; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("entity_type", Type.STRING, "The entity type."), new Field("entity_name", Type.NULLABLE_STRING, "The name of the entity, or null if the default.") ); public static final Schema SCHEMA_1 = new Schema( new Field("entity_type", Type.COMPACT_STRING, "The entity type."), new Field("entity_name", Type.COMPACT_NULLABLE_STRING, "The name of the entity, or null if the default."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new 
Schema[] { SCHEMA_0, SCHEMA_1 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 1; public EntityData(Readable _readable, short _version) { read(_readable, _version); } public EntityData() { this.entityType = ""; this.entityName = ""; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 1; } @Override public void read(Readable _readable, short _version) { if (_version > 1) { throw new UnsupportedVersionException("Can't read version " + _version + " of EntityData"); } { int length; if (_version >= 1) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field entityType was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field entityType had invalid length " + length); } else { this.entityType = _readable.readString(length); } } { int length; if (_version >= 1) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.entityName = null; } else if (length > 0x7fff) { throw new RuntimeException("string field entityName had invalid length " + length); } else { this.entityName = _readable.readString(length); } } this._unknownTaggedFields = null; if (_version >= 1) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(entityType); if (_version >= 1) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } 
else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (entityName == null) { if (_version >= 1) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(entityName); if (_version >= 1) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 1) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 1) { throw new UnsupportedVersionException("Can't size version " + _version + " of EntityData"); } { byte[] _stringBytes = entityType.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'entityType' field is too long to be serialized"); } _cache.cacheSerializedValue(entityType, _stringBytes); if (_version >= 1) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (entityName == null) { if (_version >= 1) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = entityName.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'entityName' field is too long to be serialized"); } _cache.cacheSerializedValue(entityName, _stringBytes); if (_version >= 1) { _size.addBytes(_stringBytes.length + 
ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 1) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof EntityData)) return false; EntityData other = (EntityData) obj; if (this.entityType == null) { if (other.entityType != null) return false; } else { if (!this.entityType.equals(other.entityType)) return false; } if (this.entityName == null) { if (other.entityName != null) return false; } else { if (!this.entityName.equals(other.entityName)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (entityType == null ? 0 : entityType.hashCode()); hashCode = 31 * hashCode + (entityName == null ? 0 : entityName.hashCode()); return hashCode; } @Override public EntityData duplicate() { EntityData _duplicate = new EntityData(); _duplicate.entityType = entityType; if (entityName == null) { _duplicate.entityName = null; } else { _duplicate.entityName = entityName; } return _duplicate; } @Override public String toString() { return "EntityData(" + "entityType=" + ((entityType == null) ? "null" : "'" + entityType.toString() + "'") + ", entityName=" + ((entityName == null) ? 
"null" : "'" + entityName.toString() + "'") + ")"; } public String entityType() { return this.entityType; } public String entityName() { return this.entityName; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public EntityData setEntityType(String v) { this.entityType = v; return this; } public EntityData setEntityName(String v) { this.entityName = v; return this; } } public static class OpData implements Message { String key; double value; boolean remove; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("key", Type.STRING, "The quota configuration key."), new Field("value", Type.FLOAT64, "The value to set, otherwise ignored if the value is to be removed."), new Field("remove", Type.BOOLEAN, "Whether the quota configuration value should be removed, otherwise set.") ); public static final Schema SCHEMA_1 = new Schema( new Field("key", Type.COMPACT_STRING, "The quota configuration key."), new Field("value", Type.FLOAT64, "The value to set, otherwise ignored if the value is to be removed."), new Field("remove", Type.BOOLEAN, "Whether the quota configuration value should be removed, otherwise set."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 1; public OpData(Readable _readable, short _version) { read(_readable, _version); } public OpData() { this.key = ""; this.value = 0.0; this.remove = false; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 1; } @Override public void read(Readable _readable, short _version) { if (_version > 1) { throw new UnsupportedVersionException("Can't read version " + _version + " of OpData"); } { int length; if 
(_version >= 1) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field key was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field key had invalid length " + length); } else { this.key = _readable.readString(length); } } this.value = _readable.readDouble(); this.remove = _readable.readByte() != 0; this._unknownTaggedFields = null; if (_version >= 1) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(key); if (_version >= 1) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } _writable.writeDouble(value); _writable.writeByte(remove ? 
(byte) 1 : (byte) 0); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 1) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 1) { throw new UnsupportedVersionException("Can't size version " + _version + " of OpData"); } { byte[] _stringBytes = key.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'key' field is too long to be serialized"); } _cache.cacheSerializedValue(key, _stringBytes); if (_version >= 1) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } _size.addBytes(8); _size.addBytes(1); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 1) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof OpData)) return false; OpData other = (OpData) obj; if (this.key == null) { if (other.key != null) return false; } else { if (!this.key.equals(other.key)) return false; } if (value != other.value) return false; if (remove != other.remove) 
return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (key == null ? 0 : key.hashCode()); hashCode = 31 * hashCode + Double.hashCode(value); hashCode = 31 * hashCode + (remove ? 1231 : 1237); return hashCode; } @Override public OpData duplicate() { OpData _duplicate = new OpData(); _duplicate.key = key; _duplicate.value = value; _duplicate.remove = remove; return _duplicate; } @Override public String toString() { return "OpData(" + "key=" + ((key == null) ? "null" : "'" + key.toString() + "'") + ", value=" + value + ", remove=" + (remove ? "true" : "false") + ")"; } public String key() { return this.key; } public double value() { return this.value; } public boolean remove() { return this.remove; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public OpData setKey(String v) { this.key = v; return this; } public OpData setValue(double v) { this.value = v; return this; } public OpData setRemove(boolean v) { this.remove = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterClientQuotasRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.BooleanNode; import com.fasterxml.jackson.databind.node.DoubleNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.NullNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.AlterClientQuotasRequestData.*; public class AlterClientQuotasRequestDataJsonConverter { public static AlterClientQuotasRequestData read(JsonNode _node, short _version) { AlterClientQuotasRequestData _object = new AlterClientQuotasRequestData(); JsonNode _entriesNode = _node.get("entries"); if (_entriesNode == null) { throw new RuntimeException("AlterClientQuotasRequestData: unable to locate field 'entries', which is mandatory in version " + _version); } else { if (!_entriesNode.isArray()) { throw new RuntimeException("AlterClientQuotasRequestData expected 
a JSON array, but got " + _node.getNodeType()); } ArrayList<EntryData> _collection = new ArrayList<EntryData>(_entriesNode.size()); _object.entries = _collection; for (JsonNode _element : _entriesNode) { _collection.add(EntryDataJsonConverter.read(_element, _version)); } } JsonNode _validateOnlyNode = _node.get("validateOnly"); if (_validateOnlyNode == null) { throw new RuntimeException("AlterClientQuotasRequestData: unable to locate field 'validateOnly', which is mandatory in version " + _version); } else { if (!_validateOnlyNode.isBoolean()) { throw new RuntimeException("AlterClientQuotasRequestData expected Boolean type, but got " + _node.getNodeType()); } _object.validateOnly = _validateOnlyNode.asBoolean(); } return _object; } public static JsonNode write(AlterClientQuotasRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); ArrayNode _entriesArray = new ArrayNode(JsonNodeFactory.instance); for (EntryData _element : _object.entries) { _entriesArray.add(EntryDataJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("entries", _entriesArray); _node.set("validateOnly", BooleanNode.valueOf(_object.validateOnly)); return _node; } public static JsonNode write(AlterClientQuotasRequestData _object, short _version) { return write(_object, _version, true); } public static class EntityDataJsonConverter { public static EntityData read(JsonNode _node, short _version) { EntityData _object = new EntityData(); JsonNode _entityTypeNode = _node.get("entityType"); if (_entityTypeNode == null) { throw new RuntimeException("EntityData: unable to locate field 'entityType', which is mandatory in version " + _version); } else { if (!_entityTypeNode.isTextual()) { throw new RuntimeException("EntityData expected a string type, but got " + _node.getNodeType()); } _object.entityType = _entityTypeNode.asText(); } JsonNode _entityNameNode = _node.get("entityName"); if (_entityNameNode == null) { 
throw new RuntimeException("EntityData: unable to locate field 'entityName', which is mandatory in version " + _version); } else { if (_entityNameNode.isNull()) { _object.entityName = null; } else { if (!_entityNameNode.isTextual()) { throw new RuntimeException("EntityData expected a string type, but got " + _node.getNodeType()); } _object.entityName = _entityNameNode.asText(); } } return _object; } public static JsonNode write(EntityData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("entityType", new TextNode(_object.entityType)); if (_object.entityName == null) { _node.set("entityName", NullNode.instance); } else { _node.set("entityName", new TextNode(_object.entityName)); } return _node; } public static JsonNode write(EntityData _object, short _version) { return write(_object, _version, true); } } public static class EntryDataJsonConverter { public static EntryData read(JsonNode _node, short _version) { EntryData _object = new EntryData(); JsonNode _entityNode = _node.get("entity"); if (_entityNode == null) { throw new RuntimeException("EntryData: unable to locate field 'entity', which is mandatory in version " + _version); } else { if (!_entityNode.isArray()) { throw new RuntimeException("EntryData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<EntityData> _collection = new ArrayList<EntityData>(_entityNode.size()); _object.entity = _collection; for (JsonNode _element : _entityNode) { _collection.add(EntityDataJsonConverter.read(_element, _version)); } } JsonNode _opsNode = _node.get("ops"); if (_opsNode == null) { throw new RuntimeException("EntryData: unable to locate field 'ops', which is mandatory in version " + _version); } else { if (!_opsNode.isArray()) { throw new RuntimeException("EntryData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<OpData> _collection = new ArrayList<OpData>(_opsNode.size()); _object.ops = _collection; for 
(JsonNode _element : _opsNode) { _collection.add(OpDataJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(EntryData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); ArrayNode _entityArray = new ArrayNode(JsonNodeFactory.instance); for (EntityData _element : _object.entity) { _entityArray.add(EntityDataJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("entity", _entityArray); ArrayNode _opsArray = new ArrayNode(JsonNodeFactory.instance); for (OpData _element : _object.ops) { _opsArray.add(OpDataJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("ops", _opsArray); return _node; } public static JsonNode write(EntryData _object, short _version) { return write(_object, _version, true); } } public static class OpDataJsonConverter { public static OpData read(JsonNode _node, short _version) { OpData _object = new OpData(); JsonNode _keyNode = _node.get("key"); if (_keyNode == null) { throw new RuntimeException("OpData: unable to locate field 'key', which is mandatory in version " + _version); } else { if (!_keyNode.isTextual()) { throw new RuntimeException("OpData expected a string type, but got " + _node.getNodeType()); } _object.key = _keyNode.asText(); } JsonNode _valueNode = _node.get("value"); if (_valueNode == null) { throw new RuntimeException("OpData: unable to locate field 'value', which is mandatory in version " + _version); } else { _object.value = MessageUtil.jsonNodeToDouble(_valueNode, "OpData"); } JsonNode _removeNode = _node.get("remove"); if (_removeNode == null) { throw new RuntimeException("OpData: unable to locate field 'remove', which is mandatory in version " + _version); } else { if (!_removeNode.isBoolean()) { throw new RuntimeException("OpData expected Boolean type, but got " + _node.getNodeType()); } _object.remove = _removeNode.asBoolean(); } return _object; } public static JsonNode write(OpData 
_object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("key", new TextNode(_object.key)); _node.set("value", new DoubleNode(_object.value)); _node.set("remove", BooleanNode.valueOf(_object.remove)); return _node; } public static JsonNode write(OpData _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterClientQuotasResponseData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Generated wire-format model for the AlterClientQuotas response (api key 49).
 * Versions 0 and 1 are supported; version 1 switches to the "compact"/flexible
 * encoding (unsigned-varint lengths and a trailing tagged-field section), as
 * the SCHEMA_1 definition and every {@code _version >= 1} branch below show.
 *
 * NOTE(review): this file is machine-generated — only comments were added here;
 * regenerate from the message JSON rather than editing logic by hand.
 */
public class AlterClientQuotasResponseData implements ApiMessage {
    // Throttle duration in ms, or zero if no quota was violated (see schema field doc).
    int throttleTimeMs;
    // One EntryData per altered quota entity.
    List<EntryData> entries;
    // Tagged fields read from the wire that this version of the code does not know.
    private List<RawTaggedField> _unknownTaggedFields;

    // Version 0: classic encoding (int32 array lengths, int16 string lengths).
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("entries", new ArrayOf(EntryData.SCHEMA_0), "The quota configuration entries to alter.")
        );

    // Version 1: flexible/compact encoding plus an (empty) tagged-fields section.
    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("entries", new CompactArrayOf(EntryData.SCHEMA_1), "The quota configuration entries to alter."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 1;

    /** Deserializing constructor: reads the message from {@code _readable} at {@code _version}. */
    public AlterClientQuotasResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: zero throttle time and an empty entries list. */
    public AlterClientQuotasResponseData() {
        this.throttleTimeMs = 0;
        this.entries = new ArrayList<EntryData>(0);
    }

    @Override
    public short apiKey() {
        // 49 = ALTER_CLIENT_QUOTAS
        return 49;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 1;
    }

    /**
     * Deserializes this message from the wire. Version 1 uses compact arrays
     * (unsigned varint length + 1) and reads a trailing tagged-field section;
     * version 0 uses plain int32 array lengths and has no tagged fields.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        {
            if (_version >= 1) {
                int arrayLength;
                // Compact arrays encode length + 1; 0 on the wire means a null array.
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field entries was serialized as null");
                } else {
                    // Guard against absurd lengths: can't hold more elements than bytes remain.
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<EntryData> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new EntryData(_readable, _version));
                    }
                    this.entries = newCollection;
                }
            } else {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field entries was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<EntryData> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new EntryData(_readable, _version));
                    }
                    this.entries = newCollection;
                }
            }
        }
        this._unknownTaggedFields = null;
        if (_version >= 1) {
            // Flexible versions carry a tagged-field section; unrecognized tags are preserved.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Serializes this message to the wire, mirroring {@link #read}. Throws
     * UnsupportedVersionException if tagged fields are present but the target
     * version (0) cannot carry them.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        if (_version >= 1) {
            _writable.writeUnsignedVarint(entries.size() + 1);
            for (EntryData entriesElement : entries) {
                entriesElement.write(_writable, _cache, _version);
            }
        } else {
            _writable.writeInt(entries.size());
            for (EntryData entriesElement : entries) {
                entriesElement.write(_writable, _cache, _version);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 1) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates this message's serialized size into {@code _size}; the
     * branches match {@link #write} byte-for-byte.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(4);
        {
            if (_version >= 1) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(entries.size() + 1));
            } else {
                _size.addBytes(4);
            }
            for (EntryData entriesElement : entries) {
                entriesElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 1) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AlterClientQuotasResponseData)) return false;
        AlterClientQuotasResponseData other = (AlterClientQuotasResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (this.entries == null) {
            if (other.entries != null) return false;
        } else {
            if (!this.entries.equals(other.entries)) return false;
        }
        // Unknown tagged fields participate in equality too.
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + (entries == null ? 0 : entries.hashCode());
        return hashCode;
    }

    /** Deep copy: entries are duplicated element-by-element. */
    @Override
    public AlterClientQuotasResponseData duplicate() {
        AlterClientQuotasResponseData _duplicate = new AlterClientQuotasResponseData();
        _duplicate.throttleTimeMs = throttleTimeMs;
        ArrayList<EntryData> newEntries = new ArrayList<EntryData>(entries.size());
        for (EntryData _element : entries) {
            newEntries.add(_element.duplicate());
        }
        _duplicate.entries = newEntries;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "AlterClientQuotasResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", entries=" + MessageUtil.deepToString(entries.iterator())
            + ")";
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public List<EntryData> entries() {
        return this.entries;
    }

    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        // Lazily created so messages with no unknown tags allocate nothing extra.
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public AlterClientQuotasResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public AlterClientQuotasResponseData setEntries(List<EntryData> v) {
        this.entries = v;
        return this;
    }

    /**
     * Per-entity result of the quota alteration: error code/message plus the
     * entity the result applies to.
     */
    public static class EntryData implements Message {
        // 0 means the alteration for this entity succeeded.
        short errorCode;
        // Null when the alteration succeeded (nullable on the wire).
        String errorMessage;
        // The quota entity this result refers to.
        List<EntityData> entity;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("error_code", Type.INT16, "The error code, or `0` if the quota alteration succeeded."),
                new Field("error_message", Type.NULLABLE_STRING, "The error message, or `null` if the quota alteration succeeded."),
                new Field("entity", new ArrayOf(EntityData.SCHEMA_0), "The quota entity to alter.")
            );

        public static final Schema SCHEMA_1 =
            new Schema(
                new Field("error_code", Type.INT16, "The error code, or `0` if the quota alteration succeeded."),
                new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The error message, or `null` if the quota alteration succeeded."),
                new Field("entity", new CompactArrayOf(EntityData.SCHEMA_1), "The quota entity to alter."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 1;

        public EntryData(Readable _readable, short _version) {
            read(_readable, _version);
        }

        public EntryData() {
            this.errorCode = (short) 0;
            // Default is the empty string, not null, for a fresh (unread) object.
            this.errorMessage = "";
            this.entity = new ArrayList<EntityData>(0);
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        /** Deserializes one entry; version >= 1 uses compact string/array encodings. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of EntryData");
            }
            this.errorCode = _readable.readShort();
            {
                int length;
                if (_version >= 1) {
                    // Compact nullable string: varint length + 1, 0 meaning null.
                    length = _readable.readUnsignedVarint() - 1;
                } else {
                    // Classic nullable string: int16 length, -1 meaning null.
                    length = _readable.readShort();
                }
                if (length < 0) {
                    this.errorMessage = null;
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field errorMessage had invalid length " + length);
                } else {
                    this.errorMessage = _readable.readString(length);
                }
            }
            {
                if (_version >= 1) {
                    int arrayLength;
                    arrayLength = _readable.readUnsignedVarint() - 1;
                    if (arrayLength < 0) {
                        throw new RuntimeException("non-nullable field entity was serialized as null");
                    } else {
                        if (arrayLength > _readable.remaining()) {
                            throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                        }
                        ArrayList<EntityData> newCollection = new ArrayList<>(arrayLength);
                        for (int i = 0; i < arrayLength; i++) {
                            newCollection.add(new EntityData(_readable, _version));
                        }
                        this.entity = newCollection;
                    }
                } else {
                    int arrayLength;
                    arrayLength = _readable.readInt();
                    if (arrayLength < 0) {
                        throw new RuntimeException("non-nullable field entity was serialized as null");
                    } else {
                        if (arrayLength > _readable.remaining()) {
                            throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                        }
                        ArrayList<EntityData> newCollection = new ArrayList<>(arrayLength);
                        for (int i = 0; i < arrayLength; i++) {
                            newCollection.add(new EntityData(_readable, _version));
                        }
                        this.entity = newCollection;
                    }
                }
            }
            this._unknownTaggedFields = null;
            if (_version >= 1) {
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }

        /** Serializes one entry, mirroring {@link #read}. */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            _writable.writeShort(errorCode);
            if (errorMessage == null) {
                if (_version >= 1) {
                    _writable.writeUnsignedVarint(0);
                } else {
                    _writable.writeShort((short) -1);
                }
            } else {
                // UTF-8 bytes were cached during addSize(); reuse them here.
                byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
                if (_version >= 1) {
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                } else {
                    _writable.writeShort((short) _stringBytes.length);
                }
                _writable.writeByteArray(_stringBytes);
            }
            if (_version >= 1) {
                _writable.writeUnsignedVarint(entity.size() + 1);
                for (EntityData entityElement : entity) {
                    entityElement.write(_writable, _cache, _version);
                }
            } else {
                _writable.writeInt(entity.size());
                for (EntityData entityElement : entity) {
                    entityElement.write(_writable, _cache, _version);
                }
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 1) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        /** Accumulates the serialized size; also caches the UTF-8 bytes of errorMessage. */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of EntryData");
            }
            _size.addBytes(2);
            if (errorMessage == null) {
                if (_version >= 1) {
                    _size.addBytes(1);
                } else {
                    _size.addBytes(2);
                }
            } else {
                byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'errorMessage' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(errorMessage, _stringBytes);
                if (_version >= 1) {
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                } else {
                    _size.addBytes(_stringBytes.length + 2);
                }
            }
            {
                if (_version >= 1) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(entity.size() + 1));
                } else {
                    _size.addBytes(4);
                }
                for (EntityData entityElement : entity) {
                    entityElement.addSize(_size, _cache, _version);
                }
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_version >= 1) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof EntryData)) return false;
            EntryData other = (EntryData) obj;
            if (errorCode != other.errorCode) return false;
            if (this.errorMessage == null) {
                if (other.errorMessage != null) return false;
            } else {
                if (!this.errorMessage.equals(other.errorMessage)) return false;
            }
            if (this.entity == null) {
                if (other.entity != null) return false;
            } else {
                if (!this.entity.equals(other.entity)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + errorCode;
            hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode());
            hashCode = 31 * hashCode + (entity == null ? 0 : entity.hashCode());
            return hashCode;
        }

        @Override
        public EntryData duplicate() {
            EntryData _duplicate = new EntryData();
            _duplicate.errorCode = errorCode;
            if (errorMessage == null) {
                _duplicate.errorMessage = null;
            } else {
                _duplicate.errorMessage = errorMessage;
            }
            ArrayList<EntityData> newEntity = new ArrayList<EntityData>(entity.size());
            for (EntityData _element : entity) {
                newEntity.add(_element.duplicate());
            }
            _duplicate.entity = newEntity;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "EntryData("
                + "errorCode=" + errorCode
                + ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'")
                + ", entity=" + MessageUtil.deepToString(entity.iterator())
                + ")";
        }

        public short errorCode() {
            return this.errorCode;
        }

        public String errorMessage() {
            return this.errorMessage;
        }

        public List<EntityData> entity() {
            return this.entity;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public EntryData setErrorCode(short v) {
            this.errorCode = v;
            return this;
        }

        public EntryData setErrorMessage(String v) {
            this.errorMessage = v;
            return this;
        }

        public EntryData setEntity(List<EntityData> v) {
            this.entity = v;
            return this;
        }
    }

    /**
     * A (type, name) pair identifying a quota entity; a null name denotes the
     * default entity for the type (see the schema field doc).
     */
    public static class EntityData implements Message {
        // Non-nullable on the wire.
        String entityType;
        // Nullable on the wire: null selects the default entity.
        String entityName;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("entity_type", Type.STRING, "The entity type."),
                new Field("entity_name", Type.NULLABLE_STRING, "The name of the entity, or null if the default.")
            );

        public static final Schema SCHEMA_1 =
            new Schema(
                new Field("entity_type", Type.COMPACT_STRING, "The entity type."),
                new Field("entity_name", Type.COMPACT_NULLABLE_STRING, "The name of the entity, or null if the default."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 1;

        public EntityData(Readable _readable, short _version) {
            read(_readable, _version);
        }

        public EntityData() {
            this.entityType = "";
            this.entityName = "";
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        /** Deserializes the (type, name) pair; compact string encoding for version >= 1. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of EntityData");
            }
            {
                int length;
                if (_version >= 1) {
                    length = _readable.readUnsignedVarint() - 1;
                } else {
                    length = _readable.readShort();
                }
                if (length < 0) {
                    // entityType is non-nullable; a null here is a protocol violation.
                    throw new RuntimeException("non-nullable field entityType was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field entityType had invalid length " + length);
                } else {
                    this.entityType = _readable.readString(length);
                }
            }
            {
                int length;
                if (_version >= 1) {
                    length = _readable.readUnsignedVarint() - 1;
                } else {
                    length = _readable.readShort();
                }
                if (length < 0) {
                    this.entityName = null;
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field entityName had invalid length " + length);
                } else {
                    this.entityName = _readable.readString(length);
                }
            }
            this._unknownTaggedFields = null;
            if (_version >= 1) {
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }

        /** Serializes the (type, name) pair, mirroring {@link #read}. */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(entityType);
                if (_version >= 1) {
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                } else {
                    _writable.writeShort((short) _stringBytes.length);
                }
                _writable.writeByteArray(_stringBytes);
            }
            if (entityName == null) {
                if (_version >= 1) {
                    _writable.writeUnsignedVarint(0);
                } else {
                    _writable.writeShort((short) -1);
                }
            } else {
                byte[] _stringBytes = _cache.getSerializedValue(entityName);
                if (_version >= 1) {
                    _writable.writeUnsignedVarint(_stringBytes.length + 1);
                } else {
                    _writable.writeShort((short) _stringBytes.length);
                }
                _writable.writeByteArray(_stringBytes);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 1) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        /** Accumulates the serialized size; caches UTF-8 bytes for both strings. */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of EntityData");
            }
            {
                byte[] _stringBytes = entityType.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'entityType' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(entityType, _stringBytes);
                if (_version >= 1) {
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                } else {
                    _size.addBytes(_stringBytes.length + 2);
                }
            }
            if (entityName == null) {
                if (_version >= 1) {
                    _size.addBytes(1);
                } else {
                    _size.addBytes(2);
                }
            } else {
                byte[] _stringBytes = entityName.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'entityName' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(entityName, _stringBytes);
                if (_version >= 1) {
                    _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
                } else {
                    _size.addBytes(_stringBytes.length + 2);
                }
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_version >= 1) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof EntityData)) return false;
            EntityData other = (EntityData) obj;
            if (this.entityType == null) {
                if (other.entityType != null) return false;
            } else {
                if (!this.entityType.equals(other.entityType)) return false;
            }
            if (this.entityName == null) {
                if (other.entityName != null) return false;
            } else {
                if (!this.entityName.equals(other.entityName)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (entityType == null ? 0 : entityType.hashCode());
            hashCode = 31 * hashCode + (entityName == null ? 0 : entityName.hashCode());
            return hashCode;
        }

        @Override
        public EntityData duplicate() {
            EntityData _duplicate = new EntityData();
            _duplicate.entityType = entityType;
            if (entityName == null) {
                _duplicate.entityName = null;
            } else {
                _duplicate.entityName = entityName;
            }
            return _duplicate;
        }

        @Override
        public String toString() {
            return "EntityData("
                + "entityType=" + ((entityType == null) ? "null" : "'" + entityType.toString() + "'")
                + ", entityName=" + ((entityName == null) ? "null" : "'" + entityName.toString() + "'")
                + ")";
        }

        public String entityType() {
            return this.entityType;
        }

        public String entityName() {
            return this.entityName;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public EntityData setEntityType(String v) {
            this.entityType = v;
            return this;
        }

        public EntityData setEntityName(String v) {
            this.entityName = v;
            return this;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterClientQuotasResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.AlterClientQuotasResponseData.*;

/**
 * Generated JSON converter for {@link AlterClientQuotasResponseData}: maps the
 * message (and its nested EntryData/EntityData types) to and from Jackson
 * {@link JsonNode} trees. Every field in the schema is mandatory in the JSON
 * form — a missing key raises a RuntimeException naming the field.
 *
 * NOTE(review): machine-generated file — only comments were added here.
 */
public class AlterClientQuotasResponseDataJsonConverter {
    /** Parses a JSON object into an AlterClientQuotasResponseData, validating required fields. */
    public static AlterClientQuotasResponseData read(JsonNode _node, short _version) {
        AlterClientQuotasResponseData _object = new AlterClientQuotasResponseData();
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            throw new RuntimeException("AlterClientQuotasResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "AlterClientQuotasResponseData");
        }
        JsonNode _entriesNode = _node.get("entries");
        if (_entriesNode == null) {
            throw new RuntimeException("AlterClientQuotasResponseData: unable to locate field 'entries', which is mandatory in version " + _version);
        } else {
            if (!_entriesNode.isArray()) {
                throw new RuntimeException("AlterClientQuotasResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<EntryData> _collection = new ArrayList<EntryData>(_entriesNode.size());
            _object.entries = _collection;
            for (JsonNode _element : _entriesNode) {
                _collection.add(EntryDataJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /** Renders an AlterClientQuotasResponseData as a JSON object. */
    public static JsonNode write(AlterClientQuotasResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        ArrayNode _entriesArray = new ArrayNode(JsonNodeFactory.instance);
        for (EntryData _element : _object.entries) {
            _entriesArray.add(EntryDataJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("entries", _entriesArray);
        return _node;
    }

    /** Convenience overload: serialize with records included. */
    public static JsonNode write(AlterClientQuotasResponseData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the nested EntityData (entity type/name pair). */
    public static class EntityDataJsonConverter {
        public static EntityData read(JsonNode _node, short _version) {
            EntityData _object = new EntityData();
            JsonNode _entityTypeNode = _node.get("entityType");
            if (_entityTypeNode == null) {
                throw new RuntimeException("EntityData: unable to locate field 'entityType', which is mandatory in version " + _version);
            } else {
                if (!_entityTypeNode.isTextual()) {
                    throw new RuntimeException("EntityData expected a string type, but got " + _node.getNodeType());
                }
                _object.entityType = _entityTypeNode.asText();
            }
            JsonNode _entityNameNode = _node.get("entityName");
            if (_entityNameNode == null) {
                throw new RuntimeException("EntityData: unable to locate field 'entityName', which is mandatory in version " + _version);
            } else {
                // The key must be present, but its value may be JSON null (nullable field).
                if (_entityNameNode.isNull()) {
                    _object.entityName = null;
                } else {
                    if (!_entityNameNode.isTextual()) {
                        throw new RuntimeException("EntityData expected a string type, but got " + _node.getNodeType());
                    }
                    _object.entityName = _entityNameNode.asText();
                }
            }
            return _object;
        }

        public static JsonNode write(EntityData _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("entityType", new TextNode(_object.entityType));
            if (_object.entityName == null) {
                _node.set("entityName", NullNode.instance);
            } else {
                _node.set("entityName", new TextNode(_object.entityName));
            }
            return _node;
        }

        public static JsonNode write(EntityData _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** JSON converter for the nested EntryData (per-entity alteration result). */
    public static class EntryDataJsonConverter {
        public static EntryData read(JsonNode _node, short _version) {
            EntryData _object = new EntryData();
            JsonNode _errorCodeNode = _node.get("errorCode");
            if (_errorCodeNode == null) {
                throw new RuntimeException("EntryData: unable to locate field 'errorCode', which is mandatory in version " + _version);
            } else {
                _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "EntryData");
            }
            JsonNode _errorMessageNode = _node.get("errorMessage");
            if (_errorMessageNode == null) {
                throw new RuntimeException("EntryData: unable to locate field 'errorMessage', which is mandatory in version " + _version);
            } else {
                // Key required, value may be JSON null (nullable field).
                if (_errorMessageNode.isNull()) {
                    _object.errorMessage = null;
                } else {
                    if (!_errorMessageNode.isTextual()) {
                        throw new RuntimeException("EntryData expected a string type, but got " + _node.getNodeType());
                    }
                    _object.errorMessage = _errorMessageNode.asText();
                }
            }
            JsonNode _entityNode = _node.get("entity");
            if (_entityNode == null) {
                throw new RuntimeException("EntryData: unable to locate field 'entity', which is mandatory in version " + _version);
            } else {
                if (!_entityNode.isArray()) {
                    throw new RuntimeException("EntryData expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<EntityData> _collection = new ArrayList<EntityData>(_entityNode.size());
                _object.entity = _collection;
                for (JsonNode _element : _entityNode) {
                    _collection.add(EntityDataJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        public static JsonNode write(EntryData _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("errorCode", new ShortNode(_object.errorCode));
            if (_object.errorMessage == null) {
                _node.set("errorMessage", NullNode.instance);
            } else {
                _node.set("errorMessage", new TextNode(_object.errorMessage));
            }
            ArrayNode _entityArray = new ArrayNode(JsonNodeFactory.instance);
            for (EntityData _element : _object.entity) {
                _entityArray.add(EntityDataJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("entity", _entityArray);
            return _node;
        }

        public static JsonNode write(EntryData _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterConfigsRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import 
org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class AlterConfigsRequestData implements ApiMessage { AlterConfigsResourceCollection resources; boolean validateOnly; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("resources", new ArrayOf(AlterConfigsResource.SCHEMA_0), "The updates for each resource."), new Field("validate_only", Type.BOOLEAN, "True if we should validate the request, but not change the configurations.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("resources", new CompactArrayOf(AlterConfigsResource.SCHEMA_2), "The updates for each resource."), new Field("validate_only", Type.BOOLEAN, "True if we should validate the request, but not change the configurations."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 2; public AlterConfigsRequestData(Readable _readable, short _version) { read(_readable, _version); } public AlterConfigsRequestData() { this.resources = new AlterConfigsResourceCollection(0); this.validateOnly = false; } @Override public short apiKey() { return 33; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 2; } @Override public void read(Readable _readable, short _version) { { if (_version >= 2) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field resources was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes 
remaining."); } AlterConfigsResourceCollection newCollection = new AlterConfigsResourceCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterConfigsResource(_readable, _version)); } this.resources = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field resources was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AlterConfigsResourceCollection newCollection = new AlterConfigsResourceCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterConfigsResource(_readable, _version)); } this.resources = newCollection; } } } this.validateOnly = _readable.readByte() != 0; this._unknownTaggedFields = null; if (_version >= 2) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 2) { _writable.writeUnsignedVarint(resources.size() + 1); for (AlterConfigsResource resourcesElement : resources) { resourcesElement.write(_writable, _cache, _version); } } else { _writable.writeInt(resources.size()); for (AlterConfigsResource resourcesElement : resources) { resourcesElement.write(_writable, _cache, _version); } } _writable.writeByte(validateOnly ? 
(byte) 1 : (byte) 0); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 2) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(resources.size() + 1)); } else { _size.addBytes(4); } for (AlterConfigsResource resourcesElement : resources) { resourcesElement.addSize(_size, _cache, _version); } } _size.addBytes(1); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterConfigsRequestData)) return false; AlterConfigsRequestData other = (AlterConfigsRequestData) obj; if (this.resources == null) { if (other.resources != null) return false; } else { if (!this.resources.equals(other.resources)) return false; } if (validateOnly != other.validateOnly) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (resources == null ? 
0 : resources.hashCode()); hashCode = 31 * hashCode + (validateOnly ? 1231 : 1237); return hashCode; } @Override public AlterConfigsRequestData duplicate() { AlterConfigsRequestData _duplicate = new AlterConfigsRequestData(); AlterConfigsResourceCollection newResources = new AlterConfigsResourceCollection(resources.size()); for (AlterConfigsResource _element : resources) { newResources.add(_element.duplicate()); } _duplicate.resources = newResources; _duplicate.validateOnly = validateOnly; return _duplicate; } @Override public String toString() { return "AlterConfigsRequestData(" + "resources=" + MessageUtil.deepToString(resources.iterator()) + ", validateOnly=" + (validateOnly ? "true" : "false") + ")"; } public AlterConfigsResourceCollection resources() { return this.resources; } public boolean validateOnly() { return this.validateOnly; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterConfigsRequestData setResources(AlterConfigsResourceCollection v) { this.resources = v; return this; } public AlterConfigsRequestData setValidateOnly(boolean v) { this.validateOnly = v; return this; } public static class AlterConfigsResource implements Message, ImplicitLinkedHashMultiCollection.Element { byte resourceType; String resourceName; AlterableConfigCollection configs; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("resource_type", Type.INT8, "The resource type."), new Field("resource_name", Type.STRING, "The resource name."), new Field("configs", new ArrayOf(AlterableConfig.SCHEMA_0), "The configurations.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("resource_type", Type.INT8, "The resource type."), new Field("resource_name", Type.COMPACT_STRING, "The resource name."), 
new Field("configs", new CompactArrayOf(AlterableConfig.SCHEMA_2), "The configurations."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 2; public AlterConfigsResource(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public AlterConfigsResource() { this.resourceType = (byte) 0; this.resourceName = ""; this.configs = new AlterableConfigCollection(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 2; } @Override public void read(Readable _readable, short _version) { if (_version > 2) { throw new UnsupportedVersionException("Can't read version " + _version + " of AlterConfigsResource"); } this.resourceType = _readable.readByte(); { int length; if (_version >= 2) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field resourceName was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field resourceName had invalid length " + length); } else { this.resourceName = _readable.readString(length); } } { if (_version >= 2) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field configs was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AlterableConfigCollection newCollection = new 
AlterableConfigCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterableConfig(_readable, _version)); } this.configs = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field configs was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AlterableConfigCollection newCollection = new AlterableConfigCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterableConfig(_readable, _version)); } this.configs = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 2) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeByte(resourceType); { byte[] _stringBytes = _cache.getSerializedValue(resourceName); if (_version >= 2) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 2) { _writable.writeUnsignedVarint(configs.size() + 1); for (AlterableConfig configsElement : configs) { configsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(configs.size()); for (AlterableConfig configsElement : configs) { configsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += 
_rawWriter.numFields(); if (_version >= 2) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 2) { throw new UnsupportedVersionException("Can't size version " + _version + " of AlterConfigsResource"); } _size.addBytes(1); { byte[] _stringBytes = resourceName.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'resourceName' field is too long to be serialized"); } _cache.cacheSerializedValue(resourceName, _stringBytes); if (_version >= 2) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(configs.size() + 1)); } else { _size.addBytes(4); } for (AlterableConfig configsElement : configs) { configsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof AlterConfigsResource)) return false; AlterConfigsResource other = (AlterConfigsResource) obj; if (resourceType 
!= other.resourceType) return false; if (this.resourceName == null) { if (other.resourceName != null) return false; } else { if (!this.resourceName.equals(other.resourceName)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterConfigsResource)) return false; AlterConfigsResource other = (AlterConfigsResource) obj; if (resourceType != other.resourceType) return false; if (this.resourceName == null) { if (other.resourceName != null) return false; } else { if (!this.resourceName.equals(other.resourceName)) return false; } if (this.configs == null) { if (other.configs != null) return false; } else { if (!this.configs.equals(other.configs)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + resourceType; hashCode = 31 * hashCode + (resourceName == null ? 0 : resourceName.hashCode()); return hashCode; } @Override public AlterConfigsResource duplicate() { AlterConfigsResource _duplicate = new AlterConfigsResource(); _duplicate.resourceType = resourceType; _duplicate.resourceName = resourceName; AlterableConfigCollection newConfigs = new AlterableConfigCollection(configs.size()); for (AlterableConfig _element : configs) { newConfigs.add(_element.duplicate()); } _duplicate.configs = newConfigs; return _duplicate; } @Override public String toString() { return "AlterConfigsResource(" + "resourceType=" + resourceType + ", resourceName=" + ((resourceName == null) ? 
"null" : "'" + resourceName.toString() + "'") + ", configs=" + MessageUtil.deepToString(configs.iterator()) + ")"; } public byte resourceType() { return this.resourceType; } public String resourceName() { return this.resourceName; } public AlterableConfigCollection configs() { return this.configs; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterConfigsResource setResourceType(byte v) { this.resourceType = v; return this; } public AlterConfigsResource setResourceName(String v) { this.resourceName = v; return this; } public AlterConfigsResource setConfigs(AlterableConfigCollection v) { this.configs = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class AlterableConfig implements Message, ImplicitLinkedHashMultiCollection.Element { String name; String value; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The configuration key name."), new Field("value", Type.NULLABLE_STRING, "The value to set for the configuration key.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("name", Type.COMPACT_STRING, "The configuration key name."), new Field("value", Type.COMPACT_NULLABLE_STRING, "The value to set for the configuration key."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 2; public AlterableConfig(Readable _readable, short _version) { read(_readable, _version); this.prev = 
ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public AlterableConfig() { this.name = ""; this.value = ""; this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 2; } @Override public void read(Readable _readable, short _version) { if (_version > 2) { throw new UnsupportedVersionException("Can't read version " + _version + " of AlterableConfig"); } { int length; if (_version >= 2) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { int length; if (_version >= 2) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.value = null; } else if (length > 0x7fff) { throw new RuntimeException("string field value had invalid length " + length); } else { this.value = _readable.readString(length); } } this._unknownTaggedFields = null; if (_version >= 2) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 2) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } 
_writable.writeByteArray(_stringBytes); } if (value == null) { if (_version >= 2) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(value); if (_version >= 2) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 2) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 2) { throw new UnsupportedVersionException("Can't size version " + _version + " of AlterableConfig"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); if (_version >= 2) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (value == null) { if (_version >= 2) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = value.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'value' field is too long to be serialized"); } _cache.cacheSerializedValue(value, _stringBytes); if (_version >= 2) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if 
(_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof AlterableConfig)) return false; AlterableConfig other = (AlterableConfig) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterableConfig)) return false; AlterableConfig other = (AlterableConfig) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.value == null) { if (other.value != null) return false; } else { if (!this.value.equals(other.value)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); return hashCode; } @Override public AlterableConfig duplicate() { AlterableConfig _duplicate = new AlterableConfig(); _duplicate.name = name; if (value == null) { _duplicate.value = null; } else { _duplicate.value = value; } return _duplicate; } @Override public String toString() { return "AlterableConfig(" + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'") + ", value=" + ((value == null) ? 
"null" : "'" + value.toString() + "'") + ")"; } public String name() { return this.name; } public String value() { return this.value; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterableConfig setName(String v) { this.name = v; return this; } public AlterableConfig setValue(String v) { this.value = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class AlterableConfigCollection extends ImplicitLinkedHashMultiCollection<AlterableConfig> { public AlterableConfigCollection() { super(); } public AlterableConfigCollection(int expectedNumElements) { super(expectedNumElements); } public AlterableConfigCollection(Iterator<AlterableConfig> iterator) { super(iterator); } public AlterableConfig find(String name) { AlterableConfig _key = new AlterableConfig(); _key.setName(name); return find(_key); } public List<AlterableConfig> findAll(String name) { AlterableConfig _key = new AlterableConfig(); _key.setName(name); return findAll(_key); } public AlterableConfigCollection duplicate() { AlterableConfigCollection _duplicate = new AlterableConfigCollection(size()); for (AlterableConfig _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } public static class AlterConfigsResourceCollection extends ImplicitLinkedHashMultiCollection<AlterConfigsResource> { public AlterConfigsResourceCollection() { super(); } public AlterConfigsResourceCollection(int expectedNumElements) { super(expectedNumElements); } public AlterConfigsResourceCollection(Iterator<AlterConfigsResource> iterator) { super(iterator); } public AlterConfigsResource find(byte resourceType, String resourceName) { AlterConfigsResource _key = new 
AlterConfigsResource(); _key.setResourceType(resourceType); _key.setResourceName(resourceName); return find(_key); } public List<AlterConfigsResource> findAll(byte resourceType, String resourceName) { AlterConfigsResource _key = new AlterConfigsResource(); _key.setResourceType(resourceType); _key.setResourceName(resourceName); return findAll(_key); } public AlterConfigsResourceCollection duplicate() { AlterConfigsResourceCollection _duplicate = new AlterConfigsResourceCollection(size()); for (AlterConfigsResource _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterConfigsRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.AlterConfigsRequestData.*;

/**
 * Generated JSON converter for AlterConfigsRequestData: maps the message to
 * and from Jackson JsonNode trees. JSON field names are the camelCase Java
 * field names (e.g. "validateOnly"), not the wire schema's snake_case names.
 */
public class AlterConfigsRequestDataJsonConverter {
    /** Builds an AlterConfigsRequestData from a JSON object, validating node types. */
    public static AlterConfigsRequestData read(JsonNode _node, short _version) {
        AlterConfigsRequestData _object = new AlterConfigsRequestData();
        JsonNode _resourcesNode = _node.get("resources");
        if (_resourcesNode == null) {
            throw new RuntimeException("AlterConfigsRequestData: unable to locate field 'resources', which is mandatory in version " + _version);
        } else {
            if (!_resourcesNode.isArray()) {
                throw new RuntimeException("AlterConfigsRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            AlterConfigsResourceCollection _collection = new AlterConfigsResourceCollection(_resourcesNode.size());
            _object.resources = _collection;
            for (JsonNode _element : _resourcesNode) {
                _collection.add(AlterConfigsResourceJsonConverter.read(_element, _version));
            }
        }
        JsonNode _validateOnlyNode = _node.get("validateOnly");
        if (_validateOnlyNode == null) {
            throw new RuntimeException("AlterConfigsRequestData: unable to locate field 'validateOnly', which is mandatory in version " + _version);
        } else {
            if (!_validateOnlyNode.isBoolean()) {
                throw new RuntimeException("AlterConfigsRequestData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.validateOnly = _validateOnlyNode.asBoolean();
        }
        return _object;
    }

    /** Serializes the message to a JSON object tree. */
    public static JsonNode write(AlterConfigsRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        ArrayNode _resourcesArray = new ArrayNode(JsonNodeFactory.instance);
        for (AlterConfigsResource _element : _object.resources) {
            _resourcesArray.add(AlterConfigsResourceJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("resources", _resourcesArray);
        _node.set("validateOnly", BooleanNode.valueOf(_object.validateOnly));
        return _node;
    }

    /** Convenience overload: records are serialized. */
    public static JsonNode write(AlterConfigsRequestData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the nested AlterConfigsResource struct. */
    public static class AlterConfigsResourceJsonConverter {
        public static AlterConfigsResource read(JsonNode _node, short _version) {
            AlterConfigsResource _object = new AlterConfigsResource();
            JsonNode _resourceTypeNode = _node.get("resourceType");
            if (_resourceTypeNode == null) {
                throw new RuntimeException("AlterConfigsResource: unable to locate field 'resourceType', which is mandatory in version " + _version);
            } else {
                // jsonNodeToByte also range-checks the numeric value.
                _object.resourceType = MessageUtil.jsonNodeToByte(_resourceTypeNode, "AlterConfigsResource");
            }
            JsonNode _resourceNameNode = _node.get("resourceName");
            if (_resourceNameNode == null) {
                throw new RuntimeException("AlterConfigsResource: unable to locate field 'resourceName', which is mandatory in version " + _version);
            } else {
                if (!_resourceNameNode.isTextual()) {
                    throw new RuntimeException("AlterConfigsResource expected a string type, but got " + _node.getNodeType());
                }
                _object.resourceName = _resourceNameNode.asText();
            }
            JsonNode _configsNode = _node.get("configs");
            if (_configsNode == null) {
                throw new RuntimeException("AlterConfigsResource: unable to locate field 'configs', which is mandatory in version " + _version);
            } else {
                if (!_configsNode.isArray()) {
                    throw new RuntimeException("AlterConfigsResource expected a JSON array, but got " + _node.getNodeType());
                }
                AlterableConfigCollection _collection = new AlterableConfigCollection(_configsNode.size());
                _object.configs = _collection;
                for (JsonNode _element : _configsNode) {
                    _collection.add(AlterableConfigJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        public static JsonNode write(AlterConfigsResource _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("resourceType", new ShortNode(_object.resourceType));
            _node.set("resourceName", new TextNode(_object.resourceName));
            ArrayNode _configsArray = new ArrayNode(JsonNodeFactory.instance);
            for (AlterableConfig _element : _object.configs) {
                _configsArray.add(AlterableConfigJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("configs", _configsArray);
            return _node;
        }

        public static JsonNode write(AlterConfigsResource _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** JSON converter for the nested AlterableConfig struct; 'value' may be JSON null. */
    public static class AlterableConfigJsonConverter {
        public static AlterableConfig read(JsonNode _node, short _version) {
            AlterableConfig _object = new AlterableConfig();
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("AlterableConfig: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("AlterableConfig expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _valueNode = _node.get("value");
            if (_valueNode == null) {
                throw new RuntimeException("AlterableConfig: unable to locate field 'value', which is mandatory in version " + _version);
            } else {
                // The key must be present, but its value may be an explicit JSON null.
                if (_valueNode.isNull()) {
                    _object.value = null;
                } else {
                    if (!_valueNode.isTextual()) {
                        throw new RuntimeException("AlterableConfig expected a string type, but got " + _node.getNodeType());
                    }
                    _object.value = _valueNode.asText();
                }
            }
            return _object;
        }

        public static JsonNode write(AlterableConfig _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            if (_object.value == null) {
                _node.set("value", NullNode.instance);
            } else {
                _node.set("value", new TextNode(_object.value));
            }
            return _node;
        }

        public static JsonNode write(AlterableConfig _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterConfigsResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class AlterConfigsResponseData 
implements ApiMessage { int throttleTimeMs; List<AlterConfigsResourceResponse> responses; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("responses", new ArrayOf(AlterConfigsResourceResponse.SCHEMA_0), "The responses for each resource.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("responses", new CompactArrayOf(AlterConfigsResourceResponse.SCHEMA_2), "The responses for each resource."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 2; public AlterConfigsResponseData(Readable _readable, short _version) { read(_readable, _version); } public AlterConfigsResponseData() { this.throttleTimeMs = 0; this.responses = new ArrayList<AlterConfigsResourceResponse>(0); } @Override public short apiKey() { return 33; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 2; } @Override public void read(Readable _readable, short _version) { this.throttleTimeMs = _readable.readInt(); { if (_version >= 2) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field responses was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + 
_readable.remaining() + " bytes remaining."); } ArrayList<AlterConfigsResourceResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterConfigsResourceResponse(_readable, _version)); } this.responses = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field responses was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<AlterConfigsResourceResponse> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterConfigsResourceResponse(_readable, _version)); } this.responses = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 2) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(throttleTimeMs); if (_version >= 2) { _writable.writeUnsignedVarint(responses.size() + 1); for (AlterConfigsResourceResponse responsesElement : responses) { responsesElement.write(_writable, _cache, _version); } } else { _writable.writeInt(responses.size()); for (AlterConfigsResourceResponse responsesElement : responses) { responsesElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 2) { 
_writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); { if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(responses.size() + 1)); } else { _size.addBytes(4); } for (AlterConfigsResourceResponse responsesElement : responses) { responsesElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterConfigsResponseData)) return false; AlterConfigsResponseData other = (AlterConfigsResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (this.responses == null) { if (other.responses != null) return false; } else { if (!this.responses.equals(other.responses)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + (responses == null ? 
0 : responses.hashCode()); return hashCode; } @Override public AlterConfigsResponseData duplicate() { AlterConfigsResponseData _duplicate = new AlterConfigsResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; ArrayList<AlterConfigsResourceResponse> newResponses = new ArrayList<AlterConfigsResourceResponse>(responses.size()); for (AlterConfigsResourceResponse _element : responses) { newResponses.add(_element.duplicate()); } _duplicate.responses = newResponses; return _duplicate; } @Override public String toString() { return "AlterConfigsResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", responses=" + MessageUtil.deepToString(responses.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public List<AlterConfigsResourceResponse> responses() { return this.responses; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterConfigsResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public AlterConfigsResponseData setResponses(List<AlterConfigsResourceResponse> v) { this.responses = v; return this; } public static class AlterConfigsResourceResponse implements Message { short errorCode; String errorMessage; byte resourceType; String resourceName; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("error_code", Type.INT16, "The resource error code."), new Field("error_message", Type.NULLABLE_STRING, "The resource error message, or null if there was no error."), new Field("resource_type", Type.INT8, "The resource type."), new Field("resource_name", Type.STRING, "The resource name.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("error_code", Type.INT16, "The resource error code."), new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The resource 
error message, or null if there was no error."), new Field("resource_type", Type.INT8, "The resource type."), new Field("resource_name", Type.COMPACT_STRING, "The resource name."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 2; public AlterConfigsResourceResponse(Readable _readable, short _version) { read(_readable, _version); } public AlterConfigsResourceResponse() { this.errorCode = (short) 0; this.errorMessage = ""; this.resourceType = (byte) 0; this.resourceName = ""; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 2; } @Override public void read(Readable _readable, short _version) { if (_version > 2) { throw new UnsupportedVersionException("Can't read version " + _version + " of AlterConfigsResourceResponse"); } this.errorCode = _readable.readShort(); { int length; if (_version >= 2) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { this.errorMessage = null; } else if (length > 0x7fff) { throw new RuntimeException("string field errorMessage had invalid length " + length); } else { this.errorMessage = _readable.readString(length); } } this.resourceType = _readable.readByte(); { int length; if (_version >= 2) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field resourceName was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field resourceName had invalid length " + length); } else { this.resourceName = _readable.readString(length); } } this._unknownTaggedFields = null; if (_version >= 2) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = 
_readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeShort(errorCode); if (errorMessage == null) { if (_version >= 2) { _writable.writeUnsignedVarint(0); } else { _writable.writeShort((short) -1); } } else { byte[] _stringBytes = _cache.getSerializedValue(errorMessage); if (_version >= 2) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } _writable.writeByte(resourceType); { byte[] _stringBytes = _cache.getSerializedValue(resourceName); if (_version >= 2) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 2) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 2) { throw new UnsupportedVersionException("Can't size version " + _version + " of AlterConfigsResourceResponse"); } _size.addBytes(2); if (errorMessage == null) { if (_version >= 2) { _size.addBytes(1); } else { _size.addBytes(2); } } else { byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new 
RuntimeException("'errorMessage' field is too long to be serialized"); } _cache.cacheSerializedValue(errorMessage, _stringBytes); if (_version >= 2) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } _size.addBytes(1); { byte[] _stringBytes = resourceName.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'resourceName' field is too long to be serialized"); } _cache.cacheSerializedValue(resourceName, _stringBytes); if (_version >= 2) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterConfigsResourceResponse)) return false; AlterConfigsResourceResponse other = (AlterConfigsResourceResponse) obj; if (errorCode != other.errorCode) return false; if (this.errorMessage == null) { if (other.errorMessage != null) return false; } else { if (!this.errorMessage.equals(other.errorMessage)) return false; } if (resourceType != other.resourceType) return false; if (this.resourceName == null) { if (other.resourceName != null) return false; } else { if (!this.resourceName.equals(other.resourceName)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override 
public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode()); hashCode = 31 * hashCode + resourceType; hashCode = 31 * hashCode + (resourceName == null ? 0 : resourceName.hashCode()); return hashCode; } @Override public AlterConfigsResourceResponse duplicate() { AlterConfigsResourceResponse _duplicate = new AlterConfigsResourceResponse(); _duplicate.errorCode = errorCode; if (errorMessage == null) { _duplicate.errorMessage = null; } else { _duplicate.errorMessage = errorMessage; } _duplicate.resourceType = resourceType; _duplicate.resourceName = resourceName; return _duplicate; } @Override public String toString() { return "AlterConfigsResourceResponse(" + "errorCode=" + errorCode + ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'") + ", resourceType=" + resourceType + ", resourceName=" + ((resourceName == null) ? "null" : "'" + resourceName.toString() + "'") + ")"; } public short errorCode() { return this.errorCode; } public String errorMessage() { return this.errorMessage; } public byte resourceType() { return this.resourceType; } public String resourceName() { return this.resourceName; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterConfigsResourceResponse setErrorCode(short v) { this.errorCode = v; return this; } public AlterConfigsResourceResponse setErrorMessage(String v) { this.errorMessage = v; return this; } public AlterConfigsResourceResponse setResourceType(byte v) { this.resourceType = v; return this; } public AlterConfigsResourceResponse setResourceName(String v) { this.resourceName = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterConfigsResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.NullNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.AlterConfigsResponseData.*; public class AlterConfigsResponseDataJsonConverter { public static AlterConfigsResponseData read(JsonNode _node, short _version) { AlterConfigsResponseData _object = new AlterConfigsResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { throw new RuntimeException("AlterConfigsResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, 
"AlterConfigsResponseData"); } JsonNode _responsesNode = _node.get("responses"); if (_responsesNode == null) { throw new RuntimeException("AlterConfigsResponseData: unable to locate field 'responses', which is mandatory in version " + _version); } else { if (!_responsesNode.isArray()) { throw new RuntimeException("AlterConfigsResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<AlterConfigsResourceResponse> _collection = new ArrayList<AlterConfigsResourceResponse>(_responsesNode.size()); _object.responses = _collection; for (JsonNode _element : _responsesNode) { _collection.add(AlterConfigsResourceResponseJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(AlterConfigsResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); ArrayNode _responsesArray = new ArrayNode(JsonNodeFactory.instance); for (AlterConfigsResourceResponse _element : _object.responses) { _responsesArray.add(AlterConfigsResourceResponseJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("responses", _responsesArray); return _node; } public static JsonNode write(AlterConfigsResponseData _object, short _version) { return write(_object, _version, true); } public static class AlterConfigsResourceResponseJsonConverter { public static AlterConfigsResourceResponse read(JsonNode _node, short _version) { AlterConfigsResourceResponse _object = new AlterConfigsResourceResponse(); JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("AlterConfigsResourceResponse: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "AlterConfigsResourceResponse"); } JsonNode _errorMessageNode = _node.get("errorMessage"); if (_errorMessageNode == null) 
{ throw new RuntimeException("AlterConfigsResourceResponse: unable to locate field 'errorMessage', which is mandatory in version " + _version); } else { if (_errorMessageNode.isNull()) { _object.errorMessage = null; } else { if (!_errorMessageNode.isTextual()) { throw new RuntimeException("AlterConfigsResourceResponse expected a string type, but got " + _node.getNodeType()); } _object.errorMessage = _errorMessageNode.asText(); } } JsonNode _resourceTypeNode = _node.get("resourceType"); if (_resourceTypeNode == null) { throw new RuntimeException("AlterConfigsResourceResponse: unable to locate field 'resourceType', which is mandatory in version " + _version); } else { _object.resourceType = MessageUtil.jsonNodeToByte(_resourceTypeNode, "AlterConfigsResourceResponse"); } JsonNode _resourceNameNode = _node.get("resourceName"); if (_resourceNameNode == null) { throw new RuntimeException("AlterConfigsResourceResponse: unable to locate field 'resourceName', which is mandatory in version " + _version); } else { if (!_resourceNameNode.isTextual()) { throw new RuntimeException("AlterConfigsResourceResponse expected a string type, but got " + _node.getNodeType()); } _object.resourceName = _resourceNameNode.asText(); } return _object; } public static JsonNode write(AlterConfigsResourceResponse _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("errorCode", new ShortNode(_object.errorCode)); if (_object.errorMessage == null) { _node.set("errorMessage", NullNode.instance); } else { _node.set("errorMessage", new TextNode(_object.errorMessage)); } _node.set("resourceType", new ShortNode(_object.resourceType)); _node.set("resourceName", new TextNode(_object.resourceName)); return _node; } public static JsonNode write(AlterConfigsResourceResponse _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterPartitionReassignmentsRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class AlterPartitionReassignmentsRequestData implements ApiMessage { int timeoutMs; 
List<ReassignableTopic> topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("timeout_ms", Type.INT32, "The time in ms to wait for the request to complete."), new Field("topics", new CompactArrayOf(ReassignableTopic.SCHEMA_0), "The topics to reassign."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public AlterPartitionReassignmentsRequestData(Readable _readable, short _version) { read(_readable, _version); } public AlterPartitionReassignmentsRequestData() { this.timeoutMs = 60000; this.topics = new ArrayList<ReassignableTopic>(0); } @Override public short apiKey() { return 45; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { this.timeoutMs = _readable.readInt(); { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ReassignableTopic> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ReassignableTopic(_readable, _version)); } this.topics = newCollection; } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } 
} } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(timeoutMs); _writable.writeUnsignedVarint(topics.size() + 1); for (ReassignableTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); for (ReassignableTopic topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterPartitionReassignmentsRequestData)) return false; AlterPartitionReassignmentsRequestData other = (AlterPartitionReassignmentsRequestData) obj; if (timeoutMs != other.timeoutMs) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + timeoutMs; hashCode = 31 * hashCode + (topics == null ? 
0 : topics.hashCode()); return hashCode; } @Override public AlterPartitionReassignmentsRequestData duplicate() { AlterPartitionReassignmentsRequestData _duplicate = new AlterPartitionReassignmentsRequestData(); _duplicate.timeoutMs = timeoutMs; ArrayList<ReassignableTopic> newTopics = new ArrayList<ReassignableTopic>(topics.size()); for (ReassignableTopic _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "AlterPartitionReassignmentsRequestData(" + "timeoutMs=" + timeoutMs + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public int timeoutMs() { return this.timeoutMs; } public List<ReassignableTopic> topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterPartitionReassignmentsRequestData setTimeoutMs(int v) { this.timeoutMs = v; return this; } public AlterPartitionReassignmentsRequestData setTopics(List<ReassignableTopic> v) { this.topics = v; return this; } public static class ReassignableTopic implements Message { String name; List<ReassignablePartition> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.COMPACT_STRING, "The topic name."), new Field("partitions", new CompactArrayOf(ReassignablePartition.SCHEMA_0), "The partitions to reassign."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public ReassignableTopic(Readable _readable, short _version) { read(_readable, _version); } public ReassignableTopic() { this.name = ""; this.partitions = new ArrayList<ReassignablePartition>(0); } @Override public short lowestSupportedVersion() { 
return 0; } // closes lowestSupportedVersion(), whose header is above this chunk

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /**
     * Deserializes this ReassignableTopic from the flexible (compact) wire format.
     * Compact strings/arrays are prefixed with unsignedVarint(length + 1); a prefix of 0
     * would decode to length -1, which is rejected here because both fields are non-nullable.
     */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version > 0) {
            throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignableTopic");
        }
        {
            // Topic name: non-nullable compact string, at most 0x7fff bytes.
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                throw new RuntimeException("non-nullable field name was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field name had invalid length " + length);
            } else {
                this.name = _readable.readString(length);
            }
        }
        {
            // Partitions: non-nullable compact array of ReassignablePartition.
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field partitions was serialized as null");
            } else {
                // Guard against allocating more elements than bytes remain in the buffer.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<ReassignablePartition> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new ReassignablePartition(_readable, _version));
                }
                this.partitions = newCollection;
            }
        }
        // Trailing tagged-field section: none are known for this type, so every tag
        // is preserved as an unknown raw tagged field.
        this._unknownTaggedFields = null;
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /**
     * Serializes this ReassignableTopic; field order mirrors read(). The name bytes
     * must already be in the cache (populated by addSize()).
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        {
            byte[] _stringBytes = _cache.getSerializedValue(name);
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeUnsignedVarint(partitions.size() + 1);
        for (ReassignablePartition partitionsElement : partitions) {
            partitionsElement.write(_writable, _cache, _version);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /**
     * Accumulates the serialized size of this object and caches the UTF-8 name bytes
     * so write() can reuse them. Must be called before write() with the same cache.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version > 0) {
            throw new UnsupportedVersionException("Can't size version " + _version + " of ReassignableTopic");
        }
        {
            byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'name' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(name, _stringBytes);
            _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
        }
        {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1));
            for (ReassignablePartition partitionsElement : partitions) {
                partitionsElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    // Equality covers name, partitions, and any unknown tagged fields.
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof ReassignableTopic)) return false;
        ReassignableTopic other = (ReassignableTopic) obj;
        if (this.name == null) {
            if (other.name != null) return false;
        } else {
            if (!this.name.equals(other.name)) return false;
        }
        if (this.partitions == null) {
            if (other.partitions != null) return false;
        } else {
            if (!this.partitions.equals(other.partitions)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // NOTE(review): hashCode intentionally excludes _unknownTaggedFields even though
    // equals() compares them — consistent across all generated message classes here.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
        hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
        return hashCode;
    }

    /** Deep copy: the partitions list and its elements are duplicated; name (immutable) is shared. */
    @Override
    public ReassignableTopic duplicate() {
        ReassignableTopic _duplicate = new ReassignableTopic();
        _duplicate.name = name;
        ArrayList<ReassignablePartition> newPartitions = new ArrayList<ReassignablePartition>(partitions.size());
        for (ReassignablePartition _element : partitions) {
            newPartitions.add(_element.duplicate());
        }
        _duplicate.partitions = newPartitions;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "ReassignableTopic("
            + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
            + ", partitions=" + MessageUtil.deepToString(partitions.iterator())
            + ")";
    }

    public String name() {
        return this.name;
    }

    public List<ReassignablePartition> partitions() {
        return this.partitions;
    }

    /** Lazily creates the unknown-tagged-field list so callers can append to it. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // Fluent setters (return this) as generated for all message classes.
    public ReassignableTopic setName(String v) {
        this.name = v;
        return this;
    }

    public ReassignableTopic setPartitions(List<ReassignablePartition> v) {
        this.partitions = v;
        return this;
    }
}

/**
 * One partition's reassignment request: the partition index plus the target replica
 * list. A null replica list cancels a pending reassignment (see the schema doc string
 * below); only schema version 0 exists.
 */
public static class ReassignablePartition implements Message {
    int partitionIndex;
    List<Integer> replicas; // nullable: null means "cancel pending reassignment"
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("partition_index", Type.INT32, "The partition index."),
            new Field("replicas", CompactArrayOf.nullable(Type.INT32), "The replicas to place the partitions on, or null to cancel a pending reassignment for this partition."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;

    /** Deserializing constructor. */
    public ReassignablePartition(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: index 0, replicas null (i.e. "cancel"). */
    public ReassignablePartition() {
        this.partitionIndex = 0;
        this.replicas = null;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /**
     * Reads {partitionIndex, replicas, tagged fields}. The replicas compact array is
     * nullable: a varint prefix of 0 (length -1) decodes to null rather than an error.
     */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version > 0) {
            throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignablePartition");
        }
        this.partitionIndex = _readable.readInt();
        {
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                this.replicas = null;
            } else {
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<Integer> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(_readable.readInt());
                }
                this.replicas = newCollection;
            }
        }
        this._unknownTaggedFields = null;
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /** Serializes the fields in the same order read() consumes them; null replicas encodes as varint 0. */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(partitionIndex);
        if (replicas == null) {
            _writable.writeUnsignedVarint(0);
        } else {
            _writable.writeUnsignedVarint(replicas.size() + 1);
            for (Integer replicasElement : replicas) {
                _writable.writeInt(replicasElement);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /** Accumulates the serialized byte count (4 for the index, varint + 4 bytes per replica). */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version > 0) {
            throw new UnsupportedVersionException("Can't size version " + _version + " of ReassignablePartition");
        }
        _size.addBytes(4);
        if (replicas == null) {
            _size.addBytes(1);
        } else {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(replicas.size() + 1));
            _size.addBytes(replicas.size() * 4);
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof ReassignablePartition)) return false;
        ReassignablePartition other = (ReassignablePartition) obj;
        if (partitionIndex != other.partitionIndex) return false;
        if (this.replicas == null) {
            if (other.replicas != null) return false;
        } else {
            if (!this.replicas.equals(other.replicas)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + partitionIndex;
        hashCode = 31 * hashCode + (replicas == null ? 0 : replicas.hashCode());
        return hashCode;
    }

    /** Deep copy; preserves a null replicas list as null. */
    @Override
    public ReassignablePartition duplicate() {
        ReassignablePartition _duplicate = new ReassignablePartition();
        _duplicate.partitionIndex = partitionIndex;
        if (replicas == null) {
            _duplicate.replicas = null;
        } else {
            ArrayList<Integer> newReplicas = new ArrayList<Integer>(replicas.size());
            for (Integer _element : replicas) {
                newReplicas.add(_element);
            }
            _duplicate.replicas = newReplicas;
        }
        return _duplicate;
    }

    @Override
    public String toString() {
        return "ReassignablePartition("
            + "partitionIndex=" + partitionIndex
            + ", replicas=" + ((replicas == null) ? "null" : MessageUtil.deepToString(replicas.iterator()))
            + ")";
    }

    public int partitionIndex() {
        return this.partitionIndex;
    }

    public List<Integer> replicas() {
        return this.replicas;
    }

    /** Lazily creates the unknown-tagged-field list so callers can append to it. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public ReassignablePartition setPartitionIndex(int v) {
        this.partitionIndex = v;
        return this;
    }

    public ReassignablePartition setReplicas(List<Integer> v) {
        this.replicas = v;
        return this;
    }
}
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterPartitionReassignmentsRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.*;

/**
 * JSON round-trip converter for {@link AlterPartitionReassignmentsRequestData}.
 * read() rejects any missing mandatory field with a RuntimeException naming the field;
 * write() emits fields in the fixed order {timeoutMs, topics}.
 */
public class AlterPartitionReassignmentsRequestDataJsonConverter {
    /** Parses a request object from its JSON representation, validating field presence and types. */
    public static AlterPartitionReassignmentsRequestData read(JsonNode _node, short _version) {
        AlterPartitionReassignmentsRequestData _object = new AlterPartitionReassignmentsRequestData();
        JsonNode _timeoutMsNode = _node.get("timeoutMs");
        if (_timeoutMsNode == null) {
            throw new RuntimeException("AlterPartitionReassignmentsRequestData: unable to locate field 'timeoutMs', which is mandatory in version " + _version);
        } else {
            _object.timeoutMs = MessageUtil.jsonNodeToInt(_timeoutMsNode, "AlterPartitionReassignmentsRequestData");
        }
        JsonNode _topicsNode = _node.get("topics");
        if (_topicsNode == null) {
            throw new RuntimeException("AlterPartitionReassignmentsRequestData: unable to locate field 'topics', which is mandatory in version " + _version);
        } else {
            if (!_topicsNode.isArray()) {
                throw new RuntimeException("AlterPartitionReassignmentsRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<ReassignableTopic> _collection = new ArrayList<ReassignableTopic>(_topicsNode.size());
            _object.topics = _collection;
            for (JsonNode _element : _topicsNode) {
                _collection.add(ReassignableTopicJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /** Renders the request as a JSON object; _serializeRecords is forwarded to nested converters. */
    public static JsonNode write(AlterPartitionReassignmentsRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("timeoutMs", new IntNode(_object.timeoutMs));
        ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
        for (ReassignableTopic _element : _object.topics) {
            _topicsArray.add(ReassignableTopicJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("topics", _topicsArray);
        return _node;
    }

    /** Convenience overload: serializes records by default. */
    public static JsonNode write(AlterPartitionReassignmentsRequestData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for one partition entry; a JSON null 'replicas' maps to a null list. */
    public static class ReassignablePartitionJsonConverter {
        public static ReassignablePartition read(JsonNode _node, short _version) {
            ReassignablePartition _object = new ReassignablePartition();
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("ReassignablePartition: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "ReassignablePartition");
            }
            JsonNode _replicasNode = _node.get("replicas");
            if (_replicasNode == null) {
                throw new RuntimeException("ReassignablePartition: unable to locate field 'replicas', which is mandatory in version " + _version);
            } else {
                // The key must be present, but its value may be an explicit JSON null.
                if (_replicasNode.isNull()) {
                    _object.replicas = null;
                } else {
                    if (!_replicasNode.isArray()) {
                        throw new RuntimeException("ReassignablePartition expected a JSON array, but got " + _node.getNodeType());
                    }
                    ArrayList<Integer> _collection = new ArrayList<Integer>(_replicasNode.size());
                    _object.replicas = _collection;
                    for (JsonNode _element : _replicasNode) {
                        _collection.add(MessageUtil.jsonNodeToInt(_element, "ReassignablePartition element"));
                    }
                }
            }
            return _object;
        }

        public static JsonNode write(ReassignablePartition _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            if (_object.replicas == null) {
                _node.set("replicas", NullNode.instance);
            } else {
                ArrayNode _replicasArray = new ArrayNode(JsonNodeFactory.instance);
                for (Integer _element : _object.replicas) {
                    _replicasArray.add(new IntNode(_element));
                }
                _node.set("replicas", _replicasArray);
            }
            return _node;
        }

        public static JsonNode write(ReassignablePartition _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** JSON converter for one topic entry: a required string 'name' plus a 'partitions' array. */
    public static class ReassignableTopicJsonConverter {
        public static ReassignableTopic read(JsonNode _node, short _version) {
            ReassignableTopic _object = new ReassignableTopic();
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("ReassignableTopic: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("ReassignableTopic expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("ReassignableTopic: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    throw new RuntimeException("ReassignableTopic expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<ReassignablePartition> _collection = new ArrayList<ReassignablePartition>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(ReassignablePartitionJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        public static JsonNode write(ReassignableTopic _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (ReassignablePartition _element : _object.partitions) {
                _partitionsArray.add(ReassignablePartitionJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }

        public static JsonNode write(ReassignableTopic _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterPartitionReassignmentsResponseData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.CompactArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Response body for the AlterPartitionReassignments API (apiKey 45, version 0 only).
 * Carries a top-level error plus a per-topic, per-partition breakdown. Strings and
 * arrays use the compact (flexible) wire encoding: unsignedVarint(length + 1), with
 * 0 meaning null where the field is nullable.
 */
public class AlterPartitionReassignmentsResponseData implements ApiMessage {
    int throttleTimeMs;
    short errorCode;
    String errorMessage; // nullable on the wire; defaults to "" on construction
    List<ReassignableTopicResponse> responses;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The top-level error code, or 0 if there was no error."),
            new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The top-level error message, or null if there was no error."),
            new Field("responses", new CompactArrayOf(ReassignableTopicResponse.SCHEMA_0), "The responses to topics to reassign."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;

    /** Deserializing constructor. */
    public AlterPartitionReassignmentsResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: zeroed fields, empty error message, empty responses list. */
    public AlterPartitionReassignmentsResponseData() {
        this.throttleTimeMs = 0;
        this.errorCode = (short) 0;
        this.errorMessage = "";
        this.responses = new ArrayList<ReassignableTopicResponse>(0);
    }

    @Override
    public short apiKey() {
        return 45;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /**
     * Reads {throttleTimeMs, errorCode, errorMessage (nullable), responses, tagged fields}.
     * errorMessage decodes to null when the compact-string prefix is 0; responses is non-nullable.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        this.errorCode = _readable.readShort();
        {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                this.errorMessage = null;
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field errorMessage had invalid length " + length);
            } else {
                this.errorMessage = _readable.readString(length);
            }
        }
        {
            int arrayLength;
            arrayLength = _readable.readUnsignedVarint() - 1;
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field responses was serialized as null");
            } else {
                // Guard against allocating more elements than bytes remain in the buffer.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<ReassignableTopicResponse> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new ReassignableTopicResponse(_readable, _version));
                }
                this.responses = newCollection;
            }
        }
        // Unknown tagged fields are preserved verbatim for forward compatibility.
        this._unknownTaggedFields = null;
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /** Serializes in read() order; errorMessage bytes come from the cache populated by addSize(). */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        _writable.writeShort(errorCode);
        if (errorMessage == null) {
            _writable.writeUnsignedVarint(0);
        } else {
            byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
            _writable.writeUnsignedVarint(_stringBytes.length + 1);
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeUnsignedVarint(responses.size() + 1);
        for (ReassignableTopicResponse responsesElement : responses) {
            responsesElement.write(_writable, _cache, _version);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /** Accumulates the serialized size and caches errorMessage's UTF-8 bytes for write(). */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(4);
        _size.addBytes(2);
        if (errorMessage == null) {
            _size.addBytes(1);
        } else {
            byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'errorMessage' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(errorMessage, _stringBytes);
            _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
        }
        {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(responses.size() + 1));
            for (ReassignableTopicResponse responsesElement : responses) {
                responsesElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof AlterPartitionReassignmentsResponseData)) return false;
        AlterPartitionReassignmentsResponseData other = (AlterPartitionReassignmentsResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (errorCode != other.errorCode) return false;
        if (this.errorMessage == null) {
            if (other.errorMessage != null) return false;
        } else {
            if (!this.errorMessage.equals(other.errorMessage)) return false;
        }
        if (this.responses == null) {
            if (other.responses != null) return false;
        } else {
            if (!this.responses.equals(other.responses)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // NOTE(review): hashCode excludes _unknownTaggedFields even though equals() compares
    // them — consistent across all generated message classes in this package.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + errorCode;
        hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode());
        hashCode = 31 * hashCode + (responses == null ? 0 : responses.hashCode());
        return hashCode;
    }

    /** Deep copy: responses and their elements are duplicated; immutable strings are shared. */
    @Override
    public AlterPartitionReassignmentsResponseData duplicate() {
        AlterPartitionReassignmentsResponseData _duplicate = new AlterPartitionReassignmentsResponseData();
        _duplicate.throttleTimeMs = throttleTimeMs;
        _duplicate.errorCode = errorCode;
        if (errorMessage == null) {
            _duplicate.errorMessage = null;
        } else {
            _duplicate.errorMessage = errorMessage;
        }
        ArrayList<ReassignableTopicResponse> newResponses = new ArrayList<ReassignableTopicResponse>(responses.size());
        for (ReassignableTopicResponse _element : responses) {
            newResponses.add(_element.duplicate());
        }
        _duplicate.responses = newResponses;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "AlterPartitionReassignmentsResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", errorCode=" + errorCode
            + ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'")
            + ", responses=" + MessageUtil.deepToString(responses.iterator())
            + ")";
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public short errorCode() {
        return this.errorCode;
    }

    public String errorMessage() {
        return this.errorMessage;
    }

    public List<ReassignableTopicResponse> responses() {
        return this.responses;
    }

    /** Lazily creates the unknown-tagged-field list so callers can append to it. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // Fluent setters (return this) as generated for all message classes.
    public AlterPartitionReassignmentsResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public AlterPartitionReassignmentsResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }

    public AlterPartitionReassignmentsResponseData setErrorMessage(String v) {
        this.errorMessage = v;
        return this;
    }

    public AlterPartitionReassignmentsResponseData setResponses(List<ReassignableTopicResponse> v) {
        this.responses = v;
        return this;
    }

    /** Per-topic reassignment result: the topic name plus per-partition responses. */
    public static class ReassignableTopicResponse implements Message {
        String name;
        List<ReassignablePartitionResponse> partitions;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("name", Type.COMPACT_STRING, "The topic name"),
                new Field("partitions", new CompactArrayOf(ReassignablePartitionResponse.SCHEMA_0), "The responses to partitions to reassign"),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 0;

        /** Deserializing constructor. */
        public ReassignableTopicResponse(Readable _readable, short _version) {
            read(_readable, _version);
        }

        /** Default constructor: empty name, empty partitions list. */
        public ReassignableTopicResponse() {
            this.name = "";
            this.partitions = new ArrayList<ReassignablePartitionResponse>(0);
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 0;
        }

        /** Reads {name (non-nullable compact string), partitions (non-nullable array), tagged fields}. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignableTopicResponse");
            }
            {
                int length;
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitions was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<ReassignablePartitionResponse> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new ReassignablePartitionResponse(_readable, _version));
                    }
                    this.partitions = newCollection;
                }
            }
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        /** Serializes in read() order; name bytes come from the cache populated by addSize(). */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(name);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeUnsignedVarint(partitions.size() + 1);
            for (ReassignablePartitionResponse partitionsElement : partitions) {
                partitionsElement.write(_writable, _cache, _version);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        /** Accumulates the serialized size and caches the UTF-8 name bytes for write(). */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of ReassignableTopicResponse");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
            {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1));
                for (ReassignablePartitionResponse partitionsElement : partitions) {
                    partitionsElement.addSize(_size, _cache, _version);
                }
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof ReassignableTopicResponse)) return false;
            ReassignableTopicResponse other = (ReassignableTopicResponse) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            if (this.partitions == null) {
                if (other.partitions != null) return false;
            } else {
                if (!this.partitions.equals(other.partitions)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
            return hashCode;
        }

        /** Deep copy: the partitions list and its elements are duplicated. */
        @Override
        public ReassignableTopicResponse duplicate() {
            ReassignableTopicResponse _duplicate = new ReassignableTopicResponse();
            _duplicate.name = name;
            ArrayList<ReassignablePartitionResponse> newPartitions = new ArrayList<ReassignablePartitionResponse>(partitions.size());
            for (ReassignablePartitionResponse _element : partitions) {
                newPartitions.add(_element.duplicate());
            }
            _duplicate.partitions = newPartitions;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "ReassignableTopicResponse("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", partitions=" + MessageUtil.deepToString(partitions.iterator())
                + ")";
        }

        public String name() {
            return this.name;
        }

        public List<ReassignablePartitionResponse> partitions() {
            return this.partitions;
        }

        /** Lazily creates the unknown-tagged-field list so callers can append to it. */
        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public ReassignableTopicResponse setName(String v) {
            this.name = v;
            return this;
        }

        public ReassignableTopicResponse setPartitions(List<ReassignablePartitionResponse> v) {
            this.partitions = v;
            return this;
        }
    }

    /** Per-partition reassignment result: index plus an error code and nullable message. */
    public static class ReassignablePartitionResponse implements Message {
        int partitionIndex;
        short errorCode;
        String errorMessage; // nullable on the wire; defaults to "" on construction
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("partition_index", Type.INT32, "The partition index."),
                new Field("error_code", Type.INT16, "The error code for this partition, or 0 if there was no error."),
                new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The error message for this partition, or null if there was no error."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 0;

        /** Deserializing constructor. */
        public ReassignablePartitionResponse(Readable _readable, short _version) {
            read(_readable, _version);
        }

        /** Default constructor: zeroed fields, empty error message. */
        public ReassignablePartitionResponse() {
            this.partitionIndex = 0;
            this.errorCode = (short) 0;
            this.errorMessage = "";
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 0;
        }

        /** Reads {partitionIndex, errorCode, errorMessage (nullable compact string), tagged fields}. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of ReassignablePartitionResponse");
            }
            this.partitionIndex = _readable.readInt();
            this.errorCode = _readable.readShort();
            {
                int length;
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    this.errorMessage = null;
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field errorMessage had invalid length " + length);
                } else {
                    this.errorMessage = _readable.readString(length);
                }
            }
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        /** Serializes in read() order; a null errorMessage is written as varint 0. */
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            _writable.writeInt(partitionIndex);
            _writable.writeShort(errorCode);
            if (errorMessage == null) {
                _writable.writeUnsignedVarint(0);
            } else {
                byte[] _stringBytes = _cache.getSerializedValue(errorMessage);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        /** Accumulates the serialized size and caches errorMessage's UTF-8 bytes for write(). */
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of ReassignablePartitionResponse");
            }
            _size.addBytes(4);
            _size.addBytes(2);
            if (errorMessage == null) {
                _size.addBytes(1);
            } else {
                byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'errorMessage' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(errorMessage, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof ReassignablePartitionResponse)) return false;
            ReassignablePartitionResponse other = (ReassignablePartitionResponse) obj;
            if (partitionIndex != other.partitionIndex) return false;
            if (errorCode != other.errorCode) return false;
            if (this.errorMessage == null) {
                if (other.errorMessage != null) return false;
            } else {
                if (!this.errorMessage.equals(other.errorMessage)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + partitionIndex;
            hashCode = 31 * hashCode + errorCode;
            hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode());
            return hashCode;
        }

        /** Copy; preserves a null errorMessage as null (strings are immutable, so shared). */
        @Override
        public ReassignablePartitionResponse duplicate() {
            ReassignablePartitionResponse _duplicate = new ReassignablePartitionResponse();
            _duplicate.partitionIndex = partitionIndex;
            _duplicate.errorCode = errorCode;
            if (errorMessage == null) {
                _duplicate.errorMessage = null;
            } else {
                _duplicate.errorMessage = errorMessage;
            }
            return _duplicate;
        }

        @Override
        public String toString() {
            return "ReassignablePartitionResponse("
                + "partitionIndex=" + partitionIndex
                + ", errorCode=" + errorCode
                + ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'")
                + ")";
        }

        public int partitionIndex() {
            return this.partitionIndex;
        }

        public short errorCode() {
            return this.errorCode;
        }

        public String errorMessage() {
            return this.errorMessage;
        }

        /** Lazily creates the unknown-tagged-field list so callers can append to it. */
        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public ReassignablePartitionResponse setPartitionIndex(int v) {
            this.partitionIndex = v;
            return this;
        }

        public ReassignablePartitionResponse setErrorCode(short v) {
            this.errorCode = v;
            return this;
        }

        public ReassignablePartitionResponse setErrorMessage(String v) {
            this.errorMessage = v;
            return this;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterPartitionReassignmentsResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED.  DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.NullNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.*;

/**
 * Converts {@link AlterPartitionReassignmentsResponseData} to and from Jackson JSON
 * trees. Auto-generated; missing mandatory fields cause a RuntimeException on read.
 */
public class AlterPartitionReassignmentsResponseDataJsonConverter {
    /**
     * Builds a message object from a JSON object node. All four top-level fields
     * (throttleTimeMs, errorCode, errorMessage, responses) are mandatory; only
     * errorMessage may be JSON null.
     */
    public static AlterPartitionReassignmentsResponseData read(JsonNode _node, short _version) {
        AlterPartitionReassignmentsResponseData _object = new AlterPartitionReassignmentsResponseData();
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            throw new RuntimeException("AlterPartitionReassignmentsResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "AlterPartitionReassignmentsResponseData");
        }
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("AlterPartitionReassignmentsResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "AlterPartitionReassignmentsResponseData");
        }
        JsonNode _errorMessageNode = _node.get("errorMessage");
        if (_errorMessageNode == null) {
            throw new RuntimeException("AlterPartitionReassignmentsResponseData: unable to locate field 'errorMessage', which is mandatory in version " + _version);
        } else {
            if (_errorMessageNode.isNull()) {
                _object.errorMessage = null;
            } else {
                if (!_errorMessageNode.isTextual()) {
                    // NOTE(review): the generator reports the enclosing object's node type
                    // (_node), not the offending field's; kept as generated.
                    throw new RuntimeException("AlterPartitionReassignmentsResponseData expected a string type, but got " + _node.getNodeType());
                }
                _object.errorMessage = _errorMessageNode.asText();
            }
        }
        JsonNode _responsesNode = _node.get("responses");
        if (_responsesNode == null) {
            throw new RuntimeException("AlterPartitionReassignmentsResponseData: unable to locate field 'responses', which is mandatory in version " + _version);
        } else {
            if (!_responsesNode.isArray()) {
                throw new RuntimeException("AlterPartitionReassignmentsResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            ArrayList<ReassignableTopicResponse> _collection = new ArrayList<ReassignableTopicResponse>(_responsesNode.size());
            _object.responses = _collection;
            for (JsonNode _element : _responsesNode) {
                _collection.add(ReassignableTopicResponseJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /** Serializes the message to a JSON object node; errorMessage null maps to JSON null. */
    public static JsonNode write(AlterPartitionReassignmentsResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        _node.set("errorCode", new ShortNode(_object.errorCode));
        if (_object.errorMessage == null) {
            _node.set("errorMessage", NullNode.instance);
        } else {
            _node.set("errorMessage", new TextNode(_object.errorMessage));
        }
        ArrayNode _responsesArray = new ArrayNode(JsonNodeFactory.instance);
        for (ReassignableTopicResponse _element : _object.responses) {
            _responsesArray.add(ReassignableTopicResponseJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("responses", _responsesArray);
        return _node;
    }

    // Convenience overload: always serializes embedded records.
    public static JsonNode write(AlterPartitionReassignmentsResponseData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the nested {@code ReassignablePartitionResponse} struct. */
    public static class ReassignablePartitionResponseJsonConverter {
        /** Reads partitionIndex, errorCode and (nullable) errorMessage; all mandatory keys. */
        public static ReassignablePartitionResponse read(JsonNode _node, short _version) {
            ReassignablePartitionResponse _object = new ReassignablePartitionResponse();
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("ReassignablePartitionResponse: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "ReassignablePartitionResponse");
            }
            JsonNode _errorCodeNode = _node.get("errorCode");
            if (_errorCodeNode == null) {
                throw new RuntimeException("ReassignablePartitionResponse: unable to locate field 'errorCode', which is mandatory in version " + _version);
            } else {
                _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "ReassignablePartitionResponse");
            }
            JsonNode _errorMessageNode = _node.get("errorMessage");
            if (_errorMessageNode == null) {
                throw new RuntimeException("ReassignablePartitionResponse: unable to locate field 'errorMessage', which is mandatory in version " + _version);
            } else {
                if (_errorMessageNode.isNull()) {
                    _object.errorMessage = null;
                } else {
                    if (!_errorMessageNode.isTextual()) {
                        throw new RuntimeException("ReassignablePartitionResponse expected a string type, but got " + _node.getNodeType());
                    }
                    _object.errorMessage = _errorMessageNode.asText();
                }
            }
            return _object;
        }

        /** Serializes one partition response to a JSON object node. */
        public static JsonNode write(ReassignablePartitionResponse _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            _node.set("errorCode", new ShortNode(_object.errorCode));
            if (_object.errorMessage == null) {
                _node.set("errorMessage", NullNode.instance);
            } else {
                _node.set("errorMessage", new TextNode(_object.errorMessage));
            }
            return _node;
        }

        // Convenience overload: always serializes embedded records.
        public static JsonNode write(ReassignablePartitionResponse _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** JSON converter for the nested {@code ReassignableTopicResponse} struct. */
    public static class ReassignableTopicResponseJsonConverter {
        /** Reads the topic name (non-null string) and its partition response array. */
        public static ReassignableTopicResponse read(JsonNode _node, short _version) {
            ReassignableTopicResponse _object = new ReassignableTopicResponse();
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("ReassignableTopicResponse: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("ReassignableTopicResponse expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("ReassignableTopicResponse: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    throw new RuntimeException("ReassignableTopicResponse expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<ReassignablePartitionResponse> _collection = new ArrayList<ReassignablePartitionResponse>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(ReassignablePartitionResponseJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        /** Serializes one topic response, delegating each partition to its converter. */
        public static JsonNode write(ReassignableTopicResponse _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (ReassignablePartitionResponse _element : _object.partitions) {
                _partitionsArray.add(ReassignablePartitionResponseJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }

        // Convenience overload: always serializes embedded records.
        public static JsonNode write(ReassignableTopicResponse _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterPartitionRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class AlterPartitionRequestData implements ApiMessage { 
int brokerId; long brokerEpoch; List<TopicData> topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("broker_id", Type.INT32, "The ID of the requesting broker"), new Field("broker_epoch", Type.INT64, "The epoch of the requesting broker"), new Field("topics", new CompactArrayOf(TopicData.SCHEMA_0), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_1 = new Schema( new Field("broker_id", Type.INT32, "The ID of the requesting broker"), new Field("broker_epoch", Type.INT64, "The epoch of the requesting broker"), new Field("topics", new CompactArrayOf(TopicData.SCHEMA_1), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_2 = new Schema( new Field("broker_id", Type.INT32, "The ID of the requesting broker"), new Field("broker_epoch", Type.INT64, "The epoch of the requesting broker"), new Field("topics", new CompactArrayOf(TopicData.SCHEMA_2), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_3 = new Schema( new Field("broker_id", Type.INT32, "The ID of the requesting broker"), new Field("broker_epoch", Type.INT64, "The epoch of the requesting broker"), new Field("topics", new CompactArrayOf(TopicData.SCHEMA_3), ""), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 3; public AlterPartitionRequestData(Readable _readable, short _version) { read(_readable, _version); } public AlterPartitionRequestData() { this.brokerId = 0; this.brokerEpoch = -1L; this.topics = new ArrayList<TopicData>(0); } @Override public short apiKey() { return 56; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 3; } @Override public void read(Readable _readable, short _version) { this.brokerId = _readable.readInt(); this.brokerEpoch 
= _readable.readLong(); { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<TopicData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new TopicData(_readable, _version)); } this.topics = newCollection; } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(brokerId); _writable.writeLong(brokerEpoch); _writable.writeUnsignedVarint(topics.size() + 1); for (TopicData topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); _size.addBytes(8); { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); for (TopicData topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : 
_unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterPartitionRequestData)) return false; AlterPartitionRequestData other = (AlterPartitionRequestData) obj; if (brokerId != other.brokerId) return false; if (brokerEpoch != other.brokerEpoch) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + brokerId; hashCode = 31 * hashCode + ((int) (brokerEpoch >> 32) ^ (int) brokerEpoch); hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode()); return hashCode; } @Override public AlterPartitionRequestData duplicate() { AlterPartitionRequestData _duplicate = new AlterPartitionRequestData(); _duplicate.brokerId = brokerId; _duplicate.brokerEpoch = brokerEpoch; ArrayList<TopicData> newTopics = new ArrayList<TopicData>(topics.size()); for (TopicData _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "AlterPartitionRequestData(" + "brokerId=" + brokerId + ", brokerEpoch=" + brokerEpoch + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public int brokerId() { return this.brokerId; } public long brokerEpoch() { return this.brokerEpoch; } public List<TopicData> topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterPartitionRequestData setBrokerId(int v) { 
this.brokerId = v; return this; } public AlterPartitionRequestData setBrokerEpoch(long v) { this.brokerEpoch = v; return this; } public AlterPartitionRequestData setTopics(List<TopicData> v) { this.topics = v; return this; } public static class TopicData implements Message { String topicName; Uuid topicId; List<PartitionData> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topic_name", Type.COMPACT_STRING, "The name of the topic to alter ISRs for"), new Field("partitions", new CompactArrayOf(PartitionData.SCHEMA_0), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_1 = new Schema( new Field("topic_name", Type.COMPACT_STRING, "The name of the topic to alter ISRs for"), new Field("partitions", new CompactArrayOf(PartitionData.SCHEMA_1), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_2 = new Schema( new Field("topic_id", Type.UUID, "The ID of the topic to alter ISRs for"), new Field("partitions", new CompactArrayOf(PartitionData.SCHEMA_1), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_3 = new Schema( new Field("topic_id", Type.UUID, "The ID of the topic to alter ISRs for"), new Field("partitions", new CompactArrayOf(PartitionData.SCHEMA_3), ""), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 3; public TopicData(Readable _readable, short _version) { read(_readable, _version); } public TopicData() { this.topicName = ""; this.topicId = Uuid.ZERO_UUID; this.partitions = new ArrayList<PartitionData>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 3; } @Override public void read(Readable _readable, short _version) { if (_version > 3) { throw new 
UnsupportedVersionException("Can't read version " + _version + " of TopicData"); } if (_version <= 1) { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field topicName was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field topicName had invalid length " + length); } else { this.topicName = _readable.readString(length); } } else { this.topicName = ""; } if (_version >= 2) { this.topicId = _readable.readUuid(); } else { this.topicId = Uuid.ZERO_UUID; } { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<PartitionData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new PartitionData(_readable, _version)); } this.partitions = newCollection; } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version <= 1) { { byte[] _stringBytes = _cache.getSerializedValue(topicName); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } } if (_version >= 2) { _writable.writeUuid(topicId); } _writable.writeUnsignedVarint(partitions.size() + 1); for (PartitionData partitionsElement : partitions) { 
partitionsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 3) { throw new UnsupportedVersionException("Can't size version " + _version + " of TopicData"); } if (_version <= 1) { { byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'topicName' field is too long to be serialized"); } _cache.cacheSerializedValue(topicName, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } } if (_version >= 2) { _size.addBytes(16); } { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); for (PartitionData partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof TopicData)) return false; TopicData other = (TopicData) obj; if (this.topicName == null) { if (other.topicName != null) return false; } else { if (!this.topicName.equals(other.topicName)) return false; } if (!this.topicId.equals(other.topicId)) return false; if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return 
MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode()); hashCode = 31 * hashCode + topicId.hashCode(); hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode()); return hashCode; } @Override public TopicData duplicate() { TopicData _duplicate = new TopicData(); _duplicate.topicName = topicName; _duplicate.topicId = topicId; ArrayList<PartitionData> newPartitions = new ArrayList<PartitionData>(partitions.size()); for (PartitionData _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "TopicData(" + "topicName=" + ((topicName == null) ? "null" : "'" + topicName.toString() + "'") + ", topicId=" + topicId.toString() + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String topicName() { return this.topicName; } public Uuid topicId() { return this.topicId; } public List<PartitionData> partitions() { return this.partitions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public TopicData setTopicName(String v) { this.topicName = v; return this; } public TopicData setTopicId(Uuid v) { this.topicId = v; return this; } public TopicData setPartitions(List<PartitionData> v) { this.partitions = v; return this; } } public static class PartitionData implements Message { int partitionIndex; int leaderEpoch; List<Integer> newIsr; List<BrokerState> newIsrWithEpochs; byte leaderRecoveryState; int partitionEpoch; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("partition_index", Type.INT32, "The partition index"), new Field("leader_epoch", Type.INT32, "The leader epoch of 
this partition"), new Field("new_isr", new CompactArrayOf(Type.INT32), "The ISR for this partition. Deprecated since version 3."), new Field("partition_epoch", Type.INT32, "The expected epoch of the partition which is being updated. For legacy cluster this is the ZkVersion in the LeaderAndIsr request."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_1 = new Schema( new Field("partition_index", Type.INT32, "The partition index"), new Field("leader_epoch", Type.INT32, "The leader epoch of this partition"), new Field("new_isr", new CompactArrayOf(Type.INT32), "The ISR for this partition. Deprecated since version 3."), new Field("leader_recovery_state", Type.INT8, "1 if the partition is recovering from an unclean leader election; 0 otherwise."), new Field("partition_epoch", Type.INT32, "The expected epoch of the partition which is being updated. For legacy cluster this is the ZkVersion in the LeaderAndIsr request."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = new Schema( new Field("partition_index", Type.INT32, "The partition index"), new Field("leader_epoch", Type.INT32, "The leader epoch of this partition"), new Field("new_isr_with_epochs", new CompactArrayOf(BrokerState.SCHEMA_3), ""), new Field("leader_recovery_state", Type.INT8, "1 if the partition is recovering from an unclean leader election; 0 otherwise."), new Field("partition_epoch", Type.INT32, "The expected epoch of the partition which is being updated. 
For legacy cluster this is the ZkVersion in the LeaderAndIsr request."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 3; public PartitionData(Readable _readable, short _version) { read(_readable, _version); } public PartitionData() { this.partitionIndex = 0; this.leaderEpoch = 0; this.newIsr = new ArrayList<Integer>(0); this.newIsrWithEpochs = new ArrayList<BrokerState>(0); this.leaderRecoveryState = (byte) 0; this.partitionEpoch = 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 3; } @Override public void read(Readable _readable, short _version) { if (_version > 3) { throw new UnsupportedVersionException("Can't read version " + _version + " of PartitionData"); } this.partitionIndex = _readable.readInt(); this.leaderEpoch = _readable.readInt(); if (_version <= 2) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field newIsr was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<Integer> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(_readable.readInt()); } this.newIsr = newCollection; } } else { this.newIsr = new ArrayList<Integer>(0); } if (_version >= 3) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field newIsrWithEpochs was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but 
there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<BrokerState> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new BrokerState(_readable, _version)); } this.newIsrWithEpochs = newCollection; } } else { this.newIsrWithEpochs = new ArrayList<BrokerState>(0); } if (_version >= 1) { this.leaderRecoveryState = _readable.readByte(); } else { this.leaderRecoveryState = (byte) 0; } this.partitionEpoch = _readable.readInt(); this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(partitionIndex); _writable.writeInt(leaderEpoch); if (_version <= 2) { _writable.writeUnsignedVarint(newIsr.size() + 1); for (Integer newIsrElement : newIsr) { _writable.writeInt(newIsrElement); } } else { if (!this.newIsr.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default newIsr at version " + _version); } } if (_version >= 3) { _writable.writeUnsignedVarint(newIsrWithEpochs.size() + 1); for (BrokerState newIsrWithEpochsElement : newIsrWithEpochs) { newIsrWithEpochsElement.write(_writable, _cache, _version); } } else { if (!this.newIsrWithEpochs.isEmpty()) { throw new UnsupportedVersionException("Attempted to write a non-default newIsrWithEpochs at version " + _version); } } if (_version >= 1) { _writable.writeByte(leaderRecoveryState); } else { if (this.leaderRecoveryState != (byte) 0) { throw new UnsupportedVersionException("Attempted to write a non-default leaderRecoveryState at version " + _version); } } 
_writable.writeInt(partitionEpoch); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 3) { throw new UnsupportedVersionException("Can't size version " + _version + " of PartitionData"); } _size.addBytes(4); _size.addBytes(4); if (_version <= 2) { { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(newIsr.size() + 1)); _size.addBytes(newIsr.size() * 4); } } if (_version >= 3) { { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(newIsrWithEpochs.size() + 1)); for (BrokerState newIsrWithEpochsElement : newIsrWithEpochs) { newIsrWithEpochsElement.addSize(_size, _cache, _version); } } } if (_version >= 1) { _size.addBytes(1); } _size.addBytes(4); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof PartitionData)) return false; PartitionData other = (PartitionData) obj; if (partitionIndex != other.partitionIndex) return false; if (leaderEpoch != other.leaderEpoch) return false; if (this.newIsr == null) { if (other.newIsr != null) return false; } else { if (!this.newIsr.equals(other.newIsr)) return false; } if (this.newIsrWithEpochs == null) { if (other.newIsrWithEpochs != null) return false; } else { if (!this.newIsrWithEpochs.equals(other.newIsrWithEpochs)) return false; } if (leaderRecoveryState != other.leaderRecoveryState) return false; if (partitionEpoch != 
other.partitionEpoch) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partitionIndex; hashCode = 31 * hashCode + leaderEpoch; hashCode = 31 * hashCode + (newIsr == null ? 0 : newIsr.hashCode()); hashCode = 31 * hashCode + (newIsrWithEpochs == null ? 0 : newIsrWithEpochs.hashCode()); hashCode = 31 * hashCode + leaderRecoveryState; hashCode = 31 * hashCode + partitionEpoch; return hashCode; } @Override public PartitionData duplicate() { PartitionData _duplicate = new PartitionData(); _duplicate.partitionIndex = partitionIndex; _duplicate.leaderEpoch = leaderEpoch; ArrayList<Integer> newNewIsr = new ArrayList<Integer>(newIsr.size()); for (Integer _element : newIsr) { newNewIsr.add(_element); } _duplicate.newIsr = newNewIsr; ArrayList<BrokerState> newNewIsrWithEpochs = new ArrayList<BrokerState>(newIsrWithEpochs.size()); for (BrokerState _element : newIsrWithEpochs) { newNewIsrWithEpochs.add(_element.duplicate()); } _duplicate.newIsrWithEpochs = newNewIsrWithEpochs; _duplicate.leaderRecoveryState = leaderRecoveryState; _duplicate.partitionEpoch = partitionEpoch; return _duplicate; } @Override public String toString() { return "PartitionData(" + "partitionIndex=" + partitionIndex + ", leaderEpoch=" + leaderEpoch + ", newIsr=" + MessageUtil.deepToString(newIsr.iterator()) + ", newIsrWithEpochs=" + MessageUtil.deepToString(newIsrWithEpochs.iterator()) + ", leaderRecoveryState=" + leaderRecoveryState + ", partitionEpoch=" + partitionEpoch + ")"; } public int partitionIndex() { return this.partitionIndex; } public int leaderEpoch() { return this.leaderEpoch; } public List<Integer> newIsr() { return this.newIsr; } public List<BrokerState> newIsrWithEpochs() { return this.newIsrWithEpochs; } public byte leaderRecoveryState() { return this.leaderRecoveryState; } public int partitionEpoch() { return this.partitionEpoch; } @Override public 
List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public PartitionData setPartitionIndex(int v) { this.partitionIndex = v; return this; } public PartitionData setLeaderEpoch(int v) { this.leaderEpoch = v; return this; } public PartitionData setNewIsr(List<Integer> v) { this.newIsr = v; return this; } public PartitionData setNewIsrWithEpochs(List<BrokerState> v) { this.newIsrWithEpochs = v; return this; } public PartitionData setLeaderRecoveryState(byte v) { this.leaderRecoveryState = v; return this; } public PartitionData setPartitionEpoch(int v) { this.partitionEpoch = v; return this; } } public static class BrokerState implements Message { int brokerId; long brokerEpoch; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_3 = new Schema( new Field("broker_id", Type.INT32, "The ID of the broker."), new Field("broker_epoch", Type.INT64, "The epoch of the broker. 
It will be -1 if the epoch check is not supported."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { null, null, null, SCHEMA_3 }; public static final short LOWEST_SUPPORTED_VERSION = 3; public static final short HIGHEST_SUPPORTED_VERSION = 3; public BrokerState(Readable _readable, short _version) { read(_readable, _version); } public BrokerState() { this.brokerId = 0; this.brokerEpoch = -1L; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 3; } @Override public void read(Readable _readable, short _version) { if (_version > 3) { throw new UnsupportedVersionException("Can't read version " + _version + " of BrokerState"); } this.brokerId = _readable.readInt(); this.brokerEpoch = _readable.readLong(); this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { if (_version < 3) { throw new UnsupportedVersionException("Can't write version " + _version + " of BrokerState"); } int _numTaggedFields = 0; _writable.writeInt(brokerId); _writable.writeLong(brokerEpoch); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 3) { throw new UnsupportedVersionException("Can't size version " + _version + " of BrokerState"); } _size.addBytes(4); 
_size.addBytes(8); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof BrokerState)) return false; BrokerState other = (BrokerState) obj; if (brokerId != other.brokerId) return false; if (brokerEpoch != other.brokerEpoch) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + brokerId; hashCode = 31 * hashCode + ((int) (brokerEpoch >> 32) ^ (int) brokerEpoch); return hashCode; } @Override public BrokerState duplicate() { BrokerState _duplicate = new BrokerState(); _duplicate.brokerId = brokerId; _duplicate.brokerEpoch = brokerEpoch; return _duplicate; } @Override public String toString() { return "BrokerState(" + "brokerId=" + brokerId + ", brokerEpoch=" + brokerEpoch + ")"; } public int brokerId() { return this.brokerId; } public long brokerEpoch() { return this.brokerEpoch; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public BrokerState setBrokerId(int v) { this.brokerId = v; return this; } public BrokerState setBrokerEpoch(long v) { this.brokerEpoch = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterPartitionRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.AlterPartitionRequestData.*;

/**
 * Converts {@link AlterPartitionRequestData} (and its nested structs) to and from a
 * Jackson JSON tree representation, honoring the per-version field rules declared in
 * the generated schemas: fields absent from a version are rejected on read only when
 * mandatory in that version, and writes of non-default values for fields that do not
 * exist in the target version raise {@link UnsupportedVersionException}.
 *
 * <p>Field naming in JSON is camelCase ("brokerId", "newIsrWithEpochs", ...), matching
 * the generated message field names rather than the wire-schema snake_case names.
 */
public class AlterPartitionRequestDataJsonConverter {
    /**
     * Builds an {@link AlterPartitionRequestData} from a JSON node.
     * All three top-level fields (brokerId, brokerEpoch, topics) are mandatory in
     * every supported version, so a missing node is always an error.
     *
     * @param _node    the JSON object to read from
     * @param _version the message version whose field rules apply
     * @throws RuntimeException if a mandatory field is missing or has the wrong JSON type
     */
    public static AlterPartitionRequestData read(JsonNode _node, short _version) {
        AlterPartitionRequestData _object = new AlterPartitionRequestData();
        JsonNode _brokerIdNode = _node.get("brokerId");
        if (_brokerIdNode == null) {
            throw new RuntimeException("AlterPartitionRequestData: unable to locate field 'brokerId', which is mandatory in version " + _version);
        } else {
            _object.brokerId = MessageUtil.jsonNodeToInt(_brokerIdNode, "AlterPartitionRequestData");
        }
        JsonNode _brokerEpochNode = _node.get("brokerEpoch");
        if (_brokerEpochNode == null) {
            throw new RuntimeException("AlterPartitionRequestData: unable to locate field 'brokerEpoch', which is mandatory in version " + _version);
        } else {
            _object.brokerEpoch = MessageUtil.jsonNodeToLong(_brokerEpochNode, "AlterPartitionRequestData");
        }
        JsonNode _topicsNode = _node.get("topics");
        if (_topicsNode == null) {
            throw new RuntimeException("AlterPartitionRequestData: unable to locate field 'topics', which is mandatory in version " + _version);
        } else {
            if (!_topicsNode.isArray()) {
                throw new RuntimeException("AlterPartitionRequestData expected a JSON array, but got " + _node.getNodeType());
            }
            // The collection is assigned to the object before being populated; each
            // element is delegated to the nested TopicData converter.
            ArrayList<TopicData> _collection = new ArrayList<TopicData>(_topicsNode.size());
            _object.topics = _collection;
            for (JsonNode _element : _topicsNode) {
                _collection.add(TopicDataJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes an {@link AlterPartitionRequestData} to a JSON object node.
     *
     * @param _object           the message to serialize
     * @param _version          the message version whose field rules apply
     * @param _serializeRecords propagated to nested converters (this message carries
     *                          no record data itself)
     */
    public static JsonNode write(AlterPartitionRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("brokerId", new IntNode(_object.brokerId));
        _node.set("brokerEpoch", new LongNode(_object.brokerEpoch));
        ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
        for (TopicData _element : _object.topics) {
            _topicsArray.add(TopicDataJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("topics", _topicsArray);
        return _node;
    }

    /** Convenience overload of {@link #write(AlterPartitionRequestData, short, boolean)} with record serialization enabled. */
    public static JsonNode write(AlterPartitionRequestData _object, short _version) {
        return write(_object, _version, true);
    }

    /**
     * JSON converter for the nested {@code BrokerState} struct.
     * BrokerState only exists from version 3 onward, so both read and write reject
     * earlier versions outright.
     */
    public static class BrokerStateJsonConverter {
        /**
         * Builds a {@code BrokerState} from a JSON node; both fields are mandatory.
         *
         * @throws UnsupportedVersionException if {@code _version < 3}
         * @throws RuntimeException            if a mandatory field is missing
         */
        public static BrokerState read(JsonNode _node, short _version) {
            BrokerState _object = new BrokerState();
            if (_version < 3) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of BrokerState");
            }
            JsonNode _brokerIdNode = _node.get("brokerId");
            if (_brokerIdNode == null) {
                throw new RuntimeException("BrokerState: unable to locate field 'brokerId', which is mandatory in version " + _version);
            } else {
                _object.brokerId = MessageUtil.jsonNodeToInt(_brokerIdNode, "BrokerState");
            }
            JsonNode _brokerEpochNode = _node.get("brokerEpoch");
            if (_brokerEpochNode == null) {
                throw new RuntimeException("BrokerState: unable to locate field 'brokerEpoch', which is mandatory in version " + _version);
            } else {
                _object.brokerEpoch = MessageUtil.jsonNodeToLong(_brokerEpochNode, "BrokerState");
            }
            return _object;
        }

        /**
         * Serializes a {@code BrokerState} to a JSON object node.
         *
         * @throws UnsupportedVersionException if {@code _version < 3}
         */
        public static JsonNode write(BrokerState _object, short _version, boolean _serializeRecords) {
            if (_version < 3) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of BrokerState");
            }
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("brokerId", new IntNode(_object.brokerId));
            _node.set("brokerEpoch", new LongNode(_object.brokerEpoch));
            return _node;
        }

        /** Convenience overload of {@link #write(BrokerState, short, boolean)} with record serialization enabled. */
        public static JsonNode write(BrokerState _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /**
     * JSON converter for the nested {@code PartitionData} struct.
     * Version gating: {@code newIsr} is mandatory in versions 0-2 and replaced by
     * {@code newIsrWithEpochs} (mandatory) from version 3; {@code leaderRecoveryState}
     * is mandatory from version 1.
     */
    public static class PartitionDataJsonConverter {
        /**
         * Builds a {@code PartitionData} from a JSON node, applying the per-version
         * mandatory-field rules above. Note that when an optional-for-this-version
         * field IS present in the JSON it is still read (the version check applies
         * only to the missing-node branch) — this mirrors the generated wire codecs.
         *
         * @throws RuntimeException if a mandatory field is missing or mistyped
         */
        public static PartitionData read(JsonNode _node, short _version) {
            PartitionData _object = new PartitionData();
            JsonNode _partitionIndexNode = _node.get("partitionIndex");
            if (_partitionIndexNode == null) {
                throw new RuntimeException("PartitionData: unable to locate field 'partitionIndex', which is mandatory in version " + _version);
            } else {
                _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "PartitionData");
            }
            JsonNode _leaderEpochNode = _node.get("leaderEpoch");
            if (_leaderEpochNode == null) {
                throw new RuntimeException("PartitionData: unable to locate field 'leaderEpoch', which is mandatory in version " + _version);
            } else {
                _object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "PartitionData");
            }
            JsonNode _newIsrNode = _node.get("newIsr");
            if (_newIsrNode == null) {
                if (_version <= 2) {
                    throw new RuntimeException("PartitionData: unable to locate field 'newIsr', which is mandatory in version " + _version);
                } else {
                    // Absent and not part of this version: default to an empty list.
                    _object.newIsr = new ArrayList<Integer>(0);
                }
            } else {
                if (!_newIsrNode.isArray()) {
                    // NOTE(review): the message reports _node.getNodeType() (the parent
                    // object's type) rather than the offending child node's type —
                    // presumably a quirk of the code generator; verify upstream.
                    throw new RuntimeException("PartitionData expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_newIsrNode.size());
                _object.newIsr = _collection;
                for (JsonNode _element : _newIsrNode) {
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "PartitionData element"));
                }
            }
            JsonNode _newIsrWithEpochsNode = _node.get("newIsrWithEpochs");
            if (_newIsrWithEpochsNode == null) {
                if (_version >= 3) {
                    throw new RuntimeException("PartitionData: unable to locate field 'newIsrWithEpochs', which is mandatory in version " + _version);
                } else {
                    _object.newIsrWithEpochs = new ArrayList<BrokerState>(0);
                }
            } else {
                if (!_newIsrWithEpochsNode.isArray()) {
                    throw new RuntimeException("PartitionData expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<BrokerState> _collection = new ArrayList<BrokerState>(_newIsrWithEpochsNode.size());
                _object.newIsrWithEpochs = _collection;
                for (JsonNode _element : _newIsrWithEpochsNode) {
                    _collection.add(BrokerStateJsonConverter.read(_element, _version));
                }
            }
            JsonNode _leaderRecoveryStateNode = _node.get("leaderRecoveryState");
            if (_leaderRecoveryStateNode == null) {
                if (_version >= 1) {
                    throw new RuntimeException("PartitionData: unable to locate field 'leaderRecoveryState', which is mandatory in version " + _version);
                } else {
                    _object.leaderRecoveryState = (byte) 0;
                }
            } else {
                _object.leaderRecoveryState = MessageUtil.jsonNodeToByte(_leaderRecoveryStateNode, "PartitionData");
            }
            JsonNode _partitionEpochNode = _node.get("partitionEpoch");
            if (_partitionEpochNode == null) {
                throw new RuntimeException("PartitionData: unable to locate field 'partitionEpoch', which is mandatory in version " + _version);
            } else {
                _object.partitionEpoch = MessageUtil.jsonNodeToInt(_partitionEpochNode, "PartitionData");
            }
            return _object;
        }

        /**
         * Serializes a {@code PartitionData} to a JSON object node.
         * Writing a non-default value for a field that does not exist in the target
         * version raises {@link UnsupportedVersionException}; default values are
         * silently omitted instead.
         */
        public static JsonNode write(PartitionData _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("partitionIndex", new IntNode(_object.partitionIndex));
            _node.set("leaderEpoch", new IntNode(_object.leaderEpoch));
            if (_version <= 2) {
                ArrayNode _newIsrArray = new ArrayNode(JsonNodeFactory.instance);
                for (Integer _element : _object.newIsr) {
                    _newIsrArray.add(new IntNode(_element));
                }
                _node.set("newIsr", _newIsrArray);
            } else {
                if (!_object.newIsr.isEmpty()) {
                    throw new UnsupportedVersionException("Attempted to write a non-default newIsr at version " + _version);
                }
            }
            if (_version >= 3) {
                ArrayNode _newIsrWithEpochsArray = new ArrayNode(JsonNodeFactory.instance);
                for (BrokerState _element : _object.newIsrWithEpochs) {
                    _newIsrWithEpochsArray.add(BrokerStateJsonConverter.write(_element, _version, _serializeRecords));
                }
                _node.set("newIsrWithEpochs", _newIsrWithEpochsArray);
            } else {
                if (!_object.newIsrWithEpochs.isEmpty()) {
                    throw new UnsupportedVersionException("Attempted to write a non-default newIsrWithEpochs at version " + _version);
                }
            }
            if (_version >= 1) {
                // A byte field is emitted as a ShortNode (Jackson has no byte node type).
                _node.set("leaderRecoveryState", new ShortNode(_object.leaderRecoveryState));
            } else {
                if (_object.leaderRecoveryState != (byte) 0) {
                    throw new UnsupportedVersionException("Attempted to write a non-default leaderRecoveryState at version " + _version);
                }
            }
            _node.set("partitionEpoch", new IntNode(_object.partitionEpoch));
            return _node;
        }

        /** Convenience overload of {@link #write(PartitionData, short, boolean)} with record serialization enabled. */
        public static JsonNode write(PartitionData _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /**
     * JSON converter for the nested {@code TopicData} struct.
     * Version gating: topics are addressed by {@code topicName} in versions 0-1 and
     * by {@code topicId} (UUID) from version 2.
     */
    public static class TopicDataJsonConverter {
        /**
         * Builds a {@code TopicData} from a JSON node. Whichever of topicName/topicId
         * is not used by the version defaults ("" or {@link Uuid#ZERO_UUID}) when
         * absent; {@code partitions} is always mandatory.
         *
         * @throws RuntimeException if a mandatory field is missing or mistyped
         */
        public static TopicData read(JsonNode _node, short _version) {
            TopicData _object = new TopicData();
            JsonNode _topicNameNode = _node.get("topicName");
            if (_topicNameNode == null) {
                if (_version <= 1) {
                    throw new RuntimeException("TopicData: unable to locate field 'topicName', which is mandatory in version " + _version);
                } else {
                    _object.topicName = "";
                }
            } else {
                if (!_topicNameNode.isTextual()) {
                    throw new RuntimeException("TopicData expected a string type, but got " + _node.getNodeType());
                }
                _object.topicName = _topicNameNode.asText();
            }
            JsonNode _topicIdNode = _node.get("topicId");
            if (_topicIdNode == null) {
                if (_version >= 2) {
                    throw new RuntimeException("TopicData: unable to locate field 'topicId', which is mandatory in version " + _version);
                } else {
                    _object.topicId = Uuid.ZERO_UUID;
                }
            } else {
                if (!_topicIdNode.isTextual()) {
                    throw new RuntimeException("TopicData expected a JSON string type, but got " + _node.getNodeType());
                }
                // UUIDs travel as their base64-like string form; see Uuid.toString().
                _object.topicId = Uuid.fromString(_topicIdNode.asText());
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("TopicData: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    throw new RuntimeException("TopicData expected a JSON array, but got " + _node.getNodeType());
                }
                ArrayList<PartitionData> _collection = new ArrayList<PartitionData>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    _collection.add(PartitionDataJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        /**
         * Serializes a {@code TopicData} to a JSON object node, emitting topicName
         * for versions 0-1 and topicId for versions 2+.
         */
        public static JsonNode write(TopicData _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            if (_version <= 1) {
                _node.set("topicName", new TextNode(_object.topicName));
            }
            if (_version >= 2) {
                _node.set("topicId", new TextNode(_object.topicId.toString()));
            }
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (PartitionData _element : _object.partitions) {
                _partitionsArray.add(PartitionDataJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }

        /** Convenience overload of {@link #write(TopicData, short, boolean)} with record serialization enabled. */
        public static JsonNode write(TopicData _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterPartitionResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class AlterPartitionResponseData implements ApiMessage { 
int throttleTimeMs; short errorCode; List<TopicData> topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The top level response error code"), new Field("topics", new CompactArrayOf(TopicData.SCHEMA_0), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_1 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The top level response error code"), new Field("topics", new CompactArrayOf(TopicData.SCHEMA_1), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_2 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("error_code", Type.INT16, "The top level response error code"), new Field("topics", new CompactArrayOf(TopicData.SCHEMA_2), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 3; public AlterPartitionResponseData(Readable _readable, short _version) { read(_readable, _version); } public AlterPartitionResponseData() { this.throttleTimeMs = 0; this.errorCode = (short) 0; this.topics = new ArrayList<TopicData>(0); } @Override public short apiKey() { return 56; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 3; } 
@Override public void read(Readable _readable, short _version) { this.throttleTimeMs = _readable.readInt(); this.errorCode = _readable.readShort(); { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<TopicData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new TopicData(_readable, _version)); } this.topics = newCollection; } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(throttleTimeMs); _writable.writeShort(errorCode); _writable.writeUnsignedVarint(topics.size() + 1); for (TopicData topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); _size.addBytes(2); { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); for (TopicData topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); 
} } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterPartitionResponseData)) return false; AlterPartitionResponseData other = (AlterPartitionResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (errorCode != other.errorCode) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (topics == null ? 
0 : topics.hashCode()); return hashCode; } @Override public AlterPartitionResponseData duplicate() { AlterPartitionResponseData _duplicate = new AlterPartitionResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; _duplicate.errorCode = errorCode; ArrayList<TopicData> newTopics = new ArrayList<TopicData>(topics.size()); for (TopicData _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "AlterPartitionResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", errorCode=" + errorCode + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public short errorCode() { return this.errorCode; } public List<TopicData> topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterPartitionResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public AlterPartitionResponseData setErrorCode(short v) { this.errorCode = v; return this; } public AlterPartitionResponseData setTopics(List<TopicData> v) { this.topics = v; return this; } public static class TopicData implements Message { String topicName; Uuid topicId; List<PartitionData> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topic_name", Type.COMPACT_STRING, "The name of the topic"), new Field("partitions", new CompactArrayOf(PartitionData.SCHEMA_0), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_1 = new Schema( new Field("topic_name", Type.COMPACT_STRING, "The name of the topic"), new Field("partitions", new CompactArrayOf(PartitionData.SCHEMA_1), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_2 = new Schema( new Field("topic_id", Type.UUID, "The ID 
of the topic"), new Field("partitions", new CompactArrayOf(PartitionData.SCHEMA_1), ""), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 3; public TopicData(Readable _readable, short _version) { read(_readable, _version); } public TopicData() { this.topicName = ""; this.topicId = Uuid.ZERO_UUID; this.partitions = new ArrayList<PartitionData>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 3; } @Override public void read(Readable _readable, short _version) { if (_version > 3) { throw new UnsupportedVersionException("Can't read version " + _version + " of TopicData"); } if (_version <= 1) { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field topicName was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field topicName had invalid length " + length); } else { this.topicName = _readable.readString(length); } } else { this.topicName = ""; } if (_version >= 2) { this.topicId = _readable.readUuid(); } else { this.topicId = Uuid.ZERO_UUID; } { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<PartitionData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new PartitionData(_readable, _version)); } this.partitions = newCollection; } } this._unknownTaggedFields = null; 
int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version <= 1) { { byte[] _stringBytes = _cache.getSerializedValue(topicName); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } } if (_version >= 2) { _writable.writeUuid(topicId); } _writable.writeUnsignedVarint(partitions.size() + 1); for (PartitionData partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 3) { throw new UnsupportedVersionException("Can't size version " + _version + " of TopicData"); } if (_version <= 1) { { byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'topicName' field is too long to be serialized"); } _cache.cacheSerializedValue(topicName, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } } if (_version >= 2) { _size.addBytes(16); } { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); for (PartitionData partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += 
_unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof TopicData)) return false; TopicData other = (TopicData) obj; if (this.topicName == null) { if (other.topicName != null) return false; } else { if (!this.topicName.equals(other.topicName)) return false; } if (!this.topicId.equals(other.topicId)) return false; if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode()); hashCode = 31 * hashCode + topicId.hashCode(); hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode()); return hashCode; } @Override public TopicData duplicate() { TopicData _duplicate = new TopicData(); _duplicate.topicName = topicName; _duplicate.topicId = topicId; ArrayList<PartitionData> newPartitions = new ArrayList<PartitionData>(partitions.size()); for (PartitionData _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "TopicData(" + "topicName=" + ((topicName == null) ? 
"null" : "'" + topicName.toString() + "'") + ", topicId=" + topicId.toString() + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String topicName() { return this.topicName; } public Uuid topicId() { return this.topicId; } public List<PartitionData> partitions() { return this.partitions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public TopicData setTopicName(String v) { this.topicName = v; return this; } public TopicData setTopicId(Uuid v) { this.topicId = v; return this; } public TopicData setPartitions(List<PartitionData> v) { this.partitions = v; return this; } } public static class PartitionData implements Message { int partitionIndex; short errorCode; int leaderId; int leaderEpoch; List<Integer> isr; byte leaderRecoveryState; int partitionEpoch; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("partition_index", Type.INT32, "The partition index"), new Field("error_code", Type.INT16, "The partition level error code"), new Field("leader_id", Type.INT32, "The broker ID of the leader."), new Field("leader_epoch", Type.INT32, "The leader epoch."), new Field("isr", new CompactArrayOf(Type.INT32), "The in-sync replica IDs."), new Field("partition_epoch", Type.INT32, "The current epoch for the partition for KRaft controllers. 
The current ZK version for the legacy controllers."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_1 = new Schema( new Field("partition_index", Type.INT32, "The partition index"), new Field("error_code", Type.INT16, "The partition level error code"), new Field("leader_id", Type.INT32, "The broker ID of the leader."), new Field("leader_epoch", Type.INT32, "The leader epoch."), new Field("isr", new CompactArrayOf(Type.INT32), "The in-sync replica IDs."), new Field("leader_recovery_state", Type.INT8, "1 if the partition is recovering from an unclean leader election; 0 otherwise."), new Field("partition_epoch", Type.INT32, "The current epoch for the partition for KRaft controllers. The current ZK version for the legacy controllers."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_2 = SCHEMA_1; public static final Schema SCHEMA_3 = SCHEMA_2; public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2, SCHEMA_3 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 3; public PartitionData(Readable _readable, short _version) { read(_readable, _version); } public PartitionData() { this.partitionIndex = 0; this.errorCode = (short) 0; this.leaderId = 0; this.leaderEpoch = 0; this.isr = new ArrayList<Integer>(0); this.leaderRecoveryState = (byte) 0; this.partitionEpoch = 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 3; } @Override public void read(Readable _readable, short _version) { if (_version > 3) { throw new UnsupportedVersionException("Can't read version " + _version + " of PartitionData"); } this.partitionIndex = _readable.readInt(); this.errorCode = _readable.readShort(); this.leaderId = _readable.readInt(); this.leaderEpoch = _readable.readInt(); { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new 
RuntimeException("non-nullable field isr was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<Integer> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(_readable.readInt()); } this.isr = newCollection; } } if (_version >= 1) { this.leaderRecoveryState = _readable.readByte(); } else { this.leaderRecoveryState = (byte) 0; } this.partitionEpoch = _readable.readInt(); this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(partitionIndex); _writable.writeShort(errorCode); _writable.writeInt(leaderId); _writable.writeInt(leaderEpoch); _writable.writeUnsignedVarint(isr.size() + 1); for (Integer isrElement : isr) { _writable.writeInt(isrElement); } if (_version >= 1) { _writable.writeByte(leaderRecoveryState); } _writable.writeInt(partitionEpoch); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 3) { throw new UnsupportedVersionException("Can't size version " + _version + " of PartitionData"); } _size.addBytes(4); _size.addBytes(2); 
_size.addBytes(4); _size.addBytes(4); { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(isr.size() + 1)); _size.addBytes(isr.size() * 4); } if (_version >= 1) { _size.addBytes(1); } _size.addBytes(4); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof PartitionData)) return false; PartitionData other = (PartitionData) obj; if (partitionIndex != other.partitionIndex) return false; if (errorCode != other.errorCode) return false; if (leaderId != other.leaderId) return false; if (leaderEpoch != other.leaderEpoch) return false; if (this.isr == null) { if (other.isr != null) return false; } else { if (!this.isr.equals(other.isr)) return false; } if (leaderRecoveryState != other.leaderRecoveryState) return false; if (partitionEpoch != other.partitionEpoch) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partitionIndex; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + leaderId; hashCode = 31 * hashCode + leaderEpoch; hashCode = 31 * hashCode + (isr == null ? 
0 : isr.hashCode()); hashCode = 31 * hashCode + leaderRecoveryState; hashCode = 31 * hashCode + partitionEpoch; return hashCode; } @Override public PartitionData duplicate() { PartitionData _duplicate = new PartitionData(); _duplicate.partitionIndex = partitionIndex; _duplicate.errorCode = errorCode; _duplicate.leaderId = leaderId; _duplicate.leaderEpoch = leaderEpoch; ArrayList<Integer> newIsr = new ArrayList<Integer>(isr.size()); for (Integer _element : isr) { newIsr.add(_element); } _duplicate.isr = newIsr; _duplicate.leaderRecoveryState = leaderRecoveryState; _duplicate.partitionEpoch = partitionEpoch; return _duplicate; } @Override public String toString() { return "PartitionData(" + "partitionIndex=" + partitionIndex + ", errorCode=" + errorCode + ", leaderId=" + leaderId + ", leaderEpoch=" + leaderEpoch + ", isr=" + MessageUtil.deepToString(isr.iterator()) + ", leaderRecoveryState=" + leaderRecoveryState + ", partitionEpoch=" + partitionEpoch + ")"; } public int partitionIndex() { return this.partitionIndex; } public short errorCode() { return this.errorCode; } public int leaderId() { return this.leaderId; } public int leaderEpoch() { return this.leaderEpoch; } public List<Integer> isr() { return this.isr; } public byte leaderRecoveryState() { return this.leaderRecoveryState; } public int partitionEpoch() { return this.partitionEpoch; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public PartitionData setPartitionIndex(int v) { this.partitionIndex = v; return this; } public PartitionData setErrorCode(short v) { this.errorCode = v; return this; } public PartitionData setLeaderId(int v) { this.leaderId = v; return this; } public PartitionData setLeaderEpoch(int v) { this.leaderEpoch = v; return this; } public PartitionData setIsr(List<Integer> v) { this.isr = v; return this; } public PartitionData setLeaderRecoveryState(byte v) { 
this.leaderRecoveryState = v; return this; } public PartitionData setPartitionEpoch(int v) { this.partitionEpoch = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterPartitionResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.AlterPartitionResponseData.*; public class AlterPartitionResponseDataJsonConverter { public static AlterPartitionResponseData read(JsonNode _node, short _version) { AlterPartitionResponseData _object = new AlterPartitionResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { throw new RuntimeException("AlterPartitionResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, 
"AlterPartitionResponseData"); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("AlterPartitionResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "AlterPartitionResponseData"); } JsonNode _topicsNode = _node.get("topics"); if (_topicsNode == null) { throw new RuntimeException("AlterPartitionResponseData: unable to locate field 'topics', which is mandatory in version " + _version); } else { if (!_topicsNode.isArray()) { throw new RuntimeException("AlterPartitionResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<TopicData> _collection = new ArrayList<TopicData>(_topicsNode.size()); _object.topics = _collection; for (JsonNode _element : _topicsNode) { _collection.add(TopicDataJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(AlterPartitionResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); _node.set("errorCode", new ShortNode(_object.errorCode)); ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance); for (TopicData _element : _object.topics) { _topicsArray.add(TopicDataJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topics", _topicsArray); return _node; } public static JsonNode write(AlterPartitionResponseData _object, short _version) { return write(_object, _version, true); } public static class PartitionDataJsonConverter { public static PartitionData read(JsonNode _node, short _version) { PartitionData _object = new PartitionData(); JsonNode _partitionIndexNode = _node.get("partitionIndex"); if (_partitionIndexNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'partitionIndex', which is mandatory in version " + 
_version); } else { _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "PartitionData"); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "PartitionData"); } JsonNode _leaderIdNode = _node.get("leaderId"); if (_leaderIdNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'leaderId', which is mandatory in version " + _version); } else { _object.leaderId = MessageUtil.jsonNodeToInt(_leaderIdNode, "PartitionData"); } JsonNode _leaderEpochNode = _node.get("leaderEpoch"); if (_leaderEpochNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'leaderEpoch', which is mandatory in version " + _version); } else { _object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "PartitionData"); } JsonNode _isrNode = _node.get("isr"); if (_isrNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'isr', which is mandatory in version " + _version); } else { if (!_isrNode.isArray()) { throw new RuntimeException("PartitionData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<Integer> _collection = new ArrayList<Integer>(_isrNode.size()); _object.isr = _collection; for (JsonNode _element : _isrNode) { _collection.add(MessageUtil.jsonNodeToInt(_element, "PartitionData element")); } } JsonNode _leaderRecoveryStateNode = _node.get("leaderRecoveryState"); if (_leaderRecoveryStateNode == null) { if (_version >= 1) { throw new RuntimeException("PartitionData: unable to locate field 'leaderRecoveryState', which is mandatory in version " + _version); } else { _object.leaderRecoveryState = (byte) 0; } } else { _object.leaderRecoveryState = MessageUtil.jsonNodeToByte(_leaderRecoveryStateNode, "PartitionData"); } JsonNode _partitionEpochNode 
= _node.get("partitionEpoch"); if (_partitionEpochNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'partitionEpoch', which is mandatory in version " + _version); } else { _object.partitionEpoch = MessageUtil.jsonNodeToInt(_partitionEpochNode, "PartitionData"); } return _object; } public static JsonNode write(PartitionData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("partitionIndex", new IntNode(_object.partitionIndex)); _node.set("errorCode", new ShortNode(_object.errorCode)); _node.set("leaderId", new IntNode(_object.leaderId)); _node.set("leaderEpoch", new IntNode(_object.leaderEpoch)); ArrayNode _isrArray = new ArrayNode(JsonNodeFactory.instance); for (Integer _element : _object.isr) { _isrArray.add(new IntNode(_element)); } _node.set("isr", _isrArray); if (_version >= 1) { _node.set("leaderRecoveryState", new ShortNode(_object.leaderRecoveryState)); } _node.set("partitionEpoch", new IntNode(_object.partitionEpoch)); return _node; } public static JsonNode write(PartitionData _object, short _version) { return write(_object, _version, true); } } public static class TopicDataJsonConverter { public static TopicData read(JsonNode _node, short _version) { TopicData _object = new TopicData(); JsonNode _topicNameNode = _node.get("topicName"); if (_topicNameNode == null) { if (_version <= 1) { throw new RuntimeException("TopicData: unable to locate field 'topicName', which is mandatory in version " + _version); } else { _object.topicName = ""; } } else { if (!_topicNameNode.isTextual()) { throw new RuntimeException("TopicData expected a string type, but got " + _node.getNodeType()); } _object.topicName = _topicNameNode.asText(); } JsonNode _topicIdNode = _node.get("topicId"); if (_topicIdNode == null) { if (_version >= 2) { throw new RuntimeException("TopicData: unable to locate field 'topicId', which is mandatory in version " + _version); } else { 
_object.topicId = Uuid.ZERO_UUID; } } else { if (!_topicIdNode.isTextual()) { throw new RuntimeException("TopicData expected a JSON string type, but got " + _node.getNodeType()); } _object.topicId = Uuid.fromString(_topicIdNode.asText()); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("TopicData: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new RuntimeException("TopicData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<PartitionData> _collection = new ArrayList<PartitionData>(_partitionsNode.size()); _object.partitions = _collection; for (JsonNode _element : _partitionsNode) { _collection.add(PartitionDataJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(TopicData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_version <= 1) { _node.set("topicName", new TextNode(_object.topicName)); } if (_version >= 2) { _node.set("topicId", new TextNode(_object.topicId.toString())); } ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (PartitionData _element : _object.partitions) { _partitionsArray.add(PartitionDataJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitions", _partitionsArray); return _node; } public static JsonNode write(TopicData _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterReplicaLogDirsRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import 
org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class AlterReplicaLogDirsRequestData implements ApiMessage { AlterReplicaLogDirCollection dirs; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("dirs", new ArrayOf(AlterReplicaLogDir.SCHEMA_0), "The alterations to make for each directory.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("dirs", new CompactArrayOf(AlterReplicaLogDir.SCHEMA_2), "The alterations to make for each directory."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 2; public AlterReplicaLogDirsRequestData(Readable _readable, short _version) { read(_readable, _version); } public AlterReplicaLogDirsRequestData() { this.dirs = new AlterReplicaLogDirCollection(0); } @Override public short apiKey() { return 34; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 2; } @Override public void read(Readable _readable, short _version) { { if (_version >= 2) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field dirs was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AlterReplicaLogDirCollection newCollection = new AlterReplicaLogDirCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterReplicaLogDir(_readable, _version)); } this.dirs = newCollection; } } else { int arrayLength; 
arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field dirs was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AlterReplicaLogDirCollection newCollection = new AlterReplicaLogDirCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterReplicaLogDir(_readable, _version)); } this.dirs = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 2) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version >= 2) { _writable.writeUnsignedVarint(dirs.size() + 1); for (AlterReplicaLogDir dirsElement : dirs) { dirsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(dirs.size()); for (AlterReplicaLogDir dirsElement : dirs) { dirsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 2) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { if (_version >= 2) { 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(dirs.size() + 1)); } else { _size.addBytes(4); } for (AlterReplicaLogDir dirsElement : dirs) { dirsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterReplicaLogDirsRequestData)) return false; AlterReplicaLogDirsRequestData other = (AlterReplicaLogDirsRequestData) obj; if (this.dirs == null) { if (other.dirs != null) return false; } else { if (!this.dirs.equals(other.dirs)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (dirs == null ? 
0 : dirs.hashCode()); return hashCode; } @Override public AlterReplicaLogDirsRequestData duplicate() { AlterReplicaLogDirsRequestData _duplicate = new AlterReplicaLogDirsRequestData(); AlterReplicaLogDirCollection newDirs = new AlterReplicaLogDirCollection(dirs.size()); for (AlterReplicaLogDir _element : dirs) { newDirs.add(_element.duplicate()); } _duplicate.dirs = newDirs; return _duplicate; } @Override public String toString() { return "AlterReplicaLogDirsRequestData(" + "dirs=" + MessageUtil.deepToString(dirs.iterator()) + ")"; } public AlterReplicaLogDirCollection dirs() { return this.dirs; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterReplicaLogDirsRequestData setDirs(AlterReplicaLogDirCollection v) { this.dirs = v; return this; } public static class AlterReplicaLogDir implements Message, ImplicitLinkedHashMultiCollection.Element { String path; AlterReplicaLogDirTopicCollection topics; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("path", Type.STRING, "The absolute directory path."), new Field("topics", new ArrayOf(AlterReplicaLogDirTopic.SCHEMA_0), "The topics to add to the directory.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("path", Type.COMPACT_STRING, "The absolute directory path."), new Field("topics", new CompactArrayOf(AlterReplicaLogDirTopic.SCHEMA_2), "The topics to add to the directory."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 2; public AlterReplicaLogDir(Readable _readable, short _version) { read(_readable, _version); this.prev = 
ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public AlterReplicaLogDir() { this.path = ""; this.topics = new AlterReplicaLogDirTopicCollection(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 2; } @Override public void read(Readable _readable, short _version) { if (_version > 2) { throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDir"); } { int length; if (_version >= 2) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field path was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field path had invalid length " + length); } else { this.path = _readable.readString(length); } } { if (_version >= 2) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } AlterReplicaLogDirTopicCollection newCollection = new AlterReplicaLogDirTopicCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterReplicaLogDirTopic(_readable, _version)); } this.topics = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + 
_readable.remaining() + " bytes remaining."); } AlterReplicaLogDirTopicCollection newCollection = new AlterReplicaLogDirTopicCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterReplicaLogDirTopic(_readable, _version)); } this.topics = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 2) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(path); if (_version >= 2) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 2) { _writable.writeUnsignedVarint(topics.size() + 1); for (AlterReplicaLogDirTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(topics.size()); for (AlterReplicaLogDirTopic topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 2) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 2) { throw new 
UnsupportedVersionException("Can't size version " + _version + " of AlterReplicaLogDir"); } { byte[] _stringBytes = path.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'path' field is too long to be serialized"); } _cache.cacheSerializedValue(path, _stringBytes); if (_version >= 2) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(topics.size() + 1)); } else { _size.addBytes(4); } for (AlterReplicaLogDirTopic topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof AlterReplicaLogDir)) return false; AlterReplicaLogDir other = (AlterReplicaLogDir) obj; if (this.path == null) { if (other.path != null) return false; } else { if (!this.path.equals(other.path)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterReplicaLogDir)) return false; AlterReplicaLogDir other = (AlterReplicaLogDir) obj; if (this.path == null) { if (other.path != null) return false; } else { if (!this.path.equals(other.path)) return false; } if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return 
MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (path == null ? 0 : path.hashCode()); return hashCode; } @Override public AlterReplicaLogDir duplicate() { AlterReplicaLogDir _duplicate = new AlterReplicaLogDir(); _duplicate.path = path; AlterReplicaLogDirTopicCollection newTopics = new AlterReplicaLogDirTopicCollection(topics.size()); for (AlterReplicaLogDirTopic _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "AlterReplicaLogDir(" + "path=" + ((path == null) ? "null" : "'" + path.toString() + "'") + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public String path() { return this.path; } public AlterReplicaLogDirTopicCollection topics() { return this.topics; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterReplicaLogDir setPath(String v) { this.path = v; return this; } public AlterReplicaLogDir setTopics(AlterReplicaLogDirTopicCollection v) { this.topics = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class AlterReplicaLogDirTopic implements Message, ImplicitLinkedHashMultiCollection.Element { String name; List<Integer> partitions; private List<RawTaggedField> _unknownTaggedFields; private int next; private int prev; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(Type.INT32), "The partition indexes.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( 
new Field("name", Type.COMPACT_STRING, "The topic name."), new Field("partitions", new CompactArrayOf(Type.INT32), "The partition indexes."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 2; public AlterReplicaLogDirTopic(Readable _readable, short _version) { read(_readable, _version); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } public AlterReplicaLogDirTopic() { this.name = ""; this.partitions = new ArrayList<Integer>(0); this.prev = ImplicitLinkedHashCollection.INVALID_INDEX; this.next = ImplicitLinkedHashCollection.INVALID_INDEX; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 2; } @Override public void read(Readable _readable, short _version) { if (_version > 2) { throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirTopic"); } { int length; if (_version >= 2) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } { int arrayLength; if (_version >= 2) { arrayLength = _readable.readUnsignedVarint() - 1; } else { arrayLength = _readable.readInt(); } if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<Integer> newCollection = new 
ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(_readable.readInt()); } this.partitions = newCollection; } } this._unknownTaggedFields = null; if (_version >= 2) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); if (_version >= 2) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 2) { _writable.writeUnsignedVarint(partitions.size() + 1); } else { _writable.writeInt(partitions.size()); } for (Integer partitionsElement : partitions) { _writable.writeInt(partitionsElement); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 2) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 2) { throw new UnsupportedVersionException("Can't size version " + _version + " of AlterReplicaLogDirTopic"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } 
_cache.cacheSerializedValue(name, _stringBytes); if (_version >= 2) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { _size.addBytes(4); } _size.addBytes(partitions.size() * 4); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean elementKeysAreEqual(Object obj) { if (!(obj instanceof AlterReplicaLogDirTopic)) return false; AlterReplicaLogDirTopic other = (AlterReplicaLogDirTopic) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } return true; } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterReplicaLogDirTopic)) return false; AlterReplicaLogDirTopic other = (AlterReplicaLogDirTopic) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 
0 : name.hashCode()); return hashCode; } @Override public AlterReplicaLogDirTopic duplicate() { AlterReplicaLogDirTopic _duplicate = new AlterReplicaLogDirTopic(); _duplicate.name = name; ArrayList<Integer> newPartitions = new ArrayList<Integer>(partitions.size()); for (Integer _element : partitions) { newPartitions.add(_element); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "AlterReplicaLogDirTopic(" + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String name() { return this.name; } public List<Integer> partitions() { return this.partitions; } @Override public int next() { return this.next; } @Override public int prev() { return this.prev; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterReplicaLogDirTopic setName(String v) { this.name = v; return this; } public AlterReplicaLogDirTopic setPartitions(List<Integer> v) { this.partitions = v; return this; } @Override public void setNext(int v) { this.next = v; } @Override public void setPrev(int v) { this.prev = v; } } public static class AlterReplicaLogDirTopicCollection extends ImplicitLinkedHashMultiCollection<AlterReplicaLogDirTopic> { public AlterReplicaLogDirTopicCollection() { super(); } public AlterReplicaLogDirTopicCollection(int expectedNumElements) { super(expectedNumElements); } public AlterReplicaLogDirTopicCollection(Iterator<AlterReplicaLogDirTopic> iterator) { super(iterator); } public AlterReplicaLogDirTopic find(String name) { AlterReplicaLogDirTopic _key = new AlterReplicaLogDirTopic(); _key.setName(name); return find(_key); } public List<AlterReplicaLogDirTopic> findAll(String name) { AlterReplicaLogDirTopic _key = new AlterReplicaLogDirTopic(); _key.setName(name); return findAll(_key); } public 
AlterReplicaLogDirTopicCollection duplicate() { AlterReplicaLogDirTopicCollection _duplicate = new AlterReplicaLogDirTopicCollection(size()); for (AlterReplicaLogDirTopic _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } public static class AlterReplicaLogDirCollection extends ImplicitLinkedHashMultiCollection<AlterReplicaLogDir> { public AlterReplicaLogDirCollection() { super(); } public AlterReplicaLogDirCollection(int expectedNumElements) { super(expectedNumElements); } public AlterReplicaLogDirCollection(Iterator<AlterReplicaLogDir> iterator) { super(iterator); } public AlterReplicaLogDir find(String path) { AlterReplicaLogDir _key = new AlterReplicaLogDir(); _key.setPath(path); return find(_key); } public List<AlterReplicaLogDir> findAll(String path) { AlterReplicaLogDir _key = new AlterReplicaLogDir(); _key.setPath(path); return findAll(_key); } public AlterReplicaLogDirCollection duplicate() { AlterReplicaLogDirCollection _duplicate = new AlterReplicaLogDirCollection(size()); for (AlterReplicaLogDir _element : this) { _duplicate.add(_element.duplicate()); } return _duplicate; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterReplicaLogDirsRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import java.util.ArrayList;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.AlterReplicaLogDirsRequestData.*;

/**
 * Converts {@link AlterReplicaLogDirsRequestData} (and its nested structs) to and
 * from Jackson JSON trees.
 *
 * <p>{@code read} methods throw {@link RuntimeException} when a mandatory field is
 * missing or has the wrong JSON type; {@code write} methods build an
 * {@link ObjectNode} mirroring the message schema.
 *
 * <p>NOTE(review): deviates from the generator output in one respect — type-mismatch
 * error messages now report the offending FIELD node's type rather than the parent
 * object's type ({@code _node.getNodeType()} was always {@code OBJECT}, which made
 * the diagnostics misleading). If the generator is re-run, this fix is lost.
 */
public class AlterReplicaLogDirsRequestDataJsonConverter {
    /**
     * Parses a request from a JSON tree.
     *
     * @param _node    the JSON object holding the request fields.
     * @param _version the message schema version (used only for error messages here,
     *                 since field 'dirs' is mandatory in every version).
     * @return the populated request data.
     * @throws RuntimeException if 'dirs' is absent or not a JSON array.
     */
    public static AlterReplicaLogDirsRequestData read(JsonNode _node, short _version) {
        AlterReplicaLogDirsRequestData _object = new AlterReplicaLogDirsRequestData();
        JsonNode _dirsNode = _node.get("dirs");
        if (_dirsNode == null) {
            throw new RuntimeException("AlterReplicaLogDirsRequestData: unable to locate field 'dirs', which is mandatory in version " + _version);
        } else {
            if (!_dirsNode.isArray()) {
                // Report the field node's actual type, not the parent object's.
                throw new RuntimeException("AlterReplicaLogDirsRequestData expected a JSON array, but got " + _dirsNode.getNodeType());
            }
            // Install the collection before filling it, matching generator behavior.
            AlterReplicaLogDirCollection _collection = new AlterReplicaLogDirCollection(_dirsNode.size());
            _object.dirs = _collection;
            for (JsonNode _element : _dirsNode) {
                _collection.add(AlterReplicaLogDirJsonConverter.read(_element, _version));
            }
        }
        return _object;
    }

    /**
     * Serializes a request into a JSON object.
     *
     * @param _object           the request to serialize.
     * @param _version          the message schema version (forwarded to nested converters).
     * @param _serializeRecords forwarded to nested converters (unused by this schema,
     *                          which has no records fields).
     * @return the JSON representation.
     */
    public static JsonNode write(AlterReplicaLogDirsRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        ArrayNode _dirsArray = new ArrayNode(JsonNodeFactory.instance);
        for (AlterReplicaLogDir _element : _object.dirs) {
            _dirsArray.add(AlterReplicaLogDirJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("dirs", _dirsArray);
        return _node;
    }

    /** Convenience overload that always serializes records. */
    public static JsonNode write(AlterReplicaLogDirsRequestData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON serde for the nested {@link AlterReplicaLogDir} struct. */
    public static class AlterReplicaLogDirJsonConverter {
        /**
         * Parses one directory entry (mandatory fields: 'path' string, 'topics' array).
         *
         * @throws RuntimeException if a mandatory field is missing or mistyped.
         */
        public static AlterReplicaLogDir read(JsonNode _node, short _version) {
            AlterReplicaLogDir _object = new AlterReplicaLogDir();
            JsonNode _pathNode = _node.get("path");
            if (_pathNode == null) {
                throw new RuntimeException("AlterReplicaLogDir: unable to locate field 'path', which is mandatory in version " + _version);
            } else {
                if (!_pathNode.isTextual()) {
                    // Report the field node's actual type, not the parent object's.
                    throw new RuntimeException("AlterReplicaLogDir expected a string type, but got " + _pathNode.getNodeType());
                }
                _object.path = _pathNode.asText();
            }
            JsonNode _topicsNode = _node.get("topics");
            if (_topicsNode == null) {
                throw new RuntimeException("AlterReplicaLogDir: unable to locate field 'topics', which is mandatory in version " + _version);
            } else {
                if (!_topicsNode.isArray()) {
                    // Report the field node's actual type, not the parent object's.
                    throw new RuntimeException("AlterReplicaLogDir expected a JSON array, but got " + _topicsNode.getNodeType());
                }
                AlterReplicaLogDirTopicCollection _collection = new AlterReplicaLogDirTopicCollection(_topicsNode.size());
                _object.topics = _collection;
                for (JsonNode _element : _topicsNode) {
                    _collection.add(AlterReplicaLogDirTopicJsonConverter.read(_element, _version));
                }
            }
            return _object;
        }

        /** Serializes one directory entry into a JSON object. */
        public static JsonNode write(AlterReplicaLogDir _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("path", new TextNode(_object.path));
            ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance);
            for (AlterReplicaLogDirTopic _element : _object.topics) {
                _topicsArray.add(AlterReplicaLogDirTopicJsonConverter.write(_element, _version, _serializeRecords));
            }
            _node.set("topics", _topicsArray);
            return _node;
        }

        /** Convenience overload that always serializes records. */
        public static JsonNode write(AlterReplicaLogDir _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /** JSON serde for the nested {@link AlterReplicaLogDirTopic} struct. */
    public static class AlterReplicaLogDirTopicJsonConverter {
        /**
         * Parses one topic entry (mandatory fields: 'name' string, 'partitions'
         * array of int32 partition indexes).
         *
         * @throws RuntimeException if a mandatory field is missing or mistyped.
         */
        public static AlterReplicaLogDirTopic read(JsonNode _node, short _version) {
            AlterReplicaLogDirTopic _object = new AlterReplicaLogDirTopic();
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("AlterReplicaLogDirTopic: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    // Report the field node's actual type, not the parent object's.
                    throw new RuntimeException("AlterReplicaLogDirTopic expected a string type, but got " + _nameNode.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _partitionsNode = _node.get("partitions");
            if (_partitionsNode == null) {
                throw new RuntimeException("AlterReplicaLogDirTopic: unable to locate field 'partitions', which is mandatory in version " + _version);
            } else {
                if (!_partitionsNode.isArray()) {
                    // Report the field node's actual type, not the parent object's.
                    throw new RuntimeException("AlterReplicaLogDirTopic expected a JSON array, but got " + _partitionsNode.getNodeType());
                }
                ArrayList<Integer> _collection = new ArrayList<Integer>(_partitionsNode.size());
                _object.partitions = _collection;
                for (JsonNode _element : _partitionsNode) {
                    // jsonNodeToInt validates the element is an in-range int32.
                    _collection.add(MessageUtil.jsonNodeToInt(_element, "AlterReplicaLogDirTopic element"));
                }
            }
            return _object;
        }

        /** Serializes one topic entry into a JSON object. */
        public static JsonNode write(AlterReplicaLogDirTopic _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance);
            for (Integer _element : _object.partitions) {
                _partitionsArray.add(new IntNode(_element));
            }
            _node.set("partitions", _partitionsArray);
            return _node;
        }

        /** Convenience overload that always serializes records. */
        public static JsonNode write(AlterReplicaLogDirTopic _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterReplicaLogDirsResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class AlterReplicaLogDirsResponseData 
implements ApiMessage { int throttleTimeMs; List<AlterReplicaLogDirTopicResult> results; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("results", new ArrayOf(AlterReplicaLogDirTopicResult.SCHEMA_0), "The results for each topic.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("results", new CompactArrayOf(AlterReplicaLogDirTopicResult.SCHEMA_2), "The results for each topic."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 2; public AlterReplicaLogDirsResponseData(Readable _readable, short _version) { read(_readable, _version); } public AlterReplicaLogDirsResponseData() { this.throttleTimeMs = 0; this.results = new ArrayList<AlterReplicaLogDirTopicResult>(0); } @Override public short apiKey() { return 34; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 2; } @Override public void read(Readable _readable, short _version) { this.throttleTimeMs = _readable.readInt(); { if (_version >= 2) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field results was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + 
_readable.remaining() + " bytes remaining."); } ArrayList<AlterReplicaLogDirTopicResult> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterReplicaLogDirTopicResult(_readable, _version)); } this.results = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field results was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<AlterReplicaLogDirTopicResult> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterReplicaLogDirTopicResult(_readable, _version)); } this.results = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 2) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(throttleTimeMs); if (_version >= 2) { _writable.writeUnsignedVarint(results.size() + 1); for (AlterReplicaLogDirTopicResult resultsElement : results) { resultsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(results.size()); for (AlterReplicaLogDirTopicResult resultsElement : results) { resultsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 2) { 
_writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); { if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(results.size() + 1)); } else { _size.addBytes(4); } for (AlterReplicaLogDirTopicResult resultsElement : results) { resultsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterReplicaLogDirsResponseData)) return false; AlterReplicaLogDirsResponseData other = (AlterReplicaLogDirsResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (this.results == null) { if (other.results != null) return false; } else { if (!this.results.equals(other.results)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + (results == null ? 
0 : results.hashCode()); return hashCode; } @Override public AlterReplicaLogDirsResponseData duplicate() { AlterReplicaLogDirsResponseData _duplicate = new AlterReplicaLogDirsResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; ArrayList<AlterReplicaLogDirTopicResult> newResults = new ArrayList<AlterReplicaLogDirTopicResult>(results.size()); for (AlterReplicaLogDirTopicResult _element : results) { newResults.add(_element.duplicate()); } _duplicate.results = newResults; return _duplicate; } @Override public String toString() { return "AlterReplicaLogDirsResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", results=" + MessageUtil.deepToString(results.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public List<AlterReplicaLogDirTopicResult> results() { return this.results; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterReplicaLogDirsResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public AlterReplicaLogDirsResponseData setResults(List<AlterReplicaLogDirTopicResult> v) { this.results = v; return this; } public static class AlterReplicaLogDirTopicResult implements Message { String topicName; List<AlterReplicaLogDirPartitionResult> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topic_name", Type.STRING, "The name of the topic."), new Field("partitions", new ArrayOf(AlterReplicaLogDirPartitionResult.SCHEMA_0), "The results for each partition.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final Schema SCHEMA_2 = new Schema( new Field("topic_name", Type.COMPACT_STRING, "The name of the topic."), new Field("partitions", new CompactArrayOf(AlterReplicaLogDirPartitionResult.SCHEMA_2), "The results for each partition."), TaggedFieldsSection.of( ) ); public static final 
Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 2; public AlterReplicaLogDirTopicResult(Readable _readable, short _version) { read(_readable, _version); } public AlterReplicaLogDirTopicResult() { this.topicName = ""; this.partitions = new ArrayList<AlterReplicaLogDirPartitionResult>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 2; } @Override public void read(Readable _readable, short _version) { if (_version > 2) { throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirTopicResult"); } { int length; if (_version >= 2) { length = _readable.readUnsignedVarint() - 1; } else { length = _readable.readShort(); } if (length < 0) { throw new RuntimeException("non-nullable field topicName was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field topicName had invalid length " + length); } else { this.topicName = _readable.readString(length); } } { if (_version >= 2) { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<AlterReplicaLogDirPartitionResult> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterReplicaLogDirPartitionResult(_readable, _version)); } this.partitions = newCollection; } } else { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > 
_readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<AlterReplicaLogDirPartitionResult> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterReplicaLogDirPartitionResult(_readable, _version)); } this.partitions = newCollection; } } } this._unknownTaggedFields = null; if (_version >= 2) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(topicName); if (_version >= 2) { _writable.writeUnsignedVarint(_stringBytes.length + 1); } else { _writable.writeShort((short) _stringBytes.length); } _writable.writeByteArray(_stringBytes); } if (_version >= 2) { _writable.writeUnsignedVarint(partitions.size() + 1); for (AlterReplicaLogDirPartitionResult partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } else { _writable.writeInt(partitions.size()); for (AlterReplicaLogDirPartitionResult partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 2) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support 
them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 2) { throw new UnsupportedVersionException("Can't size version " + _version + " of AlterReplicaLogDirTopicResult"); } { byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'topicName' field is too long to be serialized"); } _cache.cacheSerializedValue(topicName, _stringBytes); if (_version >= 2) { _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } else { _size.addBytes(_stringBytes.length + 2); } } { if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(partitions.size() + 1)); } else { _size.addBytes(4); } for (AlterReplicaLogDirPartitionResult partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterReplicaLogDirTopicResult)) return false; AlterReplicaLogDirTopicResult other = (AlterReplicaLogDirTopicResult) obj; if (this.topicName == null) { if (other.topicName != null) return false; } else { if (!this.topicName.equals(other.topicName)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return 
MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode()); hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode()); return hashCode; } @Override public AlterReplicaLogDirTopicResult duplicate() { AlterReplicaLogDirTopicResult _duplicate = new AlterReplicaLogDirTopicResult(); _duplicate.topicName = topicName; ArrayList<AlterReplicaLogDirPartitionResult> newPartitions = new ArrayList<AlterReplicaLogDirPartitionResult>(partitions.size()); for (AlterReplicaLogDirPartitionResult _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "AlterReplicaLogDirTopicResult(" + "topicName=" + ((topicName == null) ? "null" : "'" + topicName.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String topicName() { return this.topicName; } public List<AlterReplicaLogDirPartitionResult> partitions() { return this.partitions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterReplicaLogDirTopicResult setTopicName(String v) { this.topicName = v; return this; } public AlterReplicaLogDirTopicResult setPartitions(List<AlterReplicaLogDirPartitionResult> v) { this.partitions = v; return this; } } public static class AlterReplicaLogDirPartitionResult implements Message { int partitionIndex; short errorCode; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error.") ); public static final Schema SCHEMA_1 = SCHEMA_0; public static final 
Schema SCHEMA_2 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1, SCHEMA_2 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 2; public AlterReplicaLogDirPartitionResult(Readable _readable, short _version) { read(_readable, _version); } public AlterReplicaLogDirPartitionResult() { this.partitionIndex = 0; this.errorCode = (short) 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 2; } @Override public void read(Readable _readable, short _version) { if (_version > 2) { throw new UnsupportedVersionException("Can't read version " + _version + " of AlterReplicaLogDirPartitionResult"); } this.partitionIndex = _readable.readInt(); this.errorCode = _readable.readShort(); this._unknownTaggedFields = null; if (_version >= 2) { int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(partitionIndex); _writable.writeShort(errorCode); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_version >= 2) { _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of 
this message does not support them."); } } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 2) { throw new UnsupportedVersionException("Can't size version " + _version + " of AlterReplicaLogDirPartitionResult"); } _size.addBytes(4); _size.addBytes(2); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_version >= 2) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } else { if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterReplicaLogDirPartitionResult)) return false; AlterReplicaLogDirPartitionResult other = (AlterReplicaLogDirPartitionResult) obj; if (partitionIndex != other.partitionIndex) return false; if (errorCode != other.errorCode) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partitionIndex; hashCode = 31 * hashCode + errorCode; return hashCode; } @Override public AlterReplicaLogDirPartitionResult duplicate() { AlterReplicaLogDirPartitionResult _duplicate = new AlterReplicaLogDirPartitionResult(); _duplicate.partitionIndex = partitionIndex; _duplicate.errorCode = errorCode; return _duplicate; } @Override public String toString() { return "AlterReplicaLogDirPartitionResult(" + "partitionIndex=" + partitionIndex + ", errorCode=" + errorCode + ")"; } public int partitionIndex() { return this.partitionIndex; } public short errorCode() { return this.errorCode; } 
@Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterReplicaLogDirPartitionResult setPartitionIndex(int v) { this.partitionIndex = v; return this; } public AlterReplicaLogDirPartitionResult setErrorCode(short v) { this.errorCode = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterReplicaLogDirsResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.AlterReplicaLogDirsResponseData.*; public class AlterReplicaLogDirsResponseDataJsonConverter { public static AlterReplicaLogDirsResponseData read(JsonNode _node, short _version) { AlterReplicaLogDirsResponseData _object = new AlterReplicaLogDirsResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { throw new RuntimeException("AlterReplicaLogDirsResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "AlterReplicaLogDirsResponseData"); 
} JsonNode _resultsNode = _node.get("results"); if (_resultsNode == null) { throw new RuntimeException("AlterReplicaLogDirsResponseData: unable to locate field 'results', which is mandatory in version " + _version); } else { if (!_resultsNode.isArray()) { throw new RuntimeException("AlterReplicaLogDirsResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<AlterReplicaLogDirTopicResult> _collection = new ArrayList<AlterReplicaLogDirTopicResult>(_resultsNode.size()); _object.results = _collection; for (JsonNode _element : _resultsNode) { _collection.add(AlterReplicaLogDirTopicResultJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(AlterReplicaLogDirsResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); ArrayNode _resultsArray = new ArrayNode(JsonNodeFactory.instance); for (AlterReplicaLogDirTopicResult _element : _object.results) { _resultsArray.add(AlterReplicaLogDirTopicResultJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("results", _resultsArray); return _node; } public static JsonNode write(AlterReplicaLogDirsResponseData _object, short _version) { return write(_object, _version, true); } public static class AlterReplicaLogDirPartitionResultJsonConverter { public static AlterReplicaLogDirPartitionResult read(JsonNode _node, short _version) { AlterReplicaLogDirPartitionResult _object = new AlterReplicaLogDirPartitionResult(); JsonNode _partitionIndexNode = _node.get("partitionIndex"); if (_partitionIndexNode == null) { throw new RuntimeException("AlterReplicaLogDirPartitionResult: unable to locate field 'partitionIndex', which is mandatory in version " + _version); } else { _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "AlterReplicaLogDirPartitionResult"); } JsonNode _errorCodeNode = _node.get("errorCode"); 
if (_errorCodeNode == null) { throw new RuntimeException("AlterReplicaLogDirPartitionResult: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "AlterReplicaLogDirPartitionResult"); } return _object; } public static JsonNode write(AlterReplicaLogDirPartitionResult _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("partitionIndex", new IntNode(_object.partitionIndex)); _node.set("errorCode", new ShortNode(_object.errorCode)); return _node; } public static JsonNode write(AlterReplicaLogDirPartitionResult _object, short _version) { return write(_object, _version, true); } } public static class AlterReplicaLogDirTopicResultJsonConverter { public static AlterReplicaLogDirTopicResult read(JsonNode _node, short _version) { AlterReplicaLogDirTopicResult _object = new AlterReplicaLogDirTopicResult(); JsonNode _topicNameNode = _node.get("topicName"); if (_topicNameNode == null) { throw new RuntimeException("AlterReplicaLogDirTopicResult: unable to locate field 'topicName', which is mandatory in version " + _version); } else { if (!_topicNameNode.isTextual()) { throw new RuntimeException("AlterReplicaLogDirTopicResult expected a string type, but got " + _node.getNodeType()); } _object.topicName = _topicNameNode.asText(); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("AlterReplicaLogDirTopicResult: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new RuntimeException("AlterReplicaLogDirTopicResult expected a JSON array, but got " + _node.getNodeType()); } ArrayList<AlterReplicaLogDirPartitionResult> _collection = new ArrayList<AlterReplicaLogDirPartitionResult>(_partitionsNode.size()); _object.partitions = _collection; for (JsonNode _element : 
_partitionsNode) { _collection.add(AlterReplicaLogDirPartitionResultJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(AlterReplicaLogDirTopicResult _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("topicName", new TextNode(_object.topicName)); ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (AlterReplicaLogDirPartitionResult _element : _object.partitions) { _partitionsArray.add(AlterReplicaLogDirPartitionResultJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitions", _partitionsArray); return _node; } public static JsonNode write(AlterReplicaLogDirTopicResult _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterUserScramCredentialsRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.Bytes; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class 
AlterUserScramCredentialsRequestData implements ApiMessage { List<ScramCredentialDeletion> deletions; List<ScramCredentialUpsertion> upsertions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("deletions", new CompactArrayOf(ScramCredentialDeletion.SCHEMA_0), "The SCRAM credentials to remove."), new Field("upsertions", new CompactArrayOf(ScramCredentialUpsertion.SCHEMA_0), "The SCRAM credentials to update/insert."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public AlterUserScramCredentialsRequestData(Readable _readable, short _version) { read(_readable, _version); } public AlterUserScramCredentialsRequestData() { this.deletions = new ArrayList<ScramCredentialDeletion>(0); this.upsertions = new ArrayList<ScramCredentialUpsertion>(0); } @Override public short apiKey() { return 51; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field deletions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ScramCredentialDeletion> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ScramCredentialDeletion(_readable, _version)); } this.deletions = newCollection; } } { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field upsertions was serialized as 
null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<ScramCredentialUpsertion> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new ScramCredentialUpsertion(_readable, _version)); } this.upsertions = newCollection; } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeUnsignedVarint(deletions.size() + 1); for (ScramCredentialDeletion deletionsElement : deletions) { deletionsElement.write(_writable, _cache, _version); } _writable.writeUnsignedVarint(upsertions.size() + 1); for (ScramCredentialUpsertion upsertionsElement : upsertions) { upsertionsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(deletions.size() + 1)); for (ScramCredentialDeletion deletionsElement : deletions) { deletionsElement.addSize(_size, _cache, _version); } } { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(upsertions.size() + 1)); for (ScramCredentialUpsertion upsertionsElement : upsertions) { 
upsertionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterUserScramCredentialsRequestData)) return false; AlterUserScramCredentialsRequestData other = (AlterUserScramCredentialsRequestData) obj; if (this.deletions == null) { if (other.deletions != null) return false; } else { if (!this.deletions.equals(other.deletions)) return false; } if (this.upsertions == null) { if (other.upsertions != null) return false; } else { if (!this.upsertions.equals(other.upsertions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (deletions == null ? 0 : deletions.hashCode()); hashCode = 31 * hashCode + (upsertions == null ? 
// NOTE(review): generator-style Kafka message code for AlterUserScramCredentialsRequest.
// The companion JsonConverter file carries a "THIS CODE IS AUTOMATICALLY GENERATED. DO
// NOT EDIT." banner, so this model class is presumably generator output as well -- confirm
// before hand-editing. Code below is kept token-identical; only comments and line breaks
// were added. This chunk starts mid-expression: the "0 :" below is the else-branch of the
// ternary inside the outer class's hashCode(), continued from the previous line of the file.
0 : upsertions.hashCode()); return hashCode; } @Override public AlterUserScramCredentialsRequestData duplicate() { AlterUserScramCredentialsRequestData _duplicate = new AlterUserScramCredentialsRequestData(); ArrayList<ScramCredentialDeletion> newDeletions = new ArrayList<ScramCredentialDeletion>(deletions.size()); for (ScramCredentialDeletion _element : deletions) { newDeletions.add(_element.duplicate()); } _duplicate.deletions = newDeletions; ArrayList<ScramCredentialUpsertion> newUpsertions = new ArrayList<ScramCredentialUpsertion>(upsertions.size()); for (ScramCredentialUpsertion _element : upsertions) { newUpsertions.add(_element.duplicate()); } _duplicate.upsertions = newUpsertions; return _duplicate; } @Override public String toString() { return "AlterUserScramCredentialsRequestData(" + "deletions=" + MessageUtil.deepToString(deletions.iterator()) + ", upsertions=" + MessageUtil.deepToString(upsertions.iterator()) + ")"; } public List<ScramCredentialDeletion> deletions() { return this.deletions; } public List<ScramCredentialUpsertion> upsertions() { return this.upsertions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterUserScramCredentialsRequestData setDeletions(List<ScramCredentialDeletion> v) { this.deletions = v; return this; } public AlterUserScramCredentialsRequestData setUpsertions(List<ScramCredentialUpsertion> v) { this.upsertions = v; return this; }
// Nested request entry: deletes one SCRAM credential, identified by user name plus a
// mechanism byte. SCHEMA_0 shows the wire layout: COMPACT_STRING name, INT8 mechanism,
// then an (empty) tagged-fields section. Unknown tagged fields read off the wire are
// retained in _unknownTaggedFields and re-emitted by write(), so unrecognized tags
// survive a read/write round trip. Only schema version 0 exists; read() and addSize()
// throw UnsupportedVersionException for anything higher.
public static class ScramCredentialDeletion implements Message { String name; byte mechanism; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.COMPACT_STRING, "The user name."), new Field("mechanism", Type.INT8, "The SCRAM mechanism."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public 
static final short HIGHEST_SUPPORTED_VERSION = 0; public ScramCredentialDeletion(Readable _readable, short _version) { read(_readable, _version); } public ScramCredentialDeletion() { this.name = ""; this.mechanism = (byte) 0; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of ScramCredentialDeletion"); } { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } this.mechanism = _readable.readByte(); this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } _writable.writeByte(mechanism); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new 
UnsupportedVersionException("Can't size version " + _version + " of ScramCredentialDeletion"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } _size.addBytes(1); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof ScramCredentialDeletion)) return false; ScramCredentialDeletion other = (ScramCredentialDeletion) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (mechanism != other.mechanism) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode()); hashCode = 31 * hashCode + mechanism; return hashCode; } @Override public ScramCredentialDeletion duplicate() { ScramCredentialDeletion _duplicate = new ScramCredentialDeletion(); _duplicate.name = name; _duplicate.mechanism = mechanism; return _duplicate; } @Override public String toString() { return "ScramCredentialDeletion(" + "name=" + ((name == null) ? 
"null" : "'" + name.toString() + "'") + ", mechanism=" + mechanism + ")"; } public String name() { return this.name; } public byte mechanism() { return this.mechanism; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public ScramCredentialDeletion setName(String v) { this.name = v; return this; } public ScramCredentialDeletion setMechanism(byte v) { this.mechanism = v; return this; } } 
// Nested request entry: creates or updates one SCRAM credential. Adds iterations (INT32)
// and two COMPACT_BYTES fields, salt and saltedPassword, on top of name + mechanism.
// read() rejects a null marker for every field (all are non-nullable in SCHEMA_0), and
// only version 0 is supported -- read()/addSize() throw UnsupportedVersionException above
// that. duplicate() deep-copies the byte arrays via MessageUtil.duplicate, while the
// salt()/saltedPassword() accessors and setters share the arrays without copying.
public static class ScramCredentialUpsertion implements Message { String name; byte mechanism; int iterations; byte[] salt; byte[] saltedPassword; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("name", Type.COMPACT_STRING, "The user name."), new Field("mechanism", Type.INT8, "The SCRAM mechanism."), new Field("iterations", Type.INT32, "The number of iterations."), new Field("salt", Type.COMPACT_BYTES, "A random salt generated by the client."), new Field("salted_password", Type.COMPACT_BYTES, "The salted password."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public ScramCredentialUpsertion(Readable _readable, short _version) { read(_readable, _version); } public ScramCredentialUpsertion() { this.name = ""; this.mechanism = (byte) 0; this.iterations = 0; this.salt = Bytes.EMPTY; this.saltedPassword = Bytes.EMPTY; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of ScramCredentialUpsertion"); } { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new 
RuntimeException("non-nullable field name was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field name had invalid length " + length); } else { this.name = _readable.readString(length); } } this.mechanism = _readable.readByte(); this.iterations = _readable.readInt(); { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field salt was serialized as null"); } else { byte[] newBytes = _readable.readArray(length); this.salt = newBytes; } } { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field saltedPassword was serialized as null"); } else { byte[] newBytes = _readable.readArray(length); this.saltedPassword = newBytes; } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(name); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } _writable.writeByte(mechanism); _writable.writeInt(iterations); _writable.writeUnsignedVarint(salt.length + 1); _writable.writeByteArray(salt); _writable.writeUnsignedVarint(saltedPassword.length + 1); _writable.writeByteArray(saltedPassword); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, 
ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of ScramCredentialUpsertion"); } { byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'name' field is too long to be serialized"); } _cache.cacheSerializedValue(name, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } _size.addBytes(1); _size.addBytes(4); { _size.addBytes(salt.length); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(salt.length + 1)); } { _size.addBytes(saltedPassword.length); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(saltedPassword.length + 1)); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof ScramCredentialUpsertion)) return false; ScramCredentialUpsertion other = (ScramCredentialUpsertion) obj; if (this.name == null) { if (other.name != null) return false; } else { if (!this.name.equals(other.name)) return false; } if (mechanism != other.mechanism) return false; if (iterations != other.iterations) return false; if (!Arrays.equals(this.salt, other.salt)) return false; if (!Arrays.equals(this.saltedPassword, other.saltedPassword)) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (name == null ? 
0 : name.hashCode()); hashCode = 31 * hashCode + mechanism; hashCode = 31 * hashCode + iterations; hashCode = 31 * hashCode + Arrays.hashCode(salt); hashCode = 31 * hashCode + Arrays.hashCode(saltedPassword); return hashCode; } @Override public ScramCredentialUpsertion duplicate() { ScramCredentialUpsertion _duplicate = new ScramCredentialUpsertion(); _duplicate.name = name; _duplicate.mechanism = mechanism; _duplicate.iterations = iterations; _duplicate.salt = MessageUtil.duplicate(salt); _duplicate.saltedPassword = MessageUtil.duplicate(saltedPassword); return _duplicate; } @Override public String toString() { return "ScramCredentialUpsertion(" + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'") + ", mechanism=" + mechanism + ", iterations=" + iterations + ", salt=" + Arrays.toString(salt) + ", saltedPassword=" + Arrays.toString(saltedPassword) + ")"; } public String name() { return this.name; } public byte mechanism() { return this.mechanism; } public int iterations() { return this.iterations; } public byte[] salt() { return this.salt; } public byte[] saltedPassword() { return this.saltedPassword; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public ScramCredentialUpsertion setName(String v) { this.name = v; return this; } public ScramCredentialUpsertion setMechanism(byte v) { this.mechanism = v; return this; } public ScramCredentialUpsertion setIterations(int v) { this.iterations = v; return this; } public ScramCredentialUpsertion setSalt(byte[] v) { this.salt = v; return this; } public ScramCredentialUpsertion setSaltedPassword(byte[] v) { this.saltedPassword = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterUserScramCredentialsRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */
// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
// NOTE(review): the extraction had collapsed this whole file onto one physical line,
// which left everything after the "//" banner above commented out. The line break after
// the banner is restored here (whitespace-only repair -- tokens are untouched).
package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.BinaryNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import java.util.Arrays; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.AlterUserScramCredentialsRequestData.*;
// JSON converter for AlterUserScramCredentialsRequestData, companion to the binary codec
// in the statically-imported message class. read() requires both "deletions" and
// "upsertions" JSON arrays and throws RuntimeException when a mandatory field is absent
// or mistyped; write() emits the same two arrays via the nested per-entry converters.
// The _serializeRecords flag is accepted for interface uniformity but unused here.
public class AlterUserScramCredentialsRequestDataJsonConverter { public static AlterUserScramCredentialsRequestData read(JsonNode _node, short _version) { AlterUserScramCredentialsRequestData _object = new AlterUserScramCredentialsRequestData(); JsonNode _deletionsNode = _node.get("deletions"); if (_deletionsNode == null) { throw new RuntimeException("AlterUserScramCredentialsRequestData: unable to locate field 'deletions', which is mandatory in version " + _version); } else { if 
(!_deletionsNode.isArray()) { throw new RuntimeException("AlterUserScramCredentialsRequestData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<ScramCredentialDeletion> _collection = new ArrayList<ScramCredentialDeletion>(_deletionsNode.size()); _object.deletions = _collection; for (JsonNode _element : _deletionsNode) { _collection.add(ScramCredentialDeletionJsonConverter.read(_element, _version)); } } JsonNode _upsertionsNode = _node.get("upsertions"); if (_upsertionsNode == null) { throw new RuntimeException("AlterUserScramCredentialsRequestData: unable to locate field 'upsertions', which is mandatory in version " + _version); } else { if (!_upsertionsNode.isArray()) { throw new RuntimeException("AlterUserScramCredentialsRequestData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<ScramCredentialUpsertion> _collection = new ArrayList<ScramCredentialUpsertion>(_upsertionsNode.size()); _object.upsertions = _collection; for (JsonNode _element : _upsertionsNode) { _collection.add(ScramCredentialUpsertionJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(AlterUserScramCredentialsRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); ArrayNode _deletionsArray = new ArrayNode(JsonNodeFactory.instance); for (ScramCredentialDeletion _element : _object.deletions) { _deletionsArray.add(ScramCredentialDeletionJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("deletions", _deletionsArray); ArrayNode _upsertionsArray = new ArrayNode(JsonNodeFactory.instance); for (ScramCredentialUpsertion _element : _object.upsertions) { _upsertionsArray.add(ScramCredentialUpsertionJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("upsertions", _upsertionsArray); return _node; } public static JsonNode write(AlterUserScramCredentialsRequestData _object, short _version) { return write(_object, 
_version, true); } 
// Converter for one deletion entry: "name" must be a JSON string; "mechanism" is parsed
// with MessageUtil.jsonNodeToByte and written back as a ShortNode.
// NOTE(review): ShortNode for a byte value is presumably the generator's convention
// (Jackson offers no byte-sized numeric node) -- confirm against other generated
// converters before changing.
public static class ScramCredentialDeletionJsonConverter { public static ScramCredentialDeletion read(JsonNode _node, short _version) { ScramCredentialDeletion _object = new ScramCredentialDeletion(); JsonNode _nameNode = _node.get("name"); if (_nameNode == null) { throw new RuntimeException("ScramCredentialDeletion: unable to locate field 'name', which is mandatory in version " + _version); } else { if (!_nameNode.isTextual()) { throw new RuntimeException("ScramCredentialDeletion expected a string type, but got " + _node.getNodeType()); } _object.name = _nameNode.asText(); } JsonNode _mechanismNode = _node.get("mechanism"); if (_mechanismNode == null) { throw new RuntimeException("ScramCredentialDeletion: unable to locate field 'mechanism', which is mandatory in version " + _version); } else { _object.mechanism = MessageUtil.jsonNodeToByte(_mechanismNode, "ScramCredentialDeletion"); } return _object; } public static JsonNode write(ScramCredentialDeletion _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("name", new TextNode(_object.name)); _node.set("mechanism", new ShortNode(_object.mechanism)); return _node; } public static JsonNode write(ScramCredentialDeletion _object, short _version) { return write(_object, _version, true); } } 
// Converter for one upsertion entry: name/mechanism as above, plus "iterations" (int)
// and the binary "salt"/"saltedPassword" fields. Binary fields are decoded with
// MessageUtil.jsonNodeToBinary and written as BinaryNode over a defensive
// Arrays.copyOf, so the JSON tree never aliases the message's byte arrays.
public static class ScramCredentialUpsertionJsonConverter { public static ScramCredentialUpsertion read(JsonNode _node, short _version) { ScramCredentialUpsertion _object = new ScramCredentialUpsertion(); JsonNode _nameNode = _node.get("name"); if (_nameNode == null) { throw new RuntimeException("ScramCredentialUpsertion: unable to locate field 'name', which is mandatory in version " + _version); } else { if (!_nameNode.isTextual()) { throw new RuntimeException("ScramCredentialUpsertion expected a string type, but got " + _node.getNodeType()); } _object.name = _nameNode.asText(); } JsonNode _mechanismNode = _node.get("mechanism"); if 
(_mechanismNode == null) { throw new RuntimeException("ScramCredentialUpsertion: unable to locate field 'mechanism', which is mandatory in version " + _version); } else { _object.mechanism = MessageUtil.jsonNodeToByte(_mechanismNode, "ScramCredentialUpsertion"); } JsonNode _iterationsNode = _node.get("iterations"); if (_iterationsNode == null) { throw new RuntimeException("ScramCredentialUpsertion: unable to locate field 'iterations', which is mandatory in version " + _version); } else { _object.iterations = MessageUtil.jsonNodeToInt(_iterationsNode, "ScramCredentialUpsertion"); } JsonNode _saltNode = _node.get("salt"); if (_saltNode == null) { throw new RuntimeException("ScramCredentialUpsertion: unable to locate field 'salt', which is mandatory in version " + _version); } else { _object.salt = MessageUtil.jsonNodeToBinary(_saltNode, "ScramCredentialUpsertion"); } JsonNode _saltedPasswordNode = _node.get("saltedPassword"); if (_saltedPasswordNode == null) { throw new RuntimeException("ScramCredentialUpsertion: unable to locate field 'saltedPassword', which is mandatory in version " + _version); } else { _object.saltedPassword = MessageUtil.jsonNodeToBinary(_saltedPasswordNode, "ScramCredentialUpsertion"); } return _object; } public static JsonNode write(ScramCredentialUpsertion _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("name", new TextNode(_object.name)); _node.set("mechanism", new ShortNode(_object.mechanism)); _node.set("iterations", new IntNode(_object.iterations)); _node.set("salt", new BinaryNode(Arrays.copyOf(_object.salt, _object.salt.length))); _node.set("saltedPassword", new BinaryNode(Arrays.copyOf(_object.saltedPassword, _object.saltedPassword.length))); return _node; } public static JsonNode write(ScramCredentialUpsertion _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterUserScramCredentialsResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class AlterUserScramCredentialsResponseData implements ApiMessage { int throttleTimeMs; 
List<AlterUserScramCredentialsResult> results; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."), new Field("results", new CompactArrayOf(AlterUserScramCredentialsResult.SCHEMA_0), "The results for deletions and alterations, one per affected user."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public AlterUserScramCredentialsResponseData(Readable _readable, short _version) { read(_readable, _version); } public AlterUserScramCredentialsResponseData() { this.throttleTimeMs = 0; this.results = new ArrayList<AlterUserScramCredentialsResult>(0); } @Override public short apiKey() { return 51; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { this.throttleTimeMs = _readable.readInt(); { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field results was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<AlterUserScramCredentialsResult> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new AlterUserScramCredentialsResult(_readable, _version)); } this.results = newCollection; } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int 
// --- tail of AlterUserScramCredentialsResponseData.read(): unknown-tagged-field loop ---
// Each tagged field is (tag varint, size varint, payload); unrecognized tags are retained
// verbatim so they round-trip through write(). NOTE(review): the start of read() is outside
// this chunk. write() below emits the flexible-version wire format: int32 throttleTimeMs,
// a compact array (length written as size+1 in an unsigned varint), then the tagged-field
// section. addSize() must mirror write() byte-for-byte, which it does here.
_tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(throttleTimeMs); _writable.writeUnsignedVarint(results.size() + 1); for (AlterUserScramCredentialsResult resultsElement : results) { resultsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(results.size() + 1)); for (AlterUserScramCredentialsResult resultsElement : results) { resultsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterUserScramCredentialsResponseData)) return false; AlterUserScramCredentialsResponseData other = (AlterUserScramCredentialsResponseData) obj; if (throttleTimeMs != other.throttleTimeMs) return false; if (this.results == null) { if (other.results != null) return false; } else { if (!this.results.equals(other.results)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); 
// hashCode() deliberately omits _unknownTaggedFields even though equals() compares them —
// this is the generator's convention (equal objects still hash equally, since equal raw
// tagged fields imply equal named fields). duplicate() deep-copies the results list.
// The nested AlterUserScramCredentialsResult below is one per-user outcome: user name,
// int16 error code, and a nullable error message (COMPACT_NULLABLE_STRING in SCHEMA_0).
} @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + throttleTimeMs; hashCode = 31 * hashCode + (results == null ? 0 : results.hashCode()); return hashCode; } @Override public AlterUserScramCredentialsResponseData duplicate() { AlterUserScramCredentialsResponseData _duplicate = new AlterUserScramCredentialsResponseData(); _duplicate.throttleTimeMs = throttleTimeMs; ArrayList<AlterUserScramCredentialsResult> newResults = new ArrayList<AlterUserScramCredentialsResult>(results.size()); for (AlterUserScramCredentialsResult _element : results) { newResults.add(_element.duplicate()); } _duplicate.results = newResults; return _duplicate; } @Override public String toString() { return "AlterUserScramCredentialsResponseData(" + "throttleTimeMs=" + throttleTimeMs + ", results=" + MessageUtil.deepToString(results.iterator()) + ")"; } public int throttleTimeMs() { return this.throttleTimeMs; } public List<AlterUserScramCredentialsResult> results() { return this.results; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterUserScramCredentialsResponseData setThrottleTimeMs(int v) { this.throttleTimeMs = v; return this; } public AlterUserScramCredentialsResponseData setResults(List<AlterUserScramCredentialsResult> v) { this.results = v; return this; } public static class AlterUserScramCredentialsResult implements Message { String user; short errorCode; String errorMessage; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("user", Type.COMPACT_STRING, "The user name."), new Field("error_code", Type.INT16, "The error code."), new Field("error_message", Type.COMPACT_NULLABLE_STRING, "The error message, if any."), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; 
// Only version 0 exists for this message. read() decodes compact strings: the wire length
// is readUnsignedVarint() - 1, so -1 means null (rejected for the non-nullable 'user',
// accepted for 'errorMessage'); lengths above 0x7fff (the generator's string cap) are
// rejected. write() emits via the ObjectSerializationCache so UTF-8 bytes computed in
// addSize() are reused rather than re-encoded.
public static final short HIGHEST_SUPPORTED_VERSION = 0; public AlterUserScramCredentialsResult(Readable _readable, short _version) { read(_readable, _version); } public AlterUserScramCredentialsResult() { this.user = ""; this.errorCode = (short) 0; this.errorMessage = ""; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of AlterUserScramCredentialsResult"); } { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field user was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field user had invalid length " + length); } else { this.user = _readable.readString(length); } } this.errorCode = _readable.readShort(); { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { this.errorMessage = null; } else if (length > 0x7fff) { throw new RuntimeException("string field errorMessage had invalid length " + length); } else { this.errorMessage = _readable.readString(length); } } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(user); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } _writable.writeShort(errorCode); if (errorMessage == null) { _writable.writeUnsignedVarint(0); } else { byte[] 
// addSize() is also the validation pass: it UTF-8-encodes each string once, enforces the
// 0x7fff length cap, and caches the bytes for write(). Note write() (unlike read()) does
// not version-check; addSize() throws UnsupportedVersionException for _version > 0 first.
_stringBytes = _cache.getSerializedValue(errorMessage); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of AlterUserScramCredentialsResult"); } { byte[] _stringBytes = user.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'user' field is too long to be serialized"); } _cache.cacheSerializedValue(user, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } _size.addBytes(2); if (errorMessage == null) { _size.addBytes(1); } else { byte[] _stringBytes = errorMessage.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'errorMessage' field is too long to be serialized"); } _cache.cacheSerializedValue(errorMessage, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); } @Override public boolean equals(Object obj) { if (!(obj instanceof AlterUserScramCredentialsResult)) return false; AlterUserScramCredentialsResult other = (AlterUserScramCredentialsResult) obj; if (this.user == null) { if 
// Remainder of the nested class: null-safe equals (plus raw-tagged-field comparison),
// hashCode over the three named fields, shallow duplicate (String fields are immutable,
// so copying references is a correct deep copy), toString, accessors, lazily-initialized
// unknownTaggedFields(), and fluent setters. Final "} }" closes the nested class and the
// enclosing AlterUserScramCredentialsResponseData.
(other.user != null) return false; } else { if (!this.user.equals(other.user)) return false; } if (errorCode != other.errorCode) return false; if (this.errorMessage == null) { if (other.errorMessage != null) return false; } else { if (!this.errorMessage.equals(other.errorMessage)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (user == null ? 0 : user.hashCode()); hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (errorMessage == null ? 0 : errorMessage.hashCode()); return hashCode; } @Override public AlterUserScramCredentialsResult duplicate() { AlterUserScramCredentialsResult _duplicate = new AlterUserScramCredentialsResult(); _duplicate.user = user; _duplicate.errorCode = errorCode; if (errorMessage == null) { _duplicate.errorMessage = null; } else { _duplicate.errorMessage = errorMessage; } return _duplicate; } @Override public String toString() { return "AlterUserScramCredentialsResult(" + "user=" + ((user == null) ? "null" : "'" + user.toString() + "'") + ", errorCode=" + errorCode + ", errorMessage=" + ((errorMessage == null) ? "null" : "'" + errorMessage.toString() + "'") + ")"; } public String user() { return this.user; } public short errorCode() { return this.errorCode; } public String errorMessage() { return this.errorMessage; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public AlterUserScramCredentialsResult setUser(String v) { this.user = v; return this; } public AlterUserScramCredentialsResult setErrorCode(short v) { this.errorCode = v; return this; } public AlterUserScramCredentialsResult setErrorMessage(String v) { this.errorMessage = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/AlterUserScramCredentialsResponseDataJsonConverter.java
// Generated JSON converter for AlterUserScramCredentialsResponseData: read() maps a
// JsonNode (camelCase field names, e.g. "throttleTimeMs") onto the message object,
// throwing RuntimeException for missing mandatory fields or wrong JSON node types;
// write() is the inverse. Used by Kafka's request/response logging and testing paths,
// not the binary wire protocol.
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.NullNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.AlterUserScramCredentialsResponseData.*; public class AlterUserScramCredentialsResponseDataJsonConverter { public static AlterUserScramCredentialsResponseData read(JsonNode _node, short _version) { AlterUserScramCredentialsResponseData _object = new AlterUserScramCredentialsResponseData(); JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs"); if (_throttleTimeMsNode == null) { throw new RuntimeException("AlterUserScramCredentialsResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version); } else { 
// "results" must be a JSON array; each element is delegated to the nested result
// converter. The _serializeRecords flag in write() is part of the generated converter
// API but is simply forwarded here (this message carries no record data). The
// two-argument write() overload defaults it to true.
_object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "AlterUserScramCredentialsResponseData"); } JsonNode _resultsNode = _node.get("results"); if (_resultsNode == null) { throw new RuntimeException("AlterUserScramCredentialsResponseData: unable to locate field 'results', which is mandatory in version " + _version); } else { if (!_resultsNode.isArray()) { throw new RuntimeException("AlterUserScramCredentialsResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<AlterUserScramCredentialsResult> _collection = new ArrayList<AlterUserScramCredentialsResult>(_resultsNode.size()); _object.results = _collection; for (JsonNode _element : _resultsNode) { _collection.add(AlterUserScramCredentialsResultJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(AlterUserScramCredentialsResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs)); ArrayNode _resultsArray = new ArrayNode(JsonNodeFactory.instance); for (AlterUserScramCredentialsResult _element : _object.results) { _resultsArray.add(AlterUserScramCredentialsResultJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("results", _resultsArray); return _node; } public static JsonNode write(AlterUserScramCredentialsResponseData _object, short _version) { return write(_object, _version, true); } public static class AlterUserScramCredentialsResultJsonConverter { public static AlterUserScramCredentialsResult read(JsonNode _node, short _version) { AlterUserScramCredentialsResult _object = new AlterUserScramCredentialsResult(); JsonNode _userNode = _node.get("user"); if (_userNode == null) { throw new RuntimeException("AlterUserScramCredentialsResult: unable to locate field 'user', which is mandatory in version " + _version); } else { if (!_userNode.isTextual()) { throw new 
// Per-result converter tail: 'user' must be textual, 'errorCode' is parsed as int16 via
// MessageUtil, and 'errorMessage' is nullable (an explicit JSON null maps to a null
// String; write() emits NullNode.instance for it). All three fields are mandatory keys —
// even errorMessage must be present, though it may be null.
RuntimeException("AlterUserScramCredentialsResult expected a string type, but got " + _node.getNodeType()); } _object.user = _userNode.asText(); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("AlterUserScramCredentialsResult: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "AlterUserScramCredentialsResult"); } JsonNode _errorMessageNode = _node.get("errorMessage"); if (_errorMessageNode == null) { throw new RuntimeException("AlterUserScramCredentialsResult: unable to locate field 'errorMessage', which is mandatory in version " + _version); } else { if (_errorMessageNode.isNull()) { _object.errorMessage = null; } else { if (!_errorMessageNode.isTextual()) { throw new RuntimeException("AlterUserScramCredentialsResult expected a string type, but got " + _node.getNodeType()); } _object.errorMessage = _errorMessageNode.asText(); } } return _object; } public static JsonNode write(AlterUserScramCredentialsResult _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("user", new TextNode(_object.user)); _node.set("errorCode", new ShortNode(_object.errorCode)); if (_object.errorMessage == null) { _node.set("errorMessage", NullNode.instance); } else { _node.set("errorMessage", new TextNode(_object.errorMessage)); } return _node; } public static JsonNode write(AlterUserScramCredentialsResult _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ApiMessageType.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.util.EnumSet; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.types.Schema; public enum ApiMessageType { PRODUCE("Produce", (short) 0, ProduceRequestData.SCHEMAS, ProduceResponseData.SCHEMAS, (short) 0, (short) 9, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), FETCH("Fetch", (short) 1, FetchRequestData.SCHEMAS, FetchResponseData.SCHEMAS, (short) 0, (short) 15, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), LIST_OFFSETS("ListOffsets", (short) 2, ListOffsetsRequestData.SCHEMAS, ListOffsetsResponseData.SCHEMAS, (short) 0, (short) 8, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), METADATA("Metadata", (short) 3, MetadataRequestData.SCHEMAS, MetadataResponseData.SCHEMAS, (short) 0, (short) 12, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), LEADER_AND_ISR("LeaderAndIsr", (short) 4, LeaderAndIsrRequestData.SCHEMAS, LeaderAndIsrResponseData.SCHEMAS, (short) 0, (short) 7, 
EnumSet.of(ListenerType.ZK_BROKER), false), STOP_REPLICA("StopReplica", (short) 5, StopReplicaRequestData.SCHEMAS, StopReplicaResponseData.SCHEMAS, (short) 0, (short) 4, EnumSet.of(ListenerType.ZK_BROKER), false), UPDATE_METADATA("UpdateMetadata", (short) 6, UpdateMetadataRequestData.SCHEMAS, UpdateMetadataResponseData.SCHEMAS, (short) 0, (short) 8, EnumSet.of(ListenerType.ZK_BROKER), false), CONTROLLED_SHUTDOWN("ControlledShutdown", (short) 7, ControlledShutdownRequestData.SCHEMAS, ControlledShutdownResponseData.SCHEMAS, (short) 0, (short) 3, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.CONTROLLER), false), OFFSET_COMMIT("OffsetCommit", (short) 8, OffsetCommitRequestData.SCHEMAS, OffsetCommitResponseData.SCHEMAS, (short) 0, (short) 8, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), OFFSET_FETCH("OffsetFetch", (short) 9, OffsetFetchRequestData.SCHEMAS, OffsetFetchResponseData.SCHEMAS, (short) 0, (short) 8, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), FIND_COORDINATOR("FindCoordinator", (short) 10, FindCoordinatorRequestData.SCHEMAS, FindCoordinatorResponseData.SCHEMAS, (short) 0, (short) 4, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), JOIN_GROUP("JoinGroup", (short) 11, JoinGroupRequestData.SCHEMAS, JoinGroupResponseData.SCHEMAS, (short) 0, (short) 9, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), HEARTBEAT("Heartbeat", (short) 12, HeartbeatRequestData.SCHEMAS, HeartbeatResponseData.SCHEMAS, (short) 0, (short) 4, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), LEAVE_GROUP("LeaveGroup", (short) 13, LeaveGroupRequestData.SCHEMAS, LeaveGroupResponseData.SCHEMAS, (short) 0, (short) 5, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), SYNC_GROUP("SyncGroup", (short) 14, SyncGroupRequestData.SCHEMAS, SyncGroupResponseData.SCHEMAS, (short) 0, (short) 5, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), DESCRIBE_GROUPS("DescribeGroups", (short) 15, 
DescribeGroupsRequestData.SCHEMAS, DescribeGroupsResponseData.SCHEMAS, (short) 0, (short) 5, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), LIST_GROUPS("ListGroups", (short) 16, ListGroupsRequestData.SCHEMAS, ListGroupsResponseData.SCHEMAS, (short) 0, (short) 4, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), SASL_HANDSHAKE("SaslHandshake", (short) 17, SaslHandshakeRequestData.SCHEMAS, SaslHandshakeResponseData.SCHEMAS, (short) 0, (short) 1, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), API_VERSIONS("ApiVersions", (short) 18, ApiVersionsRequestData.SCHEMAS, ApiVersionsResponseData.SCHEMAS, (short) 0, (short) 3, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), CREATE_TOPICS("CreateTopics", (short) 19, CreateTopicsRequestData.SCHEMAS, CreateTopicsResponseData.SCHEMAS, (short) 0, (short) 7, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), DELETE_TOPICS("DeleteTopics", (short) 20, DeleteTopicsRequestData.SCHEMAS, DeleteTopicsResponseData.SCHEMAS, (short) 0, (short) 6, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), DELETE_RECORDS("DeleteRecords", (short) 21, DeleteRecordsRequestData.SCHEMAS, DeleteRecordsResponseData.SCHEMAS, (short) 0, (short) 2, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), INIT_PRODUCER_ID("InitProducerId", (short) 22, InitProducerIdRequestData.SCHEMAS, InitProducerIdResponseData.SCHEMAS, (short) 0, (short) 4, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), OFFSET_FOR_LEADER_EPOCH("OffsetForLeaderEpoch", (short) 23, OffsetForLeaderEpochRequestData.SCHEMAS, OffsetForLeaderEpochResponseData.SCHEMAS, (short) 0, (short) 4, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), ADD_PARTITIONS_TO_TXN("AddPartitionsToTxn", (short) 24, AddPartitionsToTxnRequestData.SCHEMAS, AddPartitionsToTxnResponseData.SCHEMAS, 
(short) 0, (short) 4, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), true), ADD_OFFSETS_TO_TXN("AddOffsetsToTxn", (short) 25, AddOffsetsToTxnRequestData.SCHEMAS, AddOffsetsToTxnResponseData.SCHEMAS, (short) 0, (short) 3, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), END_TXN("EndTxn", (short) 26, EndTxnRequestData.SCHEMAS, EndTxnResponseData.SCHEMAS, (short) 0, (short) 3, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), WRITE_TXN_MARKERS("WriteTxnMarkers", (short) 27, WriteTxnMarkersRequestData.SCHEMAS, WriteTxnMarkersResponseData.SCHEMAS, (short) 0, (short) 1, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), TXN_OFFSET_COMMIT("TxnOffsetCommit", (short) 28, TxnOffsetCommitRequestData.SCHEMAS, TxnOffsetCommitResponseData.SCHEMAS, (short) 0, (short) 3, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), DESCRIBE_ACLS("DescribeAcls", (short) 29, DescribeAclsRequestData.SCHEMAS, DescribeAclsResponseData.SCHEMAS, (short) 0, (short) 3, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), CREATE_ACLS("CreateAcls", (short) 30, CreateAclsRequestData.SCHEMAS, CreateAclsResponseData.SCHEMAS, (short) 0, (short) 3, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), DELETE_ACLS("DeleteAcls", (short) 31, DeleteAclsRequestData.SCHEMAS, DeleteAclsResponseData.SCHEMAS, (short) 0, (short) 3, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), DESCRIBE_CONFIGS("DescribeConfigs", (short) 32, DescribeConfigsRequestData.SCHEMAS, DescribeConfigsResponseData.SCHEMAS, (short) 0, (short) 4, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), ALTER_CONFIGS("AlterConfigs", (short) 33, AlterConfigsRequestData.SCHEMAS, AlterConfigsResponseData.SCHEMAS, (short) 0, (short) 2, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), 
ALTER_REPLICA_LOG_DIRS("AlterReplicaLogDirs", (short) 34, AlterReplicaLogDirsRequestData.SCHEMAS, AlterReplicaLogDirsResponseData.SCHEMAS, (short) 0, (short) 2, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), DESCRIBE_LOG_DIRS("DescribeLogDirs", (short) 35, DescribeLogDirsRequestData.SCHEMAS, DescribeLogDirsResponseData.SCHEMAS, (short) 0, (short) 4, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), SASL_AUTHENTICATE("SaslAuthenticate", (short) 36, SaslAuthenticateRequestData.SCHEMAS, SaslAuthenticateResponseData.SCHEMAS, (short) 0, (short) 2, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), CREATE_PARTITIONS("CreatePartitions", (short) 37, CreatePartitionsRequestData.SCHEMAS, CreatePartitionsResponseData.SCHEMAS, (short) 0, (short) 3, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), CREATE_DELEGATION_TOKEN("CreateDelegationToken", (short) 38, CreateDelegationTokenRequestData.SCHEMAS, CreateDelegationTokenResponseData.SCHEMAS, (short) 0, (short) 3, EnumSet.of(ListenerType.ZK_BROKER), false), RENEW_DELEGATION_TOKEN("RenewDelegationToken", (short) 39, RenewDelegationTokenRequestData.SCHEMAS, RenewDelegationTokenResponseData.SCHEMAS, (short) 0, (short) 2, EnumSet.of(ListenerType.ZK_BROKER), false), EXPIRE_DELEGATION_TOKEN("ExpireDelegationToken", (short) 40, ExpireDelegationTokenRequestData.SCHEMAS, ExpireDelegationTokenResponseData.SCHEMAS, (short) 0, (short) 2, EnumSet.of(ListenerType.ZK_BROKER), false), DESCRIBE_DELEGATION_TOKEN("DescribeDelegationToken", (short) 41, DescribeDelegationTokenRequestData.SCHEMAS, DescribeDelegationTokenResponseData.SCHEMAS, (short) 0, (short) 3, EnumSet.of(ListenerType.ZK_BROKER), false), DELETE_GROUPS("DeleteGroups", (short) 42, DeleteGroupsRequestData.SCHEMAS, DeleteGroupsResponseData.SCHEMAS, (short) 0, (short) 2, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), ELECT_LEADERS("ElectLeaders", (short) 43, 
ElectLeadersRequestData.SCHEMAS, ElectLeadersResponseData.SCHEMAS, (short) 0, (short) 2, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), INCREMENTAL_ALTER_CONFIGS("IncrementalAlterConfigs", (short) 44, IncrementalAlterConfigsRequestData.SCHEMAS, IncrementalAlterConfigsResponseData.SCHEMAS, (short) 0, (short) 1, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), ALTER_PARTITION_REASSIGNMENTS("AlterPartitionReassignments", (short) 45, AlterPartitionReassignmentsRequestData.SCHEMAS, AlterPartitionReassignmentsResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.BROKER, ListenerType.CONTROLLER, ListenerType.ZK_BROKER), false), LIST_PARTITION_REASSIGNMENTS("ListPartitionReassignments", (short) 46, ListPartitionReassignmentsRequestData.SCHEMAS, ListPartitionReassignmentsResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.BROKER, ListenerType.CONTROLLER, ListenerType.ZK_BROKER), false), OFFSET_DELETE("OffsetDelete", (short) 47, OffsetDeleteRequestData.SCHEMAS, OffsetDeleteResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), DESCRIBE_CLIENT_QUOTAS("DescribeClientQuotas", (short) 48, DescribeClientQuotasRequestData.SCHEMAS, DescribeClientQuotasResponseData.SCHEMAS, (short) 0, (short) 1, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), ALTER_CLIENT_QUOTAS("AlterClientQuotas", (short) 49, AlterClientQuotasRequestData.SCHEMAS, AlterClientQuotasResponseData.SCHEMAS, (short) 0, (short) 1, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), DESCRIBE_USER_SCRAM_CREDENTIALS("DescribeUserScramCredentials", (short) 50, DescribeUserScramCredentialsRequestData.SCHEMAS, DescribeUserScramCredentialsResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), 
ALTER_USER_SCRAM_CREDENTIALS("AlterUserScramCredentials", (short) 51, AlterUserScramCredentialsRequestData.SCHEMAS, AlterUserScramCredentialsResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), VOTE("Vote", (short) 52, VoteRequestData.SCHEMAS, VoteResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.CONTROLLER), false), BEGIN_QUORUM_EPOCH("BeginQuorumEpoch", (short) 53, BeginQuorumEpochRequestData.SCHEMAS, BeginQuorumEpochResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.CONTROLLER), false), END_QUORUM_EPOCH("EndQuorumEpoch", (short) 54, EndQuorumEpochRequestData.SCHEMAS, EndQuorumEpochResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.CONTROLLER), false), DESCRIBE_QUORUM("DescribeQuorum", (short) 55, DescribeQuorumRequestData.SCHEMAS, DescribeQuorumResponseData.SCHEMAS, (short) 0, (short) 1, EnumSet.of(ListenerType.BROKER, ListenerType.CONTROLLER), false), ALTER_PARTITION("AlterPartition", (short) 56, AlterPartitionRequestData.SCHEMAS, AlterPartitionResponseData.SCHEMAS, (short) 0, (short) 3, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.CONTROLLER), false), UPDATE_FEATURES("UpdateFeatures", (short) 57, UpdateFeaturesRequestData.SCHEMAS, UpdateFeaturesResponseData.SCHEMAS, (short) 0, (short) 1, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER, ListenerType.CONTROLLER), false), ENVELOPE("Envelope", (short) 58, EnvelopeRequestData.SCHEMAS, EnvelopeResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.CONTROLLER, ListenerType.ZK_BROKER), false), FETCH_SNAPSHOT("FetchSnapshot", (short) 59, FetchSnapshotRequestData.SCHEMAS, FetchSnapshotResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.CONTROLLER), false), DESCRIBE_CLUSTER("DescribeCluster", (short) 60, DescribeClusterRequestData.SCHEMAS, DescribeClusterResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.ZK_BROKER, 
ListenerType.BROKER), false), DESCRIBE_PRODUCERS("DescribeProducers", (short) 61, DescribeProducersRequestData.SCHEMAS, DescribeProducersResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), BROKER_REGISTRATION("BrokerRegistration", (short) 62, BrokerRegistrationRequestData.SCHEMAS, BrokerRegistrationResponseData.SCHEMAS, (short) 0, (short) 1, EnumSet.of(ListenerType.CONTROLLER), false), BROKER_HEARTBEAT("BrokerHeartbeat", (short) 63, BrokerHeartbeatRequestData.SCHEMAS, BrokerHeartbeatResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.CONTROLLER), false), UNREGISTER_BROKER("UnregisterBroker", (short) 64, UnregisterBrokerRequestData.SCHEMAS, UnregisterBrokerResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.BROKER, ListenerType.CONTROLLER), false), DESCRIBE_TRANSACTIONS("DescribeTransactions", (short) 65, DescribeTransactionsRequestData.SCHEMAS, DescribeTransactionsResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), LIST_TRANSACTIONS("ListTransactions", (short) 66, ListTransactionsRequestData.SCHEMAS, ListTransactionsResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), false), ALLOCATE_PRODUCER_IDS("AllocateProducerIds", (short) 67, AllocateProducerIdsRequestData.SCHEMAS, AllocateProducerIdsResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.CONTROLLER), false), CONSUMER_GROUP_HEARTBEAT("ConsumerGroupHeartbeat", (short) 68, ConsumerGroupHeartbeatRequestData.SCHEMAS, ConsumerGroupHeartbeatResponseData.SCHEMAS, (short) 0, (short) 0, EnumSet.of(ListenerType.ZK_BROKER, ListenerType.BROKER), true); public final String name; private final short apiKey; private final Schema[] requestSchemas; private final Schema[] responseSchemas; private final short lowestSupportedVersion; private final short highestSupportedVersion; private final 
EnumSet<ListenerType> listeners; private final boolean latestVersionUnstable; ApiMessageType(String name, short apiKey, Schema[] requestSchemas, Schema[] responseSchemas, short lowestSupportedVersion, short highestSupportedVersion, EnumSet<ListenerType> listeners, boolean latestVersionUnstable) { this.name = name; this.apiKey = apiKey; this.requestSchemas = requestSchemas; this.responseSchemas = responseSchemas; this.lowestSupportedVersion = lowestSupportedVersion; this.highestSupportedVersion = highestSupportedVersion; this.listeners = listeners; this.latestVersionUnstable = latestVersionUnstable; } public static ApiMessageType fromApiKey(short apiKey) { switch (apiKey) { case 0: return PRODUCE; case 1: return FETCH; case 2: return LIST_OFFSETS; case 3: return METADATA; case 4: return LEADER_AND_ISR; case 5: return STOP_REPLICA; case 6: return UPDATE_METADATA; case 7: return CONTROLLED_SHUTDOWN; case 8: return OFFSET_COMMIT; case 9: return OFFSET_FETCH; case 10: return FIND_COORDINATOR; case 11: return JOIN_GROUP; case 12: return HEARTBEAT; case 13: return LEAVE_GROUP; case 14: return SYNC_GROUP; case 15: return DESCRIBE_GROUPS; case 16: return LIST_GROUPS; case 17: return SASL_HANDSHAKE; case 18: return API_VERSIONS; case 19: return CREATE_TOPICS; case 20: return DELETE_TOPICS; case 21: return DELETE_RECORDS; case 22: return INIT_PRODUCER_ID; case 23: return OFFSET_FOR_LEADER_EPOCH; case 24: return ADD_PARTITIONS_TO_TXN; case 25: return ADD_OFFSETS_TO_TXN; case 26: return END_TXN; case 27: return WRITE_TXN_MARKERS; case 28: return TXN_OFFSET_COMMIT; case 29: return DESCRIBE_ACLS; case 30: return CREATE_ACLS; case 31: return DELETE_ACLS; case 32: return DESCRIBE_CONFIGS; case 33: return ALTER_CONFIGS; case 34: return ALTER_REPLICA_LOG_DIRS; case 35: return DESCRIBE_LOG_DIRS; case 36: return SASL_AUTHENTICATE; case 37: return CREATE_PARTITIONS; case 38: return CREATE_DELEGATION_TOKEN; case 39: return RENEW_DELEGATION_TOKEN; case 40: return EXPIRE_DELEGATION_TOKEN; 
case 41: return DESCRIBE_DELEGATION_TOKEN; case 42: return DELETE_GROUPS; case 43: return ELECT_LEADERS; case 44: return INCREMENTAL_ALTER_CONFIGS; case 45: return ALTER_PARTITION_REASSIGNMENTS; case 46: return LIST_PARTITION_REASSIGNMENTS; case 47: return OFFSET_DELETE; case 48: return DESCRIBE_CLIENT_QUOTAS; case 49: return ALTER_CLIENT_QUOTAS; case 50: return DESCRIBE_USER_SCRAM_CREDENTIALS; case 51: return ALTER_USER_SCRAM_CREDENTIALS; case 52: return VOTE; case 53: return BEGIN_QUORUM_EPOCH; case 54: return END_QUORUM_EPOCH; case 55: return DESCRIBE_QUORUM; case 56: return ALTER_PARTITION; case 57: return UPDATE_FEATURES; case 58: return ENVELOPE; case 59: return FETCH_SNAPSHOT; case 60: return DESCRIBE_CLUSTER; case 61: return DESCRIBE_PRODUCERS; case 62: return BROKER_REGISTRATION; case 63: return BROKER_HEARTBEAT; case 64: return UNREGISTER_BROKER; case 65: return DESCRIBE_TRANSACTIONS; case 66: return LIST_TRANSACTIONS; case 67: return ALLOCATE_PRODUCER_IDS; case 68: return CONSUMER_GROUP_HEARTBEAT; default: throw new UnsupportedVersionException("Unsupported API key " + apiKey); } } public ApiMessage newRequest() { switch (apiKey) { case 0: return new ProduceRequestData(); case 1: return new FetchRequestData(); case 2: return new ListOffsetsRequestData(); case 3: return new MetadataRequestData(); case 4: return new LeaderAndIsrRequestData(); case 5: return new StopReplicaRequestData(); case 6: return new UpdateMetadataRequestData(); case 7: return new ControlledShutdownRequestData(); case 8: return new OffsetCommitRequestData(); case 9: return new OffsetFetchRequestData(); case 10: return new FindCoordinatorRequestData(); case 11: return new JoinGroupRequestData(); case 12: return new HeartbeatRequestData(); case 13: return new LeaveGroupRequestData(); case 14: return new SyncGroupRequestData(); case 15: return new DescribeGroupsRequestData(); case 16: return new ListGroupsRequestData(); case 17: return new SaslHandshakeRequestData(); case 18: return new 
ApiVersionsRequestData(); case 19: return new CreateTopicsRequestData(); case 20: return new DeleteTopicsRequestData(); case 21: return new DeleteRecordsRequestData(); case 22: return new InitProducerIdRequestData(); case 23: return new OffsetForLeaderEpochRequestData(); case 24: return new AddPartitionsToTxnRequestData(); case 25: return new AddOffsetsToTxnRequestData(); case 26: return new EndTxnRequestData(); case 27: return new WriteTxnMarkersRequestData(); case 28: return new TxnOffsetCommitRequestData(); case 29: return new DescribeAclsRequestData(); case 30: return new CreateAclsRequestData(); case 31: return new DeleteAclsRequestData(); case 32: return new DescribeConfigsRequestData(); case 33: return new AlterConfigsRequestData(); case 34: return new AlterReplicaLogDirsRequestData(); case 35: return new DescribeLogDirsRequestData(); case 36: return new SaslAuthenticateRequestData(); case 37: return new CreatePartitionsRequestData(); case 38: return new CreateDelegationTokenRequestData(); case 39: return new RenewDelegationTokenRequestData(); case 40: return new ExpireDelegationTokenRequestData(); case 41: return new DescribeDelegationTokenRequestData(); case 42: return new DeleteGroupsRequestData(); case 43: return new ElectLeadersRequestData(); case 44: return new IncrementalAlterConfigsRequestData(); case 45: return new AlterPartitionReassignmentsRequestData(); case 46: return new ListPartitionReassignmentsRequestData(); case 47: return new OffsetDeleteRequestData(); case 48: return new DescribeClientQuotasRequestData(); case 49: return new AlterClientQuotasRequestData(); case 50: return new DescribeUserScramCredentialsRequestData(); case 51: return new AlterUserScramCredentialsRequestData(); case 52: return new VoteRequestData(); case 53: return new BeginQuorumEpochRequestData(); case 54: return new EndQuorumEpochRequestData(); case 55: return new DescribeQuorumRequestData(); case 56: return new AlterPartitionRequestData(); case 57: return new 
UpdateFeaturesRequestData(); case 58: return new EnvelopeRequestData(); case 59: return new FetchSnapshotRequestData(); case 60: return new DescribeClusterRequestData(); case 61: return new DescribeProducersRequestData(); case 62: return new BrokerRegistrationRequestData(); case 63: return new BrokerHeartbeatRequestData(); case 64: return new UnregisterBrokerRequestData(); case 65: return new DescribeTransactionsRequestData(); case 66: return new ListTransactionsRequestData(); case 67: return new AllocateProducerIdsRequestData(); case 68: return new ConsumerGroupHeartbeatRequestData(); default: throw new UnsupportedVersionException("Unsupported request API key " + apiKey); } } public ApiMessage newResponse() { switch (apiKey) { case 0: return new ProduceResponseData(); case 1: return new FetchResponseData(); case 2: return new ListOffsetsResponseData(); case 3: return new MetadataResponseData(); case 4: return new LeaderAndIsrResponseData(); case 5: return new StopReplicaResponseData(); case 6: return new UpdateMetadataResponseData(); case 7: return new ControlledShutdownResponseData(); case 8: return new OffsetCommitResponseData(); case 9: return new OffsetFetchResponseData(); case 10: return new FindCoordinatorResponseData(); case 11: return new JoinGroupResponseData(); case 12: return new HeartbeatResponseData(); case 13: return new LeaveGroupResponseData(); case 14: return new SyncGroupResponseData(); case 15: return new DescribeGroupsResponseData(); case 16: return new ListGroupsResponseData(); case 17: return new SaslHandshakeResponseData(); case 18: return new ApiVersionsResponseData(); case 19: return new CreateTopicsResponseData(); case 20: return new DeleteTopicsResponseData(); case 21: return new DeleteRecordsResponseData(); case 22: return new InitProducerIdResponseData(); case 23: return new OffsetForLeaderEpochResponseData(); case 24: return new AddPartitionsToTxnResponseData(); case 25: return new AddOffsetsToTxnResponseData(); case 26: return new 
EndTxnResponseData(); case 27: return new WriteTxnMarkersResponseData(); case 28: return new TxnOffsetCommitResponseData(); case 29: return new DescribeAclsResponseData(); case 30: return new CreateAclsResponseData(); case 31: return new DeleteAclsResponseData(); case 32: return new DescribeConfigsResponseData(); case 33: return new AlterConfigsResponseData(); case 34: return new AlterReplicaLogDirsResponseData(); case 35: return new DescribeLogDirsResponseData(); case 36: return new SaslAuthenticateResponseData(); case 37: return new CreatePartitionsResponseData(); case 38: return new CreateDelegationTokenResponseData(); case 39: return new RenewDelegationTokenResponseData(); case 40: return new ExpireDelegationTokenResponseData(); case 41: return new DescribeDelegationTokenResponseData(); case 42: return new DeleteGroupsResponseData(); case 43: return new ElectLeadersResponseData(); case 44: return new IncrementalAlterConfigsResponseData(); case 45: return new AlterPartitionReassignmentsResponseData(); case 46: return new ListPartitionReassignmentsResponseData(); case 47: return new OffsetDeleteResponseData(); case 48: return new DescribeClientQuotasResponseData(); case 49: return new AlterClientQuotasResponseData(); case 50: return new DescribeUserScramCredentialsResponseData(); case 51: return new AlterUserScramCredentialsResponseData(); case 52: return new VoteResponseData(); case 53: return new BeginQuorumEpochResponseData(); case 54: return new EndQuorumEpochResponseData(); case 55: return new DescribeQuorumResponseData(); case 56: return new AlterPartitionResponseData(); case 57: return new UpdateFeaturesResponseData(); case 58: return new EnvelopeResponseData(); case 59: return new FetchSnapshotResponseData(); case 60: return new DescribeClusterResponseData(); case 61: return new DescribeProducersResponseData(); case 62: return new BrokerRegistrationResponseData(); case 63: return new BrokerHeartbeatResponseData(); case 64: return new 
UnregisterBrokerResponseData(); case 65: return new DescribeTransactionsResponseData(); case 66: return new ListTransactionsResponseData(); case 67: return new AllocateProducerIdsResponseData(); case 68: return new ConsumerGroupHeartbeatResponseData(); default: throw new UnsupportedVersionException("Unsupported response API key " + apiKey); } } public short lowestSupportedVersion() { return this.lowestSupportedVersion; } public short highestSupportedVersion(boolean enableUnstableLastVersion) { if (!this.latestVersionUnstable || enableUnstableLastVersion) { return this.highestSupportedVersion; } else { // A negative value means that the API has no enabled versions. return (short) (this.highestSupportedVersion - 1); } } public EnumSet<ListenerType> listeners() { return this.listeners; } public boolean latestVersionUnstable() { return this.latestVersionUnstable; } public short apiKey() { return this.apiKey; } public Schema[] requestSchemas() { return this.requestSchemas; } public Schema[] responseSchemas() { return this.responseSchemas; } @Override public String toString() { return this.name(); } public short requestHeaderVersion(short _version) { switch (apiKey) { case 0: // Produce if (_version >= 9) { return (short) 2; } else { return (short) 1; } case 1: // Fetch if (_version >= 12) { return (short) 2; } else { return (short) 1; } case 2: // ListOffsets if (_version >= 6) { return (short) 2; } else { return (short) 1; } case 3: // Metadata if (_version >= 9) { return (short) 2; } else { return (short) 1; } case 4: // LeaderAndIsr if (_version >= 4) { return (short) 2; } else { return (short) 1; } case 5: // StopReplica if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 6: // UpdateMetadata if (_version >= 6) { return (short) 2; } else { return (short) 1; } case 7: // ControlledShutdown // Version 0 of ControlledShutdownRequest has a non-standard request header // which does not include clientId. 
Version 1 of ControlledShutdownRequest // and later use the standard request header. if (_version == 0) { return (short) 0; } if (_version >= 3) { return (short) 2; } else { return (short) 1; } case 8: // OffsetCommit if (_version >= 8) { return (short) 2; } else { return (short) 1; } case 9: // OffsetFetch if (_version >= 6) { return (short) 2; } else { return (short) 1; } case 10: // FindCoordinator if (_version >= 3) { return (short) 2; } else { return (short) 1; } case 11: // JoinGroup if (_version >= 6) { return (short) 2; } else { return (short) 1; } case 12: // Heartbeat if (_version >= 4) { return (short) 2; } else { return (short) 1; } case 13: // LeaveGroup if (_version >= 4) { return (short) 2; } else { return (short) 1; } case 14: // SyncGroup if (_version >= 4) { return (short) 2; } else { return (short) 1; } case 15: // DescribeGroups if (_version >= 5) { return (short) 2; } else { return (short) 1; } case 16: // ListGroups if (_version >= 3) { return (short) 2; } else { return (short) 1; } case 17: // SaslHandshake return (short) 1; case 18: // ApiVersions if (_version >= 3) { return (short) 2; } else { return (short) 1; } case 19: // CreateTopics if (_version >= 5) { return (short) 2; } else { return (short) 1; } case 20: // DeleteTopics if (_version >= 4) { return (short) 2; } else { return (short) 1; } case 21: // DeleteRecords if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 22: // InitProducerId if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 23: // OffsetForLeaderEpoch if (_version >= 4) { return (short) 2; } else { return (short) 1; } case 24: // AddPartitionsToTxn if (_version >= 3) { return (short) 2; } else { return (short) 1; } case 25: // AddOffsetsToTxn if (_version >= 3) { return (short) 2; } else { return (short) 1; } case 26: // EndTxn if (_version >= 3) { return (short) 2; } else { return (short) 1; } case 27: // WriteTxnMarkers if (_version >= 1) { return (short) 2; } else { return 
(short) 1; } case 28: // TxnOffsetCommit if (_version >= 3) { return (short) 2; } else { return (short) 1; } case 29: // DescribeAcls if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 30: // CreateAcls if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 31: // DeleteAcls if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 32: // DescribeConfigs if (_version >= 4) { return (short) 2; } else { return (short) 1; } case 33: // AlterConfigs if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 34: // AlterReplicaLogDirs if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 35: // DescribeLogDirs if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 36: // SaslAuthenticate if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 37: // CreatePartitions if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 38: // CreateDelegationToken if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 39: // RenewDelegationToken if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 40: // ExpireDelegationToken if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 41: // DescribeDelegationToken if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 42: // DeleteGroups if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 43: // ElectLeaders if (_version >= 2) { return (short) 2; } else { return (short) 1; } case 44: // IncrementalAlterConfigs if (_version >= 1) { return (short) 2; } else { return (short) 1; } case 45: // AlterPartitionReassignments return (short) 2; case 46: // ListPartitionReassignments return (short) 2; case 47: // OffsetDelete return (short) 1; case 48: // DescribeClientQuotas if (_version >= 1) { return (short) 2; } else { return (short) 1; } case 49: // AlterClientQuotas if (_version >= 1) { return (short) 2; } else { 
return (short) 1; } case 50: // DescribeUserScramCredentials return (short) 2; case 51: // AlterUserScramCredentials return (short) 2; case 52: // Vote return (short) 2; case 53: // BeginQuorumEpoch return (short) 1; case 54: // EndQuorumEpoch return (short) 1; case 55: // DescribeQuorum return (short) 2; case 56: // AlterPartition return (short) 2; case 57: // UpdateFeatures return (short) 2; case 58: // Envelope return (short) 2; case 59: // FetchSnapshot return (short) 2; case 60: // DescribeCluster return (short) 2; case 61: // DescribeProducers return (short) 2; case 62: // BrokerRegistration return (short) 2; case 63: // BrokerHeartbeat return (short) 2; case 64: // UnregisterBroker return (short) 2; case 65: // DescribeTransactions return (short) 2; case 66: // ListTransactions return (short) 2; case 67: // AllocateProducerIds return (short) 2; case 68: // ConsumerGroupHeartbeat return (short) 2; default: throw new UnsupportedVersionException("Unsupported API key " + apiKey); } } public short responseHeaderVersion(short _version) { switch (apiKey) { case 0: // Produce if (_version >= 9) { return (short) 1; } else { return (short) 0; } case 1: // Fetch if (_version >= 12) { return (short) 1; } else { return (short) 0; } case 2: // ListOffsets if (_version >= 6) { return (short) 1; } else { return (short) 0; } case 3: // Metadata if (_version >= 9) { return (short) 1; } else { return (short) 0; } case 4: // LeaderAndIsr if (_version >= 4) { return (short) 1; } else { return (short) 0; } case 5: // StopReplica if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 6: // UpdateMetadata if (_version >= 6) { return (short) 1; } else { return (short) 0; } case 7: // ControlledShutdown if (_version >= 3) { return (short) 1; } else { return (short) 0; } case 8: // OffsetCommit if (_version >= 8) { return (short) 1; } else { return (short) 0; } case 9: // OffsetFetch if (_version >= 6) { return (short) 1; } else { return (short) 0; } case 10: // 
FindCoordinator if (_version >= 3) { return (short) 1; } else { return (short) 0; } case 11: // JoinGroup if (_version >= 6) { return (short) 1; } else { return (short) 0; } case 12: // Heartbeat if (_version >= 4) { return (short) 1; } else { return (short) 0; } case 13: // LeaveGroup if (_version >= 4) { return (short) 1; } else { return (short) 0; } case 14: // SyncGroup if (_version >= 4) { return (short) 1; } else { return (short) 0; } case 15: // DescribeGroups if (_version >= 5) { return (short) 1; } else { return (short) 0; } case 16: // ListGroups if (_version >= 3) { return (short) 1; } else { return (short) 0; } case 17: // SaslHandshake return (short) 0; case 18: // ApiVersions // ApiVersionsResponse always includes a v0 header. // See KIP-511 for details. return (short) 0; case 19: // CreateTopics if (_version >= 5) { return (short) 1; } else { return (short) 0; } case 20: // DeleteTopics if (_version >= 4) { return (short) 1; } else { return (short) 0; } case 21: // DeleteRecords if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 22: // InitProducerId if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 23: // OffsetForLeaderEpoch if (_version >= 4) { return (short) 1; } else { return (short) 0; } case 24: // AddPartitionsToTxn if (_version >= 3) { return (short) 1; } else { return (short) 0; } case 25: // AddOffsetsToTxn if (_version >= 3) { return (short) 1; } else { return (short) 0; } case 26: // EndTxn if (_version >= 3) { return (short) 1; } else { return (short) 0; } case 27: // WriteTxnMarkers if (_version >= 1) { return (short) 1; } else { return (short) 0; } case 28: // TxnOffsetCommit if (_version >= 3) { return (short) 1; } else { return (short) 0; } case 29: // DescribeAcls if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 30: // CreateAcls if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 31: // DeleteAcls if (_version >= 2) { return (short) 1; } else 
{ return (short) 0; } case 32: // DescribeConfigs if (_version >= 4) { return (short) 1; } else { return (short) 0; } case 33: // AlterConfigs if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 34: // AlterReplicaLogDirs if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 35: // DescribeLogDirs if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 36: // SaslAuthenticate if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 37: // CreatePartitions if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 38: // CreateDelegationToken if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 39: // RenewDelegationToken if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 40: // ExpireDelegationToken if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 41: // DescribeDelegationToken if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 42: // DeleteGroups if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 43: // ElectLeaders if (_version >= 2) { return (short) 1; } else { return (short) 0; } case 44: // IncrementalAlterConfigs if (_version >= 1) { return (short) 1; } else { return (short) 0; } case 45: // AlterPartitionReassignments return (short) 1; case 46: // ListPartitionReassignments return (short) 1; case 47: // OffsetDelete return (short) 0; case 48: // DescribeClientQuotas if (_version >= 1) { return (short) 1; } else { return (short) 0; } case 49: // AlterClientQuotas if (_version >= 1) { return (short) 1; } else { return (short) 0; } case 50: // DescribeUserScramCredentials return (short) 1; case 51: // AlterUserScramCredentials return (short) 1; case 52: // Vote return (short) 1; case 53: // BeginQuorumEpoch return (short) 0; case 54: // EndQuorumEpoch return (short) 0; case 55: // DescribeQuorum return (short) 1; case 56: // AlterPartition return (short) 1; case 57: 
// UpdateFeatures return (short) 1; case 58: // Envelope return (short) 1; case 59: // FetchSnapshot return (short) 1; case 60: // DescribeCluster return (short) 1; case 61: // DescribeProducers return (short) 1; case 62: // BrokerRegistration return (short) 1; case 63: // BrokerHeartbeat return (short) 1; case 64: // UnregisterBroker return (short) 1; case 65: // DescribeTransactions return (short) 1; case 66: // ListTransactions return (short) 1; case 67: // AllocateProducerIds return (short) 1; case 68: // ConsumerGroupHeartbeat return (short) 1; default: throw new UnsupportedVersionException("Unsupported API key " + apiKey); } } public enum ListenerType { ZK_BROKER, BROKER, CONTROLLER; } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ApiVersionsRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Generated message body for the ApiVersions request (API key 18).
 *
 * <p>Versions 0-2 carry no fields at all; version 3 adds the two
 * client-software identification strings (compact strings) and flexible
 * (tagged) fields. Do not edit by hand — regenerate from the message spec.
 */
public class ApiVersionsRequestData implements ApiMessage {
    // Name of the client software (only serialized in version >= 3).
    String clientSoftwareName;
    // Version of the client software (only serialized in version >= 3).
    String clientSoftwareVersion;
    // Tagged fields received on the wire that this (possibly older) code
    // does not recognize; preserved so they round-trip on re-serialization.
    private List<RawTaggedField> _unknownTaggedFields;

    // Versions 0-2 share an identical empty schema.
    public static final Schema SCHEMA_0 =
        new Schema();

    public static final Schema SCHEMA_1 = SCHEMA_0;

    public static final Schema SCHEMA_2 = SCHEMA_1;

    // Version 3 is the first "flexible" version: compact strings plus a
    // tagged-fields section (currently with no named tags).
    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("client_software_name", Type.COMPACT_STRING, "The name of the client."),
            new Field("client_software_version", Type.COMPACT_STRING, "The version of the client."),
            TaggedFieldsSection.of(
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 3;

    /**
     * Deserializing constructor: reads the message body for the given
     * version directly from {@code _readable}.
     */
    public ApiVersionsRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: both string fields start empty, never null. */
    public ApiVersionsRequestData() {
        this.clientSoftwareName = "";
        this.clientSoftwareVersion = "";
    }

    @Override
    public short apiKey() {
        return 18;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 3;
    }

    /**
     * Reads this message from {@code _readable} using the wire format of
     * {@code _version}. For versions below 3 there is nothing on the wire
     * and both fields are reset to "".
     *
     * @throws RuntimeException if a string is null-encoded or longer than
     *         0x7fff bytes on the wire.
     */
    @Override
    public void read(Readable _readable, short _version) {
        if (_version >= 3) {
            int length;
            // Compact string: unsigned varint holds (length + 1); 0 means null.
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                throw new RuntimeException("non-nullable field clientSoftwareName was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field clientSoftwareName had invalid length " + length);
            } else {
                this.clientSoftwareName = _readable.readString(length);
            }
        } else {
            this.clientSoftwareName = "";
        }
        if (_version >= 3) {
            int length;
            length = _readable.readUnsignedVarint() - 1;
            if (length < 0) {
                throw new RuntimeException("non-nullable field clientSoftwareVersion was serialized as null");
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field clientSoftwareVersion had invalid length " + length);
            } else {
                this.clientSoftwareVersion = _readable.readString(length);
            }
        } else {
            this.clientSoftwareVersion = "";
        }
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            // Flexible versions append a tagged-field section; no tags are
            // defined for this message, so everything lands in the unknown list.
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Serializes this message to {@code _writable}. Must be preceded by a
     * call to {@link #addSize}, which populates {@code _cache} with the
     * UTF-8 bytes consumed here via {@code getSerializedValue}.
     *
     * @throws UnsupportedVersionException if tagged fields are present but
     *         {@code _version} predates flexible versions (< 3).
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 3) {
            {
                byte[] _stringBytes = _cache.getSerializedValue(clientSoftwareName);
                // Compact string framing: varint (length + 1) then the bytes.
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
        }
        if (_version >= 3) {
            {
                byte[] _stringBytes = _cache.getSerializedValue(clientSoftwareVersion);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this message into {@code _size}
     * and caches each field's UTF-8 encoding in {@code _cache} so that
     * {@link #write} does not encode the strings a second time.
     *
     * @throws RuntimeException if a string field exceeds 0x7fff UTF-8 bytes.
     * @throws UnsupportedVersionException if tagged fields are present on a
     *         pre-flexible (< 3) version.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (_version >= 3) {
            {
                byte[] _stringBytes = clientSoftwareName.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'clientSoftwareName' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(clientSoftwareName, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
        }
        if (_version >= 3) {
            {
                byte[] _stringBytes = clientSoftwareVersion.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'clientSoftwareVersion' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(clientSoftwareVersion, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                // Each raw tagged field costs: varint tag + varint size + payload.
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 3) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof ApiVersionsRequestData)) return false;
        ApiVersionsRequestData other = (ApiVersionsRequestData) obj;
        if (this.clientSoftwareName == null) {
            if (other.clientSoftwareName != null) return false;
        } else {
            if (!this.clientSoftwareName.equals(other.clientSoftwareName)) return false;
        }
        if (this.clientSoftwareVersion == null) {
            if (other.clientSoftwareVersion != null) return false;
        } else {
            if (!this.clientSoftwareVersion.equals(other.clientSoftwareVersion)) return false;
        }
        // Unknown tagged fields participate in equality so round-tripped
        // messages compare equal.
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        // NOTE: unknown tagged fields are deliberately excluded here even
        // though equals() compares them (generated-code convention).
        int hashCode = 0;
        hashCode = 31 * hashCode + (clientSoftwareName == null ? 0 : clientSoftwareName.hashCode());
        hashCode = 31 * hashCode + (clientSoftwareVersion == null ? 0 : clientSoftwareVersion.hashCode());
        return hashCode;
    }

    /**
     * Returns a copy of this message. Only the named fields are copied;
     * unknown tagged fields are not carried over to the duplicate.
     */
    @Override
    public ApiVersionsRequestData duplicate() {
        ApiVersionsRequestData _duplicate = new ApiVersionsRequestData();
        _duplicate.clientSoftwareName = clientSoftwareName;
        _duplicate.clientSoftwareVersion = clientSoftwareVersion;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "ApiVersionsRequestData("
            + "clientSoftwareName=" + ((clientSoftwareName == null) ? "null" : "'" + clientSoftwareName.toString() + "'")
            + ", clientSoftwareVersion=" + ((clientSoftwareVersion == null) ? "null" : "'" + clientSoftwareVersion.toString() + "'")
            + ")";
    }

    public String clientSoftwareName() {
        return this.clientSoftwareName;
    }

    public String clientSoftwareVersion() {
        return this.clientSoftwareVersion;
    }

    /** Lazily materializes the unknown-tagged-field list; never returns null. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public ApiVersionsRequestData setClientSoftwareName(String v) {
        this.clientSoftwareName = v;
        return this;
    }

    public ApiVersionsRequestData setClientSoftwareVersion(String v) {
        this.clientSoftwareVersion = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ApiVersionsRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;

import static org.apache.kafka.common.message.ApiVersionsRequestData.*;

/**
 * Generated JSON converter for {@link ApiVersionsRequestData}: translates
 * between the message object and a Jackson tree representation, honoring
 * the per-version field set (both fields exist only in version >= 3).
 */
public class ApiVersionsRequestDataJsonConverter {
    /**
     * Builds an {@link ApiVersionsRequestData} from a JSON object node.
     * Fields absent from the JSON default to "" on pre-3 versions; on
     * version >= 3 a missing field is an error because the schema makes
     * it mandatory.
     *
     * @throws RuntimeException if a mandatory field is missing or a field
     *         is not a JSON string.
     */
    public static ApiVersionsRequestData read(JsonNode _node, short _version) {
        ApiVersionsRequestData _object = new ApiVersionsRequestData();
        JsonNode _clientSoftwareNameNode = _node.get("clientSoftwareName");
        if (_clientSoftwareNameNode == null) {
            if (_version >= 3) {
                throw new RuntimeException("ApiVersionsRequestData: unable to locate field 'clientSoftwareName', which is mandatory in version " + _version);
            } else {
                _object.clientSoftwareName = "";
            }
        } else {
            if (!_clientSoftwareNameNode.isTextual()) {
                // NOTE(review): the message reports _node.getNodeType() — the
                // parent object's type, not the offending field's — so the
                // diagnostic always says OBJECT; mirrors upstream generated code.
                throw new RuntimeException("ApiVersionsRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.clientSoftwareName = _clientSoftwareNameNode.asText();
        }
        JsonNode _clientSoftwareVersionNode = _node.get("clientSoftwareVersion");
        if (_clientSoftwareVersionNode == null) {
            if (_version >= 3) {
                throw new RuntimeException("ApiVersionsRequestData: unable to locate field 'clientSoftwareVersion', which is mandatory in version " + _version);
            } else {
                _object.clientSoftwareVersion = "";
            }
        } else {
            if (!_clientSoftwareVersionNode.isTextual()) {
                // NOTE(review): same parent-node diagnostic quirk as above.
                throw new RuntimeException("ApiVersionsRequestData expected a string type, but got " + _node.getNodeType());
            }
            _object.clientSoftwareVersion = _clientSoftwareVersionNode.asText();
        }
        return _object;
    }

    /**
     * Renders {@code _object} as a JSON object node for {@code _version}.
     * Pre-3 versions have no fields, so the node is empty. The
     * {@code _serializeRecords} flag is unused here (this message carries
     * no record data) but kept for converter-interface uniformity.
     */
    public static JsonNode write(ApiVersionsRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        if (_version >= 3) {
            _node.set("clientSoftwareName", new TextNode(_object.clientSoftwareName));
        }
        if (_version >= 3) {
            _node.set("clientSoftwareVersion", new TextNode(_object.clientSoftwareVersion));
        }
        return _node;
    }

    /** Convenience overload: serializes with record data enabled. */
    public static JsonNode write(ApiVersionsRequestData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ApiVersionsResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import 
org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Generated data class for the ApiVersions response (API key 18), versions 0-3.
 *
 * <p>Carries the broker's supported version range for every API, and (from
 * version 3 onward, as optional tagged fields) the broker's supported features,
 * the cluster's finalized features plus their epoch, and the ZK-migration
 * readiness flag set by a KRaft controller.
 *
 * <p>Version notes, as encoded below:
 * <ul>
 *   <li>v0: error_code + api_keys (non-compact array).</li>
 *   <li>v1/v2: adds throttle_time_ms.</li>
 *   <li>v3: flexible version — compact arrays, varint framing, and tagged
 *       fields 0-3 (supported_features, finalized_features_epoch,
 *       finalized_features, zk_migration_ready).</li>
 * </ul>
 *
 * <p>NOTE(review): this file is produced by Kafka's message generator
 * ("THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT."); logic must not be
 * hand-modified — regenerate from the message JSON spec instead.
 */
public class ApiVersionsResponseData implements ApiMessage {
    // The top-level error code.
    short errorCode;
    // The APIs supported by the broker, keyed by API index.
    ApiVersionCollection apiKeys;
    // Quota throttle duration in ms (v1+); zero when no quota was violated.
    int throttleTimeMs;
    // Tagged field 0 (v3+): features supported by the broker.
    SupportedFeatureKeyCollection supportedFeatures;
    // Tagged field 1 (v3+): epoch of the finalized-features info; -1 means unknown.
    long finalizedFeaturesEpoch;
    // Tagged field 2 (v3+): cluster-wide finalized features; only valid when epoch >= 0.
    FinalizedFeatureKeyCollection finalizedFeatures;
    // Tagged field 3 (v3+): set by a KRaft controller when ZK-migration configs are present.
    boolean zkMigrationReady;
    // Tagged fields with tags this version of the code does not know about.
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("error_code", Type.INT16, "The top-level error code."),
            new Field("api_keys", new ArrayOf(ApiVersion.SCHEMA_0), "The APIs supported by the broker.")
        );

    public static final Schema SCHEMA_1 =
        new Schema(
            new Field("error_code", Type.INT16, "The top-level error code."),
            new Field("api_keys", new ArrayOf(ApiVersion.SCHEMA_0), "The APIs supported by the broker."),
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.")
        );

    public static final Schema SCHEMA_2 = SCHEMA_1;

    // v3 is the first flexible version: compact arrays plus a tagged-fields section.
    public static final Schema SCHEMA_3 =
        new Schema(
            new Field("error_code", Type.INT16, "The top-level error code."),
            new Field("api_keys", new CompactArrayOf(ApiVersion.SCHEMA_3), "The APIs supported by the broker."),
            new Field("throttle_time_ms", Type.INT32, "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            TaggedFieldsSection.of(
                0, new Field("supported_features", new CompactArrayOf(SupportedFeatureKey.SCHEMA_3), "Features supported by the broker."),
                1, new Field("finalized_features_epoch", Type.INT64, "The monotonically increasing epoch for the finalized features information. Valid values are >= 0. A value of -1 is special and represents unknown epoch."),
                2, new Field("finalized_features", new CompactArrayOf(FinalizedFeatureKey.SCHEMA_3), "List of cluster-wide finalized features. The information is valid only if FinalizedFeaturesEpoch >= 0."),
                3, new Field("zk_migration_ready", Type.BOOLEAN, "Set by a KRaft controller if the required configurations for ZK migration are present")
            )
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0,
        SCHEMA_1,
        SCHEMA_2,
        SCHEMA_3
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 3;

    /** Deserializing constructor: reads all fields from {@code _readable} at {@code _version}. */
    public ApiVersionsResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: every field starts at its schema default. */
    public ApiVersionsResponseData() {
        this.errorCode = (short) 0;
        this.apiKeys = new ApiVersionCollection(0);
        this.throttleTimeMs = 0;
        this.supportedFeatures = new SupportedFeatureKeyCollection(0);
        this.finalizedFeaturesEpoch = -1L;
        this.finalizedFeatures = new FinalizedFeatureKeyCollection(0);
        this.zkMigrationReady = false;
    }

    @Override
    public short apiKey() {
        // 18 == ApiVersions in the Kafka protocol.
        return 18;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 3;
    }

    /**
     * Deserializes this message from {@code _readable}.
     * Versions below 3 use fixed-width array lengths; v3+ uses compact
     * (varint, length+1) arrays and then consumes the tagged-fields section,
     * stashing unrecognized tags in {@code _unknownTaggedFields}.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.errorCode = _readable.readShort();
        {
            if (_version >= 3) {
                int arrayLength;
                // Compact array: encoded length is N + 1, so -1 after decode; a raw 0 means null.
                arrayLength = _readable.readUnsignedVarint() - 1;
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field apiKeys was serialized as null");
                } else {
                    // Guard against hostile/corrupt lengths larger than the remaining buffer.
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ApiVersionCollection newCollection = new ApiVersionCollection(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new ApiVersion(_readable, _version));
                    }
                    this.apiKeys = newCollection;
                }
            } else {
                int arrayLength;
                // Pre-flexible versions: plain INT32 length.
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field apiKeys was serialized as null");
                } else {
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ApiVersionCollection newCollection = new ApiVersionCollection(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new ApiVersion(_readable, _version));
                    }
                    this.apiKeys = newCollection;
                }
            }
        }
        if (_version >= 1) {
            this.throttleTimeMs = _readable.readInt();
        } else {
            this.throttleTimeMs = 0;
        }
        // Reset the tagged fields to their defaults before scanning the tag section.
        {
            this.supportedFeatures = new SupportedFeatureKeyCollection(0);
        }
        this.finalizedFeaturesEpoch = -1L;
        {
            this.finalizedFeatures = new FinalizedFeatureKeyCollection(0);
        }
        this.zkMigrationReady = false;
        this._unknownTaggedFields = null;
        if (_version >= 3) {
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    case 0: {
                        int arrayLength;
                        arrayLength = _readable.readUnsignedVarint() - 1;
                        if (arrayLength < 0) {
                            throw new RuntimeException("non-nullable field supportedFeatures was serialized as null");
                        } else {
                            if (arrayLength > _readable.remaining()) {
                                throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                            }
                            SupportedFeatureKeyCollection newCollection = new SupportedFeatureKeyCollection(arrayLength);
                            for (int i = 0; i < arrayLength; i++) {
                                newCollection.add(new SupportedFeatureKey(_readable, _version));
                            }
                            this.supportedFeatures = newCollection;
                        }
                        break;
                    }
                    case 1: {
                        this.finalizedFeaturesEpoch = _readable.readLong();
                        break;
                    }
                    case 2: {
                        int arrayLength;
                        arrayLength = _readable.readUnsignedVarint() - 1;
                        if (arrayLength < 0) {
                            throw new RuntimeException("non-nullable field finalizedFeatures was serialized as null");
                        } else {
                            if (arrayLength > _readable.remaining()) {
                                throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                            }
                            FinalizedFeatureKeyCollection newCollection = new FinalizedFeatureKeyCollection(arrayLength);
                            for (int i = 0; i < arrayLength; i++) {
                                newCollection.add(new FinalizedFeatureKey(_readable, _version));
                            }
                            this.finalizedFeatures = newCollection;
                        }
                        break;
                    }
                    case 3: {
                        this.zkMigrationReady = _readable.readByte() != 0;
                        break;
                    }
                    default:
                        // Unknown tag: keep the raw bytes so they round-trip on re-serialization.
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }
    }

    /**
     * Serializes this message to {@code _writable}. Tagged fields are written
     * only when they differ from their defaults; attempting to carry tagged
     * fields in a pre-v3 message throws {@link UnsupportedVersionException}.
     * Relies on array byte-sizes previously recorded in {@code _cache} by
     * {@link #addSize}.
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeShort(errorCode);
        if (_version >= 3) {
            // Compact array: length is written as N + 1.
            _writable.writeUnsignedVarint(apiKeys.size() + 1);
            for (ApiVersion apiKeysElement : apiKeys) {
                apiKeysElement.write(_writable, _cache, _version);
            }
        } else {
            _writable.writeInt(apiKeys.size());
            for (ApiVersion apiKeysElement : apiKeys) {
                apiKeysElement.write(_writable, _cache, _version);
            }
        }
        if (_version >= 1) {
            _writable.writeInt(throttleTimeMs);
        }
        // First pass: count which tagged fields are non-default.
        if (_version >= 3) {
            if (!this.supportedFeatures.isEmpty()) {
                _numTaggedFields++;
            }
        }
        if (_version >= 3) {
            if (this.finalizedFeaturesEpoch != -1L) {
                _numTaggedFields++;
            }
        }
        if (_version >= 3) {
            if (!this.finalizedFeatures.isEmpty()) {
                _numTaggedFields++;
            }
        }
        if (_version >= 3) {
            if (this.zkMigrationReady) {
                _numTaggedFields++;
            }
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_version >= 3) {
            _writable.writeUnsignedVarint(_numTaggedFields);
            // Tagged fields are emitted in ascending tag order: tag, byte size, payload.
            {
                if (!this.supportedFeatures.isEmpty()) {
                    _writable.writeUnsignedVarint(0);
                    _writable.writeUnsignedVarint(_cache.getArraySizeInBytes(this.supportedFeatures));
                    _writable.writeUnsignedVarint(supportedFeatures.size() + 1);
                    for (SupportedFeatureKey supportedFeaturesElement : supportedFeatures) {
                        supportedFeaturesElement.write(_writable, _cache, _version);
                    }
                }
            }
            {
                if (this.finalizedFeaturesEpoch != -1L) {
                    _writable.writeUnsignedVarint(1);
                    _writable.writeUnsignedVarint(8);
                    _writable.writeLong(finalizedFeaturesEpoch);
                }
            }
            {
                if (!this.finalizedFeatures.isEmpty()) {
                    _writable.writeUnsignedVarint(2);
                    _writable.writeUnsignedVarint(_cache.getArraySizeInBytes(this.finalizedFeatures));
                    _writable.writeUnsignedVarint(finalizedFeatures.size() + 1);
                    for (FinalizedFeatureKey finalizedFeaturesElement : finalizedFeatures) {
                        finalizedFeaturesElement.write(_writable, _cache, _version);
                    }
                }
            }
            {
                if (this.zkMigrationReady) {
                    _writable.writeUnsignedVarint(3);
                    _writable.writeUnsignedVarint(1);
                    _writable.writeByte(zkMigrationReady ? (byte) 1 : (byte) 0);
                }
            }
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /**
     * Accumulates the serialized size of this message into {@code _size} and
     * caches per-array byte counts in {@code _cache} for the later
     * {@link #write} pass. Must mirror {@link #write} byte-for-byte.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(2);
        {
            if (_version >= 3) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(apiKeys.size() + 1));
            } else {
                _size.addBytes(4);
            }
            for (ApiVersion apiKeysElement : apiKeys) {
                apiKeysElement.addSize(_size, _cache, _version);
            }
        }
        if (_version >= 1) {
            _size.addBytes(4);
        }
        if (_version >= 3) {
            {
                if (!this.supportedFeatures.isEmpty()) {
                    _numTaggedFields++;
                    // 1 byte for the tag varint (tags 0-3 always fit in one byte).
                    _size.addBytes(1);
                    int _sizeBeforeArray = _size.totalSize();
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(supportedFeatures.size() + 1));
                    for (SupportedFeatureKey supportedFeaturesElement : supportedFeatures) {
                        supportedFeaturesElement.addSize(_size, _cache, _version);
                    }
                    int _arraySize = _size.totalSize() - _sizeBeforeArray;
                    // Remember the array's byte size so write() can emit the tag's size prefix.
                    _cache.setArraySizeInBytes(supportedFeatures, _arraySize);
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_arraySize));
                }
            }
        }
        if (_version >= 3) {
            if (this.finalizedFeaturesEpoch != -1L) {
                _numTaggedFields++;
                _size.addBytes(1);
                _size.addBytes(1);
                _size.addBytes(8);
            }
        }
        if (_version >= 3) {
            {
                if (!this.finalizedFeatures.isEmpty()) {
                    _numTaggedFields++;
                    _size.addBytes(1);
                    int _sizeBeforeArray = _size.totalSize();
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(finalizedFeatures.size() + 1));
                    for (FinalizedFeatureKey finalizedFeaturesElement : finalizedFeatures) {
                        finalizedFeaturesElement.addSize(_size, _cache, _version);
                    }
                    int _arraySize = _size.totalSize() - _sizeBeforeArray;
                    _cache.setArraySizeInBytes(finalizedFeatures, _arraySize);
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_arraySize));
                }
            }
        }
        if (_version >= 3) {
            if (this.zkMigrationReady) {
                _numTaggedFields++;
                _size.addBytes(1);
                _size.addBytes(1);
                _size.addBytes(1);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_version >= 3) {
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        } else {
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }
    }

    /** Field-by-field equality, including unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof ApiVersionsResponseData)) return false;
        ApiVersionsResponseData other = (ApiVersionsResponseData) obj;
        if (errorCode != other.errorCode) return false;
        if (this.apiKeys == null) {
            if (other.apiKeys != null) return false;
        } else {
            if (!this.apiKeys.equals(other.apiKeys)) return false;
        }
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (this.supportedFeatures == null) {
            if (other.supportedFeatures != null) return false;
        } else {
            if (!this.supportedFeatures.equals(other.supportedFeatures)) return false;
        }
        if (finalizedFeaturesEpoch != other.finalizedFeaturesEpoch) return false;
        if (this.finalizedFeatures == null) {
            if (other.finalizedFeatures != null) return false;
        } else {
            if (!this.finalizedFeatures.equals(other.finalizedFeatures)) return false;
        }
        if (zkMigrationReady != other.zkMigrationReady) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + errorCode;
        hashCode = 31 * hashCode + (apiKeys == null ? 0 : apiKeys.hashCode());
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + (supportedFeatures == null ? 0 : supportedFeatures.hashCode());
        hashCode = 31 * hashCode + ((int) (finalizedFeaturesEpoch >> 32) ^ (int) finalizedFeaturesEpoch);
        hashCode = 31 * hashCode + (finalizedFeatures == null ? 0 : finalizedFeatures.hashCode());
        hashCode = 31 * hashCode + (zkMigrationReady ? 1231 : 1237);
        return hashCode;
    }

    /** Deep copy: collections and their elements are duplicated, not shared. */
    @Override
    public ApiVersionsResponseData duplicate() {
        ApiVersionsResponseData _duplicate = new ApiVersionsResponseData();
        _duplicate.errorCode = errorCode;
        ApiVersionCollection newApiKeys = new ApiVersionCollection(apiKeys.size());
        for (ApiVersion _element : apiKeys) {
            newApiKeys.add(_element.duplicate());
        }
        _duplicate.apiKeys = newApiKeys;
        _duplicate.throttleTimeMs = throttleTimeMs;
        SupportedFeatureKeyCollection newSupportedFeatures = new SupportedFeatureKeyCollection(supportedFeatures.size());
        for (SupportedFeatureKey _element : supportedFeatures) {
            newSupportedFeatures.add(_element.duplicate());
        }
        _duplicate.supportedFeatures = newSupportedFeatures;
        _duplicate.finalizedFeaturesEpoch = finalizedFeaturesEpoch;
        FinalizedFeatureKeyCollection newFinalizedFeatures = new FinalizedFeatureKeyCollection(finalizedFeatures.size());
        for (FinalizedFeatureKey _element : finalizedFeatures) {
            newFinalizedFeatures.add(_element.duplicate());
        }
        _duplicate.finalizedFeatures = newFinalizedFeatures;
        _duplicate.zkMigrationReady = zkMigrationReady;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "ApiVersionsResponseData("
            + "errorCode=" + errorCode
            + ", apiKeys=" + MessageUtil.deepToString(apiKeys.iterator())
            + ", throttleTimeMs=" + throttleTimeMs
            + ", supportedFeatures=" + MessageUtil.deepToString(supportedFeatures.iterator())
            + ", finalizedFeaturesEpoch=" + finalizedFeaturesEpoch
            + ", finalizedFeatures=" + MessageUtil.deepToString(finalizedFeatures.iterator())
            + ", zkMigrationReady=" + (zkMigrationReady ? "true" : "false")
            + ")";
    }

    public short errorCode() {
        return this.errorCode;
    }

    public ApiVersionCollection apiKeys() {
        return this.apiKeys;
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public SupportedFeatureKeyCollection supportedFeatures() {
        return this.supportedFeatures;
    }

    public long finalizedFeaturesEpoch() {
        return this.finalizedFeaturesEpoch;
    }

    public FinalizedFeatureKeyCollection finalizedFeatures() {
        return this.finalizedFeatures;
    }

    public boolean zkMigrationReady() {
        return this.zkMigrationReady;
    }

    /** Lazily-created list of tagged fields with unrecognized tags. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // Fluent setters: each returns {@code this} for chaining.
    public ApiVersionsResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }

    public ApiVersionsResponseData setApiKeys(ApiVersionCollection v) {
        this.apiKeys = v;
        return this;
    }

    public ApiVersionsResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public ApiVersionsResponseData setSupportedFeatures(SupportedFeatureKeyCollection v) {
        this.supportedFeatures = v;
        return this;
    }

    public ApiVersionsResponseData setFinalizedFeaturesEpoch(long v) {
        this.finalizedFeaturesEpoch = v;
        return this;
    }

    public ApiVersionsResponseData setFinalizedFeatures(FinalizedFeatureKeyCollection v) {
        this.finalizedFeatures = v;
        return this;
    }

    public ApiVersionsResponseData setZkMigrationReady(boolean v) {
        this.zkMigrationReady = v;
        return this;
    }

    /**
     * One (apiKey, minVersion, maxVersion) entry of the api_keys array.
     * Keyed by {@code apiKey} alone inside {@link ApiVersionCollection}
     * (see {@link #elementKeysAreEqual} / {@link #hashCode}).
     */
    public static class ApiVersion implements Message, ImplicitLinkedHashMultiCollection.Element {
        short apiKey;      // the API index
        short minVersion;  // minimum supported version, inclusive
        short maxVersion;  // maximum supported version, inclusive
        private List<RawTaggedField> _unknownTaggedFields;
        // Intrusive linked-list indices used by ImplicitLinkedHashCollection.
        private int next;
        private int prev;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("api_key", Type.INT16, "The API index."),
                new Field("min_version", Type.INT16, "The minimum supported version, inclusive."),
                new Field("max_version", Type.INT16, "The maximum supported version, inclusive.")
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema SCHEMA_2 = SCHEMA_1;

        public static final Schema SCHEMA_3 =
            new Schema(
                new Field("api_key", Type.INT16, "The API index."),
                new Field("min_version", Type.INT16, "The minimum supported version, inclusive."),
                new Field("max_version", Type.INT16, "The maximum supported version, inclusive."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1,
            SCHEMA_2,
            SCHEMA_3
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 3;

        public ApiVersion(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public ApiVersion() {
            this.apiKey = (short) 0;
            this.minVersion = (short) 0;
            this.maxVersion = (short) 0;
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 3;
        }

        /** Reads the three INT16 fields; v3+ also consumes the tagged-fields section. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of ApiVersion");
            }
            this.apiKey = _readable.readShort();
            this.minVersion = _readable.readShort();
            this.maxVersion = _readable.readShort();
            this._unknownTaggedFields = null;
            if (_version >= 3) {
                int _numTaggedFields = _readable.readUnsignedVarint();
                for (int _i = 0; _i < _numTaggedFields; _i++) {
                    int _tag = _readable.readUnsignedVarint();
                    int _size = _readable.readUnsignedVarint();
                    switch (_tag) {
                        default:
                            this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                            break;
                    }
                }
            }
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            _writable.writeShort(apiKey);
            _writable.writeShort(minVersion);
            _writable.writeShort(maxVersion);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_version >= 3) {
                _writable.writeUnsignedVarint(_numTaggedFields);
                _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of ApiVersion");
            }
            _size.addBytes(2);
            _size.addBytes(2);
            _size.addBytes(2);
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_version >= 3) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
            } else {
                if (_numTaggedFields > 0) {
                    throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
                }
            }
        }

        /** Collection-key equality: only {@code apiKey} participates. */
        @Override
        public boolean elementKeysAreEqual(Object obj) {
            if (!(obj instanceof ApiVersion)) return false;
            ApiVersion other = (ApiVersion) obj;
            if (apiKey != other.apiKey) return false;
            return true;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof ApiVersion)) return false;
            ApiVersion other = (ApiVersion) obj;
            if (apiKey != other.apiKey) return false;
            if (minVersion != other.minVersion) return false;
            if (maxVersion != other.maxVersion) return false;
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        // Hash only the collection key (apiKey), consistent with elementKeysAreEqual.
        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + apiKey;
            return hashCode;
        }

        @Override
        public ApiVersion duplicate() {
            ApiVersion _duplicate = new ApiVersion();
            _duplicate.apiKey = apiKey;
            _duplicate.minVersion = minVersion;
            _duplicate.maxVersion = maxVersion;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "ApiVersion("
                + "apiKey=" + apiKey
                + ", minVersion=" + minVersion
                + ", maxVersion=" + maxVersion
                + ")";
        }

        public short apiKey() {
            return this.apiKey;
        }

        public short minVersion() {
            return this.minVersion;
        }

        public short maxVersion() {
            return this.maxVersion;
        }

        @Override
        public int next() {
            return this.next;
        }

        @Override
        public int prev() {
            return this.prev;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public ApiVersion setApiKey(short v) {
            this.apiKey = v;
            return this;
        }

        public ApiVersion setMinVersion(short v) {
            this.minVersion = v;
            return this;
        }

        public ApiVersion setMaxVersion(short v) {
            this.maxVersion = v;
            return this;
        }

        @Override
        public void setNext(int v) {
            this.next = v;
        }

        @Override
        public void setPrev(int v) {
            this.prev = v;
        }
    }

    /** Multiset of {@link ApiVersion} entries, looked up by apiKey. */
    public static class ApiVersionCollection extends ImplicitLinkedHashMultiCollection<ApiVersion> {
        public ApiVersionCollection() {
            super();
        }

        public ApiVersionCollection(int expectedNumElements) {
            super(expectedNumElements);
        }

        public ApiVersionCollection(Iterator<ApiVersion> iterator) {
            super(iterator);
        }

        public ApiVersion find(short apiKey) {
            ApiVersion _key = new ApiVersion();
            _key.setApiKey(apiKey);
            return find(_key);
        }

        public List<ApiVersion> findAll(short apiKey) {
            ApiVersion _key = new ApiVersion();
            _key.setApiKey(apiKey);
            return findAll(_key);
        }

        public ApiVersionCollection duplicate() {
            ApiVersionCollection _duplicate = new ApiVersionCollection(size());
            for (ApiVersion _element : this) {
                _duplicate.add(_element.duplicate());
            }
            return _duplicate;
        }
    }

    /**
     * One (name, minVersion, maxVersion) entry of the supported_features tagged
     * field. Only exists in flexible versions (schema defined for v3 only);
     * keyed by {@code name} within its collection.
     */
    public static class SupportedFeatureKey implements Message, ImplicitLinkedHashMultiCollection.Element {
        String name;       // the feature name
        short minVersion;  // minimum supported version for the feature
        short maxVersion;  // maximum supported version for the feature
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;
        private int prev;

        public static final Schema SCHEMA_3 =
            new Schema(
                new Field("name", Type.COMPACT_STRING, "The name of the feature."),
                new Field("min_version", Type.INT16, "The minimum supported version for the feature."),
                new Field("max_version", Type.INT16, "The maximum supported version for the feature."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            null,
            null,
            null,
            SCHEMA_3
        };

        public static final short LOWEST_SUPPORTED_VERSION = 3;
        public static final short HIGHEST_SUPPORTED_VERSION = 3;

        public SupportedFeatureKey(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public SupportedFeatureKey() {
            this.name = "";
            this.minVersion = (short) 0;
            this.maxVersion = (short) 0;
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        // NOTE(review): returns 0 although LOWEST_SUPPORTED_VERSION is 3 —
        // this is what the generator emitted; presumably it reflects the
        // enclosing message's range. Confirm against the generator before relying on it.
        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 3;
        }

        /** Reads a compact (varint length+1) string, two INT16s, then the tagged-fields section. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of SupportedFeatureKey");
            }
            {
                int length;
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            this.minVersion = _readable.readShort();
            this.maxVersion = _readable.readShort();
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version < 3) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of SupportedFeatureKey");
            }
            int _numTaggedFields = 0;
            {
                // UTF-8 bytes were cached by addSize(); compact-string length is bytes + 1.
                byte[] _stringBytes = _cache.getSerializedValue(name);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeShort(minVersion);
            _writable.writeShort(maxVersion);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of SupportedFeatureKey");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                // Cache the encoded bytes so write() doesn't re-encode.
                _cache.cacheSerializedValue(name, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
            _size.addBytes(2);
            _size.addBytes(2);
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }

        /** Collection-key equality: only {@code name} participates. */
        @Override
        public boolean elementKeysAreEqual(Object obj) {
            if (!(obj instanceof SupportedFeatureKey)) return false;
            SupportedFeatureKey other = (SupportedFeatureKey) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            return true;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof SupportedFeatureKey)) return false;
            SupportedFeatureKey other = (SupportedFeatureKey) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            if (minVersion != other.minVersion) return false;
            if (maxVersion != other.maxVersion) return false;
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            return hashCode;
        }

        @Override
        public SupportedFeatureKey duplicate() {
            SupportedFeatureKey _duplicate = new SupportedFeatureKey();
            _duplicate.name = name;
            _duplicate.minVersion = minVersion;
            _duplicate.maxVersion = maxVersion;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "SupportedFeatureKey("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", minVersion=" + minVersion
                + ", maxVersion=" + maxVersion
                + ")";
        }

        public String name() {
            return this.name;
        }

        public short minVersion() {
            return this.minVersion;
        }

        public short maxVersion() {
            return this.maxVersion;
        }

        @Override
        public int next() {
            return this.next;
        }

        @Override
        public int prev() {
            return this.prev;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public SupportedFeatureKey setName(String v) {
            this.name = v;
            return this;
        }

        public SupportedFeatureKey setMinVersion(short v) {
            this.minVersion = v;
            return this;
        }

        public SupportedFeatureKey setMaxVersion(short v) {
            this.maxVersion = v;
            return this;
        }

        @Override
        public void setNext(int v) {
            this.next = v;
        }

        @Override
        public void setPrev(int v) {
            this.prev = v;
        }
    }

    /** Multiset of {@link SupportedFeatureKey} entries, looked up by feature name. */
    public static class SupportedFeatureKeyCollection extends ImplicitLinkedHashMultiCollection<SupportedFeatureKey> {
        public SupportedFeatureKeyCollection() {
            super();
        }

        public SupportedFeatureKeyCollection(int expectedNumElements) {
            super(expectedNumElements);
        }

        public SupportedFeatureKeyCollection(Iterator<SupportedFeatureKey> iterator) {
            super(iterator);
        }

        public SupportedFeatureKey find(String name) {
            SupportedFeatureKey _key = new SupportedFeatureKey();
            _key.setName(name);
            return find(_key);
        }

        public List<SupportedFeatureKey> findAll(String name) {
            SupportedFeatureKey _key = new SupportedFeatureKey();
            _key.setName(name);
            return findAll(_key);
        }

        public SupportedFeatureKeyCollection duplicate() {
            SupportedFeatureKeyCollection _duplicate = new SupportedFeatureKeyCollection(size());
            for (SupportedFeatureKey _element : this) {
                _duplicate.add(_element.duplicate());
            }
            return _duplicate;
        }
    }

    /**
     * One (name, maxVersionLevel, minVersionLevel) entry of the
     * finalized_features tagged field (v3 only); keyed by {@code name}.
     * Note the field order on the wire: max_version_level precedes
     * min_version_level, per SCHEMA_3.
     */
    public static class FinalizedFeatureKey implements Message, ImplicitLinkedHashMultiCollection.Element {
        String name;            // the feature name
        short maxVersionLevel;  // cluster-wide finalized max version level
        short minVersionLevel;  // cluster-wide finalized min version level
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;
        private int prev;

        public static final Schema SCHEMA_3 =
            new Schema(
                new Field("name", Type.COMPACT_STRING, "The name of the feature."),
                new Field("max_version_level", Type.INT16, "The cluster-wide finalized max version level for the feature."),
                new Field("min_version_level", Type.INT16, "The cluster-wide finalized min version level for the feature."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            null,
            null,
            null,
            SCHEMA_3
        };

        public static final short LOWEST_SUPPORTED_VERSION = 3;
        public static final short HIGHEST_SUPPORTED_VERSION = 3;

        public FinalizedFeatureKey(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        public FinalizedFeatureKey() {
            this.name = "";
            this.maxVersionLevel = (short) 0;
            this.minVersionLevel = (short) 0;
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        // NOTE(review): returns 0 although LOWEST_SUPPORTED_VERSION is 3 — generator
        // output, same caveat as SupportedFeatureKey.lowestSupportedVersion().
        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 3;
        }

        /** Reads a compact string, then max/min version levels, then the tagged-fields section. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of FinalizedFeatureKey");
            }
            {
                int length;
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            this.maxVersionLevel = _readable.readShort();
            this.minVersionLevel = _readable.readShort();
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            if (_version < 3) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of FinalizedFeatureKey");
            }
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(name);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeShort(maxVersionLevel);
            _writable.writeShort(minVersionLevel);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 3) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of FinalizedFeatureKey");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
            _size.addBytes(2);
            _size.addBytes(2);
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }

        /** Collection-key equality: only {@code name} participates. */
        @Override
        public boolean elementKeysAreEqual(Object obj) {
            if (!(obj instanceof FinalizedFeatureKey)) return false;
            FinalizedFeatureKey other = (FinalizedFeatureKey) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            return true;
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof FinalizedFeatureKey)) return false;
            FinalizedFeatureKey other = (FinalizedFeatureKey) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            if (maxVersionLevel != other.maxVersionLevel) return false;
            if (minVersionLevel != other.minVersionLevel) return false;
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            return hashCode;
        }

        @Override
        public FinalizedFeatureKey duplicate() {
            FinalizedFeatureKey _duplicate = new FinalizedFeatureKey();
            _duplicate.name = name;
            _duplicate.maxVersionLevel = maxVersionLevel;
            _duplicate.minVersionLevel = minVersionLevel;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "FinalizedFeatureKey("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", maxVersionLevel=" + maxVersionLevel
                + ", minVersionLevel=" + minVersionLevel
                + ")";
        }

        public String name() {
            return this.name;
        }

        public short maxVersionLevel() {
            return this.maxVersionLevel;
        }

        public short minVersionLevel() {
            return this.minVersionLevel;
        }

        @Override
        public int next() {
            return this.next;
        }

        @Override
        public int prev() {
            return this.prev;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public FinalizedFeatureKey setName(String v) {
            this.name = v;
            return this;
        }

        public FinalizedFeatureKey setMaxVersionLevel(short v) {
            this.maxVersionLevel = v;
            return this;
        }

        public FinalizedFeatureKey setMinVersionLevel(short v) {
            this.minVersionLevel = v;
            return this;
        }

        @Override
        public void setNext(int v) {
            this.next = v;
        }

        @Override
        public void setPrev(int v) {
            this.prev = v;
        }
    }

    /** Multiset of {@link FinalizedFeatureKey} entries, looked up by feature name. */
    public static class FinalizedFeatureKeyCollection extends ImplicitLinkedHashMultiCollection<FinalizedFeatureKey> {
        public FinalizedFeatureKeyCollection() {
            super();
        }

        public FinalizedFeatureKeyCollection(int expectedNumElements) {
            super(expectedNumElements);
        }

        public FinalizedFeatureKeyCollection(Iterator<FinalizedFeatureKey> iterator) {
            super(iterator);
        }

        public FinalizedFeatureKey find(String name) {
            FinalizedFeatureKey _key = new FinalizedFeatureKey();
            _key.setName(name);
            return find(_key);
        }

        public List<FinalizedFeatureKey> findAll(String name) {
            FinalizedFeatureKey _key = new FinalizedFeatureKey();
            _key.setName(name);
            return findAll(_key);
        }

        public FinalizedFeatureKeyCollection duplicate() {
            FinalizedFeatureKeyCollection _duplicate = new FinalizedFeatureKeyCollection(size());
            for (FinalizedFeatureKey _element : this) {
                _duplicate.add(_element.duplicate());
            }
            return _duplicate;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/ApiVersionsResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ArrayNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import com.fasterxml.jackson.databind.node.TextNode;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.ApiVersionsResponseData.*;

/**
 * Converts {@link ApiVersionsResponseData} to and from a Jackson JSON tree.
 * Generated by the Kafka message generator; mirrors the wire-schema versioning
 * rules (e.g. which fields are mandatory or even representable per version).
 */
public class ApiVersionsResponseDataJsonConverter {
    /**
     * Builds an ApiVersionsResponseData from a JSON object node.
     * Fields that are optional in the given version fall back to their
     * generated defaults when the JSON key is absent.
     */
    public static ApiVersionsResponseData read(JsonNode _node, short _version) {
        ApiVersionsResponseData _object = new ApiVersionsResponseData();
        // errorCode is mandatory in every version.
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("ApiVersionsResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "ApiVersionsResponseData");
        }
        // apiKeys is a mandatory array of per-API version ranges.
        JsonNode _apiKeysNode = _node.get("apiKeys");
        if (_apiKeysNode == null) {
            throw new RuntimeException("ApiVersionsResponseData: unable to locate field 'apiKeys', which is mandatory in version " + _version);
        } else {
            if (!_apiKeysNode.isArray()) {
                // NOTE(review): the message reports the enclosing object's node type
                // (_node.getNodeType()), not the offending field's — generator quirk.
                throw new RuntimeException("ApiVersionsResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            ApiVersionCollection _collection = new ApiVersionCollection(_apiKeysNode.size());
            _object.apiKeys = _collection;
            for (JsonNode _element : _apiKeysNode) {
                _collection.add(ApiVersionJsonConverter.read(_element, _version));
            }
        }
        // throttleTimeMs exists only from version 1 on; defaults to 0 in v0.
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            if (_version >= 1) {
                throw new RuntimeException("ApiVersionsResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
            } else {
                _object.throttleTimeMs = 0;
            }
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "ApiVersionsResponseData");
        }
        // supportedFeatures is optional in JSON; absence means an empty collection.
        JsonNode _supportedFeaturesNode = _node.get("supportedFeatures");
        if (_supportedFeaturesNode == null) {
            _object.supportedFeatures = new SupportedFeatureKeyCollection(0);
        } else {
            if (!_supportedFeaturesNode.isArray()) {
                throw new RuntimeException("ApiVersionsResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            SupportedFeatureKeyCollection _collection = new SupportedFeatureKeyCollection(_supportedFeaturesNode.size());
            _object.supportedFeatures = _collection;
            for (JsonNode _element : _supportedFeaturesNode) {
                _collection.add(SupportedFeatureKeyJsonConverter.read(_element, _version));
            }
        }
        // finalizedFeaturesEpoch defaults to -1L when absent (the same sentinel
        // write() uses to suppress the field).
        JsonNode _finalizedFeaturesEpochNode = _node.get("finalizedFeaturesEpoch");
        if (_finalizedFeaturesEpochNode == null) {
            _object.finalizedFeaturesEpoch = -1L;
        } else {
            _object.finalizedFeaturesEpoch = MessageUtil.jsonNodeToLong(_finalizedFeaturesEpochNode, "ApiVersionsResponseData");
        }
        // finalizedFeatures is optional; absence means an empty collection.
        JsonNode _finalizedFeaturesNode = _node.get("finalizedFeatures");
        if (_finalizedFeaturesNode == null) {
            _object.finalizedFeatures = new FinalizedFeatureKeyCollection(0);
        } else {
            if (!_finalizedFeaturesNode.isArray()) {
                throw new RuntimeException("ApiVersionsResponseData expected a JSON array, but got " + _node.getNodeType());
            }
            FinalizedFeatureKeyCollection _collection = new FinalizedFeatureKeyCollection(_finalizedFeaturesNode.size());
            _object.finalizedFeatures = _collection;
            for (JsonNode _element : _finalizedFeaturesNode) {
                _collection.add(FinalizedFeatureKeyJsonConverter.read(_element, _version));
            }
        }
        // zkMigrationReady defaults to false when absent.
        JsonNode _zkMigrationReadyNode = _node.get("zkMigrationReady");
        if (_zkMigrationReadyNode == null) {
            _object.zkMigrationReady = false;
        } else {
            if (!_zkMigrationReadyNode.isBoolean()) {
                throw new RuntimeException("ApiVersionsResponseData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.zkMigrationReady = _zkMigrationReadyNode.asBoolean();
        }
        return _object;
    }

    /**
     * Serializes an ApiVersionsResponseData to a JSON object node.
     * Version-gated fields are emitted only for versions that define them, and
     * default-valued optional fields are omitted entirely.
     */
    public static JsonNode write(ApiVersionsResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("errorCode", new ShortNode(_object.errorCode));
        ArrayNode _apiKeysArray = new ArrayNode(JsonNodeFactory.instance);
        for (ApiVersion _element : _object.apiKeys) {
            _apiKeysArray.add(ApiVersionJsonConverter.write(_element, _version, _serializeRecords));
        }
        _node.set("apiKeys", _apiKeysArray);
        if (_version >= 1) {
            _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        }
        if (_version >= 3) {
            // Skip the field entirely when it holds its default (empty).
            if (!_object.supportedFeatures.isEmpty()) {
                ArrayNode _supportedFeaturesArray = new ArrayNode(JsonNodeFactory.instance);
                for (SupportedFeatureKey _element : _object.supportedFeatures) {
                    _supportedFeaturesArray.add(SupportedFeatureKeyJsonConverter.write(_element, _version, _serializeRecords));
                }
                _node.set("supportedFeatures", _supportedFeaturesArray);
            }
        }
        if (_version >= 3) {
            // -1L is the "unset" sentinel, matching read()'s default.
            if (_object.finalizedFeaturesEpoch != -1L) {
                _node.set("finalizedFeaturesEpoch", new LongNode(_object.finalizedFeaturesEpoch));
            }
        }
        if (_version >= 3) {
            if (!_object.finalizedFeatures.isEmpty()) {
                ArrayNode _finalizedFeaturesArray = new ArrayNode(JsonNodeFactory.instance);
                for (FinalizedFeatureKey _element : _object.finalizedFeatures) {
                    _finalizedFeaturesArray.add(FinalizedFeatureKeyJsonConverter.write(_element, _version, _serializeRecords));
                }
                _node.set("finalizedFeatures", _finalizedFeaturesArray);
            }
        }
        if (_version >= 3) {
            // false is the default, so only emit when true.
            if (_object.zkMigrationReady) {
                _node.set("zkMigrationReady", BooleanNode.valueOf(_object.zkMigrationReady));
            }
        }
        return _node;
    }

    /** Convenience overload that always serializes embedded records. */
    public static JsonNode write(ApiVersionsResponseData _object, short _version) {
        return write(_object, _version, true);
    }

    /** JSON converter for the nested {@code ApiVersion} element (all fields mandatory). */
    public static class ApiVersionJsonConverter {
        public static ApiVersion read(JsonNode _node, short _version) {
            ApiVersion _object = new ApiVersion();
            JsonNode _apiKeyNode = _node.get("apiKey");
            if (_apiKeyNode == null) {
                throw new RuntimeException("ApiVersion: unable to locate field 'apiKey', which is mandatory in version " + _version);
            } else {
                _object.apiKey = MessageUtil.jsonNodeToShort(_apiKeyNode, "ApiVersion");
            }
            JsonNode _minVersionNode = _node.get("minVersion");
            if (_minVersionNode == null) {
                throw new RuntimeException("ApiVersion: unable to locate field 'minVersion', which is mandatory in version " + _version);
            } else {
                _object.minVersion = MessageUtil.jsonNodeToShort(_minVersionNode, "ApiVersion");
            }
            JsonNode _maxVersionNode = _node.get("maxVersion");
            if (_maxVersionNode == null) {
                throw new RuntimeException("ApiVersion: unable to locate field 'maxVersion', which is mandatory in version " + _version);
            } else {
                _object.maxVersion = MessageUtil.jsonNodeToShort(_maxVersionNode, "ApiVersion");
            }
            return _object;
        }

        public static JsonNode write(ApiVersion _object, short _version, boolean _serializeRecords) {
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("apiKey", new ShortNode(_object.apiKey));
            _node.set("minVersion", new ShortNode(_object.minVersion));
            _node.set("maxVersion", new ShortNode(_object.maxVersion));
            return _node;
        }

        public static JsonNode write(ApiVersion _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /**
     * JSON converter for the nested {@code FinalizedFeatureKey} element.
     * The element only exists in schema version 3 and above.
     */
    public static class FinalizedFeatureKeyJsonConverter {
        public static FinalizedFeatureKey read(JsonNode _node, short _version) {
            FinalizedFeatureKey _object = new FinalizedFeatureKey();
            if (_version < 3) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of FinalizedFeatureKey");
            }
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("FinalizedFeatureKey: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("FinalizedFeatureKey expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _maxVersionLevelNode = _node.get("maxVersionLevel");
            if (_maxVersionLevelNode == null) {
                throw new RuntimeException("FinalizedFeatureKey: unable to locate field 'maxVersionLevel', which is mandatory in version " + _version);
            } else {
                _object.maxVersionLevel = MessageUtil.jsonNodeToShort(_maxVersionLevelNode, "FinalizedFeatureKey");
            }
            JsonNode _minVersionLevelNode = _node.get("minVersionLevel");
            if (_minVersionLevelNode == null) {
                throw new RuntimeException("FinalizedFeatureKey: unable to locate field 'minVersionLevel', which is mandatory in version " + _version);
            } else {
                _object.minVersionLevel = MessageUtil.jsonNodeToShort(_minVersionLevelNode, "FinalizedFeatureKey");
            }
            return _object;
        }

        public static JsonNode write(FinalizedFeatureKey _object, short _version, boolean _serializeRecords) {
            if (_version < 3) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of FinalizedFeatureKey");
            }
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            _node.set("maxVersionLevel", new ShortNode(_object.maxVersionLevel));
            _node.set("minVersionLevel", new ShortNode(_object.minVersionLevel));
            return _node;
        }

        public static JsonNode write(FinalizedFeatureKey _object, short _version) {
            return write(_object, _version, true);
        }
    }

    /**
     * JSON converter for the nested {@code SupportedFeatureKey} element.
     * The element only exists in schema version 3 and above.
     */
    public static class SupportedFeatureKeyJsonConverter {
        public static SupportedFeatureKey read(JsonNode _node, short _version) {
            SupportedFeatureKey _object = new SupportedFeatureKey();
            if (_version < 3) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of SupportedFeatureKey");
            }
            JsonNode _nameNode = _node.get("name");
            if (_nameNode == null) {
                throw new RuntimeException("SupportedFeatureKey: unable to locate field 'name', which is mandatory in version " + _version);
            } else {
                if (!_nameNode.isTextual()) {
                    throw new RuntimeException("SupportedFeatureKey expected a string type, but got " + _node.getNodeType());
                }
                _object.name = _nameNode.asText();
            }
            JsonNode _minVersionNode = _node.get("minVersion");
            if (_minVersionNode == null) {
                throw new RuntimeException("SupportedFeatureKey: unable to locate field 'minVersion', which is mandatory in version " + _version);
            } else {
                _object.minVersion = MessageUtil.jsonNodeToShort(_minVersionNode, "SupportedFeatureKey");
            }
            JsonNode _maxVersionNode = _node.get("maxVersion");
            if (_maxVersionNode == null) {
                throw new RuntimeException("SupportedFeatureKey: unable to locate field 'maxVersion', which is mandatory in version " + _version);
            } else {
                _object.maxVersion = MessageUtil.jsonNodeToShort(_maxVersionNode, "SupportedFeatureKey");
            }
            return _object;
        }

        public static JsonNode write(SupportedFeatureKey _object, short _version, boolean _serializeRecords) {
            if (_version < 3) {
                throw new UnsupportedVersionException("Can't write version " + _version + " of SupportedFeatureKey");
            }
            ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
            _node.set("name", new TextNode(_object.name));
            _node.set("minVersion", new ShortNode(_object.minVersion));
            _node.set("maxVersion", new ShortNode(_object.maxVersion));
            return _node;
        }

        public static JsonNode write(SupportedFeatureKey _object, short _version) {
            return write(_object, _version, true);
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/BeginQuorumEpochRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.ArrayOf;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

/**
 * Wire-format model for the BeginQuorumEpoch request (API key 53).
 * Generated message class; only schema version 0 is defined, and version 0
 * does not support tagged fields (write/addSize throw if any are set).
 */
public class BeginQuorumEpochRequestData implements ApiMessage {
    // Nullable cluster id (serialized as a length -1 string when null).
    String clusterId;
    List<TopicData> topics;
    private List<RawTaggedField> _unknownTaggedFields;

    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("cluster_id", Type.NULLABLE_STRING, ""),
            new Field("topics", new ArrayOf(TopicData.SCHEMA_0), "")
        );

    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;

    /** Deserializing constructor: populates the object from the wire format. */
    public BeginQuorumEpochRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: null cluster id, empty topics list. */
    public BeginQuorumEpochRequestData() {
        this.clusterId = null;
        this.topics = new ArrayList<TopicData>(0);
    }

    @Override
    public short apiKey() {
        // 53 = BeginQuorumEpoch.
        return 53;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /** Reads this message from the wire format (version 0 layout). */
    @Override
    public void read(Readable _readable, short _version) {
        {
            // Nullable string: length -1 on the wire means null.
            int length;
            length = _readable.readShort();
            if (length < 0) {
                this.clusterId = null;
            } else if (length > 0x7fff) {
                throw new RuntimeException("string field clusterId had invalid length " + length);
            } else {
                this.clusterId = _readable.readString(length);
            }
        }
        {
            int arrayLength;
            arrayLength = _readable.readInt();
            if (arrayLength < 0) {
                throw new RuntimeException("non-nullable field topics was serialized as null");
            } else {
                // Guard against allocating a huge list from a corrupt length prefix.
                if (arrayLength > _readable.remaining()) {
                    throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                }
                ArrayList<TopicData> newCollection = new ArrayList<>(arrayLength);
                for (int i = 0; i < arrayLength; i++) {
                    newCollection.add(new TopicData(_readable, _version));
                }
                this.topics = newCollection;
            }
        }
        this._unknownTaggedFields = null;
    }

    /** Writes this message in the wire format; version 0 rejects tagged fields. */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (clusterId == null) {
            _writable.writeShort((short) -1);
        } else {
            // UTF-8 bytes were cached by a prior addSize() pass.
            byte[] _stringBytes = _cache.getSerializedValue(clusterId);
            _writable.writeShort((short) _stringBytes.length);
            _writable.writeByteArray(_stringBytes);
        }
        _writable.writeInt(topics.size());
        for (TopicData topicsElement : topics) {
            topicsElement.write(_writable, _cache, _version);
        }
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }

    /** Accumulates the serialized size and caches UTF-8 encodings for write(). */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        if (clusterId == null) {
            _size.addBytes(2);
        } else {
            byte[] _stringBytes = clusterId.getBytes(StandardCharsets.UTF_8);
            if (_stringBytes.length > 0x7fff) {
                throw new RuntimeException("'clusterId' field is too long to be serialized");
            }
            _cache.cacheSerializedValue(clusterId, _stringBytes);
            _size.addBytes(_stringBytes.length + 2);
        }
        {
            _size.addBytes(4);
            for (TopicData topicsElement : topics) {
                topicsElement.addSize(_size, _cache, _version);
            }
        }
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        if (_numTaggedFields > 0) {
            throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
        }
    }

    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof BeginQuorumEpochRequestData)) return false;
        BeginQuorumEpochRequestData other = (BeginQuorumEpochRequestData) obj;
        if (this.clusterId == null) {
            if (other.clusterId != null) return false;
        } else {
            if (!this.clusterId.equals(other.clusterId)) return false;
        }
        if (this.topics == null) {
            if (other.topics != null) return false;
        } else {
            if (!this.topics.equals(other.topics)) return false;
        }
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + (clusterId == null ? 0 : clusterId.hashCode());
        hashCode = 31 * hashCode + (topics == null ? 0 : topics.hashCode());
        return hashCode;
    }

    /** Deep copy (tagged fields are not duplicated, per the generated pattern). */
    @Override
    public BeginQuorumEpochRequestData duplicate() {
        BeginQuorumEpochRequestData _duplicate = new BeginQuorumEpochRequestData();
        if (clusterId == null) {
            _duplicate.clusterId = null;
        } else {
            _duplicate.clusterId = clusterId;
        }
        ArrayList<TopicData> newTopics = new ArrayList<TopicData>(topics.size());
        for (TopicData _element : topics) {
            newTopics.add(_element.duplicate());
        }
        _duplicate.topics = newTopics;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "BeginQuorumEpochRequestData("
            + "clusterId=" + ((clusterId == null) ? "null" : "'" + clusterId.toString() + "'")
            + ", topics=" + MessageUtil.deepToString(topics.iterator())
            + ")";
    }

    public String clusterId() {
        return this.clusterId;
    }

    public List<TopicData> topics() {
        return this.topics;
    }

    /** Lazily creates the tagged-field list on first access. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    public BeginQuorumEpochRequestData setClusterId(String v) {
        this.clusterId = v;
        return this;
    }

    public BeginQuorumEpochRequestData setTopics(List<TopicData> v) {
        this.topics = v;
        return this;
    }

    /** Per-topic payload: topic name plus its partition entries. */
    public static class TopicData implements Message {
        String topicName;
        List<PartitionData> partitions;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("topic_name", Type.STRING, "The topic name."),
                new Field("partitions", new ArrayOf(PartitionData.SCHEMA_0), "")
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 0;

        public TopicData(Readable _readable, short _version) {
            read(_readable, _version);
        }

        public TopicData() {
            this.topicName = "";
            this.partitions = new ArrayList<PartitionData>(0);
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 0;
        }

        /** Reads one topic entry; only version 0 exists. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of TopicData");
            }
            {
                // topic_name is a non-nullable string in this schema.
                int length;
                length = _readable.readShort();
                if (length < 0) {
                    throw new RuntimeException("non-nullable field topicName was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field topicName had invalid length " + length);
                } else {
                    this.topicName = _readable.readString(length);
                }
            }
            {
                int arrayLength;
                arrayLength = _readable.readInt();
                if (arrayLength < 0) {
                    throw new RuntimeException("non-nullable field partitions was serialized as null");
                } else {
                    // Guard against allocating a huge list from a corrupt length prefix.
                    if (arrayLength > _readable.remaining()) {
                        throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining.");
                    }
                    ArrayList<PartitionData> newCollection = new ArrayList<>(arrayLength);
                    for (int i = 0; i < arrayLength; i++) {
                        newCollection.add(new PartitionData(_readable, _version));
                    }
                    this.partitions = newCollection;
                }
            }
            this._unknownTaggedFields = null;
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(topicName);
                _writable.writeShort((short) _stringBytes.length);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeInt(partitions.size());
            for (PartitionData partitionsElement : partitions) {
                partitionsElement.write(_writable, _cache, _version);
            }
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of TopicData");
            }
            {
                byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'topicName' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(topicName, _stringBytes);
                _size.addBytes(_stringBytes.length + 2);
            }
            {
                _size.addBytes(4);
                for (PartitionData partitionsElement : partitions) {
                    partitionsElement.addSize(_size, _cache, _version);
                }
            }
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof TopicData)) return false;
            TopicData other = (TopicData) obj;
            if (this.topicName == null) {
                if (other.topicName != null) return false;
            } else {
                if (!this.topicName.equals(other.topicName)) return false;
            }
            if (this.partitions == null) {
                if (other.partitions != null) return false;
            } else {
                if (!this.partitions.equals(other.partitions)) return false;
            }
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode());
            hashCode = 31 * hashCode + (partitions == null ? 0 : partitions.hashCode());
            return hashCode;
        }

        @Override
        public TopicData duplicate() {
            TopicData _duplicate = new TopicData();
            _duplicate.topicName = topicName;
            ArrayList<PartitionData> newPartitions = new ArrayList<PartitionData>(partitions.size());
            for (PartitionData _element : partitions) {
                newPartitions.add(_element.duplicate());
            }
            _duplicate.partitions = newPartitions;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "TopicData("
                + "topicName=" + ((topicName == null) ? "null" : "'" + topicName.toString() + "'")
                + ", partitions=" + MessageUtil.deepToString(partitions.iterator())
                + ")";
        }

        public String topicName() {
            return this.topicName;
        }

        public List<PartitionData> partitions() {
            return this.partitions;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public TopicData setTopicName(String v) {
            this.topicName = v;
            return this;
        }

        public TopicData setPartitions(List<PartitionData> v) {
            this.partitions = v;
            return this;
        }
    }

    /** Per-partition payload: index plus the newly elected leader's id and epoch. */
    public static class PartitionData implements Message {
        int partitionIndex;
        int leaderId;
        int leaderEpoch;
        private List<RawTaggedField> _unknownTaggedFields;

        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("partition_index", Type.INT32, "The partition index."),
                new Field("leader_id", Type.INT32, "The ID of the newly elected leader"),
                new Field("leader_epoch", Type.INT32, "The epoch of the newly elected leader")
            );

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 0;

        public PartitionData(Readable _readable, short _version) {
            read(_readable, _version);
        }

        public PartitionData() {
            this.partitionIndex = 0;
            this.leaderId = 0;
            this.leaderEpoch = 0;
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 0;
        }

        /** Reads three fixed int32 fields; only version 0 exists. */
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of PartitionData");
            }
            this.partitionIndex = _readable.readInt();
            this.leaderId = _readable.readInt();
            this.leaderEpoch = _readable.readInt();
            this._unknownTaggedFields = null;
        }

        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            _writable.writeInt(partitionIndex);
            _writable.writeInt(leaderId);
            _writable.writeInt(leaderEpoch);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 0) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of PartitionData");
            }
            // Three fixed-width int32 fields.
            _size.addBytes(4);
            _size.addBytes(4);
            _size.addBytes(4);
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            if (_numTaggedFields > 0) {
                throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them.");
            }
        }

        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof PartitionData)) return false;
            PartitionData other = (PartitionData) obj;
            if (partitionIndex != other.partitionIndex) return false;
            if (leaderId != other.leaderId) return false;
            if (leaderEpoch != other.leaderEpoch) return false;
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + partitionIndex;
            hashCode = 31 * hashCode + leaderId;
            hashCode = 31 * hashCode + leaderEpoch;
            return hashCode;
        }

        @Override
        public PartitionData duplicate() {
            PartitionData _duplicate = new PartitionData();
            _duplicate.partitionIndex = partitionIndex;
            _duplicate.leaderId = leaderId;
            _duplicate.leaderEpoch = leaderEpoch;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "PartitionData("
                + "partitionIndex=" + partitionIndex
                + ", leaderId=" + leaderId
                + ", leaderEpoch=" + leaderEpoch
                + ")";
        }

        public int partitionIndex() {
            return this.partitionIndex;
        }

        public int leaderId() {
            return this.leaderId;
        }

        public int leaderEpoch() {
            return this.leaderEpoch;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public PartitionData setPartitionIndex(int v) {
            this.partitionIndex = v;
            return this;
        }

        public PartitionData setLeaderId(int v) {
            this.leaderId = v;
            return this;
        }

        public PartitionData setLeaderEpoch(int v) {
            this.leaderEpoch = v;
            return this;
        }
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/BeginQuorumEpochRequestDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.NullNode; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.BeginQuorumEpochRequestData.*; public class BeginQuorumEpochRequestDataJsonConverter { public static BeginQuorumEpochRequestData read(JsonNode _node, short _version) { BeginQuorumEpochRequestData _object = new BeginQuorumEpochRequestData(); JsonNode _clusterIdNode = _node.get("clusterId"); if (_clusterIdNode == null) { throw new RuntimeException("BeginQuorumEpochRequestData: unable to locate field 'clusterId', which is mandatory in version " + _version); } else { if (_clusterIdNode.isNull()) { _object.clusterId = null; } else { if (!_clusterIdNode.isTextual()) { throw new 
RuntimeException("BeginQuorumEpochRequestData expected a string type, but got " + _node.getNodeType()); } _object.clusterId = _clusterIdNode.asText(); } } JsonNode _topicsNode = _node.get("topics"); if (_topicsNode == null) { throw new RuntimeException("BeginQuorumEpochRequestData: unable to locate field 'topics', which is mandatory in version " + _version); } else { if (!_topicsNode.isArray()) { throw new RuntimeException("BeginQuorumEpochRequestData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<TopicData> _collection = new ArrayList<TopicData>(_topicsNode.size()); _object.topics = _collection; for (JsonNode _element : _topicsNode) { _collection.add(TopicDataJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(BeginQuorumEpochRequestData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); if (_object.clusterId == null) { _node.set("clusterId", NullNode.instance); } else { _node.set("clusterId", new TextNode(_object.clusterId)); } ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance); for (TopicData _element : _object.topics) { _topicsArray.add(TopicDataJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topics", _topicsArray); return _node; } public static JsonNode write(BeginQuorumEpochRequestData _object, short _version) { return write(_object, _version, true); } public static class PartitionDataJsonConverter { public static PartitionData read(JsonNode _node, short _version) { PartitionData _object = new PartitionData(); JsonNode _partitionIndexNode = _node.get("partitionIndex"); if (_partitionIndexNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'partitionIndex', which is mandatory in version " + _version); } else { _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "PartitionData"); } JsonNode _leaderIdNode = _node.get("leaderId"); if (_leaderIdNode == 
null) { throw new RuntimeException("PartitionData: unable to locate field 'leaderId', which is mandatory in version " + _version); } else { _object.leaderId = MessageUtil.jsonNodeToInt(_leaderIdNode, "PartitionData"); } JsonNode _leaderEpochNode = _node.get("leaderEpoch"); if (_leaderEpochNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'leaderEpoch', which is mandatory in version " + _version); } else { _object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "PartitionData"); } return _object; } public static JsonNode write(PartitionData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("partitionIndex", new IntNode(_object.partitionIndex)); _node.set("leaderId", new IntNode(_object.leaderId)); _node.set("leaderEpoch", new IntNode(_object.leaderEpoch)); return _node; } public static JsonNode write(PartitionData _object, short _version) { return write(_object, _version, true); } } public static class TopicDataJsonConverter { public static TopicData read(JsonNode _node, short _version) { TopicData _object = new TopicData(); JsonNode _topicNameNode = _node.get("topicName"); if (_topicNameNode == null) { throw new RuntimeException("TopicData: unable to locate field 'topicName', which is mandatory in version " + _version); } else { if (!_topicNameNode.isTextual()) { throw new RuntimeException("TopicData expected a string type, but got " + _node.getNodeType()); } _object.topicName = _topicNameNode.asText(); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("TopicData: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new RuntimeException("TopicData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<PartitionData> _collection = new ArrayList<PartitionData>(_partitionsNode.size()); 
_object.partitions = _collection; for (JsonNode _element : _partitionsNode) { _collection.add(PartitionDataJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(TopicData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("topicName", new TextNode(_object.topicName)); ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (PartitionData _element : _object.partitions) { _partitionsArray.add(PartitionDataJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitions", _partitionsArray); return _node; } public static JsonNode write(TopicData _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/BeginQuorumEpochResponseData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.ArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; public class BeginQuorumEpochResponseData implements ApiMessage { short errorCode; List<TopicData> topics; private List<RawTaggedField> _unknownTaggedFields; public static final Schema 
SCHEMA_0 = new Schema( new Field("error_code", Type.INT16, "The top level error code."), new Field("topics", new ArrayOf(TopicData.SCHEMA_0), "") ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public BeginQuorumEpochResponseData(Readable _readable, short _version) { read(_readable, _version); } public BeginQuorumEpochResponseData() { this.errorCode = (short) 0; this.topics = new ArrayList<TopicData>(0); } @Override public short apiKey() { return 53; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { this.errorCode = _readable.readShort(); { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field topics was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<TopicData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new TopicData(_readable, _version)); } this.topics = newCollection; } } this._unknownTaggedFields = null; } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeShort(errorCode); _writable.writeInt(topics.size()); for (TopicData topicsElement : topics) { topicsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support 
them."); } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(2); { _size.addBytes(4); for (TopicData topicsElement : topics) { topicsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public boolean equals(Object obj) { if (!(obj instanceof BeginQuorumEpochResponseData)) return false; BeginQuorumEpochResponseData other = (BeginQuorumEpochResponseData) obj; if (errorCode != other.errorCode) return false; if (this.topics == null) { if (other.topics != null) return false; } else { if (!this.topics.equals(other.topics)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + (topics == null ? 
0 : topics.hashCode()); return hashCode; } @Override public BeginQuorumEpochResponseData duplicate() { BeginQuorumEpochResponseData _duplicate = new BeginQuorumEpochResponseData(); _duplicate.errorCode = errorCode; ArrayList<TopicData> newTopics = new ArrayList<TopicData>(topics.size()); for (TopicData _element : topics) { newTopics.add(_element.duplicate()); } _duplicate.topics = newTopics; return _duplicate; } @Override public String toString() { return "BeginQuorumEpochResponseData(" + "errorCode=" + errorCode + ", topics=" + MessageUtil.deepToString(topics.iterator()) + ")"; } public short errorCode() { return this.errorCode; } public List<TopicData> topics() { return this.topics; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public BeginQuorumEpochResponseData setErrorCode(short v) { this.errorCode = v; return this; } public BeginQuorumEpochResponseData setTopics(List<TopicData> v) { this.topics = v; return this; } public static class TopicData implements Message { String topicName; List<PartitionData> partitions; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("topic_name", Type.STRING, "The topic name."), new Field("partitions", new ArrayOf(PartitionData.SCHEMA_0), "") ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public TopicData(Readable _readable, short _version) { read(_readable, _version); } public TopicData() { this.topicName = ""; this.partitions = new ArrayList<PartitionData>(0); } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new 
UnsupportedVersionException("Can't read version " + _version + " of TopicData"); } { int length; length = _readable.readShort(); if (length < 0) { throw new RuntimeException("non-nullable field topicName was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field topicName had invalid length " + length); } else { this.topicName = _readable.readString(length); } } { int arrayLength; arrayLength = _readable.readInt(); if (arrayLength < 0) { throw new RuntimeException("non-nullable field partitions was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ArrayList<PartitionData> newCollection = new ArrayList<>(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new PartitionData(_readable, _version)); } this.partitions = newCollection; } } this._unknownTaggedFields = null; } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; { byte[] _stringBytes = _cache.getSerializedValue(topicName); _writable.writeShort((short) _stringBytes.length); _writable.writeByteArray(_stringBytes); } _writable.writeInt(partitions.size()); for (PartitionData partitionsElement : partitions) { partitionsElement.write(_writable, _cache, _version); } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of TopicData"); } { 
byte[] _stringBytes = topicName.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'topicName' field is too long to be serialized"); } _cache.cacheSerializedValue(topicName, _stringBytes); _size.addBytes(_stringBytes.length + 2); } { _size.addBytes(4); for (PartitionData partitionsElement : partitions) { partitionsElement.addSize(_size, _cache, _version); } } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public boolean equals(Object obj) { if (!(obj instanceof TopicData)) return false; TopicData other = (TopicData) obj; if (this.topicName == null) { if (other.topicName != null) return false; } else { if (!this.topicName.equals(other.topicName)) return false; } if (this.partitions == null) { if (other.partitions != null) return false; } else { if (!this.partitions.equals(other.partitions)) return false; } return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + (topicName == null ? 0 : topicName.hashCode()); hashCode = 31 * hashCode + (partitions == null ? 
0 : partitions.hashCode()); return hashCode; } @Override public TopicData duplicate() { TopicData _duplicate = new TopicData(); _duplicate.topicName = topicName; ArrayList<PartitionData> newPartitions = new ArrayList<PartitionData>(partitions.size()); for (PartitionData _element : partitions) { newPartitions.add(_element.duplicate()); } _duplicate.partitions = newPartitions; return _duplicate; } @Override public String toString() { return "TopicData(" + "topicName=" + ((topicName == null) ? "null" : "'" + topicName.toString() + "'") + ", partitions=" + MessageUtil.deepToString(partitions.iterator()) + ")"; } public String topicName() { return this.topicName; } public List<PartitionData> partitions() { return this.partitions; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public TopicData setTopicName(String v) { this.topicName = v; return this; } public TopicData setPartitions(List<PartitionData> v) { this.partitions = v; return this; } } public static class PartitionData implements Message { int partitionIndex; short errorCode; int leaderId; int leaderEpoch; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("partition_index", Type.INT32, "The partition index."), new Field("error_code", Type.INT16, ""), new Field("leader_id", Type.INT32, "The ID of the current leader or -1 if the leader is unknown."), new Field("leader_epoch", Type.INT32, "The latest known leader epoch") ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 0; public PartitionData(Readable _readable, short _version) { read(_readable, _version); } public PartitionData() { this.partitionIndex = 0; this.errorCode = (short) 0; this.leaderId = 0; this.leaderEpoch = 0; } @Override public short 
lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 0; } @Override public void read(Readable _readable, short _version) { if (_version > 0) { throw new UnsupportedVersionException("Can't read version " + _version + " of PartitionData"); } this.partitionIndex = _readable.readInt(); this.errorCode = _readable.readShort(); this.leaderId = _readable.readInt(); this.leaderEpoch = _readable.readInt(); this._unknownTaggedFields = null; } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(partitionIndex); _writable.writeShort(errorCode); _writable.writeInt(leaderId); _writable.writeInt(leaderEpoch); RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; if (_version > 0) { throw new UnsupportedVersionException("Can't size version " + _version + " of PartitionData"); } _size.addBytes(4); _size.addBytes(2); _size.addBytes(4); _size.addBytes(4); if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } if (_numTaggedFields > 0) { throw new UnsupportedVersionException("Tagged fields were set, but version " + _version + " of this message does not support them."); } } @Override public boolean equals(Object obj) { if (!(obj instanceof PartitionData)) return false; PartitionData other = (PartitionData) obj; if (partitionIndex != 
other.partitionIndex) return false; if (errorCode != other.errorCode) return false; if (leaderId != other.leaderId) return false; if (leaderEpoch != other.leaderEpoch) return false; return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields); } @Override public int hashCode() { int hashCode = 0; hashCode = 31 * hashCode + partitionIndex; hashCode = 31 * hashCode + errorCode; hashCode = 31 * hashCode + leaderId; hashCode = 31 * hashCode + leaderEpoch; return hashCode; } @Override public PartitionData duplicate() { PartitionData _duplicate = new PartitionData(); _duplicate.partitionIndex = partitionIndex; _duplicate.errorCode = errorCode; _duplicate.leaderId = leaderId; _duplicate.leaderEpoch = leaderEpoch; return _duplicate; } @Override public String toString() { return "PartitionData(" + "partitionIndex=" + partitionIndex + ", errorCode=" + errorCode + ", leaderId=" + leaderId + ", leaderEpoch=" + leaderEpoch + ")"; } public int partitionIndex() { return this.partitionIndex; } public short errorCode() { return this.errorCode; } public int leaderId() { return this.leaderId; } public int leaderEpoch() { return this.leaderEpoch; } @Override public List<RawTaggedField> unknownTaggedFields() { if (_unknownTaggedFields == null) { _unknownTaggedFields = new ArrayList<>(0); } return _unknownTaggedFields; } public PartitionData setPartitionIndex(int v) { this.partitionIndex = v; return this; } public PartitionData setErrorCode(short v) { this.errorCode = v; return this; } public PartitionData setLeaderId(int v) { this.leaderId = v; return this; } public PartitionData setLeaderEpoch(int v) { this.leaderEpoch = v; return this; } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/BeginQuorumEpochResponseDataJsonConverter.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.IntNode; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; import com.fasterxml.jackson.databind.node.ShortNode; import com.fasterxml.jackson.databind.node.TextNode; import java.util.ArrayList; import org.apache.kafka.common.protocol.MessageUtil; import static org.apache.kafka.common.message.BeginQuorumEpochResponseData.*; public class BeginQuorumEpochResponseDataJsonConverter { public static BeginQuorumEpochResponseData read(JsonNode _node, short _version) { BeginQuorumEpochResponseData _object = new BeginQuorumEpochResponseData(); JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("BeginQuorumEpochResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "BeginQuorumEpochResponseData"); } JsonNode _topicsNode = _node.get("topics"); if 
(_topicsNode == null) { throw new RuntimeException("BeginQuorumEpochResponseData: unable to locate field 'topics', which is mandatory in version " + _version); } else { if (!_topicsNode.isArray()) { throw new RuntimeException("BeginQuorumEpochResponseData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<TopicData> _collection = new ArrayList<TopicData>(_topicsNode.size()); _object.topics = _collection; for (JsonNode _element : _topicsNode) { _collection.add(TopicDataJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(BeginQuorumEpochResponseData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("errorCode", new ShortNode(_object.errorCode)); ArrayNode _topicsArray = new ArrayNode(JsonNodeFactory.instance); for (TopicData _element : _object.topics) { _topicsArray.add(TopicDataJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("topics", _topicsArray); return _node; } public static JsonNode write(BeginQuorumEpochResponseData _object, short _version) { return write(_object, _version, true); } public static class PartitionDataJsonConverter { public static PartitionData read(JsonNode _node, short _version) { PartitionData _object = new PartitionData(); JsonNode _partitionIndexNode = _node.get("partitionIndex"); if (_partitionIndexNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'partitionIndex', which is mandatory in version " + _version); } else { _object.partitionIndex = MessageUtil.jsonNodeToInt(_partitionIndexNode, "PartitionData"); } JsonNode _errorCodeNode = _node.get("errorCode"); if (_errorCodeNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'errorCode', which is mandatory in version " + _version); } else { _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "PartitionData"); } JsonNode _leaderIdNode = _node.get("leaderId"); if 
(_leaderIdNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'leaderId', which is mandatory in version " + _version); } else { _object.leaderId = MessageUtil.jsonNodeToInt(_leaderIdNode, "PartitionData"); } JsonNode _leaderEpochNode = _node.get("leaderEpoch"); if (_leaderEpochNode == null) { throw new RuntimeException("PartitionData: unable to locate field 'leaderEpoch', which is mandatory in version " + _version); } else { _object.leaderEpoch = MessageUtil.jsonNodeToInt(_leaderEpochNode, "PartitionData"); } return _object; } public static JsonNode write(PartitionData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("partitionIndex", new IntNode(_object.partitionIndex)); _node.set("errorCode", new ShortNode(_object.errorCode)); _node.set("leaderId", new IntNode(_object.leaderId)); _node.set("leaderEpoch", new IntNode(_object.leaderEpoch)); return _node; } public static JsonNode write(PartitionData _object, short _version) { return write(_object, _version, true); } } public static class TopicDataJsonConverter { public static TopicData read(JsonNode _node, short _version) { TopicData _object = new TopicData(); JsonNode _topicNameNode = _node.get("topicName"); if (_topicNameNode == null) { throw new RuntimeException("TopicData: unable to locate field 'topicName', which is mandatory in version " + _version); } else { if (!_topicNameNode.isTextual()) { throw new RuntimeException("TopicData expected a string type, but got " + _node.getNodeType()); } _object.topicName = _topicNameNode.asText(); } JsonNode _partitionsNode = _node.get("partitions"); if (_partitionsNode == null) { throw new RuntimeException("TopicData: unable to locate field 'partitions', which is mandatory in version " + _version); } else { if (!_partitionsNode.isArray()) { throw new RuntimeException("TopicData expected a JSON array, but got " + _node.getNodeType()); } ArrayList<PartitionData> 
_collection = new ArrayList<PartitionData>(_partitionsNode.size()); _object.partitions = _collection; for (JsonNode _element : _partitionsNode) { _collection.add(PartitionDataJsonConverter.read(_element, _version)); } } return _object; } public static JsonNode write(TopicData _object, short _version, boolean _serializeRecords) { ObjectNode _node = new ObjectNode(JsonNodeFactory.instance); _node.set("topicName", new TextNode(_object.topicName)); ArrayNode _partitionsArray = new ArrayNode(JsonNodeFactory.instance); for (PartitionData _element : _object.partitions) { _partitionsArray.add(PartitionDataJsonConverter.write(_element, _version, _serializeRecords)); } _node.set("partitions", _partitionsArray); return _node; } public static JsonNode write(TopicData _object, short _version) { return write(_object, _version, true); } } }
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/BrokerHeartbeatRequestData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Request payload for the BrokerHeartbeat RPC (api key 63), version 0 only.
 * This class is generated by the Kafka message code generator; do not edit
 * by hand — changes will be overwritten on the next regeneration.
 */
public class BrokerHeartbeatRequestData implements ApiMessage {
    // The broker ID.
    int brokerId;
    // The broker epoch; defaults to -1L when unset.
    long brokerEpoch;
    // The highest metadata offset which the broker has reached.
    long currentMetadataOffset;
    // True if the broker wants to be fenced, false otherwise.
    boolean wantFence;
    // True if the broker wants to be shut down, false otherwise.
    boolean wantShutDown;
    // Tagged fields whose tags this version does not recognize; kept so they
    // can be re-serialized unchanged. Lazily allocated (null until needed).
    private List<RawTaggedField> _unknownTaggedFields;

    // Wire schema for version 0. Field order here defines serialization order.
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("broker_id", Type.INT32, "The broker ID."),
            new Field("broker_epoch", Type.INT64, "The broker epoch."),
            new Field("current_metadata_offset", Type.INT64, "The highest metadata offset which the broker has reached."),
            new Field("want_fence", Type.BOOLEAN, "True if the broker wants to be fenced, false otherwise."),
            new Field("want_shut_down", Type.BOOLEAN, "True if the broker wants to be shut down, false otherwise."),
            TaggedFieldsSection.of(
            )
        );

    // Schema lookup table indexed by version.
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;

    /** Deserializing constructor: populates the object from the given readable. */
    public BrokerHeartbeatRequestData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: initializes every field to its schema default. */
    public BrokerHeartbeatRequestData() {
        this.brokerId = 0;
        this.brokerEpoch = -1L;
        this.currentMetadataOffset = 0L;
        this.wantFence = false;
        this.wantShutDown = false;
    }

    @Override
    public short apiKey() {
        return 63;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /**
     * Reads the fixed fields in schema order, then the tagged-fields section.
     * Unrecognized tags are stashed in _unknownTaggedFields rather than dropped.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.brokerId = _readable.readInt();
        this.brokerEpoch = _readable.readLong();
        this.currentMetadataOffset = _readable.readLong();
        this.wantFence = _readable.readByte() != 0;
        this.wantShutDown = _readable.readByte() != 0;
        this._unknownTaggedFields = null;
        // Tagged-fields section: a varint count followed by (tag, size, bytes) triples.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    // No known tags in this version; preserve the raw bytes.
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /**
     * Writes the fixed fields in schema order, then the tagged-fields section
     * (including any unknown tags preserved from a prior read).
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(brokerId);
        _writable.writeLong(brokerEpoch);
        _writable.writeLong(currentMetadataOffset);
        _writable.writeByte(wantFence ? (byte) 1 : (byte) 0);
        _writable.writeByte(wantShutDown ? (byte) 1 : (byte) 0);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /**
     * Accumulates the serialized size of this message. Must mirror write()
     * exactly: int32=4, int64=8, boolean=1, plus the tagged-fields section.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(4);
        _size.addBytes(8);
        _size.addBytes(8);
        _size.addBytes(1);
        _size.addBytes(1);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    /** Field-by-field equality, including unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof BrokerHeartbeatRequestData)) return false;
        BrokerHeartbeatRequestData other = (BrokerHeartbeatRequestData) obj;
        if (brokerId != other.brokerId) return false;
        if (brokerEpoch != other.brokerEpoch) return false;
        if (currentMetadataOffset != other.currentMetadataOffset) return false;
        if (wantFence != other.wantFence) return false;
        if (wantShutDown != other.wantShutDown) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // Note: hashCode intentionally excludes _unknownTaggedFields, while equals
    // compares them; equal objects still hash equally, so the contract holds.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + brokerId;
        hashCode = 31 * hashCode + ((int) (brokerEpoch >> 32) ^ (int) brokerEpoch);
        hashCode = 31 * hashCode + ((int) (currentMetadataOffset >> 32) ^ (int) currentMetadataOffset);
        hashCode = 31 * hashCode + (wantFence ? 1231 : 1237);
        hashCode = 31 * hashCode + (wantShutDown ? 1231 : 1237);
        return hashCode;
    }

    /** Returns a copy of this message. Unknown tagged fields are not copied. */
    @Override
    public BrokerHeartbeatRequestData duplicate() {
        BrokerHeartbeatRequestData _duplicate = new BrokerHeartbeatRequestData();
        _duplicate.brokerId = brokerId;
        _duplicate.brokerEpoch = brokerEpoch;
        _duplicate.currentMetadataOffset = currentMetadataOffset;
        _duplicate.wantFence = wantFence;
        _duplicate.wantShutDown = wantShutDown;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "BrokerHeartbeatRequestData("
            + "brokerId=" + brokerId
            + ", brokerEpoch=" + brokerEpoch
            + ", currentMetadataOffset=" + currentMetadataOffset
            + ", wantFence=" + (wantFence ? "true" : "false")
            + ", wantShutDown=" + (wantShutDown ? "true" : "false")
            + ")";
    }

    public int brokerId() {
        return this.brokerId;
    }

    public long brokerEpoch() {
        return this.brokerEpoch;
    }

    public long currentMetadataOffset() {
        return this.currentMetadataOffset;
    }

    public boolean wantFence() {
        return this.wantFence;
    }

    public boolean wantShutDown() {
        return this.wantShutDown;
    }

    /** Returns the unknown tagged fields, allocating the list on first access. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // Fluent setters: each returns this to allow chained configuration.

    public BrokerHeartbeatRequestData setBrokerId(int v) {
        this.brokerId = v;
        return this;
    }

    public BrokerHeartbeatRequestData setBrokerEpoch(long v) {
        this.brokerEpoch = v;
        return this;
    }

    public BrokerHeartbeatRequestData setCurrentMetadataOffset(long v) {
        this.currentMetadataOffset = v;
        return this;
    }

    public BrokerHeartbeatRequestData setWantFence(boolean v) {
        this.wantFence = v;
        return this;
    }

    public BrokerHeartbeatRequestData setWantShutDown(boolean v) {
        this.wantShutDown = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/BrokerHeartbeatRequestDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.LongNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.BrokerHeartbeatRequestData.*;

/**
 * JSON converter for {@link BrokerHeartbeatRequestData}: maps the message to
 * and from a Jackson tree using camelCase field names. Generated code; do not
 * edit by hand.
 */
public class BrokerHeartbeatRequestDataJsonConverter {
    /**
     * Builds a BrokerHeartbeatRequestData from the given JSON node.
     * Every field is mandatory in version 0; a missing field, or a
     * non-boolean value for a boolean field, raises a RuntimeException.
     */
    public static BrokerHeartbeatRequestData read(JsonNode _node, short _version) {
        BrokerHeartbeatRequestData _object = new BrokerHeartbeatRequestData();
        JsonNode _brokerIdNode = _node.get("brokerId");
        if (_brokerIdNode == null) {
            throw new RuntimeException("BrokerHeartbeatRequestData: unable to locate field 'brokerId', which is mandatory in version " + _version);
        } else {
            _object.brokerId = MessageUtil.jsonNodeToInt(_brokerIdNode, "BrokerHeartbeatRequestData");
        }
        JsonNode _brokerEpochNode = _node.get("brokerEpoch");
        if (_brokerEpochNode == null) {
            throw new RuntimeException("BrokerHeartbeatRequestData: unable to locate field 'brokerEpoch', which is mandatory in version " + _version);
        } else {
            _object.brokerEpoch = MessageUtil.jsonNodeToLong(_brokerEpochNode, "BrokerHeartbeatRequestData");
        }
        JsonNode _currentMetadataOffsetNode = _node.get("currentMetadataOffset");
        if (_currentMetadataOffsetNode == null) {
            throw new RuntimeException("BrokerHeartbeatRequestData: unable to locate field 'currentMetadataOffset', which is mandatory in version " + _version);
        } else {
            _object.currentMetadataOffset = MessageUtil.jsonNodeToLong(_currentMetadataOffsetNode, "BrokerHeartbeatRequestData");
        }
        JsonNode _wantFenceNode = _node.get("wantFence");
        if (_wantFenceNode == null) {
            throw new RuntimeException("BrokerHeartbeatRequestData: unable to locate field 'wantFence', which is mandatory in version " + _version);
        } else {
            if (!_wantFenceNode.isBoolean()) {
                // NOTE(review): the error reports _node's (parent) node type, not the
                // offending child's — a quirk of the code generator.
                throw new RuntimeException("BrokerHeartbeatRequestData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.wantFence = _wantFenceNode.asBoolean();
        }
        JsonNode _wantShutDownNode = _node.get("wantShutDown");
        if (_wantShutDownNode == null) {
            throw new RuntimeException("BrokerHeartbeatRequestData: unable to locate field 'wantShutDown', which is mandatory in version " + _version);
        } else {
            if (!_wantShutDownNode.isBoolean()) {
                throw new RuntimeException("BrokerHeartbeatRequestData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.wantShutDown = _wantShutDownNode.asBoolean();
        }
        return _object;
    }

    /**
     * Serializes the message into a JSON object node.
     * _serializeRecords is unused here: this message contains no record data.
     */
    public static JsonNode write(BrokerHeartbeatRequestData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("brokerId", new IntNode(_object.brokerId));
        _node.set("brokerEpoch", new LongNode(_object.brokerEpoch));
        _node.set("currentMetadataOffset", new LongNode(_object.currentMetadataOffset));
        _node.set("wantFence", BooleanNode.valueOf(_object.wantFence));
        _node.set("wantShutDown", BooleanNode.valueOf(_object.wantShutDown));
        return _node;
    }

    /** Convenience overload: serializes with record data enabled. */
    public static JsonNode write(BrokerHeartbeatRequestData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/BrokerHeartbeatResponseData.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import java.util.ArrayList;
import java.util.List;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.MessageSizeAccumulator;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.protocol.Readable;
import org.apache.kafka.common.protocol.Writable;
import org.apache.kafka.common.protocol.types.Field;
import org.apache.kafka.common.protocol.types.RawTaggedField;
import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter;
import org.apache.kafka.common.protocol.types.Schema;
import org.apache.kafka.common.protocol.types.Type;
import org.apache.kafka.common.utils.ByteUtils;

import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection;

/**
 * Response payload for the BrokerHeartbeat RPC (api key 63), version 0 only.
 * This class is generated by the Kafka message code generator; do not edit
 * by hand — changes will be overwritten on the next regeneration.
 */
public class BrokerHeartbeatResponseData implements ApiMessage {
    // Duration in milliseconds for which the request was throttled due to a
    // quota violation, or zero if the request did not violate any quota.
    int throttleTimeMs;
    // The error code, or 0 if there was no error.
    short errorCode;
    // True if the broker has approximately caught up with the latest metadata.
    boolean isCaughtUp;
    // True if the broker is fenced. Defaults to true (fenced until told otherwise).
    boolean isFenced;
    // True if the broker should proceed with its shutdown.
    boolean shouldShutDown;
    // Tagged fields whose tags this version does not recognize; kept so they
    // can be re-serialized unchanged. Lazily allocated (null until needed).
    private List<RawTaggedField> _unknownTaggedFields;

    // Wire schema for version 0. Field order here defines serialization order.
    public static final Schema SCHEMA_0 =
        new Schema(
            new Field("throttle_time_ms", Type.INT32, "Duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota."),
            new Field("error_code", Type.INT16, "The error code, or 0 if there was no error."),
            new Field("is_caught_up", Type.BOOLEAN, "True if the broker has approximately caught up with the latest metadata."),
            new Field("is_fenced", Type.BOOLEAN, "True if the broker is fenced."),
            new Field("should_shut_down", Type.BOOLEAN, "True if the broker should proceed with its shutdown."),
            TaggedFieldsSection.of(
            )
        );

    // Schema lookup table indexed by version.
    public static final Schema[] SCHEMAS = new Schema[] {
        SCHEMA_0
    };

    public static final short LOWEST_SUPPORTED_VERSION = 0;
    public static final short HIGHEST_SUPPORTED_VERSION = 0;

    /** Deserializing constructor: populates the object from the given readable. */
    public BrokerHeartbeatResponseData(Readable _readable, short _version) {
        read(_readable, _version);
    }

    /** Default constructor: initializes every field to its schema default. */
    public BrokerHeartbeatResponseData() {
        this.throttleTimeMs = 0;
        this.errorCode = (short) 0;
        this.isCaughtUp = false;
        this.isFenced = true;
        this.shouldShutDown = false;
    }

    @Override
    public short apiKey() {
        return 63;
    }

    @Override
    public short lowestSupportedVersion() {
        return 0;
    }

    @Override
    public short highestSupportedVersion() {
        return 0;
    }

    /**
     * Reads the fixed fields in schema order, then the tagged-fields section.
     * Unrecognized tags are stashed in _unknownTaggedFields rather than dropped.
     */
    @Override
    public void read(Readable _readable, short _version) {
        this.throttleTimeMs = _readable.readInt();
        this.errorCode = _readable.readShort();
        this.isCaughtUp = _readable.readByte() != 0;
        this.isFenced = _readable.readByte() != 0;
        this.shouldShutDown = _readable.readByte() != 0;
        this._unknownTaggedFields = null;
        // Tagged-fields section: a varint count followed by (tag, size, bytes) triples.
        int _numTaggedFields = _readable.readUnsignedVarint();
        for (int _i = 0; _i < _numTaggedFields; _i++) {
            int _tag = _readable.readUnsignedVarint();
            int _size = _readable.readUnsignedVarint();
            switch (_tag) {
                default:
                    // No known tags in this version; preserve the raw bytes.
                    this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                    break;
            }
        }
    }

    /**
     * Writes the fixed fields in schema order, then the tagged-fields section
     * (including any unknown tags preserved from a prior read).
     */
    @Override
    public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _writable.writeInt(throttleTimeMs);
        _writable.writeShort(errorCode);
        _writable.writeByte(isCaughtUp ? (byte) 1 : (byte) 0);
        _writable.writeByte(isFenced ? (byte) 1 : (byte) 0);
        _writable.writeByte(shouldShutDown ? (byte) 1 : (byte) 0);
        RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
        _numTaggedFields += _rawWriter.numFields();
        _writable.writeUnsignedVarint(_numTaggedFields);
        _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
    }

    /**
     * Accumulates the serialized size of this message. Must mirror write()
     * exactly: int32=4, int16=2, boolean=1, plus the tagged-fields section.
     */
    @Override
    public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
        int _numTaggedFields = 0;
        _size.addBytes(4);
        _size.addBytes(2);
        _size.addBytes(1);
        _size.addBytes(1);
        _size.addBytes(1);
        if (_unknownTaggedFields != null) {
            _numTaggedFields += _unknownTaggedFields.size();
            for (RawTaggedField _field : _unknownTaggedFields) {
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                _size.addBytes(_field.size());
            }
        }
        _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
    }

    /** Field-by-field equality, including unknown tagged fields. */
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof BrokerHeartbeatResponseData)) return false;
        BrokerHeartbeatResponseData other = (BrokerHeartbeatResponseData) obj;
        if (throttleTimeMs != other.throttleTimeMs) return false;
        if (errorCode != other.errorCode) return false;
        if (isCaughtUp != other.isCaughtUp) return false;
        if (isFenced != other.isFenced) return false;
        if (shouldShutDown != other.shouldShutDown) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // Note: hashCode intentionally excludes _unknownTaggedFields, while equals
    // compares them; equal objects still hash equally, so the contract holds.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + throttleTimeMs;
        hashCode = 31 * hashCode + errorCode;
        hashCode = 31 * hashCode + (isCaughtUp ? 1231 : 1237);
        hashCode = 31 * hashCode + (isFenced ? 1231 : 1237);
        hashCode = 31 * hashCode + (shouldShutDown ? 1231 : 1237);
        return hashCode;
    }

    /** Returns a copy of this message. Unknown tagged fields are not copied. */
    @Override
    public BrokerHeartbeatResponseData duplicate() {
        BrokerHeartbeatResponseData _duplicate = new BrokerHeartbeatResponseData();
        _duplicate.throttleTimeMs = throttleTimeMs;
        _duplicate.errorCode = errorCode;
        _duplicate.isCaughtUp = isCaughtUp;
        _duplicate.isFenced = isFenced;
        _duplicate.shouldShutDown = shouldShutDown;
        return _duplicate;
    }

    @Override
    public String toString() {
        return "BrokerHeartbeatResponseData("
            + "throttleTimeMs=" + throttleTimeMs
            + ", errorCode=" + errorCode
            + ", isCaughtUp=" + (isCaughtUp ? "true" : "false")
            + ", isFenced=" + (isFenced ? "true" : "false")
            + ", shouldShutDown=" + (shouldShutDown ? "true" : "false")
            + ")";
    }

    public int throttleTimeMs() {
        return this.throttleTimeMs;
    }

    public short errorCode() {
        return this.errorCode;
    }

    public boolean isCaughtUp() {
        return this.isCaughtUp;
    }

    public boolean isFenced() {
        return this.isFenced;
    }

    public boolean shouldShutDown() {
        return this.shouldShutDown;
    }

    /** Returns the unknown tagged fields, allocating the list on first access. */
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // Fluent setters: each returns this to allow chained configuration.

    public BrokerHeartbeatResponseData setThrottleTimeMs(int v) {
        this.throttleTimeMs = v;
        return this;
    }

    public BrokerHeartbeatResponseData setErrorCode(short v) {
        this.errorCode = v;
        return this;
    }

    public BrokerHeartbeatResponseData setIsCaughtUp(boolean v) {
        this.isCaughtUp = v;
        return this;
    }

    public BrokerHeartbeatResponseData setIsFenced(boolean v) {
        this.isFenced = v;
        return this;
    }

    public BrokerHeartbeatResponseData setShouldShutDown(boolean v) {
        this.shouldShutDown = v;
        return this;
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/BrokerHeartbeatResponseDataJsonConverter.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT.

package org.apache.kafka.common.message;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.BooleanNode;
import com.fasterxml.jackson.databind.node.IntNode;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.ShortNode;
import org.apache.kafka.common.protocol.MessageUtil;

import static org.apache.kafka.common.message.BrokerHeartbeatResponseData.*;

/**
 * JSON converter for {@link BrokerHeartbeatResponseData}: maps the message to
 * and from a Jackson tree using camelCase field names. Generated code; do not
 * edit by hand.
 */
public class BrokerHeartbeatResponseDataJsonConverter {
    /**
     * Builds a BrokerHeartbeatResponseData from the given JSON node.
     * Every field is mandatory in version 0; a missing field, or a
     * non-boolean value for a boolean field, raises a RuntimeException.
     */
    public static BrokerHeartbeatResponseData read(JsonNode _node, short _version) {
        BrokerHeartbeatResponseData _object = new BrokerHeartbeatResponseData();
        JsonNode _throttleTimeMsNode = _node.get("throttleTimeMs");
        if (_throttleTimeMsNode == null) {
            throw new RuntimeException("BrokerHeartbeatResponseData: unable to locate field 'throttleTimeMs', which is mandatory in version " + _version);
        } else {
            _object.throttleTimeMs = MessageUtil.jsonNodeToInt(_throttleTimeMsNode, "BrokerHeartbeatResponseData");
        }
        JsonNode _errorCodeNode = _node.get("errorCode");
        if (_errorCodeNode == null) {
            throw new RuntimeException("BrokerHeartbeatResponseData: unable to locate field 'errorCode', which is mandatory in version " + _version);
        } else {
            _object.errorCode = MessageUtil.jsonNodeToShort(_errorCodeNode, "BrokerHeartbeatResponseData");
        }
        JsonNode _isCaughtUpNode = _node.get("isCaughtUp");
        if (_isCaughtUpNode == null) {
            throw new RuntimeException("BrokerHeartbeatResponseData: unable to locate field 'isCaughtUp', which is mandatory in version " + _version);
        } else {
            if (!_isCaughtUpNode.isBoolean()) {
                // NOTE(review): the error reports _node's (parent) node type, not the
                // offending child's — a quirk of the code generator.
                throw new RuntimeException("BrokerHeartbeatResponseData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.isCaughtUp = _isCaughtUpNode.asBoolean();
        }
        JsonNode _isFencedNode = _node.get("isFenced");
        if (_isFencedNode == null) {
            throw new RuntimeException("BrokerHeartbeatResponseData: unable to locate field 'isFenced', which is mandatory in version " + _version);
        } else {
            if (!_isFencedNode.isBoolean()) {
                throw new RuntimeException("BrokerHeartbeatResponseData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.isFenced = _isFencedNode.asBoolean();
        }
        JsonNode _shouldShutDownNode = _node.get("shouldShutDown");
        if (_shouldShutDownNode == null) {
            throw new RuntimeException("BrokerHeartbeatResponseData: unable to locate field 'shouldShutDown', which is mandatory in version " + _version);
        } else {
            if (!_shouldShutDownNode.isBoolean()) {
                throw new RuntimeException("BrokerHeartbeatResponseData expected Boolean type, but got " + _node.getNodeType());
            }
            _object.shouldShutDown = _shouldShutDownNode.asBoolean();
        }
        return _object;
    }

    /**
     * Serializes the message into a JSON object node.
     * _serializeRecords is unused here: this message contains no record data.
     */
    public static JsonNode write(BrokerHeartbeatResponseData _object, short _version, boolean _serializeRecords) {
        ObjectNode _node = new ObjectNode(JsonNodeFactory.instance);
        _node.set("throttleTimeMs", new IntNode(_object.throttleTimeMs));
        _node.set("errorCode", new ShortNode(_object.errorCode));
        _node.set("isCaughtUp", BooleanNode.valueOf(_object.isCaughtUp));
        _node.set("isFenced", BooleanNode.valueOf(_object.isFenced));
        _node.set("shouldShutDown", BooleanNode.valueOf(_object.shouldShutDown));
        return _node;
    }

    /** Convenience overload: serializes with record data enabled. */
    public static JsonNode write(BrokerHeartbeatResponseData _object, short _version) {
        return write(_object, _version, true);
    }
}
0
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common
java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/message/BrokerRegistrationRequestData.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // THIS CODE IS AUTOMATICALLY GENERATED. DO NOT EDIT. package org.apache.kafka.common.message; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.common.protocol.Message; import org.apache.kafka.common.protocol.MessageSizeAccumulator; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.Writable; import org.apache.kafka.common.protocol.types.CompactArrayOf; import org.apache.kafka.common.protocol.types.Field; import org.apache.kafka.common.protocol.types.RawTaggedField; import org.apache.kafka.common.protocol.types.RawTaggedFieldWriter; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; import org.apache.kafka.common.utils.ByteUtils; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import 
org.apache.kafka.common.utils.ImplicitLinkedHashMultiCollection; import static org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection; public class BrokerRegistrationRequestData implements ApiMessage { int brokerId; String clusterId; Uuid incarnationId; ListenerCollection listeners; FeatureCollection features; String rack; boolean isMigratingZkBroker; private List<RawTaggedField> _unknownTaggedFields; public static final Schema SCHEMA_0 = new Schema( new Field("broker_id", Type.INT32, "The broker ID."), new Field("cluster_id", Type.COMPACT_STRING, "The cluster id of the broker process."), new Field("incarnation_id", Type.UUID, "The incarnation id of the broker process."), new Field("listeners", new CompactArrayOf(Listener.SCHEMA_0), "The listeners of this broker"), new Field("features", new CompactArrayOf(Feature.SCHEMA_0), "The features on this broker"), new Field("rack", Type.COMPACT_NULLABLE_STRING, "The rack which this broker is in."), TaggedFieldsSection.of( ) ); public static final Schema SCHEMA_1 = new Schema( new Field("broker_id", Type.INT32, "The broker ID."), new Field("cluster_id", Type.COMPACT_STRING, "The cluster id of the broker process."), new Field("incarnation_id", Type.UUID, "The incarnation id of the broker process."), new Field("listeners", new CompactArrayOf(Listener.SCHEMA_0), "The listeners of this broker"), new Field("features", new CompactArrayOf(Feature.SCHEMA_0), "The features on this broker"), new Field("rack", Type.COMPACT_NULLABLE_STRING, "The rack which this broker is in."), new Field("is_migrating_zk_broker", Type.BOOLEAN, "If the required configurations for ZK migration are present, this value is set to true"), TaggedFieldsSection.of( ) ); public static final Schema[] SCHEMAS = new Schema[] { SCHEMA_0, SCHEMA_1 }; public static final short LOWEST_SUPPORTED_VERSION = 0; public static final short HIGHEST_SUPPORTED_VERSION = 1; public BrokerRegistrationRequestData(Readable _readable, short _version) { read(_readable, 
_version); } public BrokerRegistrationRequestData() { this.brokerId = 0; this.clusterId = ""; this.incarnationId = Uuid.ZERO_UUID; this.listeners = new ListenerCollection(0); this.features = new FeatureCollection(0); this.rack = ""; this.isMigratingZkBroker = false; } @Override public short apiKey() { return 62; } @Override public short lowestSupportedVersion() { return 0; } @Override public short highestSupportedVersion() { return 1; } @Override public void read(Readable _readable, short _version) { this.brokerId = _readable.readInt(); { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { throw new RuntimeException("non-nullable field clusterId was serialized as null"); } else if (length > 0x7fff) { throw new RuntimeException("string field clusterId had invalid length " + length); } else { this.clusterId = _readable.readString(length); } } this.incarnationId = _readable.readUuid(); { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field listeners was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } ListenerCollection newCollection = new ListenerCollection(arrayLength); for (int i = 0; i < arrayLength; i++) { newCollection.add(new Listener(_readable, _version)); } this.listeners = newCollection; } } { int arrayLength; arrayLength = _readable.readUnsignedVarint() - 1; if (arrayLength < 0) { throw new RuntimeException("non-nullable field features was serialized as null"); } else { if (arrayLength > _readable.remaining()) { throw new RuntimeException("Tried to allocate a collection of size " + arrayLength + ", but there are only " + _readable.remaining() + " bytes remaining."); } FeatureCollection newCollection = new FeatureCollection(arrayLength); for (int i = 0; i < arrayLength; 
i++) { newCollection.add(new Feature(_readable, _version)); } this.features = newCollection; } } { int length; length = _readable.readUnsignedVarint() - 1; if (length < 0) { this.rack = null; } else if (length > 0x7fff) { throw new RuntimeException("string field rack had invalid length " + length); } else { this.rack = _readable.readString(length); } } if (_version >= 1) { this.isMigratingZkBroker = _readable.readByte() != 0; } else { this.isMigratingZkBroker = false; } this._unknownTaggedFields = null; int _numTaggedFields = _readable.readUnsignedVarint(); for (int _i = 0; _i < _numTaggedFields; _i++) { int _tag = _readable.readUnsignedVarint(); int _size = _readable.readUnsignedVarint(); switch (_tag) { default: this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size); break; } } } @Override public void write(Writable _writable, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _writable.writeInt(brokerId); { byte[] _stringBytes = _cache.getSerializedValue(clusterId); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } _writable.writeUuid(incarnationId); _writable.writeUnsignedVarint(listeners.size() + 1); for (Listener listenersElement : listeners) { listenersElement.write(_writable, _cache, _version); } _writable.writeUnsignedVarint(features.size() + 1); for (Feature featuresElement : features) { featuresElement.write(_writable, _cache, _version); } if (rack == null) { _writable.writeUnsignedVarint(0); } else { byte[] _stringBytes = _cache.getSerializedValue(rack); _writable.writeUnsignedVarint(_stringBytes.length + 1); _writable.writeByteArray(_stringBytes); } if (_version >= 1) { _writable.writeByte(isMigratingZkBroker ? 
(byte) 1 : (byte) 0); } else { if (this.isMigratingZkBroker) { throw new UnsupportedVersionException("Attempted to write a non-default isMigratingZkBroker at version " + _version); } } RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields); _numTaggedFields += _rawWriter.numFields(); _writable.writeUnsignedVarint(_numTaggedFields); _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE); } @Override public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) { int _numTaggedFields = 0; _size.addBytes(4); { byte[] _stringBytes = clusterId.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'clusterId' field is too long to be serialized"); } _cache.cacheSerializedValue(clusterId, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } _size.addBytes(16); { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(listeners.size() + 1)); for (Listener listenersElement : listeners) { listenersElement.addSize(_size, _cache, _version); } } { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(features.size() + 1)); for (Feature featuresElement : features) { featuresElement.addSize(_size, _cache, _version); } } if (rack == null) { _size.addBytes(1); } else { byte[] _stringBytes = rack.getBytes(StandardCharsets.UTF_8); if (_stringBytes.length > 0x7fff) { throw new RuntimeException("'rack' field is too long to be serialized"); } _cache.cacheSerializedValue(rack, _stringBytes); _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1)); } if (_version >= 1) { _size.addBytes(1); } if (_unknownTaggedFields != null) { _numTaggedFields += _unknownTaggedFields.size(); for (RawTaggedField _field : _unknownTaggedFields) { _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag())); _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size())); _size.addBytes(_field.size()); } } 
_size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields)); // tail of addSize(): account for the varint-encoded tagged-field count
    }

    // Structural equality over all fields, null-safe for the nullable fields
    // (clusterId, listeners, features, rack); also compares unknown raw tagged fields.
    @Override
    public boolean equals(Object obj) {
        if (!(obj instanceof BrokerRegistrationRequestData)) return false;
        BrokerRegistrationRequestData other = (BrokerRegistrationRequestData) obj;
        if (brokerId != other.brokerId) return false;
        if (this.clusterId == null) {
            if (other.clusterId != null) return false;
        } else {
            if (!this.clusterId.equals(other.clusterId)) return false;
        }
        if (!this.incarnationId.equals(other.incarnationId)) return false;
        if (this.listeners == null) {
            if (other.listeners != null) return false;
        } else {
            if (!this.listeners.equals(other.listeners)) return false;
        }
        if (this.features == null) {
            if (other.features != null) return false;
        } else {
            if (!this.features.equals(other.features)) return false;
        }
        if (this.rack == null) {
            if (other.rack != null) return false;
        } else {
            if (!this.rack.equals(other.rack)) return false;
        }
        if (isMigratingZkBroker != other.isMigratingZkBroker) return false;
        return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
    }

    // Hash over the same fields as equals() (unknown tagged fields excluded).
    // 1231/1237 are the standard Boolean.hashCode() constants.
    @Override
    public int hashCode() {
        int hashCode = 0;
        hashCode = 31 * hashCode + brokerId;
        hashCode = 31 * hashCode + (clusterId == null ? 0 : clusterId.hashCode());
        hashCode = 31 * hashCode + incarnationId.hashCode();
        hashCode = 31 * hashCode + (listeners == null ? 0 : listeners.hashCode());
        hashCode = 31 * hashCode + (features == null ? 0 : features.hashCode());
        hashCode = 31 * hashCode + (rack == null ? 0 : rack.hashCode());
        hashCode = 31 * hashCode + (isMigratingZkBroker ? 1231 : 1237);
        return hashCode;
    }

    // Deep copy: the listener/feature collections are rebuilt element-by-element;
    // immutable/scalar fields (strings, Uuid, primitives) are shared/copied directly.
    // NOTE(review): unknown raw tagged fields are not copied — generator convention.
    @Override
    public BrokerRegistrationRequestData duplicate() {
        BrokerRegistrationRequestData _duplicate = new BrokerRegistrationRequestData();
        _duplicate.brokerId = brokerId;
        _duplicate.clusterId = clusterId;
        _duplicate.incarnationId = incarnationId;
        ListenerCollection newListeners = new ListenerCollection(listeners.size());
        for (Listener _element : listeners) {
            newListeners.add(_element.duplicate());
        }
        _duplicate.listeners = newListeners;
        FeatureCollection newFeatures = new FeatureCollection(features.size());
        for (Feature _element : features) {
            newFeatures.add(_element.duplicate());
        }
        _duplicate.features = newFeatures;
        if (rack == null) {
            _duplicate.rack = null;
        } else {
            _duplicate.rack = rack;
        }
        _duplicate.isMigratingZkBroker = isMigratingZkBroker;
        return _duplicate;
    }

    // Human-readable rendering; nullable strings print as null, non-null strings quoted.
    @Override
    public String toString() {
        return "BrokerRegistrationRequestData("
            + "brokerId=" + brokerId
            + ", clusterId=" + ((clusterId == null) ? "null" : "'" + clusterId.toString() + "'")
            + ", incarnationId=" + incarnationId.toString()
            + ", listeners=" + MessageUtil.deepToString(listeners.iterator())
            + ", features=" + MessageUtil.deepToString(features.iterator())
            + ", rack=" + ((rack == null) ? "null" : "'" + rack.toString() + "'")
            + ", isMigratingZkBroker=" + (isMigratingZkBroker ? "true" : "false")
            + ")";
    }

    // --- plain field accessors ---

    public int brokerId() {
        return this.brokerId;
    }

    public String clusterId() {
        return this.clusterId;
    }

    public Uuid incarnationId() {
        return this.incarnationId;
    }

    public ListenerCollection listeners() {
        return this.listeners;
    }

    public FeatureCollection features() {
        return this.features;
    }

    public String rack() {
        return this.rack;
    }

    public boolean isMigratingZkBroker() {
        return this.isMigratingZkBroker;
    }

    // Lazily materializes the unknown-tagged-field list so callers can append to it.
    @Override
    public List<RawTaggedField> unknownTaggedFields() {
        if (_unknownTaggedFields == null) {
            _unknownTaggedFields = new ArrayList<>(0);
        }
        return _unknownTaggedFields;
    }

    // --- fluent setters (return this for chaining) ---

    public BrokerRegistrationRequestData setBrokerId(int v) {
        this.brokerId = v;
        return this;
    }

    public BrokerRegistrationRequestData setClusterId(String v) {
        this.clusterId = v;
        return this;
    }

    public BrokerRegistrationRequestData setIncarnationId(Uuid v) {
        this.incarnationId = v;
        return this;
    }

    public BrokerRegistrationRequestData setListeners(ListenerCollection v) {
        this.listeners = v;
        return this;
    }

    public BrokerRegistrationRequestData setFeatures(FeatureCollection v) {
        this.features = v;
        return this;
    }

    public BrokerRegistrationRequestData setRack(String v) {
        this.rack = v;
        return this;
    }

    public BrokerRegistrationRequestData setIsMigratingZkBroker(boolean v) {
        this.isMigratingZkBroker = v;
        return this;
    }

    /**
     * One endpoint the registering broker listens on. Implements the
     * ImplicitLinkedHashMultiCollection.Element protocol (next/prev indices) so
     * it can live inside ListenerCollection, keyed by {@code name}
     * (see elementKeysAreEqual/hashCode below).
     */
    public static class Listener implements Message, ImplicitLinkedHashMultiCollection.Element {
        String name;
        String host;
        int port;                 // wire type is UINT16, so held as int (see setPort range check)
        short securityProtocol;
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;         // intrusive-collection link index
        private int prev;         // intrusive-collection link index

        // Wire schemas; version 1 is identical to version 0 for this struct.
        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("name", Type.COMPACT_STRING, "The name of the endpoint."),
                new Field("host", Type.COMPACT_STRING, "The hostname."),
                new Field("port", Type.UINT16, "The port."),
                new Field("security_protocol", Type.INT16, "The security protocol."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 1;

        // Deserializing constructor: parses the given version from the readable.
        public Listener(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        // Default constructor: field defaults per the message spec.
        public Listener() {
            this.name = "";
            this.host = "";
            this.port = 0;
            this.securityProtocol = (short) 0;
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        // Reads the compact (flexible-version) encoding: strings are length-prefixed
        // with unsignedVarint(length + 1); unknown tagged fields are retained.
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of Listener");
            }
            {
                int length;
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            {
                int length;
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    throw new RuntimeException("non-nullable field host was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field host had invalid length " + length);
                } else {
                    this.host = _readable.readString(length);
                }
            }
            this.port = _readable.readUnsignedShort();
            this.securityProtocol = _readable.readShort();
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        // Writes the compact encoding; string bytes must already be in _cache
        // (populated by a prior addSize() pass). Unknown tagged fields are re-emitted.
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(name);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            {
                byte[] _stringBytes = _cache.getSerializedValue(host);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeUnsignedShort(port);
            _writable.writeShort(securityProtocol);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        // Computes the serialized size and caches UTF-8 string bytes for write().
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of Listener");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
            {
                byte[] _stringBytes = host.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'host' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(host, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
            _size.addBytes(2);  // port (UINT16)
            _size.addBytes(2);  // securityProtocol (INT16)
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }

        // Collection key equality: listeners are keyed by name only.
        @Override
        public boolean elementKeysAreEqual(Object obj) {
            if (!(obj instanceof Listener)) return false;
            Listener other = (Listener) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            return true;
        }

        // Full structural equality over all fields plus raw tagged fields.
        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof Listener)) return false;
            Listener other = (Listener) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            if (this.host == null) {
                if (other.host != null) return false;
            } else {
                if (!this.host.equals(other.host)) return false;
            }
            if (port != other.port) return false;
            if (securityProtocol != other.securityProtocol) return false;
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        // Hash over the key field (name) only, matching elementKeysAreEqual so the
        // multi-collection can bucket by key.
        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            return hashCode;
        }

        // Copies the declared fields; unknown tagged fields and link indices are not copied.
        @Override
        public Listener duplicate() {
            Listener _duplicate = new Listener();
            _duplicate.name = name;
            _duplicate.host = host;
            _duplicate.port = port;
            _duplicate.securityProtocol = securityProtocol;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "Listener("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", host=" + ((host == null) ? "null" : "'" + host.toString() + "'")
                + ", port=" + port
                + ", securityProtocol=" + securityProtocol
                + ")";
        }

        public String name() {
            return this.name;
        }

        public String host() {
            return this.host;
        }

        public int port() {
            return this.port;
        }

        public short securityProtocol() {
            return this.securityProtocol;
        }

        @Override
        public int next() {
            return this.next;
        }

        @Override
        public int prev() {
            return this.prev;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public Listener setName(String v) {
            this.name = v;
            return this;
        }

        public Listener setHost(String v) {
            this.host = v;
            return this;
        }

        // Validates the UINT16 wire range before storing.
        public Listener setPort(int v) {
            if (v < 0 || v > 65535) {
                throw new RuntimeException("Invalid value " + v + " for unsigned short field.");
            }
            this.port = v;
            return this;
        }

        public Listener setSecurityProtocol(short v) {
            this.securityProtocol = v;
            return this;
        }

        @Override
        public void setNext(int v) {
            this.next = v;
        }

        @Override
        public void setPrev(int v) {
            this.prev = v;
        }
    }

    /**
     * Multi-collection of Listener elements, keyed by listener name
     * (duplicates with the same name are allowed — hence "multi").
     */
    public static class ListenerCollection extends ImplicitLinkedHashMultiCollection<Listener> {
        public ListenerCollection() {
            super();
        }

        public ListenerCollection(int expectedNumElements) {
            super(expectedNumElements);
        }

        public ListenerCollection(Iterator<Listener> iterator) {
            super(iterator);
        }

        // Finds one listener by name (via a throwaway key element).
        public Listener find(String name) {
            Listener _key = new Listener();
            _key.setName(name);
            return find(_key);
        }

        // Finds all listeners sharing the given name.
        public List<Listener> findAll(String name) {
            Listener _key = new Listener();
            _key.setName(name);
            return findAll(_key);
        }

        public ListenerCollection duplicate() {
            ListenerCollection _duplicate = new ListenerCollection(size());
            for (Listener _element : this) {
                _duplicate.add(_element.duplicate());
            }
            return _duplicate;
        }
    }

    /**
     * One feature the registering broker supports, with its min/max supported
     * levels. Collection element keyed by {@code name}, mirroring Listener above.
     */
    public static class Feature implements Message, ImplicitLinkedHashMultiCollection.Element {
        String name;
        short minSupportedVersion;
        short maxSupportedVersion;
        private List<RawTaggedField> _unknownTaggedFields;
        private int next;         // intrusive-collection link index
        private int prev;         // intrusive-collection link index

        // Wire schemas; version 1 is identical to version 0 for this struct.
        public static final Schema SCHEMA_0 =
            new Schema(
                new Field("name", Type.COMPACT_STRING, "The feature name."),
                new Field("min_supported_version", Type.INT16, "The minimum supported feature level."),
                new Field("max_supported_version", Type.INT16, "The maximum supported feature level."),
                TaggedFieldsSection.of(
                )
            );

        public static final Schema SCHEMA_1 = SCHEMA_0;

        public static final Schema[] SCHEMAS = new Schema[] {
            SCHEMA_0,
            SCHEMA_1
        };

        public static final short LOWEST_SUPPORTED_VERSION = 0;
        public static final short HIGHEST_SUPPORTED_VERSION = 1;

        // Deserializing constructor.
        public Feature(Readable _readable, short _version) {
            read(_readable, _version);
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        // Default constructor: field defaults per the message spec.
        public Feature() {
            this.name = "";
            this.minSupportedVersion = (short) 0;
            this.maxSupportedVersion = (short) 0;
            this.prev = ImplicitLinkedHashCollection.INVALID_INDEX;
            this.next = ImplicitLinkedHashCollection.INVALID_INDEX;
        }

        @Override
        public short lowestSupportedVersion() {
            return 0;
        }

        @Override
        public short highestSupportedVersion() {
            return 1;
        }

        // Reads the compact encoding (varint-prefixed name, two INT16 levels,
        // then tagged fields).
        @Override
        public void read(Readable _readable, short _version) {
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't read version " + _version + " of Feature");
            }
            {
                int length;
                length = _readable.readUnsignedVarint() - 1;
                if (length < 0) {
                    throw new RuntimeException("non-nullable field name was serialized as null");
                } else if (length > 0x7fff) {
                    throw new RuntimeException("string field name had invalid length " + length);
                } else {
                    this.name = _readable.readString(length);
                }
            }
            this.minSupportedVersion = _readable.readShort();
            this.maxSupportedVersion = _readable.readShort();
            this._unknownTaggedFields = null;
            int _numTaggedFields = _readable.readUnsignedVarint();
            for (int _i = 0; _i < _numTaggedFields; _i++) {
                int _tag = _readable.readUnsignedVarint();
                int _size = _readable.readUnsignedVarint();
                switch (_tag) {
                    default:
                        this._unknownTaggedFields = _readable.readUnknownTaggedField(this._unknownTaggedFields, _tag, _size);
                        break;
                }
            }
        }

        // Writes the compact encoding; name bytes come from the cache populated by addSize().
        @Override
        public void write(Writable _writable, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            {
                byte[] _stringBytes = _cache.getSerializedValue(name);
                _writable.writeUnsignedVarint(_stringBytes.length + 1);
                _writable.writeByteArray(_stringBytes);
            }
            _writable.writeShort(minSupportedVersion);
            _writable.writeShort(maxSupportedVersion);
            RawTaggedFieldWriter _rawWriter = RawTaggedFieldWriter.forFields(_unknownTaggedFields);
            _numTaggedFields += _rawWriter.numFields();
            _writable.writeUnsignedVarint(_numTaggedFields);
            _rawWriter.writeRawTags(_writable, Integer.MAX_VALUE);
        }

        // Computes the serialized size and caches the UTF-8 name bytes for write().
        @Override
        public void addSize(MessageSizeAccumulator _size, ObjectSerializationCache _cache, short _version) {
            int _numTaggedFields = 0;
            if (_version > 1) {
                throw new UnsupportedVersionException("Can't size version " + _version + " of Feature");
            }
            {
                byte[] _stringBytes = name.getBytes(StandardCharsets.UTF_8);
                if (_stringBytes.length > 0x7fff) {
                    throw new RuntimeException("'name' field is too long to be serialized");
                }
                _cache.cacheSerializedValue(name, _stringBytes);
                _size.addBytes(_stringBytes.length + ByteUtils.sizeOfUnsignedVarint(_stringBytes.length + 1));
            }
            _size.addBytes(2);  // minSupportedVersion (INT16)
            _size.addBytes(2);  // maxSupportedVersion (INT16)
            if (_unknownTaggedFields != null) {
                _numTaggedFields += _unknownTaggedFields.size();
                for (RawTaggedField _field : _unknownTaggedFields) {
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.tag()));
                    _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_field.size()));
                    _size.addBytes(_field.size());
                }
            }
            _size.addBytes(ByteUtils.sizeOfUnsignedVarint(_numTaggedFields));
        }

        // Collection key equality: features are keyed by name only.
        @Override
        public boolean elementKeysAreEqual(Object obj) {
            if (!(obj instanceof Feature)) return false;
            Feature other = (Feature) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            return true;
        }

        // Full structural equality over all fields plus raw tagged fields.
        @Override
        public boolean equals(Object obj) {
            if (!(obj instanceof Feature)) return false;
            Feature other = (Feature) obj;
            if (this.name == null) {
                if (other.name != null) return false;
            } else {
                if (!this.name.equals(other.name)) return false;
            }
            if (minSupportedVersion != other.minSupportedVersion) return false;
            if (maxSupportedVersion != other.maxSupportedVersion) return false;
            return MessageUtil.compareRawTaggedFields(_unknownTaggedFields, other._unknownTaggedFields);
        }

        // Hash over the key field (name) only, matching elementKeysAreEqual.
        @Override
        public int hashCode() {
            int hashCode = 0;
            hashCode = 31 * hashCode + (name == null ? 0 : name.hashCode());
            return hashCode;
        }

        // Copies the declared fields; unknown tagged fields and link indices are not copied.
        @Override
        public Feature duplicate() {
            Feature _duplicate = new Feature();
            _duplicate.name = name;
            _duplicate.minSupportedVersion = minSupportedVersion;
            _duplicate.maxSupportedVersion = maxSupportedVersion;
            return _duplicate;
        }

        @Override
        public String toString() {
            return "Feature("
                + "name=" + ((name == null) ? "null" : "'" + name.toString() + "'")
                + ", minSupportedVersion=" + minSupportedVersion
                + ", maxSupportedVersion=" + maxSupportedVersion
                + ")";
        }

        public String name() {
            return this.name;
        }

        public short minSupportedVersion() {
            return this.minSupportedVersion;
        }

        public short maxSupportedVersion() {
            return this.maxSupportedVersion;
        }

        @Override
        public int next() {
            return this.next;
        }

        @Override
        public int prev() {
            return this.prev;
        }

        @Override
        public List<RawTaggedField> unknownTaggedFields() {
            if (_unknownTaggedFields == null) {
                _unknownTaggedFields = new ArrayList<>(0);
            }
            return _unknownTaggedFields;
        }

        public Feature setName(String v) {
            this.name = v;
            return this;
        }

        public Feature setMinSupportedVersion(short v) {
            this.minSupportedVersion = v;
            return this;
        }

        public Feature setMaxSupportedVersion(short v) {
            this.maxSupportedVersion = v;
            return this;
        }

        @Override
        public void setNext(int v) {
            this.next = v;
        }

        @Override
        public void setPrev(int v) {
            this.prev = v;
        }
    }

    /**
     * Multi-collection of Feature elements, keyed by feature name.
     */
    public static class FeatureCollection extends ImplicitLinkedHashMultiCollection<Feature> {
        public FeatureCollection() {
            super();
        }

        public FeatureCollection(int expectedNumElements) {
            super(expectedNumElements);
        }

        public FeatureCollection(Iterator<Feature> iterator) {
            super(iterator);
        }

        // Finds one feature by name (via a throwaway key element).
        public Feature find(String name) {
            Feature _key = new Feature();
            _key.setName(name);
            return find(_key);
        }

        // Finds all features sharing the given name.
        public List<Feature> findAll(String name) {
            Feature _key = new Feature();
            _key.setName(name);
            return findAll(_key);
        }

        public FeatureCollection duplicate() {
            FeatureCollection _duplicate = new FeatureCollection(size());
            for (Feature _element : this) {
                _duplicate.add(_element.duplicate());
            }
            return _duplicate;
        }
    }
}