index int64 | repo_id string | file_path string | content string |
|---|---|---|---|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AlterClientQuotasResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import org.apache.kafka.common.message.AlterClientQuotasResponseData;
import org.apache.kafka.common.message.AlterClientQuotasResponseData.EntityData;
import org.apache.kafka.common.message.AlterClientQuotasResponseData.EntryData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.quota.ClientQuotaEntity;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class AlterClientQuotasResponse extends AbstractResponse {
private final AlterClientQuotasResponseData data;
public AlterClientQuotasResponse(AlterClientQuotasResponseData data) {
super(ApiKeys.ALTER_CLIENT_QUOTAS);
this.data = data;
}
public void complete(Map<ClientQuotaEntity, KafkaFutureImpl<Void>> futures) {
for (EntryData entryData : data.entries()) {
Map<String, String> entityEntries = new HashMap<>(entryData.entity().size());
for (EntityData entityData : entryData.entity()) {
entityEntries.put(entityData.entityType(), entityData.entityName());
}
ClientQuotaEntity entity = new ClientQuotaEntity(entityEntries);
KafkaFutureImpl<Void> future = futures.get(entity);
if (future == null) {
throw new IllegalArgumentException("Future map must contain entity " + entity);
}
Errors error = Errors.forCode(entryData.errorCode());
if (error == Errors.NONE) {
future.complete(null);
} else {
future.completeExceptionally(error.exception(entryData.errorMessage()));
}
}
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> counts = new HashMap<>();
data.entries().forEach(entry ->
updateErrorCounts(counts, Errors.forCode(entry.errorCode()))
);
return counts;
}
@Override
public AlterClientQuotasResponseData data() {
return data;
}
private static List<EntityData> toEntityData(ClientQuotaEntity entity) {
List<AlterClientQuotasResponseData.EntityData> entityData = new ArrayList<>(entity.entries().size());
for (Map.Entry<String, String> entry : entity.entries().entrySet()) {
entityData.add(new AlterClientQuotasResponseData.EntityData()
.setEntityType(entry.getKey())
.setEntityName(entry.getValue()));
}
return entityData;
}
public static AlterClientQuotasResponse parse(ByteBuffer buffer, short version) {
return new AlterClientQuotasResponse(new AlterClientQuotasResponseData(new ByteBufferAccessor(buffer), version));
}
public static AlterClientQuotasResponse fromQuotaEntities(Map<ClientQuotaEntity, ApiError> result, int throttleTimeMs) {
List<EntryData> entries = new ArrayList<>(result.size());
for (Map.Entry<ClientQuotaEntity, ApiError> entry : result.entrySet()) {
ApiError e = entry.getValue();
entries.add(new EntryData()
.setErrorCode(e.error().code())
.setErrorMessage(e.message())
.setEntity(toEntityData(entry.getKey())));
}
return new AlterClientQuotasResponse(new AlterClientQuotasResponseData()
.setThrottleTimeMs(throttleTimeMs)
.setEntries(entries));
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AlterConfigsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.message.AlterConfigsRequestData;
import org.apache.kafka.common.message.AlterConfigsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
/**
 * Request for {@link ApiKeys#ALTER_CONFIGS}: replaces the configuration entries
 * of the named resources, optionally in validate-only mode.
 */
public class AlterConfigsRequest extends AbstractRequest {

    /** The set of configuration entries to apply to a single resource. */
    public static class Config {
        private final Collection<ConfigEntry> entries;

        public Config(Collection<ConfigEntry> entries) {
            this.entries = Objects.requireNonNull(entries, "entries");
        }

        public Collection<ConfigEntry> entries() {
            return entries;
        }
    }

    /** A single configuration name/value pair; neither part may be null. */
    public static class ConfigEntry {
        private final String name;
        private final String value;

        public ConfigEntry(String name, String value) {
            this.name = Objects.requireNonNull(name, "name");
            this.value = Objects.requireNonNull(value, "value");
        }

        public String name() {
            return name;
        }

        public String value() {
            return value;
        }
    }

    public static class Builder extends AbstractRequest.Builder<AlterConfigsRequest> {
        private final AlterConfigsRequestData data = new AlterConfigsRequestData();

        /**
         * @param configs      per-resource configuration entries to send; must not be null
         * @param validateOnly if true, the request asks the broker to validate without applying
         */
        public Builder(Map<ConfigResource, Config> configs, boolean validateOnly) {
            super(ApiKeys.ALTER_CONFIGS);
            Objects.requireNonNull(configs, "configs");
            configs.forEach((configResource, config) -> {
                AlterConfigsRequestData.AlterConfigsResource resourceData =
                    new AlterConfigsRequestData.AlterConfigsResource()
                        .setResourceName(configResource.name())
                        .setResourceType(configResource.type().id());
                for (ConfigEntry configEntry : config.entries) {
                    resourceData.configs().add(new AlterConfigsRequestData.AlterableConfig()
                        .setName(configEntry.name())
                        .setValue(configEntry.value()));
                }
                this.data.resources().add(resourceData);
            });
            this.data.setValidateOnly(validateOnly);
        }

        @Override
        public AlterConfigsRequest build(short version) {
            return new AlterConfigsRequest(data, version);
        }
    }

    private final AlterConfigsRequestData data;

    public AlterConfigsRequest(AlterConfigsRequestData data, short version) {
        super(ApiKeys.ALTER_CONFIGS, version);
        this.data = data;
    }

    /** Reconstructs the per-resource config map from the wire-format data. */
    public Map<ConfigResource, Config> configs() {
        return data.resources().stream().collect(Collectors.toMap(
            resourceData -> new ConfigResource(
                ConfigResource.Type.forId(resourceData.resourceType()),
                resourceData.resourceName()),
            resourceData -> new Config(resourceData.configs().stream()
                .map(c -> new ConfigEntry(c.name(), c.value()))
                .collect(Collectors.toList()))));
    }

    public boolean validateOnly() {
        return data.validateOnly();
    }

    @Override
    public AlterConfigsRequestData data() {
        return data;
    }

    /**
     * Builds an error response that reports the given throwable's error for every
     * resource named in this request.
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        ApiError apiError = ApiError.fromThrowable(e);
        AlterConfigsResponseData responseData = new AlterConfigsResponseData()
            .setThrottleTimeMs(throttleTimeMs);
        for (AlterConfigsRequestData.AlterConfigsResource resourceData : this.data.resources()) {
            responseData.responses().add(new AlterConfigsResponseData.AlterConfigsResourceResponse()
                .setResourceType(resourceData.resourceType())
                .setResourceName(resourceData.resourceName())
                .setErrorMessage(apiError.message())
                .setErrorCode(apiError.error().code()));
        }
        return new AlterConfigsResponse(responseData);
    }

    public static AlterConfigsRequest parse(ByteBuffer buffer, short version) {
        return new AlterConfigsRequest(new AlterConfigsRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AlterConfigsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.message.AlterConfigsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Response for {@link ApiKeys#ALTER_CONFIGS}: a per-resource error result for
 * each config resource named in the request.
 */
public class AlterConfigsResponse extends AbstractResponse {
    private final AlterConfigsResponseData data;

    public AlterConfigsResponse(AlterConfigsResponseData data) {
        super(ApiKeys.ALTER_CONFIGS);
        this.data = data;
    }

    /** Maps each config resource in the response to its (possibly NONE) error. */
    public Map<ConfigResource, ApiError> errors() {
        return data.responses().stream().collect(Collectors.toMap(
            resourceResponse -> new ConfigResource(
                ConfigResource.Type.forId(resourceResponse.resourceType()),
                resourceResponse.resourceName()),
            resourceResponse -> new ApiError(
                Errors.forCode(resourceResponse.errorCode()),
                resourceResponse.errorMessage())));
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        return apiErrorCounts(errors());
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    @Override
    public AlterConfigsResponseData data() {
        return data;
    }

    public static AlterConfigsResponse parse(ByteBuffer buffer, short version) {
        return new AlterConfigsResponse(
            new AlterConfigsResponseData(new ByteBufferAccessor(buffer), version));
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        // The throttle-time field is client-visible from version 1 onwards.
        return version >= 1;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AlterPartitionReassignmentsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData;
import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignableTopic;
import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData;
import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignablePartitionResponse;
import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignableTopicResponse;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Request for {@link ApiKeys#ALTER_PARTITION_REASSIGNMENTS}.
 */
public class AlterPartitionReassignmentsRequest extends AbstractRequest {
    public static class Builder extends AbstractRequest.Builder<AlterPartitionReassignmentsRequest> {
        private final AlterPartitionReassignmentsRequestData data;

        public Builder(AlterPartitionReassignmentsRequestData data) {
            super(ApiKeys.ALTER_PARTITION_REASSIGNMENTS);
            this.data = data;
        }

        @Override
        public AlterPartitionReassignmentsRequest build(short version) {
            return new AlterPartitionReassignmentsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final AlterPartitionReassignmentsRequestData data;

    private AlterPartitionReassignmentsRequest(AlterPartitionReassignmentsRequestData data, short version) {
        super(ApiKeys.ALTER_PARTITION_REASSIGNMENTS, version);
        this.data = data;
    }

    public static AlterPartitionReassignmentsRequest parse(ByteBuffer buffer, short version) {
        return new AlterPartitionReassignmentsRequest(
            new AlterPartitionReassignmentsRequestData(new ByteBufferAccessor(buffer), version), version);
    }

    public AlterPartitionReassignmentsRequestData data() {
        return data;
    }

    /**
     * Builds an error response that echoes the given throwable's error both at the
     * top level and for every partition listed in the request.
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        ApiError apiError = ApiError.fromThrowable(e);
        List<ReassignableTopicResponse> topicResponses = new ArrayList<>();
        for (ReassignableTopic topic : data.topics()) {
            List<ReassignablePartitionResponse> partitionResponses = new ArrayList<>(topic.partitions().size());
            topic.partitions().forEach(partition ->
                partitionResponses.add(new ReassignablePartitionResponse()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(apiError.error().code())
                    .setErrorMessage(apiError.message())));
            topicResponses.add(new ReassignableTopicResponse()
                .setName(topic.name())
                .setPartitions(partitionResponses));
        }
        AlterPartitionReassignmentsResponseData responseData = new AlterPartitionReassignmentsResponseData()
            .setResponses(topicResponses)
            .setErrorCode(apiError.error().code())
            .setErrorMessage(apiError.message())
            .setThrottleTimeMs(throttleTimeMs);
        return new AlterPartitionReassignmentsResponse(responseData);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AlterPartitionReassignmentsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
/**
 * Response for {@link ApiKeys#ALTER_PARTITION_REASSIGNMENTS}: a top-level error
 * plus a per-partition error for each reassigned partition.
 */
public class AlterPartitionReassignmentsResponse extends AbstractResponse {
    private final AlterPartitionReassignmentsResponseData data;

    public AlterPartitionReassignmentsResponse(AlterPartitionReassignmentsResponseData data) {
        super(ApiKeys.ALTER_PARTITION_REASSIGNMENTS);
        this.data = data;
    }

    public static AlterPartitionReassignmentsResponse parse(ByteBuffer buffer, short version) {
        ByteBufferAccessor accessor = new ByteBufferAccessor(buffer);
        return new AlterPartitionReassignmentsResponse(
            new AlterPartitionReassignmentsResponseData(accessor, version));
    }

    @Override
    public AlterPartitionReassignmentsResponseData data() {
        return data;
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        // Client-side throttling applies to every version of this API.
        return true;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /** Tallies the top-level error code together with every per-partition error code. */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Map<Errors, Integer> counts = new HashMap<>();
        updateErrorCounts(counts, Errors.forCode(data.errorCode()));
        data.responses().forEach(topic ->
            topic.partitions().forEach(partition ->
                updateErrorCounts(counts, Errors.forCode(partition.errorCode()))));
        return counts;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AlterPartitionRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.AlterPartitionRequestData;
import org.apache.kafka.common.message.AlterPartitionRequestData.BrokerState;
import org.apache.kafka.common.message.AlterPartitionResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Request for {@link ApiKeys#ALTER_PARTITION}.
 */
public class AlterPartitionRequest extends AbstractRequest {
    private final AlterPartitionRequestData data;

    public AlterPartitionRequest(AlterPartitionRequestData data, short apiVersion) {
        super(ApiKeys.ALTER_PARTITION, apiVersion);
        this.data = data;
    }

    @Override
    public AlterPartitionRequestData data() {
        return data;
    }

    /**
     * Get an error response for a request with specified throttle time in the response if applicable
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        AlterPartitionResponseData responseData = new AlterPartitionResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setErrorCode(Errors.forException(e).code());
        return new AlterPartitionResponse(responseData);
    }

    public static AlterPartitionRequest parse(ByteBuffer buffer, short version) {
        return new AlterPartitionRequest(
            new AlterPartitionRequestData(new ByteBufferAccessor(buffer), version), version);
    }

    public static class Builder extends AbstractRequest.Builder<AlterPartitionRequest> {
        private final AlterPartitionRequestData data;

        /**
         * Constructs a builder for AlterPartitionRequest.
         *
         * @param data The data to be sent. Note that because the version of the
         *             request is not known at this time, it is expected that all
         *             topics have a topic id and a topic name set.
         * @param canUseTopicIds True if version 2 and above can be used.
         */
        public Builder(AlterPartitionRequestData data, boolean canUseTopicIds) {
            super(
                ApiKeys.ALTER_PARTITION,
                ApiKeys.ALTER_PARTITION.oldestVersion(),
                // Version 1 is the maximum version that can be used without topic ids.
                canUseTopicIds ? ApiKeys.ALTER_PARTITION.latestVersion() : 1
            );
            this.data = data;
        }

        @Override
        public AlterPartitionRequest build(short version) {
            if (version < 3) {
                // Versions below 3 carry a plain broker-id ISR list instead of
                // broker states, so flatten the richer field and clear it.
                data.topics().forEach(topic ->
                    topic.partitions().forEach(partition -> {
                        List<Integer> plainIsr = new ArrayList<>(partition.newIsrWithEpochs().size());
                        for (BrokerState state : partition.newIsrWithEpochs()) {
                            plainIsr.add(state.brokerId());
                        }
                        partition.setNewIsr(plainIsr);
                        partition.setNewIsrWithEpochs(Collections.emptyList());
                    }));
            }
            return new AlterPartitionRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    /** Wraps each broker id in a {@link BrokerState} carrying only the broker id. */
    public static List<BrokerState> newIsrToSimpleNewIsrWithBrokerEpochs(List<Integer> newIsr) {
        List<BrokerState> states = new ArrayList<>(newIsr.size());
        for (Integer brokerId : newIsr) {
            states.add(new BrokerState().setBrokerId(brokerId));
        }
        return states;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AlterPartitionResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.AlterPartitionResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
/**
 * Response for {@link ApiKeys#ALTER_PARTITION}: a top-level error plus one error
 * per partition.
 */
public class AlterPartitionResponse extends AbstractResponse {
    private final AlterPartitionResponseData data;

    public AlterPartitionResponse(AlterPartitionResponseData data) {
        super(ApiKeys.ALTER_PARTITION);
        this.data = data;
    }

    @Override
    public AlterPartitionResponseData data() {
        return data;
    }

    /** Tallies the top-level error code plus every per-partition error code. */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Map<Errors, Integer> counts = new HashMap<>();
        updateErrorCounts(counts, Errors.forCode(data.errorCode()));
        data.topics().forEach(topic ->
            topic.partitions().forEach(partition ->
                updateErrorCounts(counts, Errors.forCode(partition.errorCode()))));
        return counts;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    public static AlterPartitionResponse parse(ByteBuffer buffer, short version) {
        return new AlterPartitionResponse(
            new AlterPartitionResponseData(new ByteBufferAccessor(buffer), version));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AlterReplicaLogDirsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData;
import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData;
import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData.AlterReplicaLogDirTopicResult;
/**
 * Request for {@link ApiKeys#ALTER_REPLICA_LOG_DIRS}: maps topic partitions to the
 * log directory paths they should be moved to.
 */
public class AlterReplicaLogDirsRequest extends AbstractRequest {
    private final AlterReplicaLogDirsRequestData data;

    public static class Builder extends AbstractRequest.Builder<AlterReplicaLogDirsRequest> {
        private final AlterReplicaLogDirsRequestData data;

        public Builder(AlterReplicaLogDirsRequestData data) {
            super(ApiKeys.ALTER_REPLICA_LOG_DIRS);
            this.data = data;
        }

        @Override
        public AlterReplicaLogDirsRequest build(short version) {
            return new AlterReplicaLogDirsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    public AlterReplicaLogDirsRequest(AlterReplicaLogDirsRequestData data, short version) {
        super(ApiKeys.ALTER_REPLICA_LOG_DIRS, version);
        this.data = data;
    }

    @Override
    public AlterReplicaLogDirsRequestData data() {
        return data;
    }

    /**
     * Builds a response reporting the given throwable's error code for every
     * partition named in this request.
     */
    @Override // was missing: this overrides AbstractRequest.getErrorResponse with a covariant return
    public AlterReplicaLogDirsResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        AlterReplicaLogDirsResponseData data = new AlterReplicaLogDirsResponseData();
        data.setResults(this.data.dirs().stream().flatMap(alterDir ->
            alterDir.topics().stream().map(topic ->
                new AlterReplicaLogDirTopicResult()
                    .setTopicName(topic.name())
                    .setPartitions(topic.partitions().stream().map(partitionId ->
                        new AlterReplicaLogDirsResponseData.AlterReplicaLogDirPartitionResult()
                            .setErrorCode(Errors.forException(e).code())
                            .setPartitionIndex(partitionId)).collect(Collectors.toList())))).collect(Collectors.toList()));
        return new AlterReplicaLogDirsResponse(data.setThrottleTimeMs(throttleTimeMs));
    }

    /**
     * Flattens the request into a map from topic partition to the destination
     * log directory path requested for it.
     */
    public Map<TopicPartition, String> partitionDirs() {
        Map<TopicPartition, String> result = new HashMap<>();
        data.dirs().forEach(alterDir ->
            alterDir.topics().forEach(topic ->
                topic.partitions().forEach(partition ->
                    result.put(new TopicPartition(topic.name(), partition.intValue()), alterDir.path())))
        );
        return result;
    }

    public static AlterReplicaLogDirsRequest parse(ByteBuffer buffer, short version) {
        return new AlterReplicaLogDirsRequest(new AlterReplicaLogDirsRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AlterReplicaLogDirsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
/**
* Possible error codes:
*
* {@link Errors#LOG_DIR_NOT_FOUND}
* {@link Errors#KAFKA_STORAGE_ERROR}
* {@link Errors#REPLICA_NOT_AVAILABLE}
* {@link Errors#UNKNOWN_SERVER_ERROR}
*/
public class AlterReplicaLogDirsResponse extends AbstractResponse {
    private final AlterReplicaLogDirsResponseData data;

    public AlterReplicaLogDirsResponse(AlterReplicaLogDirsResponseData data) {
        super(ApiKeys.ALTER_REPLICA_LOG_DIRS);
        this.data = data;
    }

    @Override
    public AlterReplicaLogDirsResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /** Tallies the per-partition error codes across all topic results. */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Map<Errors, Integer> counts = new HashMap<>();
        data.results().forEach(topic ->
            topic.partitions().forEach(partition ->
                updateErrorCounts(counts, Errors.forCode(partition.errorCode()))));
        return counts;
    }

    public static AlterReplicaLogDirsResponse parse(ByteBuffer buffer, short version) {
        return new AlterReplicaLogDirsResponse(
            new AlterReplicaLogDirsResponseData(new ByteBufferAccessor(buffer), version));
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        // The throttle-time field is client-visible from version 1 onwards.
        return version >= 1;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AlterUserScramCredentialsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.AlterUserScramCredentialsRequestData;
import org.apache.kafka.common.message.AlterUserScramCredentialsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;
/**
 * Request for {@link ApiKeys#ALTER_USER_SCRAM_CREDENTIALS}: carries credential
 * deletions and upsertions keyed by user name.
 */
public class AlterUserScramCredentialsRequest extends AbstractRequest {
    public static class Builder extends AbstractRequest.Builder<AlterUserScramCredentialsRequest> {
        private final AlterUserScramCredentialsRequestData data;

        public Builder(AlterUserScramCredentialsRequestData data) {
            super(ApiKeys.ALTER_USER_SCRAM_CREDENTIALS);
            this.data = data;
        }

        @Override
        public AlterUserScramCredentialsRequest build(short version) {
            return new AlterUserScramCredentialsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final AlterUserScramCredentialsRequestData data;

    private AlterUserScramCredentialsRequest(AlterUserScramCredentialsRequestData data, short version) {
        super(ApiKeys.ALTER_USER_SCRAM_CREDENTIALS, version);
        this.data = data;
    }

    public static AlterUserScramCredentialsRequest parse(ByteBuffer buffer, short version) {
        return new AlterUserScramCredentialsRequest(new AlterUserScramCredentialsRequestData(new ByteBufferAccessor(buffer), version), version);
    }

    @Override
    public AlterUserScramCredentialsRequestData data() {
        return data;
    }

    /**
     * Builds a response that reports the given throwable's error for every distinct
     * user named in this request (deletions and upsertions alike), in sorted order.
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        ApiError apiError = ApiError.fromThrowable(e);
        short errorCode = apiError.error().code();
        String errorMessage = apiError.message();
        Set<String> users = Stream.concat(
            this.data.deletions().stream().map(deletion -> deletion.name()),
            this.data.upsertions().stream().map(upsertion -> upsertion.name()))
            .collect(Collectors.toSet());
        List<AlterUserScramCredentialsResponseData.AlterUserScramCredentialsResult> results =
            users.stream().sorted().map(user ->
                new AlterUserScramCredentialsResponseData.AlterUserScramCredentialsResult()
                    .setUser(user)
                    .setErrorCode(errorCode)
                    .setErrorMessage(errorMessage))
            .collect(Collectors.toList());
        // Fix: propagate throttleTimeMs — it was previously ignored, so error
        // responses never carried the throttle time, unlike the sibling request
        // classes in this package.
        return new AlterUserScramCredentialsResponse(new AlterUserScramCredentialsResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setResults(results));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/AlterUserScramCredentialsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.AlterUserScramCredentialsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
public class AlterUserScramCredentialsResponse extends AbstractResponse {
    private final AlterUserScramCredentialsResponseData data;

    public AlterUserScramCredentialsResponse(AlterUserScramCredentialsResponseData responseData) {
        super(ApiKeys.ALTER_USER_SCRAM_CREDENTIALS);
        this.data = responseData;
    }

    /** Deserializes a response of the given protocol version from {@code buffer}. */
    public static AlterUserScramCredentialsResponse parse(ByteBuffer buffer, short version) {
        return new AlterUserScramCredentialsResponse(
            new AlterUserScramCredentialsResponseData(new ByteBufferAccessor(buffer), version));
    }

    @Override
    public AlterUserScramCredentialsResponseData data() {
        return data;
    }

    // Every version of this response schema carries a throttle time the client should honor.
    @Override
    public boolean shouldClientThrottle(short version) {
        return true;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /** Tallies the per-user result error codes into a map from error to occurrence count. */
    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(data.results().stream().map(result -> Errors.forCode(result.errorCode())));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ApiError.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.errors.ApiException;
import org.apache.kafka.common.protocol.Errors;
import java.util.Objects;
/**
* Encapsulates an error code (via the Errors enum) and an optional message. Generally, the optional message is only
* defined if it adds information over the default message associated with the error code.
*
* This is an internal class (like every class in the requests package).
*/
public class ApiError {
    /** Shared instance representing "no error", with no custom message. */
    public static final ApiError NONE = new ApiError(Errors.NONE, null);

    private final Errors error;
    private final String message;

    public static ApiError fromThrowable(Throwable t) {
        // Avoid populating the error message if it's a generic one. Also don't populate error
        // message for UNKNOWN_SERVER_ERROR to ensure we don't leak sensitive information.
        Throwable cause = Errors.maybeUnwrapException(t);
        Errors mappedError = Errors.forException(cause);
        boolean suppressMessage = mappedError == Errors.UNKNOWN_SERVER_ERROR
            || mappedError.message().equals(cause.getMessage());
        return new ApiError(mappedError, suppressMessage ? null : cause.getMessage());
    }

    public ApiError(Errors error) {
        this(error, error.message());
    }

    public ApiError(Errors error, String message) {
        this.error = error;
        this.message = message;
    }

    public ApiError(short code, String message) {
        this(Errors.forCode(code), message);
    }

    public boolean is(Errors error) {
        return this.error == error;
    }

    public boolean isFailure() {
        return !isSuccess();
    }

    public boolean isSuccess() {
        return is(Errors.NONE);
    }

    public Errors error() {
        return error;
    }

    /**
     * Return the optional error message or null. Consider using {@link #messageWithFallback()} instead.
     */
    public String message() {
        return message;
    }

    /**
     * If `message` is defined, return it. Otherwise fallback to the default error message associated
     * with the error code.
     */
    public String messageWithFallback() {
        return message != null ? message : error.message();
    }

    public ApiException exception() {
        return error.exception(message);
    }

    @Override
    public int hashCode() {
        return Objects.hash(error, message);
    }

    @Override
    public boolean equals(Object o) {
        if (!(o instanceof ApiError))
            return false;
        ApiError that = (ApiError) o;
        return Objects.equals(this.error, that.error)
            && Objects.equals(this.message, that.message);
    }

    @Override
    public String toString() {
        return "ApiError(error=" + error + ", message=" + message + ")";
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ApiVersionsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.util.regex.Pattern;
import org.apache.kafka.common.message.ApiVersionsRequestData;
import org.apache.kafka.common.message.ApiVersionsResponseData;
import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionCollection;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.utils.AppInfoParser;
import java.nio.ByteBuffer;
public class ApiVersionsRequest extends AbstractRequest {
public static class Builder extends AbstractRequest.Builder<ApiVersionsRequest> {
private static final String DEFAULT_CLIENT_SOFTWARE_NAME = "apache-kafka-java";
private static final ApiVersionsRequestData DATA = new ApiVersionsRequestData()
.setClientSoftwareName(DEFAULT_CLIENT_SOFTWARE_NAME)
.setClientSoftwareVersion(AppInfoParser.getVersion());
public Builder() {
super(ApiKeys.API_VERSIONS);
}
public Builder(short version) {
super(ApiKeys.API_VERSIONS, version);
}
@Override
public ApiVersionsRequest build(short version) {
return new ApiVersionsRequest(DATA, version);
}
@Override
public String toString() {
return DATA.toString();
}
}
private static final Pattern SOFTWARE_NAME_VERSION_PATTERN = Pattern.compile("[a-zA-Z0-9](?:[a-zA-Z0-9\\-.]*[a-zA-Z0-9])?");
private final Short unsupportedRequestVersion;
private final ApiVersionsRequestData data;
public ApiVersionsRequest(ApiVersionsRequestData data, short version) {
this(data, version, null);
}
public ApiVersionsRequest(ApiVersionsRequestData data, short version, Short unsupportedRequestVersion) {
super(ApiKeys.API_VERSIONS, version);
this.data = data;
// Unlike other request types, the broker handles ApiVersion requests with higher versions than
// supported. It does so by treating the request as if it were v0 and returns a response using
// the v0 response schema. The reason for this is that the client does not yet know what versions
// a broker supports when this request is sent, so instead of assuming the lowest supported version,
// it can use the most recent version and only fallback to the old version when necessary.
this.unsupportedRequestVersion = unsupportedRequestVersion;
}
public boolean hasUnsupportedRequestVersion() {
return unsupportedRequestVersion != null;
}
public boolean isValid() {
if (version() >= 3) {
return SOFTWARE_NAME_VERSION_PATTERN.matcher(data.clientSoftwareName()).matches() &&
SOFTWARE_NAME_VERSION_PATTERN.matcher(data.clientSoftwareVersion()).matches();
} else {
return true;
}
}
@Override
public ApiVersionsRequestData data() {
return data;
}
@Override
public ApiVersionsResponse getErrorResponse(int throttleTimeMs, Throwable e) {
ApiVersionsResponseData data = new ApiVersionsResponseData()
.setErrorCode(Errors.forException(e).code());
if (version() >= 1) {
data.setThrottleTimeMs(throttleTimeMs);
}
// Starting from Apache Kafka 2.4 (KIP-511), ApiKeys field is populated with the supported
// versions of the ApiVersionsRequest when an UNSUPPORTED_VERSION error is returned.
if (Errors.forException(e) == Errors.UNSUPPORTED_VERSION) {
ApiVersionCollection apiKeys = new ApiVersionCollection();
apiKeys.add(ApiVersionsResponse.toApiVersion(ApiKeys.API_VERSIONS));
data.setApiKeys(apiKeys);
}
return new ApiVersionsResponse(data);
}
public static ApiVersionsRequest parse(ByteBuffer buffer, short version) {
return new ApiVersionsRequest(new ApiVersionsRequestData(new ByteBufferAccessor(buffer), version), version);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ApiVersionsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.clients.NodeApiVersions;
import org.apache.kafka.common.feature.Features;
import org.apache.kafka.common.feature.SupportedVersionRange;
import org.apache.kafka.common.message.ApiMessageType;
import org.apache.kafka.common.message.ApiMessageType.ListenerType;
import org.apache.kafka.common.message.ApiVersionsResponseData;
import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion;
import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionCollection;
import org.apache.kafka.common.message.ApiVersionsResponseData.FinalizedFeatureKey;
import org.apache.kafka.common.message.ApiVersionsResponseData.FinalizedFeatureKeyCollection;
import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKey;
import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKeyCollection;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.RecordVersion;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
/**
* Possible error codes:
* - {@link Errors#UNSUPPORTED_VERSION}
* - {@link Errors#INVALID_REQUEST}
*/
public class ApiVersionsResponse extends AbstractResponse {
// Sentinel epoch used when the finalized-features epoch is not known.
public static final long UNKNOWN_FINALIZED_FEATURES_EPOCH = -1L;
private final ApiVersionsResponseData data;
public ApiVersionsResponse(ApiVersionsResponseData data) {
super(ApiKeys.API_VERSIONS);
this.data = data;
}
@Override
public ApiVersionsResponseData data() {
return data;
}
// Looks up the advertised version range for a single API key, or null if absent.
public ApiVersion apiVersion(short apiKey) {
return data.apiKeys().find(apiKey);
}
@Override
public Map<Errors, Integer> errorCounts() {
return errorCounts(Errors.forCode(this.data.errorCode()));
}
@Override
public int throttleTimeMs() {
return this.data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 2;
}
public boolean zkMigrationReady() {
return data.zkMigrationReady();
}
public static ApiVersionsResponse parse(ByteBuffer buffer, short version) {
// Fallback to version 0 for ApiVersions response. If a client sends an ApiVersionsRequest
// using a version higher than that supported by the broker, a version 0 response is sent
// to the client indicating UNSUPPORTED_VERSION. When the client receives the response, it
// falls back while parsing it which means that the version received by this
// method is not necessarily the real one. It may be version 0 as well.
int prev = buffer.position();
try {
return new ApiVersionsResponse(new ApiVersionsResponseData(new ByteBufferAccessor(buffer), version));
} catch (RuntimeException e) {
// Rewind and retry as v0; if we were already parsing as v0, re-throw.
buffer.position(prev);
if (version != 0)
return new ApiVersionsResponse(new ApiVersionsResponseData(new ByteBufferAccessor(buffer), (short) 0));
else
throw e;
}
}
// Convenience overload: no throttle time (0 ms).
public static ApiVersionsResponse defaultApiVersionsResponse(
ApiMessageType.ListenerType listenerType
) {
return defaultApiVersionsResponse(0, listenerType);
}
// Convenience overload: advertises unstable last versions by default.
public static ApiVersionsResponse defaultApiVersionsResponse(
int throttleTimeMs,
ApiMessageType.ListenerType listenerType
) {
return createApiVersionsResponse(
throttleTimeMs,
filterApis(RecordVersion.current(), listenerType, true),
Features.emptySupportedFeatures(),
false
);
}
public static ApiVersionsResponse defaultApiVersionsResponse(
int throttleTimeMs,
ApiMessageType.ListenerType listenerType,
boolean enableUnstableLastVersion
) {
return createApiVersionsResponse(
throttleTimeMs,
filterApis(RecordVersion.current(), listenerType, enableUnstableLastVersion),
Features.emptySupportedFeatures(),
false
);
}
// Convenience overload: no supported features and ZK migration disabled.
public static ApiVersionsResponse createApiVersionsResponse(
int throttleTimeMs,
ApiVersionCollection apiVersions
) {
return createApiVersionsResponse(throttleTimeMs, apiVersions, Features.emptySupportedFeatures(), false);
}
// Convenience overload: no finalized features and an unknown finalized-features epoch.
public static ApiVersionsResponse createApiVersionsResponse(
int throttleTimeMs,
ApiVersionCollection apiVersions,
Features<SupportedVersionRange> latestSupportedFeatures,
boolean zkMigrationEnabled
) {
return createApiVersionsResponse(
throttleTimeMs,
apiVersions,
latestSupportedFeatures,
Collections.emptyMap(),
UNKNOWN_FINALIZED_FEATURES_EPOCH,
zkMigrationEnabled);
}
// Builds the response from a record version and listener type. When controller API
// versions are available, the advertised ranges are intersected with them so that
// forwardable APIs are only advertised at versions both sides support.
public static ApiVersionsResponse createApiVersionsResponse(
int throttleTimeMs,
RecordVersion minRecordVersion,
Features<SupportedVersionRange> latestSupportedFeatures,
Map<String, Short> finalizedFeatures,
long finalizedFeaturesEpoch,
NodeApiVersions controllerApiVersions,
ListenerType listenerType,
boolean enableUnstableLastVersion,
boolean zkMigrationEnabled
) {
ApiVersionCollection apiKeys;
if (controllerApiVersions != null) {
apiKeys = intersectForwardableApis(
listenerType,
minRecordVersion,
controllerApiVersions.allSupportedApiVersions(),
enableUnstableLastVersion
);
} else {
apiKeys = filterApis(
minRecordVersion,
listenerType,
enableUnstableLastVersion
);
}
return createApiVersionsResponse(
throttleTimeMs,
apiKeys,
latestSupportedFeatures,
finalizedFeatures,
finalizedFeaturesEpoch,
zkMigrationEnabled
);
}
// Fully-specified factory; all other createApiVersionsResponse overloads delegate here.
public static ApiVersionsResponse createApiVersionsResponse(
int throttleTimeMs,
ApiVersionCollection apiVersions,
Features<SupportedVersionRange> latestSupportedFeatures,
Map<String, Short> finalizedFeatures,
long finalizedFeaturesEpoch,
boolean zkMigrationEnabled
) {
return new ApiVersionsResponse(
createApiVersionsResponseData(
throttleTimeMs,
Errors.NONE,
apiVersions,
latestSupportedFeatures,
finalizedFeatures,
finalizedFeaturesEpoch,
zkMigrationEnabled
)
);
}
public static ApiVersionCollection filterApis(
RecordVersion minRecordVersion,
ApiMessageType.ListenerType listenerType
) {
return filterApis(minRecordVersion, listenerType, false);
}
// Selects the APIs exposed on the given listener whose required inter-broker magic
// is satisfied by minRecordVersion.
public static ApiVersionCollection filterApis(
RecordVersion minRecordVersion,
ApiMessageType.ListenerType listenerType,
boolean enableUnstableLastVersion
) {
ApiVersionCollection apiKeys = new ApiVersionCollection();
for (ApiKeys apiKey : ApiKeys.apisForListener(listenerType)) {
if (apiKey.minRequiredInterBrokerMagic <= minRecordVersion.value) {
apiKey.toApiVersion(enableUnstableLastVersion).ifPresent(apiKeys::add);
}
}
return apiKeys;
}
// Converts a set of ApiKeys into an ApiVersionCollection, skipping keys with no
// advertisable version.
public static ApiVersionCollection collectApis(
Set<ApiKeys> apiKeys,
boolean enableUnstableLastVersion
) {
ApiVersionCollection res = new ApiVersionCollection();
for (ApiKeys apiKey : apiKeys) {
apiKey.toApiVersion(enableUnstableLastVersion).ifPresent(res::add);
}
return res;
}
/**
* Find the common range of supported API versions between the locally
* known range and that of another set.
*
* @param listenerType the listener type which constrains the set of exposed APIs
* @param minRecordVersion min inter broker magic
* @param activeControllerApiVersions controller ApiVersions
* @param enableUnstableLastVersion whether unstable versions should be advertised or not
* @return commonly agreed ApiVersion collection
*/
public static ApiVersionCollection intersectForwardableApis(
final ApiMessageType.ListenerType listenerType,
final RecordVersion minRecordVersion,
final Map<ApiKeys, ApiVersion> activeControllerApiVersions,
boolean enableUnstableLastVersion
) {
ApiVersionCollection apiKeys = new ApiVersionCollection();
for (ApiKeys apiKey : ApiKeys.apisForListener(listenerType)) {
if (apiKey.minRequiredInterBrokerMagic <= minRecordVersion.value) {
final Optional<ApiVersion> brokerApiVersion = apiKey.toApiVersion(enableUnstableLastVersion);
if (!brokerApiVersion.isPresent()) {
// Broker does not support this API key.
continue;
}
final ApiVersion finalApiVersion;
if (!apiKey.forwardable) {
// Non-forwardable APIs are served locally, so the broker's own range applies.
finalApiVersion = brokerApiVersion.get();
} else {
Optional<ApiVersion> intersectVersion = intersect(
brokerApiVersion.get(),
activeControllerApiVersions.getOrDefault(apiKey, null)
);
if (intersectVersion.isPresent()) {
finalApiVersion = intersectVersion.get();
} else {
// Controller doesn't support this API key, or there is no intersection.
continue;
}
}
apiKeys.add(finalApiVersion.duplicate());
}
}
return apiKeys;
}
// Assembles the wire-level response data object from its individual components.
private static ApiVersionsResponseData createApiVersionsResponseData(
final int throttleTimeMs,
final Errors error,
final ApiVersionCollection apiKeys,
final Features<SupportedVersionRange> latestSupportedFeatures,
final Map<String, Short> finalizedFeatures,
final long finalizedFeaturesEpoch,
final boolean zkMigrationEnabled
) {
final ApiVersionsResponseData data = new ApiVersionsResponseData();
data.setThrottleTimeMs(throttleTimeMs);
data.setErrorCode(error.code());
data.setApiKeys(apiKeys);
data.setSupportedFeatures(createSupportedFeatureKeys(latestSupportedFeatures));
data.setFinalizedFeatures(createFinalizedFeatureKeys(finalizedFeatures));
data.setFinalizedFeaturesEpoch(finalizedFeaturesEpoch);
data.setZkMigrationReady(zkMigrationEnabled);
return data;
}
// Converts the supported-features map into its wire representation (min/max per feature).
private static SupportedFeatureKeyCollection createSupportedFeatureKeys(
Features<SupportedVersionRange> latestSupportedFeatures) {
SupportedFeatureKeyCollection converted = new SupportedFeatureKeyCollection();
for (Map.Entry<String, SupportedVersionRange> feature : latestSupportedFeatures.features().entrySet()) {
final SupportedFeatureKey key = new SupportedFeatureKey();
final SupportedVersionRange versionRange = feature.getValue();
key.setName(feature.getKey());
key.setMinVersion(versionRange.min());
key.setMaxVersion(versionRange.max());
converted.add(key);
}
return converted;
}
// Converts the finalized-features map into its wire representation. A finalized feature
// has a single version level, used here for both min and max.
private static FinalizedFeatureKeyCollection createFinalizedFeatureKeys(
Map<String, Short> finalizedFeatures) {
FinalizedFeatureKeyCollection converted = new FinalizedFeatureKeyCollection();
for (Map.Entry<String, Short> feature : finalizedFeatures.entrySet()) {
final FinalizedFeatureKey key = new FinalizedFeatureKey();
final short versionLevel = feature.getValue();
key.setName(feature.getKey());
key.setMinVersionLevel(versionLevel);
key.setMaxVersionLevel(versionLevel);
converted.add(key);
}
return converted;
}
// Returns the overlap of two version ranges for the same API key, or empty when either
// argument is null or the ranges do not overlap. Throws if the API keys differ.
public static Optional<ApiVersion> intersect(ApiVersion thisVersion,
ApiVersion other) {
if (thisVersion == null || other == null) return Optional.empty();
if (thisVersion.apiKey() != other.apiKey())
throw new IllegalArgumentException("thisVersion.apiKey: " + thisVersion.apiKey()
+ " must be equal to other.apiKey: " + other.apiKey());
short minVersion = (short) Math.max(thisVersion.minVersion(), other.minVersion());
short maxVersion = (short) Math.min(thisVersion.maxVersion(), other.maxVersion());
return minVersion > maxVersion
? Optional.empty()
: Optional.of(new ApiVersion()
.setApiKey(thisVersion.apiKey())
.setMinVersion(minVersion)
.setMaxVersion(maxVersion));
}
// Builds an ApiVersion entry covering the full version range of the given API key.
public static ApiVersion toApiVersion(ApiKeys apiKey) {
return new ApiVersion()
.setApiKey(apiKey.id)
.setMinVersion(apiKey.oldestVersion())
.setMaxVersion(apiKey.latestVersion());
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/BeginQuorumEpochRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.BeginQuorumEpochRequestData;
import org.apache.kafka.common.message.BeginQuorumEpochResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
public class BeginQuorumEpochRequest extends AbstractRequest {

    /** Builder that carries a pre-populated request data object through to {@link #build(short)}. */
    public static class Builder extends AbstractRequest.Builder<BeginQuorumEpochRequest> {
        private final BeginQuorumEpochRequestData data;

        public Builder(BeginQuorumEpochRequestData data) {
            super(ApiKeys.BEGIN_QUORUM_EPOCH);
            this.data = data;
        }

        @Override
        public BeginQuorumEpochRequest build(short version) {
            return new BeginQuorumEpochRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final BeginQuorumEpochRequestData data;

    private BeginQuorumEpochRequest(BeginQuorumEpochRequestData data, short version) {
        super(ApiKeys.BEGIN_QUORUM_EPOCH, version);
        this.data = data;
    }

    @Override
    public BeginQuorumEpochRequestData data() {
        return data;
    }

    /** On failure, only a top-level error code is reported. */
    @Override
    public BeginQuorumEpochResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        return new BeginQuorumEpochResponse(new BeginQuorumEpochResponseData()
            .setErrorCode(Errors.forException(e).code()));
    }

    /** Deserializes a request of the given protocol version from {@code buffer}. */
    public static BeginQuorumEpochRequest parse(ByteBuffer buffer, short version) {
        return new BeginQuorumEpochRequest(new BeginQuorumEpochRequestData(new ByteBufferAccessor(buffer), version), version);
    }

    /** Convenience overload with no cluster id. */
    public static BeginQuorumEpochRequestData singletonRequest(TopicPartition topicPartition,
                                                               int leaderEpoch,
                                                               int leaderId) {
        return singletonRequest(topicPartition, null, leaderEpoch, leaderId);
    }

    /** Builds request data covering exactly one topic partition. */
    public static BeginQuorumEpochRequestData singletonRequest(TopicPartition topicPartition,
                                                               String clusterId,
                                                               int leaderEpoch,
                                                               int leaderId) {
        BeginQuorumEpochRequestData.PartitionData partition = new BeginQuorumEpochRequestData.PartitionData()
            .setPartitionIndex(topicPartition.partition())
            .setLeaderEpoch(leaderEpoch)
            .setLeaderId(leaderId);
        BeginQuorumEpochRequestData.TopicData topic = new BeginQuorumEpochRequestData.TopicData()
            .setTopicName(topicPartition.topic())
            .setPartitions(Collections.singletonList(partition));
        return new BeginQuorumEpochRequestData()
            .setClusterId(clusterId)
            .setTopics(Collections.singletonList(topic));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/BeginQuorumEpochResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.BeginQuorumEpochResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
* Possible error codes.
*
* Top level errors:
* - {@link Errors#CLUSTER_AUTHORIZATION_FAILED}
* - {@link Errors#BROKER_NOT_AVAILABLE}
*
* Partition level errors:
* - {@link Errors#FENCED_LEADER_EPOCH}
* - {@link Errors#INVALID_REQUEST}
* - {@link Errors#INCONSISTENT_VOTER_SET}
* - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION}
*/
public class BeginQuorumEpochResponse extends AbstractResponse {
    private final BeginQuorumEpochResponseData data;

    public BeginQuorumEpochResponse(BeginQuorumEpochResponseData data) {
        super(ApiKeys.BEGIN_QUORUM_EPOCH);
        this.data = data;
    }

    /** Deserializes a response of the given protocol version from {@code buffer}. */
    public static BeginQuorumEpochResponse parse(ByteBuffer buffer, short version) {
        return new BeginQuorumEpochResponse(new BeginQuorumEpochResponseData(new ByteBufferAccessor(buffer), version));
    }

    /** Builds response data with one topic entry containing a single partition result. */
    public static BeginQuorumEpochResponseData singletonResponse(
        Errors topLevelError,
        TopicPartition topicPartition,
        Errors partitionLevelError,
        int leaderEpoch,
        int leaderId
    ) {
        BeginQuorumEpochResponseData.PartitionData partition = new BeginQuorumEpochResponseData.PartitionData()
            .setErrorCode(partitionLevelError.code())
            .setLeaderId(leaderId)
            .setLeaderEpoch(leaderEpoch);
        BeginQuorumEpochResponseData.TopicData topic = new BeginQuorumEpochResponseData.TopicData()
            .setTopicName(topicPartition.topic())
            .setPartitions(Collections.singletonList(partition));
        return new BeginQuorumEpochResponseData()
            .setErrorCode(topLevelError.code())
            .setTopics(Collections.singletonList(topic));
    }

    /** Counts the top-level error plus every partition-level error across all topics. */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Map<Errors, Integer> counts = new HashMap<>();
        counts.put(Errors.forCode(data.errorCode()), 1);
        for (BeginQuorumEpochResponseData.TopicData topic : data.topics()) {
            for (BeginQuorumEpochResponseData.PartitionData partition : topic.partitions()) {
                counts.merge(Errors.forCode(partition.errorCode()), 1, Integer::sum);
            }
        }
        return counts;
    }

    @Override
    public BeginQuorumEpochResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return DEFAULT_THROTTLE_TIME;
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        // Not supported by the response schema
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/BrokerHeartbeatRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.BrokerHeartbeatRequestData;
import org.apache.kafka.common.message.BrokerHeartbeatResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
public class BrokerHeartbeatRequest extends AbstractRequest {

    /** Builder that carries a pre-populated request data object through to {@link #build(short)}. */
    public static class Builder extends AbstractRequest.Builder<BrokerHeartbeatRequest> {
        private final BrokerHeartbeatRequestData data;

        public Builder(BrokerHeartbeatRequestData data) {
            super(ApiKeys.BROKER_HEARTBEAT);
            this.data = data;
        }

        @Override
        public BrokerHeartbeatRequest build(short version) {
            return new BrokerHeartbeatRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final BrokerHeartbeatRequestData data;

    public BrokerHeartbeatRequest(BrokerHeartbeatRequestData data, short version) {
        super(ApiKeys.BROKER_HEARTBEAT, version);
        this.data = data;
    }

    /** Deserializes a request of the given protocol version from {@code buffer}. */
    public static BrokerHeartbeatRequest parse(ByteBuffer buffer, short version) {
        BrokerHeartbeatRequestData parsed = new BrokerHeartbeatRequestData(new ByteBufferAccessor(buffer), version);
        return new BrokerHeartbeatRequest(parsed, version);
    }

    @Override
    public BrokerHeartbeatRequestData data() {
        return data;
    }

    /** On failure, reports a single top-level error along with the throttle time. */
    @Override
    public BrokerHeartbeatResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        BrokerHeartbeatResponseData responseData = new BrokerHeartbeatResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setErrorCode(Errors.forException(e).code());
        return new BrokerHeartbeatResponse(responseData);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/BrokerHeartbeatResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.BrokerHeartbeatResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
/**
 * Response to a {@link BrokerHeartbeatRequest}, carrying a single top-level
 * error code plus a throttle time.
 */
public class BrokerHeartbeatResponse extends AbstractResponse {
    private final BrokerHeartbeatResponseData data;

    public BrokerHeartbeatResponse(BrokerHeartbeatResponseData data) {
        super(ApiKeys.BROKER_HEARTBEAT);
        this.data = data;
    }

    @Override
    public BrokerHeartbeatResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        // Single top-level error code: use the AbstractResponse helper for a
        // singleton count map (consistent with ControlledShutdownResponse)
        // instead of hand-building a mutable HashMap.
        return errorCounts(Errors.forCode(data.errorCode()));
    }

    /** Deserializes a BrokerHeartbeat response from {@code buffer}. */
    public static BrokerHeartbeatResponse parse(ByteBuffer buffer, short version) {
        return new BrokerHeartbeatResponse(new BrokerHeartbeatResponseData(new ByteBufferAccessor(buffer), version));
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        // All versions of this response carry a throttle-time field.
        return true;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/BrokerRegistrationRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.BrokerRegistrationRequestData;
import org.apache.kafka.common.message.BrokerRegistrationResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
/**
 * Request sent by a KRaft broker to register itself with the controller.
 */
public class BrokerRegistrationRequest extends AbstractRequest {

    /** Builder for {@link BrokerRegistrationRequest} instances. */
    public static class Builder extends AbstractRequest.Builder<BrokerRegistrationRequest> {
        private final BrokerRegistrationRequestData data;

        public Builder(BrokerRegistrationRequestData data) {
            super(ApiKeys.BROKER_REGISTRATION);
            this.data = data;
        }

        @Override
        public short oldestAllowedVersion() {
            // Registrations from brokers migrating off ZooKeeper require at least
            // version 1 of the request.
            return data.isMigratingZkBroker() ? (short) 1 : (short) 0;
        }

        @Override
        public BrokerRegistrationRequest build(short version) {
            return new BrokerRegistrationRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final BrokerRegistrationRequestData data;

    public BrokerRegistrationRequest(BrokerRegistrationRequestData data, short version) {
        super(ApiKeys.BROKER_REGISTRATION, version);
        this.data = data;
    }

    @Override
    public BrokerRegistrationRequestData data() {
        return data;
    }

    @Override
    public BrokerRegistrationResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        // Map the throwable onto the closest protocol error and echo the throttle time.
        BrokerRegistrationResponseData responseData = new BrokerRegistrationResponseData()
                .setThrottleTimeMs(throttleTimeMs)
                .setErrorCode(Errors.forException(e).code());
        return new BrokerRegistrationResponse(responseData);
    }

    /** Deserializes a BrokerRegistration request from {@code buffer}. */
    public static BrokerRegistrationRequest parse(ByteBuffer buffer, short version) {
        BrokerRegistrationRequestData requestData =
                new BrokerRegistrationRequestData(new ByteBufferAccessor(buffer), version);
        return new BrokerRegistrationRequest(requestData, version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/BrokerRegistrationResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.BrokerRegistrationResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
/**
 * Response to a {@link BrokerRegistrationRequest}, carrying a single top-level
 * error code plus a throttle time.
 */
public class BrokerRegistrationResponse extends AbstractResponse {
    private final BrokerRegistrationResponseData data;

    public BrokerRegistrationResponse(BrokerRegistrationResponseData data) {
        super(ApiKeys.BROKER_REGISTRATION);
        this.data = data;
    }

    @Override
    public BrokerRegistrationResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        // Single top-level error code: use the AbstractResponse helper for a
        // singleton count map (consistent with ControlledShutdownResponse)
        // instead of hand-building a mutable HashMap.
        return errorCounts(Errors.forCode(data.errorCode()));
    }

    /** Deserializes a BrokerRegistration response from {@code buffer}. */
    public static BrokerRegistrationResponse parse(ByteBuffer buffer, short version) {
        return new BrokerRegistrationResponse(new BrokerRegistrationResponseData(new ByteBufferAccessor(buffer), version));
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        // All versions of this response carry a throttle-time field.
        return true;
    }

    @Override
    public String toString() {
        return data.toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ConsumerGroupHeartbeatRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData;
import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
/**
 * Heartbeat request used by the new (KIP-848) consumer group protocol.
 */
public class ConsumerGroupHeartbeatRequest extends AbstractRequest {

    /** Builder for {@link ConsumerGroupHeartbeatRequest} instances. */
    public static class Builder extends AbstractRequest.Builder<ConsumerGroupHeartbeatRequest> {
        private final ConsumerGroupHeartbeatRequestData data;

        public Builder(ConsumerGroupHeartbeatRequestData data) {
            super(ApiKeys.CONSUMER_GROUP_HEARTBEAT);
            this.data = data;
        }

        @Override
        public ConsumerGroupHeartbeatRequest build(short version) {
            return new ConsumerGroupHeartbeatRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final ConsumerGroupHeartbeatRequestData data;

    public ConsumerGroupHeartbeatRequest(ConsumerGroupHeartbeatRequestData data, short version) {
        super(ApiKeys.CONSUMER_GROUP_HEARTBEAT, version);
        this.data = data;
    }

    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        // Map the throwable onto the closest protocol error and echo the throttle time.
        ConsumerGroupHeartbeatResponseData responseData = new ConsumerGroupHeartbeatResponseData()
                .setThrottleTimeMs(throttleTimeMs)
                .setErrorCode(Errors.forException(e).code());
        return new ConsumerGroupHeartbeatResponse(responseData);
    }

    @Override
    public ConsumerGroupHeartbeatRequestData data() {
        return data;
    }

    /** Deserializes a ConsumerGroupHeartbeat request from {@code buffer}. */
    public static ConsumerGroupHeartbeatRequest parse(ByteBuffer buffer, short version) {
        ConsumerGroupHeartbeatRequestData requestData =
                new ConsumerGroupHeartbeatRequestData(new ByteBufferAccessor(buffer), version);
        return new ConsumerGroupHeartbeatRequest(requestData, version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ConsumerGroupHeartbeatResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Map;
/**
* Possible error codes.
*
* - {@link Errors#GROUP_AUTHORIZATION_FAILED}
* - {@link Errors#NOT_COORDINATOR}
* - {@link Errors#COORDINATOR_NOT_AVAILABLE}
* - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS}
* - {@link Errors#INVALID_REQUEST}
* - {@link Errors#UNKNOWN_MEMBER_ID}
* - {@link Errors#FENCED_MEMBER_EPOCH}
* - {@link Errors#UNSUPPORTED_ASSIGNOR}
* - {@link Errors#UNRELEASED_INSTANCE_ID}
* - {@link Errors#GROUP_MAX_SIZE_REACHED}
*/
public class ConsumerGroupHeartbeatResponse extends AbstractResponse {

    private final ConsumerGroupHeartbeatResponseData data;

    public ConsumerGroupHeartbeatResponse(ConsumerGroupHeartbeatResponseData data) {
        super(ApiKeys.CONSUMER_GROUP_HEARTBEAT);
        this.data = data;
    }

    @Override
    public ConsumerGroupHeartbeatResponseData data() {
        return data;
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        // The response carries a single top-level error code.
        return Collections.singletonMap(Errors.forCode(data.errorCode()), 1);
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /** Deserializes a ConsumerGroupHeartbeat response from {@code buffer}. */
    public static ConsumerGroupHeartbeatResponse parse(ByteBuffer buffer, short version) {
        ConsumerGroupHeartbeatResponseData responseData =
                new ConsumerGroupHeartbeatResponseData(new ByteBufferAccessor(buffer), version);
        return new ConsumerGroupHeartbeatResponse(responseData);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ControlledShutdownRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.ControlledShutdownRequestData;
import org.apache.kafka.common.message.ControlledShutdownResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
/**
 * Request sent by a broker to the controller to initiate a controlled shutdown.
 */
public class ControlledShutdownRequest extends AbstractRequest {

    /** Builder for {@link ControlledShutdownRequest}; allows pinning a desired version. */
    public static class Builder extends AbstractRequest.Builder<ControlledShutdownRequest> {
        private final ControlledShutdownRequestData data;

        public Builder(ControlledShutdownRequestData data, short desiredVersion) {
            super(ApiKeys.CONTROLLED_SHUTDOWN, desiredVersion);
            this.data = data;
        }

        @Override
        public ControlledShutdownRequest build(short version) {
            return new ControlledShutdownRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final ControlledShutdownRequestData data;

    private ControlledShutdownRequest(ControlledShutdownRequestData data, short version) {
        super(ApiKeys.CONTROLLED_SHUTDOWN, version);
        this.data = data;
    }

    @Override
    public ControlledShutdownResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        // This response schema has no throttle-time field, so only the error code is set.
        ControlledShutdownResponseData responseData = new ControlledShutdownResponseData()
                .setErrorCode(Errors.forException(e).code());
        return new ControlledShutdownResponse(responseData);
    }

    /** Deserializes a ControlledShutdown request from {@code buffer}. */
    public static ControlledShutdownRequest parse(ByteBuffer buffer, short version) {
        ControlledShutdownRequestData requestData =
                new ControlledShutdownRequestData(new ByteBufferAccessor(buffer), version);
        return new ControlledShutdownRequest(requestData, version);
    }

    @Override
    public ControlledShutdownRequestData data() {
        return data;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ControlledShutdownResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.ControlledShutdownResponseData;
import org.apache.kafka.common.message.ControlledShutdownResponseData.RemainingPartition;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
import java.util.Set;
public class ControlledShutdownResponse extends AbstractResponse {
    /**
     * Possible error codes:
     *
     * UNKNOWN(-1) (this is because IllegalStateException may be thrown in `KafkaController.shutdownBroker`, it would be good to improve this)
     * BROKER_NOT_AVAILABLE(8)
     * STALE_CONTROLLER_EPOCH(11)
     */
    private final ControlledShutdownResponseData data;

    public ControlledShutdownResponse(ControlledShutdownResponseData data) {
        super(ApiKeys.CONTROLLED_SHUTDOWN);
        this.data = data;
    }

    /** @return the top-level error carried by this response */
    public Errors error() {
        return Errors.forCode(data.errorCode());
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(error());
    }

    @Override
    public int throttleTimeMs() {
        // The response schema has no throttle-time field.
        return DEFAULT_THROTTLE_TIME;
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        // Not supported by the response schema
    }

    /** Deserializes a ControlledShutdown response from {@code buffer}. */
    public static ControlledShutdownResponse parse(ByteBuffer buffer, short version) {
        return new ControlledShutdownResponse(new ControlledShutdownResponseData(new ByteBufferAccessor(buffer), version));
    }

    @Override
    public ControlledShutdownResponseData data() {
        return data;
    }

    /**
     * Builds a response with the given error and the set of partitions that
     * still need leadership moved before shutdown can complete.
     */
    public static ControlledShutdownResponse prepareResponse(Errors error, Set<TopicPartition> tps) {
        ControlledShutdownResponseData responseData = new ControlledShutdownResponseData();
        responseData.setErrorCode(error.code());
        ControlledShutdownResponseData.RemainingPartitionCollection remaining =
                new ControlledShutdownResponseData.RemainingPartitionCollection();
        for (TopicPartition tp : tps) {
            remaining.add(new RemainingPartition()
                    .setTopicName(tp.topic())
                    .setPartitionIndex(tp.partition()));
        }
        responseData.setRemainingPartitions(remaining);
        return new ControlledShutdownResponse(responseData);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/CorrelationIdMismatchException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
/**
* Raised if the correlationId in a response header does not match
* the expected value from the request header.
*/
/**
 * Thrown when the correlation id carried by a response header differs from the
 * correlation id that was sent in the corresponding request header.
 */
public class CorrelationIdMismatchException extends IllegalStateException {
    private final int requestCorrelationId;
    private final int responseCorrelationId;

    public CorrelationIdMismatchException(
        String message,
        int requestCorrelationId,
        int responseCorrelationId
    ) {
        super(message);
        this.requestCorrelationId = requestCorrelationId;
        this.responseCorrelationId = responseCorrelationId;
    }

    /** @return the correlation id that was sent in the request header */
    public int requestCorrelationId() {
        return requestCorrelationId;
    }

    /** @return the correlation id that arrived in the response header */
    public int responseCorrelationId() {
        return responseCorrelationId;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/CreateAclsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.CreateAclsRequestData;
import org.apache.kafka.common.message.CreateAclsRequestData.AclCreation;
import org.apache.kafka.common.message.CreateAclsResponseData;
import org.apache.kafka.common.message.CreateAclsResponseData.AclCreationResult;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.resource.ResourceType;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
/**
 * Request to create a batch of ACL bindings.
 */
public class CreateAclsRequest extends AbstractRequest {

    /** Builder for {@link CreateAclsRequest} instances. */
    public static class Builder extends AbstractRequest.Builder<CreateAclsRequest> {
        private final CreateAclsRequestData data;

        public Builder(CreateAclsRequestData data) {
            super(ApiKeys.CREATE_ACLS);
            this.data = data;
        }

        @Override
        public CreateAclsRequest build(short version) {
            return new CreateAclsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final CreateAclsRequestData data;

    CreateAclsRequest(CreateAclsRequestData data, short version) {
        super(ApiKeys.CREATE_ACLS, version);
        validate(data);
        this.data = data;
    }

    /** @return the ACL creations carried by this request */
    public List<AclCreation> aclCreations() {
        return data.creations();
    }

    @Override
    public CreateAclsRequestData data() {
        return data;
    }

    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable throwable) {
        // Every creation in the batch fails with the same mapped error.
        AclCreationResult failure = CreateAclsRequest.aclResult(throwable);
        List<AclCreationResult> results = Collections.nCopies(data.creations().size(), failure);
        return new CreateAclsResponse(new CreateAclsResponseData()
                .setThrottleTimeMs(throttleTimeMs)
                .setResults(results));
    }

    /** Deserializes a CreateAcls request from {@code buffer}. */
    public static CreateAclsRequest parse(ByteBuffer buffer, short version) {
        return new CreateAclsRequest(new CreateAclsRequestData(new ByteBufferAccessor(buffer), version), version);
    }

    // Rejects creations that this request version cannot express, and creations
    // containing UNKNOWN enum codes (which cannot be meaningfully applied).
    private void validate(CreateAclsRequestData data) {
        if (version() == 0) {
            boolean hasNonLiteral = data.creations().stream()
                    .anyMatch(creation -> creation.resourcePatternType() != PatternType.LITERAL.code());
            if (hasNonLiteral)
                throw new UnsupportedVersionException("Version 0 only supports literal resource pattern types");
        }
        boolean hasUnknown = data.creations().stream().anyMatch(creation ->
                creation.resourcePatternType() == PatternType.UNKNOWN.code()
                        || creation.resourceType() == ResourceType.UNKNOWN.code()
                        || creation.permissionType() == AclPermissionType.UNKNOWN.code()
                        || creation.operation() == AclOperation.UNKNOWN.code());
        if (hasUnknown)
            throw new IllegalArgumentException("CreatableAcls contain unknown elements: " + data.creations());
    }

    /** Converts a wire-level {@link AclCreation} into an {@link AclBinding}. */
    public static AclBinding aclBinding(AclCreation acl) {
        ResourcePattern resourcePattern = new ResourcePattern(
                ResourceType.fromCode(acl.resourceType()),
                acl.resourceName(),
                PatternType.fromCode(acl.resourcePatternType()));
        AccessControlEntry aclEntry = new AccessControlEntry(
                acl.principal(),
                acl.host(),
                AclOperation.fromCode(acl.operation()),
                AclPermissionType.fromCode(acl.permissionType()));
        return new AclBinding(resourcePattern, aclEntry);
    }

    /** Converts an {@link AclBinding} into its wire-level {@link AclCreation} form. */
    public static AclCreation aclCreation(AclBinding binding) {
        return new AclCreation()
                .setHost(binding.entry().host())
                .setOperation(binding.entry().operation().code())
                .setPermissionType(binding.entry().permissionType().code())
                .setPrincipal(binding.entry().principal())
                .setResourceName(binding.pattern().name())
                .setResourceType(binding.pattern().resourceType().code())
                .setResourcePatternType(binding.pattern().patternType().code());
    }

    // Maps a throwable to a per-creation failure result.
    private static AclCreationResult aclResult(Throwable throwable) {
        ApiError apiError = ApiError.fromThrowable(throwable);
        return new AclCreationResult()
                .setErrorCode(apiError.error().code())
                .setErrorMessage(apiError.message());
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/CreateAclsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.CreateAclsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
/**
 * Response to a {@link CreateAclsRequest}, with one result per requested creation.
 */
public class CreateAclsResponse extends AbstractResponse {
    private final CreateAclsResponseData data;

    public CreateAclsResponse(CreateAclsResponseData data) {
        super(ApiKeys.CREATE_ACLS);
        this.data = data;
    }

    @Override
    public CreateAclsResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /** @return the per-creation results, in request order */
    public List<CreateAclsResponseData.AclCreationResult> results() {
        return data.results();
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        // Tally the error of each individual creation result.
        return errorCounts(results().stream().map(result -> Errors.forCode(result.errorCode())));
    }

    /** Deserializes a CreateAcls response from {@code buffer}. */
    public static CreateAclsResponse parse(ByteBuffer buffer, short version) {
        return new CreateAclsResponse(new CreateAclsResponseData(new ByteBufferAccessor(buffer), version));
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        // The throttle-time field is honored by clients from version 1 onwards.
        return version >= 1;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/CreateDelegationTokenRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.CreateDelegationTokenRequestData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import java.nio.ByteBuffer;
/**
 * Request to create a delegation token.
 */
public class CreateDelegationTokenRequest extends AbstractRequest {
    private final CreateDelegationTokenRequestData data;

    private CreateDelegationTokenRequest(CreateDelegationTokenRequestData data, short version) {
        super(ApiKeys.CREATE_DELEGATION_TOKEN, version);
        this.data = data;
    }

    /** Deserializes a CreateDelegationToken request from {@code buffer}. */
    public static CreateDelegationTokenRequest parse(ByteBuffer buffer, short version) {
        CreateDelegationTokenRequestData requestData =
                new CreateDelegationTokenRequestData(new ByteBufferAccessor(buffer), version);
        return new CreateDelegationTokenRequest(requestData, version);
    }

    @Override
    public CreateDelegationTokenRequestData data() {
        return data;
    }

    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        // No token was created, so anonymous principals stand in for owner/requester.
        return CreateDelegationTokenResponse.prepareResponse(version(), throttleTimeMs, Errors.forException(e),
                KafkaPrincipal.ANONYMOUS, KafkaPrincipal.ANONYMOUS);
    }

    /** Builder for {@link CreateDelegationTokenRequest} instances. */
    public static class Builder extends AbstractRequest.Builder<CreateDelegationTokenRequest> {
        private final CreateDelegationTokenRequestData data;

        public Builder(CreateDelegationTokenRequestData data) {
            super(ApiKeys.CREATE_DELEGATION_TOKEN);
            this.data = data;
        }

        @Override
        public CreateDelegationTokenRequest build(short version) {
            return new CreateDelegationTokenRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/CreateDelegationTokenResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.CreateDelegationTokenResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import java.nio.ByteBuffer;
import java.util.Map;
public class CreateDelegationTokenResponse extends AbstractResponse {
private final CreateDelegationTokenResponseData data;
public CreateDelegationTokenResponse(CreateDelegationTokenResponseData data) {
super(ApiKeys.CREATE_DELEGATION_TOKEN);
this.data = data;
}
public static CreateDelegationTokenResponse parse(ByteBuffer buffer, short version) {
return new CreateDelegationTokenResponse(
new CreateDelegationTokenResponseData(new ByteBufferAccessor(buffer), version));
}
public static CreateDelegationTokenResponse prepareResponse(int version,
int throttleTimeMs,
Errors error,
KafkaPrincipal owner,
KafkaPrincipal tokenRequester,
long issueTimestamp,
long expiryTimestamp,
long maxTimestamp,
String tokenId,
ByteBuffer hmac) {
CreateDelegationTokenResponseData data = new CreateDelegationTokenResponseData()
.setThrottleTimeMs(throttleTimeMs)
.setErrorCode(error.code())
.setPrincipalType(owner.getPrincipalType())
.setPrincipalName(owner.getName())
.setIssueTimestampMs(issueTimestamp)
.setExpiryTimestampMs(expiryTimestamp)
.setMaxTimestampMs(maxTimestamp)
.setTokenId(tokenId)
.setHmac(hmac.array());
if (version > 2) {
data.setTokenRequesterPrincipalType(tokenRequester.getPrincipalType())
.setTokenRequesterPrincipalName(tokenRequester.getName());
}
return new CreateDelegationTokenResponse(data);
}
public static CreateDelegationTokenResponse prepareResponse(int version, int throttleTimeMs, Errors error,
KafkaPrincipal owner, KafkaPrincipal requester) {
return prepareResponse(version, throttleTimeMs, error, owner, requester, -1, -1, -1, "", ByteBuffer.wrap(new byte[] {}));
}
@Override
public CreateDelegationTokenResponseData data() {
return data;
}
@Override
public Map<Errors, Integer> errorCounts() {
return errorCounts(error());
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
public Errors error() {
return Errors.forCode(data.errorCode());
}
public boolean hasError() {
return error() != Errors.NONE;
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 1;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/CreatePartitionsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.CreatePartitionsRequestData;
import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic;
import org.apache.kafka.common.message.CreatePartitionsResponseData;
import org.apache.kafka.common.message.CreatePartitionsResponseData.CreatePartitionsTopicResult;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import java.nio.ByteBuffer;
/** Request to add partitions to one or more existing topics. */
public class CreatePartitionsRequest extends AbstractRequest {

    /** Builder for {@link CreatePartitionsRequest}. */
    public static class Builder extends AbstractRequest.Builder<CreatePartitionsRequest> {
        private final CreatePartitionsRequestData data;

        public Builder(CreatePartitionsRequestData data) {
            super(ApiKeys.CREATE_PARTITIONS);
            this.data = data;
        }

        @Override
        public CreatePartitionsRequest build(short version) {
            return new CreatePartitionsRequest(this.data, version);
        }

        @Override
        public String toString() {
            return this.data.toString();
        }
    }

    private final CreatePartitionsRequestData data;

    CreatePartitionsRequest(CreatePartitionsRequestData data, short apiVersion) {
        super(ApiKeys.CREATE_PARTITIONS, apiVersion);
        this.data = data;
    }

    @Override
    public CreatePartitionsRequestData data() {
        return this.data;
    }

    /** Builds a response reporting the same error for every topic in the request. */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        ApiError apiError = ApiError.fromThrowable(e);
        CreatePartitionsResponseData responseData = new CreatePartitionsResponseData()
            .setThrottleTimeMs(throttleTimeMs);
        data.topics().forEach(topic ->
            responseData.results().add(new CreatePartitionsTopicResult()
                .setName(topic.name())
                .setErrorCode(apiError.error().code())
                .setErrorMessage(apiError.message())));
        return new CreatePartitionsResponse(responseData);
    }

    /** Deserializes a request from {@code buffer} at the given protocol {@code version}. */
    public static CreatePartitionsRequest parse(ByteBuffer buffer, short version) {
        CreatePartitionsRequestData parsed =
            new CreatePartitionsRequestData(new ByteBufferAccessor(buffer), version);
        return new CreatePartitionsRequest(parsed, version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/CreatePartitionsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.CreatePartitionsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
public class CreatePartitionsResponse extends AbstractResponse {
private final CreatePartitionsResponseData data;
public CreatePartitionsResponse(CreatePartitionsResponseData data) {
super(ApiKeys.CREATE_PARTITIONS);
this.data = data;
}
@Override
public CreatePartitionsResponseData data() {
return data;
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> counts = new HashMap<>();
data.results().forEach(result ->
updateErrorCounts(counts, Errors.forCode(result.errorCode()))
);
return counts;
}
public static CreatePartitionsResponse parse(ByteBuffer buffer, short version) {
return new CreatePartitionsResponse(new CreatePartitionsResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 1;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/CreateTopicsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.CreateTopicsRequestData;
import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic;
import org.apache.kafka.common.message.CreateTopicsResponseData;
import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
/** Request to create one or more topics. */
public class CreateTopicsRequest extends AbstractRequest {

    /** Builder that also enforces version-specific feature constraints at build time. */
    public static class Builder extends AbstractRequest.Builder<CreateTopicsRequest> {
        private final CreateTopicsRequestData data;

        public Builder(CreateTopicsRequestData data) {
            super(ApiKeys.CREATE_TOPICS);
            this.data = data;
        }

        /**
         * Validates version-dependent fields before constructing the request:
         * validateOnly requires v1+, and broker-side default partitions/replication
         * factor (sentinel -1 with no explicit assignments) require v4+.
         */
        @Override
        public CreateTopicsRequest build(short version) {
            if (data.validateOnly() && version == 0)
                throw new UnsupportedVersionException("validateOnly is not supported in version 0 of " +
                        "CreateTopicsRequest");
            final List<String> topicsWithDefaults = data.topics()
                .stream()
                .filter(topic -> topic.assignments().isEmpty()
                    && (topic.numPartitions() == CreateTopicsRequest.NO_NUM_PARTITIONS
                        || topic.replicationFactor() == CreateTopicsRequest.NO_REPLICATION_FACTOR))
                .map(CreatableTopic::name)
                .collect(Collectors.toList());
            if (!topicsWithDefaults.isEmpty() && version < 4) {
                throw new UnsupportedVersionException("Creating topics with default "
                    + "partitions/replication factor are only supported in CreateTopicRequest "
                    + "version 4+. The following topics need values for partitions and replicas: "
                    + topicsWithDefaults);
            }
            return new CreateTopicsRequest(this.data, version);
        }

        @Override
        public String toString() {
            return this.data.toString();
        }

        @Override
        public boolean equals(Object other) {
            return other instanceof Builder && this.data.equals(((Builder) other).data);
        }

        @Override
        public int hashCode() {
            return this.data.hashCode();
        }
    }

    private final CreateTopicsRequestData data;

    /** Sentinel meaning "use the broker default number of partitions". */
    public static final int NO_NUM_PARTITIONS = -1;
    /** Sentinel meaning "use the broker default replication factor". */
    public static final short NO_REPLICATION_FACTOR = -1;

    public CreateTopicsRequest(CreateTopicsRequestData data, short version) {
        super(ApiKeys.CREATE_TOPICS, version);
        this.data = data;
    }

    @Override
    public CreateTopicsRequestData data() {
        return this.data;
    }

    /**
     * Builds a response reporting the same error for every topic in the request.
     * Throttle time is only present in the response from version 2 onwards.
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        CreateTopicsResponseData responseData = new CreateTopicsResponseData();
        if (version() >= 2) {
            responseData.setThrottleTimeMs(throttleTimeMs);
        }
        ApiError apiError = ApiError.fromThrowable(e);
        data.topics().forEach(topic ->
            responseData.topics().add(new CreatableTopicResult()
                .setName(topic.name())
                .setErrorCode(apiError.error().code())
                .setErrorMessage(apiError.message())));
        return new CreateTopicsResponse(responseData);
    }

    /** Deserializes a request from {@code buffer} at the given protocol {@code version}. */
    public static CreateTopicsRequest parse(ByteBuffer buffer, short version) {
        CreateTopicsRequestData parsed =
            new CreateTopicsRequestData(new ByteBufferAccessor(buffer), version);
        return new CreateTopicsRequest(parsed, version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/CreateTopicsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.CreateTopicsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
/** Response for {@code CreateTopics} requests: a per-topic result list. */
public class CreateTopicsResponse extends AbstractResponse {
    /**
     * Possible error codes:
     *
     * REQUEST_TIMED_OUT(7)
     * INVALID_TOPIC_EXCEPTION(17)
     * TOPIC_AUTHORIZATION_FAILED(29)
     * TOPIC_ALREADY_EXISTS(36)
     * INVALID_PARTITIONS(37)
     * INVALID_REPLICATION_FACTOR(38)
     * INVALID_REPLICA_ASSIGNMENT(39)
     * INVALID_CONFIG(40)
     * NOT_CONTROLLER(41)
     * INVALID_REQUEST(42)
     * POLICY_VIOLATION(44)
     */
    private final CreateTopicsResponseData data;

    public CreateTopicsResponse(CreateTopicsResponseData data) {
        super(ApiKeys.CREATE_TOPICS);
        this.data = data;
    }

    @Override
    public CreateTopicsResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /** Tallies one error entry per topic result. */
    @Override
    public Map<Errors, Integer> errorCounts() {
        // Declared as the Map interface (not HashMap) for consistency with the
        // equivalent method in CreatePartitionsResponse.
        Map<Errors, Integer> counts = new HashMap<>();
        data.topics().forEach(result ->
            updateErrorCounts(counts, Errors.forCode(result.errorCode()))
        );
        return counts;
    }

    /** Deserializes a response from {@code buffer} at the given protocol {@code version}. */
    public static CreateTopicsResponse parse(ByteBuffer buffer, short version) {
        return new CreateTopicsResponse(new CreateTopicsResponseData(new ByteBufferAccessor(buffer), version));
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        // Throttle-time field exists from version 3 onwards.
        return version >= 3;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DeleteAclsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.util.Collections;
import java.util.stream.Collectors;
import org.apache.kafka.common.acl.AccessControlEntryFilter;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.DeleteAclsRequestData;
import org.apache.kafka.common.message.DeleteAclsRequestData.DeleteAclsFilter;
import org.apache.kafka.common.message.DeleteAclsResponseData;
import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsFilterResult;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;
import java.nio.ByteBuffer;
import java.util.List;
import static org.apache.kafka.common.protocol.ApiKeys.DELETE_ACLS;
/**
 * Request to delete ACL bindings matching the supplied filters.
 *
 * The constructor normalizes and validates the filters in place, so this class is not
 * restyled lightly: the ANY -> LITERAL downgrade must happen before the UNKNOWN check.
 */
public class DeleteAclsRequest extends AbstractRequest {
    /** Builder that simply carries the pre-built request data to the versioned constructor. */
    public static class Builder extends AbstractRequest.Builder<DeleteAclsRequest> {
        private final DeleteAclsRequestData data;
        public Builder(DeleteAclsRequestData data) {
            super(DELETE_ACLS);
            this.data = data;
        }
        @Override
        public DeleteAclsRequest build(short version) {
            return new DeleteAclsRequest(data, version);
        }
        @Override
        public String toString() {
            return data.toString();
        }
    }
    private final DeleteAclsRequestData data;
    private DeleteAclsRequest(DeleteAclsRequestData data, short version) {
        super(ApiKeys.DELETE_ACLS, version);
        this.data = data;
        // Mutates `data` in place (pattern-type downgrade for v0) and rejects unsupported
        // or UNKNOWN filter components before the request can be sent.
        normalizeAndValidate();
    }
    /**
     * For version 0, rewrites ANY pattern-type filters to LITERAL and rejects any other
     * non-LITERAL pattern type; for all versions, rejects filters containing UNKNOWN
     * pattern type, resource type, operation, or permission type.
     */
    private void normalizeAndValidate() {
        if (version() == 0) {
            for (DeleteAclsRequestData.DeleteAclsFilter filter : data.filters()) {
                PatternType patternType = PatternType.fromCode(filter.patternTypeFilter());
                // On older brokers, no pattern types existed except LITERAL (effectively). So even though ANY is not
                // directly supported on those brokers, we can get the same effect as ANY by setting the pattern type
                // to LITERAL. Note that the wildcard `*` is considered `LITERAL` for compatibility reasons.
                if (patternType == PatternType.ANY)
                    filter.setPatternTypeFilter(PatternType.LITERAL.code());
                else if (patternType != PatternType.LITERAL)
                    throw new UnsupportedVersionException("Version 0 does not support pattern type " +
                            patternType + " (only LITERAL and ANY are supported)");
            }
        }
        // Note: runs against the possibly-rewritten filters from the v0 loop above.
        final boolean unknown = data.filters().stream().anyMatch(filter ->
            filter.patternTypeFilter() == PatternType.UNKNOWN.code()
                || filter.resourceTypeFilter() == ResourceType.UNKNOWN.code()
                || filter.operation() == AclOperation.UNKNOWN.code()
                || filter.permissionType() == AclPermissionType.UNKNOWN.code()
        );
        if (unknown) {
            throw new IllegalArgumentException("Filters contain UNKNOWN elements, filters: " + data.filters());
        }
    }
    /** Returns the filters as public-API {@link AclBindingFilter} objects. */
    public List<AclBindingFilter> filters() {
        return data.filters().stream().map(DeleteAclsRequest::aclBindingFilter).collect(Collectors.toList());
    }
    @Override
    public DeleteAclsRequestData data() {
        return data;
    }
    /** Builds a response carrying the same error for every filter in the request. */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable throwable) {
        ApiError apiError = ApiError.fromThrowable(throwable);
        // nCopies shares one result instance across all filters; callers treat results as read-only.
        List<DeleteAclsFilterResult> filterResults = Collections.nCopies(data.filters().size(),
            new DeleteAclsResponseData.DeleteAclsFilterResult()
                .setErrorCode(apiError.error().code())
                .setErrorMessage(apiError.message()));
        return new DeleteAclsResponse(new DeleteAclsResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setFilterResults(filterResults), version());
    }
    /** Deserializes a request from {@code buffer} at the given protocol {@code version}. */
    public static DeleteAclsRequest parse(ByteBuffer buffer, short version) {
        return new DeleteAclsRequest(new DeleteAclsRequestData(new ByteBufferAccessor(buffer), version), version);
    }
    /** Converts a public-API filter into its wire-format representation. */
    public static DeleteAclsFilter deleteAclsFilter(AclBindingFilter filter) {
        return new DeleteAclsFilter()
            .setResourceNameFilter(filter.patternFilter().name())
            .setResourceTypeFilter(filter.patternFilter().resourceType().code())
            .setPatternTypeFilter(filter.patternFilter().patternType().code())
            .setHostFilter(filter.entryFilter().host())
            .setOperation(filter.entryFilter().operation().code())
            .setPermissionType(filter.entryFilter().permissionType().code())
            .setPrincipalFilter(filter.entryFilter().principal());
    }
    /** Converts a wire-format filter into its public-API representation. */
    private static AclBindingFilter aclBindingFilter(DeleteAclsFilter filter) {
        ResourcePatternFilter patternFilter = new ResourcePatternFilter(
            ResourceType.fromCode(filter.resourceTypeFilter()),
            filter.resourceNameFilter(),
            PatternType.fromCode(filter.patternTypeFilter()));
        AccessControlEntryFilter entryFilter = new AccessControlEntryFilter(
            filter.principalFilter(),
            filter.hostFilter(),
            AclOperation.fromCode(filter.operation()),
            AclPermissionType.fromCode(filter.permissionType()));
        return new AclBindingFilter(patternFilter, entryFilter);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DeleteAclsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.DeleteAclsResponseData;
import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsFilterResult;
import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsMatchingAcl;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourceType;
import org.apache.kafka.server.authorizer.AclDeleteResult;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/** Response for {@code DeleteAcls} requests: one result per request filter. */
public class DeleteAclsResponse extends AbstractResponse {
    // NOTE(review): this logger is part of the public surface (public field); left public
    // for binary compatibility, though private would be the usual convention.
    public static final Logger log = LoggerFactory.getLogger(DeleteAclsResponse.class);

    private final DeleteAclsResponseData data;

    public DeleteAclsResponse(DeleteAclsResponseData data, short version) {
        super(ApiKeys.DELETE_ACLS);
        this.data = data;
        validate(version);
    }

    @Override
    public DeleteAclsResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    public List<DeleteAclsResponseData.DeleteAclsFilterResult> filterResults() {
        return data.filterResults();
    }

    /** Tallies one error entry per filter result. */
    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(filterResults().stream().map(r -> Errors.forCode(r.errorCode())));
    }

    /** Deserializes a response from {@code buffer} at the given protocol {@code version}. */
    public static DeleteAclsResponse parse(ByteBuffer buffer, short version) {
        return new DeleteAclsResponse(new DeleteAclsResponseData(new ByteBufferAccessor(buffer), version), version);
    }

    // Fixed: @Override was missing, inconsistent with every other overridden method here.
    @Override
    public String toString() {
        return data.toString();
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        // Throttle-time field exists from version 1 onwards.
        return version >= 1;
    }

    /**
     * Rejects responses that cannot be represented at the given version: non-LITERAL
     * pattern types at v0, and UNKNOWN enum codes at any version.
     */
    private void validate(short version) {
        if (version == 0) {
            final boolean unsupported = filterResults().stream()
                .flatMap(r -> r.matchingAcls().stream())
                .anyMatch(matchingAcl -> matchingAcl.patternType() != PatternType.LITERAL.code());
            if (unsupported)
                throw new UnsupportedVersionException("Version 0 only supports literal resource pattern types");
        }
        final boolean unknown = filterResults().stream()
            .flatMap(r -> r.matchingAcls().stream())
            .anyMatch(matchingAcl -> matchingAcl.patternType() == PatternType.UNKNOWN.code()
                || matchingAcl.resourceType() == ResourceType.UNKNOWN.code()
                || matchingAcl.permissionType() == AclPermissionType.UNKNOWN.code()
                || matchingAcl.operation() == AclOperation.UNKNOWN.code());
        if (unknown)
            throw new IllegalArgumentException("DeleteAclsMatchingAcls contain UNKNOWN elements");
    }

    /** Converts a server-side delete result into a wire-format filter result. */
    public static DeleteAclsFilterResult filterResult(AclDeleteResult result) {
        ApiError error = result.exception().map(ApiError::fromThrowable).orElse(ApiError.NONE);
        List<DeleteAclsMatchingAcl> matchingAcls = result.aclBindingDeleteResults().stream()
            .map(DeleteAclsResponse::matchingAcl)
            .collect(Collectors.toList());
        return new DeleteAclsFilterResult()
            .setErrorCode(error.error().code())
            .setErrorMessage(error.message())
            .setMatchingAcls(matchingAcls);
    }

    /** Converts a per-binding delete result into a wire-format matching ACL. */
    private static DeleteAclsMatchingAcl matchingAcl(AclDeleteResult.AclBindingDeleteResult result) {
        ApiError error = result.exception().map(ApiError::fromThrowable).orElse(ApiError.NONE);
        AclBinding acl = result.aclBinding();
        return matchingAcl(acl, error);
    }

    // Visible for testing
    public static DeleteAclsMatchingAcl matchingAcl(AclBinding acl, ApiError error) {
        return new DeleteAclsMatchingAcl()
            .setErrorCode(error.error().code())
            .setErrorMessage(error.message())
            .setResourceName(acl.pattern().name())
            .setResourceType(acl.pattern().resourceType().code())
            .setPatternType(acl.pattern().patternType().code())
            .setHost(acl.entry().host())
            .setOperation(acl.entry().operation().code())
            .setPermissionType(acl.entry().permissionType().code())
            .setPrincipal(acl.entry().principal());
    }

    /** Reconstructs a public-API {@link AclBinding} from a wire-format matching ACL. */
    public static AclBinding aclBinding(DeleteAclsMatchingAcl matchingAcl) {
        ResourcePattern resourcePattern = new ResourcePattern(ResourceType.fromCode(matchingAcl.resourceType()),
            matchingAcl.resourceName(), PatternType.fromCode(matchingAcl.patternType()));
        AccessControlEntry accessControlEntry = new AccessControlEntry(matchingAcl.principal(), matchingAcl.host(),
            AclOperation.fromCode(matchingAcl.operation()), AclPermissionType.fromCode(matchingAcl.permissionType()));
        return new AclBinding(resourcePattern, accessControlEntry);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DeleteGroupsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DeleteGroupsRequestData;
import org.apache.kafka.common.message.DeleteGroupsResponseData;
import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResult;
import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResultCollection;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
/** Request to delete one or more consumer groups. */
public class DeleteGroupsRequest extends AbstractRequest {

    /** Builder for {@link DeleteGroupsRequest}. */
    public static class Builder extends AbstractRequest.Builder<DeleteGroupsRequest> {
        private final DeleteGroupsRequestData data;

        public Builder(DeleteGroupsRequestData data) {
            super(ApiKeys.DELETE_GROUPS);
            this.data = data;
        }

        @Override
        public DeleteGroupsRequest build(short version) {
            return new DeleteGroupsRequest(this.data, version);
        }

        @Override
        public String toString() {
            return this.data.toString();
        }
    }

    private final DeleteGroupsRequestData data;

    public DeleteGroupsRequest(DeleteGroupsRequestData data, short version) {
        super(ApiKeys.DELETE_GROUPS, version);
        this.data = data;
    }

    /** Builds a response reporting the same error for every group in the request. */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        Errors error = Errors.forException(e);
        DeletableGroupResultCollection groupResults = new DeletableGroupResultCollection();
        data.groupsNames().forEach(groupId ->
            groupResults.add(new DeletableGroupResult()
                .setGroupId(groupId)
                .setErrorCode(error.code())));
        return new DeleteGroupsResponse(
            new DeleteGroupsResponseData()
                .setResults(groupResults)
                .setThrottleTimeMs(throttleTimeMs)
        );
    }

    /** Deserializes a request from {@code buffer} at the given protocol {@code version}. */
    public static DeleteGroupsRequest parse(ByteBuffer buffer, short version) {
        DeleteGroupsRequestData parsed =
            new DeleteGroupsRequestData(new ByteBufferAccessor(buffer), version);
        return new DeleteGroupsRequest(parsed, version);
    }

    @Override
    public DeleteGroupsRequestData data() {
        return this.data;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DeleteGroupsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DeleteGroupsResponseData;
import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResult;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
/**
* Possible error codes:
*
* COORDINATOR_LOAD_IN_PROGRESS (14)
* COORDINATOR_NOT_AVAILABLE(15)
* NOT_COORDINATOR (16)
* INVALID_GROUP_ID(24)
* GROUP_AUTHORIZATION_FAILED(30)
* NON_EMPTY_GROUP(68)
* GROUP_ID_NOT_FOUND(69)
*/
/** Response for {@code DeleteGroups} requests: one result per requested group. */
public class DeleteGroupsResponse extends AbstractResponse {
    private final DeleteGroupsResponseData data;

    public DeleteGroupsResponse(DeleteGroupsResponseData data) {
        super(ApiKeys.DELETE_GROUPS);
        this.data = data;
    }

    @Override
    public DeleteGroupsResponseData data() {
        return this.data;
    }

    /** Returns a map from group id to that group's deletion error. */
    public Map<String, Errors> errors() {
        Map<String, Errors> errorsByGroup = new HashMap<>();
        data.results().forEach(result ->
            errorsByGroup.put(result.groupId(), Errors.forCode(result.errorCode())));
        return errorsByGroup;
    }

    /**
     * Returns the deletion error for a single group.
     *
     * @throws IllegalArgumentException if the group is not present in this response
     */
    public Errors get(String group) throws IllegalArgumentException {
        DeletableGroupResult groupResult = data.results().find(group);
        if (groupResult == null) {
            throw new IllegalArgumentException("could not find group " + group + " in the delete group response");
        }
        return Errors.forCode(groupResult.errorCode());
    }

    /** Tallies one error entry per group result. */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Map<Errors, Integer> errorTally = new HashMap<>();
        for (DeletableGroupResult result : data.results()) {
            updateErrorCounts(errorTally, Errors.forCode(result.errorCode()));
        }
        return errorTally;
    }

    /** Deserializes a response from {@code buffer} at the given protocol {@code version}. */
    public static DeleteGroupsResponse parse(ByteBuffer buffer, short version) {
        DeleteGroupsResponseData parsed =
            new DeleteGroupsResponseData(new ByteBufferAccessor(buffer), version);
        return new DeleteGroupsResponse(parsed);
    }

    @Override
    public int throttleTimeMs() {
        return this.data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        this.data.setThrottleTimeMs(throttleTimeMs);
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        // Throttle-time field exists from version 1 onwards.
        return version >= 1;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DeleteRecordsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DeleteRecordsRequestData;
import org.apache.kafka.common.message.DeleteRecordsRequestData.DeleteRecordsTopic;
import org.apache.kafka.common.message.DeleteRecordsResponseData;
import org.apache.kafka.common.message.DeleteRecordsResponseData.DeleteRecordsTopicResult;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
public class DeleteRecordsRequest extends AbstractRequest {

    /** Sentinel offset meaning "delete records up to the partition's current high watermark". */
    public static final long HIGH_WATERMARK = -1L;

    private final DeleteRecordsRequestData data;

    public static class Builder extends AbstractRequest.Builder<DeleteRecordsRequest> {
        // Never reassigned after construction; final for consistency with the outer field.
        private final DeleteRecordsRequestData data;

        public Builder(DeleteRecordsRequestData data) {
            super(ApiKeys.DELETE_RECORDS);
            this.data = data;
        }

        @Override
        public DeleteRecordsRequest build(short version) {
            return new DeleteRecordsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private DeleteRecordsRequest(DeleteRecordsRequestData data, short version) {
        super(ApiKeys.DELETE_RECORDS, version);
        this.data = data;
    }

    @Override
    public DeleteRecordsRequestData data() {
        return data;
    }

    /**
     * Builds an error response mirroring the request layout: every requested partition
     * gets the error code derived from {@code e} and an invalid low watermark.
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        DeleteRecordsResponseData result = new DeleteRecordsResponseData().setThrottleTimeMs(throttleTimeMs);
        short errorCode = Errors.forException(e).code();
        for (DeleteRecordsTopic topic : data.topics()) {
            DeleteRecordsTopicResult topicResult = new DeleteRecordsTopicResult().setName(topic.name());
            result.topics().add(topicResult);
            for (DeleteRecordsRequestData.DeleteRecordsPartition partition : topic.partitions()) {
                topicResult.partitions().add(new DeleteRecordsResponseData.DeleteRecordsPartitionResult()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(errorCode)
                    .setLowWatermark(DeleteRecordsResponse.INVALID_LOW_WATERMARK));
            }
        }
        return new DeleteRecordsResponse(result);
    }

    public static DeleteRecordsRequest parse(ByteBuffer buffer, short version) {
        return new DeleteRecordsRequest(new DeleteRecordsRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DeleteRecordsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DeleteRecordsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
public class DeleteRecordsResponse extends AbstractResponse {
public static final long INVALID_LOW_WATERMARK = -1L;
private final DeleteRecordsResponseData data;
/**
* Possible error code:
*
* OFFSET_OUT_OF_RANGE (1)
* UNKNOWN_TOPIC_OR_PARTITION (3)
* NOT_LEADER_OR_FOLLOWER (6)
* REQUEST_TIMED_OUT (7)
* UNKNOWN (-1)
*/
public DeleteRecordsResponse(DeleteRecordsResponseData data) {
super(ApiKeys.DELETE_RECORDS);
this.data = data;
}
@Override
public DeleteRecordsResponseData data() {
return data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> errorCounts = new HashMap<>();
data.topics().forEach(topicResponses ->
topicResponses.partitions().forEach(response ->
updateErrorCounts(errorCounts, Errors.forCode(response.errorCode()))
)
);
return errorCounts;
}
public static DeleteRecordsResponse parse(ByteBuffer buffer, short version) {
return new DeleteRecordsResponse(new DeleteRecordsResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 1;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DeleteTopicsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.DeleteTopicsRequestData;
import org.apache.kafka.common.message.DeleteTopicsResponseData;
import org.apache.kafka.common.message.DeleteTopicsResponseData.DeletableTopicResult;
import org.apache.kafka.common.message.DeleteTopicsRequestData.DeleteTopicState;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
public class DeleteTopicsRequest extends AbstractRequest {

    public static class Builder extends AbstractRequest.Builder<DeleteTopicsRequest> {
        // Final: never reassigned after construction (build() mutates it in place for v6+).
        private final DeleteTopicsRequestData data;

        public Builder(DeleteTopicsRequestData data) {
            super(ApiKeys.DELETE_TOPICS);
            this.data = data;
        }

        @Override
        public DeleteTopicsRequest build(short version) {
            // Since version 6, topics are identified via DeleteTopicState entries rather than
            // a flat name list, so migrate any provided names into that representation.
            if (version >= 6 && !data.topicNames().isEmpty()) {
                data.setTopics(groupByTopic(data.topicNames()));
            }
            return new DeleteTopicsRequest(data, version);
        }

        // Wraps each plain topic name in a DeleteTopicState (name only, no topic id).
        private List<DeleteTopicState> groupByTopic(List<String> topics) {
            List<DeleteTopicState> topicStates = new ArrayList<>();
            for (String topic : topics) {
                topicStates.add(new DeleteTopicState().setName(topic));
            }
            return topicStates;
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    // Final for consistency with the other request classes in this package.
    private final DeleteTopicsRequestData data;

    private DeleteTopicsRequest(DeleteTopicsRequestData data, short version) {
        super(ApiKeys.DELETE_TOPICS, version);
        this.data = data;
    }

    @Override
    public DeleteTopicsRequestData data() {
        return data;
    }

    /**
     * Builds an error response with one result per requested topic, all carrying the error
     * derived from {@code e}. Throttle time is only set for versions that support it.
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        DeleteTopicsResponseData response = new DeleteTopicsResponseData();
        if (version() >= 1) {
            response.setThrottleTimeMs(throttleTimeMs);
        }
        ApiError apiError = ApiError.fromThrowable(e);
        for (DeleteTopicState topic : topics()) {
            response.responses().add(new DeletableTopicResult()
                .setName(topic.name())
                .setTopicId(topic.topicId())
                .setErrorCode(apiError.error().code()));
        }
        return new DeleteTopicsResponse(response);
    }

    /** Topic names, regardless of which wire representation this version uses. */
    public List<String> topicNames() {
        if (version() >= 6)
            return data.topics().stream().map(DeleteTopicState::name).collect(Collectors.toList());
        return data.topicNames();
    }

    public int numberOfTopics() {
        if (version() >= 6)
            return data.topics().size();
        return data.topicNames().size();
    }

    /** Topic ids; empty before version 6, where topics could only be addressed by name. */
    public List<Uuid> topicIds() {
        if (version() >= 6)
            return data.topics().stream().map(DeleteTopicState::topicId).collect(Collectors.toList());
        return Collections.emptyList();
    }

    /** Topics in the v6+ representation, converting from the flat name list when necessary. */
    public List<DeleteTopicState> topics() {
        if (version() >= 6)
            return data.topics();
        return data.topicNames().stream().map(name -> new DeleteTopicState().setName(name)).collect(Collectors.toList());
    }

    public static DeleteTopicsRequest parse(ByteBuffer buffer, short version) {
        return new DeleteTopicsRequest(new DeleteTopicsRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DeleteTopicsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DeleteTopicsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
public class DeleteTopicsResponse extends AbstractResponse {
/**
* Possible error codes:
*
* REQUEST_TIMED_OUT(7)
* INVALID_TOPIC_EXCEPTION(17)
* TOPIC_AUTHORIZATION_FAILED(29)
* NOT_CONTROLLER(41)
* INVALID_REQUEST(42)
* TOPIC_DELETION_DISABLED(73)
*/
private final DeleteTopicsResponseData data;
public DeleteTopicsResponse(DeleteTopicsResponseData data) {
super(ApiKeys.DELETE_TOPICS);
this.data = data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
@Override
public DeleteTopicsResponseData data() {
return data;
}
@Override
public Map<Errors, Integer> errorCounts() {
HashMap<Errors, Integer> counts = new HashMap<>();
data.responses().forEach(result ->
updateErrorCounts(counts, Errors.forCode(result.errorCode()))
);
return counts;
}
public static DeleteTopicsResponse parse(ByteBuffer buffer, short version) {
return new DeleteTopicsResponse(new DeleteTopicsResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 2;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeAclsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.acl.AccessControlEntryFilter;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.DescribeAclsRequestData;
import org.apache.kafka.common.message.DescribeAclsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePatternFilter;
import org.apache.kafka.common.resource.ResourceType;
import java.nio.ByteBuffer;
public class DescribeAclsRequest extends AbstractRequest {

    public static class Builder extends AbstractRequest.Builder<DescribeAclsRequest> {
        private final DescribeAclsRequestData data;

        /**
         * @param filter the binding filter to describe; translated field-by-field into the
         *               wire-format request data
         */
        public Builder(AclBindingFilter filter) {
            super(ApiKeys.DESCRIBE_ACLS);
            ResourcePatternFilter pattern = filter.patternFilter();
            AccessControlEntryFilter entry = filter.entryFilter();
            data = new DescribeAclsRequestData()
                .setHostFilter(entry.host())
                .setOperation(entry.operation().code())
                .setPermissionType(entry.permissionType().code())
                .setPrincipalFilter(entry.principal())
                .setResourceNameFilter(pattern.name())
                .setPatternTypeFilter(pattern.patternType().code())
                .setResourceTypeFilter(pattern.resourceType().code());
        }

        @Override
        public DescribeAclsRequest build(short version) {
            return new DescribeAclsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final DescribeAclsRequestData data;

    private DescribeAclsRequest(DescribeAclsRequestData data, short version) {
        super(ApiKeys.DESCRIBE_ACLS, version);
        this.data = data;
        normalizeAndValidate(version);
    }

    // Rewrites the pattern-type filter for old brokers where needed, then rejects any
    // UNKNOWN codes that cannot be represented on the wire.
    private void normalizeAndValidate(short version) {
        if (version == 0) {
            PatternType patternType = PatternType.fromCode(data.patternTypeFilter());
            if (patternType == PatternType.ANY) {
                // On older brokers, no pattern types existed except LITERAL (effectively). So even
                // though ANY is not directly supported there, setting the pattern type to LITERAL
                // has the same effect. The wildcard `*` is considered `LITERAL` for compatibility.
                data.setPatternTypeFilter(PatternType.LITERAL.code());
            } else if (patternType != PatternType.LITERAL) {
                throw new UnsupportedVersionException("Version 0 only supports literal resource pattern types");
            }
        }
        boolean containsUnknown = data.patternTypeFilter() == PatternType.UNKNOWN.code()
            || data.resourceTypeFilter() == ResourceType.UNKNOWN.code()
            || data.permissionType() == AclPermissionType.UNKNOWN.code()
            || data.operation() == AclOperation.UNKNOWN.code();
        if (containsUnknown) {
            throw new IllegalArgumentException("DescribeAclsRequest contains UNKNOWN elements: " + data);
        }
    }

    @Override
    public DescribeAclsRequestData data() {
        return data;
    }

    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable throwable) {
        ApiError error = ApiError.fromThrowable(throwable);
        DescribeAclsResponseData response = new DescribeAclsResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setErrorCode(error.error().code())
            .setErrorMessage(error.message());
        return new DescribeAclsResponse(response, version());
    }

    public static DescribeAclsRequest parse(ByteBuffer buffer, short version) {
        return new DescribeAclsRequest(new DescribeAclsRequestData(new ByteBufferAccessor(buffer), version), version);
    }

    /** Reconstructs the {@link AclBindingFilter} that this request represents. */
    public AclBindingFilter filter() {
        ResourcePatternFilter patternFilter = new ResourcePatternFilter(
            ResourceType.fromCode(data.resourceTypeFilter()),
            data.resourceNameFilter(),
            PatternType.fromCode(data.patternTypeFilter()));
        AccessControlEntryFilter entryFilter = new AccessControlEntryFilter(
            data.principalFilter(),
            data.hostFilter(),
            AclOperation.fromCode(data.operation()),
            AclPermissionType.fromCode(data.permissionType()));
        return new AclBindingFilter(patternFilter, entryFilter);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeAclsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.acl.AccessControlEntry;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.resource.PatternType;
import org.apache.kafka.common.resource.ResourcePattern;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.acl.AclPermissionType;
import org.apache.kafka.common.message.DescribeAclsResponseData;
import org.apache.kafka.common.message.DescribeAclsResponseData.AclDescription;
import org.apache.kafka.common.message.DescribeAclsResponseData.DescribeAclsResource;
import org.apache.kafka.common.resource.ResourceType;
/**
 * Response for DESCRIBE_ACLS: carries either a top-level error, or a list of resources each
 * grouping the ACL entries that share one resource pattern. Also hosts static helpers to
 * convert between the wire representation and the public {@link AclBinding} model.
 */
public class DescribeAclsResponse extends AbstractResponse {
private final DescribeAclsResponseData data;
public DescribeAclsResponse(DescribeAclsResponseData data, short version) {
super(ApiKeys.DESCRIBE_ACLS);
this.data = data;
// Validate against the wire version: v0 cannot carry non-LITERAL pattern types.
validate(Optional.of(version));
}
// Skips version validation, visible for testing
DescribeAclsResponse(DescribeAclsResponseData data) {
super(ApiKeys.DESCRIBE_ACLS);
this.data = data;
validate(Optional.empty());
}
@Override
public DescribeAclsResponseData data() {
return data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
// Top-level error of the describe operation (Errors.NONE on success).
public ApiError error() {
return new ApiError(Errors.forCode(data.errorCode()), data.errorMessage());
}
@Override
public Map<Errors, Integer> errorCounts() {
return errorCounts(Errors.forCode(data.errorCode()));
}
// Raw per-resource ACL listings as carried on the wire.
public List<DescribeAclsResource> acls() {
return data.resources();
}
public static DescribeAclsResponse parse(ByteBuffer buffer, short version) {
return new DescribeAclsResponse(new DescribeAclsResponseData(new ByteBufferAccessor(buffer), version), version);
}
@Override
public boolean shouldClientThrottle(short version) {
// Throttle-time field exists since version 1 of this API.
return version >= 1;
}
// Rejects responses that cannot be represented: non-LITERAL pattern types on version 0,
// and any UNKNOWN pattern/resource/operation/permission codes.
private void validate(Optional<Short> version) {
if (version.isPresent() && version.get() == 0) {
final boolean unsupported = acls().stream()
.anyMatch(acl -> acl.patternType() != PatternType.LITERAL.code());
if (unsupported) {
throw new UnsupportedVersionException("Version 0 only supports literal resource pattern types");
}
}
for (DescribeAclsResource resource : acls()) {
if (resource.patternType() == PatternType.UNKNOWN.code() || resource.resourceType() == ResourceType.UNKNOWN.code())
throw new IllegalArgumentException("Contain UNKNOWN elements");
for (AclDescription acl : resource.acls()) {
if (acl.operation() == AclOperation.UNKNOWN.code() || acl.permissionType() == AclPermissionType.UNKNOWN.code()) {
throw new IllegalArgumentException("Contain UNKNOWN elements");
}
}
}
}
// Expands one wire-format resource into individual (pattern, entry) bindings; the resource's
// pattern is duplicated into every binding produced from its ACL descriptions.
private static Stream<AclBinding> aclBindings(DescribeAclsResource resource) {
return resource.acls().stream().map(acl -> {
ResourcePattern pattern = new ResourcePattern(
ResourceType.fromCode(resource.resourceType()),
resource.resourceName(),
PatternType.fromCode(resource.patternType()));
AccessControlEntry entry = new AccessControlEntry(
acl.principal(),
acl.host(),
AclOperation.fromCode(acl.operation()),
AclPermissionType.fromCode(acl.permissionType()));
return new AclBinding(pattern, entry);
});
}
// Converts the wire representation to the public AclBinding model, flattening all resources.
public static List<AclBinding> aclBindings(List<DescribeAclsResource> resources) {
return resources.stream().flatMap(DescribeAclsResponse::aclBindings).collect(Collectors.toList());
}
// Inverse of aclBindings(List): groups the given bindings by resource pattern and rebuilds
// the wire-format resource list (one DescribeAclsResource per distinct pattern).
public static List<DescribeAclsResource> aclsResources(Collection<AclBinding> acls) {
Map<ResourcePattern, List<AccessControlEntry>> patternToEntries = new HashMap<>();
for (AclBinding acl : acls) {
patternToEntries.computeIfAbsent(acl.pattern(), v -> new ArrayList<>()).add(acl.entry());
}
List<DescribeAclsResource> resources = new ArrayList<>(patternToEntries.size());
for (Entry<ResourcePattern, List<AccessControlEntry>> entry : patternToEntries.entrySet()) {
ResourcePattern key = entry.getKey();
List<AclDescription> aclDescriptions = new ArrayList<>();
for (AccessControlEntry ace : entry.getValue()) {
AclDescription ad = new AclDescription()
.setHost(ace.host())
.setOperation(ace.operation().code())
.setPermissionType(ace.permissionType().code())
.setPrincipal(ace.principal());
aclDescriptions.add(ad);
}
DescribeAclsResource dar = new DescribeAclsResource()
.setResourceName(key.name())
.setPatternType(key.patternType().code())
.setResourceType(key.resourceType().code())
.setAcls(aclDescriptions);
resources.add(dar);
}
return resources;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeClientQuotasRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DescribeClientQuotasRequestData;
import org.apache.kafka.common.message.DescribeClientQuotasRequestData.ComponentData;
import org.apache.kafka.common.message.DescribeClientQuotasResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.quota.ClientQuotaFilter;
import org.apache.kafka.common.quota.ClientQuotaFilterComponent;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
/**
 * Request for DESCRIBE_CLIENT_QUOTAS. Each filter component is encoded with a match-type byte
 * plus an optional match string; see the MATCH_TYPE_* constants for the tri-state encoding.
 */
public class DescribeClientQuotasRequest extends AbstractRequest {
// These values must not change.
// Wire encoding of the component match semantics:
//   EXACT     - match the one entity whose name equals the match string
//   DEFAULT   - match the default entity (match string is null on the wire)
//   SPECIFIED - match any specified entity of this type (match string is null on the wire)
public static final byte MATCH_TYPE_EXACT = 0;
public static final byte MATCH_TYPE_DEFAULT = 1;
public static final byte MATCH_TYPE_SPECIFIED = 2;
public static class Builder extends AbstractRequest.Builder<DescribeClientQuotasRequest> {
private final DescribeClientQuotasRequestData data;
public Builder(ClientQuotaFilter filter) {
super(ApiKeys.DESCRIBE_CLIENT_QUOTAS);
List<ComponentData> componentData = new ArrayList<>(filter.components().size());
for (ClientQuotaFilterComponent component : filter.components()) {
ComponentData fd = new ComponentData().setEntityType(component.entityType());
// Tri-state mapping of component.match():
//   null           -> SPECIFIED (any specified entity of this type)
//   present        -> EXACT (this exact entity name)
//   empty Optional -> DEFAULT (the default entity)
if (component.match() == null) {
fd.setMatchType(MATCH_TYPE_SPECIFIED);
fd.setMatch(null);
} else if (component.match().isPresent()) {
fd.setMatchType(MATCH_TYPE_EXACT);
fd.setMatch(component.match().get());
} else {
fd.setMatchType(MATCH_TYPE_DEFAULT);
fd.setMatch(null);
}
componentData.add(fd);
}
this.data = new DescribeClientQuotasRequestData()
.setComponents(componentData)
.setStrict(filter.strict());
}
@Override
public DescribeClientQuotasRequest build(short version) {
return new DescribeClientQuotasRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
}
private final DescribeClientQuotasRequestData data;
public DescribeClientQuotasRequest(DescribeClientQuotasRequestData data, short version) {
super(ApiKeys.DESCRIBE_CLIENT_QUOTAS, version);
this.data = data;
}
// Reconstructs the ClientQuotaFilter this request represents (inverse of the Builder mapping).
public ClientQuotaFilter filter() {
List<ClientQuotaFilterComponent> components = new ArrayList<>(data.components().size());
for (ComponentData componentData : data.components()) {
ClientQuotaFilterComponent component;
switch (componentData.matchType()) {
case MATCH_TYPE_EXACT:
component = ClientQuotaFilterComponent.ofEntity(componentData.entityType(), componentData.match());
break;
case MATCH_TYPE_DEFAULT:
component = ClientQuotaFilterComponent.ofDefaultEntity(componentData.entityType());
break;
case MATCH_TYPE_SPECIFIED:
component = ClientQuotaFilterComponent.ofEntityType(componentData.entityType());
break;
default:
throw new IllegalArgumentException("Unexpected match type: " + componentData.matchType());
}
components.add(component);
}
// strict=true means only entities matched entirely by the filter are returned.
if (data.strict()) {
return ClientQuotaFilter.containsOnly(components);
} else {
return ClientQuotaFilter.contains(components);
}
}
@Override
public DescribeClientQuotasRequestData data() {
return data;
}
@Override
public DescribeClientQuotasResponse getErrorResponse(int throttleTimeMs, Throwable e) {
ApiError error = ApiError.fromThrowable(e);
return new DescribeClientQuotasResponse(new DescribeClientQuotasResponseData()
.setThrottleTimeMs(throttleTimeMs)
.setErrorCode(error.error().code())
.setErrorMessage(error.message())
.setEntries(null));
}
public static DescribeClientQuotasRequest parse(ByteBuffer buffer, short version) {
return new DescribeClientQuotasRequest(new DescribeClientQuotasRequestData(new ByteBufferAccessor(buffer), version),
version);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeClientQuotasResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import org.apache.kafka.common.message.DescribeClientQuotasResponseData;
import org.apache.kafka.common.message.DescribeClientQuotasResponseData.EntityData;
import org.apache.kafka.common.message.DescribeClientQuotasResponseData.EntryData;
import org.apache.kafka.common.message.DescribeClientQuotasResponseData.ValueData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.quota.ClientQuotaEntity;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class DescribeClientQuotasResponse extends AbstractResponse {

    private final DescribeClientQuotasResponseData data;

    public DescribeClientQuotasResponse(DescribeClientQuotasResponseData data) {
        super(ApiKeys.DESCRIBE_CLIENT_QUOTAS);
        this.data = data;
    }

    /**
     * Completes {@code future} with the described quota values keyed by quota entity,
     * or exceptionally when the response carries a top-level error.
     */
    public void complete(KafkaFutureImpl<Map<ClientQuotaEntity, Map<String, Double>>> future) {
        Errors error = Errors.forCode(data.errorCode());
        if (error != Errors.NONE) {
            future.completeExceptionally(error.exception(data.errorMessage()));
            return;
        }
        Map<ClientQuotaEntity, Map<String, Double>> result = new HashMap<>(data.entries().size());
        for (EntryData entry : data.entries()) {
            // Rebuild the quota entity from its (entityType -> entityName) pairs.
            Map<String, String> entity = new HashMap<>(entry.entity().size());
            for (EntityData entityData : entry.entity()) {
                entity.put(entityData.entityType(), entityData.entityName());
            }
            // Collect the quota (key -> value) pairs configured for that entity.
            Map<String, Double> values = new HashMap<>(entry.values().size());
            for (ValueData valueData : entry.values()) {
                values.put(valueData.key(), valueData.value());
            }
            result.put(new ClientQuotaEntity(entity), values);
        }
        future.complete(result);
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    @Override
    public DescribeClientQuotasResponseData data() {
        return data;
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(Errors.forCode(data.errorCode()));
    }

    public static DescribeClientQuotasResponse parse(ByteBuffer buffer, short version) {
        return new DescribeClientQuotasResponse(new DescribeClientQuotasResponseData(new ByteBufferAccessor(buffer), version));
    }

    /**
     * Builds a successful response (error code 0) from an entity -> quota-values mapping;
     * the inverse of what {@link #complete} reconstructs.
     */
    public static DescribeClientQuotasResponse fromQuotaEntities(Map<ClientQuotaEntity, Map<String, Double>> entities,
                                                                 int throttleTimeMs) {
        List<EntryData> entries = new ArrayList<>(entities.size());
        for (Map.Entry<ClientQuotaEntity, Map<String, Double>> entry : entities.entrySet()) {
            ClientQuotaEntity quotaEntity = entry.getKey();
            List<EntityData> entityData = new ArrayList<>(quotaEntity.entries().size());
            for (Map.Entry<String, String> entityEntry : quotaEntity.entries().entrySet()) {
                entityData.add(new EntityData()
                    .setEntityType(entityEntry.getKey())
                    .setEntityName(entityEntry.getValue()));
            }
            Map<String, Double> quotaValues = entry.getValue();
            List<ValueData> valueData = new ArrayList<>(quotaValues.size());
            // Consistency fix: iterate the captured local rather than re-reading entry.getValue().
            for (Map.Entry<String, Double> valuesEntry : quotaValues.entrySet()) {
                valueData.add(new ValueData()
                    .setKey(valuesEntry.getKey())
                    .setValue(valuesEntry.getValue()));
            }
            entries.add(new EntryData()
                .setEntity(entityData)
                .setValues(valueData));
        }
        return new DescribeClientQuotasResponse(new DescribeClientQuotasResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setErrorCode((short) 0)
            .setErrorMessage(null)
            .setEntries(entries));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeClusterRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.nio.ByteBuffer;
import org.apache.kafka.common.message.DescribeClusterRequestData;
import org.apache.kafka.common.message.DescribeClusterResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
/**
 * Request for {@link ApiKeys#DESCRIBE_CLUSTER}.
 */
public class DescribeClusterRequest extends AbstractRequest {
    /** Captures the request payload and stamps the negotiated version at build time. */
    public static class Builder extends AbstractRequest.Builder<DescribeClusterRequest> {
        private final DescribeClusterRequestData data;

        public Builder(DescribeClusterRequestData data) {
            super(ApiKeys.DESCRIBE_CLUSTER);
            this.data = data;
        }

        @Override
        public DescribeClusterRequest build(final short version) {
            return new DescribeClusterRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final DescribeClusterRequestData data;

    public DescribeClusterRequest(DescribeClusterRequestData data, short version) {
        super(ApiKeys.DESCRIBE_CLUSTER, version);
        this.data = data;
    }

    @Override
    public DescribeClusterRequestData data() {
        return data;
    }

    @Override
    public AbstractResponse getErrorResponse(final int throttleTimeMs, final Throwable e) {
        // Translate the throwable into a protocol-level error code/message pair.
        // NOTE(review): throttleTimeMs is not propagated into the error response here —
        // confirm this matches the intended quota behavior for this API.
        final ApiError apiError = ApiError.fromThrowable(e);
        final DescribeClusterResponseData responseData = new DescribeClusterResponseData()
            .setErrorCode(apiError.error().code())
            .setErrorMessage(apiError.message());
        return new DescribeClusterResponse(responseData);
    }

    @Override
    public String toString(final boolean verbose) {
        return data.toString();
    }

    public static DescribeClusterRequest parse(ByteBuffer buffer, short version) {
        final ByteBufferAccessor accessor = new ByteBufferAccessor(buffer);
        return new DescribeClusterRequest(new DescribeClusterRequestData(accessor, version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeClusterResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.nio.ByteBuffer;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.message.DescribeClusterResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
/**
 * Response for {@link ApiKeys#DESCRIBE_CLUSTER}.
 */
public class DescribeClusterResponse extends AbstractResponse {
    private final DescribeClusterResponseData data;

    public DescribeClusterResponse(DescribeClusterResponseData data) {
        super(ApiKeys.DESCRIBE_CLUSTER);
        this.data = data;
    }

    /** Returns the brokers in this response as {@link Node}s keyed by broker id. */
    public Map<Integer, Node> nodes() {
        return data.brokers().valuesList().stream()
            .map(broker -> new Node(broker.brokerId(), broker.host(), broker.port(), broker.rack()))
            .collect(Collectors.toMap(Node::id, node -> node));
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(Errors.forCode(data.errorCode()));
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    @Override
    public DescribeClusterResponseData data() {
        return data;
    }

    public static DescribeClusterResponse parse(ByteBuffer buffer, short version) {
        return new DescribeClusterResponse(
            new DescribeClusterResponseData(new ByteBufferAccessor(buffer), version));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeConfigsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DescribeConfigsRequestData;
import org.apache.kafka.common.message.DescribeConfigsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.stream.Collectors;
/**
 * Request for {@link ApiKeys#DESCRIBE_CONFIGS}.
 */
public class DescribeConfigsRequest extends AbstractRequest {
    public static class Builder extends AbstractRequest.Builder<DescribeConfigsRequest> {
        private final DescribeConfigsRequestData data;

        public Builder(DescribeConfigsRequestData data) {
            super(ApiKeys.DESCRIBE_CONFIGS);
            this.data = data;
        }

        @Override
        public DescribeConfigsRequest build(short version) {
            return new DescribeConfigsRequest(data, version);
        }
    }

    private final DescribeConfigsRequestData data;

    public DescribeConfigsRequest(DescribeConfigsRequestData data, short version) {
        super(ApiKeys.DESCRIBE_CONFIGS, version);
        this.data = data;
    }

    @Override
    public DescribeConfigsRequestData data() {
        return data;
    }

    @Override
    public DescribeConfigsResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        // Echo every requested resource back, each tagged with the same error.
        Errors error = Errors.forException(e);
        DescribeConfigsResponseData responseData = new DescribeConfigsResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setResults(data.resources().stream()
                .map(resource -> new DescribeConfigsResponseData.DescribeConfigsResult()
                    .setErrorCode(error.code())
                    .setErrorMessage(error.message())
                    .setResourceName(resource.resourceName())
                    .setResourceType(resource.resourceType()))
                .collect(Collectors.toList()));
        return new DescribeConfigsResponse(responseData);
    }

    public static DescribeConfigsRequest parse(ByteBuffer buffer, short version) {
        ByteBufferAccessor accessor = new ByteBufferAccessor(buffer);
        return new DescribeConfigsRequest(new DescribeConfigsRequestData(accessor, version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeConfigsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.message.DescribeConfigsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
 * Response for {@link ApiKeys#DESCRIBE_CONFIGS}; carries one result per requested
 * config resource, each with its own error code.
 */
public class DescribeConfigsResponse extends AbstractResponse {
    /** Per-resource result: an {@link ApiError} plus the config entries returned for that resource. */
    public static class Config {
        private final ApiError error;
        private final Collection<ConfigEntry> entries;
        public Config(ApiError error, Collection<ConfigEntry> entries) {
            this.error = Objects.requireNonNull(error, "error");
            this.entries = Objects.requireNonNull(entries, "entries");
        }
        public ApiError error() {
            return error;
        }
        public Collection<ConfigEntry> entries() {
            return entries;
        }
    }
    /** A single config key/value with its provenance, sensitivity and synonym chain. */
    public static class ConfigEntry {
        private final String name;
        private final String value;               // may be null
        private final boolean isSensitive;
        private final ConfigSource source;
        private final boolean readOnly;
        private final Collection<ConfigSynonym> synonyms;
        private final ConfigType type;            // may be null via the long constructor
        private final String documentation;       // may be null
        // Convenience overload: type defaults to UNKNOWN, documentation to null.
        public ConfigEntry(String name, String value, ConfigSource source, boolean isSensitive, boolean readOnly,
                           Collection<ConfigSynonym> synonyms) {
            this(name, value, source, isSensitive, readOnly, synonyms, ConfigType.UNKNOWN, null);
        }
        public ConfigEntry(String name, String value, ConfigSource source, boolean isSensitive, boolean readOnly,
                           Collection<ConfigSynonym> synonyms, ConfigType type, String documentation) {
            this.name = Objects.requireNonNull(name, "name");
            this.value = value;
            this.source = Objects.requireNonNull(source, "source");
            this.isSensitive = isSensitive;
            this.readOnly = readOnly;
            this.synonyms = Objects.requireNonNull(synonyms, "synonyms");
            this.type = type;
            this.documentation = documentation;
        }
        public String name() {
            return name;
        }
        public String value() {
            return value;
        }
        public boolean isSensitive() {
            return isSensitive;
        }
        public ConfigSource source() {
            return source;
        }
        public boolean isReadOnly() {
            return readOnly;
        }
        public Collection<ConfigSynonym> synonyms() {
            return synonyms;
        }
        public ConfigType type() {
            return type;
        }
        public String documentation() {
            return documentation;
        }
    }
    /**
     * Wire-level config source. The byte {@code id} is the value serialized on the wire,
     * so the declaration order (and hence {@code values()} index) must match the ids.
     */
    public enum ConfigSource {
        UNKNOWN((byte) 0, org.apache.kafka.clients.admin.ConfigEntry.ConfigSource.UNKNOWN),
        TOPIC_CONFIG((byte) 1, org.apache.kafka.clients.admin.ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG),
        DYNAMIC_BROKER_CONFIG((byte) 2, org.apache.kafka.clients.admin.ConfigEntry.ConfigSource.DYNAMIC_BROKER_CONFIG),
        DYNAMIC_DEFAULT_BROKER_CONFIG((byte) 3, org.apache.kafka.clients.admin.ConfigEntry.ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG),
        STATIC_BROKER_CONFIG((byte) 4, org.apache.kafka.clients.admin.ConfigEntry.ConfigSource.STATIC_BROKER_CONFIG),
        DEFAULT_CONFIG((byte) 5, org.apache.kafka.clients.admin.ConfigEntry.ConfigSource.DEFAULT_CONFIG),
        DYNAMIC_BROKER_LOGGER_CONFIG((byte) 6, org.apache.kafka.clients.admin.ConfigEntry.ConfigSource.DYNAMIC_BROKER_LOGGER_CONFIG);
        final byte id;
        private final org.apache.kafka.clients.admin.ConfigEntry.ConfigSource source;
        private static final ConfigSource[] VALUES = values();  // cached: values() clones per call
        ConfigSource(byte id, org.apache.kafka.clients.admin.ConfigEntry.ConfigSource source) {
            this.id = id;
            this.source = source;
        }
        public byte id() {
            return id;
        }
        /**
         * Looks up the source for a wire id. Ids beyond the known range map to
         * {@link #UNKNOWN} so newer brokers remain readable; negative ids are rejected.
         * NOTE(review): the message says "positive" but 0 (UNKNOWN) is accepted —
         * "non-negative" would be the accurate wording.
         */
        public static ConfigSource forId(byte id) {
            if (id < 0)
                throw new IllegalArgumentException("id should be positive, id: " + id);
            if (id >= VALUES.length)
                return UNKNOWN;
            return VALUES[id];
        }
        public org.apache.kafka.clients.admin.ConfigEntry.ConfigSource source() {
            return source;
        }
    }
    /**
     * Wire-level config value type. As with {@link ConfigSource}, the byte id doubles
     * as the index into {@code VALUES}, so ordering is significant.
     */
    public enum ConfigType {
        UNKNOWN((byte) 0, org.apache.kafka.clients.admin.ConfigEntry.ConfigType.UNKNOWN),
        BOOLEAN((byte) 1, org.apache.kafka.clients.admin.ConfigEntry.ConfigType.BOOLEAN),
        STRING((byte) 2, org.apache.kafka.clients.admin.ConfigEntry.ConfigType.STRING),
        INT((byte) 3, org.apache.kafka.clients.admin.ConfigEntry.ConfigType.INT),
        SHORT((byte) 4, org.apache.kafka.clients.admin.ConfigEntry.ConfigType.SHORT),
        LONG((byte) 5, org.apache.kafka.clients.admin.ConfigEntry.ConfigType.LONG),
        DOUBLE((byte) 6, org.apache.kafka.clients.admin.ConfigEntry.ConfigType.DOUBLE),
        LIST((byte) 7, org.apache.kafka.clients.admin.ConfigEntry.ConfigType.LIST),
        CLASS((byte) 8, org.apache.kafka.clients.admin.ConfigEntry.ConfigType.CLASS),
        PASSWORD((byte) 9, org.apache.kafka.clients.admin.ConfigEntry.ConfigType.PASSWORD);
        final byte id;
        final org.apache.kafka.clients.admin.ConfigEntry.ConfigType type;
        private static final ConfigType[] VALUES = values();  // cached: values() clones per call
        ConfigType(byte id, org.apache.kafka.clients.admin.ConfigEntry.ConfigType type) {
            this.id = id;
            this.type = type;
        }
        public byte id() {
            return id;
        }
        // Same contract as ConfigSource.forId: unknown high ids degrade to UNKNOWN.
        public static ConfigType forId(byte id) {
            if (id < 0)
                throw new IllegalArgumentException("id should be positive, id: " + id);
            if (id >= VALUES.length)
                return UNKNOWN;
            return VALUES[id];
        }
        public org.apache.kafka.clients.admin.ConfigEntry.ConfigType type() {
            return type;
        }
    }
    /** A config that shadows or provides the value of another config (e.g. a default). */
    public static class ConfigSynonym {
        private final String name;
        private final String value;  // may be null
        private final ConfigSource source;
        public ConfigSynonym(String name, String value, ConfigSource source) {
            this.name = Objects.requireNonNull(name, "name");
            this.value = value;
            this.source = Objects.requireNonNull(source, "source");
        }
        public String name() {
            return name;
        }
        public String value() {
            return value;
        }
        public ConfigSource source() {
            return source;
        }
    }
    /**
     * Indexes the raw per-resource results by {@link ConfigResource}.
     * Throws (via Collectors.toMap) if the response contains duplicate resources.
     */
    public Map<ConfigResource, DescribeConfigsResponseData.DescribeConfigsResult> resultMap() {
        return data().results().stream().collect(Collectors.toMap(
            configsResult ->
                new ConfigResource(ConfigResource.Type.forId(configsResult.resourceType()),
                    configsResult.resourceName()),
            Function.identity()));
    }
    private final DescribeConfigsResponseData data;
    public DescribeConfigsResponse(DescribeConfigsResponseData data) {
        super(ApiKeys.DESCRIBE_CONFIGS);
        this.data = data;
    }
    // This constructor should only be used after deserialization, it has special handling for version 0:
    // v0 has no configSource field on the wire, so it is reconstructed here from the
    // isDefault flag and the resource type (broker -> static, topic -> topic config).
    private DescribeConfigsResponse(DescribeConfigsResponseData data, short version) {
        super(ApiKeys.DESCRIBE_CONFIGS);
        this.data = data;
        if (version == 0) {
            for (DescribeConfigsResponseData.DescribeConfigsResult result : data.results()) {
                for (DescribeConfigsResponseData.DescribeConfigsResourceResult config : result.configs()) {
                    if (config.isDefault()) {
                        config.setConfigSource(ConfigSource.DEFAULT_CONFIG.id);
                    } else {
                        if (result.resourceType() == ConfigResource.Type.BROKER.id()) {
                            config.setConfigSource(ConfigSource.STATIC_BROKER_CONFIG.id);
                        } else if (result.resourceType() == ConfigResource.Type.TOPIC.id()) {
                            config.setConfigSource(ConfigSource.TOPIC_CONFIG.id);
                        } else {
                            config.setConfigSource(ConfigSource.UNKNOWN.id);
                        }
                    }
                }
            }
        }
    }
    @Override
    public DescribeConfigsResponseData data() {
        return data;
    }
    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }
    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }
    /** Aggregates the per-resource error codes into a count per {@link Errors} value. */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Map<Errors, Integer> errorCounts = new HashMap<>();
        data.results().forEach(response ->
            updateErrorCounts(errorCounts, Errors.forCode(response.errorCode()))
        );
        return errorCounts;
    }
    public static DescribeConfigsResponse parse(ByteBuffer buffer, short version) {
        return new DescribeConfigsResponse(new DescribeConfigsResponseData(new ByteBufferAccessor(buffer), version), version);
    }
    // Client-side throttling (waiting out the throttle before retrying) applies from v2.
    @Override
    public boolean shouldClientThrottle(short version) {
        return version >= 2;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeDelegationTokenRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DescribeDelegationTokenRequestData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Request to describe delegation tokens, optionally filtered by owner principals.
 */
public class DescribeDelegationTokenRequest extends AbstractRequest {
    public static class Builder extends AbstractRequest.Builder<DescribeDelegationTokenRequest> {
        private final DescribeDelegationTokenRequestData data;

        /**
         * @param owners principals to filter by; {@code null} is passed through as-is and
         *               is distinguished from an empty list (see {@link #ownersListEmpty()})
         */
        public Builder(List<KafkaPrincipal> owners) {
            super(ApiKeys.DESCRIBE_DELEGATION_TOKEN);
            DescribeDelegationTokenRequestData requestData = new DescribeDelegationTokenRequestData();
            if (owners == null) {
                requestData.setOwners(null);
            } else {
                requestData.setOwners(owners.stream()
                    .map(principal -> new DescribeDelegationTokenRequestData.DescribeDelegationTokenOwner()
                        .setPrincipalName(principal.getName())
                        .setPrincipalType(principal.getPrincipalType()))
                    .collect(Collectors.toList()));
            }
            this.data = requestData;
        }

        @Override
        public DescribeDelegationTokenRequest build(short version) {
            return new DescribeDelegationTokenRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final DescribeDelegationTokenRequestData data;

    public DescribeDelegationTokenRequest(DescribeDelegationTokenRequestData data, short version) {
        super(ApiKeys.DESCRIBE_DELEGATION_TOKEN, version);
        this.data = data;
    }

    @Override
    public DescribeDelegationTokenRequestData data() {
        return data;
    }

    /** True when an owner filter was supplied but is empty (distinct from no filter at all). */
    public boolean ownersListEmpty() {
        List<DescribeDelegationTokenRequestData.DescribeDelegationTokenOwner> owners = data.owners();
        return owners != null && owners.isEmpty();
    }

    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        return new DescribeDelegationTokenResponse(version(), throttleTimeMs, Errors.forException(e));
    }

    public static DescribeDelegationTokenRequest parse(ByteBuffer buffer, short version) {
        ByteBufferAccessor accessor = new ByteBufferAccessor(buffer);
        return new DescribeDelegationTokenRequest(new DescribeDelegationTokenRequestData(accessor, version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeDelegationTokenResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DescribeDelegationTokenResponseData;
import org.apache.kafka.common.message.DescribeDelegationTokenResponseData.DescribedDelegationToken;
import org.apache.kafka.common.message.DescribeDelegationTokenResponseData.DescribedDelegationTokenRenewer;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.token.delegation.DelegationToken;
import org.apache.kafka.common.security.token.delegation.TokenInformation;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
/**
 * Response for {@link ApiKeys#DESCRIBE_DELEGATION_TOKEN}; converts between the wire
 * representation and domain-level {@link DelegationToken} objects.
 */
public class DescribeDelegationTokenResponse extends AbstractResponse {
    private final DescribeDelegationTokenResponseData data;
    /**
     * Builds a response from domain-level tokens.
     *
     * @param version        negotiated API version; token-requester fields are only
     *                       populated for version > 2 (they do not exist on earlier versions)
     * @param throttleTimeMs throttle time to advertise to the client
     * @param error          top-level error code for the whole response
     * @param tokens         tokens to serialize into the response
     */
    public DescribeDelegationTokenResponse(int version, int throttleTimeMs, Errors error, List<DelegationToken> tokens) {
        super(ApiKeys.DESCRIBE_DELEGATION_TOKEN);
        List<DescribedDelegationToken> describedDelegationTokenList = tokens
            .stream()
            .map(dt -> {
                DescribedDelegationToken ddt = new DescribedDelegationToken()
                    .setTokenId(dt.tokenInfo().tokenId())
                    .setPrincipalType(dt.tokenInfo().owner().getPrincipalType())
                    .setPrincipalName(dt.tokenInfo().owner().getName())
                    .setIssueTimestamp(dt.tokenInfo().issueTimestamp())
                    .setMaxTimestamp(dt.tokenInfo().maxTimestamp())
                    .setExpiryTimestamp(dt.tokenInfo().expiryTimestamp())
                    .setHmac(dt.hmac())
                    .setRenewers(dt.tokenInfo().renewers()
                        .stream()
                        .map(r -> new DescribedDelegationTokenRenewer().setPrincipalName(r.getName()).setPrincipalType(r.getPrincipalType()))
                        .collect(Collectors.toList()));
                // The token-requester principal was added to the wire format after v2.
                if (version > 2) {
                    ddt.setTokenRequesterPrincipalType(dt.tokenInfo().tokenRequester().getPrincipalType())
                        .setTokenRequesterPrincipalName(dt.tokenInfo().tokenRequester().getName());
                }
                return ddt;
            })
            .collect(Collectors.toList());
        this.data = new DescribeDelegationTokenResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setErrorCode(error.code())
            .setTokens(describedDelegationTokenList);
    }
    /** Error-only response with no tokens. */
    public DescribeDelegationTokenResponse(int version, int throttleTimeMs, Errors error) {
        this(version, throttleTimeMs, error, new ArrayList<>());
    }
    public DescribeDelegationTokenResponse(DescribeDelegationTokenResponseData data) {
        super(ApiKeys.DESCRIBE_DELEGATION_TOKEN);
        this.data = data;
    }
    public static DescribeDelegationTokenResponse parse(ByteBuffer buffer, short version) {
        return new DescribeDelegationTokenResponse(new DescribeDelegationTokenResponseData(
            new ByteBufferAccessor(buffer), version));
    }
    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(error());
    }
    @Override
    public DescribeDelegationTokenResponseData data() {
        return data;
    }
    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }
    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }
    /** Top-level error code of this response. */
    public Errors error() {
        return Errors.forCode(data.errorCode());
    }
    /**
     * Converts the wire-level token entries back into domain-level
     * {@link DelegationToken}s (inverse of the list constructor).
     */
    public List<DelegationToken> tokens() {
        return data.tokens()
            .stream()
            .map(ddt -> new DelegationToken(new TokenInformation(
                ddt.tokenId(),
                new KafkaPrincipal(ddt.principalType(), ddt.principalName()),
                new KafkaPrincipal(ddt.tokenRequesterPrincipalType(), ddt.tokenRequesterPrincipalName()),
                ddt.renewers()
                    .stream()
                    .map(ddtr -> new KafkaPrincipal(ddtr.principalType(), ddtr.principalName()))
                    .collect(Collectors.toList()), ddt.issueTimestamp(), ddt.maxTimestamp(), ddt.expiryTimestamp()),
                ddt.hmac()))
            .collect(Collectors.toList());
    }
    public boolean hasError() {
        return error() != Errors.NONE;
    }
    // Client-side throttling applies from v1.
    @Override
    public boolean shouldClientThrottle(short version) {
        return version >= 1;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeGroupsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DescribeGroupsRequestData;
import org.apache.kafka.common.message.DescribeGroupsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
public class DescribeGroupsRequest extends AbstractRequest {
public static class Builder extends AbstractRequest.Builder<DescribeGroupsRequest> {
private final DescribeGroupsRequestData data;
public Builder(DescribeGroupsRequestData data) {
super(ApiKeys.DESCRIBE_GROUPS);
this.data = data;
}
@Override
public DescribeGroupsRequest build(short version) {
return new DescribeGroupsRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
}
private final DescribeGroupsRequestData data;
private DescribeGroupsRequest(DescribeGroupsRequestData data, short version) {
super(ApiKeys.DESCRIBE_GROUPS, version);
this.data = data;
}
@Override
public DescribeGroupsRequestData data() {
return data;
}
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
Errors error = Errors.forException(e);
DescribeGroupsResponseData describeGroupsResponseData = new DescribeGroupsResponseData();
data.groups().forEach(groupId ->
describeGroupsResponseData.groups().add(DescribeGroupsResponse.groupError(groupId, error))
);
if (version() >= 1) {
describeGroupsResponseData.setThrottleTimeMs(throttleTimeMs);
}
return new DescribeGroupsResponse(describeGroupsResponseData);
}
public static DescribeGroupsRequest parse(ByteBuffer buffer, short version) {
return new DescribeGroupsRequest(new DescribeGroupsRequestData(new ByteBufferAccessor(buffer), version), version);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeGroupsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DescribeGroupsResponseData;
import org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroup;
import org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroupMember;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.utils.Utils;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class DescribeGroupsResponse extends AbstractResponse {
public static final int AUTHORIZED_OPERATIONS_OMITTED = Integer.MIN_VALUE;
/**
* Possible per-group error codes:
*
* COORDINATOR_LOAD_IN_PROGRESS (14)
* COORDINATOR_NOT_AVAILABLE (15)
* NOT_COORDINATOR (16)
* AUTHORIZATION_FAILED (29)
*/
private final DescribeGroupsResponseData data;
public DescribeGroupsResponse(DescribeGroupsResponseData data) {
super(ApiKeys.DESCRIBE_GROUPS);
this.data = data;
}
public static DescribedGroupMember groupMember(
final String memberId,
final String groupInstanceId,
final String clientId,
final String clientHost,
final byte[] assignment,
final byte[] metadata) {
return new DescribedGroupMember()
.setMemberId(memberId)
.setGroupInstanceId(groupInstanceId)
.setClientId(clientId)
.setClientHost(clientHost)
.setMemberAssignment(assignment)
.setMemberMetadata(metadata);
}
public static DescribedGroup groupMetadata(
final String groupId,
final Errors error,
final String state,
final String protocolType,
final String protocol,
final List<DescribedGroupMember> members,
final Set<Byte> authorizedOperations) {
DescribedGroup groupMetadata = new DescribedGroup();
groupMetadata.setGroupId(groupId)
.setErrorCode(error.code())
.setGroupState(state)
.setProtocolType(protocolType)
.setProtocolData(protocol)
.setMembers(members)
.setAuthorizedOperations(Utils.to32BitField(authorizedOperations));
return groupMetadata;
}
public static DescribedGroup groupMetadata(
final String groupId,
final Errors error,
final String state,
final String protocolType,
final String protocol,
final List<DescribedGroupMember> members,
final int authorizedOperations
) {
return new DescribedGroup()
.setGroupId(groupId)
.setErrorCode(error.code())
.setGroupState(state)
.setProtocolType(protocolType)
.setProtocolData(protocol)
.setMembers(members)
.setAuthorizedOperations(authorizedOperations);
}
public static DescribedGroup groupError(String groupId, Errors error) {
return groupMetadata(groupId, error, DescribeGroupsResponse.UNKNOWN_STATE, DescribeGroupsResponse.UNKNOWN_PROTOCOL_TYPE,
DescribeGroupsResponse.UNKNOWN_PROTOCOL, Collections.emptyList(), AUTHORIZED_OPERATIONS_OMITTED);
}
@Override
public DescribeGroupsResponseData data() {
return data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
/** Overwrites the throttle time in the response body. */
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
    data.setThrottleTimeMs(throttleTimeMs);
}
// Placeholder values used by groupError() when group metadata is unavailable.
public static final String UNKNOWN_STATE = "";
public static final String UNKNOWN_PROTOCOL_TYPE = "";
public static final String UNKNOWN_PROTOCOL = "";
/**
 * Tallies one error occurrence per described group, keyed by error type.
 */
@Override
public Map<Errors, Integer> errorCounts() {
    Map<Errors, Integer> counts = new HashMap<>();
    for (DescribedGroup group : data.groups()) {
        updateErrorCounts(counts, Errors.forCode(group.errorCode()));
    }
    return counts;
}
/** Deserializes a DescribeGroups response from the given buffer at the given protocol version. */
public static DescribeGroupsResponse parse(ByteBuffer buffer, short version) {
    return new DescribeGroupsResponse(new DescribeGroupsResponseData(new ByteBufferAccessor(buffer), version));
}
// Client-side throttling applies from version 2 onward, when the
// throttle-time field gained its current semantics.
@Override
public boolean shouldClientThrottle(short version) {
    return version >= 2;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeLogDirsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DescribeLogDirsRequestData;
import org.apache.kafka.common.message.DescribeLogDirsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
/**
 * Request to describe the log directories of a broker. A null topic list
 * means "describe all topic partitions".
 */
public class DescribeLogDirsRequest extends AbstractRequest {
    private final DescribeLogDirsRequestData data;

    /** Builder that holds the request body until a protocol version is chosen. */
    public static class Builder extends AbstractRequest.Builder<DescribeLogDirsRequest> {
        private final DescribeLogDirsRequestData data;

        public Builder(DescribeLogDirsRequestData data) {
            super(ApiKeys.DESCRIBE_LOG_DIRS);
            this.data = data;
        }

        @Override
        public DescribeLogDirsRequest build(short version) {
            return new DescribeLogDirsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    public DescribeLogDirsRequest(DescribeLogDirsRequestData data, short version) {
        super(ApiKeys.DESCRIBE_LOG_DIRS, version);
        this.data = data;
    }

    @Override
    public DescribeLogDirsRequestData data() {
        return data;
    }

    /** Builds a response carrying only the throttle time and the error mapped from {@code e}. */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        DescribeLogDirsResponseData responseData = new DescribeLogDirsResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setErrorCode(Errors.forException(e).code());
        return new DescribeLogDirsResponse(responseData);
    }

    /** True when the request targets every topic partition (encoded as a null topic list). */
    public boolean isAllTopicPartitions() {
        return data.topics() == null;
    }

    /** Deserializes a request from the given buffer at the given protocol version. */
    public static DescribeLogDirsRequest parse(ByteBuffer buffer, short version) {
        return new DescribeLogDirsRequest(new DescribeLogDirsRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeLogDirsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.DescribeLogDirsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
/**
 * Response describing a broker's log directories, their errors, and per-replica sizes.
 */
public class DescribeLogDirsResponse extends AbstractResponse {
    /** Sentinel used when a replica's offset lag is not known. */
    public static final long INVALID_OFFSET_LAG = -1L;
    /** Sentinel used when the broker did not report volume sizes. */
    public static final long UNKNOWN_VOLUME_BYTES = -1L;

    private final DescribeLogDirsResponseData data;

    public DescribeLogDirsResponse(DescribeLogDirsResponseData data) {
        super(ApiKeys.DESCRIBE_LOG_DIRS);
        this.data = data;
    }

    @Override
    public DescribeLogDirsResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /**
     * Tallies the top-level error once, plus one error per log-dir result.
     */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Map<Errors, Integer> errorCounts = new HashMap<>();
        errorCounts.put(Errors.forCode(data.errorCode()), 1);
        data.results().forEach(result -> {
            updateErrorCounts(errorCounts, Errors.forCode(result.errorCode()));
        });
        return errorCounts;
    }

    /** Deserializes a response from the given buffer at the given protocol version. */
    public static DescribeLogDirsResponse parse(ByteBuffer buffer, short version) {
        return new DescribeLogDirsResponse(new DescribeLogDirsResponseData(new ByteBufferAccessor(buffer), version));
    }

    // Note this class is part of the public API, reachable from Admin.describeLogDirs()
    /**
     * Possible error code:
     *
     * KAFKA_STORAGE_ERROR (56)
     * UNKNOWN (-1)
     *
     * @deprecated Deprecated Since Kafka 2.7.
     * Use {@link org.apache.kafka.clients.admin.DescribeLogDirsResult#descriptions()}
     * and {@link org.apache.kafka.clients.admin.DescribeLogDirsResult#allDescriptions()} to access the replacement
     * class {@link org.apache.kafka.clients.admin.LogDirDescription}.
     */
    @Deprecated
    public static class LogDirInfo {  // canonical modifier order per JLS (was "static public")
        public final Errors error;
        public final Map<TopicPartition, ReplicaInfo> replicaInfos;

        public LogDirInfo(Errors error, Map<TopicPartition, ReplicaInfo> replicaInfos) {
            this.error = error;
            this.replicaInfos = replicaInfos;
        }

        @Override
        public String toString() {
            // Equivalent to the previous StringBuilder chain; same output.
            return "(error=" + error + ", replicas=" + replicaInfos + ")";
        }
    }

    // Note this class is part of the public API, reachable from Admin.describeLogDirs()
    /**
     * @deprecated Deprecated Since Kafka 2.7.
     * Use {@link org.apache.kafka.clients.admin.DescribeLogDirsResult#descriptions()}
     * and {@link org.apache.kafka.clients.admin.DescribeLogDirsResult#allDescriptions()} to access the replacement
     * class {@link org.apache.kafka.clients.admin.ReplicaInfo}.
     */
    @Deprecated
    public static class ReplicaInfo {  // canonical modifier order per JLS (was "static public")
        public final long size;
        public final long offsetLag;
        public final boolean isFuture;

        public ReplicaInfo(long size, long offsetLag, boolean isFuture) {
            this.size = size;
            this.offsetLag = offsetLag;
            this.isFuture = isFuture;
        }

        @Override
        public String toString() {
            return "(size=" + size + ", offsetLag=" + offsetLag + ", isFuture=" + isFuture + ")";
        }
    }

    // Client-side throttling applies from version 1 onward.
    @Override
    public boolean shouldClientThrottle(short version) {
        return version >= 1;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeProducersRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DescribeProducersRequestData;
import org.apache.kafka.common.message.DescribeProducersRequestData.TopicRequest;
import org.apache.kafka.common.message.DescribeProducersResponseData;
import org.apache.kafka.common.message.DescribeProducersResponseData.PartitionResponse;
import org.apache.kafka.common.message.DescribeProducersResponseData.TopicResponse;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
/**
 * Request to describe the active producer state of a set of topic partitions.
 */
public class DescribeProducersRequest extends AbstractRequest {

    /** Builder that accumulates topics/partitions before the version is chosen. */
    public static class Builder extends AbstractRequest.Builder<DescribeProducersRequest> {
        public final DescribeProducersRequestData data;

        public Builder(DescribeProducersRequestData data) {
            super(ApiKeys.DESCRIBE_PRODUCERS);
            this.data = data;
        }

        /**
         * Registers a topic in the request and returns its entry so the caller
         * can append partition indexes to it.
         */
        public DescribeProducersRequestData.TopicRequest addTopic(String topic) {
            TopicRequest topicRequest = new TopicRequest().setName(topic);
            data.topics().add(topicRequest);
            return topicRequest;
        }

        @Override
        public DescribeProducersRequest build(short version) {
            return new DescribeProducersRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final DescribeProducersRequestData data;

    private DescribeProducersRequest(DescribeProducersRequestData data, short version) {
        super(ApiKeys.DESCRIBE_PRODUCERS, version);
        this.data = data;
    }

    @Override
    public DescribeProducersRequestData data() {
        return data;
    }

    /**
     * Builds an error response that echoes every requested topic partition with
     * the error mapped from the given throwable.
     */
    @Override
    public DescribeProducersResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        short errorCode = Errors.forException(e).code();
        DescribeProducersResponseData response = new DescribeProducersResponseData();
        for (TopicRequest topicRequest : data.topics()) {
            TopicResponse topicResponse = new TopicResponse().setName(topicRequest.name());
            topicRequest.partitionIndexes().forEach(partitionId ->
                topicResponse.partitions().add(new PartitionResponse()
                    .setPartitionIndex(partitionId)
                    .setErrorCode(errorCode)));
            response.topics().add(topicResponse);
        }
        return new DescribeProducersResponse(response);
    }

    /** Deserializes a request from the given buffer at the given protocol version. */
    public static DescribeProducersRequest parse(ByteBuffer buffer, short version) {
        return new DescribeProducersRequest(new DescribeProducersRequestData(
            new ByteBufferAccessor(buffer), version), version);
    }

    @Override
    public String toString(boolean verbose) {
        return data.toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeProducersResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DescribeProducersResponseData;
import org.apache.kafka.common.message.DescribeProducersResponseData.PartitionResponse;
import org.apache.kafka.common.message.DescribeProducersResponseData.TopicResponse;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
/**
 * Response carrying per-partition producer state for a DescribeProducers request.
 */
public class DescribeProducersResponse extends AbstractResponse {
    private final DescribeProducersResponseData data;

    public DescribeProducersResponse(DescribeProducersResponseData data) {
        super(ApiKeys.DESCRIBE_PRODUCERS);
        this.data = data;
    }

    @Override
    public DescribeProducersResponseData data() {
        return data;
    }

    /**
     * Tallies one error occurrence per partition response, keyed by error type.
     */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Map<Errors, Integer> counts = new HashMap<>();
        data.topics().forEach(topic ->
            topic.partitions().forEach(partition ->
                updateErrorCounts(counts, Errors.forCode(partition.errorCode()))));
        return counts;
    }

    /** Deserializes a response from the given buffer at the given protocol version. */
    public static DescribeProducersResponse parse(ByteBuffer buffer, short version) {
        return new DescribeProducersResponse(new DescribeProducersResponseData(
            new ByteBufferAccessor(buffer), version));
    }

    @Override
    public String toString() {
        return data.toString();
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeQuorumRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.DescribeQuorumRequestData;
import org.apache.kafka.common.message.DescribeQuorumResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
/**
 * Request to describe the state of the Raft quorum for a set of topic partitions
 * (in practice, the cluster metadata partition).
 */
public class DescribeQuorumRequest extends AbstractRequest {

    /** Builder that holds the request body until a protocol version is chosen. */
    public static class Builder extends AbstractRequest.Builder<DescribeQuorumRequest> {
        private final DescribeQuorumRequestData data;

        public Builder(DescribeQuorumRequestData data) {
            super(ApiKeys.DESCRIBE_QUORUM);
            this.data = data;
        }

        @Override
        public DescribeQuorumRequest build(short version) {
            return new DescribeQuorumRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final DescribeQuorumRequestData data;

    private DescribeQuorumRequest(DescribeQuorumRequestData data, short version) {
        super(ApiKeys.DESCRIBE_QUORUM, version);
        this.data = data;
    }

    /** Deserializes a request from the given buffer at the given protocol version. */
    public static DescribeQuorumRequest parse(ByteBuffer buffer, short version) {
        return new DescribeQuorumRequest(new DescribeQuorumRequestData(new ByteBufferAccessor(buffer), version), version);
    }

    /** Builds a request body targeting exactly one topic partition. */
    public static DescribeQuorumRequestData singletonRequest(TopicPartition topicPartition) {
        DescribeQuorumRequestData.PartitionData partition = new DescribeQuorumRequestData.PartitionData()
            .setPartitionIndex(topicPartition.partition());
        DescribeQuorumRequestData.TopicData topic = new DescribeQuorumRequestData.TopicData()
            .setTopicName(topicPartition.topic())
            .setPartitions(Collections.singletonList(partition));
        return new DescribeQuorumRequestData().setTopics(Collections.singletonList(topic));
    }

    @Override
    public DescribeQuorumRequestData data() {
        return data;
    }

    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        return new DescribeQuorumResponse(getTopLevelErrorResponse(Errors.forException(e)));
    }

    /**
     * Builds a response that mirrors every topic partition of {@code data},
     * stamping each partition with the given error.
     */
    public static DescribeQuorumResponseData getPartitionLevelErrorResponse(DescribeQuorumRequestData data, Errors error) {
        short errorCode = error.code();
        List<DescribeQuorumResponseData.TopicData> topicResponses = new ArrayList<>();
        for (DescribeQuorumRequestData.TopicData topic : data.topics()) {
            List<DescribeQuorumResponseData.PartitionData> partitions = new ArrayList<>();
            for (DescribeQuorumRequestData.PartitionData requestPartition : topic.partitions()) {
                partitions.add(new DescribeQuorumResponseData.PartitionData()
                    .setPartitionIndex(requestPartition.partitionIndex())
                    .setErrorCode(errorCode));
            }
            topicResponses.add(new DescribeQuorumResponseData.TopicData()
                .setTopicName(topic.topicName())
                .setPartitions(partitions));
        }
        return new DescribeQuorumResponseData().setTopics(topicResponses);
    }

    /** Builds a response carrying only a top-level error (no per-partition data). */
    public static DescribeQuorumResponseData getTopLevelErrorResponse(Errors topLevelError) {
        return new DescribeQuorumResponseData().setErrorCode(topLevelError.code());
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeQuorumResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.DescribeQuorumResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
* Possible error codes.
*
* Top level errors:
* - {@link Errors#CLUSTER_AUTHORIZATION_FAILED}
* - {@link Errors#BROKER_NOT_AVAILABLE}
*
* Partition level errors:
* - {@link Errors#NOT_LEADER_OR_FOLLOWER}
* - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION}
*/
/**
 * Possible error codes.
 *
 * Top level errors:
 * - {@link Errors#CLUSTER_AUTHORIZATION_FAILED}
 * - {@link Errors#BROKER_NOT_AVAILABLE}
 *
 * Partition level errors:
 * - {@link Errors#NOT_LEADER_OR_FOLLOWER}
 * - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION}
 */
public class DescribeQuorumResponse extends AbstractResponse {
    private final DescribeQuorumResponseData data;

    public DescribeQuorumResponse(DescribeQuorumResponseData data) {
        super(ApiKeys.DESCRIBE_QUORUM);
        this.data = data;
    }

    /**
     * Tallies the top-level error plus one error per partition response.
     */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Map<Errors, Integer> counts = new HashMap<>();
        // Map is empty here, so this records the top-level error with count 1.
        updateErrorCounts(counts, Errors.forCode(data.errorCode()));
        data.topics().forEach(topic ->
            topic.partitions().forEach(partition ->
                updateErrorCounts(counts, Errors.forCode(partition.errorCode()))));
        return counts;
    }

    @Override
    public DescribeQuorumResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return DEFAULT_THROTTLE_TIME;
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        // Not supported by the response schema
    }

    /** Builds a response body with a single partition stamped with the given error. */
    public static DescribeQuorumResponseData singletonErrorResponse(
        TopicPartition topicPartition,
        Errors error
    ) {
        DescribeQuorumResponseData.PartitionData partition = new DescribeQuorumResponseData.PartitionData()
            .setPartitionIndex(topicPartition.partition())
            .setErrorCode(error.code());
        DescribeQuorumResponseData.TopicData topic = new DescribeQuorumResponseData.TopicData()
            .setTopicName(topicPartition.topic())
            .setPartitions(Collections.singletonList(partition));
        return new DescribeQuorumResponseData().setTopics(Collections.singletonList(topic));
    }

    /** Builds a response body wrapping the given partition data for a single partition. */
    public static DescribeQuorumResponseData singletonResponse(
        TopicPartition topicPartition,
        DescribeQuorumResponseData.PartitionData partitionData
    ) {
        DescribeQuorumResponseData.TopicData topic = new DescribeQuorumResponseData.TopicData()
            .setTopicName(topicPartition.topic())
            .setPartitions(Collections.singletonList(partitionData
                .setPartitionIndex(topicPartition.partition())));
        return new DescribeQuorumResponseData().setTopics(Collections.singletonList(topic));
    }

    /** Deserializes a response from the given buffer at the given protocol version. */
    public static DescribeQuorumResponse parse(ByteBuffer buffer, short version) {
        return new DescribeQuorumResponse(new DescribeQuorumResponseData(new ByteBufferAccessor(buffer), version));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeTransactionsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DescribeTransactionsRequestData;
import org.apache.kafka.common.message.DescribeTransactionsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
/**
 * Request to describe the state of a set of transactional ids.
 */
public class DescribeTransactionsRequest extends AbstractRequest {

    /** Builder that holds the request body until a protocol version is chosen. */
    public static class Builder extends AbstractRequest.Builder<DescribeTransactionsRequest> {
        public final DescribeTransactionsRequestData data;

        public Builder(DescribeTransactionsRequestData data) {
            super(ApiKeys.DESCRIBE_TRANSACTIONS);
            this.data = data;
        }

        @Override
        public DescribeTransactionsRequest build(short version) {
            return new DescribeTransactionsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final DescribeTransactionsRequestData data;

    private DescribeTransactionsRequest(DescribeTransactionsRequestData data, short version) {
        super(ApiKeys.DESCRIBE_TRANSACTIONS, version);
        this.data = data;
    }

    @Override
    public DescribeTransactionsRequestData data() {
        return data;
    }

    /**
     * Builds an error response that echoes every requested transactional id
     * with the error mapped from the given throwable.
     */
    @Override
    public DescribeTransactionsResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        short errorCode = Errors.forException(e).code();
        DescribeTransactionsResponseData response = new DescribeTransactionsResponseData()
            .setThrottleTimeMs(throttleTimeMs);
        data.transactionalIds().forEach(transactionalId ->
            response.transactionStates().add(new DescribeTransactionsResponseData.TransactionState()
                .setTransactionalId(transactionalId)
                .setErrorCode(errorCode)));
        return new DescribeTransactionsResponse(response);
    }

    /** Deserializes a request from the given buffer at the given protocol version. */
    public static DescribeTransactionsRequest parse(ByteBuffer buffer, short version) {
        return new DescribeTransactionsRequest(new DescribeTransactionsRequestData(
            new ByteBufferAccessor(buffer), version), version);
    }

    @Override
    public String toString(boolean verbose) {
        return data.toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeTransactionsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DescribeTransactionsResponseData;
import org.apache.kafka.common.message.DescribeTransactionsResponseData.TransactionState;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
/**
 * Response carrying per-transactional-id state for a DescribeTransactions request.
 */
public class DescribeTransactionsResponse extends AbstractResponse {
    private final DescribeTransactionsResponseData data;

    public DescribeTransactionsResponse(DescribeTransactionsResponseData data) {
        super(ApiKeys.DESCRIBE_TRANSACTIONS);
        this.data = data;
    }

    @Override
    public DescribeTransactionsResponseData data() {
        return data;
    }

    /**
     * Tallies one error occurrence per transaction state, keyed by error type.
     */
    @Override
    public Map<Errors, Integer> errorCounts() {
        // Delegates to the inherited stream-based tally helper; same result as
        // accumulating updateErrorCounts() calls over each transaction state.
        return errorCounts(data.transactionStates().stream()
            .map(state -> Errors.forCode(state.errorCode())));
    }

    /** Deserializes a response from the given buffer at the given protocol version. */
    public static DescribeTransactionsResponse parse(ByteBuffer buffer, short version) {
        return new DescribeTransactionsResponse(new DescribeTransactionsResponseData(
            new ByteBufferAccessor(buffer), version));
    }

    @Override
    public String toString() {
        return data.toString();
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeUserScramCredentialsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DescribeUserScramCredentialsRequestData;
import org.apache.kafka.common.message.DescribeUserScramCredentialsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import java.nio.ByteBuffer;
/**
 * Request to describe SCRAM credentials for a set of users. A null/empty user
 * list means "describe all users" (KIP-554).
 */
public class DescribeUserScramCredentialsRequest extends AbstractRequest {

    /** Builder that holds the request body until a protocol version is chosen. */
    public static class Builder extends AbstractRequest.Builder<DescribeUserScramCredentialsRequest> {
        private final DescribeUserScramCredentialsRequestData data;

        public Builder(DescribeUserScramCredentialsRequestData data) {
            super(ApiKeys.DESCRIBE_USER_SCRAM_CREDENTIALS);
            this.data = data;
        }

        @Override
        public DescribeUserScramCredentialsRequest build(short version) {
            return new DescribeUserScramCredentialsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final DescribeUserScramCredentialsRequestData data;
    // NOTE: the redundant "version" field was removed; AbstractRequest already
    // stores the version and exposes it via version().

    private DescribeUserScramCredentialsRequest(DescribeUserScramCredentialsRequestData data, short version) {
        super(ApiKeys.DESCRIBE_USER_SCRAM_CREDENTIALS, version);
        this.data = data;
    }

    /** Deserializes a request from the given buffer at the given protocol version. */
    public static DescribeUserScramCredentialsRequest parse(ByteBuffer buffer, short version) {
        return new DescribeUserScramCredentialsRequest(new DescribeUserScramCredentialsRequestData(
            new ByteBufferAccessor(buffer), version), version);
    }

    @Override
    public DescribeUserScramCredentialsRequestData data() {
        return data;
    }

    /**
     * Builds an error response carrying the mapped error at the top level and,
     * when specific users were requested, one result per user so callers can
     * correlate the failure to each requested user.
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        ApiError apiError = ApiError.fromThrowable(e);
        DescribeUserScramCredentialsResponseData response = new DescribeUserScramCredentialsResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setErrorCode(apiError.error().code())
            .setErrorMessage(apiError.message());
        // users() is null when the request asks to describe all users; guard
        // against NPE in that case (the top-level error is still reported).
        if (data.users() != null) {
            for (DescribeUserScramCredentialsRequestData.UserName user : data.users()) {
                response.results().add(new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult()
                    // Fix: the per-user result previously never set the user name,
                    // leaving the loop variable unused and results unidentifiable.
                    .setUser(user.name())
                    .setErrorCode(apiError.error().code())
                    .setErrorMessage(apiError.message()));
            }
        }
        return new DescribeUserScramCredentialsResponse(response);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/DescribeUserScramCredentialsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.DescribeUserScramCredentialsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
/**
 * Response carrying per-user SCRAM credential descriptions.
 */
public class DescribeUserScramCredentialsResponse extends AbstractResponse {
    private final DescribeUserScramCredentialsResponseData data;

    public DescribeUserScramCredentialsResponse(DescribeUserScramCredentialsResponseData responseData) {
        super(ApiKeys.DESCRIBE_USER_SCRAM_CREDENTIALS);
        this.data = responseData;
    }

    @Override
    public DescribeUserScramCredentialsResponseData data() {
        return data;
    }

    // Every version of this API supports client-side throttling.
    @Override
    public boolean shouldClientThrottle(short version) {
        return true;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /**
     * Tallies one error occurrence per per-user result, keyed by error type.
     */
    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(data.results().stream()
            .map(result -> Errors.forCode(result.errorCode())));
    }

    /** Deserializes a response from the given buffer at the given protocol version. */
    public static DescribeUserScramCredentialsResponse parse(ByteBuffer buffer, short version) {
        return new DescribeUserScramCredentialsResponse(new DescribeUserScramCredentialsResponseData(new ByteBufferAccessor(buffer), version));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ElectLeadersRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.ElectLeadersRequestData.TopicPartitions;
import org.apache.kafka.common.message.ElectLeadersRequestData;
import org.apache.kafka.common.message.ElectLeadersResponseData.PartitionResult;
import org.apache.kafka.common.message.ElectLeadersResponseData.ReplicaElectionResult;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.MessageUtil;
public class ElectLeadersRequest extends AbstractRequest {

    public static class Builder extends AbstractRequest.Builder<ElectLeadersRequest> {
        private final ElectionType electionType;
        private final Collection<TopicPartition> topicPartitions;
        private final int timeoutMs;

        /**
         * @param electionType    the kind of election to run
         * @param topicPartitions the partitions to elect leaders for, or null to
         *                        request an election over all partitions
         * @param timeoutMs       request timeout in milliseconds
         */
        public Builder(ElectionType electionType, Collection<TopicPartition> topicPartitions, int timeoutMs) {
            super(ApiKeys.ELECT_LEADERS);
            this.electionType = electionType;
            this.topicPartitions = topicPartitions;
            this.timeoutMs = timeoutMs;
        }

        @Override
        public ElectLeadersRequest build(short version) {
            return new ElectLeadersRequest(toRequestData(version), version);
        }

        @Override
        public String toString() {
            return "ElectLeadersRequest("
                + "electionType=" + electionType
                + ", topicPartitions=" + ((topicPartitions == null) ? "null" : MessageUtil.deepToString(topicPartitions.iterator()))
                + ", timeoutMs=" + timeoutMs
                + ")";
        }

        /**
         * Converts the builder state into the wire-format request body,
         * grouping the requested partitions by topic.
         *
         * @throws UnsupportedVersionException if a non-PREFERRED election is
         *         requested at version 0, which only supports PREFERRED
         */
        private ElectLeadersRequestData toRequestData(short version) {
            if (electionType != ElectionType.PREFERRED && version == 0) {
                throw new UnsupportedVersionException("API Version 0 only supports PREFERRED election type");
            }
            ElectLeadersRequestData data = new ElectLeadersRequestData()
                .setTimeoutMs(timeoutMs);
            if (topicPartitions != null) {
                topicPartitions.forEach(tp -> {
                    // Reuse the topic entry if one was already created for an earlier partition.
                    // Use the imported TopicPartitions name consistently with getErrorResponse.
                    TopicPartitions tps = data.topicPartitions().find(tp.topic());
                    if (tps == null) {
                        tps = new TopicPartitions().setTopic(tp.topic());
                        data.topicPartitions().add(tps);
                    }
                    tps.partitions().add(tp.partition());
                });
            } else {
                // A null collection signals that all partitions are in scope.
                data.setTopicPartitions(null);
            }
            data.setElectionType(electionType.value);
            return data;
        }
    }

    /** Wire-protocol payload of this request. */
    private final ElectLeadersRequestData data;

    private ElectLeadersRequest(ElectLeadersRequestData data, short version) {
        super(ApiKeys.ELECT_LEADERS, version);
        this.data = data;
    }

    @Override
    public ElectLeadersRequestData data() {
        return data;
    }

    /**
     * Builds an error response reporting {@code e} for every partition named in
     * this request.
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        ApiError apiError = ApiError.fromThrowable(e);
        List<ReplicaElectionResult> electionResults = new ArrayList<>();
        if (data.topicPartitions() != null) {
            for (TopicPartitions topic : data.topicPartitions()) {
                ReplicaElectionResult electionResult = new ReplicaElectionResult();
                electionResult.setTopic(topic.topic());
                for (Integer partitionId : topic.partitions()) {
                    PartitionResult partitionResult = new PartitionResult();
                    partitionResult.setPartitionId(partitionId);
                    partitionResult.setErrorCode(apiError.error().code());
                    partitionResult.setErrorMessage(apiError.message());
                    electionResult.partitionResult().add(partitionResult);
                }
                electionResults.add(electionResult);
            }
        }
        return new ElectLeadersResponse(throttleTimeMs, apiError.error().code(), electionResults, version());
    }

    public static ElectLeadersRequest parse(ByteBuffer buffer, short version) {
        return new ElectLeadersRequest(new ElectLeadersRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ElectLeadersResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.ElectLeadersResponseData;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.message.ElectLeadersResponseData.ReplicaElectionResult;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
public class ElectLeadersResponse extends AbstractResponse {
private final ElectLeadersResponseData data;
public ElectLeadersResponse(ElectLeadersResponseData data) {
super(ApiKeys.ELECT_LEADERS);
this.data = data;
}
public ElectLeadersResponse(
int throttleTimeMs,
short errorCode,
List<ReplicaElectionResult> electionResults,
short version) {
super(ApiKeys.ELECT_LEADERS);
this.data = new ElectLeadersResponseData();
data.setThrottleTimeMs(throttleTimeMs);
if (version >= 1)
data.setErrorCode(errorCode);
data.setReplicaElectionResults(electionResults);
}
@Override
public ElectLeadersResponseData data() {
return data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
@Override
public Map<Errors, Integer> errorCounts() {
HashMap<Errors, Integer> counts = new HashMap<>();
updateErrorCounts(counts, Errors.forCode(data.errorCode()));
data.replicaElectionResults().forEach(result ->
result.partitionResult().forEach(partitionResult ->
updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode()))
)
);
return counts;
}
public static ElectLeadersResponse parse(ByteBuffer buffer, short version) {
return new ElectLeadersResponse(new ElectLeadersResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public boolean shouldClientThrottle(short version) {
return true;
}
public static Map<TopicPartition, Optional<Throwable>> electLeadersResult(ElectLeadersResponseData data) {
Map<TopicPartition, Optional<Throwable>> map = new HashMap<>();
for (ElectLeadersResponseData.ReplicaElectionResult topicResults : data.replicaElectionResults()) {
for (ElectLeadersResponseData.PartitionResult partitionResult : topicResults.partitionResult()) {
Optional<Throwable> value = Optional.empty();
Errors error = Errors.forCode(partitionResult.errorCode());
if (error != Errors.NONE) {
value = Optional.of(error.exception(partitionResult.errorMessage()));
}
map.put(new TopicPartition(topicResults.topic(), partitionResult.partitionId()),
value);
}
}
return map;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/EndQuorumEpochRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.EndQuorumEpochRequestData;
import org.apache.kafka.common.message.EndQuorumEpochResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
public class EndQuorumEpochRequest extends AbstractRequest {

    public static class Builder extends AbstractRequest.Builder<EndQuorumEpochRequest> {
        private final EndQuorumEpochRequestData data;

        public Builder(EndQuorumEpochRequestData data) {
            super(ApiKeys.END_QUORUM_EPOCH);
            this.data = data;
        }

        @Override
        public EndQuorumEpochRequest build(short version) {
            return new EndQuorumEpochRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    /** Wire-protocol payload of this request. */
    private final EndQuorumEpochRequestData data;

    private EndQuorumEpochRequest(EndQuorumEpochRequestData data, short version) {
        super(ApiKeys.END_QUORUM_EPOCH, version);
        this.data = data;
    }

    @Override
    public EndQuorumEpochRequestData data() {
        return data;
    }

    @Override
    public EndQuorumEpochResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        EndQuorumEpochResponseData response = new EndQuorumEpochResponseData()
            .setErrorCode(Errors.forException(e).code());
        return new EndQuorumEpochResponse(response);
    }

    public static EndQuorumEpochRequest parse(ByteBuffer buffer, short version) {
        return new EndQuorumEpochRequest(new EndQuorumEpochRequestData(new ByteBufferAccessor(buffer), version), version);
    }

    /**
     * Convenience overload of
     * {@link #singletonRequest(TopicPartition, String, int, int, List)} with a
     * null cluster id.
     */
    public static EndQuorumEpochRequestData singletonRequest(TopicPartition topicPartition,
                                                             int leaderEpoch,
                                                             int leaderId,
                                                             List<Integer> preferredSuccessors) {
        return singletonRequest(topicPartition, null, leaderEpoch, leaderId, preferredSuccessors);
    }

    /**
     * Builds a request body naming a single topic partition with the given
     * epoch, leader id, and list of preferred successors.
     */
    public static EndQuorumEpochRequestData singletonRequest(TopicPartition topicPartition,
                                                             String clusterId,
                                                             int leaderEpoch,
                                                             int leaderId,
                                                             List<Integer> preferredSuccessors) {
        EndQuorumEpochRequestData.PartitionData partition = new EndQuorumEpochRequestData.PartitionData()
            .setPartitionIndex(topicPartition.partition())
            .setLeaderEpoch(leaderEpoch)
            .setLeaderId(leaderId)
            .setPreferredSuccessors(preferredSuccessors);
        EndQuorumEpochRequestData.TopicData topic = new EndQuorumEpochRequestData.TopicData()
            .setTopicName(topicPartition.topic())
            .setPartitions(Collections.singletonList(partition));
        return new EndQuorumEpochRequestData()
            .setClusterId(clusterId)
            .setTopics(Collections.singletonList(topic));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/EndQuorumEpochResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.EndQuorumEpochResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
 * Possible error codes.
 *
 * Top level errors:
 * - {@link Errors#CLUSTER_AUTHORIZATION_FAILED}
 * - {@link Errors#BROKER_NOT_AVAILABLE}
 *
 * Partition level errors:
 * - {@link Errors#FENCED_LEADER_EPOCH}
 * - {@link Errors#INVALID_REQUEST}
 * - {@link Errors#INCONSISTENT_VOTER_SET}
 * - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION}
 */
public class EndQuorumEpochResponse extends AbstractResponse {

    /** Wire-protocol payload carried by this response. */
    private final EndQuorumEpochResponseData data;

    public EndQuorumEpochResponse(EndQuorumEpochResponseData data) {
        super(ApiKeys.END_QUORUM_EPOCH);
        this.data = data;
    }

    @Override
    public EndQuorumEpochResponseData data() {
        return data;
    }

    /** Counts the top-level error plus one error per partition in every topic. */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Map<Errors, Integer> errors = new HashMap<>();
        errors.put(Errors.forCode(data.errorCode()), 1);
        data.topics().forEach(topicResponse ->
            topicResponse.partitions().forEach(partitionResponse ->
                updateErrorCounts(errors, Errors.forCode(partitionResponse.errorCode()))));
        return errors;
    }

    /** This response schema has no throttle-time field, so the default is reported. */
    @Override
    public int throttleTimeMs() {
        return DEFAULT_THROTTLE_TIME;
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        // Not supported by the response schema
    }

    /** Builds a response body covering exactly one topic partition. */
    public static EndQuorumEpochResponseData singletonResponse(
        Errors topLevelError,
        TopicPartition topicPartition,
        Errors partitionLevelError,
        int leaderEpoch,
        int leaderId
    ) {
        EndQuorumEpochResponseData.PartitionData partition = new EndQuorumEpochResponseData.PartitionData()
            .setErrorCode(partitionLevelError.code())
            .setLeaderId(leaderId)
            .setLeaderEpoch(leaderEpoch);
        EndQuorumEpochResponseData.TopicData topic = new EndQuorumEpochResponseData.TopicData()
            .setTopicName(topicPartition.topic())
            .setPartitions(Collections.singletonList(partition));
        return new EndQuorumEpochResponseData()
            .setErrorCode(topLevelError.code())
            .setTopics(Collections.singletonList(topic));
    }

    public static EndQuorumEpochResponse parse(ByteBuffer buffer, short version) {
        return new EndQuorumEpochResponse(new EndQuorumEpochResponseData(new ByteBufferAccessor(buffer), version));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/EndTxnRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.EndTxnRequestData;
import org.apache.kafka.common.message.EndTxnResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
public class EndTxnRequest extends AbstractRequest {

    public static class Builder extends AbstractRequest.Builder<EndTxnRequest> {
        public final EndTxnRequestData data;

        public Builder(EndTxnRequestData data) {
            super(ApiKeys.END_TXN);
            this.data = data;
        }

        @Override
        public EndTxnRequest build(short version) {
            return new EndTxnRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    /** Wire-protocol payload of this request. */
    private final EndTxnRequestData data;

    private EndTxnRequest(EndTxnRequestData data, short version) {
        super(ApiKeys.END_TXN, version);
        this.data = data;
    }

    /** Maps the request's committed flag onto a {@link TransactionResult}. */
    public TransactionResult result() {
        return data.committed() ? TransactionResult.COMMIT : TransactionResult.ABORT;
    }

    @Override
    public EndTxnRequestData data() {
        return data;
    }

    @Override
    public EndTxnResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        EndTxnResponseData response = new EndTxnResponseData()
            .setErrorCode(Errors.forException(e).code())
            .setThrottleTimeMs(throttleTimeMs);
        return new EndTxnResponse(response);
    }

    public static EndTxnRequest parse(ByteBuffer buffer, short version) {
        return new EndTxnRequest(new EndTxnRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/EndTxnResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.EndTxnResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
/**
 * Possible error codes:
 *
 * - {@link Errors#NOT_COORDINATOR}
 * - {@link Errors#COORDINATOR_NOT_AVAILABLE}
 * - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS}
 * - {@link Errors#INVALID_TXN_STATE}
 * - {@link Errors#INVALID_PRODUCER_ID_MAPPING}
 * - {@link Errors#INVALID_PRODUCER_EPOCH} // for version <=1
 * - {@link Errors#PRODUCER_FENCED}
 * - {@link Errors#TRANSACTIONAL_ID_AUTHORIZATION_FAILED}
 */
public class EndTxnResponse extends AbstractResponse {

    /** Wire-protocol payload carried by this response. */
    private final EndTxnResponseData data;

    public EndTxnResponse(EndTxnResponseData data) {
        super(ApiKeys.END_TXN);
        this.data = data;
    }

    /** The single error carried by this response. */
    public Errors error() {
        return Errors.forCode(data.errorCode());
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(error());
    }

    @Override
    public EndTxnResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /** Clients only apply throttling for version 1 and above. */
    @Override
    public boolean shouldClientThrottle(short version) {
        return version >= 1;
    }

    @Override
    public String toString() {
        return data.toString();
    }

    public static EndTxnResponse parse(ByteBuffer buffer, short version) {
        return new EndTxnResponse(new EndTxnResponseData(new ByteBufferAccessor(buffer), version));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/EnvelopeRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.EnvelopeRequestData;
import org.apache.kafka.common.message.EnvelopeResponseData;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
public class EnvelopeRequest extends AbstractRequest {

    public static class Builder extends AbstractRequest.Builder<EnvelopeRequest> {
        private final EnvelopeRequestData data;

        /**
         * @param requestData         serialized bytes of the embedded request
         * @param serializedPrincipal serialized principal bytes of the original client
         * @param clientAddress       raw host-address bytes of the original client
         */
        public Builder(ByteBuffer requestData,
                       byte[] serializedPrincipal,
                       byte[] clientAddress) {
            super(ApiKeys.ENVELOPE);
            this.data = new EnvelopeRequestData()
                .setRequestData(requestData)
                .setRequestPrincipal(serializedPrincipal)
                .setClientHostAddress(clientAddress);
        }

        @Override
        public EnvelopeRequest build(short version) {
            return new EnvelopeRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    /** Wire-protocol payload of this request. */
    private final EnvelopeRequestData data;

    public EnvelopeRequest(EnvelopeRequestData data, short version) {
        super(ApiKeys.ENVELOPE, version);
        this.data = data;
    }

    /** Serialized bytes of the embedded request. */
    public ByteBuffer requestData() {
        return data.requestData();
    }

    /** Raw host-address bytes recorded for the original client. */
    public byte[] clientAddress() {
        return data.clientHostAddress();
    }

    /** Serialized principal bytes recorded for the original client. */
    public byte[] requestPrincipal() {
        return data.requestPrincipal();
    }

    @Override
    public EnvelopeRequestData data() {
        return data;
    }

    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        EnvelopeResponseData response = new EnvelopeResponseData()
            .setErrorCode(Errors.forException(e).code());
        return new EnvelopeResponse(response);
    }

    public static EnvelopeRequest parse(ByteBuffer buffer, short version) {
        return new EnvelopeRequest(new EnvelopeRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/EnvelopeResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.EnvelopeResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
public class EnvelopeResponse extends AbstractResponse {

    /** Wire-protocol payload carried by this response. */
    private final EnvelopeResponseData data;

    public EnvelopeResponse(ByteBuffer responseData, Errors error) {
        super(ApiKeys.ENVELOPE);
        this.data = new EnvelopeResponseData()
            .setResponseData(responseData)
            .setErrorCode(error.code());
    }

    /** Builds an error-only response with no embedded payload. */
    public EnvelopeResponse(Errors error) {
        this(null, error);
    }

    public EnvelopeResponse(EnvelopeResponseData data) {
        super(ApiKeys.ENVELOPE);
        this.data = data;
    }

    /** Serialized bytes of the embedded response, if any. */
    public ByteBuffer responseData() {
        return data.responseData();
    }

    /** The single envelope-level error carried by this response. */
    public Errors error() {
        return Errors.forCode(data.errorCode());
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(error());
    }

    @Override
    public EnvelopeResponseData data() {
        return data;
    }

    /** This response schema has no throttle-time field, so the default is reported. */
    @Override
    public int throttleTimeMs() {
        return DEFAULT_THROTTLE_TIME;
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        // Not supported by the response schema
    }

    public static EnvelopeResponse parse(ByteBuffer buffer, short version) {
        return new EnvelopeResponse(new EnvelopeResponseData(new ByteBufferAccessor(buffer), version));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ExpireDelegationTokenRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.nio.ByteBuffer;
import org.apache.kafka.common.message.ExpireDelegationTokenRequestData;
import org.apache.kafka.common.message.ExpireDelegationTokenResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
public class ExpireDelegationTokenRequest extends AbstractRequest {

    public static class Builder extends AbstractRequest.Builder<ExpireDelegationTokenRequest> {
        private final ExpireDelegationTokenRequestData data;

        public Builder(ExpireDelegationTokenRequestData data) {
            super(ApiKeys.EXPIRE_DELEGATION_TOKEN);
            this.data = data;
        }

        @Override
        public ExpireDelegationTokenRequest build(short version) {
            return new ExpireDelegationTokenRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    /** Wire-protocol payload of this request. */
    private final ExpireDelegationTokenRequestData data;

    private ExpireDelegationTokenRequest(ExpireDelegationTokenRequestData data, short version) {
        super(ApiKeys.EXPIRE_DELEGATION_TOKEN, version);
        this.data = data;
    }

    @Override
    public ExpireDelegationTokenRequestData data() {
        return data;
    }

    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        ExpireDelegationTokenResponseData response = new ExpireDelegationTokenResponseData()
            .setErrorCode(Errors.forException(e).code())
            .setThrottleTimeMs(throttleTimeMs);
        return new ExpireDelegationTokenResponse(response);
    }

    /** The token's HMAC bytes, wrapped in a ByteBuffer. */
    public ByteBuffer hmac() {
        return ByteBuffer.wrap(data.hmac());
    }

    /** The requested expiry time period in milliseconds. */
    public long expiryTimePeriod() {
        return data.expiryTimePeriodMs();
    }

    public static ExpireDelegationTokenRequest parse(ByteBuffer buffer, short version) {
        return new ExpireDelegationTokenRequest(
            new ExpireDelegationTokenRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ExpireDelegationTokenResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.nio.ByteBuffer;
import java.util.Map;
import org.apache.kafka.common.message.ExpireDelegationTokenResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
public class ExpireDelegationTokenResponse extends AbstractResponse {

    /** Wire-protocol payload carried by this response. */
    private final ExpireDelegationTokenResponseData data;

    public ExpireDelegationTokenResponse(ExpireDelegationTokenResponseData data) {
        super(ApiKeys.EXPIRE_DELEGATION_TOKEN);
        this.data = data;
    }

    /** The single error carried by this response. */
    public Errors error() {
        return Errors.forCode(data.errorCode());
    }

    /** True when the response reports any error other than NONE. */
    public boolean hasError() {
        return error() != Errors.NONE;
    }

    /** The token's expiry timestamp in milliseconds. */
    public long expiryTimestamp() {
        return data.expiryTimestampMs();
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(error());
    }

    @Override
    public ExpireDelegationTokenResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /** Clients only apply throttling for version 1 and above. */
    @Override
    public boolean shouldClientThrottle(short version) {
        return version >= 1;
    }

    public static ExpireDelegationTokenResponse parse(ByteBuffer buffer, short version) {
        return new ExpireDelegationTokenResponse(
            new ExpireDelegationTokenResponseData(new ByteBufferAccessor(buffer), version));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/FetchMetadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Objects;
/**
 * Immutable (sessionId, epoch) pair that identifies a fetch session as described by the
 * incremental fetch protocol. Provides the epoch-advancing transitions used when building
 * successive fetch requests.
 */
public class FetchMetadata {
    public static final Logger log = LoggerFactory.getLogger(FetchMetadata.class);

    /**
     * The session ID used by clients with no session.
     */
    public static final int INVALID_SESSION_ID = 0;

    /**
     * The first epoch. When used in a fetch request, indicates that the client
     * wants to create or recreate a session.
     */
    public static final int INITIAL_EPOCH = 0;

    /**
     * An invalid epoch. When used in a fetch request, indicates that the client
     * wants to close any existing session, and not create a new one.
     */
    public static final int FINAL_EPOCH = -1;

    /**
     * The FetchMetadata that is used when initializing a new FetchSessionHandler.
     */
    public static final FetchMetadata INITIAL = new FetchMetadata(INVALID_SESSION_ID, INITIAL_EPOCH);

    /**
     * The FetchMetadata that is implicitly used for handling older FetchRequests that
     * don't include fetch metadata.
     */
    public static final FetchMetadata LEGACY = new FetchMetadata(INVALID_SESSION_ID, FINAL_EPOCH);

    /**
     * Returns the next epoch.
     *
     * @param prevEpoch The previous epoch.
     * @return The next epoch.
     */
    public static int nextEpoch(int prevEpoch) {
        if (prevEpoch < 0) {
            // The next epoch after FINAL_EPOCH is always FINAL_EPOCH itself.
            return FINAL_EPOCH;
        } else if (prevEpoch == Integer.MAX_VALUE) {
            // Wrap around to 1, skipping the reserved INITIAL_EPOCH (0) and FINAL_EPOCH (-1) values.
            return 1;
        } else {
            return prevEpoch + 1;
        }
    }

    /**
     * The fetch session ID.
     */
    private final int sessionId;

    /**
     * The fetch session epoch.
     */
    private final int epoch;

    public FetchMetadata(int sessionId, int epoch) {
        this.sessionId = sessionId;
        this.epoch = epoch;
    }

    /**
     * Returns true if this is a full fetch request (i.e. the epoch is either INITIAL_EPOCH or
     * FINAL_EPOCH rather than a positive incremental epoch).
     */
    public boolean isFull() {
        return (this.epoch == INITIAL_EPOCH) || (this.epoch == FINAL_EPOCH);
    }

    public int sessionId() {
        return sessionId;
    }

    public int epoch() {
        return epoch;
    }

    @Override
    public int hashCode() {
        return Objects.hash(sessionId, epoch);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        FetchMetadata that = (FetchMetadata) o;
        return sessionId == that.sessionId && epoch == that.epoch;
    }

    /**
     * Return the metadata for the next request. The metadata is set to indicate that the client wants to close the
     * existing session.
     */
    public FetchMetadata nextCloseExisting() {
        return new FetchMetadata(sessionId, FINAL_EPOCH);
    }

    /**
     * Return the metadata for the next request. The metadata is set to indicate that the client wants to close the
     * existing session and create a new one if possible.
     */
    public FetchMetadata nextCloseExistingAttemptNew() {
        return new FetchMetadata(sessionId, INITIAL_EPOCH);
    }

    /**
     * Return the metadata for the first incremental fetch request of a newly created session.
     * The resulting epoch is nextEpoch(INITIAL_EPOCH), i.e. 1, so isFull() is false for the
     * returned metadata. (The previous javadoc here said "next full fetch request", which
     * contradicted the epoch actually produced.)
     */
    public static FetchMetadata newIncremental(int sessionId) {
        return new FetchMetadata(sessionId, nextEpoch(INITIAL_EPOCH));
    }

    /**
     * Return the metadata for the next incremental fetch request: same session ID, epoch
     * advanced via nextEpoch.
     */
    public FetchMetadata nextIncremental() {
        return new FetchMetadata(sessionId, nextEpoch(epoch));
    }

    @Override
    public String toString() {
        StringBuilder bld = new StringBuilder();
        if (sessionId == INVALID_SESSION_ID) {
            bld.append("(sessionId=INVALID, ");
        } else {
            bld.append("(sessionId=").append(sessionId).append(", ");
        }
        if (epoch == INITIAL_EPOCH) {
            bld.append("epoch=INITIAL)");
        } else if (epoch == FINAL_EPOCH) {
            bld.append("epoch=FINAL)");
        } else {
            bld.append("epoch=").append(epoch).append(")");
        }
        return bld.toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/FetchRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.FetchRequestData;
import org.apache.kafka.common.message.FetchRequestData.ForgottenTopic;
import org.apache.kafka.common.message.FetchRequestData.ReplicaState;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.utils.Utils;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
/**
 * Wrapper for the Fetch request (ApiKeys.FETCH) supporting all protocol versions.
 *
 * Version-dependent wire layout handled here:
 * - version &lt; 3: no request-level byte limit (maxBytes forced to DEFAULT_RESPONSE_MAX_BYTES);
 * - version &lt; 13: topics are addressed by name; 13+ by topic ID;
 * - version &lt; 15: replica id is a top-level field; 15+ it lives in ReplicaState.
 *
 * The {@code fetchData} and {@code toForget} views are derived lazily from {@code data}
 * using double-checked locking on {@code this}.
 */
public class FetchRequest extends AbstractRequest {

    public static final int CONSUMER_REPLICA_ID = -1;

    // default values for older versions where a request level limit did not exist
    public static final int DEFAULT_RESPONSE_MAX_BYTES = Integer.MAX_VALUE;
    public static final long INVALID_LOG_START_OFFSET = -1L;

    // Sentinel replica ids used by non-broker fetchers.
    public static final int ORDINARY_CONSUMER_ID = -1;
    public static final int DEBUGGING_CONSUMER_ID = -2;
    public static final int FUTURE_LOCAL_REPLICA_ID = -3;

    private final FetchRequestData data;

    // Lazily materialized views of `data`; see fetchData(...) and forgottenTopics(...).
    private volatile LinkedHashMap<TopicIdPartition, PartitionData> fetchData = null;
    private volatile List<TopicIdPartition> toForget = null;

    // This is an immutable read-only structure derived from FetchRequestData
    private final FetchMetadata metadata;

    /**
     * Per-partition fetch parameters: the offset to fetch from, the follower's log start
     * offset, the partition-level byte limit, and optional leader-epoch information.
     */
    public static final class PartitionData {
        public final Uuid topicId;
        public final long fetchOffset;
        public final long logStartOffset;
        public final int maxBytes;
        public final Optional<Integer> currentLeaderEpoch;
        public final Optional<Integer> lastFetchedEpoch;

        public PartitionData(
            Uuid topicId,
            long fetchOffset,
            long logStartOffset,
            int maxBytes,
            Optional<Integer> currentLeaderEpoch
        ) {
            this(topicId, fetchOffset, logStartOffset, maxBytes, currentLeaderEpoch, Optional.empty());
        }

        public PartitionData(
            Uuid topicId,
            long fetchOffset,
            long logStartOffset,
            int maxBytes,
            Optional<Integer> currentLeaderEpoch,
            Optional<Integer> lastFetchedEpoch
        ) {
            this.topicId = topicId;
            this.fetchOffset = fetchOffset;
            this.logStartOffset = logStartOffset;
            this.maxBytes = maxBytes;
            this.currentLeaderEpoch = currentLeaderEpoch;
            this.lastFetchedEpoch = lastFetchedEpoch;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            PartitionData that = (PartitionData) o;
            return Objects.equals(topicId, that.topicId) &&
                fetchOffset == that.fetchOffset &&
                logStartOffset == that.logStartOffset &&
                maxBytes == that.maxBytes &&
                Objects.equals(currentLeaderEpoch, that.currentLeaderEpoch) &&
                Objects.equals(lastFetchedEpoch, that.lastFetchedEpoch);
        }

        @Override
        public int hashCode() {
            return Objects.hash(topicId, fetchOffset, logStartOffset, maxBytes, currentLeaderEpoch, lastFetchedEpoch);
        }

        @Override
        public String toString() {
            return "PartitionData(" +
                "topicId=" + topicId +
                ", fetchOffset=" + fetchOffset +
                ", logStartOffset=" + logStartOffset +
                ", maxBytes=" + maxBytes +
                ", currentLeaderEpoch=" + currentLeaderEpoch +
                ", lastFetchedEpoch=" + lastFetchedEpoch +
                ')';
        }
    }

    // Negative raw epoch values on the wire mean "no epoch"; map them to Optional.empty().
    private static Optional<Integer> optionalEpoch(int rawEpochValue) {
        if (rawEpochValue < 0) {
            return Optional.empty();
        } else {
            return Optional.of(rawEpochValue);
        }
    }

    // It is only used by KafkaRaftClient for downgrading the FetchRequest.
    public static class SimpleBuilder extends AbstractRequest.Builder<FetchRequest> {
        private final FetchRequestData fetchRequestData;

        public SimpleBuilder(FetchRequestData fetchRequestData) {
            super(ApiKeys.FETCH);
            this.fetchRequestData = fetchRequestData;
        }

        @Override
        public FetchRequest build(short version) {
            if (fetchRequestData.replicaId() >= 0) {
                throw new IllegalStateException("The replica id should be placed in the replicaState of a fetchRequestData");
            }
            // Versions before 15 carry the replica id as a top-level field, not in ReplicaState.
            if (version < 15) {
                fetchRequestData.setReplicaId(fetchRequestData.replicaState().replicaId());
                fetchRequestData.setReplicaState(new ReplicaState());
            }
            return new FetchRequest(fetchRequestData, version);
        }
    }

    /**
     * Builder used by consumers ({@link #forConsumer}) and follower replicas
     * ({@link #forReplica}) to assemble a FetchRequest from a map of partitions to fetch.
     */
    public static class Builder extends AbstractRequest.Builder<FetchRequest> {
        private final int maxWait;
        private final int minBytes;
        private final int replicaId;
        private final long replicaEpoch;
        private final Map<TopicPartition, PartitionData> toFetch;
        private IsolationLevel isolationLevel = IsolationLevel.READ_UNCOMMITTED;
        private int maxBytes = DEFAULT_RESPONSE_MAX_BYTES;
        private FetchMetadata metadata = FetchMetadata.LEGACY;
        private List<TopicIdPartition> removed = Collections.emptyList();
        private List<TopicIdPartition> replaced = Collections.emptyList();
        private String rackId = "";

        // Consumers use replica id -1 and no replica epoch.
        public static Builder forConsumer(short maxVersion, int maxWait, int minBytes, Map<TopicPartition, PartitionData> fetchData) {
            return new Builder(ApiKeys.FETCH.oldestVersion(), maxVersion,
                CONSUMER_REPLICA_ID, -1, maxWait, minBytes, fetchData);
        }

        // Followers pin both min and max version to a single allowed version.
        public static Builder forReplica(short allowedVersion, int replicaId, long replicaEpoch, int maxWait, int minBytes,
                                         Map<TopicPartition, PartitionData> fetchData) {
            return new Builder(allowedVersion, allowedVersion, replicaId, replicaEpoch, maxWait, minBytes, fetchData);
        }

        public Builder(short minVersion, short maxVersion, int replicaId, long replicaEpoch, int maxWait, int minBytes,
                       Map<TopicPartition, PartitionData> fetchData) {
            super(ApiKeys.FETCH, minVersion, maxVersion);
            this.replicaId = replicaId;
            this.replicaEpoch = replicaEpoch;
            this.maxWait = maxWait;
            this.minBytes = minBytes;
            this.toFetch = fetchData;
        }

        public Builder isolationLevel(IsolationLevel isolationLevel) {
            this.isolationLevel = isolationLevel;
            return this;
        }

        // Visible for testing
        public FetchMetadata metadata() {
            return this.metadata;
        }

        public Builder metadata(FetchMetadata metadata) {
            this.metadata = metadata;
            return this;
        }

        public Builder rackId(String rackId) {
            this.rackId = rackId;
            return this;
        }

        public Map<TopicPartition, PartitionData> fetchData() {
            return this.toFetch;
        }

        public Builder setMaxBytes(int maxBytes) {
            this.maxBytes = maxBytes;
            return this;
        }

        public List<TopicIdPartition> removed() {
            return removed;
        }

        public Builder removed(List<TopicIdPartition> removed) {
            this.removed = removed;
            return this;
        }

        public List<TopicIdPartition> replaced() {
            return replaced;
        }

        public Builder replaced(List<TopicIdPartition> replaced) {
            this.replaced = replaced;
            return this;
        }

        // Groups the given partitions into per-topic ForgottenTopic entries, reusing an
        // existing entry for a topic when one is already present in the map.
        private void addToForgottenTopicMap(List<TopicIdPartition> toForget, Map<String, FetchRequestData.ForgottenTopic> forgottenTopicMap) {
            toForget.forEach(topicIdPartition -> {
                FetchRequestData.ForgottenTopic forgottenTopic = forgottenTopicMap.get(topicIdPartition.topic());
                if (forgottenTopic == null) {
                    forgottenTopic = new ForgottenTopic()
                        .setTopic(topicIdPartition.topic())
                        .setTopicId(topicIdPartition.topicId());
                    forgottenTopicMap.put(topicIdPartition.topic(), forgottenTopic);
                }
                forgottenTopic.partitions().add(topicIdPartition.partition());
            });
        }

        @Override
        public FetchRequest build(short version) {
            // Versions before 3 have no request-level limit. NOTE: this overwrites the
            // builder's own field, so the clamp persists across subsequent build() calls.
            if (version < 3) {
                maxBytes = DEFAULT_RESPONSE_MAX_BYTES;
            }
            FetchRequestData fetchRequestData = new FetchRequestData();
            fetchRequestData.setMaxWaitMs(maxWait);
            fetchRequestData.setMinBytes(minBytes);
            fetchRequestData.setMaxBytes(maxBytes);
            fetchRequestData.setIsolationLevel(isolationLevel.id());
            fetchRequestData.setForgottenTopicsData(new ArrayList<>());
            if (version < 15) {
                fetchRequestData.setReplicaId(replicaId);
            } else {
                fetchRequestData.setReplicaState(new ReplicaState()
                    .setReplicaId(replicaId)
                    .setReplicaEpoch(replicaEpoch));
            }
            Map<String, FetchRequestData.ForgottenTopic> forgottenTopicMap = new LinkedHashMap<>();
            addToForgottenTopicMap(removed, forgottenTopicMap);
            // If a version older than v13 is used, topic-partition which were replaced
            // by a topic-partition with the same name but a different topic ID are not
            // sent out in the "forget" set in order to not remove the newly added
            // partition in the "fetch" set.
            if (version >= 13) {
                addToForgottenTopicMap(replaced, forgottenTopicMap);
            }
            forgottenTopicMap.forEach((topic, forgottenTopic) -> fetchRequestData.forgottenTopicsData().add(forgottenTopic));
            // We collect the partitions in a single FetchTopic only if they appear sequentially in the fetchData
            fetchRequestData.setTopics(new ArrayList<>());
            FetchRequestData.FetchTopic fetchTopic = null;
            for (Map.Entry<TopicPartition, PartitionData> entry : toFetch.entrySet()) {
                TopicPartition topicPartition = entry.getKey();
                PartitionData partitionData = entry.getValue();
                if (fetchTopic == null || !topicPartition.topic().equals(fetchTopic.topic())) {
                    fetchTopic = new FetchRequestData.FetchTopic()
                        .setTopic(topicPartition.topic())
                        .setTopicId(partitionData.topicId)
                        .setPartitions(new ArrayList<>());
                    fetchRequestData.topics().add(fetchTopic);
                }
                FetchRequestData.FetchPartition fetchPartition = new FetchRequestData.FetchPartition()
                    .setPartition(topicPartition.partition())
                    .setCurrentLeaderEpoch(partitionData.currentLeaderEpoch.orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH))
                    .setLastFetchedEpoch(partitionData.lastFetchedEpoch.orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH))
                    .setFetchOffset(partitionData.fetchOffset)
                    .setLogStartOffset(partitionData.logStartOffset)
                    .setPartitionMaxBytes(partitionData.maxBytes);
                fetchTopic.partitions().add(fetchPartition);
            }
            if (metadata != null) {
                fetchRequestData.setSessionEpoch(metadata.epoch());
                fetchRequestData.setSessionId(metadata.sessionId());
            }
            fetchRequestData.setRackId(rackId);
            return new FetchRequest(fetchRequestData, version);
        }

        @Override
        public String toString() {
            StringBuilder bld = new StringBuilder();
            bld.append("(type=FetchRequest").
                append(", replicaId=").append(replicaId).
                append(", maxWait=").append(maxWait).
                append(", minBytes=").append(minBytes).
                append(", maxBytes=").append(maxBytes).
                append(", fetchData=").append(toFetch).
                append(", isolationLevel=").append(isolationLevel).
                append(", removed=").append(Utils.join(removed, ", ")).
                append(", replaced=").append(Utils.join(replaced, ", ")).
                append(", metadata=").append(metadata).
                append(", rackId=").append(rackId).
                append(")");
            return bld.toString();
        }
    }

    // Resolve the replica id regardless of version: the top-level field when set (!= -1),
    // otherwise the ReplicaState field used by versions 15+.
    public static int replicaId(FetchRequestData fetchRequestData) {
        return fetchRequestData.replicaId() != -1 ? fetchRequestData.replicaId() : fetchRequestData.replicaState().replicaId();
    }

    public FetchRequest(FetchRequestData fetchRequestData, short version) {
        super(ApiKeys.FETCH, version);
        this.data = fetchRequestData;
        this.metadata = new FetchMetadata(fetchRequestData.sessionId(), fetchRequestData.sessionEpoch());
    }

    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        // For versions 13+ the error is indicated by setting the top-level error code, and no partitions will be returned.
        // For earlier versions, the error is indicated in two ways: by setting the same error code in all partitions,
        // and by setting the top-level error code. The form where we set the same error code in all partitions
        // is needed in order to maintain backwards compatibility with older versions of the protocol
        // in which there was no top-level error code. Note that for incremental fetch responses, there
        // may not be any partitions at all in the response. For this reason, the top-level error code
        // is essential for them.
        Errors error = Errors.forException(e);
        List<FetchResponseData.FetchableTopicResponse> topicResponseList = new ArrayList<>();
        // For version 13+, we know the client can handle a top level error code, so we don't need to send back partitions too.
        if (version() < 13) {
            data.topics().forEach(topic -> {
                List<FetchResponseData.PartitionData> partitionResponses = topic.partitions().stream().map(partition ->
                    FetchResponse.partitionResponse(partition.partition(), error)).collect(Collectors.toList());
                topicResponseList.add(new FetchResponseData.FetchableTopicResponse()
                    .setTopic(topic.topic())
                    .setTopicId(topic.topicId())
                    .setPartitions(partitionResponses));
            });
        }
        return new FetchResponse(new FetchResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setErrorCode(error.code())
            .setSessionId(data.sessionId())
            .setResponses(topicResponseList));
    }

    // See replicaId(FetchRequestData): the field's location depends on the version.
    public int replicaId() {
        if (version() < 15) {
            return data.replicaId();
        }
        return data.replicaState().replicaId();
    }

    public long replicaEpoch() {
        return data.replicaState().replicaEpoch();
    }

    public int maxWait() {
        return data.maxWaitMs();
    }

    public int minBytes() {
        return data.minBytes();
    }

    public int maxBytes() {
        return data.maxBytes();
    }

    // For versions < 13, builds the partitionData map using only the FetchRequestData.
    // For versions 13+, builds the partitionData map using both the FetchRequestData and a mapping of topic IDs to names.
    public Map<TopicIdPartition, PartitionData> fetchData(Map<Uuid, String> topicNames) {
        if (fetchData == null) {
            synchronized (this) {
                if (fetchData == null) {
                    // Assigning the lazy-initialized `fetchData` in the last step
                    // to avoid other threads accessing a half-initialized object.
                    final LinkedHashMap<TopicIdPartition, PartitionData> fetchDataTmp = new LinkedHashMap<>();
                    final short version = version();
                    data.topics().forEach(fetchTopic -> {
                        String name;
                        if (version < 13) {
                            name = fetchTopic.topic(); // can't be null
                        } else {
                            name = topicNames.get(fetchTopic.topicId());
                        }
                        fetchTopic.partitions().forEach(fetchPartition ->
                            // Topic name may be null here if the topic name was unable to be resolved using the topicNames map.
                            fetchDataTmp.put(new TopicIdPartition(fetchTopic.topicId(), new TopicPartition(name, fetchPartition.partition())),
                                new PartitionData(
                                    fetchTopic.topicId(),
                                    fetchPartition.fetchOffset(),
                                    fetchPartition.logStartOffset(),
                                    fetchPartition.partitionMaxBytes(),
                                    optionalEpoch(fetchPartition.currentLeaderEpoch()),
                                    optionalEpoch(fetchPartition.lastFetchedEpoch())
                                )
                            )
                        );
                    });
                    fetchData = fetchDataTmp;
                }
            }
        }
        return fetchData;
    }

    // For versions < 13, builds the forgotten topics list using only the FetchRequestData.
    // For versions 13+, builds the forgotten topics list using both the FetchRequestData and a mapping of topic IDs to names.
    public List<TopicIdPartition> forgottenTopics(Map<Uuid, String> topicNames) {
        if (toForget == null) {
            synchronized (this) {
                if (toForget == null) {
                    // Assigning the lazy-initialized `toForget` in the last step
                    // to avoid other threads accessing a half-initialized object.
                    final List<TopicIdPartition> toForgetTmp = new ArrayList<>();
                    data.forgottenTopicsData().forEach(forgottenTopic -> {
                        String name;
                        if (version() < 13) {
                            name = forgottenTopic.topic(); // can't be null
                        } else {
                            name = topicNames.get(forgottenTopic.topicId());
                        }
                        // Topic name may be null here if the topic name was unable to be resolved using the topicNames map.
                        forgottenTopic.partitions().forEach(partitionId -> toForgetTmp.add(new TopicIdPartition(forgottenTopic.topicId(), new TopicPartition(name, partitionId))));
                    });
                    toForget = toForgetTmp;
                }
            }
        }
        return toForget;
    }

    public boolean isFromFollower() {
        return replicaId() >= 0;
    }

    public IsolationLevel isolationLevel() {
        return IsolationLevel.forId(data.isolationLevel());
    }

    public FetchMetadata metadata() {
        return metadata;
    }

    public String rackId() {
        return data.rackId();
    }

    public static FetchRequest parse(ByteBuffer buffer, short version) {
        return new FetchRequest(new FetchRequestData(new ByteBufferAccessor(buffer), version), version);
    }

    // Broker ids are non-negative int.
    public static boolean isValidBrokerId(int brokerId) {
        return brokerId >= 0;
    }

    // All negative replica ids except FUTURE_LOCAL_REPLICA_ID denote consumers.
    public static boolean isConsumer(int replicaId) {
        return replicaId < 0 && replicaId != FUTURE_LOCAL_REPLICA_ID;
    }

    // Human-readable description of a replica id, for logging/error messages.
    public static String describeReplicaId(int replicaId) {
        switch (replicaId) {
            case ORDINARY_CONSUMER_ID: return "consumer";
            case DEBUGGING_CONSUMER_ID: return "debug consumer";
            case FUTURE_LOCAL_REPLICA_ID: return "future local replica";
            default: {
                if (isValidBrokerId(replicaId))
                    return "replica [" + replicaId + "]";
                else
                    return "invalid replica [" + replicaId + "]";
            }
        }
    }

    @Override
    public FetchRequestData data() {
        return data;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/FetchResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.FetchResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Records;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.stream.Collectors;
import static org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID;
/**
* This wrapper supports all versions of the Fetch API
*
* Possible error codes:
*
* - {@link Errors#OFFSET_OUT_OF_RANGE} If the fetch offset is out of range for a requested partition
* - {@link Errors#TOPIC_AUTHORIZATION_FAILED} If the user does not have READ access to a requested topic
* - {@link Errors#REPLICA_NOT_AVAILABLE} If the request is received by a broker with version < 2.6 which is not a replica
* - {@link Errors#NOT_LEADER_OR_FOLLOWER} If the broker is not a leader or follower and either the provided leader epoch
* matches the known leader epoch on the broker or is empty
* - {@link Errors#FENCED_LEADER_EPOCH} If the epoch is lower than the broker's epoch
* - {@link Errors#UNKNOWN_LEADER_EPOCH} If the epoch is larger than the broker's epoch
* - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION} If the broker does not have metadata for a topic or partition
* - {@link Errors#KAFKA_STORAGE_ERROR} If the log directory for one of the requested partitions is offline
* - {@link Errors#UNSUPPORTED_COMPRESSION_TYPE} If a fetched topic is using a compression type which is
* not supported by the fetch request version
* - {@link Errors#CORRUPT_MESSAGE} If corrupt message encountered, e.g. when the broker scans the log to find
* the fetch offset after the index lookup
* - {@link Errors#UNKNOWN_TOPIC_ID} If the request contains a topic ID unknown to the broker
* - {@link Errors#FETCH_SESSION_TOPIC_ID_ERROR} If the request version supports topic IDs but the session does not or vice versa,
* or a topic ID in the request is inconsistent with a topic ID in the session
* - {@link Errors#INCONSISTENT_TOPIC_ID} If a topic ID in the session does not match the topic ID in the log
* - {@link Errors#UNKNOWN_SERVER_ERROR} For any unexpected errors
*/
public class FetchResponse extends AbstractResponse {
    // Sentinel values used for fields that are absent or unknown in a partition response.
    public static final long INVALID_HIGH_WATERMARK = -1L;
    public static final long INVALID_LAST_STABLE_OFFSET = -1L;
    public static final long INVALID_LOG_START_OFFSET = -1L;
    public static final int INVALID_PREFERRED_REPLICA_ID = -1;

    private final FetchResponseData data;

    // we build responseData when needed.
    private volatile LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData = null;

    @Override
    public FetchResponseData data() {
        return data;
    }

    /**
     * From version 3 or later, the authorized and existing entries in `FetchRequest.fetchData` should be in the same order in `responseData`.
     * Version 13 introduces topic IDs which can lead to a few new errors. If there is any unknown topic ID in the request, the
     * response will contain a partition-level UNKNOWN_TOPIC_ID error for that partition.
     * If a request's topic ID usage is inconsistent with the session, we will return a top level FETCH_SESSION_TOPIC_ID_ERROR error.
     * We may also return INCONSISTENT_TOPIC_ID error as a partition-level error when a partition in the session has a topic ID
     * inconsistent with the log.
     */
    public FetchResponse(FetchResponseData fetchResponseData) {
        super(ApiKeys.FETCH);
        this.data = fetchResponseData;
    }

    public Errors error() {
        return Errors.forCode(data.errorCode());
    }

    /**
     * Build (lazily, with double-checked locking) the per-partition view of this response.
     * For version 13+ the topic name is resolved via {@code topicNames}; topics whose IDs
     * cannot be resolved are silently omitted from the returned map.
     */
    public LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseData(Map<Uuid, String> topicNames, short version) {
        if (responseData == null) {
            synchronized (this) {
                if (responseData == null) {
                    // Assigning the lazy-initialized `responseData` in the last step
                    // to avoid other threads accessing a half-initialized object.
                    final LinkedHashMap<TopicPartition, FetchResponseData.PartitionData> responseDataTmp =
                        new LinkedHashMap<>();
                    data.responses().forEach(topicResponse -> {
                        String name;
                        if (version < 13) {
                            name = topicResponse.topic();
                        } else {
                            name = topicNames.get(topicResponse.topicId());
                        }
                        if (name != null) {
                            topicResponse.partitions().forEach(partition ->
                                responseDataTmp.put(new TopicPartition(name, partition.partitionIndex()), partition));
                        }
                    });
                    responseData = responseDataTmp;
                }
            }
        }
        return responseData;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    public int sessionId() {
        return data.sessionId();
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        // Counts the top-level error plus every partition-level error code.
        Map<Errors, Integer> errorCounts = new HashMap<>();
        updateErrorCounts(errorCounts, error());
        data.responses().forEach(topicResponse ->
            topicResponse.partitions().forEach(partition ->
                updateErrorCounts(errorCounts, Errors.forCode(partition.errorCode())))
        );
        return errorCounts;
    }

    public static FetchResponse parse(ByteBuffer buffer, short version) {
        return new FetchResponse(new FetchResponseData(new ByteBufferAccessor(buffer), version));
    }

    // Fetch versions 13 and above should have topic IDs for all topics.
    // Fetch versions < 13 should return the empty set.
    public Set<Uuid> topicIds() {
        return data.responses().stream().map(FetchResponseData.FetchableTopicResponse::topicId).filter(id -> !id.equals(Uuid.ZERO_UUID)).collect(Collectors.toSet());
    }

    /**
     * Convenience method to find the size of a response.
     *
     * @param version The version of the response to use.
     * @param partIterator The partition iterator.
     * @return The response size in bytes.
     */
    public static int sizeOf(short version,
                             Iterator<Map.Entry<TopicIdPartition,
                             FetchResponseData.PartitionData>> partIterator) {
        // Since the throttleTimeMs and metadata field sizes are constant and fixed, we can
        // use arbitrary values here without affecting the result.
        FetchResponseData data = toMessage(Errors.NONE, 0, INVALID_SESSION_ID, partIterator);
        ObjectSerializationCache cache = new ObjectSerializationCache();
        // NOTE(review): the extra 4 bytes presumably account for the length prefix of the
        // framed message — confirm against the serialization layer.
        return 4 + data.size(cache, version);
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        return version >= 8;
    }

    // A diverging epoch is present when its epoch field is non-negative.
    public static Optional<FetchResponseData.EpochEndOffset> divergingEpoch(FetchResponseData.PartitionData partitionResponse) {
        return partitionResponse.divergingEpoch().epoch() < 0 ? Optional.empty()
            : Optional.of(partitionResponse.divergingEpoch());
    }

    public static boolean isDivergingEpoch(FetchResponseData.PartitionData partitionResponse) {
        return partitionResponse.divergingEpoch().epoch() >= 0;
    }

    public static Optional<Integer> preferredReadReplica(FetchResponseData.PartitionData partitionResponse) {
        return partitionResponse.preferredReadReplica() == INVALID_PREFERRED_REPLICA_ID ? Optional.empty()
            : Optional.of(partitionResponse.preferredReadReplica());
    }

    public static boolean isPreferredReplica(FetchResponseData.PartitionData partitionResponse) {
        return partitionResponse.preferredReadReplica() != INVALID_PREFERRED_REPLICA_ID;
    }

    public static FetchResponseData.PartitionData partitionResponse(TopicIdPartition topicIdPartition, Errors error) {
        return partitionResponse(topicIdPartition.topicPartition().partition(), error);
    }

    // Builds a minimal error-only partition response (no records, invalid high watermark).
    public static FetchResponseData.PartitionData partitionResponse(int partition, Errors error) {
        return new FetchResponseData.PartitionData()
            .setPartitionIndex(partition)
            .setErrorCode(error.code())
            .setHighWatermark(FetchResponse.INVALID_HIGH_WATERMARK);
    }

    /**
     * Returns `partition.records` as `Records` (instead of `BaseRecords`). If `records` is `null`, returns `MemoryRecords.EMPTY`.
     *
     * If this response was deserialized after a fetch, this method should never fail. An example where this would
     * fail is a down-converted response (e.g. LazyDownConversionRecords) on the broker (before it's serialized and
     * sent on the wire).
     *
     * @param partition partition data
     * @return Records or empty record if the records in PartitionData is null.
     */
    public static Records recordsOrFail(FetchResponseData.PartitionData partition) {
        if (partition.records() == null) return MemoryRecords.EMPTY;
        if (partition.records() instanceof Records) return (Records) partition.records();
        throw new ClassCastException("The record type is " + partition.records().getClass().getSimpleName() + ", which is not a subtype of " +
            Records.class.getSimpleName() + ". This method is only safe to call if the `FetchResponse` was deserialized from bytes.");
    }

    /**
     * @return The size in bytes of the records. 0 is returned if records of input partition is null.
     */
    public static int recordsSize(FetchResponseData.PartitionData partition) {
        return partition.records() == null ? 0 : partition.records().sizeInBytes();
    }

    // TODO: remove as a part of KAFKA-12410
    public static FetchResponse of(Errors error,
                                   int throttleTimeMs,
                                   int sessionId,
                                   LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseData) {
        return new FetchResponse(toMessage(error, throttleTimeMs, sessionId, responseData.entrySet().iterator()));
    }

    // True when `currentTopic` belongs to the same topic as `previousTopic`: compared by
    // topic ID when the previous entry has one, otherwise by topic name.
    private static boolean matchingTopic(FetchResponseData.FetchableTopicResponse previousTopic, TopicIdPartition currentTopic) {
        if (previousTopic == null)
            return false;
        if (!previousTopic.topicId().equals(Uuid.ZERO_UUID))
            return previousTopic.topicId().equals(currentTopic.topicId());
        else
            return previousTopic.topic().equals(currentTopic.topicPartition().topic());
    }

    // Assembles a FetchResponseData message, grouping consecutive partitions of the same
    // topic into a single FetchableTopicResponse while preserving input order.
    private static FetchResponseData toMessage(Errors error,
                                               int throttleTimeMs,
                                               int sessionId,
                                               Iterator<Map.Entry<TopicIdPartition, FetchResponseData.PartitionData>> partIterator) {
        List<FetchResponseData.FetchableTopicResponse> topicResponseList = new ArrayList<>();
        while (partIterator.hasNext()) {
            Map.Entry<TopicIdPartition, FetchResponseData.PartitionData> entry = partIterator.next();
            FetchResponseData.PartitionData partitionData = entry.getValue();
            // Since PartitionData alone doesn't know the partition ID, we set it here
            partitionData.setPartitionIndex(entry.getKey().topicPartition().partition());
            // We have to keep the order of input topic-partition. Hence, we batch the partitions only if the last
            // batch is in the same topic group.
            FetchResponseData.FetchableTopicResponse previousTopic = topicResponseList.isEmpty() ? null
                : topicResponseList.get(topicResponseList.size() - 1);
            if (matchingTopic(previousTopic, entry.getKey()))
                previousTopic.partitions().add(partitionData);
            else {
                List<FetchResponseData.PartitionData> partitionResponses = new ArrayList<>();
                partitionResponses.add(partitionData);
                topicResponseList.add(new FetchResponseData.FetchableTopicResponse()
                    .setTopic(entry.getKey().topicPartition().topic())
                    .setTopicId(entry.getKey().topicId())
                    .setPartitions(partitionResponses));
            }
        }
        return new FetchResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setErrorCode(error.code())
            .setSessionId(sessionId)
            .setResponses(topicResponseList);
    }
}
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/FetchSnapshotRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.FetchSnapshotRequestData;
import org.apache.kafka.common.message.FetchSnapshotResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.Optional;
import java.util.function.UnaryOperator;
/**
 * Request used by KRaft replicas to fetch a snapshot of a partition from the leader.
 */
public final class FetchSnapshotRequest extends AbstractRequest {
    private final FetchSnapshotRequestData data;

    public FetchSnapshotRequest(FetchSnapshotRequestData data, short version) {
        super(ApiKeys.FETCH_SNAPSHOT, version);
        this.data = data;
    }

    /**
     * Builds an error response carrying only the throttle time and the top-level
     * error mapped from {@code e}.
     */
    @Override
    public FetchSnapshotResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        return new FetchSnapshotResponse(
            new FetchSnapshotResponseData()
                .setThrottleTimeMs(throttleTimeMs)
                .setErrorCode(Errors.forException(e).code())
        );
    }

    @Override
    public FetchSnapshotRequestData data() {
        return data;
    }

    /**
     * Creates a FetchSnapshotRequestData with a single PartitionSnapshot for the topic partition.
     *
     * The partition index will already be populated when calling operator.
     *
     * @param clusterId the cluster id to set on the request
     * @param topicPartition the topic partition to include
     * @param operator unary operator responsible for populating all the appropriate fields
     * @return the created fetch snapshot request data
     */
    public static FetchSnapshotRequestData singleton(
        String clusterId,
        TopicPartition topicPartition,
        UnaryOperator<FetchSnapshotRequestData.PartitionSnapshot> operator
    ) {
        FetchSnapshotRequestData.PartitionSnapshot partitionSnapshot = operator.apply(
            new FetchSnapshotRequestData.PartitionSnapshot().setPartition(topicPartition.partition())
        );
        return new FetchSnapshotRequestData()
            .setClusterId(clusterId)
            .setTopics(
                Collections.singletonList(
                    new FetchSnapshotRequestData.TopicSnapshot()
                        .setName(topicPartition.topic())
                        .setPartitions(Collections.singletonList(partitionSnapshot))
                )
            );
    }

    /**
     * Finds the PartitionSnapshot for a given topic partition.
     *
     * @param data the fetch snapshot request data
     * @param topicPartition the topic partition to find
     * @return the request partition snapshot if found, otherwise an empty Optional
     */
    public static Optional<FetchSnapshotRequestData.PartitionSnapshot> forTopicPartition(
        FetchSnapshotRequestData data,
        TopicPartition topicPartition
    ) {
        return data
            .topics()
            .stream()
            .filter(topic -> topic.name().equals(topicPartition.topic()))
            .flatMap(topic -> topic.partitions().stream())
            .filter(partition -> partition.partition() == topicPartition.partition())
            .findAny();
    }

    public static FetchSnapshotRequest parse(ByteBuffer buffer, short version) {
        return new FetchSnapshotRequest(new FetchSnapshotRequestData(new ByteBufferAccessor(buffer), version), version);
    }

    public static class Builder extends AbstractRequest.Builder<FetchSnapshotRequest> {
        private final FetchSnapshotRequestData data;

        public Builder(FetchSnapshotRequestData data) {
            super(ApiKeys.FETCH_SNAPSHOT);
            this.data = data;
        }

        @Override
        public FetchSnapshotRequest build(short version) {
            return new FetchSnapshotRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/FetchSnapshotResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.FetchSnapshotResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.function.UnaryOperator;
/**
 * Response to a FetchSnapshot request; carries a top-level error code plus
 * per-partition snapshot data and error codes.
 */
public final class FetchSnapshotResponse extends AbstractResponse {
    private final FetchSnapshotResponseData data;

    public FetchSnapshotResponse(FetchSnapshotResponseData data) {
        super(ApiKeys.FETCH_SNAPSHOT);
        this.data = data;
    }

    /**
     * Tallies the top-level error (when set) plus one count per partition error code.
     */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Map<Errors, Integer> errors = new HashMap<>();
        Errors topLevelError = Errors.forCode(data.errorCode());
        if (topLevelError != Errors.NONE) {
            errors.put(topLevelError, 1);
        }
        for (FetchSnapshotResponseData.TopicSnapshot topicResponse : data.topics()) {
            for (FetchSnapshotResponseData.PartitionSnapshot partitionResponse : topicResponse.partitions()) {
                // Same merge semantics as the previous compute() call, but consistent
                // with the other response classes in this package.
                updateErrorCounts(errors, Errors.forCode(partitionResponse.errorCode()));
            }
        }
        return errors;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    @Override
    public FetchSnapshotResponseData data() {
        return data;
    }

    /**
     * Creates a FetchSnapshotResponseData with a top level error.
     *
     * @param error the top level error
     * @return the created fetch snapshot response data
     */
    public static FetchSnapshotResponseData withTopLevelError(Errors error) {
        return new FetchSnapshotResponseData().setErrorCode(error.code());
    }

    /**
     * Creates a FetchSnapshotResponseData with a single PartitionSnapshot for the topic partition.
     *
     * The partition index will already be populated when calling operator.
     *
     * @param topicPartition the topic partition to include
     * @param operator unary operator responsible for populating all of the appropriate fields
     * @return the created fetch snapshot response data
     */
    public static FetchSnapshotResponseData singleton(
        TopicPartition topicPartition,
        UnaryOperator<FetchSnapshotResponseData.PartitionSnapshot> operator
    ) {
        FetchSnapshotResponseData.PartitionSnapshot partitionSnapshot = operator.apply(
            new FetchSnapshotResponseData.PartitionSnapshot().setIndex(topicPartition.partition())
        );
        return new FetchSnapshotResponseData()
            .setTopics(
                Collections.singletonList(
                    new FetchSnapshotResponseData.TopicSnapshot()
                        .setName(topicPartition.topic())
                        .setPartitions(Collections.singletonList(partitionSnapshot))
                )
            );
    }

    /**
     * Finds the PartitionSnapshot for a given topic partition.
     *
     * @param data the fetch snapshot response data
     * @param topicPartition the topic partition to find
     * @return the response partition snapshot if found, otherwise an empty Optional
     */
    public static Optional<FetchSnapshotResponseData.PartitionSnapshot> forTopicPartition(
        FetchSnapshotResponseData data,
        TopicPartition topicPartition
    ) {
        return data
            .topics()
            .stream()
            .filter(topic -> topic.name().equals(topicPartition.topic()))
            .flatMap(topic -> topic.partitions().stream())
            .filter(partition -> partition.index() == topicPartition.partition())
            .findAny();
    }

    public static FetchSnapshotResponse parse(ByteBuffer buffer, short version) {
        return new FetchSnapshotResponse(new FetchSnapshotResponseData(new ByteBufferAccessor(buffer), version));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/FindCoordinatorRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.errors.InvalidRequestException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.FindCoordinatorRequestData;
import org.apache.kafka.common.message.FindCoordinatorResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
public class FindCoordinatorRequest extends AbstractRequest {
public static final short MIN_BATCHED_VERSION = 4;
public static class Builder extends AbstractRequest.Builder<FindCoordinatorRequest> {
private final FindCoordinatorRequestData data;
public Builder(FindCoordinatorRequestData data) {
super(ApiKeys.FIND_COORDINATOR);
this.data = data;
}
@Override
public FindCoordinatorRequest build(short version) {
if (version < 1 && data.keyType() == CoordinatorType.TRANSACTION.id()) {
throw new UnsupportedVersionException("Cannot create a v" + version + " FindCoordinator request " +
"because we require features supported only in 2 or later.");
}
int batchedKeys = data.coordinatorKeys().size();
if (version < MIN_BATCHED_VERSION) {
if (batchedKeys > 1)
throw new NoBatchedFindCoordinatorsException("Cannot create a v" + version + " FindCoordinator request " +
"because we require features supported only in " + MIN_BATCHED_VERSION + " or later.");
if (batchedKeys == 1) {
data.setKey(data.coordinatorKeys().get(0));
data.setCoordinatorKeys(Collections.emptyList());
}
} else if (batchedKeys == 0 && data.key() != null) {
data.setCoordinatorKeys(Collections.singletonList(data.key()));
data.setKey(""); // default value
}
return new FindCoordinatorRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
public FindCoordinatorRequestData data() {
return data;
}
}
/**
* Indicates that it is not possible to lookup coordinators in batches with FindCoordinator. Instead
* coordinators must be looked up one by one.
*/
public static class NoBatchedFindCoordinatorsException extends UnsupportedVersionException {
private static final long serialVersionUID = 1L;
public NoBatchedFindCoordinatorsException(String message, Throwable cause) {
super(message, cause);
}
public NoBatchedFindCoordinatorsException(String message) {
super(message);
}
}
private final FindCoordinatorRequestData data;
private FindCoordinatorRequest(FindCoordinatorRequestData data, short version) {
super(ApiKeys.FIND_COORDINATOR, version);
this.data = data;
}
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
FindCoordinatorResponseData response = new FindCoordinatorResponseData();
if (version() >= 2) {
response.setThrottleTimeMs(throttleTimeMs);
}
Errors error = Errors.forException(e);
if (version() < MIN_BATCHED_VERSION) {
return FindCoordinatorResponse.prepareOldResponse(error, Node.noNode());
} else {
return FindCoordinatorResponse.prepareErrorResponse(error, data.coordinatorKeys());
}
}
public static FindCoordinatorRequest parse(ByteBuffer buffer, short version) {
return new FindCoordinatorRequest(new FindCoordinatorRequestData(new ByteBufferAccessor(buffer), version),
version);
}
@Override
public FindCoordinatorRequestData data() {
return data;
}
public enum CoordinatorType {
GROUP((byte) 0), TRANSACTION((byte) 1);
final byte id;
CoordinatorType(byte id) {
this.id = id;
}
public byte id() {
return id;
}
public static CoordinatorType forId(byte id) {
switch (id) {
case 0:
return GROUP;
case 1:
return TRANSACTION;
default:
throw new InvalidRequestException("Unknown coordinator type received: " + id);
}
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/FindCoordinatorResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.message.FindCoordinatorResponseData;
import org.apache.kafka.common.message.FindCoordinatorResponseData.Coordinator;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
public class FindCoordinatorResponse extends AbstractResponse {
/**
 * Possible error codes:
 *
 * COORDINATOR_LOAD_IN_PROGRESS (14)
 * COORDINATOR_NOT_AVAILABLE (15)
 * GROUP_AUTHORIZATION_FAILED (30)
 * INVALID_REQUEST (42)
 * TRANSACTIONAL_ID_AUTHORIZATION_FAILED (53)
 */
private final FindCoordinatorResponseData data;
public FindCoordinatorResponse(FindCoordinatorResponseData data) {
super(ApiKeys.FIND_COORDINATOR);
this.data = data;
}
/**
 * Returns the coordinator entry for the given key.
 *
 * For versions <= 3 the response carries a single top-level coordinator and the
 * coordinators list is empty, so the top-level fields are repackaged into a
 * Coordinator stamped with the requested key (no key matching is possible in
 * that layout). For versions >= 4 the batched list is searched for an exact match.
 *
 * @param key the coordinator key the caller asked for; must not be null
 * @return the matching coordinator, or an empty Optional when a v4+ response has
 *         no entry for the key
 */
public Optional<Coordinator> coordinatorByKey(String key) {
Objects.requireNonNull(key);
if (this.data.coordinators().isEmpty()) {
// version <= 3
return Optional.of(new Coordinator()
.setErrorCode(data.errorCode())
.setErrorMessage(data.errorMessage())
.setHost(data.host())
.setPort(data.port())
.setNodeId(data.nodeId())
.setKey(key));
}
// version >= 4
return data.coordinators().stream().filter(c -> c.key().equals(key)).findFirst();
}
@Override
public FindCoordinatorResponseData data() {
return data;
}
// Builds a Node from the top-level fields, which are the ones populated in the
// v <= 3 layout; for batched (v >= 4) responses use coordinators() instead.
public Node node() {
return new Node(data.nodeId(), data.host(), data.port());
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
// True when the top-level error code is anything other than NONE.
public boolean hasError() {
return error() != Errors.NONE;
}
// Top-level error of the response; in the batched (v >= 4) layout each Coordinator
// entry additionally carries its own error code.
public Errors error() {
return Errors.forCode(data.errorCode());
}
/**
 * Tallies errors per code: one count per coordinator entry when the response is
 * batched (v >= 4), otherwise a single count for the top-level error.
 */
@Override
public Map<Errors, Integer> errorCounts() {
if (!data.coordinators().isEmpty()) {
Map<Errors, Integer> errorCounts = new HashMap<>();
for (Coordinator coordinator : data.coordinators()) {
updateErrorCounts(errorCounts, Errors.forCode(coordinator.errorCode()));
}
return errorCounts;
} else {
return errorCounts(error());
}
}
public static FindCoordinatorResponse parse(ByteBuffer buffer, short version) {
return new FindCoordinatorResponse(new FindCoordinatorResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public String toString() {
return data.toString();
}
// Throttle time is communicated to clients starting with version 2.
@Override
public boolean shouldClientThrottle(short version) {
return version >= 2;
}
/**
 * Returns the coordinators of this response as a list. When the batched field is
 * empty (v <= 3 layout) a singleton list is synthesized from the top-level fields;
 * its key is set to null because the requested key is not recorded in that layout.
 */
public List<FindCoordinatorResponseData.Coordinator> coordinators() {
if (!data.coordinators().isEmpty())
return data.coordinators();
else {
FindCoordinatorResponseData.Coordinator coordinator = new Coordinator()
.setErrorCode(data.errorCode())
.setErrorMessage(data.errorMessage())
.setKey(null)
.setNodeId(data.nodeId())
.setHost(data.host())
.setPort(data.port());
return Collections.singletonList(coordinator);
}
}
// Builds an unbatched (v <= 3) response with the given error and coordinator node.
public static FindCoordinatorResponse prepareOldResponse(Errors error, Node node) {
FindCoordinatorResponseData data = new FindCoordinatorResponseData();
data.setErrorCode(error.code())
.setErrorMessage(error.message())
.setNodeId(node.id())
.setHost(node.host())
.setPort(node.port());
return new FindCoordinatorResponse(data);
}
// Builds a batched (v >= 4) response containing a single coordinator entry.
public static FindCoordinatorResponse prepareResponse(Errors error, String key, Node node) {
FindCoordinatorResponseData data = new FindCoordinatorResponseData();
data.setCoordinators(Collections.singletonList(
new FindCoordinatorResponseData.Coordinator()
.setErrorCode(error.code())
.setErrorMessage(error.message())
.setKey(key)
.setHost(node.host())
.setPort(node.port())
.setNodeId(node.id())));
return new FindCoordinatorResponse(data);
}
// Builds a batched (v >= 4) response marking every requested key with the same
// error and the placeholder "no node" coordinator.
public static FindCoordinatorResponse prepareErrorResponse(Errors error, List<String> keys) {
FindCoordinatorResponseData data = new FindCoordinatorResponseData();
List<FindCoordinatorResponseData.Coordinator> coordinators = new ArrayList<>(keys.size());
for (String key : keys) {
FindCoordinatorResponseData.Coordinator coordinator = new FindCoordinatorResponseData.Coordinator()
.setErrorCode(error.code())
.setErrorMessage(error.message())
.setKey(key)
.setHost(Node.noNode().host())
.setPort(Node.noNode().port())
.setNodeId(Node.noNode().id());
coordinators.add(coordinator);
}
data.setCoordinators(coordinators);
return new FindCoordinatorResponse(data);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/HeartbeatRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.HeartbeatRequestData;
import org.apache.kafka.common.message.HeartbeatResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
/**
 * Request sent by a group member to the coordinator to keep its session alive.
 */
public class HeartbeatRequest extends AbstractRequest {
    private final HeartbeatRequestData data;

    private HeartbeatRequest(HeartbeatRequestData data, short version) {
        super(ApiKeys.HEARTBEAT, version);
        this.data = data;
    }

    public static HeartbeatRequest parse(ByteBuffer buffer, short version) {
        return new HeartbeatRequest(new HeartbeatRequestData(new ByteBufferAccessor(buffer), version), version);
    }

    @Override
    public HeartbeatRequestData data() {
        return data;
    }

    /**
     * Builds an error response carrying the error mapped from {@code e}; the throttle
     * time is only set for versions that support it (v1 and above).
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        HeartbeatResponseData responseData = new HeartbeatResponseData()
            .setErrorCode(Errors.forException(e).code());
        if (version() >= 1) {
            responseData.setThrottleTimeMs(throttleTimeMs);
        }
        return new HeartbeatResponse(responseData);
    }

    public static class Builder extends AbstractRequest.Builder<HeartbeatRequest> {
        private final HeartbeatRequestData data;

        public Builder(HeartbeatRequestData data) {
            super(ApiKeys.HEARTBEAT);
            this.data = data;
        }

        /**
         * Builds the request, rejecting versions older than 3 when a static member id
         * (group.instance.id) has been set.
         */
        @Override
        public HeartbeatRequest build(short version) {
            if (version < 3 && data.groupInstanceId() != null) {
                throw new UnsupportedVersionException("The broker heartbeat protocol version " +
                    version + " does not support usage of config group.instance.id.");
            }
            return new HeartbeatRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/HeartbeatResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.HeartbeatResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
/**
 * Response to a Heartbeat request.
 *
 * Possible error codes:
 *
 * GROUP_COORDINATOR_NOT_AVAILABLE (15)
 * NOT_COORDINATOR (16)
 * ILLEGAL_GENERATION (22)
 * UNKNOWN_MEMBER_ID (25)
 * REBALANCE_IN_PROGRESS (27)
 * GROUP_AUTHORIZATION_FAILED (30)
 */
public class HeartbeatResponse extends AbstractResponse {
    private final HeartbeatResponseData data;

    public HeartbeatResponse(HeartbeatResponseData data) {
        super(ApiKeys.HEARTBEAT);
        this.data = data;
    }

    public static HeartbeatResponse parse(ByteBuffer buffer, short version) {
        return new HeartbeatResponse(new HeartbeatResponseData(new ByteBufferAccessor(buffer), version));
    }

    @Override
    public HeartbeatResponseData data() {
        return data;
    }

    /** Returns the error carried by this response. */
    public Errors error() {
        return Errors.forCode(data.errorCode());
    }

    // A heartbeat carries a single error, so the tally is a singleton count.
    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(error());
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    // Throttle time is communicated to clients starting with version 2.
    @Override
    public boolean shouldClientThrottle(short version) {
        return version >= 2;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/IncrementalAlterConfigsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.clients.admin.AlterConfigOp;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData;
import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.AlterConfigsResource;
import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData;
import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Map;
/**
 * Request to incrementally alter (set/delete/append/subtract) broker, topic, or other
 * config resources.
 */
public class IncrementalAlterConfigsRequest extends AbstractRequest {
    public static class Builder extends AbstractRequest.Builder<IncrementalAlterConfigsRequest> {
        private final IncrementalAlterConfigsRequestData data;

        public Builder(IncrementalAlterConfigsRequestData data) {
            super(ApiKeys.INCREMENTAL_ALTER_CONFIGS);
            this.data = data;
        }

        /**
         * Builds request data from the given resources and their alter operations.
         *
         * @param resources the config resources to alter; each must have an entry in {@code configs}
         * @param configs the operations to apply, keyed by resource
         * @param validateOnly if true the broker validates the request without applying it
         */
        public Builder(final Collection<ConfigResource> resources,
                       final Map<ConfigResource, Collection<AlterConfigOp>> configs,
                       final boolean validateOnly) {
            super(ApiKeys.INCREMENTAL_ALTER_CONFIGS);
            this.data = new IncrementalAlterConfigsRequestData()
                .setValidateOnly(validateOnly);
            for (ConfigResource resource : resources) {
                IncrementalAlterConfigsRequestData.AlterableConfigCollection alterableConfigSet =
                    new IncrementalAlterConfigsRequestData.AlterableConfigCollection();
                // NOTE(review): throws NullPointerException when a resource has no entry in
                // `configs`; callers are expected to pass matching collections.
                for (AlterConfigOp configEntry : configs.get(resource)) {
                    alterableConfigSet.add(new IncrementalAlterConfigsRequestData.AlterableConfig()
                        .setName(configEntry.configEntry().name())
                        .setValue(configEntry.configEntry().value())
                        .setConfigOperation(configEntry.opType().id()));
                }
                IncrementalAlterConfigsRequestData.AlterConfigsResource alterConfigsResource =
                    new IncrementalAlterConfigsRequestData.AlterConfigsResource();
                alterConfigsResource.setResourceType(resource.type().id())
                    .setResourceName(resource.name())
                    .setConfigs(alterableConfigSet);
                data.resources().add(alterConfigsResource);
            }
        }

        public Builder(final Map<ConfigResource, Collection<AlterConfigOp>> configs,
                       final boolean validateOnly) {
            this(configs.keySet(), configs, validateOnly);
        }

        @Override
        public IncrementalAlterConfigsRequest build(short version) {
            return new IncrementalAlterConfigsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final IncrementalAlterConfigsRequestData data;

    public IncrementalAlterConfigsRequest(IncrementalAlterConfigsRequestData data, short version) {
        super(ApiKeys.INCREMENTAL_ALTER_CONFIGS, version);
        this.data = data;
        // The version is already captured by AbstractRequest (see version()); the
        // redundant private copy that was stored but never read has been removed.
    }

    public static IncrementalAlterConfigsRequest parse(ByteBuffer buffer, short version) {
        return new IncrementalAlterConfigsRequest(new IncrementalAlterConfigsRequestData(
            new ByteBufferAccessor(buffer), version), version);
    }

    @Override
    public IncrementalAlterConfigsRequestData data() {
        return data;
    }

    /**
     * Builds an error response that marks every requested resource with the error
     * mapped from {@code e}.
     */
    @Override
    public AbstractResponse getErrorResponse(final int throttleTimeMs, final Throwable e) {
        IncrementalAlterConfigsResponseData response = new IncrementalAlterConfigsResponseData();
        ApiError apiError = ApiError.fromThrowable(e);
        for (AlterConfigsResource resource : data.resources()) {
            response.responses().add(new AlterConfigsResourceResponse()
                .setResourceName(resource.resourceName())
                .setResourceType(resource.resourceType())
                .setErrorCode(apiError.error().code())
                .setErrorMessage(apiError.message()));
        }
        return new IncrementalAlterConfigsResponse(response);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/IncrementalAlterConfigsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData;
import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class IncrementalAlterConfigsResponse extends AbstractResponse {
public IncrementalAlterConfigsResponse(final int requestThrottleMs,
final Map<ConfigResource, ApiError> results) {
super(ApiKeys.INCREMENTAL_ALTER_CONFIGS);
final List<AlterConfigsResourceResponse> newResults = new ArrayList<>(results.size());
results.forEach(
(resource, error) -> newResults.add(
new AlterConfigsResourceResponse()
.setErrorCode(error.error().code())
.setErrorMessage(error.message())
.setResourceName(resource.name())
.setResourceType(resource.type().id()))
);
this.data = new IncrementalAlterConfigsResponseData()
.setResponses(newResults)
.setThrottleTimeMs(requestThrottleMs);
}
public static Map<ConfigResource, ApiError> fromResponseData(final IncrementalAlterConfigsResponseData data) {
Map<ConfigResource, ApiError> map = new HashMap<>();
for (AlterConfigsResourceResponse response : data.responses()) {
map.put(new ConfigResource(ConfigResource.Type.forId(response.resourceType()), response.resourceName()),
new ApiError(Errors.forCode(response.errorCode()), response.errorMessage()));
}
return map;
}
private final IncrementalAlterConfigsResponseData data;
public IncrementalAlterConfigsResponse(IncrementalAlterConfigsResponseData data) {
super(ApiKeys.INCREMENTAL_ALTER_CONFIGS);
this.data = data;
}
@Override
public IncrementalAlterConfigsResponseData data() {
return data;
}
@Override
public Map<Errors, Integer> errorCounts() {
HashMap<Errors, Integer> counts = new HashMap<>();
data.responses().forEach(response ->
updateErrorCounts(counts, Errors.forCode(response.errorCode()))
);
return counts;
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 0;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
public static IncrementalAlterConfigsResponse parse(ByteBuffer buffer, short version) {
return new IncrementalAlterConfigsResponse(new IncrementalAlterConfigsResponseData(
new ByteBufferAccessor(buffer), version));
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/InitProducerIdRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.InitProducerIdRequestData;
import org.apache.kafka.common.message.InitProducerIdResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.RecordBatch;
import java.nio.ByteBuffer;
public class InitProducerIdRequest extends AbstractRequest {

    public static class Builder extends AbstractRequest.Builder<InitProducerIdRequest> {
        public final InitProducerIdRequestData data;

        public Builder(InitProducerIdRequestData data) {
            super(ApiKeys.INIT_PRODUCER_ID);
            this.data = data;
        }

        /**
         * Validates the request data and builds the request for the given version.
         *
         * @throws IllegalArgumentException if the transaction timeout is not positive
         *         or the transactional id is empty (it must be null or non-empty)
         */
        @Override
        public InitProducerIdRequest build(short version) {
            if (data.transactionTimeoutMs() <= 0) {
                throw new IllegalArgumentException("transaction timeout value is not positive: " + data.transactionTimeoutMs());
            }
            final String transactionalId = data.transactionalId();
            if (transactionalId != null && transactionalId.isEmpty()) {
                throw new IllegalArgumentException("Must set either a null or a non-empty transactional id.");
            }
            return new InitProducerIdRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final InitProducerIdRequestData data;

    private InitProducerIdRequest(InitProducerIdRequestData data, short version) {
        super(ApiKeys.INIT_PRODUCER_ID, version);
        this.data = data;
    }

    @Override
    public InitProducerIdRequestData data() {
        return data;
    }

    /**
     * Builds an error response that carries the exception's error code and no
     * valid producer id/epoch.
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        final InitProducerIdResponseData response = new InitProducerIdResponseData()
            .setErrorCode(Errors.forException(e).code())
            .setProducerId(RecordBatch.NO_PRODUCER_ID)
            .setProducerEpoch(RecordBatch.NO_PRODUCER_EPOCH)
            .setThrottleTimeMs(0);
        return new InitProducerIdResponse(response);
    }

    public static InitProducerIdRequest parse(ByteBuffer buffer, short version) {
        return new InitProducerIdRequest(new InitProducerIdRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/InitProducerIdResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.InitProducerIdResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
/**
* Possible error codes:
*
* - {@link Errors#NOT_COORDINATOR}
* - {@link Errors#COORDINATOR_NOT_AVAILABLE}
* - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS}
* - {@link Errors#TRANSACTIONAL_ID_AUTHORIZATION_FAILED}
* - {@link Errors#CLUSTER_AUTHORIZATION_FAILED}
* - {@link Errors#INVALID_PRODUCER_EPOCH} // for version <=3
* - {@link Errors#PRODUCER_FENCED}
*/
public class InitProducerIdResponse extends AbstractResponse {

    private final InitProducerIdResponseData data;

    public InitProducerIdResponse(InitProducerIdResponseData data) {
        super(ApiKeys.INIT_PRODUCER_ID);
        this.data = data;
    }

    @Override
    public InitProducerIdResponseData data() {
        return data;
    }

    /** The top-level error of this response. */
    public Errors error() {
        return Errors.forCode(data.errorCode());
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(error());
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        // Throttle time was added to this API in version 1.
        return version >= 1;
    }

    public static InitProducerIdResponse parse(ByteBuffer buffer, short version) {
        return new InitProducerIdResponse(new InitProducerIdResponseData(new ByteBufferAccessor(buffer), version));
    }

    @Override
    public String toString() {
        return data.toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/JoinGroupRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.errors.InvalidConfigurationException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.internals.Topic;
import org.apache.kafka.common.message.JoinGroupRequestData;
import org.apache.kafka.common.message.JoinGroupResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
public class JoinGroupRequest extends AbstractRequest {

    public static class Builder extends AbstractRequest.Builder<JoinGroupRequest> {
        private final JoinGroupRequestData data;

        public Builder(JoinGroupRequestData data) {
            super(ApiKeys.JOIN_GROUP);
            this.data = data;
        }

        /**
         * @throws UnsupportedVersionException if a group instance id (static membership)
         *         is set but the version is below 5, which cannot carry it
         */
        @Override
        public JoinGroupRequest build(short version) {
            if (data.groupInstanceId() != null && version < 5) {
                throw new UnsupportedVersionException("The broker join group protocol version " +
                    version + " does not support usage of config group.instance.id.");
            }
            return new JoinGroupRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final JoinGroupRequestData data;

    public static final String UNKNOWN_MEMBER_ID = "";
    public static final int UNKNOWN_GENERATION_ID = -1;
    public static final String UNKNOWN_PROTOCOL_NAME = "";

    /**
     * Ported from class Topic in {@link org.apache.kafka.common.internals} to restrict the charset for
     * static member id.
     *
     * @throws InvalidConfigurationException if the id fails topic-style validation
     */
    public static void validateGroupInstanceId(String id) {
        Topic.validate(id, "Group instance id", message -> {
            throw new InvalidConfigurationException(message);
        });
    }

    /**
     * Ensures that the provided {@code reason} remains within a range of 255 chars.
     * @param reason This is the reason that is sent to the broker over the wire
     *               as a part of {@code JoinGroupRequest} or {@code LeaveGroupRequest} messages.
     *               Must be non-null.
     * @return a provided reason as is or truncated reason if it exceeds the 255 chars threshold.
     */
    public static String maybeTruncateReason(final String reason) {
        if (reason.length() > 255) {
            return reason.substring(0, 255);
        } else {
            return reason;
        }
    }

    public JoinGroupRequest(JoinGroupRequestData data, short version) {
        super(ApiKeys.JOIN_GROUP, version);
        this.data = data;
        maybeOverrideRebalanceTimeout(version);
    }

    private void maybeOverrideRebalanceTimeout(short version) {
        if (version == 0) {
            // Version 0 has no rebalance timeout, so we use the session timeout
            // to be consistent with the original behavior of the API.
            data.setRebalanceTimeoutMs(data.sessionTimeoutMs());
        }
    }

    @Override
    public JoinGroupRequestData data() {
        return data;
    }

    /**
     * Builds an error response populated with unknown member/leader/generation markers.
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        JoinGroupResponseData data = new JoinGroupResponseData()
            .setThrottleTimeMs(throttleTimeMs)
            .setErrorCode(Errors.forException(e).code())
            .setGenerationId(UNKNOWN_GENERATION_ID)
            // Only version 7 and above supports a nullable protocol name;
            // older versions must use the empty string. (Previously the field
            // was set twice: once in this chain and again afterwards.)
            .setProtocolName(version() >= 7 ? null : UNKNOWN_PROTOCOL_NAME)
            .setLeader(UNKNOWN_MEMBER_ID)
            .setMemberId(UNKNOWN_MEMBER_ID)
            .setMembers(Collections.emptyList());
        return new JoinGroupResponse(data, version());
    }

    public static JoinGroupRequest parse(ByteBuffer buffer, short version) {
        return new JoinGroupRequest(new JoinGroupRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/JoinGroupResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.JoinGroupResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
public class JoinGroupResponse extends AbstractResponse {

    private final JoinGroupResponseData data;

    public JoinGroupResponse(JoinGroupResponseData data, short version) {
        super(ApiKeys.JOIN_GROUP);
        this.data = data;
        // All versions prior to version 7 do not support nullable
        // string for the protocol name. Empty string should be used.
        if (version < 7 && data.protocolName() == null) {
            data.setProtocolName("");
        }
    }

    @Override
    public JoinGroupResponseData data() {
        return data;
    }

    /** Whether the member that received this response was elected group leader. */
    public boolean isLeader() {
        return data.memberId().equals(data.leader());
    }

    /** The top-level error of this response. */
    public Errors error() {
        return Errors.forCode(data.errorCode());
    }

    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(error());
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    @Override
    public boolean shouldClientThrottle(short version) {
        // Throttle time was added to this API in version 3.
        return version >= 3;
    }

    public static JoinGroupResponse parse(ByteBuffer buffer, short version) {
        return new JoinGroupResponse(new JoinGroupResponseData(new ByteBufferAccessor(buffer), version), version);
    }

    @Override
    public String toString() {
        return data.toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/LeaderAndIsrRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.LeaderAndIsrRequestData;
import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrLiveLeader;
import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrTopicState;
import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState;
import org.apache.kafka.common.message.LeaderAndIsrResponseData;
import org.apache.kafka.common.message.LeaderAndIsrResponseData.LeaderAndIsrTopicError;
import org.apache.kafka.common.message.LeaderAndIsrResponseData.LeaderAndIsrPartitionError;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.utils.FlattenedIterator;
import org.apache.kafka.common.utils.Utils;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
public class LeaderAndIsrRequest extends AbstractControlRequest {

    public static class Builder extends AbstractControlRequest.Builder<LeaderAndIsrRequest> {
        private final List<LeaderAndIsrPartitionState> partitionStates;
        private final Map<String, Uuid> topicIds;
        private final Collection<Node> liveLeaders;

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
                       List<LeaderAndIsrPartitionState> partitionStates, Map<String, Uuid> topicIds,
                       Collection<Node> liveLeaders) {
            this(version, controllerId, controllerEpoch, brokerEpoch, partitionStates, topicIds,
                liveLeaders, false);
        }

        public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
                       List<LeaderAndIsrPartitionState> partitionStates, Map<String, Uuid> topicIds,
                       Collection<Node> liveLeaders, boolean kraftController) {
            super(ApiKeys.LEADER_AND_ISR, version, controllerId, controllerEpoch, brokerEpoch, kraftController);
            this.partitionStates = partitionStates;
            this.topicIds = topicIds;
            this.liveLeaders = liveLeaders;
        }

        @Override
        public LeaderAndIsrRequest build(short version) {
            List<LeaderAndIsrLiveLeader> leaders = liveLeaders.stream().map(n -> new LeaderAndIsrLiveLeader()
                .setBrokerId(n.id())
                .setHostName(n.host())
                .setPort(n.port())
            ).collect(Collectors.toList());
            LeaderAndIsrRequestData data = new LeaderAndIsrRequestData()
                .setControllerId(controllerId)
                .setControllerEpoch(controllerEpoch)
                .setBrokerEpoch(brokerEpoch)
                .setLiveLeaders(leaders);
            // The KRaft controller flag only exists on the wire from version 7.
            if (version >= 7) {
                data.setIsKRaftController(kraftController);
            }
            // From version 2 on, partition states are grouped per topic on the wire.
            if (version >= 2) {
                Map<String, LeaderAndIsrTopicState> topicStatesMap = groupByTopic(partitionStates, topicIds);
                data.setTopicStates(new ArrayList<>(topicStatesMap.values()));
            } else {
                data.setUngroupedPartitionStates(partitionStates);
            }
            return new LeaderAndIsrRequest(data, version);
        }

        /**
         * Groups the flat partition-state list into one topic state per topic name,
         * attaching the topic's id (or {@code Uuid.ZERO_UUID} when unknown).
         */
        private static Map<String, LeaderAndIsrTopicState> groupByTopic(List<LeaderAndIsrPartitionState> partitionStates, Map<String, Uuid> topicIds) {
            Map<String, LeaderAndIsrTopicState> topicStates = new HashMap<>();
            // We don't null out the topic name in LeaderAndIsrRequestPartition since it's ignored by
            // the generated code if version >= 2
            for (LeaderAndIsrPartitionState partition : partitionStates) {
                LeaderAndIsrTopicState topicState = topicStates.computeIfAbsent(partition.topicName(), t -> new LeaderAndIsrTopicState()
                    .setTopicName(partition.topicName())
                    .setTopicId(topicIds.getOrDefault(partition.topicName(), Uuid.ZERO_UUID)));
                topicState.partitionStates().add(partition);
            }
            return topicStates;
        }

        @Override
        public String toString() {
            StringBuilder bld = new StringBuilder();
            // Fixed typo: was "LeaderAndIsRequest" (missing the 'r' in "Isr").
            bld.append("(type=LeaderAndIsrRequest")
                .append(", controllerId=").append(controllerId)
                .append(", controllerEpoch=").append(controllerEpoch)
                .append(", brokerEpoch=").append(brokerEpoch)
                .append(", partitionStates=").append(partitionStates)
                .append(", topicIds=").append(topicIds)
                .append(", liveLeaders=(").append(Utils.join(liveLeaders, ", ")).append(")")
                .append(")");
            return bld.toString();
        }
    }

    private final LeaderAndIsrRequestData data;

    LeaderAndIsrRequest(LeaderAndIsrRequestData data, short version) {
        super(ApiKeys.LEADER_AND_ISR, version);
        this.data = data;
        // Do this from the constructor to make it thread-safe (even though it's only needed when some methods are called)
        normalize();
    }

    /**
     * For grouped (version >= 2) data, copies each topic's name down into its
     * partition states so callers can always use the ungrouped view.
     */
    private void normalize() {
        if (version() >= 2) {
            for (LeaderAndIsrTopicState topicState : data.topicStates()) {
                for (LeaderAndIsrPartitionState partitionState : topicState.partitionStates()) {
                    // Set the topic name so that we can always present the ungrouped view to callers
                    partitionState.setTopicName(topicState.topicName());
                }
            }
        }
    }

    /**
     * Builds an error response mirroring this request's partitions, each carrying
     * the error derived from {@code e}. Versions below 5 use the flat per-partition
     * form; newer versions group errors per topic id.
     */
    @Override
    public LeaderAndIsrResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        LeaderAndIsrResponseData responseData = new LeaderAndIsrResponseData();
        Errors error = Errors.forException(e);
        responseData.setErrorCode(error.code());
        if (version() < 5) {
            List<LeaderAndIsrPartitionError> partitions = new ArrayList<>();
            for (LeaderAndIsrPartitionState partition : partitionStates()) {
                partitions.add(new LeaderAndIsrPartitionError()
                    .setTopicName(partition.topicName())
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(error.code()));
            }
            responseData.setPartitionErrors(partitions);
        } else {
            for (LeaderAndIsrTopicState topicState : data.topicStates()) {
                List<LeaderAndIsrPartitionError> partitions = new ArrayList<>(
                    topicState.partitionStates().size());
                for (LeaderAndIsrPartitionState partition : topicState.partitionStates()) {
                    partitions.add(new LeaderAndIsrPartitionError()
                        .setPartitionIndex(partition.partitionIndex())
                        .setErrorCode(error.code()));
                }
                responseData.topics().add(new LeaderAndIsrTopicError()
                    .setTopicId(topicState.topicId())
                    .setPartitionErrors(partitions));
            }
        }
        return new LeaderAndIsrResponse(responseData, version());
    }

    @Override
    public int controllerId() {
        return data.controllerId();
    }

    @Override
    public boolean isKRaftController() {
        return data.isKRaftController();
    }

    @Override
    public int controllerEpoch() {
        return data.controllerEpoch();
    }

    @Override
    public long brokerEpoch() {
        return data.brokerEpoch();
    }

    /**
     * Ungrouped view over all partition states, regardless of whether the wire
     * format grouped them per topic (version >= 2).
     */
    public Iterable<LeaderAndIsrPartitionState> partitionStates() {
        if (version() >= 2)
            return () -> new FlattenedIterator<>(data.topicStates().iterator(),
                topicState -> topicState.partitionStates().iterator());
        return data.ungroupedPartitionStates();
    }

    /** Topic name to topic id mapping taken from the grouped topic states. */
    public Map<String, Uuid> topicIds() {
        return data.topicStates().stream()
            .collect(Collectors.toMap(LeaderAndIsrTopicState::topicName, LeaderAndIsrTopicState::topicId));
    }

    public List<LeaderAndIsrLiveLeader> liveLeaders() {
        return Collections.unmodifiableList(data.liveLeaders());
    }

    @Override
    public LeaderAndIsrRequestData data() {
        return data;
    }

    public static LeaderAndIsrRequest parse(ByteBuffer buffer, short version) {
        return new LeaderAndIsrRequest(new LeaderAndIsrRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/LeaderAndIsrResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.LeaderAndIsrResponseData;
import org.apache.kafka.common.message.LeaderAndIsrResponseData.LeaderAndIsrTopicError;
import org.apache.kafka.common.message.LeaderAndIsrResponseData.LeaderAndIsrTopicErrorCollection;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
public class LeaderAndIsrResponse extends AbstractResponse {
/**
* Possible error code:
*
* STALE_CONTROLLER_EPOCH (11)
* STALE_BROKER_EPOCH (77)
*/
private final LeaderAndIsrResponseData data;
// Wire version this response was built/parsed with. It decides whether
// partition errors are read from the flat partitionErrors() list (< 5)
// or from the per-topic topics() collection (>= 5).
private final short version;
public LeaderAndIsrResponse(LeaderAndIsrResponseData data, short version) {
super(ApiKeys.LEADER_AND_ISR);
this.data = data;
this.version = version;
}
// Per-topic error collection; populated for version >= 5 responses.
public LeaderAndIsrTopicErrorCollection topics() {
return this.data.topics();
}
// Top-level error of the response.
public Errors error() {
return Errors.forCode(data.errorCode());
}
@Override
public Map<Errors, Integer> errorCounts() {
Errors error = error();
if (error != Errors.NONE) {
// Minor optimization since the top-level error applies to all partitions
// (the "+ 1" counts the top-level error itself in addition to one per partition)
if (version < 5)
return Collections.singletonMap(error, data.partitionErrors().size() + 1);
return Collections.singletonMap(error,
data.topics().stream().mapToInt(t -> t.partitionErrors().size()).sum() + 1);
}
// No top-level error: tally per-partition error codes, using the flat or
// topic-grouped representation depending on the version.
Map<Errors, Integer> errors;
if (version < 5)
errors = errorCounts(data.partitionErrors().stream().map(l -> Errors.forCode(l.errorCode())));
else
errors = errorCounts(data.topics().stream().flatMap(t -> t.partitionErrors().stream()).map(l ->
Errors.forCode(l.errorCode())));
// Also count the implicit NONE contributed by the top-level error code.
updateErrorCounts(errors, Errors.NONE);
return errors;
}
/**
* Flattens the per-partition errors into a map keyed by topic-partition.
*
* @param topicNames topic id to name mapping, needed because version >= 5
* responses identify topics by id only; topics whose id is
* absent from the map are silently skipped
*/
public Map<TopicPartition, Errors> partitionErrors(Map<Uuid, String> topicNames) {
Map<TopicPartition, Errors> errors = new HashMap<>();
if (version < 5) {
data.partitionErrors().forEach(partition ->
errors.put(new TopicPartition(partition.topicName(), partition.partitionIndex()),
Errors.forCode(partition.errorCode())));
} else {
for (LeaderAndIsrTopicError topic : data.topics()) {
String topicName = topicNames.get(topic.topicId());
if (topicName != null) {
topic.partitionErrors().forEach(partition ->
errors.put(new TopicPartition(topicName, partition.partitionIndex()),
Errors.forCode(partition.errorCode())));
}
}
}
return errors;
}
// The LeaderAndIsr response schema has no throttle time field.
@Override
public int throttleTimeMs() {
return DEFAULT_THROTTLE_TIME;
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
// Not supported by the response schema
}
public static LeaderAndIsrResponse parse(ByteBuffer buffer, short version) {
return new LeaderAndIsrResponse(new LeaderAndIsrResponseData(new ByteBufferAccessor(buffer), version), version);
}
@Override
public LeaderAndIsrResponseData data() {
return data;
}
@Override
public String toString() {
return data.toString();
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/LeaveGroupRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.LeaveGroupRequestData;
import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity;
import org.apache.kafka.common.message.LeaveGroupResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.MessageUtil;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
public class LeaveGroupRequest extends AbstractRequest {

    public static class Builder extends AbstractRequest.Builder<LeaveGroupRequest> {
        private final String groupId;
        private final List<MemberIdentity> members;

        public Builder(String groupId, List<MemberIdentity> members) {
            this(groupId, members, ApiKeys.LEAVE_GROUP.oldestVersion(), ApiKeys.LEAVE_GROUP.latestVersion());
        }

        Builder(String groupId, List<MemberIdentity> members, short oldestVersion, short latestVersion) {
            super(ApiKeys.LEAVE_GROUP, oldestVersion, latestVersion);
            this.groupId = groupId;
            this.members = members;
            if (members.isEmpty()) {
                throw new IllegalArgumentException("leaving members should not be empty");
            }
        }

        /**
         * Based on the request version to choose fields.
         */
        @Override
        public LeaveGroupRequest build(short version) {
            final LeaveGroupRequestData data;
            // Starting from version 3, all the leave group request will be in batch.
            if (version < 3) {
                if (members.size() != 1) {
                    throw new UnsupportedVersionException("Version " + version + " leave group request only " +
                        "supports single member instance than " + members.size() + " members");
                }
                data = new LeaveGroupRequestData()
                    .setGroupId(groupId)
                    .setMemberId(members.get(0).memberId());
            } else {
                data = new LeaveGroupRequestData()
                    .setGroupId(groupId)
                    .setMembers(members);
            }
            return new LeaveGroupRequest(data, version);
        }

        @Override
        public String toString() {
            return "(type=LeaveGroupRequest" +
                ", groupId=" + groupId +
                ", members=" + MessageUtil.deepToString(members.iterator()) +
                ")";
        }
    }

    private final LeaveGroupRequestData data;

    private LeaveGroupRequest(LeaveGroupRequestData data, short version) {
        super(ApiKeys.LEAVE_GROUP, version);
        this.data = data;
    }

    @Override
    public LeaveGroupRequestData data() {
        return data;
    }

    /**
     * Returns the request data in the batched (version 3+) shape, regardless of
     * which version this request was actually built for.
     */
    public LeaveGroupRequestData normalizedData() {
        if (version() < 3) {
            return new LeaveGroupRequestData()
                .setGroupId(data.groupId())
                .setMembers(Collections.singletonList(
                    new MemberIdentity().setMemberId(data.memberId())));
        }
        return data;
    }

    public List<MemberIdentity> members() {
        // Before version 3, leave group request is still in single mode
        if (version() <= 2) {
            return Collections.singletonList(new MemberIdentity().setMemberId(data.memberId()));
        }
        return data.members();
    }

    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        LeaveGroupResponseData responseData = new LeaveGroupResponseData()
            .setErrorCode(Errors.forException(e).code());
        // Throttle time exists on the wire only from version 1.
        if (version() >= 1) {
            responseData.setThrottleTimeMs(throttleTimeMs);
        }
        return new LeaveGroupResponse(responseData);
    }

    public static LeaveGroupRequest parse(ByteBuffer buffer, short version) {
        return new LeaveGroupRequest(new LeaveGroupRequestData(new ByteBufferAccessor(buffer), version), version);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/LeaveGroupResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.LeaveGroupResponseData;
import org.apache.kafka.common.message.LeaveGroupResponseData.MemberResponse;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
* Possible error codes.
*
* Top level errors:
* - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS}
* - {@link Errors#COORDINATOR_NOT_AVAILABLE}
* - {@link Errors#NOT_COORDINATOR}
* - {@link Errors#GROUP_AUTHORIZATION_FAILED}
*
* Member level errors:
* - {@link Errors#FENCED_INSTANCE_ID}
* - {@link Errors#UNKNOWN_MEMBER_ID}
*
* If the top level error code is set, normally this indicates that broker early stops the request
* handling due to some severe global error, so it is expected to see the member level errors to be empty.
* For older version response, we may populate member level error towards top level because older client
* couldn't parse member level.
*/
public class LeaveGroupResponse extends AbstractResponse {
// Auto-generated wire message holding top-level error, throttle time and per-member results.
private final LeaveGroupResponseData data;
/**
 * Wraps an already-assembled response body as-is, with no version-specific normalization.
 */
public LeaveGroupResponse(LeaveGroupResponseData data) {
super(ApiKeys.LEAVE_GROUP);
this.data = data;
}
/**
 * Wraps a response body, normalizing it for the given protocol version. Versions >= 3 carry
 * member-level results verbatim; older versions can only express a single top-level error
 * code, so the (single) member's error is folded into the top level via {@link #getError}.
 *
 * @throws UnsupportedVersionException if version < 3 and the body holds more than one member
 */
public LeaveGroupResponse(LeaveGroupResponseData data, short version) {
super(ApiKeys.LEAVE_GROUP);
if (version >= 3) {
this.data = data;
} else {
// Pre-v3 LeaveGroup carried exactly one member per request/response.
if (data.members().size() != 1) {
throw new UnsupportedVersionException("LeaveGroup response version " + version +
" can only contain one member, got " + data.members().size() + " members.");
}
Errors topLevelError = Errors.forCode(data.errorCode());
short errorCode = getError(topLevelError, data.members()).code();
this.data = new LeaveGroupResponseData().setErrorCode(errorCode);
}
}
/**
 * Builds a response from individual member results plus a top-level error. For version <= 2
 * the member-level error is promoted to the top level because older clients cannot parse
 * member-level errors (see class javadoc); throttle time exists on the wire from v1 onwards.
 */
public LeaveGroupResponse(List<MemberResponse> memberResponses,
Errors topLevelError,
final int throttleTimeMs,
final short version) {
super(ApiKeys.LEAVE_GROUP);
if (version <= 2) {
// Populate member level error.
final short errorCode = getError(topLevelError, memberResponses).code();
this.data = new LeaveGroupResponseData()
.setErrorCode(errorCode);
} else {
this.data = new LeaveGroupResponseData()
.setErrorCode(topLevelError.code())
.setMembers(memberResponses);
}
if (version >= 1) {
this.data.setThrottleTimeMs(throttleTimeMs);
}
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
/** @return the per-member results (empty when the body was down-converted to pre-v3). */
public List<MemberResponse> memberResponses() {
return data.members();
}
/**
 * @return the top-level error if set, otherwise the first non-NONE member-level error,
 *         otherwise {@link Errors#NONE}
 */
public Errors error() {
return getError(Errors.forCode(data.errorCode()), data.members());
}
/** @return the top-level error code only, ignoring member-level errors. */
public Errors topLevelError() {
return Errors.forCode(data.errorCode());
}
// Collapses top-level plus member-level errors into one representative error: the top-level
// error wins; otherwise the first non-NONE member-level error in list order; else NONE.
private static Errors getError(Errors topLevelError, List<MemberResponse> memberResponses) {
if (topLevelError != Errors.NONE) {
return topLevelError;
} else {
for (MemberResponse memberResponse : memberResponses) {
Errors memberError = Errors.forCode(memberResponse.errorCode());
if (memberError != Errors.NONE) {
return memberError;
}
}
return Errors.NONE;
}
}
/** Counts the top-level error and every member-level error (NONE included). */
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> combinedErrorCounts = new HashMap<>();
// Top level error.
updateErrorCounts(combinedErrorCounts, Errors.forCode(data.errorCode()));
// Member level error.
data.members().forEach(memberResponse -> {
updateErrorCounts(combinedErrorCounts, Errors.forCode(memberResponse.errorCode()));
});
return combinedErrorCounts;
}
@Override
public LeaveGroupResponseData data() {
return data;
}
/** Deserializes a response of the given version from {@code buffer}. */
public static LeaveGroupResponse parse(ByteBuffer buffer, short version) {
return new LeaveGroupResponse(new LeaveGroupResponseData(new ByteBufferAccessor(buffer), version));
}
/** Quota violations are signalled via throttle time from version 2 onwards. */
@Override
public boolean shouldClientThrottle(short version) {
return version >= 2;
}
@Override
public boolean equals(Object other) {
return other instanceof LeaveGroupResponse &&
((LeaveGroupResponse) other).data.equals(this.data);
}
@Override
public int hashCode() {
return Objects.hashCode(data);
}
@Override
public String toString() {
return data.toString();
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ListGroupsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.ListGroupsRequestData;
import org.apache.kafka.common.message.ListGroupsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
/**
* Possible error codes:
*
* COORDINATOR_LOAD_IN_PROGRESS (14)
* COORDINATOR_NOT_AVAILABLE (15)
* AUTHORIZATION_FAILED (29)
*/
public class ListGroupsRequest extends AbstractRequest {

    /** Builder for {@link ListGroupsRequest}; validates version-dependent fields at build time. */
    public static class Builder extends AbstractRequest.Builder<ListGroupsRequest> {

        private final ListGroupsRequestData data;

        public Builder(ListGroupsRequestData data) {
            super(ApiKeys.LIST_GROUPS);
            this.data = data;
        }

        @Override
        public ListGroupsRequest build(short version) {
            // Filtering groups by state is only expressible on the wire from v4 onwards.
            boolean hasStatesFilter = !data.statesFilter().isEmpty();
            if (hasStatesFilter && version < 4) {
                throw new UnsupportedVersionException("The broker only supports ListGroups " +
                    "v" + version + ", but we need v4 or newer to request groups by states.");
            }
            return new ListGroupsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final ListGroupsRequestData data;

    public ListGroupsRequest(ListGroupsRequestData data, short version) {
        super(ApiKeys.LIST_GROUPS, version);
        this.data = data;
    }

    /** Builds an error response carrying no groups and the error mapped from {@code e}. */
    @Override
    public ListGroupsResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        ListGroupsResponseData response = new ListGroupsResponseData()
            .setGroups(Collections.emptyList())
            .setErrorCode(Errors.forException(e).code());
        // Throttle time only exists on the wire from v1 onwards.
        if (version() >= 1) {
            response.setThrottleTimeMs(throttleTimeMs);
        }
        return new ListGroupsResponse(response);
    }

    /** Deserializes a request of the given version from {@code buffer}. */
    public static ListGroupsRequest parse(ByteBuffer buffer, short version) {
        ListGroupsRequestData data = new ListGroupsRequestData(new ByteBufferAccessor(buffer), version);
        return new ListGroupsRequest(data, version);
    }

    @Override
    public ListGroupsRequestData data() {
        return data;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ListGroupsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.ListGroupsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
/**
 * Response for {@link ApiKeys#LIST_GROUPS}: carries the listed groups plus a single
 * top-level error code.
 */
public class ListGroupsResponse extends AbstractResponse {

    private final ListGroupsResponseData data;

    public ListGroupsResponse(ListGroupsResponseData data) {
        super(ApiKeys.LIST_GROUPS);
        this.data = data;
    }

    @Override
    public ListGroupsResponseData data() {
        return data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /** Only the single top-level error code is counted; listed groups carry no per-group errors. */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Errors topLevelError = Errors.forCode(data.errorCode());
        return errorCounts(topLevelError);
    }

    /** Deserializes a response of the given version from {@code buffer}. */
    public static ListGroupsResponse parse(ByteBuffer buffer, short version) {
        ListGroupsResponseData data = new ListGroupsResponseData(new ByteBufferAccessor(buffer), version);
        return new ListGroupsResponse(data);
    }

    /** Quota violations are signalled via throttle time from version 2 onwards. */
    @Override
    public boolean shouldClientThrottle(short version) {
        return version >= 2;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ListOffsetsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.IsolationLevel;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.ListOffsetsRequestData;
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition;
import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsTopic;
import org.apache.kafka.common.message.ListOffsetsResponseData;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
public class ListOffsetsRequest extends AbstractRequest {
// Sentinel timestamps: negative values select a special lookup mode instead of a real time.
public static final long EARLIEST_TIMESTAMP = -2L;
public static final long LATEST_TIMESTAMP = -1L;
public static final long MAX_TIMESTAMP = -3L;
/**
 * Sentinel used to request the earliest message stored in the local log, also called the
 * local-log-start-offset.
 */
public static final long EARLIEST_LOCAL_TIMESTAMP = -4L;
// Sentinel replica ids identifying the kind of caller (not a real broker id).
public static final int CONSUMER_REPLICA_ID = -1;
public static final int DEBUGGING_REPLICA_ID = -2;
private final ListOffsetsRequestData data;
// Partitions that appear more than once in the request; computed at construction time.
private final Set<TopicPartition> duplicatePartitions;
public static class Builder extends AbstractRequest.Builder<ListOffsetsRequest> {
private final ListOffsetsRequestData data;
/** Builder for a follower/debugging replica; always uses READ_UNCOMMITTED isolation. */
public static Builder forReplica(short allowedVersion, int replicaId) {
return new Builder((short) 0, allowedVersion, replicaId, IsolationLevel.READ_UNCOMMITTED);
}
/**
 * Builder for a consumer. The minimum allowed version is raised to the oldest protocol
 * version that can express the requested feature (max-timestamp lookup needs v7,
 * READ_COMMITTED needs v2, timestamp-based lookup needs v1).
 */
public static Builder forConsumer(boolean requireTimestamp, IsolationLevel isolationLevel, boolean requireMaxTimestamp) {
short minVersion = 0;
if (requireMaxTimestamp)
minVersion = 7;
else if (isolationLevel == IsolationLevel.READ_COMMITTED)
minVersion = 2;
else if (requireTimestamp)
minVersion = 1;
return new Builder(minVersion, ApiKeys.LIST_OFFSETS.latestVersion(), CONSUMER_REPLICA_ID, isolationLevel);
}
private Builder(short oldestAllowedVersion,
short latestAllowedVersion,
int replicaId,
IsolationLevel isolationLevel) {
super(ApiKeys.LIST_OFFSETS, oldestAllowedVersion, latestAllowedVersion);
data = new ListOffsetsRequestData()
.setIsolationLevel(isolationLevel.id())
.setReplicaId(replicaId);
}
/** Sets the topics/partitions (with target timestamps) to look up. Returns this builder. */
public Builder setTargetTimes(List<ListOffsetsTopic> topics) {
data.setTopics(topics);
return this;
}
@Override
public ListOffsetsRequest build(short version) {
return new ListOffsetsRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
}
/**
 * Private constructor with a specified version. Also records any topic-partition that occurs
 * more than once in the request body, for later retrieval via {@link #duplicatePartitions()}.
 */
private ListOffsetsRequest(ListOffsetsRequestData data, short version) {
super(ApiKeys.LIST_OFFSETS, version);
this.data = data;
duplicatePartitions = new HashSet<>();
Set<TopicPartition> partitions = new HashSet<>();
for (ListOffsetsTopic topic : data.topics()) {
for (ListOffsetsPartition partition : topic.partitions()) {
TopicPartition tp = new TopicPartition(topic.name(), partition.partitionIndex());
// add() returns false on a repeat, i.e. the partition was already seen.
if (!partitions.add(tp)) {
duplicatePartitions.add(tp);
}
}
}
}
/**
 * Builds an error response mirroring the request's topic/partition structure, with every
 * partition carrying the error code mapped from {@code e}.
 */
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
short versionId = version();
short errorCode = Errors.forException(e).code();
List<ListOffsetsTopicResponse> responses = new ArrayList<>();
for (ListOffsetsTopic topic : data.topics()) {
ListOffsetsTopicResponse topicResponse = new ListOffsetsTopicResponse().setName(topic.name());
List<ListOffsetsPartitionResponse> partitions = new ArrayList<>();
for (ListOffsetsPartition partition : topic.partitions()) {
ListOffsetsPartitionResponse partitionResponse = new ListOffsetsPartitionResponse()
.setErrorCode(errorCode)
.setPartitionIndex(partition.partitionIndex());
// v0 returned a list of offsets per partition; v1+ returns a single offset/timestamp pair.
if (versionId == 0) {
partitionResponse.setOldStyleOffsets(Collections.emptyList());
} else {
partitionResponse.setOffset(ListOffsetsResponse.UNKNOWN_OFFSET)
.setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP);
}
partitions.add(partitionResponse);
}
topicResponse.setPartitions(partitions);
responses.add(topicResponse);
}
ListOffsetsResponseData responseData = new ListOffsetsResponseData()
.setThrottleTimeMs(throttleTimeMs)
.setTopics(responses);
return new ListOffsetsResponse(responseData);
}
@Override
public ListOffsetsRequestData data() {
return data;
}
public int replicaId() {
return data.replicaId();
}
public IsolationLevel isolationLevel() {
return IsolationLevel.forId(data.isolationLevel());
}
public List<ListOffsetsTopic> topics() {
return data.topics();
}
/** @return the topic-partitions that appeared more than once in this request. */
public Set<TopicPartition> duplicatePartitions() {
return duplicatePartitions;
}
/** Deserializes a request of the given version from {@code buffer}. */
public static ListOffsetsRequest parse(ByteBuffer buffer, short version) {
return new ListOffsetsRequest(new ListOffsetsRequestData(new ByteBufferAccessor(buffer), version), version);
}
/** Groups a per-partition timestamp map into the per-topic wire structure. */
public static List<ListOffsetsTopic> toListOffsetsTopics(Map<TopicPartition, ListOffsetsPartition> timestampsToSearch) {
Map<String, ListOffsetsTopic> topics = new HashMap<>();
for (Map.Entry<TopicPartition, ListOffsetsPartition> entry : timestampsToSearch.entrySet()) {
TopicPartition tp = entry.getKey();
ListOffsetsTopic topic = topics.computeIfAbsent(tp.topic(), k -> new ListOffsetsTopic().setName(tp.topic()));
topic.partitions().add(entry.getValue());
}
return new ArrayList<>(topics.values());
}
/** Builds the request body for a single topic-partition lookup. */
public static ListOffsetsTopic singletonRequestData(String topic, int partitionIndex, long timestamp, int maxNumOffsets) {
return new ListOffsetsTopic()
.setName(topic)
.setPartitions(Collections.singletonList(new ListOffsetsPartition()
.setPartitionIndex(partitionIndex)
.setTimestamp(timestamp)
.setMaxNumOffsets(maxNumOffsets)));
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ListOffsetsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.ListOffsetsResponseData;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse;
import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.RecordBatch;
/**
* Possible error codes:
*
* - {@link Errors#UNSUPPORTED_FOR_MESSAGE_FORMAT} If the message format does not support lookup by timestamp
* - {@link Errors#TOPIC_AUTHORIZATION_FAILED} If the user does not have DESCRIBE access to a requested topic
* - {@link Errors#REPLICA_NOT_AVAILABLE} If the request is received by a broker with version < 2.6 which is not a replica
* - {@link Errors#NOT_LEADER_OR_FOLLOWER} If the broker is not a leader or follower and either the provided leader epoch
* matches the known leader epoch on the broker or is empty
* - {@link Errors#FENCED_LEADER_EPOCH} If the epoch is lower than the broker's epoch
* - {@link Errors#UNKNOWN_LEADER_EPOCH} If the epoch is larger than the broker's epoch
* - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION} If the broker does not have metadata for a topic or partition
* - {@link Errors#KAFKA_STORAGE_ERROR} If the log directory for one of the requested partitions is offline
* - {@link Errors#UNKNOWN_SERVER_ERROR} For any unexpected errors
* - {@link Errors#LEADER_NOT_AVAILABLE} The leader's HW has not caught up after recent election (v4 protocol)
* - {@link Errors#OFFSET_NOT_AVAILABLE} The leader's HW has not caught up after recent election (v5+ protocol)
*/
public class ListOffsetsResponse extends AbstractResponse {

    // Sentinel values returned when no offset/timestamp/epoch could be resolved.
    public static final long UNKNOWN_TIMESTAMP = -1L;
    public static final long UNKNOWN_OFFSET = -1L;
    public static final int UNKNOWN_EPOCH = RecordBatch.NO_PARTITION_LEADER_EPOCH;

    private final ListOffsetsResponseData data;

    public ListOffsetsResponse(ListOffsetsResponseData data) {
        super(ApiKeys.LIST_OFFSETS);
        this.data = data;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    @Override
    public ListOffsetsResponseData data() {
        return data;
    }

    public List<ListOffsetsTopicResponse> topics() {
        return data.topics();
    }

    /** Tallies the per-partition error codes across all topics (NONE included). */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Map<Errors, Integer> counts = new HashMap<>();
        for (ListOffsetsTopicResponse topic : data.topics()) {
            for (ListOffsetsPartitionResponse partition : topic.partitions()) {
                updateErrorCounts(counts, Errors.forCode(partition.errorCode()));
            }
        }
        return counts;
    }

    /** Deserializes a response of the given version from {@code buffer}. */
    public static ListOffsetsResponse parse(ByteBuffer buffer, short version) {
        ListOffsetsResponseData data = new ListOffsetsResponseData(new ByteBufferAccessor(buffer), version);
        return new ListOffsetsResponse(data);
    }

    @Override
    public String toString() {
        return data.toString();
    }

    /** Quota violations are signalled via throttle time from version 3 onwards. */
    @Override
    public boolean shouldClientThrottle(short version) {
        return version >= 3;
    }

    /** Builds a single-partition topic response, typically used by brokers and tests. */
    public static ListOffsetsTopicResponse singletonListOffsetsTopicResponse(TopicPartition tp, Errors error, long timestamp, long offset, int epoch) {
        ListOffsetsPartitionResponse partition = new ListOffsetsPartitionResponse()
            .setPartitionIndex(tp.partition())
            .setErrorCode(error.code())
            .setTimestamp(timestamp)
            .setOffset(offset)
            .setLeaderEpoch(epoch);
        return new ListOffsetsTopicResponse()
            .setName(tp.topic())
            .setPartitions(Collections.singletonList(partition));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ListPartitionReassignmentsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.ListPartitionReassignmentsRequestData;
import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData;
import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingPartitionReassignment;
import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingTopicReassignment;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import static org.apache.kafka.common.message.ListPartitionReassignmentsRequestData.ListPartitionReassignmentsTopics;
public class ListPartitionReassignmentsRequest extends AbstractRequest {

    /** Builder for {@link ListPartitionReassignmentsRequest}. */
    public static class Builder extends AbstractRequest.Builder<ListPartitionReassignmentsRequest> {

        private final ListPartitionReassignmentsRequestData data;

        public Builder(ListPartitionReassignmentsRequestData data) {
            super(ApiKeys.LIST_PARTITION_REASSIGNMENTS);
            this.data = data;
        }

        @Override
        public ListPartitionReassignmentsRequest build(short version) {
            return new ListPartitionReassignmentsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    // Never reassigned after construction; declared final for consistency with the
    // other request classes in this package.
    private final ListPartitionReassignmentsRequestData data;

    private ListPartitionReassignmentsRequest(ListPartitionReassignmentsRequestData data, short version) {
        super(ApiKeys.LIST_PARTITION_REASSIGNMENTS, version);
        this.data = data;
    }

    /** Deserializes a request of the given version from {@code buffer}. */
    public static ListPartitionReassignmentsRequest parse(ByteBuffer buffer, short version) {
        return new ListPartitionReassignmentsRequest(new ListPartitionReassignmentsRequestData(
            new ByteBufferAccessor(buffer), version), version);
    }

    @Override
    public ListPartitionReassignmentsRequestData data() {
        return data;
    }

    /**
     * Builds an error response carrying the error/message mapped from {@code e}. When the
     * request named explicit topics (a null topics list means "list all"), the same
     * topic/partition structure is echoed back in the response.
     */
    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        ApiError apiError = ApiError.fromThrowable(e);
        List<OngoingTopicReassignment> ongoingTopicReassignments = new ArrayList<>();
        if (data.topics() != null) {
            for (ListPartitionReassignmentsTopics topic : data.topics()) {
                ongoingTopicReassignments.add(
                    new OngoingTopicReassignment()
                        .setName(topic.name())
                        .setPartitions(topic.partitionIndexes().stream().map(partitionIndex ->
                            new OngoingPartitionReassignment().setPartitionIndex(partitionIndex)).collect(Collectors.toList()))
                );
            }
        }
        ListPartitionReassignmentsResponseData responseData = new ListPartitionReassignmentsResponseData()
            .setTopics(ongoingTopicReassignments)
            .setErrorCode(apiError.error().code())
            .setErrorMessage(apiError.message())
            .setThrottleTimeMs(throttleTimeMs);
        return new ListPartitionReassignmentsResponse(responseData);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ListPartitionReassignmentsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
public class ListPartitionReassignmentsResponse extends AbstractResponse {

    private final ListPartitionReassignmentsResponseData data;

    public ListPartitionReassignmentsResponse(ListPartitionReassignmentsResponseData responseData) {
        super(ApiKeys.LIST_PARTITION_REASSIGNMENTS);
        this.data = responseData;
    }

    /** Deserializes a response of the given version from {@code buffer}. */
    public static ListPartitionReassignmentsResponse parse(ByteBuffer buffer, short version) {
        ListPartitionReassignmentsResponseData data = new ListPartitionReassignmentsResponseData(
            new ByteBufferAccessor(buffer), version);
        return new ListPartitionReassignmentsResponse(data);
    }

    @Override
    public ListPartitionReassignmentsResponseData data() {
        return data;
    }

    /** Every version of this API communicates quota violations via throttle time. */
    @Override
    public boolean shouldClientThrottle(short version) {
        return true;
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }

    /** Only the single top-level error code is counted. */
    @Override
    public Map<Errors, Integer> errorCounts() {
        Errors topLevelError = Errors.forCode(data.errorCode());
        return errorCounts(topLevelError);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ListTransactionsRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.ListTransactionsRequestData;
import org.apache.kafka.common.message.ListTransactionsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
public class ListTransactionsRequest extends AbstractRequest {

    /** Builder for {@link ListTransactionsRequest}. */
    public static class Builder extends AbstractRequest.Builder<ListTransactionsRequest> {
        // NOTE(review): public field, unlike the private `data` in sibling builders;
        // kept public here since narrowing it would break external callers.
        public final ListTransactionsRequestData data;

        public Builder(ListTransactionsRequestData data) {
            super(ApiKeys.LIST_TRANSACTIONS);
            this.data = data;
        }

        @Override
        public ListTransactionsRequest build(short version) {
            return new ListTransactionsRequest(data, version);
        }

        @Override
        public String toString() {
            return data.toString();
        }
    }

    private final ListTransactionsRequestData data;

    private ListTransactionsRequest(ListTransactionsRequestData data, short version) {
        super(ApiKeys.LIST_TRANSACTIONS, version);
        this.data = data;
    }

    public ListTransactionsRequestData data() {
        return data;
    }

    /** Builds an error response carrying only the error mapped from {@code e} and the throttle time. */
    @Override
    public ListTransactionsResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        ListTransactionsResponseData response = new ListTransactionsResponseData()
            .setErrorCode(Errors.forException(e).code())
            .setThrottleTimeMs(throttleTimeMs);
        return new ListTransactionsResponse(response);
    }

    /** Deserializes a request of the given version from {@code buffer}. */
    public static ListTransactionsRequest parse(ByteBuffer buffer, short version) {
        ListTransactionsRequestData data = new ListTransactionsRequestData(
            new ByteBufferAccessor(buffer), version);
        return new ListTransactionsRequest(data, version);
    }

    @Override
    public String toString(boolean verbose) {
        return data.toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ListTransactionsResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.ListTransactionsResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
public class ListTransactionsResponse extends AbstractResponse {

    private final ListTransactionsResponseData data;

    public ListTransactionsResponse(ListTransactionsResponseData data) {
        super(ApiKeys.LIST_TRANSACTIONS);
        this.data = data;
    }

    public ListTransactionsResponseData data() {
        return data;
    }

    /**
     * Only the single top-level error code is counted; delegates to the AbstractResponse
     * single-error helper for consistency with the other response classes in this package
     * (e.g. ListGroupsResponse) instead of hand-building a HashMap.
     */
    @Override
    public Map<Errors, Integer> errorCounts() {
        return errorCounts(Errors.forCode(data.errorCode()));
    }

    /** Deserializes a response of the given version from {@code buffer}. */
    public static ListTransactionsResponse parse(ByteBuffer buffer, short version) {
        return new ListTransactionsResponse(new ListTransactionsResponseData(
            new ByteBufferAccessor(buffer), version));
    }

    @Override
    public String toString() {
        return data.toString();
    }

    @Override
    public int throttleTimeMs() {
        return data.throttleTimeMs();
    }

    @Override
    public void maybeSetThrottleTimeMs(int throttleTimeMs) {
        data.setThrottleTimeMs(throttleTimeMs);
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.