index int64 | repo_id string | file_path string | content string |
|---|---|---|---|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/MetadataRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.MetadataRequestData;
import org.apache.kafka.common.message.MetadataRequestData.MetadataRequestTopic;
import org.apache.kafka.common.message.MetadataResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
public class MetadataRequest extends AbstractRequest {
public static class Builder extends AbstractRequest.Builder<MetadataRequest> {
private static final MetadataRequestData ALL_TOPICS_REQUEST_DATA = new MetadataRequestData().
setTopics(null).setAllowAutoTopicCreation(true);
private final MetadataRequestData data;
public Builder(MetadataRequestData data) {
super(ApiKeys.METADATA);
this.data = data;
}
public Builder(List<String> topics, boolean allowAutoTopicCreation, short allowedVersion) {
this(topics, allowAutoTopicCreation, allowedVersion, allowedVersion);
}
public Builder(List<String> topics, boolean allowAutoTopicCreation, short minVersion, short maxVersion) {
super(ApiKeys.METADATA, minVersion, maxVersion);
MetadataRequestData data = new MetadataRequestData();
if (topics == null)
data.setTopics(null);
else {
topics.forEach(topic -> data.topics().add(new MetadataRequestTopic().setName(topic)));
}
data.setAllowAutoTopicCreation(allowAutoTopicCreation);
this.data = data;
}
public Builder(List<String> topics, boolean allowAutoTopicCreation) {
this(topics, allowAutoTopicCreation, ApiKeys.METADATA.oldestVersion(), ApiKeys.METADATA.latestVersion());
}
public Builder(List<Uuid> topicIds) {
super(ApiKeys.METADATA, ApiKeys.METADATA.oldestVersion(), ApiKeys.METADATA.latestVersion());
MetadataRequestData data = new MetadataRequestData();
if (topicIds == null)
data.setTopics(null);
else {
topicIds.forEach(topicId -> data.topics().add(new MetadataRequestTopic().setTopicId(topicId)));
}
// It's impossible to create a topic by topic ID, so auto-creation is disabled.
data.setAllowAutoTopicCreation(false);
this.data = data;
}
public static Builder allTopics() {
// This never causes auto-creation, but we set the boolean to true because that is the default value when
// deserializing V2 and older. This way, the value is consistent after serialization and deserialization.
return new Builder(ALL_TOPICS_REQUEST_DATA);
}
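// Usage sketch (hedged; the version value is illustrative): request metadata
// for every topic in the cluster.
//   MetadataRequest request = MetadataRequest.Builder.allTopics().build((short) 12);
//   assert request.isAllTopics();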
public boolean emptyTopicList() {
return data.topics().isEmpty();
}
public boolean isAllTopics() {
return data.topics() == null;
}
public List<String> topics() {
return data.topics()
.stream()
.map(MetadataRequestTopic::name)
.collect(Collectors.toList());
}
@Override
public MetadataRequest build(short version) {
if (version < 1)
throw new UnsupportedVersionException("MetadataRequest versions older than 1 are not supported.");
if (!data.allowAutoTopicCreation() && version < 4)
throw new UnsupportedVersionException("MetadataRequest versions older than 4 don't support the " +
"allowAutoTopicCreation field");
if (data.topics() != null) {
data.topics().forEach(topic -> {
if (topic.name() == null && version < 12)
throw new UnsupportedVersionException("MetadataRequest version " + version +
" does not support null topic names.");
if (!Uuid.ZERO_UUID.equals(topic.topicId()) && version < 12)
throw new UnsupportedVersionException("MetadataRequest version " + version +
" does not support non-zero topic IDs.");
});
}
return new MetadataRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
}
private final MetadataRequestData data;
public MetadataRequest(MetadataRequestData data, short version) {
super(ApiKeys.METADATA, version);
this.data = data;
}
@Override
public MetadataRequestData data() {
return data;
}
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
Errors error = Errors.forException(e);
MetadataResponseData responseData = new MetadataResponseData();
if (data.topics() != null) {
for (MetadataRequestTopic topic : data.topics()) {
// the response does not allow null, so convert to empty string if necessary
String topicName = topic.name() == null ? "" : topic.name();
responseData.topics().add(new MetadataResponseData.MetadataResponseTopic()
.setName(topicName)
.setTopicId(topic.topicId())
.setErrorCode(error.code())
.setIsInternal(false)
.setPartitions(Collections.emptyList()));
}
}
responseData.setThrottleTimeMs(throttleTimeMs);
return new MetadataResponse(responseData, true);
}
public boolean isAllTopics() {
return (data.topics() == null) ||
(data.topics().isEmpty() && version() == 0); // In version 0, an empty topic list indicates
// "request metadata for all topics."
}
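// Example (hedged): besides a null topic list, version 0 treats an empty list
// as "all topics", so both of these denote an all-topics request:
//   new MetadataRequest(new MetadataRequestData().setTopics(null), (short) 1)
//   new MetadataRequest(new MetadataRequestData(), (short) 0) // empty list, v0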
public List<String> topics() {
if (isAllTopics()) // In version 0, we return null for an empty topic list
return null;
else
return data.topics()
.stream()
.map(MetadataRequestTopic::name)
.collect(Collectors.toList());
}
public List<Uuid> topicIds() {
if (isAllTopics())
return Collections.emptyList();
else if (version() < 10)
return Collections.emptyList();
else
return data.topics()
.stream()
.map(MetadataRequestTopic::topicId)
.collect(Collectors.toList());
}
public boolean allowAutoTopicCreation() {
return data.allowAutoTopicCreation();
}
public static MetadataRequest parse(ByteBuffer buffer, short version) {
return new MetadataRequest(new MetadataRequestData(new ByteBufferAccessor(buffer), version), version);
}
public static List<MetadataRequestTopic> convertToMetadataRequestTopic(final Collection<String> topics) {
return topics.stream().map(topic -> new MetadataRequestTopic()
.setName(topic))
.collect(Collectors.toList());
}
public static List<MetadataRequestTopic> convertTopicIdsToMetadataRequestTopic(final Collection<Uuid> topicIds) {
return topicIds.stream().map(topicId -> new MetadataRequestTopic()
.setTopicId(topicId))
.collect(Collectors.toList());
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/MetadataResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.message.MetadataResponseData;
import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseBroker;
import org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition;
import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.utils.Utils;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* Possible topic-level error codes:
* UnknownTopic (3)
* LeaderNotAvailable (5)
* InvalidTopic (17)
* TopicAuthorizationFailed (29)
* Possible partition-level error codes:
* LeaderNotAvailable (5)
* ReplicaNotAvailable (9)
*/
public class MetadataResponse extends AbstractResponse {
public static final int NO_CONTROLLER_ID = -1;
public static final int NO_LEADER_ID = -1;
public static final int AUTHORIZED_OPERATIONS_OMITTED = Integer.MIN_VALUE;
private final MetadataResponseData data;
private volatile Holder holder;
private final boolean hasReliableLeaderEpochs;
public MetadataResponse(MetadataResponseData data, short version) {
this(data, hasReliableLeaderEpochs(version));
}
MetadataResponse(MetadataResponseData data, boolean hasReliableLeaderEpochs) {
super(ApiKeys.METADATA);
this.data = data;
this.hasReliableLeaderEpochs = hasReliableLeaderEpochs;
}
@Override
public MetadataResponseData data() {
return data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
/**
* Get a map of the topics which had metadata errors
* @return the map
*/
public Map<String, Errors> errors() {
Map<String, Errors> errors = new HashMap<>();
for (MetadataResponseTopic metadata : data.topics()) {
if (metadata.name() == null) {
throw new IllegalStateException("Use errorsByTopicId() when managing topic using topic id");
}
if (metadata.errorCode() != Errors.NONE.code())
errors.put(metadata.name(), Errors.forCode(metadata.errorCode()));
}
return errors;
}
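// Usage sketch (hedged): react to the topic-level error codes listed in the
// class javadoc, e.g. retry on LEADER_NOT_AVAILABLE.
//   response.errors().forEach((topic, error) -> {
//       if (error == Errors.LEADER_NOT_AVAILABLE) { /* schedule a retry */ }
//   });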
/**
* Get a map of the topicIds which had metadata errors
* @return the map
*/
public Map<Uuid, Errors> errorsByTopicId() {
Map<Uuid, Errors> errors = new HashMap<>();
for (MetadataResponseTopic metadata : data.topics()) {
if (metadata.topicId() == Uuid.ZERO_UUID) {
throw new IllegalStateException("Use errors() when managing topic using topic name");
}
if (metadata.errorCode() != Errors.NONE.code())
errors.put(metadata.topicId(), Errors.forCode(metadata.errorCode()));
}
return errors;
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> errorCounts = new HashMap<>();
data.topics().forEach(metadata -> {
metadata.partitions().forEach(p -> updateErrorCounts(errorCounts, Errors.forCode(p.errorCode())));
updateErrorCounts(errorCounts, Errors.forCode(metadata.errorCode()));
});
return errorCounts;
}
/**
* Returns the set of topics with the specified error
*/
public Set<String> topicsByError(Errors error) {
Set<String> errorTopics = new HashSet<>();
for (MetadataResponseTopic metadata : data.topics()) {
if (metadata.errorCode() == error.code())
errorTopics.add(metadata.name());
}
return errorTopics;
}
/**
* Get a snapshot of the cluster metadata from this response
* @return the cluster snapshot
*/
public Cluster buildCluster() {
Set<String> internalTopics = new HashSet<>();
List<PartitionInfo> partitions = new ArrayList<>();
Map<String, Uuid> topicIds = new HashMap<>();
for (TopicMetadata metadata : topicMetadata()) {
if (metadata.error == Errors.NONE) {
if (metadata.isInternal)
internalTopics.add(metadata.topic);
if (metadata.topicId() != null && !Uuid.ZERO_UUID.equals(metadata.topicId())) {
topicIds.put(metadata.topic, metadata.topicId());
}
for (PartitionMetadata partitionMetadata : metadata.partitionMetadata) {
partitions.add(toPartitionInfo(partitionMetadata, holder().brokers));
}
}
}
return new Cluster(data.clusterId(), brokers(), partitions, topicsByError(Errors.TOPIC_AUTHORIZATION_FAILED),
topicsByError(Errors.INVALID_TOPIC_EXCEPTION), internalTopics, controller(), topicIds);
}
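// Usage sketch (hedged; the topic name is illustrative): derive a Cluster view
// and look up the current leader of a partition.
//   Cluster cluster = response.buildCluster();
//   Node leader = cluster.leaderFor(new TopicPartition("orders", 0));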
public static PartitionInfo toPartitionInfo(PartitionMetadata metadata, Map<Integer, Node> nodesById) {
return new PartitionInfo(metadata.topic(),
metadata.partition(),
metadata.leaderId.map(nodesById::get).orElse(null),
convertToNodeArray(metadata.replicaIds, nodesById),
convertToNodeArray(metadata.inSyncReplicaIds, nodesById),
convertToNodeArray(metadata.offlineReplicaIds, nodesById));
}
private static Node[] convertToNodeArray(List<Integer> replicaIds, Map<Integer, Node> nodesById) {
return replicaIds.stream().map(replicaId -> {
Node node = nodesById.get(replicaId);
if (node == null)
return new Node(replicaId, "", -1);
return node;
}).toArray(Node[]::new);
}
/**
* Returns a 32-bit bitfield to represent authorized operations for this topic.
*/
public Optional<Integer> topicAuthorizedOperations(String topicName) {
MetadataResponseTopic topic = data.topics().find(topicName);
if (topic == null)
return Optional.empty();
else
return Optional.of(topic.topicAuthorizedOperations());
}
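// Decoding sketch (hedged; assumes the protocol's mapping of AclOperation codes
// to bit positions, with org.apache.kafka.common.acl.AclOperation on the classpath):
//   int bits = response.topicAuthorizedOperations("orders").orElse(AUTHORIZED_OPERATIONS_OMITTED);
//   boolean canRead = bits != AUTHORIZED_OPERATIONS_OMITTED
//       && (bits & (1 << AclOperation.READ.code())) != 0;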
/**
* Returns a 32-bit bitfield to represent authorized operations for this cluster.
*/
public int clusterAuthorizedOperations() {
return data.clusterAuthorizedOperations();
}
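// Lazily parses the response into a Holder. The double-checked locking below
// relies on 'holder' being volatile, so a fully constructed Holder is published
// safely to concurrent readers while repeat calls skip the synchronized block.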
private Holder holder() {
if (holder == null) {
synchronized (data) {
if (holder == null)
holder = new Holder(data);
}
}
return holder;
}
/**
* Get all brokers returned in metadata response
* @return the brokers
*/
public Collection<Node> brokers() {
return holder().brokers.values();
}
public Map<Integer, Node> brokersById() {
return holder().brokers;
}
/**
* Get all topic metadata returned in the metadata response
* @return the topicMetadata
*/
public Collection<TopicMetadata> topicMetadata() {
return holder().topicMetadata;
}
/**
* The controller node returned in metadata response
* @return the controller node or null if it doesn't exist
*/
public Node controller() {
return holder().controller;
}
/**
* The cluster identifier returned in the metadata response.
* @return cluster identifier if it is present in the response, null otherwise.
*/
public String clusterId() {
return this.data.clusterId();
}
/**
* Check whether the leader epochs returned from the response can be relied on
* for epoch validation in Fetch, ListOffsets, and OffsetsForLeaderEpoch requests.
* If not, then the client will not retain the leader epochs and hence will not
* forward them in requests.
*
* @return true if the epoch can be used for validation
*/
public boolean hasReliableLeaderEpochs() {
return hasReliableLeaderEpochs;
}
// Prior to Kafka version 2.4 (which coincides with Metadata version 9), the broker
// does not propagate leader epoch information accurately while a reassignment is in
// progress. Relying on a stale epoch can lead to FENCED_LEADER_EPOCH errors which
// can prevent consumption throughout the course of a reassignment. It is safer in
// this case to revert to the behavior in previous protocol versions which checks
// leader status only.
private static boolean hasReliableLeaderEpochs(short version) {
return version >= 9;
}
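// Sketch (hedged): callers that find the epochs unreliable typically strip them
// before caching, e.g.
//   PartitionMetadata sanitized = response.hasReliableLeaderEpochs()
//       ? partitionMetadata
//       : partitionMetadata.withoutLeaderEpoch();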
public static MetadataResponse parse(ByteBuffer buffer, short version) {
return new MetadataResponse(new MetadataResponseData(new ByteBufferAccessor(buffer), version),
hasReliableLeaderEpochs(version));
}
public static class TopicMetadata {
private final Errors error;
private final String topic;
private final Uuid topicId;
private final boolean isInternal;
private final List<PartitionMetadata> partitionMetadata;
private int authorizedOperations;
public TopicMetadata(Errors error,
String topic,
Uuid topicId,
boolean isInternal,
List<PartitionMetadata> partitionMetadata,
int authorizedOperations) {
this.error = error;
this.topic = topic;
this.topicId = topicId;
this.isInternal = isInternal;
this.partitionMetadata = partitionMetadata;
this.authorizedOperations = authorizedOperations;
}
public TopicMetadata(Errors error,
String topic,
boolean isInternal,
List<PartitionMetadata> partitionMetadata) {
this(error, topic, Uuid.ZERO_UUID, isInternal, partitionMetadata, AUTHORIZED_OPERATIONS_OMITTED);
}
public Errors error() {
return error;
}
public String topic() {
return topic;
}
public Uuid topicId() {
return topicId;
}
public boolean isInternal() {
return isInternal;
}
public List<PartitionMetadata> partitionMetadata() {
return partitionMetadata;
}
public void authorizedOperations(int authorizedOperations) {
this.authorizedOperations = authorizedOperations;
}
public int authorizedOperations() {
return authorizedOperations;
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final TopicMetadata that = (TopicMetadata) o;
return isInternal == that.isInternal &&
error == that.error &&
Objects.equals(topic, that.topic) &&
Objects.equals(topicId, that.topicId) &&
Objects.equals(partitionMetadata, that.partitionMetadata) &&
Objects.equals(authorizedOperations, that.authorizedOperations);
}
@Override
public int hashCode() {
return Objects.hash(error, topic, isInternal, partitionMetadata, authorizedOperations);
}
@Override
public String toString() {
return "TopicMetadata{" +
"error=" + error +
", topic='" + topic + '\'' +
", topicId='" + topicId + '\'' +
", isInternal=" + isInternal +
", partitionMetadata=" + partitionMetadata +
", authorizedOperations=" + authorizedOperations +
'}';
}
}
// This is used to describe per-partition state in the MetadataResponse
public static class PartitionMetadata {
public final TopicPartition topicPartition;
public final Errors error;
public final Optional<Integer> leaderId;
public final Optional<Integer> leaderEpoch;
public final List<Integer> replicaIds;
public final List<Integer> inSyncReplicaIds;
public final List<Integer> offlineReplicaIds;
public PartitionMetadata(Errors error,
TopicPartition topicPartition,
Optional<Integer> leaderId,
Optional<Integer> leaderEpoch,
List<Integer> replicaIds,
List<Integer> inSyncReplicaIds,
List<Integer> offlineReplicaIds) {
this.error = error;
this.topicPartition = topicPartition;
this.leaderId = leaderId;
this.leaderEpoch = leaderEpoch;
this.replicaIds = replicaIds;
this.inSyncReplicaIds = inSyncReplicaIds;
this.offlineReplicaIds = offlineReplicaIds;
}
public int partition() {
return topicPartition.partition();
}
public String topic() {
return topicPartition.topic();
}
public PartitionMetadata withoutLeaderEpoch() {
return new PartitionMetadata(error,
topicPartition,
leaderId,
Optional.empty(),
replicaIds,
inSyncReplicaIds,
offlineReplicaIds);
}
@Override
public String toString() {
return "PartitionMetadata(" +
"error=" + error +
", partition=" + topicPartition +
", leader=" + leaderId +
", leaderEpoch=" + leaderEpoch +
", replicas=" + Utils.join(replicaIds, ",") +
", isr=" + Utils.join(inSyncReplicaIds, ",") +
", offlineReplicas=" + Utils.join(offlineReplicaIds, ",") + ')';
}
}
private static class Holder {
private final Map<Integer, Node> brokers;
private final Node controller;
private final Collection<TopicMetadata> topicMetadata;
Holder(MetadataResponseData data) {
this.brokers = Collections.unmodifiableMap(createBrokers(data));
this.topicMetadata = createTopicMetadata(data);
this.controller = brokers.get(data.controllerId());
}
private Map<Integer, Node> createBrokers(MetadataResponseData data) {
return data.brokers().valuesList().stream().map(b -> new Node(b.nodeId(), b.host(), b.port(), b.rack()))
.collect(Collectors.toMap(Node::id, Function.identity()));
}
private Collection<TopicMetadata> createTopicMetadata(MetadataResponseData data) {
List<TopicMetadata> topicMetadataList = new ArrayList<>();
for (MetadataResponseTopic topicMetadata : data.topics()) {
Errors topicError = Errors.forCode(topicMetadata.errorCode());
String topic = topicMetadata.name();
Uuid topicId = topicMetadata.topicId();
boolean isInternal = topicMetadata.isInternal();
List<PartitionMetadata> partitionMetadataList = new ArrayList<>();
for (MetadataResponsePartition partitionMetadata : topicMetadata.partitions()) {
Errors partitionError = Errors.forCode(partitionMetadata.errorCode());
int partitionIndex = partitionMetadata.partitionIndex();
int leaderId = partitionMetadata.leaderId();
Optional<Integer> leaderIdOpt = leaderId < 0 ? Optional.empty() : Optional.of(leaderId);
Optional<Integer> leaderEpoch = RequestUtils.getLeaderEpoch(partitionMetadata.leaderEpoch());
TopicPartition topicPartition = new TopicPartition(topic, partitionIndex);
partitionMetadataList.add(new PartitionMetadata(partitionError, topicPartition, leaderIdOpt,
leaderEpoch, partitionMetadata.replicaNodes(), partitionMetadata.isrNodes(),
partitionMetadata.offlineReplicas()));
}
topicMetadataList.add(new TopicMetadata(topicError, topic, topicId, isInternal, partitionMetadataList,
topicMetadata.topicAuthorizedOperations()));
}
return topicMetadataList;
}
}
public static MetadataResponse prepareResponse(short version,
int throttleTimeMs,
Collection<Node> brokers,
String clusterId,
int controllerId,
List<MetadataResponseTopic> topics,
int clusterAuthorizedOperations) {
return prepareResponse(hasReliableLeaderEpochs(version), throttleTimeMs, brokers, clusterId, controllerId,
topics, clusterAuthorizedOperations);
}
// Visible for testing
public static MetadataResponse prepareResponse(boolean hasReliableEpoch,
int throttleTimeMs,
Collection<Node> brokers,
String clusterId,
int controllerId,
List<MetadataResponseTopic> topics,
int clusterAuthorizedOperations) {
MetadataResponseData responseData = new MetadataResponseData();
responseData.setThrottleTimeMs(throttleTimeMs);
brokers.forEach(broker ->
responseData.brokers().add(new MetadataResponseBroker()
.setNodeId(broker.id())
.setHost(broker.host())
.setPort(broker.port())
.setRack(broker.rack()))
);
responseData.setClusterId(clusterId);
responseData.setControllerId(controllerId);
responseData.setClusterAuthorizedOperations(clusterAuthorizedOperations);
topics.forEach(topicMetadata -> responseData.topics().add(topicMetadata));
return new MetadataResponse(responseData, hasReliableEpoch);
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 6;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/OffsetCommitRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.OffsetCommitRequestData;
import org.apache.kafka.common.message.OffsetCommitRequestData.OffsetCommitRequestTopic;
import org.apache.kafka.common.message.OffsetCommitResponseData;
import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponsePartition;
import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponseTopic;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class OffsetCommitRequest extends AbstractRequest {
// default values for the current version
public static final int DEFAULT_GENERATION_ID = -1;
public static final String DEFAULT_MEMBER_ID = "";
public static final long DEFAULT_RETENTION_TIME = -1L;
// default values for old versions, will be removed after these versions are no longer supported
public static final long DEFAULT_TIMESTAMP = -1L; // for V0, V1
private final OffsetCommitRequestData data;
public static class Builder extends AbstractRequest.Builder<OffsetCommitRequest> {
private final OffsetCommitRequestData data;
public Builder(OffsetCommitRequestData data) {
super(ApiKeys.OFFSET_COMMIT);
this.data = data;
}
@Override
public OffsetCommitRequest build(short version) {
if (data.groupInstanceId() != null && version < 7) {
throw new UnsupportedVersionException("The broker offset commit protocol version " +
version + " does not support usage of config group.instance.id.");
}
return new OffsetCommitRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
}
public OffsetCommitRequest(OffsetCommitRequestData data, short version) {
super(ApiKeys.OFFSET_COMMIT, version);
this.data = data;
}
@Override
public OffsetCommitRequestData data() {
return data;
}
public Map<TopicPartition, Long> offsets() {
Map<TopicPartition, Long> offsets = new HashMap<>();
for (OffsetCommitRequestTopic topic : data.topics()) {
for (OffsetCommitRequestData.OffsetCommitRequestPartition partition : topic.partitions()) {
offsets.put(new TopicPartition(topic.name(), partition.partitionIndex()),
partition.committedOffset());
}
}
return offsets;
}
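// Usage sketch (hedged): walk the per-partition offsets a request is committing.
//   for (Map.Entry<TopicPartition, Long> e : request.offsets().entrySet()) {
//       System.out.println(e.getKey() + " -> " + e.getValue());
//   }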
public static List<OffsetCommitResponseTopic> getErrorResponseTopics(
List<OffsetCommitRequestTopic> requestTopics,
Errors e) {
List<OffsetCommitResponseTopic> responseTopicData = new ArrayList<>();
for (OffsetCommitRequestTopic entry : requestTopics) {
List<OffsetCommitResponsePartition> responsePartitions =
new ArrayList<>();
for (OffsetCommitRequestData.OffsetCommitRequestPartition requestPartition : entry.partitions()) {
responsePartitions.add(new OffsetCommitResponsePartition()
.setPartitionIndex(requestPartition.partitionIndex())
.setErrorCode(e.code()));
}
responseTopicData.add(new OffsetCommitResponseTopic()
.setName(entry.name())
.setPartitions(responsePartitions)
);
}
return responseTopicData;
}
@Override
public OffsetCommitResponse getErrorResponse(int throttleTimeMs, Throwable e) {
List<OffsetCommitResponseTopic>
responseTopicData = getErrorResponseTopics(data.topics(), Errors.forException(e));
return new OffsetCommitResponse(new OffsetCommitResponseData()
.setTopics(responseTopicData)
.setThrottleTimeMs(throttleTimeMs));
}
@Override
public OffsetCommitResponse getErrorResponse(Throwable e) {
return getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, e);
}
public static OffsetCommitRequest parse(ByteBuffer buffer, short version) {
return new OffsetCommitRequest(new OffsetCommitRequestData(new ByteBufferAccessor(buffer), version), version);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/OffsetCommitResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.OffsetCommitResponseData;
import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponsePartition;
import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponseTopic;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
/**
* Possible error codes:
*
* - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION}
* - {@link Errors#REQUEST_TIMED_OUT}
* - {@link Errors#OFFSET_METADATA_TOO_LARGE}
* - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS}
* - {@link Errors#COORDINATOR_NOT_AVAILABLE}
* - {@link Errors#NOT_COORDINATOR}
* - {@link Errors#ILLEGAL_GENERATION}
* - {@link Errors#UNKNOWN_MEMBER_ID}
* - {@link Errors#REBALANCE_IN_PROGRESS}
* - {@link Errors#INVALID_COMMIT_OFFSET_SIZE}
* - {@link Errors#TOPIC_AUTHORIZATION_FAILED}
* - {@link Errors#GROUP_AUTHORIZATION_FAILED}
*/
public class OffsetCommitResponse extends AbstractResponse {
private final OffsetCommitResponseData data;
public OffsetCommitResponse(OffsetCommitResponseData data) {
super(ApiKeys.OFFSET_COMMIT);
this.data = data;
}
public OffsetCommitResponse(int requestThrottleMs, Map<TopicPartition, Errors> responseData) {
super(ApiKeys.OFFSET_COMMIT);
Map<String, OffsetCommitResponseTopic>
responseTopicDataMap = new HashMap<>();
for (Map.Entry<TopicPartition, Errors> entry : responseData.entrySet()) {
TopicPartition topicPartition = entry.getKey();
String topicName = topicPartition.topic();
OffsetCommitResponseTopic topic = responseTopicDataMap.getOrDefault(
topicName, new OffsetCommitResponseTopic().setName(topicName));
topic.partitions().add(new OffsetCommitResponsePartition()
.setErrorCode(entry.getValue().code())
.setPartitionIndex(topicPartition.partition()));
responseTopicDataMap.put(topicName, topic);
}
data = new OffsetCommitResponseData()
.setTopics(new ArrayList<>(responseTopicDataMap.values()))
.setThrottleTimeMs(requestThrottleMs);
}
public OffsetCommitResponse(Map<TopicPartition, Errors> responseData) {
this(DEFAULT_THROTTLE_TIME, responseData);
}
@Override
public OffsetCommitResponseData data() {
return data;
}
@Override
public Map<Errors, Integer> errorCounts() {
return errorCounts(data.topics().stream().flatMap(topicResult ->
topicResult.partitions().stream().map(partitionResult ->
Errors.forCode(partitionResult.errorCode()))));
}
public static OffsetCommitResponse parse(ByteBuffer buffer, short version) {
return new OffsetCommitResponse(new OffsetCommitResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public String toString() {
return data.toString();
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 4;
}
public static class Builder {
OffsetCommitResponseData data = new OffsetCommitResponseData();
HashMap<String, OffsetCommitResponseTopic> byTopicName = new HashMap<>();
private OffsetCommitResponseTopic getOrCreateTopic(
String topicName
) {
OffsetCommitResponseTopic topic = byTopicName.get(topicName);
if (topic == null) {
topic = new OffsetCommitResponseTopic().setName(topicName);
data.topics().add(topic);
byTopicName.put(topicName, topic);
}
return topic;
}
public Builder addPartition(
String topicName,
int partitionIndex,
Errors error
) {
final OffsetCommitResponseTopic topicResponse = getOrCreateTopic(topicName);
topicResponse.partitions().add(new OffsetCommitResponsePartition()
.setPartitionIndex(partitionIndex)
.setErrorCode(error.code()));
return this;
}
public <P> Builder addPartitions(
String topicName,
List<P> partitions,
Function<P, Integer> partitionIndex,
Errors error
) {
final OffsetCommitResponseTopic topicResponse = getOrCreateTopic(topicName);
partitions.forEach(partition -> {
topicResponse.partitions().add(new OffsetCommitResponsePartition()
.setPartitionIndex(partitionIndex.apply(partition))
.setErrorCode(error.code()));
});
return this;
}
public Builder merge(
OffsetCommitResponseData newData
) {
if (data.topics().isEmpty()) {
// If the current data is empty, we can discard it and use the new data.
data = newData;
} else {
// Otherwise, we have to merge them together.
newData.topics().forEach(newTopic -> {
OffsetCommitResponseTopic existingTopic = byTopicName.get(newTopic.name());
if (existingTopic == null) {
// If no topic exists, we can directly copy the new topic data.
data.topics().add(newTopic);
byTopicName.put(newTopic.name(), newTopic);
} else {
// Otherwise, we add the partitions to the existing one. Note we
// expect non-overlapping partitions here as we don't verify
// if the partition is already in the list before adding it.
existingTopic.partitions().addAll(newTopic.partitions());
}
});
}
return this;
}
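// Usage sketch (hedged; names are illustrative, otherResponseData is a
// hypothetical OffsetCommitResponseData produced elsewhere): accumulate local
// results, then fold in the external response before building.
//   OffsetCommitResponse response = new OffsetCommitResponse.Builder()
//       .addPartition("orders", 0, Errors.NONE)
//       .merge(otherResponseData)
//       .build();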
public OffsetCommitResponse build() {
return new OffsetCommitResponse(data);
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/OffsetDeleteRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.OffsetDeleteRequestData;
import org.apache.kafka.common.message.OffsetDeleteResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
public class OffsetDeleteRequest extends AbstractRequest {
public static class Builder extends AbstractRequest.Builder<OffsetDeleteRequest> {
private final OffsetDeleteRequestData data;
public Builder(OffsetDeleteRequestData data) {
super(ApiKeys.OFFSET_DELETE);
this.data = data;
}
@Override
public OffsetDeleteRequest build(short version) {
return new OffsetDeleteRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
}
private final OffsetDeleteRequestData data;
public OffsetDeleteRequest(OffsetDeleteRequestData data, short version) {
super(ApiKeys.OFFSET_DELETE, version);
this.data = data;
}
public AbstractResponse getErrorResponse(int throttleTimeMs, Errors error) {
return new OffsetDeleteResponse(
new OffsetDeleteResponseData()
.setThrottleTimeMs(throttleTimeMs)
.setErrorCode(error.code())
);
}
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
return getErrorResponse(throttleTimeMs, Errors.forException(e));
}
public static OffsetDeleteRequest parse(ByteBuffer buffer, short version) {
return new OffsetDeleteRequest(new OffsetDeleteRequestData(new ByteBufferAccessor(buffer), version), version);
}
@Override
public OffsetDeleteRequestData data() {
return data;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/OffsetDeleteResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.OffsetDeleteResponseData;
import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponsePartition;
import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponseTopic;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
/**
* Possible error codes:
*
* - Partition errors:
* - {@link Errors#GROUP_SUBSCRIBED_TO_TOPIC}
* - {@link Errors#TOPIC_AUTHORIZATION_FAILED}
* - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION}
*
* - Group or coordinator errors:
* - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS}
* - {@link Errors#COORDINATOR_NOT_AVAILABLE}
* - {@link Errors#NOT_COORDINATOR}
* - {@link Errors#GROUP_AUTHORIZATION_FAILED}
* - {@link Errors#INVALID_GROUP_ID}
* - {@link Errors#GROUP_ID_NOT_FOUND}
* - {@link Errors#NON_EMPTY_GROUP}
*/
public class OffsetDeleteResponse extends AbstractResponse {
public static class Builder {
OffsetDeleteResponseData data = new OffsetDeleteResponseData();
private OffsetDeleteResponseTopic getOrCreateTopic(
String topicName
) {
OffsetDeleteResponseTopic topic = data.topics().find(topicName);
if (topic == null) {
topic = new OffsetDeleteResponseTopic().setName(topicName);
data.topics().add(topic);
}
return topic;
}
public Builder addPartition(
String topicName,
int partitionIndex,
Errors error
) {
final OffsetDeleteResponseTopic topicResponse = getOrCreateTopic(topicName);
topicResponse.partitions().add(new OffsetDeleteResponsePartition()
.setPartitionIndex(partitionIndex)
.setErrorCode(error.code()));
return this;
}
public <P> Builder addPartitions(
String topicName,
List<P> partitions,
Function<P, Integer> partitionIndex,
Errors error
) {
final OffsetDeleteResponseTopic topicResponse = getOrCreateTopic(topicName);
partitions.forEach(partition -> {
topicResponse.partitions().add(new OffsetDeleteResponsePartition()
.setPartitionIndex(partitionIndex.apply(partition))
.setErrorCode(error.code()));
});
return this;
}
public Builder merge(
OffsetDeleteResponseData newData
) {
if (data.topics().isEmpty()) {
// If the current data is empty, we can discard it and use the new data.
data = newData;
} else {
// Otherwise, we have to merge them together.
newData.topics().forEach(newTopic -> {
OffsetDeleteResponseTopic existingTopic = data.topics().find(newTopic.name());
if (existingTopic == null) {
// If no topic exists, we can directly copy the new topic data.
data.topics().add(newTopic.duplicate());
} else {
// Otherwise, we add the partitions to the existing one. Note we
// expect non-overlapping partitions here as we don't verify
// if the partition is already in the list before adding it.
newTopic.partitions().forEach(partition -> {
existingTopic.partitions().add(partition.duplicate());
});
}
});
}
return this;
}
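// Note: unlike OffsetCommitResponse.Builder#merge, entries are copied here via
// duplicate(). The generated topics() collection is keyed (see find() above),
// and an element of such a collection cannot be linked into two collections at
// once, so copies are required.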
public OffsetDeleteResponse build() {
return new OffsetDeleteResponse(data);
}
}
private final OffsetDeleteResponseData data;
public OffsetDeleteResponse(OffsetDeleteResponseData data) {
super(ApiKeys.OFFSET_DELETE);
this.data = data;
}
@Override
public OffsetDeleteResponseData data() {
return data;
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> counts = new HashMap<>();
updateErrorCounts(counts, Errors.forCode(data.errorCode()));
data.topics().forEach(topic ->
topic.partitions().forEach(partition ->
updateErrorCounts(counts, Errors.forCode(partition.errorCode()))
)
);
return counts;
}
public static OffsetDeleteResponse parse(ByteBuffer buffer, short version) {
return new OffsetDeleteResponse(new OffsetDeleteResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 0;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/OffsetFetchRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.util.Collections;
import java.util.Map.Entry;
import java.util.stream.Collectors;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.OffsetFetchRequestData;
import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestGroup;
import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestTopic;
import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestTopics;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
public class OffsetFetchRequest extends AbstractRequest {
private static final Logger log = LoggerFactory.getLogger(OffsetFetchRequest.class);
private static final List<OffsetFetchRequestTopic> ALL_TOPIC_PARTITIONS = null;
private static final List<OffsetFetchRequestTopics> ALL_TOPIC_PARTITIONS_BATCH = null;
private final OffsetFetchRequestData data;
public static class Builder extends AbstractRequest.Builder<OffsetFetchRequest> {
public final OffsetFetchRequestData data;
private final boolean throwOnFetchStableOffsetsUnsupported;
public Builder(String groupId,
boolean requireStable,
List<TopicPartition> partitions,
boolean throwOnFetchStableOffsetsUnsupported) {
super(ApiKeys.OFFSET_FETCH);
final List<OffsetFetchRequestTopic> topics;
if (partitions != null) {
Map<String, OffsetFetchRequestTopic> offsetFetchRequestTopicMap = new HashMap<>();
for (TopicPartition topicPartition : partitions) {
String topicName = topicPartition.topic();
OffsetFetchRequestTopic topic = offsetFetchRequestTopicMap.getOrDefault(
topicName, new OffsetFetchRequestTopic().setName(topicName));
topic.partitionIndexes().add(topicPartition.partition());
offsetFetchRequestTopicMap.put(topicName, topic);
}
topics = new ArrayList<>(offsetFetchRequestTopicMap.values());
} else {
// If the passed-in partition list is null, the request asks for offsets of all topic partitions.
topics = ALL_TOPIC_PARTITIONS;
}
this.data = new OffsetFetchRequestData()
.setGroupId(groupId)
.setRequireStable(requireStable)
.setTopics(topics);
this.throwOnFetchStableOffsetsUnsupported = throwOnFetchStableOffsetsUnsupported;
}
boolean isAllTopicPartitions() {
return this.data.topics() == ALL_TOPIC_PARTITIONS;
}
public Builder(Map<String, List<TopicPartition>> groupIdToTopicPartitionMap,
boolean requireStable,
boolean throwOnFetchStableOffsetsUnsupported) {
super(ApiKeys.OFFSET_FETCH);
List<OffsetFetchRequestGroup> groups = new ArrayList<>();
for (Entry<String, List<TopicPartition>> entry : groupIdToTopicPartitionMap.entrySet()) {
String groupName = entry.getKey();
List<TopicPartition> tpList = entry.getValue();
final List<OffsetFetchRequestTopics> topics;
if (tpList != null) {
Map<String, OffsetFetchRequestTopics> offsetFetchRequestTopicMap =
new HashMap<>();
for (TopicPartition topicPartition : tpList) {
String topicName = topicPartition.topic();
OffsetFetchRequestTopics topic = offsetFetchRequestTopicMap.getOrDefault(
topicName, new OffsetFetchRequestTopics().setName(topicName));
topic.partitionIndexes().add(topicPartition.partition());
offsetFetchRequestTopicMap.put(topicName, topic);
}
topics = new ArrayList<>(offsetFetchRequestTopicMap.values());
} else {
topics = ALL_TOPIC_PARTITIONS_BATCH;
}
groups.add(new OffsetFetchRequestGroup()
.setGroupId(groupName)
.setTopics(topics));
}
this.data = new OffsetFetchRequestData()
.setGroups(groups)
.setRequireStable(requireStable);
this.throwOnFetchStableOffsetsUnsupported = throwOnFetchStableOffsetsUnsupported;
}
@Override
public OffsetFetchRequest build(short version) {
if (isAllTopicPartitions() && version < 2) {
throw new UnsupportedVersionException("The broker only supports OffsetFetchRequest " +
"v" + version + ", but we need v2 or newer to request all topic partitions.");
}
if (data.groups().size() > 1 && version < 8) {
throw new NoBatchedOffsetFetchRequestException("Broker does not support"
+ " batching groups for fetch offset request on version " + version);
}
if (data.requireStable() && version < 7) {
if (throwOnFetchStableOffsetsUnsupported) {
throw new UnsupportedVersionException("Broker unexpectedly " +
"doesn't support requireStable flag on version " + version);
} else {
log.trace("Fallback the requireStable flag to false as broker " +
"only supports OffsetFetchRequest version {}. Need " +
"v7 or newer to enable this feature", version);
data.setRequireStable(false);
}
}
// Convert the data to the appropriate format, since version 8 introduced a different schema.
if (version < 8) {
OffsetFetchRequestData oldDataFormat = null;
if (!data.groups().isEmpty()) {
OffsetFetchRequestGroup group = data.groups().get(0);
String groupName = group.groupId();
List<OffsetFetchRequestTopics> topics = group.topics();
List<OffsetFetchRequestTopic> oldFormatTopics = null;
if (topics != null) {
oldFormatTopics = topics
.stream()
.map(t ->
new OffsetFetchRequestTopic()
.setName(t.name())
.setPartitionIndexes(t.partitionIndexes()))
.collect(Collectors.toList());
}
oldDataFormat = new OffsetFetchRequestData()
.setGroupId(groupName)
.setTopics(oldFormatTopics)
.setRequireStable(data.requireStable());
}
return new OffsetFetchRequest(oldDataFormat == null ? data : oldDataFormat, version);
} else {
if (data.groups().isEmpty()) {
String groupName = data.groupId();
List<OffsetFetchRequestTopic> oldFormatTopics = data.topics();
List<OffsetFetchRequestTopics> topics = null;
if (oldFormatTopics != null) {
topics = oldFormatTopics
.stream()
.map(t -> new OffsetFetchRequestTopics()
.setName(t.name())
.setPartitionIndexes(t.partitionIndexes()))
.collect(Collectors.toList());
}
OffsetFetchRequestData convertedDataFormat =
new OffsetFetchRequestData()
.setGroups(Collections.singletonList(
new OffsetFetchRequestGroup()
.setGroupId(groupName)
.setTopics(topics)))
.setRequireStable(data.requireStable());
return new OffsetFetchRequest(convertedDataFormat, version);
}
}
return new OffsetFetchRequest(data, version);
}
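// Usage sketch (hedged; names and version are illustrative): a single-group
// builder converts to whichever wire format the negotiated version needs.
//   OffsetFetchRequest request = new OffsetFetchRequest.Builder(
//       "my-group", false,
//       Collections.singletonList(new TopicPartition("orders", 0)),
//       false).build((short) 7);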
@Override
public String toString() {
return data.toString();
}
}
/**
* Indicates that it is not possible to fetch consumer groups in batches with OffsetFetch.
* Instead, consumer groups' offsets must be fetched one by one.
*/
public static class NoBatchedOffsetFetchRequestException extends UnsupportedVersionException {
private static final long serialVersionUID = 1L;
public NoBatchedOffsetFetchRequestException(String message) {
super(message);
}
}
public List<TopicPartition> partitions() {
if (isAllPartitions()) {
return null;
}
List<TopicPartition> partitions = new ArrayList<>();
for (OffsetFetchRequestTopic topic : data.topics()) {
for (Integer partitionIndex : topic.partitionIndexes()) {
partitions.add(new TopicPartition(topic.name(), partitionIndex));
}
}
return partitions;
}
public String groupId() {
return data.groupId();
}
public boolean requireStable() {
return data.requireStable();
}
public List<OffsetFetchRequestData.OffsetFetchRequestGroup> groups() {
if (version() >= 8) {
return data.groups();
} else {
OffsetFetchRequestData.OffsetFetchRequestGroup group =
new OffsetFetchRequestData.OffsetFetchRequestGroup()
.setGroupId(data.groupId());
if (data.topics() == null) {
// If topics is null, it means that all topic-partitions should
// be fetched, hence we preserve the null.
group.setTopics(null);
} else {
// Otherwise, topics are translated to the new structure.
data.topics().forEach(topic -> {
group.topics().add(new OffsetFetchRequestTopics()
.setName(topic.name())
.setPartitionIndexes(topic.partitionIndexes())
);
});
}
return Collections.singletonList(group);
}
}
public Map<String, List<TopicPartition>> groupIdsToPartitions() {
Map<String, List<TopicPartition>> groupIdsToPartitions = new HashMap<>();
for (OffsetFetchRequestGroup group : data.groups()) {
List<TopicPartition> tpList = null;
if (group.topics() != ALL_TOPIC_PARTITIONS_BATCH) {
tpList = new ArrayList<>();
for (OffsetFetchRequestTopics topic : group.topics()) {
for (Integer partitionIndex : topic.partitionIndexes()) {
tpList.add(new TopicPartition(topic.name(), partitionIndex));
}
}
}
groupIdsToPartitions.put(group.groupId(), tpList);
}
return groupIdsToPartitions;
}
public Map<String, List<OffsetFetchRequestTopics>> groupIdsToTopics() {
Map<String, List<OffsetFetchRequestTopics>> groupIdsToTopics =
new HashMap<>(data.groups().size());
data.groups().forEach(g -> groupIdsToTopics.put(g.groupId(), g.topics()));
return groupIdsToTopics;
}
public List<String> groupIds() {
return data.groups()
.stream()
.map(OffsetFetchRequestGroup::groupId)
.collect(Collectors.toList());
}
private OffsetFetchRequest(OffsetFetchRequestData data, short version) {
super(ApiKeys.OFFSET_FETCH, version);
this.data = data;
}
public OffsetFetchResponse getErrorResponse(Errors error) {
return getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, error);
}
public OffsetFetchResponse getErrorResponse(int throttleTimeMs, Errors error) {
Map<TopicPartition, OffsetFetchResponse.PartitionData> responsePartitions = new HashMap<>();
if (version() < 2) {
OffsetFetchResponse.PartitionData partitionError = new OffsetFetchResponse.PartitionData(
OffsetFetchResponse.INVALID_OFFSET,
Optional.empty(),
OffsetFetchResponse.NO_METADATA,
error);
for (OffsetFetchRequestTopic topic : this.data.topics()) {
for (int partitionIndex : topic.partitionIndexes()) {
responsePartitions.put(
new TopicPartition(topic.name(), partitionIndex), partitionError);
}
}
return new OffsetFetchResponse(error, responsePartitions);
}
if (version() == 2) {
return new OffsetFetchResponse(error, responsePartitions);
}
if (version() >= 3 && version() < 8) {
return new OffsetFetchResponse(throttleTimeMs, error, responsePartitions);
}
List<String> groupIds = groupIds();
Map<String, Errors> errorsMap = new HashMap<>(groupIds.size());
Map<String, Map<TopicPartition, OffsetFetchResponse.PartitionData>> partitionMap =
new HashMap<>(groupIds.size());
for (String g : groupIds) {
errorsMap.put(g, error);
partitionMap.put(g, responsePartitions);
}
return new OffsetFetchResponse(throttleTimeMs, errorsMap, partitionMap);
}
@Override
public OffsetFetchResponse getErrorResponse(int throttleTimeMs, Throwable e) {
return getErrorResponse(throttleTimeMs, Errors.forException(e));
}
public static OffsetFetchRequest parse(ByteBuffer buffer, short version) {
return new OffsetFetchRequest(new OffsetFetchRequestData(new ByteBufferAccessor(buffer), version), version);
}
public boolean isAllPartitions() {
return data.topics() == ALL_TOPIC_PARTITIONS;
}
public boolean isAllPartitionsForGroup(String groupId) {
OffsetFetchRequestGroup group = data
.groups()
.stream()
.filter(g -> g.groupId().equals(groupId))
.findFirst()
.get();
return group.topics() == ALL_TOPIC_PARTITIONS_BATCH;
}
@Override
public OffsetFetchRequestData data() {
return data;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/OffsetFetchResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.util.Map.Entry;
import java.util.stream.Collectors;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.OffsetFetchResponseData;
import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseGroup;
import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponsePartition;
import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponsePartitions;
import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseTopic;
import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseTopics;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import static org.apache.kafka.common.record.RecordBatch.NO_PARTITION_LEADER_EPOCH;
/**
* Possible error codes:
*
* - Partition errors:
* - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION}
* - {@link Errors#TOPIC_AUTHORIZATION_FAILED}
* - {@link Errors#UNSTABLE_OFFSET_COMMIT}
*
* - Group or coordinator errors:
* - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS}
* - {@link Errors#COORDINATOR_NOT_AVAILABLE}
* - {@link Errors#NOT_COORDINATOR}
* - {@link Errors#GROUP_AUTHORIZATION_FAILED}
*/
public class OffsetFetchResponse extends AbstractResponse {
public static final long INVALID_OFFSET = -1L;
public static final String NO_METADATA = "";
public static final PartitionData UNKNOWN_PARTITION = new PartitionData(INVALID_OFFSET,
Optional.empty(),
NO_METADATA,
Errors.UNKNOWN_TOPIC_OR_PARTITION);
public static final PartitionData UNAUTHORIZED_PARTITION = new PartitionData(INVALID_OFFSET,
Optional.empty(),
NO_METADATA,
Errors.TOPIC_AUTHORIZATION_FAILED);
private static final List<Errors> PARTITION_ERRORS = Arrays.asList(
Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.TOPIC_AUTHORIZATION_FAILED);
private final OffsetFetchResponseData data;
private final Errors error;
private final Map<String, Errors> groupLevelErrors = new HashMap<>();
public static final class PartitionData {
public final long offset;
public final String metadata;
public final Errors error;
public final Optional<Integer> leaderEpoch;
public PartitionData(long offset,
Optional<Integer> leaderEpoch,
String metadata,
Errors error) {
this.offset = offset;
this.leaderEpoch = leaderEpoch;
this.metadata = metadata;
this.error = error;
}
public boolean hasError() {
return this.error != Errors.NONE;
}
@Override
public boolean equals(Object other) {
if (!(other instanceof PartitionData))
return false;
PartitionData otherPartition = (PartitionData) other;
return Objects.equals(this.offset, otherPartition.offset)
&& Objects.equals(this.leaderEpoch, otherPartition.leaderEpoch)
&& Objects.equals(this.metadata, otherPartition.metadata)
&& Objects.equals(this.error, otherPartition.error);
}
@Override
public String toString() {
return "PartitionData("
+ "offset=" + offset
+ ", leaderEpoch=" + leaderEpoch.orElse(NO_PARTITION_LEADER_EPOCH)
+ ", metadata=" + metadata
+ ", error='" + error.toString()
+ ")";
}
@Override
public int hashCode() {
return Objects.hash(offset, leaderEpoch, metadata, error);
}
}
/**
* Constructor without throttle time.
* @param error Potential coordinator or group level error code (for api version 2 and later)
* @param responseData Fetched offset information grouped by topic-partition
*/
public OffsetFetchResponse(Errors error, Map<TopicPartition, PartitionData> responseData) {
this(DEFAULT_THROTTLE_TIME, error, responseData);
}
/**
* Constructor with throttle time for versions 0 to 7.
* @param throttleTimeMs The time in milliseconds that this response was throttled
* @param error Potential coordinator or group level error code (for api version 2 and later)
* @param responseData Fetched offset information grouped by topic-partition
*/
public OffsetFetchResponse(int throttleTimeMs, Errors error, Map<TopicPartition, PartitionData> responseData) {
super(ApiKeys.OFFSET_FETCH);
Map<String, OffsetFetchResponseTopic> offsetFetchResponseTopicMap = new HashMap<>();
for (Map.Entry<TopicPartition, PartitionData> entry : responseData.entrySet()) {
String topicName = entry.getKey().topic();
OffsetFetchResponseTopic topic = offsetFetchResponseTopicMap.getOrDefault(
topicName, new OffsetFetchResponseTopic().setName(topicName));
PartitionData partitionData = entry.getValue();
topic.partitions().add(new OffsetFetchResponsePartition()
.setPartitionIndex(entry.getKey().partition())
.setErrorCode(partitionData.error.code())
.setCommittedOffset(partitionData.offset)
.setCommittedLeaderEpoch(
partitionData.leaderEpoch.orElse(NO_PARTITION_LEADER_EPOCH))
.setMetadata(partitionData.metadata)
);
offsetFetchResponseTopicMap.put(topicName, topic);
}
this.data = new OffsetFetchResponseData()
.setTopics(new ArrayList<>(offsetFetchResponseTopicMap.values()))
.setErrorCode(error.code())
.setThrottleTimeMs(throttleTimeMs);
this.error = error;
}
/**
* Constructor with throttle time for version 8 and above.
* @param throttleTimeMs The time in milliseconds that this response was throttled
* @param errors Potential coordinator or group level error code
* @param responseData Fetched offset information grouped by topic-partition and by group
*/
public OffsetFetchResponse(int throttleTimeMs,
Map<String, Errors> errors,
Map<String, Map<TopicPartition, PartitionData>> responseData) {
super(ApiKeys.OFFSET_FETCH);
List<OffsetFetchResponseGroup> groupList = new ArrayList<>();
for (Entry<String, Map<TopicPartition, PartitionData>> entry : responseData.entrySet()) {
String groupName = entry.getKey();
Map<TopicPartition, PartitionData> partitionDataMap = entry.getValue();
Map<String, OffsetFetchResponseTopics> offsetFetchResponseTopicsMap = new HashMap<>();
for (Entry<TopicPartition, PartitionData> partitionEntry : partitionDataMap.entrySet()) {
String topicName = partitionEntry.getKey().topic();
OffsetFetchResponseTopics topic =
offsetFetchResponseTopicsMap.getOrDefault(topicName,
new OffsetFetchResponseTopics().setName(topicName));
PartitionData partitionData = partitionEntry.getValue();
topic.partitions().add(new OffsetFetchResponsePartitions()
.setPartitionIndex(partitionEntry.getKey().partition())
.setErrorCode(partitionData.error.code())
.setCommittedOffset(partitionData.offset)
.setCommittedLeaderEpoch(
partitionData.leaderEpoch.orElse(NO_PARTITION_LEADER_EPOCH))
.setMetadata(partitionData.metadata));
offsetFetchResponseTopicsMap.put(topicName, topic);
}
groupList.add(new OffsetFetchResponseGroup()
.setGroupId(groupName)
.setTopics(new ArrayList<>(offsetFetchResponseTopicsMap.values()))
.setErrorCode(errors.get(groupName).code()));
groupLevelErrors.put(groupName, errors.get(groupName));
}
this.data = new OffsetFetchResponseData()
.setGroups(groupList)
.setThrottleTimeMs(throttleTimeMs);
this.error = null;
}
public OffsetFetchResponse(List<OffsetFetchResponseGroup> groups, short version) {
super(ApiKeys.OFFSET_FETCH);
data = new OffsetFetchResponseData();
if (version >= 8) {
data.setGroups(groups);
error = null;
for (OffsetFetchResponseGroup group : data.groups()) {
this.groupLevelErrors.put(group.groupId(), Errors.forCode(group.errorCode()));
}
} else {
if (groups.size() != 1) {
throw new UnsupportedVersionException(
"Version " + version + " of OffsetFetchResponse only supports one group."
);
}
OffsetFetchResponseGroup group = groups.get(0);
data.setErrorCode(group.errorCode());
error = Errors.forCode(group.errorCode());
group.topics().forEach(topic -> {
OffsetFetchResponseTopic newTopic = new OffsetFetchResponseTopic().setName(topic.name());
data.topics().add(newTopic);
topic.partitions().forEach(partition -> {
OffsetFetchResponsePartition newPartition;
if (version < 2 && group.errorCode() != Errors.NONE.code()) {
// Versions prior to version 2 do not support a top level error. Therefore,
// we put it at the partition level.
newPartition = new OffsetFetchResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(group.errorCode())
.setCommittedOffset(INVALID_OFFSET)
.setMetadata(NO_METADATA)
.setCommittedLeaderEpoch(NO_PARTITION_LEADER_EPOCH);
} else {
newPartition = new OffsetFetchResponsePartition()
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(partition.errorCode())
.setCommittedOffset(partition.committedOffset())
.setMetadata(partition.metadata())
.setCommittedLeaderEpoch(partition.committedLeaderEpoch());
}
newTopic.partitions().add(newPartition);
});
});
}
}
public OffsetFetchResponse(OffsetFetchResponseData data, short version) {
super(ApiKeys.OFFSET_FETCH);
this.data = data;
// For version 2 and later, use the top-level error code from the response.
// For older versions there is no top-level error in the response and all errors are partition errors,
// so if there is a group or coordinator error at the partition level, use that as the top-level error.
// This way clients can depend on the top-level error regardless of the offset fetch version.
// Errors are returned differently starting with version 8, so the error field is only
// populated for versions 2 through 7; for version 8 and above, the map of group id to
// error code is populated instead.
if (version < 8) {
this.error = version >= 2 ? Errors.forCode(data.errorCode()) : topLevelError(data);
} else {
for (OffsetFetchResponseGroup group : data.groups()) {
this.groupLevelErrors.put(group.groupId(), Errors.forCode(group.errorCode()));
}
this.error = null;
}
}
private static Errors topLevelError(OffsetFetchResponseData data) {
for (OffsetFetchResponseTopic topic : data.topics()) {
for (OffsetFetchResponsePartition partition : topic.partitions()) {
Errors partitionError = Errors.forCode(partition.errorCode());
if (partitionError != Errors.NONE && !PARTITION_ERRORS.contains(partitionError)) {
return partitionError;
}
}
}
return Errors.NONE;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
public boolean hasError() {
return error != Errors.NONE;
}
public boolean groupHasError(String groupId) {
Errors error = groupLevelErrors.get(groupId);
if (error == null) {
return this.error != null && this.error != Errors.NONE;
}
return error != Errors.NONE;
}
public Errors error() {
return error;
}
public Errors groupLevelError(String groupId) {
if (error != null) {
return error;
}
return groupLevelErrors.get(groupId);
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> counts = new HashMap<>();
if (!groupLevelErrors.isEmpty()) {
// built response with v8 or above
for (Map.Entry<String, Errors> entry : groupLevelErrors.entrySet()) {
updateErrorCounts(counts, entry.getValue());
}
for (OffsetFetchResponseGroup group : data.groups()) {
group.topics().forEach(topic ->
topic.partitions().forEach(partition ->
updateErrorCounts(counts, Errors.forCode(partition.errorCode()))));
}
} else {
// built response with v0-v7
updateErrorCounts(counts, error);
data.topics().forEach(topic ->
topic.partitions().forEach(partition ->
updateErrorCounts(counts, Errors.forCode(partition.errorCode()))));
}
return counts;
}
// package-private for testing purposes
Map<TopicPartition, PartitionData> responseDataV0ToV7() {
Map<TopicPartition, PartitionData> responseData = new HashMap<>();
for (OffsetFetchResponseTopic topic : data.topics()) {
for (OffsetFetchResponsePartition partition : topic.partitions()) {
responseData.put(new TopicPartition(topic.name(), partition.partitionIndex()),
new PartitionData(partition.committedOffset(),
RequestUtils.getLeaderEpoch(partition.committedLeaderEpoch()),
partition.metadata(),
Errors.forCode(partition.errorCode()))
);
}
}
return responseData;
}
private Map<TopicPartition, PartitionData> buildResponseData(String groupId) {
Map<TopicPartition, PartitionData> responseData = new HashMap<>();
OffsetFetchResponseGroup group = data
.groups()
.stream()
.filter(g -> g.groupId().equals(groupId))
.findFirst()
.get();
for (OffsetFetchResponseTopics topic : group.topics()) {
for (OffsetFetchResponsePartitions partition : topic.partitions()) {
responseData.put(new TopicPartition(topic.name(), partition.partitionIndex()),
new PartitionData(partition.committedOffset(),
RequestUtils.getLeaderEpoch(partition.committedLeaderEpoch()),
partition.metadata(),
Errors.forCode(partition.errorCode()))
);
}
}
return responseData;
}
public Map<TopicPartition, PartitionData> partitionDataMap(String groupId) {
if (groupLevelErrors.isEmpty()) {
return responseDataV0ToV7();
}
return buildResponseData(groupId);
}
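// A minimal usage sketch (illustrative, not part of the original class; group, topic and
// offset values are hypothetical, and java.util.Collections is assumed to be imported).
// It builds a v8-style response via the map-based constructor above and reads it back
// through the group-aware accessors.
//
// Map<TopicPartition, PartitionData> offsets = new HashMap<>();
// offsets.put(new TopicPartition("my-topic", 0),
// new PartitionData(42L, Optional.of(5), NO_METADATA, Errors.NONE));
// OffsetFetchResponse response = new OffsetFetchResponse(
// 0,
// Collections.singletonMap("my-group", Errors.NONE),
// Collections.singletonMap("my-group", offsets));
// boolean failed = response.groupHasError("my-group"); // false
// Map<TopicPartition, PartitionData> read = response.partitionDataMap("my-group"); // one entry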
public static OffsetFetchResponse parse(ByteBuffer buffer, short version) {
return new OffsetFetchResponse(new OffsetFetchResponseData(new ByteBufferAccessor(buffer), version), version);
}
@Override
public OffsetFetchResponseData data() {
return data;
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 4;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData;
import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopicCollection;
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData;
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset;
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.OffsetForLeaderTopicResult;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import static org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH;
import static org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET;
public class OffsetsForLeaderEpochRequest extends AbstractRequest {
/**
* Sentinel replica_id value to indicate a regular consumer rather than another broker
*/
public static final int CONSUMER_REPLICA_ID = -1;
/**
* Sentinel replica_id which indicates either a debug consumer or a replica which is using
* an old version of the protocol.
*/
public static final int DEBUGGING_REPLICA_ID = -2;
private final OffsetForLeaderEpochRequestData data;
public static class Builder extends AbstractRequest.Builder<OffsetsForLeaderEpochRequest> {
private final OffsetForLeaderEpochRequestData data;
Builder(short oldestAllowedVersion, short latestAllowedVersion, OffsetForLeaderEpochRequestData data) {
super(ApiKeys.OFFSET_FOR_LEADER_EPOCH, oldestAllowedVersion, latestAllowedVersion);
this.data = data;
}
public static Builder forConsumer(OffsetForLeaderTopicCollection epochsByPartition) {
// Old versions of this API require CLUSTER permission which is not typically granted
// to clients. Beginning with version 3, the broker requires only TOPIC Describe
// permission for the topic of each requested partition. In order to ensure client
// compatibility, we only send this request when we can guarantee the relaxed permissions.
OffsetForLeaderEpochRequestData data = new OffsetForLeaderEpochRequestData();
data.setReplicaId(CONSUMER_REPLICA_ID);
data.setTopics(epochsByPartition);
return new Builder((short) 3, ApiKeys.OFFSET_FOR_LEADER_EPOCH.latestVersion(), data);
}
public static Builder forFollower(short version, OffsetForLeaderTopicCollection epochsByPartition, int replicaId) {
OffsetForLeaderEpochRequestData data = new OffsetForLeaderEpochRequestData();
data.setReplicaId(replicaId);
data.setTopics(epochsByPartition);
return new Builder(version, version, data);
}
@Override
public OffsetsForLeaderEpochRequest build(short version) {
if (version < oldestAllowedVersion() || version > latestAllowedVersion())
throw new UnsupportedVersionException("Cannot build " + this + " with version " + version);
return new OffsetsForLeaderEpochRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
}
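// A minimal usage sketch (illustrative; the topic name is hypothetical and
// OffsetForLeaderTopic is assumed to expose a setTopic setter as in the generated
// message classes). Consumer-side construction pins the minimum version to 3 so that
// only TOPIC Describe permission is required on the broker.
//
// OffsetForLeaderTopicCollection topics = new OffsetForLeaderTopicCollection();
// topics.add(new OffsetForLeaderEpochRequestData.OffsetForLeaderTopic().setTopic("my-topic"));
// OffsetsForLeaderEpochRequest request = Builder.forConsumer(topics).build((short) 3);
// // request.replicaId() == CONSUMER_REPLICA_ID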
public OffsetsForLeaderEpochRequest(OffsetForLeaderEpochRequestData data, short version) {
super(ApiKeys.OFFSET_FOR_LEADER_EPOCH, version);
this.data = data;
}
@Override
public OffsetForLeaderEpochRequestData data() {
return data;
}
public int replicaId() {
return data.replicaId();
}
public static OffsetsForLeaderEpochRequest parse(ByteBuffer buffer, short version) {
return new OffsetsForLeaderEpochRequest(new OffsetForLeaderEpochRequestData(new ByteBufferAccessor(buffer), version), version);
}
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
Errors error = Errors.forException(e);
OffsetForLeaderEpochResponseData responseData = new OffsetForLeaderEpochResponseData();
data.topics().forEach(topic -> {
OffsetForLeaderTopicResult topicData = new OffsetForLeaderTopicResult()
.setTopic(topic.topic());
topic.partitions().forEach(partition ->
topicData.partitions().add(new EpochEndOffset()
.setPartition(partition.partition())
.setErrorCode(error.code())
.setLeaderEpoch(UNDEFINED_EPOCH)
.setEndOffset(UNDEFINED_EPOCH_OFFSET)));
responseData.topics().add(topicData);
});
return new OffsetsForLeaderEpochResponse(responseData);
}
/**
* Check whether a broker allows Topic-level permissions in order to use the
* OffsetForLeaderEpoch API. Old versions require Cluster permission.
*/
public static boolean supportsTopicPermission(short latestUsableVersion) {
return latestUsableVersion >= 3;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/OffsetsForLeaderEpochResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import static org.apache.kafka.common.record.RecordBatch.NO_PARTITION_LEADER_EPOCH;
/**
* Possible error codes:
* - {@link Errors#TOPIC_AUTHORIZATION_FAILED} If the user does not have DESCRIBE access to a requested topic
* - {@link Errors#REPLICA_NOT_AVAILABLE} If the request is received by a broker with version < 2.6 which is not a replica
* - {@link Errors#NOT_LEADER_OR_FOLLOWER} If the broker is not a leader or follower and either the provided leader epoch
* matches the known leader epoch on the broker or is empty
* - {@link Errors#FENCED_LEADER_EPOCH} If the epoch is lower than the broker's epoch
* - {@link Errors#UNKNOWN_LEADER_EPOCH} If the epoch is larger than the broker's epoch
* - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION} If the broker does not have metadata for a topic or partition
* - {@link Errors#KAFKA_STORAGE_ERROR} If the log directory for one of the requested partitions is offline
* - {@link Errors#UNKNOWN_SERVER_ERROR} For any unexpected errors
*/
public class OffsetsForLeaderEpochResponse extends AbstractResponse {
public static final long UNDEFINED_EPOCH_OFFSET = NO_PARTITION_LEADER_EPOCH;
public static final int UNDEFINED_EPOCH = NO_PARTITION_LEADER_EPOCH;
private final OffsetForLeaderEpochResponseData data;
public OffsetsForLeaderEpochResponse(OffsetForLeaderEpochResponseData data) {
super(ApiKeys.OFFSET_FOR_LEADER_EPOCH);
this.data = data;
}
@Override
public OffsetForLeaderEpochResponseData data() {
return data;
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> errorCounts = new HashMap<>();
data.topics().forEach(topic ->
topic.partitions().forEach(partition ->
updateErrorCounts(errorCounts, Errors.forCode(partition.errorCode()))));
return errorCounts;
}
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
public static OffsetsForLeaderEpochResponse parse(ByteBuffer buffer, short version) {
return new OffsetsForLeaderEpochResponse(new OffsetForLeaderEpochResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public String toString() {
return data.toString();
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ProduceRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.InvalidRecordException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.UnsupportedCompressionTypeException;
import org.apache.kafka.common.message.ProduceRequestData;
import org.apache.kafka.common.message.ProduceResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.BaseRecords;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.Records;
import org.apache.kafka.common.utils.Utils;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.stream.Collectors;
import static org.apache.kafka.common.requests.ProduceResponse.INVALID_OFFSET;
public class ProduceRequest extends AbstractRequest {
public static Builder forMagic(byte magic, ProduceRequestData data) {
// Message format upgrades correspond with a bump in the produce request version. Older
// message format versions are generally not supported by the produce request versions
// following the bump.
final short minVersion;
final short maxVersion;
if (magic < RecordBatch.MAGIC_VALUE_V2) {
minVersion = 2;
maxVersion = 2;
} else {
minVersion = 3;
maxVersion = ApiKeys.PRODUCE.latestVersion();
}
return new Builder(minVersion, maxVersion, data);
}
public static Builder forCurrentMagic(ProduceRequestData data) {
return forMagic(RecordBatch.CURRENT_MAGIC_VALUE, data);
}
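// A brief sketch of the magic-to-version mapping above (illustrative only; acks and
// timeout values are hypothetical). Magic v0/v1 pins the request to version 2, while
// the current magic (v2) allows versions 3 through the latest supported.
//
// ProduceRequestData data = new ProduceRequestData().setAcks((short) -1).setTimeoutMs(30000);
// ProduceRequest.Builder builder = ProduceRequest.forMagic(RecordBatch.MAGIC_VALUE_V2, data);
// // builder.oldestAllowedVersion() == 3
// // builder.latestAllowedVersion() == ApiKeys.PRODUCE.latestVersion()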
public static class Builder extends AbstractRequest.Builder<ProduceRequest> {
private final ProduceRequestData data;
public Builder(short minVersion,
short maxVersion,
ProduceRequestData data) {
super(ApiKeys.PRODUCE, minVersion, maxVersion);
this.data = data;
}
@Override
public ProduceRequest build(short version) {
return build(version, true);
}
// Visible for testing only
public ProduceRequest buildUnsafe(short version) {
return build(version, false);
}
private ProduceRequest build(short version, boolean validate) {
if (validate) {
// Validate the given records first
data.topicData().forEach(tpd ->
tpd.partitionData().forEach(partitionProduceData ->
ProduceRequest.validateRecords(version, partitionProduceData.records())));
}
return new ProduceRequest(data, version);
}
@Override
public String toString() {
StringBuilder bld = new StringBuilder();
bld.append("(type=ProduceRequest")
.append(", acks=").append(data.acks())
.append(", timeout=").append(data.timeoutMs())
.append(", partitionRecords=(").append(data.topicData().stream().flatMap(d -> d.partitionData().stream()).collect(Collectors.toList()))
.append("), transactionalId='").append(data.transactionalId() != null ? data.transactionalId() : "")
.append("'");
return bld.toString();
}
}
/**
* We have to copy acks, timeout, transactionalId and partitionSizes from data since data may be reset
* to eliminate the reference to ByteBuffer, but this metadata is still useful.
*/
private final short acks;
private final int timeout;
private final String transactionalId;
// This is set to null by `clearPartitionRecords` to prevent unnecessary memory retention when a produce request is
// put in the purgatory (due to client throttling, it can take a while before the response is sent).
// Care should be taken in methods that use this field.
private volatile ProduceRequestData data;
// partitionSizes is lazily initialized since it is only needed on the server side in production.
private volatile Map<TopicPartition, Integer> partitionSizes;
public ProduceRequest(ProduceRequestData produceRequestData, short version) {
super(ApiKeys.PRODUCE, version);
this.data = produceRequestData;
this.acks = data.acks();
this.timeout = data.timeoutMs();
this.transactionalId = data.transactionalId();
}
// visible for testing
Map<TopicPartition, Integer> partitionSizes() {
if (partitionSizes == null) {
// this method may be called by a different thread (see the comment on data)
synchronized (this) {
if (partitionSizes == null) {
partitionSizes = new HashMap<>();
data.topicData().forEach(topicData ->
topicData.partitionData().forEach(partitionData ->
partitionSizes.compute(new TopicPartition(topicData.name(), partitionData.index()),
(ignored, previousValue) ->
partitionData.records().sizeInBytes() + (previousValue == null ? 0 : previousValue))
)
);
}
}
}
return partitionSizes;
}
/**
* @return the request data
* @throws IllegalStateException if the data has been cleared by {@code clearPartitionRecords()} to prevent unnecessary memory retention
*/
@Override
public ProduceRequestData data() {
// Store it in a local variable to protect against concurrent updates
ProduceRequestData tmp = data;
if (tmp == null)
throw new IllegalStateException("The partition records are no longer available because clearPartitionRecords() has been invoked.");
return tmp;
}
@Override
public String toString(boolean verbose) {
// Use the same format as `Struct.toString()`
StringBuilder bld = new StringBuilder();
bld.append("{acks=").append(acks)
.append(",timeout=").append(timeout);
if (verbose)
bld.append(",partitionSizes=").append(Utils.mkString(partitionSizes(), "[", "]", "=", ","));
else
bld.append(",numPartitions=").append(partitionSizes().size());
bld.append("}");
return bld.toString();
}
@Override
public ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e) {
/* In case the producer doesn't actually want any response */
if (acks == 0) return null;
ApiError apiError = ApiError.fromThrowable(e);
ProduceResponseData data = new ProduceResponseData().setThrottleTimeMs(throttleTimeMs);
partitionSizes().forEach((tp, ignored) -> {
ProduceResponseData.TopicProduceResponse tpr = data.responses().find(tp.topic());
if (tpr == null) {
tpr = new ProduceResponseData.TopicProduceResponse().setName(tp.topic());
data.responses().add(tpr);
}
tpr.partitionResponses().add(new ProduceResponseData.PartitionProduceResponse()
.setIndex(tp.partition())
.setRecordErrors(Collections.emptyList())
.setBaseOffset(INVALID_OFFSET)
.setLogAppendTimeMs(RecordBatch.NO_TIMESTAMP)
.setLogStartOffset(INVALID_OFFSET)
.setErrorMessage(apiError.message())
.setErrorCode(apiError.error().code()));
});
return new ProduceResponse(data);
}
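// Illustrative note (not part of the original class; values are hypothetical, and
// org.apache.kafka.common.errors.TimeoutException is assumed to be imported): with
// acks=0 the producer expects no response at all, so getErrorResponse deliberately
// returns null instead of building an error response.
//
// ProduceRequestData data = new ProduceRequestData().setAcks((short) 0).setTimeoutMs(1000);
// ProduceRequest request = new ProduceRequest(data, (short) 9);
// // request.getErrorResponse(0, new TimeoutException()) == null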
@Override
public Map<Errors, Integer> errorCounts(Throwable e) {
Errors error = Errors.forException(e);
return Collections.singletonMap(error, partitionSizes().size());
}
public short acks() {
return acks;
}
public int timeout() {
return timeout;
}
public String transactionalId() {
return transactionalId;
}
public void clearPartitionRecords() {
// lazily initialize partitionSizes.
partitionSizes();
data = null;
}
public static void validateRecords(short version, BaseRecords baseRecords) {
if (version >= 3) {
if (baseRecords instanceof Records) {
Records records = (Records) baseRecords;
Iterator<? extends RecordBatch> iterator = records.batches().iterator();
if (!iterator.hasNext())
throw new InvalidRecordException("Produce requests with version " + version + " must have at least " +
"one record batch");
RecordBatch entry = iterator.next();
if (entry.magic() != RecordBatch.MAGIC_VALUE_V2)
throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to " +
"contain record batches with magic version 2");
if (version < 7 && entry.compressionType() == CompressionType.ZSTD) {
throw new UnsupportedCompressionTypeException("Produce requests with version " + version + " are not allowed to " +
"use ZStandard compression");
}
if (iterator.hasNext())
throw new InvalidRecordException("Produce requests with version " + version + " are only allowed to " +
"contain exactly one record batch");
}
}
// Note that we do not do similar validation for older versions to ensure compatibility with
// clients which send the wrong magic version in the wrong version of the produce request. The broker
// did not do this validation before, so we maintain that behavior here.
}
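// A validation sketch (illustrative; assumes MemoryRecords and SimpleRecord from
// org.apache.kafka.common.record are imported). ZStandard batches pass validation
// from version 7 on and are rejected below it.
//
// MemoryRecords records = MemoryRecords.withRecords(CompressionType.ZSTD,
// new SimpleRecord("key".getBytes(), "value".getBytes()));
// ProduceRequest.validateRecords((short) 7, records); // passes
// ProduceRequest.validateRecords((short) 3, records); // throws UnsupportedCompressionTypeException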
public static ProduceRequest parse(ByteBuffer buffer, short version) {
return new ProduceRequest(new ProduceRequestData(new ByteBufferAccessor(buffer), version), version);
}
public static byte requiredMagicForVersion(short produceRequestVersion) {
if (produceRequestVersion < ApiKeys.PRODUCE.oldestVersion() || produceRequestVersion > ApiKeys.PRODUCE.latestVersion())
throw new IllegalArgumentException("Magic value to use for produce request version " +
produceRequestVersion + " is not known");
switch (produceRequestVersion) {
case 0:
case 1:
return RecordBatch.MAGIC_VALUE_V0;
case 2:
return RecordBatch.MAGIC_VALUE_V1;
default:
return RecordBatch.MAGIC_VALUE_V2;
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ProduceResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.ProduceResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.RecordBatch;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
/**
* This wrapper supports both v0 and v8 of ProduceResponse.
*
* Possible error code:
*
* {@link Errors#CORRUPT_MESSAGE}
* {@link Errors#UNKNOWN_TOPIC_OR_PARTITION}
* {@link Errors#NOT_LEADER_OR_FOLLOWER}
* {@link Errors#MESSAGE_TOO_LARGE}
* {@link Errors#INVALID_TOPIC_EXCEPTION}
* {@link Errors#RECORD_LIST_TOO_LARGE}
* {@link Errors#NOT_ENOUGH_REPLICAS}
* {@link Errors#NOT_ENOUGH_REPLICAS_AFTER_APPEND}
* {@link Errors#INVALID_REQUIRED_ACKS}
* {@link Errors#TOPIC_AUTHORIZATION_FAILED}
* {@link Errors#UNSUPPORTED_FOR_MESSAGE_FORMAT}
* {@link Errors#INVALID_PRODUCER_EPOCH}
* {@link Errors#CLUSTER_AUTHORIZATION_FAILED}
* {@link Errors#TRANSACTIONAL_ID_AUTHORIZATION_FAILED}
* {@link Errors#INVALID_RECORD}
*/
public class ProduceResponse extends AbstractResponse {
public static final long INVALID_OFFSET = -1L;
private final ProduceResponseData data;
public ProduceResponse(ProduceResponseData produceResponseData) {
super(ApiKeys.PRODUCE);
this.data = produceResponseData;
}
/**
* Constructor for Version 0
* @param responses Produced data grouped by topic-partition
*/
@Deprecated
public ProduceResponse(Map<TopicPartition, PartitionResponse> responses) {
this(responses, DEFAULT_THROTTLE_TIME);
}
/**
* Constructor for the latest version
* @param responses Produced data grouped by topic-partition
* @param throttleTimeMs Time in milliseconds the response was throttled
*/
@Deprecated
public ProduceResponse(Map<TopicPartition, PartitionResponse> responses, int throttleTimeMs) {
this(toData(responses, throttleTimeMs));
}
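// A minimal usage sketch (illustrative; topic and offset values are hypothetical): the
// deprecated map-based constructors funnel through toData below, so they produce the
// same wire object as the ProduceResponseData constructor.
//
// Map<TopicPartition, PartitionResponse> responses = Collections.singletonMap(
// new TopicPartition("my-topic", 0),
// new PartitionResponse(Errors.NONE, 100L, RecordBatch.NO_TIMESTAMP, 0L));
// ProduceResponse response = new ProduceResponse(responses, 0);
// // response.throttleTimeMs() == 0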
private static ProduceResponseData toData(Map<TopicPartition, PartitionResponse> responses, int throttleTimeMs) {
ProduceResponseData data = new ProduceResponseData().setThrottleTimeMs(throttleTimeMs);
responses.forEach((tp, response) -> {
ProduceResponseData.TopicProduceResponse tpr = data.responses().find(tp.topic());
if (tpr == null) {
tpr = new ProduceResponseData.TopicProduceResponse().setName(tp.topic());
data.responses().add(tpr);
}
tpr.partitionResponses()
.add(new ProduceResponseData.PartitionProduceResponse()
.setIndex(tp.partition())
.setBaseOffset(response.baseOffset)
.setLogStartOffset(response.logStartOffset)
.setLogAppendTimeMs(response.logAppendTime)
.setErrorMessage(response.errorMessage)
.setErrorCode(response.error.code())
.setRecordErrors(response.recordErrors
.stream()
.map(e -> new ProduceResponseData.BatchIndexAndErrorMessage()
.setBatchIndex(e.batchIndex)
.setBatchIndexErrorMessage(e.message))
.collect(Collectors.toList())));
});
return data;
}
@Override
public ProduceResponseData data() {
return this.data;
}
@Override
public int throttleTimeMs() {
return this.data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> errorCounts = new HashMap<>();
data.responses().forEach(t -> t.partitionResponses().forEach(p -> updateErrorCounts(errorCounts, Errors.forCode(p.errorCode()))));
return errorCounts;
}
public static final class PartitionResponse {
public Errors error;
public long baseOffset;
public long logAppendTime;
public long logStartOffset;
public List<RecordError> recordErrors;
public String errorMessage;
public PartitionResponse(Errors error) {
this(error, INVALID_OFFSET, RecordBatch.NO_TIMESTAMP, INVALID_OFFSET);
}
public PartitionResponse(Errors error, String errorMessage) {
this(error, INVALID_OFFSET, RecordBatch.NO_TIMESTAMP, INVALID_OFFSET, Collections.emptyList(), errorMessage);
}
public PartitionResponse(Errors error, long baseOffset, long logAppendTime, long logStartOffset) {
this(error, baseOffset, logAppendTime, logStartOffset, Collections.emptyList(), null);
}
public PartitionResponse(Errors error, long baseOffset, long logAppendTime, long logStartOffset, List<RecordError> recordErrors) {
this(error, baseOffset, logAppendTime, logStartOffset, recordErrors, null);
}
public PartitionResponse(Errors error, long baseOffset, long logAppendTime, long logStartOffset, List<RecordError> recordErrors, String errorMessage) {
this.error = error;
this.baseOffset = baseOffset;
this.logAppendTime = logAppendTime;
this.logStartOffset = logStartOffset;
this.recordErrors = recordErrors;
this.errorMessage = errorMessage;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
PartitionResponse that = (PartitionResponse) o;
return baseOffset == that.baseOffset &&
logAppendTime == that.logAppendTime &&
logStartOffset == that.logStartOffset &&
error == that.error &&
Objects.equals(recordErrors, that.recordErrors) &&
Objects.equals(errorMessage, that.errorMessage);
}
@Override
public int hashCode() {
return Objects.hash(error, baseOffset, logAppendTime, logStartOffset, recordErrors, errorMessage);
}
@Override
public String toString() {
StringBuilder b = new StringBuilder();
b.append('{');
b.append("error: ");
b.append(error);
b.append(",offset: ");
b.append(baseOffset);
b.append(",logAppendTime: ");
b.append(logAppendTime);
b.append(", logStartOffset: ");
b.append(logStartOffset);
b.append(", recordErrors: ");
b.append(recordErrors);
b.append(", errorMessage: ");
if (errorMessage != null) {
b.append(errorMessage);
} else {
b.append("null");
}
b.append('}');
return b.toString();
}
}
public static final class RecordError {
public final int batchIndex;
public final String message;
public RecordError(int batchIndex, String message) {
this.batchIndex = batchIndex;
this.message = message;
}
public RecordError(int batchIndex) {
this.batchIndex = batchIndex;
this.message = null;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
RecordError that = (RecordError) o;
return batchIndex == that.batchIndex &&
Objects.equals(message, that.message);
}
@Override
public int hashCode() {
return Objects.hash(batchIndex, message);
}
@Override
public String toString() {
return "RecordError("
+ "batchIndex=" + batchIndex
+ ", message=" + ((message == null) ? "null" : "'" + message + "'")
+ ")";
}
}
public static ProduceResponse parse(ByteBuffer buffer, short version) {
return new ProduceResponse(new ProduceResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 6;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/RenewDelegationTokenRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.nio.ByteBuffer;
import org.apache.kafka.common.message.RenewDelegationTokenRequestData;
import org.apache.kafka.common.message.RenewDelegationTokenResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
public class RenewDelegationTokenRequest extends AbstractRequest {
private final RenewDelegationTokenRequestData data;
public RenewDelegationTokenRequest(RenewDelegationTokenRequestData data, short version) {
super(ApiKeys.RENEW_DELEGATION_TOKEN, version);
this.data = data;
}
public static RenewDelegationTokenRequest parse(ByteBuffer buffer, short version) {
return new RenewDelegationTokenRequest(new RenewDelegationTokenRequestData(
new ByteBufferAccessor(buffer), version), version);
}
@Override
public RenewDelegationTokenRequestData data() {
return data;
}
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
return new RenewDelegationTokenResponse(
new RenewDelegationTokenResponseData()
.setThrottleTimeMs(throttleTimeMs)
.setErrorCode(Errors.forException(e).code()));
}
public static class Builder extends AbstractRequest.Builder<RenewDelegationTokenRequest> {
private final RenewDelegationTokenRequestData data;
public Builder(RenewDelegationTokenRequestData data) {
super(ApiKeys.RENEW_DELEGATION_TOKEN);
this.data = data;
}
@Override
public RenewDelegationTokenRequest build(short version) {
return new RenewDelegationTokenRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/RenewDelegationTokenResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import java.nio.ByteBuffer;
import java.util.Map;
import org.apache.kafka.common.message.RenewDelegationTokenResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
public class RenewDelegationTokenResponse extends AbstractResponse {
private final RenewDelegationTokenResponseData data;
public RenewDelegationTokenResponse(RenewDelegationTokenResponseData data) {
super(ApiKeys.RENEW_DELEGATION_TOKEN);
this.data = data;
}
public static RenewDelegationTokenResponse parse(ByteBuffer buffer, short version) {
return new RenewDelegationTokenResponse(new RenewDelegationTokenResponseData(
new ByteBufferAccessor(buffer), version));
}
@Override
public Map<Errors, Integer> errorCounts() {
return errorCounts(error());
}
@Override
public RenewDelegationTokenResponseData data() {
return data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
public Errors error() {
return Errors.forCode(data.errorCode());
}
public long expiryTimestamp() {
return data.expiryTimestampMs();
}
public boolean hasError() {
return error() != Errors.NONE;
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 1;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/RequestAndSize.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
public class RequestAndSize {
public final AbstractRequest request;
public final int size;
public RequestAndSize(AbstractRequest request, int size) {
this.request = request;
this.size = size;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/RequestContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.errors.InvalidRequestException;
import org.apache.kafka.common.message.ApiVersionsRequestData;
import org.apache.kafka.common.network.ClientInformation;
import org.apache.kafka.common.network.ListenerName;
import org.apache.kafka.common.network.Send;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.auth.KafkaPrincipalSerde;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.server.authorizer.AuthorizableRequestContext;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.util.Optional;
import static org.apache.kafka.common.protocol.ApiKeys.API_VERSIONS;
public class RequestContext implements AuthorizableRequestContext {
public final RequestHeader header;
public final String connectionId;
public final InetAddress clientAddress;
public final KafkaPrincipal principal;
public final ListenerName listenerName;
public final SecurityProtocol securityProtocol;
public final ClientInformation clientInformation;
public final boolean fromPrivilegedListener;
public final Optional<KafkaPrincipalSerde> principalSerde;
public RequestContext(RequestHeader header,
String connectionId,
InetAddress clientAddress,
KafkaPrincipal principal,
ListenerName listenerName,
SecurityProtocol securityProtocol,
ClientInformation clientInformation,
boolean fromPrivilegedListener) {
this(header,
connectionId,
clientAddress,
principal,
listenerName,
securityProtocol,
clientInformation,
fromPrivilegedListener,
Optional.empty());
}
public RequestContext(RequestHeader header,
String connectionId,
InetAddress clientAddress,
KafkaPrincipal principal,
ListenerName listenerName,
SecurityProtocol securityProtocol,
ClientInformation clientInformation,
boolean fromPrivilegedListener,
Optional<KafkaPrincipalSerde> principalSerde) {
this.header = header;
this.connectionId = connectionId;
this.clientAddress = clientAddress;
this.principal = principal;
this.listenerName = listenerName;
this.securityProtocol = securityProtocol;
this.clientInformation = clientInformation;
this.fromPrivilegedListener = fromPrivilegedListener;
this.principalSerde = principalSerde;
}
public RequestAndSize parseRequest(ByteBuffer buffer) {
if (isUnsupportedApiVersionsRequest()) {
// Unsupported ApiVersion requests are treated as v0 requests and are not parsed
ApiVersionsRequest apiVersionsRequest = new ApiVersionsRequest(new ApiVersionsRequestData(), (short) 0, header.apiVersion());
return new RequestAndSize(apiVersionsRequest, 0);
} else {
ApiKeys apiKey = header.apiKey();
try {
short apiVersion = header.apiVersion();
return AbstractRequest.parseRequest(apiKey, apiVersion, buffer);
} catch (Throwable ex) {
throw new InvalidRequestException("Error getting request for apiKey: " + apiKey +
", apiVersion: " + header.apiVersion() +
", connectionId: " + connectionId +
", listenerName: " + listenerName +
", principal: " + principal, ex);
}
}
}
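// Illustrative note (not part of the original class; the context and buffer are
// hypothetical): an ApiVersions request whose version the broker does not support is
// parsed as an empty v0 request rather than rejected, so the broker can still answer
// with its supported version range.
//
// // header.apiKey() == ApiKeys.API_VERSIONS, header.apiVersion() unsupported
// RequestAndSize parsed = context.parseRequest(buffer);
// // parsed.request is an ApiVersionsRequest and parsed.size == 0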
/**
* Build a {@link Send} for direct transmission of the provided response
* over the network.
*/
public Send buildResponseSend(AbstractResponse body) {
return body.toSend(header.toResponseHeader(), apiVersion());
}
/**
* Serialize a response into a {@link ByteBuffer}. This is used when the response
* will be encapsulated in an {@link EnvelopeResponse}. The buffer will contain
* both the serialized {@link ResponseHeader} as well as the bytes from the response.
* There is no `size` prefix unlike the output from {@link #buildResponseSend(AbstractResponse)}.
*
* Note that envelope requests are reserved only for APIs which have set the
* {@link ApiKeys#forwardable} flag. Notably the `Fetch` API cannot be forwarded,
* so we do not lose the benefit of "zero copy" transfers from disk.
*/
public ByteBuffer buildResponseEnvelopePayload(AbstractResponse body) {
return body.serializeWithHeader(header.toResponseHeader(), apiVersion());
}
private boolean isUnsupportedApiVersionsRequest() {
return header.apiKey() == API_VERSIONS && !API_VERSIONS.isVersionSupported(header.apiVersion());
}
public short apiVersion() {
// Use v0 when serializing an unhandled ApiVersion response
if (isUnsupportedApiVersionsRequest())
return 0;
return header.apiVersion();
}
@Override
public String listenerName() {
return listenerName.value();
}
@Override
public SecurityProtocol securityProtocol() {
return securityProtocol;
}
@Override
public KafkaPrincipal principal() {
return principal;
}
@Override
public InetAddress clientAddress() {
return clientAddress;
}
@Override
public int requestType() {
return header.apiKey().id;
}
@Override
public int requestVersion() {
return header.apiVersion();
}
@Override
public String clientId() {
return header.clientId();
}
@Override
public int correlationId() {
return header.correlationId();
}
@Override
public String toString() {
return "RequestContext(" +
"header=" + header +
", connectionId='" + connectionId + '\'' +
", clientAddress=" + clientAddress +
", principal=" + principal +
", listenerName=" + listenerName +
", securityProtocol=" + securityProtocol +
", clientInformation=" + clientInformation +
", fromPrivilegedListener=" + fromPrivilegedListener +
", principalSerde=" + principalSerde +
')';
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/RequestHeader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.errors.InvalidRequestException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.RequestHeaderData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import java.nio.ByteBuffer;
import java.util.Objects;
/**
* The header for a request in the Kafka protocol
*/
public class RequestHeader implements AbstractRequestResponse {
private final static int SIZE_NOT_INITIALIZED = -1;
private final RequestHeaderData data;
private final short headerVersion;
private int size = SIZE_NOT_INITIALIZED;
public RequestHeader(ApiKeys requestApiKey, short requestVersion, String clientId, int correlationId) {
this(new RequestHeaderData().
setRequestApiKey(requestApiKey.id).
setRequestApiVersion(requestVersion).
setClientId(clientId).
setCorrelationId(correlationId),
requestApiKey.requestHeaderVersion(requestVersion));
}
public RequestHeader(RequestHeaderData data, short headerVersion) {
this.data = data;
this.headerVersion = headerVersion;
}
public ApiKeys apiKey() {
return ApiKeys.forId(data.requestApiKey());
}
public short apiVersion() {
return data.requestApiVersion();
}
public short headerVersion() {
return headerVersion;
}
public String clientId() {
return data.clientId();
}
public int correlationId() {
return data.correlationId();
}
public RequestHeaderData data() {
return data;
}
// Visible for testing.
void write(ByteBuffer buffer, ObjectSerializationCache serializationCache) {
data.write(new ByteBufferAccessor(buffer), serializationCache, headerVersion);
}
/**
* Calculates the size of {@link RequestHeader} in bytes.
*
* This method should be used to calculate the size only when it is immediately followed by a
* {@link #write(ByteBuffer, ObjectSerializationCache)} call. In that case, the ObjectSerializationCache
* helps to avoid serializing twice. In all other cases, {@link #size()} should be preferred.
*
* Note that every call to this method recalculates the size; {@link #size()} caches the result
* after the first invocation and should be preferred.
*
* Visible for testing.
*/
int size(ObjectSerializationCache serializationCache) {
this.size = data.size(serializationCache, headerVersion);
return size;
}
/**
* Returns the size of {@link RequestHeader} in bytes.
*
* Calls to this method are idempotent and inexpensive since it returns the cached value of size after the first
* invocation.
*/
public int size() {
if (this.size == SIZE_NOT_INITIALIZED) {
this.size = size(new ObjectSerializationCache());
}
return size;
}
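// A caching sketch (illustrative; client id and correlation id are hypothetical): the
// first size() call computes and caches the value, later calls return it directly.
//
// RequestHeader header = new RequestHeader(ApiKeys.API_VERSIONS, (short) 3, "client-1", 1);
// int first = header.size(); // computed
// int second = header.size(); // cached, first == second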
public ResponseHeader toResponseHeader() {
return new ResponseHeader(data.correlationId(), apiKey().responseHeaderVersion(apiVersion()));
}
public static RequestHeader parse(ByteBuffer buffer) {
short apiKey = -1;
try {
// We derive the header version from the request api version, so we read that first.
// The request api version is part of `RequestHeaderData`, so we reset the buffer position after the read.
int bufferStartPositionForHeader = buffer.position();
apiKey = buffer.getShort();
short apiVersion = buffer.getShort();
short headerVersion = ApiKeys.forId(apiKey).requestHeaderVersion(apiVersion);
buffer.position(bufferStartPositionForHeader);
final RequestHeaderData headerData = new RequestHeaderData(new ByteBufferAccessor(buffer), headerVersion);
// Due to a quirk in the protocol, client ID is marked as nullable.
// However, we treat a null client ID as equivalent to an empty client ID.
if (headerData.clientId() == null) {
headerData.setClientId("");
}
final RequestHeader header = new RequestHeader(headerData, headerVersion);
// The size of the header is the number of bytes the buffer's position advanced during parsing.
// Before parsing, the buffer's position points at the header data; after parsing, it points at
// the api message. For more information on how the buffer is constructed, see
// RequestUtils#serialize()
header.size = Math.max(buffer.position() - bufferStartPositionForHeader, 0);
return header;
} catch (UnsupportedVersionException e) {
throw new InvalidRequestException("Unknown API key " + apiKey, e);
} catch (Throwable ex) {
throw new InvalidRequestException("Error parsing request header. Our best guess of the apiKey is: " +
apiKey, ex);
}
}
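// A round-trip sketch (illustrative; api key, version and ids are hypothetical, and
// MetadataRequestData from org.apache.kafka.common.message is assumed to be imported):
// serialize a header together with a request body via RequestUtils.serialize, then
// parse the header back from the resulting buffer.
//
// RequestHeader header = new RequestHeader(ApiKeys.METADATA, (short) 9, "client-1", 42);
// ByteBuffer buffer = RequestUtils.serialize(header.data(), header.headerVersion(),
// new MetadataRequestData(), (short) 9);
// RequestHeader parsed = RequestHeader.parse(buffer);
// // parsed.equals(header); buffer's position now points at the MetadataRequestData bytes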
@Override
public String toString() {
return "RequestHeader(apiKey=" + apiKey() +
", apiVersion=" + apiVersion() +
", clientId=" + clientId() +
", correlationId=" + correlationId() +
", headerVersion=" + headerVersion +
")";
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
RequestHeader that = (RequestHeader) o;
return headerVersion == that.headerVersion &&
Objects.equals(data, that.data);
}
@Override
public int hashCode() {
return Objects.hash(data, headerVersion);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/RequestUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.ProduceRequestData;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Message;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.Records;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.Optional;
import java.util.function.Predicate;
public final class RequestUtils {
private RequestUtils() {}
public static Optional<Integer> getLeaderEpoch(int leaderEpoch) {
return leaderEpoch == RecordBatch.NO_PARTITION_LEADER_EPOCH ?
Optional.empty() : Optional.of(leaderEpoch);
}
public static boolean hasTransactionalRecords(ProduceRequest request) {
return flag(request, RecordBatch::isTransactional);
}
    /**
     * Tests a flag against the first record batch of each partition in a produce request.
     * @param request the produce request
     * @param predicate the predicate applied to a partition's first record batch
     * @return true if the predicate matches any tested batch in the produce request. Otherwise, false
     */
static boolean flag(ProduceRequest request, Predicate<RecordBatch> predicate) {
for (ProduceRequestData.TopicProduceData tp : request.data().topicData()) {
for (ProduceRequestData.PartitionProduceData p : tp.partitionData()) {
if (p.records() instanceof Records) {
Iterator<? extends RecordBatch> iter = (((Records) p.records())).batchIterator();
if (iter.hasNext() && predicate.test(iter.next())) return true;
}
}
}
return false;
}
public static ByteBuffer serialize(
Message header,
short headerVersion,
Message apiMessage,
short apiVersion
) {
ObjectSerializationCache cache = new ObjectSerializationCache();
int headerSize = header.size(cache, headerVersion);
int messageSize = apiMessage.size(cache, apiVersion);
ByteBufferAccessor writable = new ByteBufferAccessor(ByteBuffer.allocate(headerSize + messageSize));
header.write(writable, cache, headerVersion);
apiMessage.write(writable, cache, apiVersion);
writable.flip();
return writable.buffer();
}
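    // Layout of the buffer returned by serialize (sketch):
    //
    //   +------------------------+--------------------------+
    //   | header (headerVersion) | apiMessage (apiVersion)  |
    //   +------------------------+--------------------------+
    //   ^ position 0                                        ^ limit
    //
    // RequestHeader#parse and ResponseHeader#parse rely on this layout: parsing the header
    // leaves the buffer's position at the start of the api message.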
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/ResponseHeader.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.ResponseHeaderData;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.ObjectSerializationCache;
import java.nio.ByteBuffer;
import java.util.Objects;
/**
 * A response header in the Kafka protocol.
*/
public class ResponseHeader implements AbstractRequestResponse {
private final static int SIZE_NOT_INITIALIZED = -1;
private final ResponseHeaderData data;
private final short headerVersion;
private int size = SIZE_NOT_INITIALIZED;
public ResponseHeader(int correlationId, short headerVersion) {
this(new ResponseHeaderData().setCorrelationId(correlationId), headerVersion);
}
public ResponseHeader(ResponseHeaderData data, short headerVersion) {
this.data = data;
this.headerVersion = headerVersion;
}
/**
* Calculates the size of {@link ResponseHeader} in bytes.
*
     * This method should be used only when it is immediately followed by a
     * {@link #write(ByteBuffer, ObjectSerializationCache)} call; in that case, the
     * ObjectSerializationCache avoids serializing twice.
     *
     * Every invocation of this method recalculates the size. In all other cases,
     * {@link #size()} should be preferred instead.
*
* Visible for testing.
*/
int size(ObjectSerializationCache serializationCache) {
return data().size(serializationCache, headerVersion);
}
/**
* Returns the size of {@link ResponseHeader} in bytes.
*
* Calls to this method are idempotent and inexpensive since it returns the cached value of size after the first
* invocation.
*/
public int size() {
if (this.size == SIZE_NOT_INITIALIZED) {
this.size = size(new ObjectSerializationCache());
}
return size;
}
public int correlationId() {
return this.data.correlationId();
}
public short headerVersion() {
return headerVersion;
}
public ResponseHeaderData data() {
return data;
}
// visible for testing
void write(ByteBuffer buffer, ObjectSerializationCache serializationCache) {
data.write(new ByteBufferAccessor(buffer), serializationCache, headerVersion);
}
@Override
public String toString() {
return "ResponseHeader("
+ "correlationId=" + data.correlationId()
+ ", headerVersion=" + headerVersion
+ ")";
}
public static ResponseHeader parse(ByteBuffer buffer, short headerVersion) {
final int bufferStartPositionForHeader = buffer.position();
final ResponseHeader header = new ResponseHeader(
new ResponseHeaderData(new ByteBufferAccessor(buffer), headerVersion), headerVersion);
        // The size of the header equals the shift in the buffer's position during parsing.
        // Before parsing, the buffer's position points at the header data; after parsing, it
        // points at the api message. For more information on how the buffer is constructed,
        // see RequestUtils#serialize()
header.size = Math.max(buffer.position() - bufferStartPositionForHeader, 0);
return header;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ResponseHeader that = (ResponseHeader) o;
return headerVersion == that.headerVersion &&
Objects.equals(data, that.data);
}
@Override
public int hashCode() {
return Objects.hash(data, headerVersion);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/SaslAuthenticateRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.SaslAuthenticateRequestData;
import org.apache.kafka.common.message.SaslAuthenticateResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import java.nio.ByteBuffer;
/**
* Request from SASL client containing client SASL authentication token as defined by the
* SASL protocol for the configured SASL mechanism.
* <p/>
 * For interoperability with versions prior to Kafka 1.0.0, this request is used only with brokers at
 * version 1.0.0 and higher that support SaslHandshake request v1. Clients connecting to older
 * brokers send SaslHandshake request v0 followed by SASL tokens without the Kafka request headers.
*/
public class SaslAuthenticateRequest extends AbstractRequest {
public static class Builder extends AbstractRequest.Builder<SaslAuthenticateRequest> {
private final SaslAuthenticateRequestData data;
public Builder(SaslAuthenticateRequestData data) {
super(ApiKeys.SASL_AUTHENTICATE);
this.data = data;
}
@Override
public SaslAuthenticateRequest build(short version) {
return new SaslAuthenticateRequest(data, version);
}
@Override
public String toString() {
return "(type=SaslAuthenticateRequest)";
}
}
private final SaslAuthenticateRequestData data;
public SaslAuthenticateRequest(SaslAuthenticateRequestData data, short version) {
super(ApiKeys.SASL_AUTHENTICATE, version);
this.data = data;
}
@Override
public SaslAuthenticateRequestData data() {
return data;
}
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
ApiError apiError = ApiError.fromThrowable(e);
SaslAuthenticateResponseData response = new SaslAuthenticateResponseData()
.setErrorCode(apiError.error().code())
.setErrorMessage(apiError.message());
return new SaslAuthenticateResponse(response);
}
public static SaslAuthenticateRequest parse(ByteBuffer buffer, short version) {
return new SaslAuthenticateRequest(new SaslAuthenticateRequestData(new ByteBufferAccessor(buffer), version),
version);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/SaslAuthenticateResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.SaslAuthenticateResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
/**
 * Response from the SASL server containing a SASL challenge, as defined by the SASL protocol
 * for the mechanism configured for the client.
*/
public class SaslAuthenticateResponse extends AbstractResponse {
private final SaslAuthenticateResponseData data;
public SaslAuthenticateResponse(SaslAuthenticateResponseData data) {
super(ApiKeys.SASL_AUTHENTICATE);
this.data = data;
}
/**
* Possible error codes:
* SASL_AUTHENTICATION_FAILED(57) : Authentication failed
*/
public Errors error() {
return Errors.forCode(data.errorCode());
}
@Override
public Map<Errors, Integer> errorCounts() {
return errorCounts(Errors.forCode(data.errorCode()));
}
public String errorMessage() {
return data.errorMessage();
}
public long sessionLifetimeMs() {
return data.sessionLifetimeMs();
}
public byte[] saslAuthBytes() {
return data.authBytes();
}
@Override
public int throttleTimeMs() {
return DEFAULT_THROTTLE_TIME;
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
// Not supported by the response schema
}
@Override
public SaslAuthenticateResponseData data() {
return data;
}
public static SaslAuthenticateResponse parse(ByteBuffer buffer, short version) {
return new SaslAuthenticateResponse(new SaslAuthenticateResponseData(new ByteBufferAccessor(buffer), version));
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/SaslHandshakeRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.SaslHandshakeRequestData;
import org.apache.kafka.common.message.SaslHandshakeResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import java.nio.ByteBuffer;
/**
* Request from SASL client containing client SASL mechanism.
* <p/>
* For interoperability with Kafka 0.9.0.x, the mechanism flow may be omitted when using GSSAPI. Hence
* this request should not conflict with the first GSSAPI client packet. For GSSAPI, the first context
* establishment packet starts with byte 0x60 (APPLICATION-0 tag) followed by a variable-length encoded size.
 * This handshake request starts with a request header whose two-byte API key is set to 17, followed
 * by a mechanism name, making it easy to distinguish from a GSSAPI packet.
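 * <p/>
 * As an illustration (assuming the standard big-endian encoding of the request header's int16 fields),
 * the first bytes on the wire are:
 * <pre>
 *   SaslHandshake request: 0x00 0x11 ...   (two-byte API key 17)
 *   GSSAPI first packet:   0x60 ...        (APPLICATION-0 tag)
 * </pre>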
*/
public class SaslHandshakeRequest extends AbstractRequest {
public static class Builder extends AbstractRequest.Builder<SaslHandshakeRequest> {
private final SaslHandshakeRequestData data;
public Builder(SaslHandshakeRequestData data) {
super(ApiKeys.SASL_HANDSHAKE);
this.data = data;
}
@Override
public SaslHandshakeRequest build(short version) {
return new SaslHandshakeRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
}
private final SaslHandshakeRequestData data;
public SaslHandshakeRequest(SaslHandshakeRequestData data, short version) {
super(ApiKeys.SASL_HANDSHAKE, version);
this.data = data;
}
@Override
public SaslHandshakeRequestData data() {
return data;
}
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
SaslHandshakeResponseData response = new SaslHandshakeResponseData();
response.setErrorCode(ApiError.fromThrowable(e).error().code());
return new SaslHandshakeResponse(response);
}
public static SaslHandshakeRequest parse(ByteBuffer buffer, short version) {
return new SaslHandshakeRequest(new SaslHandshakeRequestData(new ByteBufferAccessor(buffer), version), version);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/SaslHandshakeResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.SaslHandshakeResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
/**
 * Response from the SASL server indicating whether the client-chosen mechanism is enabled on the server.
 * For error responses, the list of enabled mechanisms is included in the response.
*/
public class SaslHandshakeResponse extends AbstractResponse {
private final SaslHandshakeResponseData data;
public SaslHandshakeResponse(SaslHandshakeResponseData data) {
super(ApiKeys.SASL_HANDSHAKE);
this.data = data;
}
/*
* Possible error codes:
* UNSUPPORTED_SASL_MECHANISM(33): Client mechanism not enabled in server
* ILLEGAL_SASL_STATE(34) : Invalid request during SASL handshake
*/
public Errors error() {
return Errors.forCode(data.errorCode());
}
@Override
public Map<Errors, Integer> errorCounts() {
return errorCounts(Errors.forCode(data.errorCode()));
}
@Override
public int throttleTimeMs() {
return DEFAULT_THROTTLE_TIME;
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
// Not supported by the response schema
}
@Override
public SaslHandshakeResponseData data() {
return data;
}
public List<String> enabledMechanisms() {
return data.mechanisms();
}
public static SaslHandshakeResponse parse(ByteBuffer buffer, short version) {
return new SaslHandshakeResponse(new SaslHandshakeResponseData(new ByteBufferAccessor(buffer), version));
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/StopReplicaRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.StopReplicaRequestData;
import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaPartitionState;
import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaPartitionV0;
import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaTopicV1;
import org.apache.kafka.common.message.StopReplicaRequestData.StopReplicaTopicState;
import org.apache.kafka.common.message.StopReplicaResponseData;
import org.apache.kafka.common.message.StopReplicaResponseData.StopReplicaPartitionError;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.utils.MappedIterator;
import org.apache.kafka.common.utils.Utils;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
public class StopReplicaRequest extends AbstractControlRequest {
public static class Builder extends AbstractControlRequest.Builder<StopReplicaRequest> {
private final boolean deletePartitions;
private final List<StopReplicaTopicState> topicStates;
public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
boolean deletePartitions, List<StopReplicaTopicState> topicStates) {
this(version, controllerId, controllerEpoch, brokerEpoch, deletePartitions,
topicStates, false);
}
public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
boolean deletePartitions, List<StopReplicaTopicState> topicStates,
boolean kraftController) {
super(ApiKeys.STOP_REPLICA, version, controllerId, controllerEpoch, brokerEpoch, kraftController);
this.deletePartitions = deletePartitions;
this.topicStates = topicStates;
}
public StopReplicaRequest build(short version) {
StopReplicaRequestData data = new StopReplicaRequestData()
.setControllerId(controllerId)
.setControllerEpoch(controllerEpoch)
.setBrokerEpoch(brokerEpoch);
if (version >= 4) {
data.setIsKRaftController(kraftController);
}
if (version >= 3) {
data.setTopicStates(topicStates);
} else if (version >= 1) {
data.setDeletePartitions(deletePartitions);
List<StopReplicaTopicV1> topics = topicStates.stream().map(topic ->
new StopReplicaTopicV1()
.setName(topic.topicName())
.setPartitionIndexes(topic.partitionStates().stream()
.map(StopReplicaPartitionState::partitionIndex)
.collect(Collectors.toList())))
.collect(Collectors.toList());
data.setTopics(topics);
} else {
data.setDeletePartitions(deletePartitions);
List<StopReplicaPartitionV0> partitions = topicStates.stream().flatMap(topic ->
topic.partitionStates().stream().map(partition ->
new StopReplicaPartitionV0()
.setTopicName(topic.topicName())
.setPartitionIndex(partition.partitionIndex())))
.collect(Collectors.toList());
data.setUngroupedPartitions(partitions);
}
return new StopReplicaRequest(data, version);
}
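        // Summary of the per-version wire grouping performed in build() above:
        //   v0:   ungrouped (topic, partition) pairs plus a single deletePartitions flag
        //   v1-2: partition indexes grouped by topic plus a single deletePartitions flag
        //   v3+:  full topic states, each partition state carrying its own deletePartition flag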
@Override
public String toString() {
StringBuilder bld = new StringBuilder();
bld.append("(type=StopReplicaRequest").
append(", controllerId=").append(controllerId).
append(", controllerEpoch=").append(controllerEpoch).
append(", brokerEpoch=").append(brokerEpoch).
append(", deletePartitions=").append(deletePartitions).
append(", topicStates=").append(Utils.join(topicStates, ",")).
append(")");
return bld.toString();
}
}
private final StopReplicaRequestData data;
private StopReplicaRequest(StopReplicaRequestData data, short version) {
super(ApiKeys.STOP_REPLICA, version);
this.data = data;
}
@Override
public StopReplicaResponse getErrorResponse(int throttleTimeMs, Throwable e) {
Errors error = Errors.forException(e);
StopReplicaResponseData data = new StopReplicaResponseData();
data.setErrorCode(error.code());
List<StopReplicaPartitionError> partitions = new ArrayList<>();
for (StopReplicaTopicState topic : topicStates()) {
for (StopReplicaPartitionState partition : topic.partitionStates()) {
partitions.add(new StopReplicaPartitionError()
.setTopicName(topic.topicName())
.setPartitionIndex(partition.partitionIndex())
.setErrorCode(error.code()));
}
}
data.setPartitionErrors(partitions);
return new StopReplicaResponse(data);
}
/**
* Note that this method has allocation overhead per iterated element, so callers should copy the result into
* another collection if they need to iterate more than once.
*
* Implementation note: we should strive to avoid allocation overhead per element, see
     * `UpdateMetadataRequest.partitionStates()` for the preferred approach. That's not possible in this case,
     * and StopReplicaRequest should be relatively rare in comparison to other request types.
*/
public Iterable<StopReplicaTopicState> topicStates() {
if (version() < 1) {
Map<String, StopReplicaTopicState> topicStates = new HashMap<>();
for (StopReplicaPartitionV0 partition : data.ungroupedPartitions()) {
StopReplicaTopicState topicState = topicStates.computeIfAbsent(partition.topicName(),
topic -> new StopReplicaTopicState().setTopicName(topic));
topicState.partitionStates().add(new StopReplicaPartitionState()
.setPartitionIndex(partition.partitionIndex())
.setDeletePartition(data.deletePartitions()));
}
return topicStates.values();
} else if (version() < 3) {
return () -> new MappedIterator<>(data.topics().iterator(), topic ->
new StopReplicaTopicState()
.setTopicName(topic.name())
.setPartitionStates(topic.partitionIndexes().stream()
.map(partition -> new StopReplicaPartitionState()
.setPartitionIndex(partition)
.setDeletePartition(data.deletePartitions()))
.collect(Collectors.toList())));
} else {
return data.topicStates();
}
}
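    // Per the allocation note above, a caller that needs to iterate the topic states more than
    // once should copy them into a collection first, e.g. (sketch):
    //
    //   List<StopReplicaTopicState> topicStates = new ArrayList<>();
    //   request.topicStates().forEach(topicStates::add);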
public Map<TopicPartition, StopReplicaPartitionState> partitionStates() {
Map<TopicPartition, StopReplicaPartitionState> partitionStates = new HashMap<>();
if (version() < 1) {
for (StopReplicaPartitionV0 partition : data.ungroupedPartitions()) {
partitionStates.put(
new TopicPartition(partition.topicName(), partition.partitionIndex()),
new StopReplicaPartitionState()
.setPartitionIndex(partition.partitionIndex())
.setDeletePartition(data.deletePartitions()));
}
} else if (version() < 3) {
for (StopReplicaTopicV1 topic : data.topics()) {
for (Integer partitionIndex : topic.partitionIndexes()) {
partitionStates.put(
new TopicPartition(topic.name(), partitionIndex),
new StopReplicaPartitionState()
.setPartitionIndex(partitionIndex)
.setDeletePartition(data.deletePartitions()));
}
}
} else {
for (StopReplicaTopicState topicState : data.topicStates()) {
for (StopReplicaPartitionState partitionState: topicState.partitionStates()) {
partitionStates.put(
new TopicPartition(topicState.topicName(), partitionState.partitionIndex()),
partitionState);
}
}
}
return partitionStates;
}
@Override
public int controllerId() {
return data.controllerId();
}
@Override
public boolean isKRaftController() {
return data.isKRaftController();
}
@Override
public int controllerEpoch() {
return data.controllerEpoch();
}
@Override
public long brokerEpoch() {
return data.brokerEpoch();
}
public static StopReplicaRequest parse(ByteBuffer buffer, short version) {
return new StopReplicaRequest(new StopReplicaRequestData(new ByteBufferAccessor(buffer), version), version);
}
@Override
public StopReplicaRequestData data() {
return data;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/StopReplicaResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.StopReplicaResponseData;
import org.apache.kafka.common.message.StopReplicaResponseData.StopReplicaPartitionError;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.List;
import java.util.Map;
public class StopReplicaResponse extends AbstractResponse {
/**
     * Possible error codes:
* - {@link Errors#STALE_CONTROLLER_EPOCH}
* - {@link Errors#STALE_BROKER_EPOCH}
* - {@link Errors#FENCED_LEADER_EPOCH}
* - {@link Errors#KAFKA_STORAGE_ERROR}
*/
private final StopReplicaResponseData data;
public StopReplicaResponse(StopReplicaResponseData data) {
super(ApiKeys.STOP_REPLICA);
this.data = data;
}
public List<StopReplicaPartitionError> partitionErrors() {
return data.partitionErrors();
}
public Errors error() {
return Errors.forCode(data.errorCode());
}
@Override
public Map<Errors, Integer> errorCounts() {
if (data.errorCode() != Errors.NONE.code())
// Minor optimization since the top-level error applies to all partitions
return Collections.singletonMap(error(), data.partitionErrors().size() + 1);
Map<Errors, Integer> errors = errorCounts(data.partitionErrors().stream().map(p -> Errors.forCode(p.errorCode())));
updateErrorCounts(errors, Errors.forCode(data.errorCode())); // top level error
return errors;
}
public static StopReplicaResponse parse(ByteBuffer buffer, short version) {
return new StopReplicaResponse(new StopReplicaResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public int throttleTimeMs() {
return DEFAULT_THROTTLE_TIME;
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
// Not supported by the response schema
}
@Override
public StopReplicaResponseData data() {
return data;
}
@Override
public String toString() {
return data.toString();
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/SyncGroupRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.SyncGroupRequestData;
import org.apache.kafka.common.message.SyncGroupResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
public class SyncGroupRequest extends AbstractRequest {
public static class Builder extends AbstractRequest.Builder<SyncGroupRequest> {
private final SyncGroupRequestData data;
public Builder(SyncGroupRequestData data) {
super(ApiKeys.SYNC_GROUP);
this.data = data;
}
@Override
public SyncGroupRequest build(short version) {
if (data.groupInstanceId() != null && version < 3) {
throw new UnsupportedVersionException("The broker sync group protocol version " +
version + " does not support usage of config group.instance.id.");
}
return new SyncGroupRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
}
private final SyncGroupRequestData data;
public SyncGroupRequest(SyncGroupRequestData data, short version) {
super(ApiKeys.SYNC_GROUP, version);
this.data = data;
}
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
return new SyncGroupResponse(new SyncGroupResponseData()
.setErrorCode(Errors.forException(e).code())
.setAssignment(new byte[0])
.setThrottleTimeMs(throttleTimeMs));
}
public Map<String, ByteBuffer> groupAssignments() {
Map<String, ByteBuffer> groupAssignments = new HashMap<>();
for (SyncGroupRequestData.SyncGroupRequestAssignment assignment : data.assignments()) {
groupAssignments.put(assignment.memberId(), ByteBuffer.wrap(assignment.assignment()));
}
return groupAssignments;
}
/**
     * ProtocolType and ProtocolName are mandatory since version 5. This method verifies that
     * they are defined for version 5 or higher, and returns true unconditionally for older versions.
*/
public boolean areMandatoryProtocolTypeAndNamePresent() {
if (version() >= 5)
return data.protocolType() != null && data.protocolName() != null;
else
return true;
}
public static SyncGroupRequest parse(ByteBuffer buffer, short version) {
return new SyncGroupRequest(new SyncGroupRequestData(new ByteBufferAccessor(buffer), version), version);
}
@Override
public SyncGroupRequestData data() {
return data;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/SyncGroupResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.SyncGroupResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
public class SyncGroupResponse extends AbstractResponse {
private final SyncGroupResponseData data;
public SyncGroupResponse(SyncGroupResponseData data) {
super(ApiKeys.SYNC_GROUP);
this.data = data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
public Errors error() {
return Errors.forCode(data.errorCode());
}
@Override
public Map<Errors, Integer> errorCounts() {
return errorCounts(Errors.forCode(data.errorCode()));
}
@Override
public SyncGroupResponseData data() {
return data;
}
@Override
public String toString() {
return data.toString();
}
public static SyncGroupResponse parse(ByteBuffer buffer, short version) {
return new SyncGroupResponse(new SyncGroupResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 2;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/TransactionResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
public enum TransactionResult {
ABORT(false), COMMIT(true);
public final boolean id;
TransactionResult(boolean id) {
this.id = id;
}
public static TransactionResult forId(boolean id) {
if (id) {
return TransactionResult.COMMIT;
}
return TransactionResult.ABORT;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/TxnOffsetCommitRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.TxnOffsetCommitRequestData;
import org.apache.kafka.common.message.TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition;
import org.apache.kafka.common.message.TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic;
import org.apache.kafka.common.message.TxnOffsetCommitResponseData;
import org.apache.kafka.common.message.TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition;
import org.apache.kafka.common.message.TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.record.RecordBatch;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.stream.Collectors;
public class TxnOffsetCommitRequest extends AbstractRequest {
private final TxnOffsetCommitRequestData data;
public static class Builder extends AbstractRequest.Builder<TxnOffsetCommitRequest> {
public final TxnOffsetCommitRequestData data;
public Builder(final String transactionalId,
final String consumerGroupId,
final long producerId,
final short producerEpoch,
final Map<TopicPartition, CommittedOffset> pendingTxnOffsetCommits) {
this(transactionalId,
consumerGroupId,
producerId,
producerEpoch,
pendingTxnOffsetCommits,
JoinGroupRequest.UNKNOWN_MEMBER_ID,
JoinGroupRequest.UNKNOWN_GENERATION_ID,
Optional.empty());
}
public Builder(final String transactionalId,
final String consumerGroupId,
final long producerId,
final short producerEpoch,
final Map<TopicPartition, CommittedOffset> pendingTxnOffsetCommits,
final String memberId,
final int generationId,
final Optional<String> groupInstanceId) {
super(ApiKeys.TXN_OFFSET_COMMIT);
this.data = new TxnOffsetCommitRequestData()
.setTransactionalId(transactionalId)
.setGroupId(consumerGroupId)
.setProducerId(producerId)
.setProducerEpoch(producerEpoch)
.setTopics(getTopics(pendingTxnOffsetCommits))
.setMemberId(memberId)
.setGenerationId(generationId)
.setGroupInstanceId(groupInstanceId.orElse(null));
}
public Builder(final TxnOffsetCommitRequestData data) {
super(ApiKeys.TXN_OFFSET_COMMIT);
this.data = data;
}
@Override
public TxnOffsetCommitRequest build(short version) {
if (version < 3 && groupMetadataSet()) {
throw new UnsupportedVersionException("Broker doesn't support group metadata commit API on version " + version
+ ", minimum supported request version is 3 which requires brokers to be on version 2.5 or above.");
}
return new TxnOffsetCommitRequest(data, version);
}
private boolean groupMetadataSet() {
return !data.memberId().equals(JoinGroupRequest.UNKNOWN_MEMBER_ID) ||
data.generationId() != JoinGroupRequest.UNKNOWN_GENERATION_ID ||
data.groupInstanceId() != null;
}
@Override
public String toString() {
return data.toString();
}
}
public TxnOffsetCommitRequest(TxnOffsetCommitRequestData data, short version) {
super(ApiKeys.TXN_OFFSET_COMMIT, version);
this.data = data;
}
public Map<TopicPartition, CommittedOffset> offsets() {
List<TxnOffsetCommitRequestTopic> topics = data.topics();
Map<TopicPartition, CommittedOffset> offsetMap = new HashMap<>();
for (TxnOffsetCommitRequestTopic topic : topics) {
for (TxnOffsetCommitRequestPartition partition : topic.partitions()) {
offsetMap.put(new TopicPartition(topic.name(), partition.partitionIndex()),
new CommittedOffset(partition.committedOffset(),
partition.committedMetadata(),
RequestUtils.getLeaderEpoch(partition.committedLeaderEpoch()))
);
}
}
return offsetMap;
}
static List<TxnOffsetCommitRequestTopic> getTopics(Map<TopicPartition, CommittedOffset> pendingTxnOffsetCommits) {
Map<String, List<TxnOffsetCommitRequestPartition>> topicPartitionMap = new HashMap<>();
for (Map.Entry<TopicPartition, CommittedOffset> entry : pendingTxnOffsetCommits.entrySet()) {
TopicPartition topicPartition = entry.getKey();
CommittedOffset offset = entry.getValue();
List<TxnOffsetCommitRequestPartition> partitions =
topicPartitionMap.getOrDefault(topicPartition.topic(), new ArrayList<>());
partitions.add(new TxnOffsetCommitRequestPartition()
.setPartitionIndex(topicPartition.partition())
.setCommittedOffset(offset.offset)
.setCommittedLeaderEpoch(offset.leaderEpoch.orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH))
.setCommittedMetadata(offset.metadata)
);
topicPartitionMap.put(topicPartition.topic(), partitions);
}
return topicPartitionMap.entrySet().stream()
.map(entry -> new TxnOffsetCommitRequestTopic()
.setName(entry.getKey())
.setPartitions(entry.getValue()))
.collect(Collectors.toList());
}
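    // Sketch of the grouping performed by getTopics, with hypothetical input:
    //
    //   {("foo", 0) -> offset, ("foo", 1) -> offset, ("bar", 0) -> offset}
    //   becomes
    //   [TxnOffsetCommitRequestTopic(name="foo", partitions=[0, 1]),
    //    TxnOffsetCommitRequestTopic(name="bar", partitions=[0])]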
@Override
public TxnOffsetCommitRequestData data() {
return data;
}
static List<TxnOffsetCommitResponseTopic> getErrorResponseTopics(List<TxnOffsetCommitRequestTopic> requestTopics,
Errors e) {
List<TxnOffsetCommitResponseTopic> responseTopicData = new ArrayList<>();
for (TxnOffsetCommitRequestTopic entry : requestTopics) {
List<TxnOffsetCommitResponsePartition> responsePartitions = new ArrayList<>();
for (TxnOffsetCommitRequestPartition requestPartition : entry.partitions()) {
responsePartitions.add(new TxnOffsetCommitResponsePartition()
.setPartitionIndex(requestPartition.partitionIndex())
.setErrorCode(e.code()));
}
responseTopicData.add(new TxnOffsetCommitResponseTopic()
.setName(entry.name())
.setPartitions(responsePartitions)
);
}
return responseTopicData;
}
@Override
public TxnOffsetCommitResponse getErrorResponse(int throttleTimeMs, Throwable e) {
List<TxnOffsetCommitResponseTopic> responseTopicData =
getErrorResponseTopics(data.topics(), Errors.forException(e));
return new TxnOffsetCommitResponse(new TxnOffsetCommitResponseData()
.setThrottleTimeMs(throttleTimeMs)
.setTopics(responseTopicData));
}
@Override
public TxnOffsetCommitResponse getErrorResponse(Throwable e) {
return getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, e);
}
public static TxnOffsetCommitRequest parse(ByteBuffer buffer, short version) {
return new TxnOffsetCommitRequest(new TxnOffsetCommitRequestData(
new ByteBufferAccessor(buffer), version), version);
}
public static class CommittedOffset {
public final long offset;
public final String metadata;
public final Optional<Integer> leaderEpoch;
public CommittedOffset(long offset, String metadata, Optional<Integer> leaderEpoch) {
this.offset = offset;
this.metadata = metadata;
this.leaderEpoch = leaderEpoch;
}
@Override
public String toString() {
return "CommittedOffset(" +
"offset=" + offset +
", leaderEpoch=" + leaderEpoch +
", metadata='" + metadata + "')";
}
@Override
public boolean equals(Object other) {
if (!(other instanceof CommittedOffset)) {
return false;
}
CommittedOffset otherOffset = (CommittedOffset) other;
return this.offset == otherOffset.offset
&& this.leaderEpoch.equals(otherOffset.leaderEpoch)
&& Objects.equals(this.metadata, otherOffset.metadata);
}
@Override
public int hashCode() {
return Objects.hash(offset, leaderEpoch, metadata);
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/TxnOffsetCommitResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.TxnOffsetCommitResponseData;
import org.apache.kafka.common.message.TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition;
import org.apache.kafka.common.message.TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
/**
* Possible error codes:
*
* - {@link Errors#INVALID_PRODUCER_EPOCH}
* - {@link Errors#NOT_COORDINATOR}
* - {@link Errors#COORDINATOR_NOT_AVAILABLE}
* - {@link Errors#COORDINATOR_LOAD_IN_PROGRESS}
* - {@link Errors#OFFSET_METADATA_TOO_LARGE}
* - {@link Errors#GROUP_AUTHORIZATION_FAILED}
* - {@link Errors#INVALID_COMMIT_OFFSET_SIZE}
* - {@link Errors#TRANSACTIONAL_ID_AUTHORIZATION_FAILED}
* - {@link Errors#REQUEST_TIMED_OUT}
* - {@link Errors#UNKNOWN_MEMBER_ID}
* - {@link Errors#FENCED_INSTANCE_ID}
* - {@link Errors#ILLEGAL_GENERATION}
*/
public class TxnOffsetCommitResponse extends AbstractResponse {
public static class Builder {
TxnOffsetCommitResponseData data = new TxnOffsetCommitResponseData();
HashMap<String, TxnOffsetCommitResponseTopic> byTopicName = new HashMap<>();
private TxnOffsetCommitResponseTopic getOrCreateTopic(
String topicName
) {
TxnOffsetCommitResponseTopic topic = byTopicName.get(topicName);
if (topic == null) {
topic = new TxnOffsetCommitResponseTopic().setName(topicName);
data.topics().add(topic);
byTopicName.put(topicName, topic);
}
return topic;
}
public Builder addPartition(
String topicName,
int partitionIndex,
Errors error
) {
final TxnOffsetCommitResponseTopic topicResponse = getOrCreateTopic(topicName);
topicResponse.partitions().add(new TxnOffsetCommitResponsePartition()
.setPartitionIndex(partitionIndex)
.setErrorCode(error.code()));
return this;
}
public <P> Builder addPartitions(
String topicName,
List<P> partitions,
Function<P, Integer> partitionIndex,
Errors error
) {
final TxnOffsetCommitResponseTopic topicResponse = getOrCreateTopic(topicName);
partitions.forEach(partition -> {
topicResponse.partitions().add(new TxnOffsetCommitResponsePartition()
.setPartitionIndex(partitionIndex.apply(partition))
.setErrorCode(error.code()));
});
return this;
}
public Builder merge(
TxnOffsetCommitResponseData newData
) {
if (data.topics().isEmpty()) {
// If the current data is empty, we can discard it and use the new data.
data = newData;
} else {
// Otherwise, we have to merge them together.
newData.topics().forEach(newTopic -> {
TxnOffsetCommitResponseTopic existingTopic = byTopicName.get(newTopic.name());
if (existingTopic == null) {
// If no topic exists, we can directly copy the new topic data.
data.topics().add(newTopic);
byTopicName.put(newTopic.name(), newTopic);
} else {
// Otherwise, we add the partitions to the existing one. Note we
// expect non-overlapping partitions here as we don't verify
// if the partition is already in the list before adding it.
existingTopic.partitions().addAll(newTopic.partitions());
}
});
}
return this;
}
public TxnOffsetCommitResponse build() {
return new TxnOffsetCommitResponse(data);
}
}
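    // A minimal Builder usage sketch (topic and partition values are hypothetical):
    //
    //   TxnOffsetCommitResponse response = new TxnOffsetCommitResponse.Builder()
    //       .addPartition("foo", 0, Errors.NONE)
    //       .addPartition("foo", 1, Errors.NONE)
    //       .build();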
private final TxnOffsetCommitResponseData data;
public TxnOffsetCommitResponse(TxnOffsetCommitResponseData data) {
super(ApiKeys.TXN_OFFSET_COMMIT);
this.data = data;
}
public TxnOffsetCommitResponse(int requestThrottleMs, Map<TopicPartition, Errors> responseData) {
super(ApiKeys.TXN_OFFSET_COMMIT);
Map<String, TxnOffsetCommitResponseTopic> responseTopicDataMap = new HashMap<>();
for (Map.Entry<TopicPartition, Errors> entry : responseData.entrySet()) {
TopicPartition topicPartition = entry.getKey();
String topicName = topicPartition.topic();
TxnOffsetCommitResponseTopic topic = responseTopicDataMap.getOrDefault(
topicName, new TxnOffsetCommitResponseTopic().setName(topicName));
topic.partitions().add(new TxnOffsetCommitResponsePartition()
.setErrorCode(entry.getValue().code())
.setPartitionIndex(topicPartition.partition())
);
responseTopicDataMap.put(topicName, topic);
}
data = new TxnOffsetCommitResponseData()
.setTopics(new ArrayList<>(responseTopicDataMap.values()))
.setThrottleTimeMs(requestThrottleMs);
}
@Override
public TxnOffsetCommitResponseData data() {
return data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
@Override
public Map<Errors, Integer> errorCounts() {
return errorCounts(data.topics().stream().flatMap(topic ->
topic.partitions().stream().map(partition ->
Errors.forCode(partition.errorCode()))));
}
public Map<TopicPartition, Errors> errors() {
Map<TopicPartition, Errors> errorMap = new HashMap<>();
for (TxnOffsetCommitResponseTopic topic : data.topics()) {
for (TxnOffsetCommitResponsePartition partition : topic.partitions()) {
errorMap.put(new TopicPartition(topic.name(), partition.partitionIndex()),
Errors.forCode(partition.errorCode()));
}
}
return errorMap;
}
public static TxnOffsetCommitResponse parse(ByteBuffer buffer, short version) {
return new TxnOffsetCommitResponse(new TxnOffsetCommitResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public String toString() {
return data.toString();
}
@Override
public boolean shouldClientThrottle(short version) {
return version >= 1;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/UnregisterBrokerRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.UnregisterBrokerRequestData;
import org.apache.kafka.common.message.UnregisterBrokerResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
public class UnregisterBrokerRequest extends AbstractRequest {
public static class Builder extends AbstractRequest.Builder<UnregisterBrokerRequest> {
private final UnregisterBrokerRequestData data;
public Builder(UnregisterBrokerRequestData data) {
super(ApiKeys.UNREGISTER_BROKER);
this.data = data;
}
@Override
public UnregisterBrokerRequest build(short version) {
return new UnregisterBrokerRequest(data, version);
}
}
private final UnregisterBrokerRequestData data;
public UnregisterBrokerRequest(UnregisterBrokerRequestData data, short version) {
super(ApiKeys.UNREGISTER_BROKER, version);
this.data = data;
}
@Override
public UnregisterBrokerRequestData data() {
return data;
}
@Override
public UnregisterBrokerResponse getErrorResponse(int throttleTimeMs, Throwable e) {
Errors error = Errors.forException(e);
return new UnregisterBrokerResponse(new UnregisterBrokerResponseData()
.setThrottleTimeMs(throttleTimeMs)
.setErrorCode(error.code()));
}
public static UnregisterBrokerRequest parse(ByteBuffer buffer, short version) {
return new UnregisterBrokerRequest(new UnregisterBrokerRequestData(new ByteBufferAccessor(buffer), version),
version);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/UnregisterBrokerResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.UnregisterBrokerResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
public class UnregisterBrokerResponse extends AbstractResponse {
private final UnregisterBrokerResponseData data;
public UnregisterBrokerResponse(UnregisterBrokerResponseData data) {
super(ApiKeys.UNREGISTER_BROKER);
this.data = data;
}
@Override
public UnregisterBrokerResponseData data() {
return data;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> errorCounts = new HashMap<>();
if (data.errorCode() != 0) {
errorCounts.put(Errors.forCode(data.errorCode()), 1);
}
return errorCounts;
}
public static UnregisterBrokerResponse parse(ByteBuffer buffer, short version) {
return new UnregisterBrokerResponse(new UnregisterBrokerResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public boolean shouldClientThrottle(short version) {
return true;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/UpdateFeaturesRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.clients.admin.FeatureUpdate;
import org.apache.kafka.common.message.UpdateFeaturesRequestData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.Collections;
import java.util.stream.Collectors;
public class UpdateFeaturesRequest extends AbstractRequest {
public static class FeatureUpdateItem {
private final String featureName;
private final short featureLevel;
private final FeatureUpdate.UpgradeType upgradeType;
public FeatureUpdateItem(String featureName, short featureLevel, FeatureUpdate.UpgradeType upgradeType) {
this.featureName = featureName;
this.featureLevel = featureLevel;
this.upgradeType = upgradeType;
}
public String feature() {
return featureName;
}
public short versionLevel() {
return featureLevel;
}
public FeatureUpdate.UpgradeType upgradeType() {
return upgradeType;
}
public boolean isDeleteRequest() {
return featureLevel < 1 && !upgradeType.equals(FeatureUpdate.UpgradeType.UPGRADE);
}
}
public static class Builder extends AbstractRequest.Builder<UpdateFeaturesRequest> {
private final UpdateFeaturesRequestData data;
public Builder(UpdateFeaturesRequestData data) {
super(ApiKeys.UPDATE_FEATURES);
this.data = data;
}
@Override
public UpdateFeaturesRequest build(short version) {
return new UpdateFeaturesRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
}
private final UpdateFeaturesRequestData data;
public UpdateFeaturesRequest(UpdateFeaturesRequestData data, short version) {
super(ApiKeys.UPDATE_FEATURES, version);
this.data = data;
}
public FeatureUpdateItem getFeature(String name) {
UpdateFeaturesRequestData.FeatureUpdateKey update = data.featureUpdates().find(name);
if (super.version() == 0) {
if (update.allowDowngrade()) {
return new FeatureUpdateItem(update.feature(), update.maxVersionLevel(), FeatureUpdate.UpgradeType.SAFE_DOWNGRADE);
} else {
return new FeatureUpdateItem(update.feature(), update.maxVersionLevel(), FeatureUpdate.UpgradeType.UPGRADE);
}
} else {
return new FeatureUpdateItem(update.feature(), update.maxVersionLevel(), FeatureUpdate.UpgradeType.fromCode(update.upgradeType()));
}
}
public Collection<FeatureUpdateItem> featureUpdates() {
return data.featureUpdates().stream()
.map(update -> getFeature(update.feature()))
.collect(Collectors.toList());
}
@Override
public UpdateFeaturesResponse getErrorResponse(int throttleTimeMs, Throwable e) {
return UpdateFeaturesResponse.createWithErrors(
ApiError.fromThrowable(e),
Collections.emptyMap(),
throttleTimeMs
);
}
@Override
public UpdateFeaturesRequestData data() {
return data;
}
public static UpdateFeaturesRequest parse(ByteBuffer buffer, short version) {
return new UpdateFeaturesRequest(new UpdateFeaturesRequestData(new ByteBufferAccessor(buffer), version), version);
}
}
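// --- Editorial sketch (hypothetical; not part of the Kafka sources) ---
// Illustrates the v0 translation performed by getFeature(...) above: version 0
// only carried an allowDowngrade flag, which is read back as SAFE_DOWNGRADE;
// v1+ requests carry an explicit upgradeType code instead. The feature name and
// level are made up for illustration.
final class UpdateFeaturesRequestSketch {
private UpdateFeaturesRequestSketch() { }
static void demo() {
UpdateFeaturesRequestData data = new UpdateFeaturesRequestData();
data.featureUpdates().add(new UpdateFeaturesRequestData.FeatureUpdateKey()
.setFeature("metadata.version") // hypothetical feature name
.setMaxVersionLevel((short) 3)
.setAllowDowngrade(true)); // v0 wire flag
UpdateFeaturesRequest request = new UpdateFeaturesRequest(data, (short) 0);
// The v0 flag surfaces as a SAFE_DOWNGRADE upgrade type.
assert request.getFeature("metadata.version").upgradeType() == FeatureUpdate.UpgradeType.SAFE_DOWNGRADE;
}
}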
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/UpdateFeaturesResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.UpdateFeaturesResponseData;
import org.apache.kafka.common.message.UpdateFeaturesResponseData.UpdatableFeatureResult;
import org.apache.kafka.common.message.UpdateFeaturesResponseData.UpdatableFeatureResultCollection;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
/**
* Possible error codes:
*
* - {@link Errors#CLUSTER_AUTHORIZATION_FAILED}
* - {@link Errors#NOT_CONTROLLER}
* - {@link Errors#INVALID_REQUEST}
* - {@link Errors#FEATURE_UPDATE_FAILED}
*/
public class UpdateFeaturesResponse extends AbstractResponse {
private final UpdateFeaturesResponseData data;
public UpdateFeaturesResponse(UpdateFeaturesResponseData data) {
super(ApiKeys.UPDATE_FEATURES);
this.data = data;
}
public ApiError topLevelError() {
return new ApiError(Errors.forCode(data.errorCode()), data.errorMessage());
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> errorCounts = new HashMap<>();
updateErrorCounts(errorCounts, Errors.forCode(data.errorCode()));
for (UpdatableFeatureResult result : data.results()) {
updateErrorCounts(errorCounts, Errors.forCode(result.errorCode()));
}
return errorCounts;
}
@Override
public int throttleTimeMs() {
return data.throttleTimeMs();
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
data.setThrottleTimeMs(throttleTimeMs);
}
@Override
public String toString() {
return data.toString();
}
@Override
public UpdateFeaturesResponseData data() {
return data;
}
public static UpdateFeaturesResponse parse(ByteBuffer buffer, short version) {
return new UpdateFeaturesResponse(new UpdateFeaturesResponseData(new ByteBufferAccessor(buffer), version));
}
public static UpdateFeaturesResponse createWithErrors(ApiError topLevelError, Map<String, ApiError> updateErrors, int throttleTimeMs) {
final UpdatableFeatureResultCollection results = new UpdatableFeatureResultCollection();
for (final Map.Entry<String, ApiError> updateError : updateErrors.entrySet()) {
final String feature = updateError.getKey();
final ApiError error = updateError.getValue();
final UpdatableFeatureResult result = new UpdatableFeatureResult();
result.setFeature(feature)
.setErrorCode(error.error().code())
.setErrorMessage(error.message());
results.add(result);
}
final UpdateFeaturesResponseData responseData = new UpdateFeaturesResponseData()
.setThrottleTimeMs(throttleTimeMs)
.setErrorCode(topLevelError.error().code())
.setErrorMessage(topLevelError.message())
.setResults(results);
return new UpdateFeaturesResponse(responseData);
}
}
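// --- Editorial sketch (hypothetical; not part of the Kafka sources) ---
// Shows createWithErrors(...) as used by UpdateFeaturesRequest.getErrorResponse:
// a top-level error plus an empty per-feature map yields a results-free response.
// The error message text is made up for illustration.
final class UpdateFeaturesResponseSketch {
private UpdateFeaturesResponseSketch() { }
static UpdateFeaturesResponse notController(int throttleTimeMs) {
return UpdateFeaturesResponse.createWithErrors(
new ApiError(Errors.NOT_CONTROLLER, "This broker is no longer the controller"),
java.util.Collections.emptyMap(),
throttleTimeMs);
}
}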
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/UpdateMetadataRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.UpdateMetadataRequestData;
import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataBroker;
import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataEndpoint;
import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataPartitionState;
import org.apache.kafka.common.message.UpdateMetadataRequestData.UpdateMetadataTopicState;
import org.apache.kafka.common.message.UpdateMetadataResponseData;
import org.apache.kafka.common.network.ListenerName;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.common.utils.FlattenedIterator;
import org.apache.kafka.common.utils.Utils;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import static java.util.Collections.singletonList;
public class UpdateMetadataRequest extends AbstractControlRequest {
public static class Builder extends AbstractControlRequest.Builder<UpdateMetadataRequest> {
private final List<UpdateMetadataPartitionState> partitionStates;
private final List<UpdateMetadataBroker> liveBrokers;
private final Map<String, Uuid> topicIds;
public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
List<UpdateMetadataPartitionState> partitionStates, List<UpdateMetadataBroker> liveBrokers,
Map<String, Uuid> topicIds) {
this(version, controllerId, controllerEpoch, brokerEpoch, partitionStates,
liveBrokers, topicIds, false);
}
public Builder(short version, int controllerId, int controllerEpoch, long brokerEpoch,
List<UpdateMetadataPartitionState> partitionStates, List<UpdateMetadataBroker> liveBrokers,
Map<String, Uuid> topicIds, boolean kraftController) {
super(ApiKeys.UPDATE_METADATA, version, controllerId, controllerEpoch, brokerEpoch, kraftController);
this.partitionStates = partitionStates;
this.liveBrokers = liveBrokers;
this.topicIds = topicIds;
}
@Override
public UpdateMetadataRequest build(short version) {
if (version < 3) {
for (UpdateMetadataBroker broker : liveBrokers) {
if (version == 0) {
if (broker.endpoints().size() != 1)
throw new UnsupportedVersionException("UpdateMetadataRequest v0 requires a single endpoint");
if (broker.endpoints().get(0).securityProtocol() != SecurityProtocol.PLAINTEXT.id)
throw new UnsupportedVersionException("UpdateMetadataRequest v0 only handles PLAINTEXT endpoints");
// Don't null out `endpoints` since it's ignored by the generated code if version >= 1
UpdateMetadataEndpoint endpoint = broker.endpoints().get(0);
broker.setV0Host(endpoint.host());
broker.setV0Port(endpoint.port());
} else {
if (broker.endpoints().stream().anyMatch(endpoint -> !endpoint.listener().isEmpty() &&
!endpoint.listener().equals(listenerNameFromSecurityProtocol(endpoint)))) {
throw new UnsupportedVersionException("UpdateMetadataRequest v0-v3 does not support custom " +
"listeners, request version: " + version + ", endpoints: " + broker.endpoints());
}
}
}
}
UpdateMetadataRequestData data = new UpdateMetadataRequestData()
.setControllerId(controllerId)
.setControllerEpoch(controllerEpoch)
.setBrokerEpoch(brokerEpoch)
.setLiveBrokers(liveBrokers);
if (version >= 8) {
data.setIsKRaftController(kraftController);
}
if (version >= 5) {
Map<String, UpdateMetadataTopicState> topicStatesMap = groupByTopic(topicIds, partitionStates);
data.setTopicStates(new ArrayList<>(topicStatesMap.values()));
} else {
data.setUngroupedPartitionStates(partitionStates);
}
return new UpdateMetadataRequest(data, version);
}
private static Map<String, UpdateMetadataTopicState> groupByTopic(Map<String, Uuid> topicIds, List<UpdateMetadataPartitionState> partitionStates) {
Map<String, UpdateMetadataTopicState> topicStates = new HashMap<>();
for (UpdateMetadataPartitionState partition : partitionStates) {
// We don't null out the topic name in UpdateMetadataPartitionState since it's ignored by the generated
// code if version >= 5
UpdateMetadataTopicState topicState = topicStates.computeIfAbsent(partition.topicName(),
t -> new UpdateMetadataTopicState()
.setTopicName(partition.topicName())
.setTopicId(topicIds.getOrDefault(partition.topicName(), Uuid.ZERO_UUID))
);
topicState.partitionStates().add(partition);
}
return topicStates;
}
@Override
public String toString() {
StringBuilder bld = new StringBuilder();
bld.append("(type: UpdateMetadataRequest=").
append(", controllerId=").append(controllerId).
append(", controllerEpoch=").append(controllerEpoch).
append(", brokerEpoch=").append(brokerEpoch).
append(", partitionStates=").append(partitionStates).
append(", liveBrokers=").append(Utils.join(liveBrokers, ", ")).
append(")");
return bld.toString();
}
}
private final UpdateMetadataRequestData data;
UpdateMetadataRequest(UpdateMetadataRequestData data, short version) {
super(ApiKeys.UPDATE_METADATA, version);
this.data = data;
// Do this from the constructor to make it thread-safe (even though it's only needed when some methods are called)
normalize();
}
private void normalize() {
// Version 0 only supported a single host and port and the protocol was always plaintext
// Version 1 added support for multiple endpoints, each with its own security protocol
// Version 2 added support for rack
// Version 3 added support for listener name, which we can infer from the security protocol for older versions
if (version() < 3) {
for (UpdateMetadataBroker liveBroker : data.liveBrokers()) {
// Set endpoints so that callers can rely on it always being present
if (version() == 0 && liveBroker.endpoints().isEmpty()) {
SecurityProtocol securityProtocol = SecurityProtocol.PLAINTEXT;
liveBroker.setEndpoints(singletonList(new UpdateMetadataEndpoint()
.setHost(liveBroker.v0Host())
.setPort(liveBroker.v0Port())
.setSecurityProtocol(securityProtocol.id)
.setListener(ListenerName.forSecurityProtocol(securityProtocol).value())));
} else {
for (UpdateMetadataEndpoint endpoint : liveBroker.endpoints()) {
// Set listener so that callers can rely on it always being present
if (endpoint.listener().isEmpty())
endpoint.setListener(listenerNameFromSecurityProtocol(endpoint));
}
}
}
}
if (version() >= 5) {
for (UpdateMetadataTopicState topicState : data.topicStates()) {
for (UpdateMetadataPartitionState partitionState : topicState.partitionStates()) {
// Set the topic name so that we can always present the ungrouped view to callers
partitionState.setTopicName(topicState.topicName());
}
}
}
}
private static String listenerNameFromSecurityProtocol(UpdateMetadataEndpoint endpoint) {
SecurityProtocol securityProtocol = SecurityProtocol.forId(endpoint.securityProtocol());
return ListenerName.forSecurityProtocol(securityProtocol).value();
}
@Override
public int controllerId() {
return data.controllerId();
}
@Override
public boolean isKRaftController() {
return data.isKRaftController();
}
@Override
public int controllerEpoch() {
return data.controllerEpoch();
}
@Override
public long brokerEpoch() {
return data.brokerEpoch();
}
@Override
public UpdateMetadataResponse getErrorResponse(int throttleTimeMs, Throwable e) {
UpdateMetadataResponseData data = new UpdateMetadataResponseData()
.setErrorCode(Errors.forException(e).code());
return new UpdateMetadataResponse(data);
}
public Iterable<UpdateMetadataPartitionState> partitionStates() {
if (version() >= 5) {
return () -> new FlattenedIterator<>(data.topicStates().iterator(),
topicState -> topicState.partitionStates().iterator());
}
return data.ungroupedPartitionStates();
}
public List<UpdateMetadataTopicState> topicStates() {
if (version() >= 5) {
return data.topicStates();
}
return Collections.emptyList();
}
public List<UpdateMetadataBroker> liveBrokers() {
return data.liveBrokers();
}
@Override
public UpdateMetadataRequestData data() {
return data;
}
public static UpdateMetadataRequest parse(ByteBuffer buffer, short version) {
return new UpdateMetadataRequest(new UpdateMetadataRequestData(new ByteBufferAccessor(buffer), version), version);
}
}
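// --- Editorial sketch (hypothetical; not part of the Kafka sources) ---
// Shows the v5+ grouped representation built by groupByTopic(...) above: a flat
// partition-state list goes in, one UpdateMetadataTopicState per topic comes out,
// and partitionStates() flattens it back for version-agnostic callers. The topic
// name, ids, and epochs are made up for illustration.
final class UpdateMetadataRequestSketch {
private UpdateMetadataRequestSketch() { }
static void demo() {
UpdateMetadataPartitionState p0 = new UpdateMetadataPartitionState()
.setTopicName("payments") // hypothetical topic
.setPartitionIndex(0)
.setLeader(1);
UpdateMetadataRequest request = new UpdateMetadataRequest.Builder(
(short) 7, // version >= 5, so states are grouped by topic
0, 10, 100L, // controllerId, controllerEpoch, brokerEpoch
singletonList(p0),
Collections.emptyList(), // liveBrokers, empty for brevity
Collections.singletonMap("payments", Uuid.randomUuid()))
.build((short) 7);
assert request.topicStates().size() == 1;
}
}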
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/UpdateMetadataResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.message.UpdateMetadataResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Map;
public class UpdateMetadataResponse extends AbstractResponse {
private final UpdateMetadataResponseData data;
public UpdateMetadataResponse(UpdateMetadataResponseData data) {
super(ApiKeys.UPDATE_METADATA);
this.data = data;
}
public Errors error() {
return Errors.forCode(data.errorCode());
}
@Override
public Map<Errors, Integer> errorCounts() {
return errorCounts(error());
}
@Override
public int throttleTimeMs() {
return DEFAULT_THROTTLE_TIME;
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
// Not supported by the response schema
}
public static UpdateMetadataResponse parse(ByteBuffer buffer, short version) {
return new UpdateMetadataResponse(new UpdateMetadataResponseData(new ByteBufferAccessor(buffer), version));
}
@Override
public UpdateMetadataResponseData data() {
return data;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/VoteRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.VoteRequestData;
import org.apache.kafka.common.message.VoteResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
public class VoteRequest extends AbstractRequest {
public static class Builder extends AbstractRequest.Builder<VoteRequest> {
private final VoteRequestData data;
public Builder(VoteRequestData data) {
super(ApiKeys.VOTE);
this.data = data;
}
@Override
public VoteRequest build(short version) {
return new VoteRequest(data, version);
}
@Override
public String toString() {
return data.toString();
}
}
private final VoteRequestData data;
private VoteRequest(VoteRequestData data, short version) {
super(ApiKeys.VOTE, version);
this.data = data;
}
@Override
public VoteRequestData data() {
return data;
}
@Override
public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
return new VoteResponse(new VoteResponseData()
.setErrorCode(Errors.forException(e).code()));
}
public static VoteRequest parse(ByteBuffer buffer, short version) {
return new VoteRequest(new VoteRequestData(new ByteBufferAccessor(buffer), version), version);
}
public static VoteRequestData singletonRequest(TopicPartition topicPartition,
int candidateEpoch,
int candidateId,
int lastEpoch,
long lastEpochEndOffset) {
return singletonRequest(topicPartition,
null,
candidateEpoch,
candidateId,
lastEpoch,
lastEpochEndOffset);
}
public static VoteRequestData singletonRequest(TopicPartition topicPartition,
String clusterId,
int candidateEpoch,
int candidateId,
int lastEpoch,
long lastEpochEndOffset) {
return new VoteRequestData()
.setClusterId(clusterId)
.setTopics(Collections.singletonList(
new VoteRequestData.TopicData()
.setTopicName(topicPartition.topic())
.setPartitions(Collections.singletonList(
new VoteRequestData.PartitionData()
.setPartitionIndex(topicPartition.partition())
.setCandidateEpoch(candidateEpoch)
.setCandidateId(candidateId)
.setLastOffsetEpoch(lastEpoch)
.setLastOffset(lastEpochEndOffset))
)));
}
}
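// --- Editorial sketch (hypothetical; not part of the Kafka sources) ---
// Shows the intended use of singletonRequest(...): a candidate asks a voter for
// its vote on one partition, advertising its last log epoch and end offset so the
// voter can verify the candidate's log is at least as current as its own. All
// ids, epochs, and the cluster id are made up for illustration.
final class VoteRequestSketch {
private VoteRequestSketch() { }
static VoteRequest buildExample() {
VoteRequestData data = VoteRequest.singletonRequest(
new TopicPartition("__cluster_metadata", 0), // KRaft metadata partition
"abc123", // clusterId (hypothetical)
5, // candidateEpoch being voted on
1, // candidateId: the broker requesting the vote
4, // lastEpoch of the candidate's log
100L); // end offset of that epoch
return new VoteRequest.Builder(data).build(data.highestSupportedVersion());
}
}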
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/VoteResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.VoteResponseData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
* Possible error codes.
*
* Top level errors:
* - {@link Errors#CLUSTER_AUTHORIZATION_FAILED}
* - {@link Errors#BROKER_NOT_AVAILABLE}
*
* Partition level errors:
* - {@link Errors#FENCED_LEADER_EPOCH}
* - {@link Errors#INVALID_REQUEST}
* - {@link Errors#INCONSISTENT_VOTER_SET}
* - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION}
*/
public class VoteResponse extends AbstractResponse {
private final VoteResponseData data;
public VoteResponse(VoteResponseData data) {
super(ApiKeys.VOTE);
this.data = data;
}
public static VoteResponseData singletonResponse(Errors topLevelError,
TopicPartition topicPartition,
Errors partitionLevelError,
int leaderEpoch,
int leaderId,
boolean voteGranted) {
return new VoteResponseData()
.setErrorCode(topLevelError.code())
.setTopics(Collections.singletonList(
new VoteResponseData.TopicData()
.setTopicName(topicPartition.topic())
.setPartitions(Collections.singletonList(
new VoteResponseData.PartitionData()
.setErrorCode(partitionLevelError.code())
.setLeaderId(leaderId)
.setLeaderEpoch(leaderEpoch)
.setVoteGranted(voteGranted)))));
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> errors = new HashMap<>();
errors.put(Errors.forCode(data.errorCode()), 1);
for (VoteResponseData.TopicData topicResponse : data.topics()) {
for (VoteResponseData.PartitionData partitionResponse : topicResponse.partitions()) {
updateErrorCounts(errors, Errors.forCode(partitionResponse.errorCode()));
}
}
return errors;
}
@Override
public VoteResponseData data() {
return data;
}
@Override
public int throttleTimeMs() {
return DEFAULT_THROTTLE_TIME;
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
// Not supported by the response schema
}
public static VoteResponse parse(ByteBuffer buffer, short version) {
return new VoteResponse(new VoteResponseData(new ByteBufferAccessor(buffer), version));
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/WriteTxnMarkersRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.WriteTxnMarkersRequestData;
import org.apache.kafka.common.message.WriteTxnMarkersRequestData.WritableTxnMarker;
import org.apache.kafka.common.message.WriteTxnMarkersRequestData.WritableTxnMarkerTopic;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
public class WriteTxnMarkersRequest extends AbstractRequest {
public static class TxnMarkerEntry {
private final long producerId;
private final short producerEpoch;
private final int coordinatorEpoch;
private final TransactionResult result;
private final List<TopicPartition> partitions;
public TxnMarkerEntry(long producerId,
short producerEpoch,
int coordinatorEpoch,
TransactionResult result,
List<TopicPartition> partitions) {
this.producerId = producerId;
this.producerEpoch = producerEpoch;
this.coordinatorEpoch = coordinatorEpoch;
this.result = result;
this.partitions = partitions;
}
public long producerId() {
return producerId;
}
public short producerEpoch() {
return producerEpoch;
}
public int coordinatorEpoch() {
return coordinatorEpoch;
}
public TransactionResult transactionResult() {
return result;
}
public List<TopicPartition> partitions() {
return partitions;
}
@Override
public String toString() {
return "TxnMarkerEntry{" +
"producerId=" + producerId +
", producerEpoch=" + producerEpoch +
", coordinatorEpoch=" + coordinatorEpoch +
", result=" + result +
", partitions=" + partitions +
'}';
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final TxnMarkerEntry that = (TxnMarkerEntry) o;
return producerId == that.producerId &&
producerEpoch == that.producerEpoch &&
coordinatorEpoch == that.coordinatorEpoch &&
result == that.result &&
Objects.equals(partitions, that.partitions);
}
@Override
public int hashCode() {
return Objects.hash(producerId, producerEpoch, coordinatorEpoch, result, partitions);
}
}
public static class Builder extends AbstractRequest.Builder<WriteTxnMarkersRequest> {
public final WriteTxnMarkersRequestData data;
public Builder(WriteTxnMarkersRequestData data) {
super(ApiKeys.WRITE_TXN_MARKERS);
this.data = data;
}
public Builder(short version, final List<TxnMarkerEntry> markers) {
super(ApiKeys.WRITE_TXN_MARKERS, version);
List<WritableTxnMarker> dataMarkers = new ArrayList<>();
for (TxnMarkerEntry marker : markers) {
final Map<String, WritableTxnMarkerTopic> topicMap = new HashMap<>();
for (TopicPartition topicPartition : marker.partitions) {
WritableTxnMarkerTopic topic = topicMap.getOrDefault(topicPartition.topic(),
new WritableTxnMarkerTopic()
.setName(topicPartition.topic()));
topic.partitionIndexes().add(topicPartition.partition());
topicMap.put(topicPartition.topic(), topic);
}
dataMarkers.add(new WritableTxnMarker()
.setProducerId(marker.producerId)
.setProducerEpoch(marker.producerEpoch)
.setCoordinatorEpoch(marker.coordinatorEpoch)
.setTransactionResult(marker.transactionResult().id)
.setTopics(new ArrayList<>(topicMap.values())));
}
this.data = new WriteTxnMarkersRequestData().setMarkers(dataMarkers);
}
@Override
public WriteTxnMarkersRequest build(short version) {
return new WriteTxnMarkersRequest(data, version);
}
}
private final WriteTxnMarkersRequestData data;
private WriteTxnMarkersRequest(WriteTxnMarkersRequestData data, short version) {
super(ApiKeys.WRITE_TXN_MARKERS, version);
this.data = data;
}
@Override
public WriteTxnMarkersRequestData data() {
return data;
}
@Override
public WriteTxnMarkersResponse getErrorResponse(int throttleTimeMs, Throwable e) {
Errors error = Errors.forException(e);
final Map<Long, Map<TopicPartition, Errors>> errors = new HashMap<>(data.markers().size());
for (WritableTxnMarker markerEntry : data.markers()) {
Map<TopicPartition, Errors> errorsPerPartition = new HashMap<>();
for (WritableTxnMarkerTopic topic : markerEntry.topics()) {
for (Integer partitionIdx : topic.partitionIndexes()) {
errorsPerPartition.put(new TopicPartition(topic.name(), partitionIdx), error);
}
}
errors.put(markerEntry.producerId(), errorsPerPartition);
}
return new WriteTxnMarkersResponse(errors);
}
public List<TxnMarkerEntry> markers() {
List<TxnMarkerEntry> markers = new ArrayList<>();
for (WritableTxnMarker markerEntry : data.markers()) {
List<TopicPartition> topicPartitions = new ArrayList<>();
for (WritableTxnMarkerTopic topic : markerEntry.topics()) {
for (Integer partitionIdx : topic.partitionIndexes()) {
topicPartitions.add(new TopicPartition(topic.name(), partitionIdx));
}
}
markers.add(new TxnMarkerEntry(
markerEntry.producerId(),
markerEntry.producerEpoch(),
markerEntry.coordinatorEpoch(),
TransactionResult.forId(markerEntry.transactionResult()),
topicPartitions)
);
}
return markers;
}
public static WriteTxnMarkersRequest parse(ByteBuffer buffer, short version) {
return new WriteTxnMarkersRequest(new WriteTxnMarkersRequestData(new ByteBufferAccessor(buffer), version), version);
}
@Override
public boolean equals(final Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
final WriteTxnMarkersRequest that = (WriteTxnMarkersRequest) o;
return Objects.equals(this.data, that.data);
}
@Override
public int hashCode() {
return Objects.hash(this.data);
}
}
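// --- Editorial sketch (hypothetical; not part of the Kafka sources) ---
// Shows how the Builder above groups a marker's partition list by topic: two
// partitions of the same topic end up under a single WritableTxnMarkerTopic.
// The producer id, epochs, and topic name are made up for illustration.
final class WriteTxnMarkersRequestSketch {
private WriteTxnMarkersRequestSketch() { }
static void demo() {
WriteTxnMarkersRequest.TxnMarkerEntry marker = new WriteTxnMarkersRequest.TxnMarkerEntry(
4000L, // producerId
(short) 1, // producerEpoch
7, // coordinatorEpoch
TransactionResult.COMMIT,
java.util.Arrays.asList(
new TopicPartition("payments", 0),
new TopicPartition("payments", 1)));
WriteTxnMarkersRequest request = new WriteTxnMarkersRequest.Builder(
ApiKeys.WRITE_TXN_MARKERS.latestVersion(),
java.util.Collections.singletonList(marker))
.build(ApiKeys.WRITE_TXN_MARKERS.latestVersion());
assert request.data().markers().get(0).topics().size() == 1;
}
}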
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/WriteTxnMarkersResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.requests;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.message.WriteTxnMarkersResponseData;
import org.apache.kafka.common.message.WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult;
import org.apache.kafka.common.message.WriteTxnMarkersResponseData.WritableTxnMarkerResult;
import org.apache.kafka.common.message.WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.Errors;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* Possible error codes:
*
* - {@link Errors#CORRUPT_MESSAGE}
* - {@link Errors#INVALID_PRODUCER_EPOCH}
* - {@link Errors#UNKNOWN_TOPIC_OR_PARTITION}
* - {@link Errors#NOT_LEADER_OR_FOLLOWER}
* - {@link Errors#MESSAGE_TOO_LARGE}
* - {@link Errors#RECORD_LIST_TOO_LARGE}
* - {@link Errors#NOT_ENOUGH_REPLICAS}
* - {@link Errors#NOT_ENOUGH_REPLICAS_AFTER_APPEND}
* - {@link Errors#INVALID_REQUIRED_ACKS}
* - {@link Errors#TRANSACTION_COORDINATOR_FENCED}
* - {@link Errors#REQUEST_TIMED_OUT}
* - {@link Errors#CLUSTER_AUTHORIZATION_FAILED}
*/
public class WriteTxnMarkersResponse extends AbstractResponse {
private final WriteTxnMarkersResponseData data;
public WriteTxnMarkersResponse(Map<Long, Map<TopicPartition, Errors>> errors) {
super(ApiKeys.WRITE_TXN_MARKERS);
List<WritableTxnMarkerResult> markers = new ArrayList<>();
for (Map.Entry<Long, Map<TopicPartition, Errors>> markerEntry : errors.entrySet()) {
Map<String, WritableTxnMarkerTopicResult> responseTopicDataMap = new HashMap<>();
for (Map.Entry<TopicPartition, Errors> topicEntry : markerEntry.getValue().entrySet()) {
TopicPartition topicPartition = topicEntry.getKey();
String topicName = topicPartition.topic();
WritableTxnMarkerTopicResult topic =
responseTopicDataMap.getOrDefault(topicName, new WritableTxnMarkerTopicResult().setName(topicName));
topic.partitions().add(new WritableTxnMarkerPartitionResult()
.setErrorCode(topicEntry.getValue().code())
.setPartitionIndex(topicPartition.partition())
);
responseTopicDataMap.put(topicName, topic);
}
markers.add(new WritableTxnMarkerResult()
.setProducerId(markerEntry.getKey())
.setTopics(new ArrayList<>(responseTopicDataMap.values()))
);
}
this.data = new WriteTxnMarkersResponseData()
.setMarkers(markers);
}
public WriteTxnMarkersResponse(WriteTxnMarkersResponseData data) {
super(ApiKeys.WRITE_TXN_MARKERS);
this.data = data;
}
@Override
public WriteTxnMarkersResponseData data() {
return data;
}
public Map<Long, Map<TopicPartition, Errors>> errorsByProducerId() {
Map<Long, Map<TopicPartition, Errors>> errors = new HashMap<>();
for (WritableTxnMarkerResult marker : data.markers()) {
Map<TopicPartition, Errors> topicPartitionErrorsMap = new HashMap<>();
for (WritableTxnMarkerTopicResult topic : marker.topics()) {
for (WritableTxnMarkerPartitionResult partitionResult : topic.partitions()) {
topicPartitionErrorsMap.put(new TopicPartition(topic.name(), partitionResult.partitionIndex()),
Errors.forCode(partitionResult.errorCode()));
}
}
errors.put(marker.producerId(), topicPartitionErrorsMap);
}
return errors;
}
@Override
public int throttleTimeMs() {
return DEFAULT_THROTTLE_TIME;
}
@Override
public void maybeSetThrottleTimeMs(int throttleTimeMs) {
// Not supported by the response schema
}
@Override
public Map<Errors, Integer> errorCounts() {
Map<Errors, Integer> errorCounts = new HashMap<>();
for (WritableTxnMarkerResult marker : data.markers()) {
for (WritableTxnMarkerTopicResult topic : marker.topics()) {
for (WritableTxnMarkerPartitionResult partitionResult : topic.partitions())
updateErrorCounts(errorCounts, Errors.forCode(partitionResult.errorCode()));
}
}
return errorCounts;
}
public static WriteTxnMarkersResponse parse(ByteBuffer buffer, short version) {
return new WriteTxnMarkersResponse(new WriteTxnMarkersResponseData(new ByteBufferAccessor(buffer), version));
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/requests/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides request and response class representations for RPCs executed via the Kafka protocol.
* <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
*/
package org.apache.kafka.common.requests; |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/resource/PatternType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.resource;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* Resource pattern type.
*/
@InterfaceStability.Evolving
public enum PatternType {
/**
* Represents any PatternType which this client cannot understand, perhaps because this client is too old.
*/
UNKNOWN((byte) 0),
/**
* In a filter, matches any resource pattern type.
*/
ANY((byte) 1),
/**
* In a filter, will perform pattern matching.
*
* e.g. Given a filter of {@code ResourcePatternFilter(TOPIC, "payments.received", MATCH)}, the filter will match
* any {@link ResourcePattern} that matches topic 'payments.received'. This might include:
* <ul>
* <li>A Literal pattern with the same type and name, e.g. {@code ResourcePattern(TOPIC, "payments.received", LITERAL)}</li>
* <li>A Wildcard pattern with the same type, e.g. {@code ResourcePattern(TOPIC, "*", LITERAL)}</li>
* <li>A Prefixed pattern with the same type and where the name is a matching prefix, e.g. {@code ResourcePattern(TOPIC, "payments.", PREFIXED)}</li>
* </ul>
*/
MATCH((byte) 2),
/**
* A literal resource name.
*
* A literal name defines the full name of a resource, e.g. topic with name 'foo', or group with name 'bob'.
*
* The special wildcard character {@code *} can be used to represent a resource with any name.
*/
LITERAL((byte) 3),
/**
* A prefixed resource name.
*
* A prefixed name defines a prefix for a resource, e.g. topics with names that start with 'foo'.
*/
PREFIXED((byte) 4);
private final static Map<Byte, PatternType> CODE_TO_VALUE =
Collections.unmodifiableMap(
Arrays.stream(PatternType.values())
.collect(Collectors.toMap(PatternType::code, Function.identity()))
);
private final static Map<String, PatternType> NAME_TO_VALUE =
Collections.unmodifiableMap(
Arrays.stream(PatternType.values())
.collect(Collectors.toMap(PatternType::name, Function.identity()))
);
private final byte code;
PatternType(byte code) {
this.code = code;
}
/**
* @return the code of this resource pattern type.
*/
public byte code() {
return code;
}
/**
* @return whether this resource pattern type is UNKNOWN.
*/
public boolean isUnknown() {
return this == UNKNOWN;
}
/**
* @return whether this resource pattern type is a concrete type, rather than UNKNOWN or one of the filter types.
*/
public boolean isSpecific() {
return this != UNKNOWN && this != ANY && this != MATCH;
}
/**
* Return the PatternType with the provided code or {@link #UNKNOWN} if one cannot be found.
*/
public static PatternType fromCode(byte code) {
return CODE_TO_VALUE.getOrDefault(code, UNKNOWN);
}
/**
* Return the PatternType with the provided name or {@link #UNKNOWN} if one cannot be found.
*/
public static PatternType fromString(String name) {
return NAME_TO_VALUE.getOrDefault(name, UNKNOWN);
}
}
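// --- Editorial sketch (hypothetical; not part of the Kafka sources) ---
// fromCode and fromString fall back to UNKNOWN instead of throwing, so an older
// client degrades gracefully when a newer broker sends a code it predates.
final class PatternTypeSketch {
private PatternTypeSketch() { }
static void demo() {
assert PatternType.fromCode((byte) 4) == PatternType.PREFIXED;
assert PatternType.fromCode((byte) 99) == PatternType.UNKNOWN; // unrecognized code
assert PatternType.fromString("LITERAL") == PatternType.LITERAL; // exact name match
assert !PatternType.UNKNOWN.isSpecific() && !PatternType.MATCH.isSpecific();
}
}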
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/resource/Resource.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.resource;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Objects;
/**
* Represents a cluster resource with a tuple of (type, name).
*
* The API for this class is still evolving and we may break compatibility in minor releases, if necessary.
*/
@InterfaceStability.Evolving
public class Resource {
private final ResourceType resourceType;
private final String name;
/**
* The name of the CLUSTER resource.
*/
public final static String CLUSTER_NAME = "kafka-cluster";
/**
* A resource representing the whole cluster.
*/
public final static Resource CLUSTER = new Resource(ResourceType.CLUSTER, CLUSTER_NAME);
/**
* Create an instance of this class with the provided parameters.
*
* @param resourceType non-null resource type
* @param name non-null resource name
*/
public Resource(ResourceType resourceType, String name) {
Objects.requireNonNull(resourceType);
this.resourceType = resourceType;
Objects.requireNonNull(name);
this.name = name;
}
/**
* Return the resource type.
*/
public ResourceType resourceType() {
return resourceType;
}
/**
* Return the resource name.
*/
public String name() {
return name;
}
@Override
public String toString() {
return "(resourceType=" + resourceType + ", name=" + ((name == null) ? "<any>" : name) + ")";
}
/**
* Return true if this Resource has any UNKNOWN components.
*/
public boolean isUnknown() {
return resourceType.isUnknown();
}
@Override
public boolean equals(Object o) {
if (!(o instanceof Resource))
return false;
Resource other = (Resource) o;
return resourceType.equals(other.resourceType) && Objects.equals(name, other.name);
}
@Override
public int hashCode() {
return Objects.hash(resourceType, name);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/resource/ResourcePattern.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.resource;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Objects;
/**
* Represents a pattern that is used by ACLs to match zero or more
* {@link org.apache.kafka.common.resource.Resource Resources}.
*
* The API for this class is still evolving and we may break compatibility in minor releases, if necessary.
*/
@InterfaceStability.Evolving
public class ResourcePattern {
/**
* A special literal resource name that corresponds to 'all resources of a certain type'.
*/
public static final String WILDCARD_RESOURCE = "*";
private final ResourceType resourceType;
private final String name;
private final PatternType patternType;
/**
* Create a pattern using the supplied parameters.
*
* @param resourceType non-null, specific, resource type
* @param name non-null resource name, which can be the {@link #WILDCARD_RESOURCE}.
* @param patternType non-null, specific, resource pattern type, which controls how the pattern will match resource names.
*/
public ResourcePattern(ResourceType resourceType, String name, PatternType patternType) {
this.resourceType = Objects.requireNonNull(resourceType, "resourceType");
this.name = Objects.requireNonNull(name, "name");
this.patternType = Objects.requireNonNull(patternType, "patternType");
if (resourceType == ResourceType.ANY) {
throw new IllegalArgumentException("resourceType must not be ANY");
}
if (patternType == PatternType.MATCH || patternType == PatternType.ANY) {
throw new IllegalArgumentException("patternType must not be " + patternType);
}
}
/**
* @return the specific resource type this pattern matches
*/
public ResourceType resourceType() {
return resourceType;
}
/**
* @return the resource name.
*/
public String name() {
return name;
}
/**
* @return the resource pattern type.
*/
public PatternType patternType() {
return patternType;
}
/**
* @return a filter which matches only this pattern.
*/
public ResourcePatternFilter toFilter() {
return new ResourcePatternFilter(resourceType, name, patternType);
}
@Override
public String toString() {
return "ResourcePattern(resourceType=" + resourceType + ", name=" + name + ", patternType=" + patternType + ")";
}
/**
* @return {@code true} if this Resource has any UNKNOWN components.
*/
public boolean isUnknown() {
return resourceType.isUnknown() || patternType.isUnknown();
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
final ResourcePattern resource = (ResourcePattern) o;
return resourceType == resource.resourceType &&
Objects.equals(name, resource.name) &&
patternType == resource.patternType;
}
@Override
public int hashCode() {
return Objects.hash(resourceType, name, patternType);
}
}
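// --- Editorial sketch (hypothetical; not part of the Kafka sources) ---
// The constructor above rejects the filter-only pattern types (ANY and MATCH),
// and toFilter() yields a filter matching exactly this pattern. The topic prefix
// is made up for illustration.
final class ResourcePatternSketch {
private ResourcePatternSketch() { }
static void demo() {
ResourcePattern pattern = new ResourcePattern(ResourceType.TOPIC, "payments.", PatternType.PREFIXED);
assert pattern.toFilter().matches(pattern);
try {
new ResourcePattern(ResourceType.TOPIC, "payments.", PatternType.MATCH);
} catch (IllegalArgumentException expected) {
// filter-only pattern types are rejected at construction time
}
}
}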
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/resource/ResourcePatternFilter.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.resource;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Objects;
import static org.apache.kafka.common.resource.ResourcePattern.WILDCARD_RESOURCE;
/**
* Represents a filter that can match {@link ResourcePattern}.
* <p>
* The API for this class is still evolving and we may break compatibility in minor releases, if necessary.
*/
@InterfaceStability.Evolving
public class ResourcePatternFilter {
/**
* Matches any resource pattern.
*/
public static final ResourcePatternFilter ANY = new ResourcePatternFilter(ResourceType.ANY, null, PatternType.ANY);
private final ResourceType resourceType;
private final String name;
private final PatternType patternType;
/**
* Create a filter using the supplied parameters.
*
* @param resourceType non-null resource type.
* If {@link ResourceType#ANY}, the filter will ignore the resource type of the pattern.
* If any other resource type, the filter will match only patterns with the same type.
* @param name resource name or {@code null}.
* If {@code null}, the filter will ignore the name of resources.
* If {@link ResourcePattern#WILDCARD_RESOURCE}, will match only wildcard patterns.
* @param patternType non-null resource pattern type.
* If {@link PatternType#ANY}, the filter will match patterns regardless of pattern type.
* If {@link PatternType#MATCH}, the filter will match patterns that would match the supplied
* {@code name}, including any matching prefixed and wildcard patterns.
* If any other resource pattern type, the filter will match only patterns with the same type.
*/
public ResourcePatternFilter(ResourceType resourceType, String name, PatternType patternType) {
this.resourceType = Objects.requireNonNull(resourceType, "resourceType");
this.name = name;
this.patternType = Objects.requireNonNull(patternType, "patternType");
}
/**
* @return {@code true} if this filter has any UNKNOWN components.
*/
public boolean isUnknown() {
return resourceType.isUnknown() || patternType.isUnknown();
}
/**
* @return the specific resource type this pattern matches
*/
public ResourceType resourceType() {
return resourceType;
}
/**
* @return the resource name.
*/
public String name() {
return name;
}
/**
* @return the resource pattern type.
*/
public PatternType patternType() {
return patternType;
}
/**
* @return {@code true} if this filter matches the given pattern.
*/
public boolean matches(ResourcePattern pattern) {
if (!resourceType.equals(ResourceType.ANY) && !resourceType.equals(pattern.resourceType())) {
return false;
}
if (!patternType.equals(PatternType.ANY) && !patternType.equals(PatternType.MATCH) && !patternType.equals(pattern.patternType())) {
return false;
}
if (name == null) {
return true;
}
if (patternType.equals(PatternType.ANY) || patternType.equals(pattern.patternType())) {
return name.equals(pattern.name());
}
switch (pattern.patternType()) {
case LITERAL:
return name.equals(pattern.name()) || pattern.name().equals(WILDCARD_RESOURCE);
case PREFIXED:
return name.startsWith(pattern.name());
default:
throw new IllegalArgumentException("Unsupported PatternType: " + pattern.patternType());
}
}
/**
* @return {@code true} if this filter could only match one pattern.
* In other words, if there are no ANY or UNKNOWN fields.
*/
public boolean matchesAtMostOne() {
return findIndefiniteField() == null;
}
/**
* @return a string describing any ANY or UNKNOWN field, or null if there is no such field.
*/
public String findIndefiniteField() {
if (resourceType == ResourceType.ANY)
return "Resource type is ANY.";
if (resourceType == ResourceType.UNKNOWN)
return "Resource type is UNKNOWN.";
if (name == null)
return "Resource name is NULL.";
if (patternType == PatternType.MATCH)
return "Resource pattern type is MATCH.";
if (patternType == PatternType.UNKNOWN)
return "Resource pattern type is UNKNOWN.";
return null;
}
@Override
public String toString() {
return "ResourcePattern(resourceType=" + resourceType + ", name=" + ((name == null) ? "<any>" : name) + ", patternType=" + patternType + ")";
}
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
final ResourcePatternFilter resource = (ResourcePatternFilter) o;
return resourceType == resource.resourceType &&
Objects.equals(name, resource.name) &&
patternType == resource.patternType;
}
@Override
public int hashCode() {
return Objects.hash(resourceType, name, patternType);
}
}
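// --- Editorial sketch (hypothetical; not part of the Kafka sources) ---
// Walks through matches(): a MATCH filter on a concrete topic name matches the
// literal pattern, the wildcard pattern, and any covering prefix pattern, but
// not an unrelated prefix. The topic names are made up for illustration.
final class ResourcePatternFilterSketch {
private ResourcePatternFilterSketch() { }
static void demo() {
ResourcePatternFilter filter = new ResourcePatternFilter(
ResourceType.TOPIC, "payments.received", PatternType.MATCH);
assert filter.matches(new ResourcePattern(ResourceType.TOPIC, "payments.received", PatternType.LITERAL));
assert filter.matches(new ResourcePattern(ResourceType.TOPIC, "*", PatternType.LITERAL));
assert filter.matches(new ResourcePattern(ResourceType.TOPIC, "payments.", PatternType.PREFIXED));
assert !filter.matches(new ResourcePattern(ResourceType.TOPIC, "orders.", PatternType.PREFIXED));
}
}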
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/resource/ResourceType.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.resource;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.HashMap;
import java.util.Locale;
/**
* Represents a type of resource which an ACL can be applied to.
*
* The API for this class is still evolving and we may break compatibility in minor releases, if necessary.
*/
@InterfaceStability.Evolving
public enum ResourceType {
/**
* Represents any ResourceType which this client cannot understand,
* perhaps because this client is too old.
*/
UNKNOWN((byte) 0),
/**
* In a filter, matches any ResourceType.
*/
ANY((byte) 1),
/**
* A Kafka topic.
*/
TOPIC((byte) 2),
/**
* A consumer group.
*/
GROUP((byte) 3),
/**
* The cluster as a whole.
*/
CLUSTER((byte) 4),
/**
* A transactional ID.
*/
TRANSACTIONAL_ID((byte) 5),
/**
* A token ID.
*/
DELEGATION_TOKEN((byte) 6),
/**
* A user principal
*/
USER((byte) 7);
private final static HashMap<Byte, ResourceType> CODE_TO_VALUE = new HashMap<>();
static {
for (ResourceType resourceType : ResourceType.values()) {
CODE_TO_VALUE.put(resourceType.code, resourceType);
}
}
/**
* Parse the given string as an ACL resource type.
*
* @param str The string to parse.
*
* @return The ResourceType, or UNKNOWN if the string could not be matched.
*/
public static ResourceType fromString(String str) throws IllegalArgumentException {
try {
return ResourceType.valueOf(str.toUpperCase(Locale.ROOT));
} catch (IllegalArgumentException e) {
return UNKNOWN;
}
}
/**
* Return the ResourceType with the provided code or `ResourceType.UNKNOWN` if one cannot be found.
*/
public static ResourceType fromCode(byte code) {
ResourceType resourceType = CODE_TO_VALUE.get(code);
if (resourceType == null) {
return UNKNOWN;
}
return resourceType;
}
private final byte code;
ResourceType(byte code) {
this.code = code;
}
/**
* Return the code of this resource type.
*/
public byte code() {
return code;
}
/**
* Return whether this resource type is UNKNOWN.
*/
public boolean isUnknown() {
return this == UNKNOWN;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/resource/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides client handles representing logical resources in a Kafka cluster.
*/
package org.apache.kafka.common.resource; |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/JaasConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security;
import java.io.IOException;
import java.io.StreamTokenizer;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.config.SaslConfigs;
/**
* JAAS configuration parser that constructs a JAAS configuration object with a single
* login context from the Kafka configuration option {@link SaslConfigs#SASL_JAAS_CONFIG}.
* <p/>
* JAAS configuration file format is described <a href="http://docs.oracle.com/javase/8/docs/technotes/guides/security/jgss/tutorials/LoginConfigFile.html">here</a>.
* The format of the property value is:
* <pre>
* {@code
* <loginModuleClass> <controlFlag> (<optionName>=<optionValue>)*;
* }
* </pre>
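* For example, a plausible value (the login module is Kafka's PlainLoginModule; the
* credentials shown are illustrative only):
* <pre>
* {@code
* org.apache.kafka.common.security.plain.PlainLoginModule required username="alice" password="alice-secret";
* }
* </pre>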
*/
class JaasConfig extends Configuration {
private final String loginContextName;
private final List<AppConfigurationEntry> configEntries;
public JaasConfig(String loginContextName, String jaasConfigParams) {
StreamTokenizer tokenizer = new StreamTokenizer(new StringReader(jaasConfigParams));
tokenizer.slashSlashComments(true);
tokenizer.slashStarComments(true);
tokenizer.wordChars('-', '-');
tokenizer.wordChars('_', '_');
tokenizer.wordChars('$', '$');
try {
configEntries = new ArrayList<>();
while (tokenizer.nextToken() != StreamTokenizer.TT_EOF) {
configEntries.add(parseAppConfigurationEntry(tokenizer));
}
if (configEntries.isEmpty())
throw new IllegalArgumentException("Login module not specified in JAAS config");
this.loginContextName = loginContextName;
} catch (IOException e) {
throw new KafkaException("Unexpected exception while parsing JAAS config", e);
}
}
@Override
public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
if (this.loginContextName.equals(name))
return configEntries.toArray(new AppConfigurationEntry[0]);
else
return null;
}
private LoginModuleControlFlag loginModuleControlFlag(String flag) {
if (flag == null)
throw new IllegalArgumentException("Login module control flag is not available in the JAAS config");
LoginModuleControlFlag controlFlag;
switch (flag.toUpperCase(Locale.ROOT)) {
case "REQUIRED":
controlFlag = LoginModuleControlFlag.REQUIRED;
break;
case "REQUISITE":
controlFlag = LoginModuleControlFlag.REQUISITE;
break;
case "SUFFICIENT":
controlFlag = LoginModuleControlFlag.SUFFICIENT;
break;
case "OPTIONAL":
controlFlag = LoginModuleControlFlag.OPTIONAL;
break;
default:
throw new IllegalArgumentException("Invalid login module control flag '" + flag + "' in JAAS config");
}
return controlFlag;
}
private AppConfigurationEntry parseAppConfigurationEntry(StreamTokenizer tokenizer) throws IOException {
String loginModule = tokenizer.sval;
if (tokenizer.nextToken() == StreamTokenizer.TT_EOF)
throw new IllegalArgumentException("Login module control flag not specified in JAAS config");
LoginModuleControlFlag controlFlag = loginModuleControlFlag(tokenizer.sval);
Map<String, String> options = new HashMap<>();
while (tokenizer.nextToken() != StreamTokenizer.TT_EOF && tokenizer.ttype != ';') {
String key = tokenizer.sval;
if (tokenizer.nextToken() != '=' || tokenizer.nextToken() == StreamTokenizer.TT_EOF || tokenizer.sval == null)
throw new IllegalArgumentException("Value not specified for key '" + key + "' in JAAS config");
String value = tokenizer.sval;
options.put(key, value);
}
if (tokenizer.ttype != ';')
throw new IllegalArgumentException("JAAS config entry not terminated by semi-colon");
return new AppConfigurationEntry(loginModule, controlFlag, options);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/JaasContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.config.types.Password;
import org.apache.kafka.common.network.ListenerName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import static org.apache.kafka.common.security.JaasUtils.DISALLOWED_LOGIN_MODULES_CONFIG;
import static org.apache.kafka.common.security.JaasUtils.DISALLOWED_LOGIN_MODULES_DEFAULT;
public class JaasContext {
private static final Logger LOG = LoggerFactory.getLogger(JaasContext.class);
private static final String GLOBAL_CONTEXT_NAME_SERVER = "KafkaServer";
private static final String GLOBAL_CONTEXT_NAME_CLIENT = "KafkaClient";
/**
* Returns an instance of this class.
*
* The context will contain the configuration specified by the JAAS configuration property
* {@link SaslConfigs#SASL_JAAS_CONFIG}, prefixed with `listener.name.{listenerName}.{mechanism}.`
* (listenerName and mechanism in lower case). The returned context is named `KafkaServer` and
* contains a single login context entry loaded from the property.
* <p>
* If the property is not defined, the context will contain the default Configuration and
* the context name will be one of:
* <ol>
* <li>Lowercased listener name followed by a period and the string `KafkaServer`</li>
* <li>The string `KafkaServer`</li>
* </ol>
* If both are valid entries in the default JAAS configuration, the first option is chosen.
* </p>
*
* @throws IllegalArgumentException if listenerName or mechanism is not defined.
*/
public static JaasContext loadServerContext(ListenerName listenerName, String mechanism, Map<String, ?> configs) {
if (listenerName == null)
throw new IllegalArgumentException("listenerName should not be null for SERVER");
if (mechanism == null)
throw new IllegalArgumentException("mechanism should not be null for SERVER");
String listenerContextName = listenerName.value().toLowerCase(Locale.ROOT) + "." + GLOBAL_CONTEXT_NAME_SERVER;
Password dynamicJaasConfig = (Password) configs.get(mechanism.toLowerCase(Locale.ROOT) + "." + SaslConfigs.SASL_JAAS_CONFIG);
if (dynamicJaasConfig == null && configs.get(SaslConfigs.SASL_JAAS_CONFIG) != null)
LOG.warn("Server config {} should be prefixed with SASL mechanism name, ignoring config", SaslConfigs.SASL_JAAS_CONFIG);
return load(Type.SERVER, listenerContextName, GLOBAL_CONTEXT_NAME_SERVER, dynamicJaasConfig);
}
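// Illustrative broker property carrying the dynamic JAAS config described above, for a
// listener named SASL_SSL and the SCRAM-SHA-256 mechanism (names are examples only):
//
//     listener.name.sasl_ssl.scram-sha-256.sasl.jaas.config=<loginModuleClass> required ...;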
/**
* Returns an instance of this class.
*
* If the JAAS configuration property {@link SaslConfigs#SASL_JAAS_CONFIG} is specified,
* the configuration object is created by parsing the property value. Otherwise, the default Configuration
* is returned. The context name is always `KafkaClient`.
*
*/
public static JaasContext loadClientContext(Map<String, ?> configs) {
Password dynamicJaasConfig = (Password) configs.get(SaslConfigs.SASL_JAAS_CONFIG);
return load(JaasContext.Type.CLIENT, null, GLOBAL_CONTEXT_NAME_CLIENT, dynamicJaasConfig);
}
static JaasContext load(JaasContext.Type contextType, String listenerContextName,
String globalContextName, Password dynamicJaasConfig) {
if (dynamicJaasConfig != null) {
JaasConfig jaasConfig = new JaasConfig(globalContextName, dynamicJaasConfig.value());
AppConfigurationEntry[] contextModules = jaasConfig.getAppConfigurationEntry(globalContextName);
if (contextModules == null || contextModules.length == 0)
throw new IllegalArgumentException("JAAS config property does not contain any login modules");
else if (contextModules.length != 1)
throw new IllegalArgumentException("JAAS config property contains " + contextModules.length + " login modules, should be 1 module");
throwIfLoginModuleIsNotAllowed(contextModules[0]);
return new JaasContext(globalContextName, contextType, jaasConfig, dynamicJaasConfig);
} else
return defaultContext(contextType, listenerContextName, globalContextName);
}
private static void throwIfLoginModuleIsNotAllowed(AppConfigurationEntry appConfigurationEntry) {
Set<String> disallowedLoginModules = Arrays.stream(
System.getProperty(DISALLOWED_LOGIN_MODULES_CONFIG, DISALLOWED_LOGIN_MODULES_DEFAULT).split(","))
.map(String::trim)
.collect(Collectors.toSet());
String loginModuleName = appConfigurationEntry.getLoginModuleName().trim();
if (disallowedLoginModules.contains(loginModuleName)) {
throw new IllegalArgumentException(loginModuleName + " is not allowed. Update System property '"
+ DISALLOWED_LOGIN_MODULES_CONFIG + "' to allow " + loginModuleName);
}
}
private static JaasContext defaultContext(JaasContext.Type contextType, String listenerContextName,
String globalContextName) {
String jaasConfigFile = System.getProperty(JaasUtils.JAVA_LOGIN_CONFIG_PARAM);
if (jaasConfigFile == null) {
if (contextType == Type.CLIENT) {
LOG.debug("System property '" + JaasUtils.JAVA_LOGIN_CONFIG_PARAM + "' and Kafka SASL property '" +
SaslConfigs.SASL_JAAS_CONFIG + "' are not set, using default JAAS configuration.");
} else {
LOG.debug("System property '" + JaasUtils.JAVA_LOGIN_CONFIG_PARAM + "' is not set, using default JAAS " +
"configuration.");
}
}
Configuration jaasConfig = Configuration.getConfiguration();
AppConfigurationEntry[] configEntries = null;
String contextName = globalContextName;
if (listenerContextName != null) {
configEntries = jaasConfig.getAppConfigurationEntry(listenerContextName);
if (configEntries != null)
contextName = listenerContextName;
}
if (configEntries == null)
configEntries = jaasConfig.getAppConfigurationEntry(globalContextName);
if (configEntries == null) {
String listenerNameText = listenerContextName == null ? "" : " or '" + listenerContextName + "'";
String errorMessage = "Could not find a '" + globalContextName + "'" + listenerNameText + " entry in the JAAS " +
"configuration. System property '" + JaasUtils.JAVA_LOGIN_CONFIG_PARAM + "' is " +
(jaasConfigFile == null ? "not set" : jaasConfigFile);
throw new IllegalArgumentException(errorMessage);
}
for (AppConfigurationEntry appConfigurationEntry : configEntries) {
throwIfLoginModuleIsNotAllowed(appConfigurationEntry);
}
return new JaasContext(contextName, contextType, jaasConfig, null);
}
/**
* The type of the SASL login context: SERVER for the broker and CLIENT for clients (consumer, producer,
* etc.). This is used to validate behaviour (e.g. some functionality is only available in the broker or clients).
*/
public enum Type { CLIENT, SERVER }
private final String name;
private final Type type;
private final Configuration configuration;
private final List<AppConfigurationEntry> configurationEntries;
private final Password dynamicJaasConfig;
public JaasContext(String name, Type type, Configuration configuration, Password dynamicJaasConfig) {
this.name = name;
this.type = type;
this.configuration = configuration;
AppConfigurationEntry[] entries = configuration.getAppConfigurationEntry(name);
if (entries == null)
throw new IllegalArgumentException("Could not find a '" + name + "' entry in this JAAS configuration.");
this.configurationEntries = Collections.unmodifiableList(new ArrayList<>(Arrays.asList(entries)));
this.dynamicJaasConfig = dynamicJaasConfig;
}
public String name() {
return name;
}
public Type type() {
return type;
}
public Configuration configuration() {
return configuration;
}
public List<AppConfigurationEntry> configurationEntries() {
return configurationEntries;
}
public Password dynamicJaasConfig() {
return dynamicJaasConfig;
}
/**
* Returns the configuration option for <code>key</code> from this context.
* If a login module name is specified, the option value is returned only from that module.
*/
public static String configEntryOption(List<AppConfigurationEntry> configurationEntries, String key, String loginModuleName) {
for (AppConfigurationEntry entry : configurationEntries) {
if (loginModuleName != null && !loginModuleName.equals(entry.getLoginModuleName()))
continue;
Object val = entry.getOptions().get(key);
if (val != null)
return (String) val;
}
return null;
}
}
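// Usage sketch for configEntryOption: look up a JAAS module option across this context's
// entries. "serviceName" is the option key defined in JaasUtils; `configs` is assumed to
// be the client's parsed configuration map.
//
//     JaasContext context = JaasContext.loadClientContext(configs);
//     String serviceName = JaasContext.configEntryOption(
//         context.configurationEntries(), "serviceName", null); // null: search all modules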
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/JaasUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security;
import org.apache.kafka.common.KafkaException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.login.Configuration;
public final class JaasUtils {
private static final Logger LOG = LoggerFactory.getLogger(JaasUtils.class);
public static final String JAVA_LOGIN_CONFIG_PARAM = "java.security.auth.login.config";
public static final String DISALLOWED_LOGIN_MODULES_CONFIG = "org.apache.kafka.disallowed.login.modules";
public static final String DISALLOWED_LOGIN_MODULES_DEFAULT = "com.sun.security.auth.module.JndiLoginModule";
public static final String SERVICE_NAME = "serviceName";
public static final String ZK_SASL_CLIENT = "zookeeper.sasl.client";
public static final String ZK_LOGIN_CONTEXT_NAME_KEY = "zookeeper.sasl.clientconfig";
private static final String DEFAULT_ZK_LOGIN_CONTEXT_NAME = "Client";
private static final String DEFAULT_ZK_SASL_CLIENT = "true";
private JaasUtils() {}
public static String zkSecuritySysConfigString() {
String loginConfig = System.getProperty(JAVA_LOGIN_CONFIG_PARAM);
String clientEnabled = System.getProperty(ZK_SASL_CLIENT, "default:" + DEFAULT_ZK_SASL_CLIENT);
String contextName = System.getProperty(ZK_LOGIN_CONTEXT_NAME_KEY, "default:" + DEFAULT_ZK_LOGIN_CONTEXT_NAME);
return "[" +
JAVA_LOGIN_CONFIG_PARAM + "=" + loginConfig +
", " +
ZK_SASL_CLIENT + "=" + clientEnabled +
", " +
ZK_LOGIN_CONTEXT_NAME_KEY + "=" + contextName +
"]";
}
public static boolean isZkSaslEnabled() {
// Technically a client must also check if TLS mutual authentication has been configured,
// but we will leave that up to the client code to determine since direct connectivity to ZooKeeper
// has been deprecated in many clients and we don't wish to re-introduce a ZooKeeper jar dependency here.
boolean zkSaslEnabled = Boolean.parseBoolean(System.getProperty(ZK_SASL_CLIENT, DEFAULT_ZK_SASL_CLIENT));
String zkLoginContextName = System.getProperty(ZK_LOGIN_CONTEXT_NAME_KEY, DEFAULT_ZK_LOGIN_CONTEXT_NAME);
LOG.debug("Checking login config for Zookeeper JAAS context {}", zkSecuritySysConfigString());
boolean foundLoginConfigEntry;
try {
Configuration loginConf = Configuration.getConfiguration();
foundLoginConfigEntry = loginConf.getAppConfigurationEntry(zkLoginContextName) != null;
} catch (Exception e) {
throw new KafkaException("Exception while loading Zookeeper JAAS login context " +
zkSecuritySysConfigString(), e);
}
if (foundLoginConfigEntry && !zkSaslEnabled) {
LOG.error("JAAS configuration is present, but system property " +
ZK_SASL_CLIENT + " is set to false, which disables " +
"SASL in the ZooKeeper client");
throw new KafkaException("Exception while determining if ZooKeeper is secure " +
zkSecuritySysConfigString());
}
return foundLoginConfigEntry;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides mechanisms for securing a Kafka cluster and authenticating Kafka clients.
*/
package org.apache.kafka.common.security; |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/AuthenticateCallbackHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
import java.util.List;
import java.util.Map;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.login.AppConfigurationEntry;
/**
* Callback handler for SASL-based authentication.
*/
public interface AuthenticateCallbackHandler extends CallbackHandler {
/**
* Configures this callback handler for the specified SASL mechanism.
*
* @param configs Key-value pairs containing the parsed configuration options of
* the client or broker. Note that these are the Kafka configuration options
* and not the JAAS configuration options. JAAS config options may be obtained
* from `jaasConfigEntries` for callbacks which obtain some configs from the
* JAAS configuration. For configs that may be specified both as a Kafka config
* and as a JAAS config (e.g. sasl.kerberos.service.name), the configuration
* is treated as invalid if conflicting values are provided.
* @param saslMechanism Negotiated SASL mechanism. For clients, this is the SASL
* mechanism configured for the client. For brokers, this is the mechanism
* negotiated with the client and is one of the mechanisms enabled on the broker.
* @param jaasConfigEntries JAAS configuration entries from the JAAS login context.
* This list contains a single entry for clients and may contain more than
* one entry for brokers if multiple mechanisms are enabled on a listener using
* static JAAS configuration where there is no mapping between mechanisms and
* login module entries. In this case, callback handlers can use the login module in
* `jaasConfigEntries` to identify the entry corresponding to `saslMechanism`.
* Alternatively, dynamic JAAS configuration option
* {@link org.apache.kafka.common.config.SaslConfigs#SASL_JAAS_CONFIG} may be
* configured on brokers with listener and mechanism prefix, in which case
* only the configuration entry corresponding to `saslMechanism` will be provided
* in `jaasConfigEntries`.
*/
void configure(Map<String, ?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries);
/**
* Closes this instance.
*/
void close();
}
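// A minimal illustrative implementation (not part of this codebase): a handler that
// answers NameCallback/PasswordCallback from hard-coded values. A real handler would read
// these from the Kafka configs or the jaasConfigEntries passed to configure(). Imports
// from javax.security.auth.callback are assumed.
//
//     public class StaticPlainCallbackHandler implements AuthenticateCallbackHandler {
//         public void configure(Map<String, ?> configs, String saslMechanism,
//                               List<AppConfigurationEntry> jaasConfigEntries) { }
//         public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
//             for (Callback callback : callbacks) {
//                 if (callback instanceof NameCallback)
//                     ((NameCallback) callback).setName("alice");
//                 else if (callback instanceof PasswordCallback)
//                     ((PasswordCallback) callback).setPassword("secret".toCharArray());
//                 else
//                     throw new UnsupportedCallbackException(callback);
//             }
//         }
//         public void close() { }
//     }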
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/AuthenticationContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
import java.net.InetAddress;
/**
* An object representing contextual information from the authentication session. See
* {@link PlaintextAuthenticationContext}, {@link SaslAuthenticationContext}
* and {@link SslAuthenticationContext}. This class is only used in the broker.
*/
public interface AuthenticationContext {
/**
* Underlying security protocol of the authentication session.
*/
SecurityProtocol securityProtocol();
/**
* Address of the authenticated client.
*/
InetAddress clientAddress();
/**
* Name of the listener used for the connection.
*/
String listenerName();
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/KafkaPrincipal.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
import java.security.Principal;
import static java.util.Objects.requireNonNull;
/**
* <p>Principals in Kafka are defined by a type and a name. The principal type will always be <code>"User"</code>
* for the simple authorizer that is enabled by default, but custom authorizers can leverage different
* principal types (such as to enable group or role-based ACLs). The {@link KafkaPrincipalBuilder} interface
* is used when you need to derive a different principal type from the authentication context, or when
* you need to represent relations between different principals. For example, you could extend
* {@link KafkaPrincipal} in order to link a user principal to one or more role principals.
*
* <p>For custom extensions of {@link KafkaPrincipal}, there are two key points to keep in mind:
* <ol>
* <li>To be compatible with the ACL APIs provided by Kafka (including the command line tool), each ACL
* can only represent a permission granted to a single principal (consisting of a principal type and name).
* It is possible to use richer ACL semantics, but you must implement your own mechanisms for adding
* and removing ACLs.
* <li>In general, {@link KafkaPrincipal} extensions are only useful when the corresponding Authorizer
* is also aware of the extension. If you have a {@link KafkaPrincipalBuilder} which derives user groups
* from the authentication context (e.g. from an SSL client certificate), then you need a custom
* authorizer which is capable of using the additional group information.
* </ol>
*/
public class KafkaPrincipal implements Principal {
public static final String USER_TYPE = "User";
public static final KafkaPrincipal ANONYMOUS = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "ANONYMOUS");
private final String principalType;
private final String name;
private volatile boolean tokenAuthenticated;
public KafkaPrincipal(String principalType, String name) {
this(principalType, name, false);
}
public KafkaPrincipal(String principalType, String name, boolean tokenAuthenticated) {
this.principalType = requireNonNull(principalType, "Principal type cannot be null");
this.name = requireNonNull(name, "Principal name cannot be null");
this.tokenAuthenticated = tokenAuthenticated;
}
@Override
public String toString() {
return principalType + ":" + name;
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null) return false;
if (getClass() != o.getClass()) return false;
KafkaPrincipal that = (KafkaPrincipal) o;
return principalType.equals(that.principalType) && name.equals(that.name);
}
@Override
public int hashCode() {
int result = principalType != null ? principalType.hashCode() : 0;
result = 31 * result + (name != null ? name.hashCode() : 0);
return result;
}
@Override
public String getName() {
return name;
}
public String getPrincipalType() {
return principalType;
}
public void tokenAuthenticated(boolean tokenAuthenticated) {
this.tokenAuthenticated = tokenAuthenticated;
}
public boolean tokenAuthenticated() {
return tokenAuthenticated;
}
}
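// Usage sketch: principals compare by type and name only; note that tokenAuthenticated
// does not participate in equals/hashCode above.
//
//     KafkaPrincipal alice = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "alice");
//     alice.toString();                                        // "User:alice"
//     alice.equals(new KafkaPrincipal("User", "alice", true)); // true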
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/KafkaPrincipalBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
/**
* Pluggable principal builder interface which supports both SSL authentication through
* {@link SslAuthenticationContext} and SASL through {@link SaslAuthenticationContext}.
*
* Note that the {@link org.apache.kafka.common.Configurable} and {@link java.io.Closeable}
* interfaces are respected if implemented. Additionally, implementations must provide a
* default no-arg constructor.
*/
public interface KafkaPrincipalBuilder {
/**
* Build a Kafka principal from the authentication context.
* @param context The authentication context (either {@link SslAuthenticationContext} or
* {@link SaslAuthenticationContext})
* @return The built principal which may provide additional enrichment through a subclass of
* {@link KafkaPrincipalBuilder}.
*/
KafkaPrincipal build(AuthenticationContext context);
}
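// An illustrative custom builder (hypothetical, not part of this codebase): derive the
// principal from the SSL peer certificate when one is available, else fall back to
// ANONYMOUS. SSLPeerUnverifiedException comes from javax.net.ssl.
//
//     public class CnPrincipalBuilder implements KafkaPrincipalBuilder {
//         public KafkaPrincipal build(AuthenticationContext context) {
//             if (context instanceof SslAuthenticationContext) {
//                 try {
//                     String dn = ((SslAuthenticationContext) context)
//                             .session().getPeerPrincipal().getName();
//                     return new KafkaPrincipal(KafkaPrincipal.USER_TYPE, dn);
//                 } catch (SSLPeerUnverifiedException e) {
//                     // no client certificate presented; fall through
//                 }
//             }
//             return KafkaPrincipal.ANONYMOUS;
//         }
//     }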
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/KafkaPrincipalSerde.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
import org.apache.kafka.common.errors.SerializationException;
/**
* Serializer/Deserializer interface for {@link KafkaPrincipal} for the purpose of inter-broker forwarding.
* Any serialization/deserialization failure should raise a {@link SerializationException} so that it is handled consistently.
*/
public interface KafkaPrincipalSerde {
/**
* Serialize a {@link KafkaPrincipal} into byte array.
*
* @param principal principal to be serialized
* @return serialized bytes
* @throws SerializationException if the principal cannot be serialized
*/
byte[] serialize(KafkaPrincipal principal) throws SerializationException;
/**
* Deserialize a {@link KafkaPrincipal} from byte array.
* @param bytes byte array to be deserialized
* @return the deserialized principal
* @throws SerializationException if the bytes cannot be deserialized
*/
KafkaPrincipal deserialize(byte[] bytes) throws SerializationException;
}
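// An illustrative serde (hypothetical, not part of this codebase): encode the principal
// as UTF-8 "type:name". A production implementation should prefer a versioned binary
// format. StandardCharsets is assumed to be imported from java.nio.charset.
//
//     public class SimplePrincipalSerde implements KafkaPrincipalSerde {
//         public byte[] serialize(KafkaPrincipal principal) {
//             return (principal.getPrincipalType() + ":" + principal.getName())
//                     .getBytes(StandardCharsets.UTF_8);
//         }
//         public KafkaPrincipal deserialize(byte[] bytes) {
//             String[] parts = new String(bytes, StandardCharsets.UTF_8).split(":", 2);
//             if (parts.length != 2)
//                 throw new SerializationException("Malformed principal: expected type:name");
//             return new KafkaPrincipal(parts[0], parts[1]);
//         }
//     }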
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/Login.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
import java.util.Map;
import javax.security.auth.Subject;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;
/**
* Login interface for authentication.
*/
public interface Login {
/**
* Configures this login instance.
* @param configs Key-value pairs containing the parsed configuration options of
* the client or broker. Note that these are the Kafka configuration options
* and not the JAAS configuration options. The JAAS options may be obtained
* from `jaasConfiguration`.
* @param contextName JAAS context name for this login which may be used to obtain
* the login context from `jaasConfiguration`.
* @param jaasConfiguration JAAS configuration containing the login context named
* `contextName`. If static JAAS configuration is used, this `Configuration`
* may also contain other login contexts.
* @param loginCallbackHandler Login callback handler instance to use for this Login.
* Login callback handler class may be configured using
* {@link org.apache.kafka.common.config.SaslConfigs#SASL_LOGIN_CALLBACK_HANDLER_CLASS}.
*/
void configure(Map<String, ?> configs, String contextName, Configuration jaasConfiguration,
AuthenticateCallbackHandler loginCallbackHandler);
/**
* Performs login for each login module specified for the login context of this instance.
*/
LoginContext login() throws LoginException;
/**
* Returns the authenticated subject of this login context.
*/
Subject subject();
/**
* Returns the service name to be used for SASL.
*/
String serviceName();
/**
* Closes this instance.
*/
void close();
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/PlaintextAuthenticationContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
import java.net.InetAddress;
public class PlaintextAuthenticationContext implements AuthenticationContext {
private final InetAddress clientAddress;
private final String listenerName;
public PlaintextAuthenticationContext(InetAddress clientAddress, String listenerName) {
this.clientAddress = clientAddress;
this.listenerName = listenerName;
}
@Override
public SecurityProtocol securityProtocol() {
return SecurityProtocol.PLAINTEXT;
}
@Override
public InetAddress clientAddress() {
return clientAddress;
}
@Override
public String listenerName() {
return listenerName;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/SaslAuthenticationContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
import javax.net.ssl.SSLSession;
import javax.security.sasl.SaslServer;
import java.net.InetAddress;
import java.util.Optional;
public class SaslAuthenticationContext implements AuthenticationContext {
private final SaslServer server;
private final SecurityProtocol securityProtocol;
private final InetAddress clientAddress;
private final String listenerName;
private final Optional<SSLSession> sslSession;
public SaslAuthenticationContext(SaslServer server, SecurityProtocol securityProtocol, InetAddress clientAddress, String listenerName) {
this(server, securityProtocol, clientAddress, listenerName, Optional.empty());
}
public SaslAuthenticationContext(SaslServer server, SecurityProtocol securityProtocol,
InetAddress clientAddress,
String listenerName,
Optional<SSLSession> sslSession) {
this.server = server;
this.securityProtocol = securityProtocol;
this.clientAddress = clientAddress;
this.listenerName = listenerName;
this.sslSession = sslSession;
}
public SaslServer server() {
return server;
}
/**
* Returns the SSL session for the connection if the security protocol is SASL_SSL. If SSL
* mutual client authentication is enabled for the listener, the peer principal can be
* determined using {@link SSLSession#getPeerPrincipal()}.
*/
public Optional<SSLSession> sslSession() {
return sslSession;
}
@Override
public SecurityProtocol securityProtocol() {
return securityProtocol;
}
@Override
public InetAddress clientAddress() {
return clientAddress;
}
@Override
public String listenerName() {
return listenerName;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/SaslExtensions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.StringJoiner;
import javax.security.auth.Subject;
/**
* A simple immutable value object class holding customizable SASL extensions.
*
* <p/>
*
* <b>Note on object identity and equality</b>: <code>SaslExtensions</code> <em>intentionally</em>
* overrides the standard {@link #equals(Object)} and {@link #hashCode()} methods, delegating to their
* respective {@link Object#equals(Object)} and {@link Object#hashCode()} implementations. In so
* doing, it provides equality <em>only</em> via reference identity and will not base equality on
* the underlying values of its {@link #extensionsMap extensions map}.
*
* <p/>
*
* The reason for this approach to equality is based off of the manner in which
* credentials are stored in a {@link Subject}. <code>SaslExtensions</code> are added to and
* removed from a {@link Subject} via its {@link Subject#getPublicCredentials() public credentials}.
* The public credentials are stored in a {@link Set} in the {@link Subject}, so object
* equality becomes a concern. With shallow, reference-based equality, distinct
* <code>SaslExtensions</code> instances with the same map values can be considered unique. This is
* critical to operations like token refresh.
*
* See <a href="https://issues.apache.org/jira/browse/KAFKA-14062">KAFKA-14062</a> for more detail.
*/
public class SaslExtensions {
private final Map<String, String> extensionsMap;
public SaslExtensions(Map<String, String> extensionsMap) {
this.extensionsMap = Collections.unmodifiableMap(new HashMap<>(extensionsMap));
}
/**
* Returns an <strong>immutable</strong> map of the extension names and their values.
*/
public Map<String, String> map() {
return extensionsMap;
}
/**
* Creates an "empty" instance indicating no SASL extensions. <em>Do not cache the result of
* this method call</em> for use by multiple {@link Subject}s as the references need to be
* unique.
*
* <p/>
*
* See the class-level documentation for details.
* @return Unique, but empty, <code>SaslExtensions</code> instance
*/
@SuppressWarnings("unchecked")
public static SaslExtensions empty() {
// It's ok to re-use the EMPTY_MAP instance as the object equality is on the outer
// SaslExtensions reference.
return new SaslExtensions(Collections.EMPTY_MAP);
}
/**
* Implements equals using the reference comparison implementation from
* {@link Object#equals(Object)}.
*
* <p/>
*
* See the class-level documentation for details.
*
* @param o Other object to compare
* @return True if <code>o == this</code>
*/
@Override
public final boolean equals(Object o) {
return super.equals(o);
}
/**
* Implements <code>hashCode</code> using the native implementation from
* {@link Object#hashCode()}.
*
* <p/>
*
* See the class-level documentation for details.
*
* @return Hash code of instance
*/
@Override
public final int hashCode() {
return super.hashCode();
}
@Override
public String toString() {
return new StringJoiner(", ", SaslExtensions.class.getSimpleName() + "[", "]")
.add("extensionsMap=" + extensionsMap)
.toString();
}
}
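// Demonstration of the reference-equality contract documented above: instances built from
// equal maps remain distinct, which is what keeps them unique inside a Subject's
// credential Set (the map key shown is arbitrary).
//
//     Map<String, String> m = Collections.singletonMap("traceId", "abc");
//     new SaslExtensions(m).equals(new SaslExtensions(m));   // false: identity-based
//     SaslExtensions.empty().equals(SaslExtensions.empty()); // false: each call is unique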
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/SaslExtensionsCallback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
import java.util.Objects;
import javax.security.auth.callback.Callback;
/**
* Optional callback used for SASL mechanisms if any extensions need to be set
* in the SASL exchange.
*/
public class SaslExtensionsCallback implements Callback {
private SaslExtensions extensions = SaslExtensions.empty();
/**
* Returns the {@link SaslExtensions}, always non-null, consisting of the extension
* names and values that are sent by the client to the server in the initial
* client SASL authentication message. The default value is
* {@link SaslExtensions#empty()} so that if this callback is
* unhandled the client will still see a non-null value.
*/
public SaslExtensions extensions() {
return extensions;
}
/**
* Sets the SASL extensions on this callback.
*
* @param extensions
* the mandatory extensions to set
*/
public void extensions(SaslExtensions extensions) {
this.extensions = Objects.requireNonNull(extensions, "extensions must not be null");
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/SecurityProtocol.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
public enum SecurityProtocol {
/** Un-authenticated, non-encrypted channel */
PLAINTEXT(0, "PLAINTEXT"),
/** SSL channel */
SSL(1, "SSL"),
/** SASL authenticated, non-encrypted channel */
SASL_PLAINTEXT(2, "SASL_PLAINTEXT"),
/** SASL authenticated, SSL channel */
SASL_SSL(3, "SASL_SSL");
private static final Map<Short, SecurityProtocol> CODE_TO_SECURITY_PROTOCOL;
private static final List<String> NAMES;
static {
SecurityProtocol[] protocols = SecurityProtocol.values();
List<String> names = new ArrayList<>(protocols.length);
Map<Short, SecurityProtocol> codeToSecurityProtocol = new HashMap<>(protocols.length);
for (SecurityProtocol proto : protocols) {
codeToSecurityProtocol.put(proto.id, proto);
names.add(proto.name);
}
CODE_TO_SECURITY_PROTOCOL = Collections.unmodifiableMap(codeToSecurityProtocol);
NAMES = Collections.unmodifiableList(names);
}
/** The permanent and immutable id of a security protocol -- this can't change, and must match kafka.cluster.SecurityProtocol */
public final short id;
/** Name of the security protocol. This may be used by client configuration. */
public final String name;
SecurityProtocol(int id, String name) {
this.id = (short) id;
this.name = name;
}
public static List<String> names() {
return NAMES;
}
public static SecurityProtocol forId(short id) {
return CODE_TO_SECURITY_PROTOCOL.get(id);
}
/** Case-insensitive lookup by protocol name. */
public static SecurityProtocol forName(String name) {
return SecurityProtocol.valueOf(name.toUpperCase(Locale.ROOT));
}
}
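// Usage sketch for the lookup helpers above. Note the asymmetry: forId returns null for an
// unknown id, while forName throws IllegalArgumentException for an unknown name.
//
//     SecurityProtocol byName = SecurityProtocol.forName("sasl_ssl"); // SASL_SSL
//     SecurityProtocol byId = SecurityProtocol.forId((short) 1);      // SSL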
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/SecurityProviderCreator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.security.Provider;
import java.util.Map;
/**
* An interface for generating security providers.
*/
@InterfaceStability.Evolving
public interface SecurityProviderCreator extends Configurable {
/**
* Configures this creator before the security provider is generated.
* @param config configuration parameters for initialising the security provider
*/
default void configure(Map<String, ?> config) {
}
/**
* Returns the configured security provider.
*/
Provider getProvider();
}
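// An illustrative creator (hypothetical, not part of this codebase) that supplies the
// widely used BouncyCastle provider, an external dependency; any java.security.Provider
// can be returned the same way.
//
//     public class BcProviderCreator implements SecurityProviderCreator {
//         public Provider getProvider() {
//             return new org.bouncycastle.jce.provider.BouncyCastleProvider();
//         }
//     }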
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/SslAuthenticationContext.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
import javax.net.ssl.SSLSession;
import java.net.InetAddress;
public class SslAuthenticationContext implements AuthenticationContext {
private final SSLSession session;
private final InetAddress clientAddress;
private final String listenerName;
public SslAuthenticationContext(SSLSession session, InetAddress clientAddress, String listenerName) {
this.session = session;
this.clientAddress = clientAddress;
this.listenerName = listenerName;
}
public SSLSession session() {
return session;
}
@Override
public SecurityProtocol securityProtocol() {
return SecurityProtocol.SSL;
}
@Override
public InetAddress clientAddress() {
return clientAddress;
}
@Override
public String listenerName() {
return listenerName;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/SslEngineFactory.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.auth;
import org.apache.kafka.common.Configurable;
import javax.net.ssl.SSLEngine;
import java.io.Closeable;
import java.security.KeyStore;
import java.util.Map;
import java.util.Set;
/**
* Plugin interface for allowing creation of <code>SSLEngine</code> object in a custom way.
* For example, you can use this to customize loading your key material and trust material needed for <code>SSLContext</code>.
* This is complementary to the existing Java Security Provider mechanism which allows the entire provider
* to be replaced with a custom provider. In scenarios where only the configuration mechanism for SSL engines
* needs to be updated, this interface provides a convenient way of overriding the default implementation.
*/
public interface SslEngineFactory extends Configurable, Closeable {
/**
* Creates a new <code>SSLEngine</code> object to be used by the client.
*
* @param peerHost The peer host to use. This is used in client mode if endpoint validation is enabled.
* @param peerPort The peer port to use. This is a hint and not used for validation.
* @param endpointIdentification Endpoint identification algorithm for client mode.
* @return The new <code>SSLEngine</code>.
*/
SSLEngine createClientSslEngine(String peerHost, int peerPort, String endpointIdentification);
/**
* Creates a new <code>SSLEngine</code> object to be used by the server.
*
* @param peerHost The peer host to use. This is a hint and not used for validation.
* @param peerPort The peer port to use. This is a hint and not used for validation.
* @return The new <code>SSLEngine</code>.
*/
SSLEngine createServerSslEngine(String peerHost, int peerPort);
/**
* Returns true if <code>SSLEngine</code> needs to be rebuilt. This method will be called when reconfiguration is triggered on
* the <code>SslFactory</code> used to create SSL engines. Based on the new configs provided in <i>nextConfigs</i>, this method
* will decide whether the underlying <code>SSLEngine</code> object needs to be rebuilt. If this method returns true, the
* <code>SslFactory</code> will create a new instance of this object with <i>nextConfigs</i> and run other
* checks before deciding to use the new object for <i>new incoming connection</i> requests. Existing connections
* are not impacted by this and will not see any changes done as part of reconfiguration.
* <p>
* For example, if the implementation depends on file-based key material, it can check if the file was updated
* compared to the previous/last-loaded timestamp and return true.
* </p>
*
* @param nextConfigs The new configuration we want to use.
* @return True only if the underlying <code>SSLEngine</code> object should be rebuilt.
*/
boolean shouldBeRebuilt(Map<String, Object> nextConfigs);
/**
* Returns the names of configs that may be reconfigured.
* @return Names of configuration options that are dynamically reconfigurable.
*/
Set<String> reconfigurableConfigs();
/**
* Returns keystore configured for this factory.
* @return The keystore for this factory or null if a keystore is not configured.
*/
KeyStore keystore();
/**
* Returns truststore configured for this factory.
* @return The truststore for this factory or null if a truststore is not configured.
*/
KeyStore truststore();
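// An illustrative shouldBeRebuilt strategy (hypothetical, not part of this codebase) for
// a factory backed by a keystore file: rebuild when the configured location changes.
// "ssl.keystore.location" is the standard Kafka config key; currentKeystoreLocation is an
// assumed field captured at configure() time, and java.util.Objects is assumed imported.
//
//     public boolean shouldBeRebuilt(Map<String, Object> nextConfigs) {
//         return !Objects.equals(nextConfigs.get("ssl.keystore.location"), currentKeystoreLocation);
//     }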
} |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/auth/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides pluggable interfaces for implementing Kafka authentication mechanisms.
*/
package org.apache.kafka.common.security.auth; |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/authenticator/AbstractLogin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.authenticator;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;
import javax.security.sasl.RealmCallback;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.Subject;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.auth.Login;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.Map;
/**
* Base login class that implements methods common to typical SASL mechanisms.
*/
public abstract class AbstractLogin implements Login {
private static final Logger log = LoggerFactory.getLogger(AbstractLogin.class);
private String contextName;
private Configuration configuration;
private LoginContext loginContext;
private AuthenticateCallbackHandler loginCallbackHandler;
@Override
public void configure(Map<String, ?> configs, String contextName, Configuration configuration,
AuthenticateCallbackHandler loginCallbackHandler) {
this.contextName = contextName;
this.configuration = configuration;
this.loginCallbackHandler = loginCallbackHandler;
}
@Override
public LoginContext login() throws LoginException {
loginContext = new LoginContext(contextName, null, loginCallbackHandler, configuration);
loginContext.login();
log.info("Successfully logged in.");
return loginContext;
}
@Override
public Subject subject() {
return loginContext.getSubject();
}
protected String contextName() {
return contextName;
}
protected Configuration configuration() {
return configuration;
}
/**
* Callback handler for creating login context. Login callback handlers
* should support the callbacks required for the login modules used by
* the KafkaServer and KafkaClient contexts. Kafka does not support
* callback handlers which require additional user input.
*/
public static class DefaultLoginCallbackHandler implements AuthenticateCallbackHandler {
@Override
public void configure(Map<String, ?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries) {
}
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof NameCallback) {
NameCallback nc = (NameCallback) callback;
nc.setName(nc.getDefaultName());
} else if (callback instanceof PasswordCallback) {
String errorMessage = "Could not login: the client is being asked for a password, but the Kafka" +
" client code does not currently support obtaining a password from the user.";
throw new UnsupportedCallbackException(callback, errorMessage);
} else if (callback instanceof RealmCallback) {
RealmCallback rc = (RealmCallback) callback;
rc.setText(rc.getDefaultText());
} else {
throw new UnsupportedCallbackException(callback, "Unrecognized SASL Login callback");
}
}
}
@Override
public void close() {
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/authenticator/CredentialCache.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.authenticator;
import java.util.concurrent.ConcurrentHashMap;
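/**
 * A registry of per-mechanism credential caches keyed by SASL mechanism name. Each
 * {@link Cache} maps a username to a credential of the mechanism's credential class.
 * <p>
 * A minimal usage sketch (the {@code String} credential type here is illustrative; real
 * deployments typically cache mechanism-specific credential objects):
 * <pre>{@code
 * CredentialCache credentialCache = new CredentialCache();
 * CredentialCache.Cache<String> cache = credentialCache.createCache("SCRAM-SHA-256", String.class);
 * cache.put("alice", "stored-credential");
 * // later, look the cache up again by mechanism and credential class
 * String credential = credentialCache.cache("SCRAM-SHA-256", String.class).get("alice");
 * }</pre>
 */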
public class CredentialCache {
private final ConcurrentHashMap<String, Cache<?>> cacheMap = new ConcurrentHashMap<>();
public <C> Cache<C> createCache(String mechanism, Class<C> credentialClass) {
Cache<C> cache = new Cache<>(credentialClass);
@SuppressWarnings("unchecked")
Cache<C> oldCache = (Cache<C>) cacheMap.putIfAbsent(mechanism, cache);
return oldCache == null ? cache : oldCache;
}
@SuppressWarnings("unchecked")
public <C> Cache<C> cache(String mechanism, Class<C> credentialClass) {
Cache<?> cache = cacheMap.get(mechanism);
if (cache != null) {
if (cache.credentialClass() != credentialClass)
throw new IllegalArgumentException("Invalid credential class " + credentialClass + ", expected " + cache.credentialClass());
return (Cache<C>) cache;
} else
return null;
}
public static class Cache<C> {
private final Class<C> credentialClass;
private final ConcurrentHashMap<String, C> credentials;
public Cache(Class<C> credentialClass) {
this.credentialClass = credentialClass;
this.credentials = new ConcurrentHashMap<>();
}
public C get(String username) {
return credentials.get(username);
}
public C put(String username, C credential) {
return credentials.put(username, credential);
}
public C remove(String username) {
return credentials.remove(username);
}
public Class<C> credentialClass() {
return credentialClass;
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/authenticator/DefaultKafkaPrincipalBuilder.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.authenticator;
import javax.security.auth.x500.X500Principal;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.errors.SerializationException;
import org.apache.kafka.common.message.DefaultPrincipalData;
import org.apache.kafka.common.protocol.ByteBufferAccessor;
import org.apache.kafka.common.protocol.MessageUtil;
import org.apache.kafka.common.security.auth.AuthenticationContext;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.auth.KafkaPrincipalBuilder;
import org.apache.kafka.common.security.auth.KafkaPrincipalSerde;
import org.apache.kafka.common.security.auth.PlaintextAuthenticationContext;
import org.apache.kafka.common.security.auth.SaslAuthenticationContext;
import org.apache.kafka.common.security.auth.SslAuthenticationContext;
import org.apache.kafka.common.security.kerberos.KerberosName;
import org.apache.kafka.common.security.kerberos.KerberosShortNamer;
import javax.net.ssl.SSLPeerUnverifiedException;
import javax.net.ssl.SSLSession;
import javax.security.sasl.SaslServer;
import org.apache.kafka.common.security.ssl.SslPrincipalMapper;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.Principal;
/**
* Default implementation of {@link KafkaPrincipalBuilder} which provides basic support for
* SSL authentication and SASL authentication. In the latter case, when GSSAPI is used, this
* class applies {@link org.apache.kafka.common.security.kerberos.KerberosShortNamer} to transform
* the name.
*
* NOTE: This is an internal class and can change without notice.
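* <p>
* A minimal round-trip sketch of the principal serde below (no Kerberos or SSL mapping rules,
* hence the null constructor arguments; the principal name is illustrative):
* <pre>{@code
* DefaultKafkaPrincipalBuilder builder = new DefaultKafkaPrincipalBuilder(null, null);
* KafkaPrincipal alice = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "alice");
* byte[] bytes = builder.serialize(alice);
* KafkaPrincipal roundTripped = builder.deserialize(bytes); // equal to alice
* }</pre>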
*/
public class DefaultKafkaPrincipalBuilder implements KafkaPrincipalBuilder, KafkaPrincipalSerde {
private final KerberosShortNamer kerberosShortNamer;
private final SslPrincipalMapper sslPrincipalMapper;
/**
* Construct a new instance.
*
* @param kerberosShortNamer Kerberos name rewrite rules or null if none have been configured
* @param sslPrincipalMapper SSL Principal mapper or null if none have been configured
*/
public DefaultKafkaPrincipalBuilder(KerberosShortNamer kerberosShortNamer, SslPrincipalMapper sslPrincipalMapper) {
this.kerberosShortNamer = kerberosShortNamer;
this.sslPrincipalMapper = sslPrincipalMapper;
}
@Override
public KafkaPrincipal build(AuthenticationContext context) {
if (context instanceof PlaintextAuthenticationContext) {
return KafkaPrincipal.ANONYMOUS;
} else if (context instanceof SslAuthenticationContext) {
SSLSession sslSession = ((SslAuthenticationContext) context).session();
try {
return applySslPrincipalMapper(sslSession.getPeerPrincipal());
} catch (SSLPeerUnverifiedException se) {
return KafkaPrincipal.ANONYMOUS;
}
} else if (context instanceof SaslAuthenticationContext) {
SaslServer saslServer = ((SaslAuthenticationContext) context).server();
if (SaslConfigs.GSSAPI_MECHANISM.equals(saslServer.getMechanismName()))
return applyKerberosShortNamer(saslServer.getAuthorizationID());
else
return new KafkaPrincipal(KafkaPrincipal.USER_TYPE, saslServer.getAuthorizationID());
} else {
throw new IllegalArgumentException("Unhandled authentication context type: " + context.getClass().getName());
}
}
private KafkaPrincipal applyKerberosShortNamer(String authorizationId) {
KerberosName kerberosName = KerberosName.parse(authorizationId);
try {
String shortName = kerberosShortNamer.shortName(kerberosName);
return new KafkaPrincipal(KafkaPrincipal.USER_TYPE, shortName);
} catch (IOException e) {
throw new KafkaException("Failed to set name for '" + kerberosName +
"' based on Kerberos authentication rules.", e);
}
}
private KafkaPrincipal applySslPrincipalMapper(Principal principal) {
try {
if (!(principal instanceof X500Principal) || principal == KafkaPrincipal.ANONYMOUS) {
return new KafkaPrincipal(KafkaPrincipal.USER_TYPE, principal.getName());
} else {
return new KafkaPrincipal(KafkaPrincipal.USER_TYPE, sslPrincipalMapper.getName(principal.getName()));
}
} catch (IOException e) {
throw new KafkaException("Failed to map name for '" + principal.getName() +
"' based on SSL principal mapping rules.", e);
}
}
@Override
public byte[] serialize(KafkaPrincipal principal) {
DefaultPrincipalData data = new DefaultPrincipalData()
.setType(principal.getPrincipalType())
.setName(principal.getName())
.setTokenAuthenticated(principal.tokenAuthenticated());
return MessageUtil.toVersionPrefixedBytes(DefaultPrincipalData.HIGHEST_SUPPORTED_VERSION, data);
}
@Override
public KafkaPrincipal deserialize(byte[] bytes) {
ByteBuffer buffer = ByteBuffer.wrap(bytes);
short version = buffer.getShort();
if (version < DefaultPrincipalData.LOWEST_SUPPORTED_VERSION || version > DefaultPrincipalData.HIGHEST_SUPPORTED_VERSION) {
throw new SerializationException("Invalid principal data version " + version);
}
DefaultPrincipalData data = new DefaultPrincipalData(new ByteBufferAccessor(buffer), version);
return new KafkaPrincipal(data.type(), data.name(), data.tokenAuthenticated());
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/authenticator/DefaultLogin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.authenticator;
public class DefaultLogin extends AbstractLogin {
@Override
public String serviceName() {
return "kafka";
}
@Override
public void close() {
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/authenticator/LoginManager.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.authenticator;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.config.types.Password;
import org.apache.kafka.common.network.ListenerName;
import org.apache.kafka.common.security.JaasContext;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.auth.Login;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule;
import org.apache.kafka.common.security.oauthbearer.internals.unsecured.OAuthBearerUnsecuredLoginCallbackHandler;
import org.apache.kafka.common.utils.SecurityUtils;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.auth.Subject;
import javax.security.auth.login.LoginException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
public class LoginManager {
private static final Logger LOGGER = LoggerFactory.getLogger(LoginManager.class);
// static configs (broker or client)
private static final Map<LoginMetadata<String>, LoginManager> STATIC_INSTANCES = new HashMap<>();
// dynamic configs (broker or client)
private static final Map<LoginMetadata<Password>, LoginManager> DYNAMIC_INSTANCES = new HashMap<>();
private final Login login;
private final LoginMetadata<?> loginMetadata;
private final AuthenticateCallbackHandler loginCallbackHandler;
private int refCount;
private LoginManager(JaasContext jaasContext, String saslMechanism, Map<String, ?> configs,
LoginMetadata<?> loginMetadata) throws LoginException {
this.loginMetadata = loginMetadata;
this.login = Utils.newInstance(loginMetadata.loginClass);
loginCallbackHandler = Utils.newInstance(loginMetadata.loginCallbackClass);
loginCallbackHandler.configure(configs, saslMechanism, jaasContext.configurationEntries());
login.configure(configs, jaasContext.name(), jaasContext.configuration(), loginCallbackHandler);
login.login();
}
/**
* Returns an instance of `LoginManager` and increases its reference count.
*
* `release()` should be invoked when the `LoginManager` is no longer needed. This method will try to reuse an
* existing `LoginManager` for the provided context type. If `jaasContext` was loaded from a dynamic config,
* login managers are reused for the same dynamic config value. For `jaasContext` loaded from static JAAS
* configuration, login managers are reused for static contexts with the same login context name.
*
* This is a bit ugly and it would be nicer if we could pass the `LoginManager` to `ChannelBuilders.create` and
* shut it down when the broker or clients are closed. It's straightforward to do the former, but it's more
* complicated to do the latter without making the consumer API more complex.
*
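* A minimal usage sketch (assuming a previously obtained {@code JaasContext} and client
* configs; the mechanism and login class shown are illustrative):
* <pre>{@code
* LoginManager loginManager = LoginManager.acquireLoginManager(
*     jaasContext, "PLAIN", DefaultLogin.class, configs);
* try {
*     Subject subject = loginManager.subject();
*     // ... use the subject to build an authenticator ...
* } finally {
*     loginManager.release(); // decrement the reference count when done
* }
* }</pre>
*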
* @param jaasContext Static or dynamic JAAS context. `jaasContext.dynamicJaasConfig()` is non-null for dynamic context.
* For static contexts, this may contain multiple login modules if the context type is SERVER.
* For CLIENT static contexts and dynamic contexts of CLIENT and SERVER, `jaasContext` contains
* only one login module.
* @param saslMechanism SASL mechanism for which login manager is being acquired. For dynamic contexts, the single
* login module in `jaasContext` corresponds to this SASL mechanism. Hence `Login` class is
* chosen based on this mechanism.
* @param defaultLoginClass Default login class to use if an override is not specified in `configs`
* @param configs Config options used to configure `Login` if a new login manager is created.
*
*/
public static LoginManager acquireLoginManager(JaasContext jaasContext, String saslMechanism,
Class<? extends Login> defaultLoginClass,
Map<String, ?> configs) throws LoginException {
Class<? extends Login> loginClass = configuredClassOrDefault(configs, jaasContext,
saslMechanism, SaslConfigs.SASL_LOGIN_CLASS, defaultLoginClass);
Class<? extends AuthenticateCallbackHandler> defaultLoginCallbackHandlerClass = OAuthBearerLoginModule.OAUTHBEARER_MECHANISM
.equals(saslMechanism) ? OAuthBearerUnsecuredLoginCallbackHandler.class
: AbstractLogin.DefaultLoginCallbackHandler.class;
Class<? extends AuthenticateCallbackHandler> loginCallbackClass = configuredClassOrDefault(configs, jaasContext,
saslMechanism, SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, defaultLoginCallbackHandlerClass);
synchronized (LoginManager.class) {
LoginManager loginManager;
Password jaasConfigValue = jaasContext.dynamicJaasConfig();
if (jaasConfigValue != null) {
LoginMetadata<Password> loginMetadata = new LoginMetadata<>(jaasConfigValue, loginClass, loginCallbackClass, configs);
loginManager = DYNAMIC_INSTANCES.get(loginMetadata);
if (loginManager == null) {
loginManager = new LoginManager(jaasContext, saslMechanism, configs, loginMetadata);
DYNAMIC_INSTANCES.put(loginMetadata, loginManager);
}
} else {
LoginMetadata<String> loginMetadata = new LoginMetadata<>(jaasContext.name(), loginClass, loginCallbackClass, configs);
loginManager = STATIC_INSTANCES.get(loginMetadata);
if (loginManager == null) {
loginManager = new LoginManager(jaasContext, saslMechanism, configs, loginMetadata);
STATIC_INSTANCES.put(loginMetadata, loginManager);
}
}
SecurityUtils.addConfiguredSecurityProviders(configs);
return loginManager.acquire();
}
}
public Subject subject() {
return login.subject();
}
public String serviceName() {
return login.serviceName();
}
// Only for testing
Object cacheKey() {
return loginMetadata.configInfo;
}
private LoginManager acquire() {
++refCount;
LOGGER.trace("{} acquired", this);
return this;
}
/**
* Decrease the reference count for this instance and release resources if it reaches 0.
*/
public void release() {
synchronized (LoginManager.class) {
if (refCount == 0)
throw new IllegalStateException("release() called on disposed " + this);
else if (refCount == 1) {
if (loginMetadata.configInfo instanceof Password) {
DYNAMIC_INSTANCES.remove(loginMetadata);
} else {
STATIC_INSTANCES.remove(loginMetadata);
}
login.close();
loginCallbackHandler.close();
}
--refCount;
LOGGER.trace("{} released", this);
}
}
@Override
public String toString() {
return "LoginManager(serviceName=" + serviceName() +
// subject.toString() exposes private credentials, so we can't use it
", publicCredentials=" + subject().getPublicCredentials() +
", refCount=" + refCount + ')';
}
/* Should only be used in tests. */
public static void closeAll() {
synchronized (LoginManager.class) {
for (LoginMetadata<String> key : new ArrayList<>(STATIC_INSTANCES.keySet()))
STATIC_INSTANCES.remove(key).login.close();
for (LoginMetadata<Password> key : new ArrayList<>(DYNAMIC_INSTANCES.keySet()))
DYNAMIC_INSTANCES.remove(key).login.close();
}
}
private static <T> Class<? extends T> configuredClassOrDefault(Map<String, ?> configs,
JaasContext jaasContext,
String saslMechanism,
String configName,
Class<? extends T> defaultClass) {
String prefix = jaasContext.type() == JaasContext.Type.SERVER ? ListenerName.saslMechanismPrefix(saslMechanism) : "";
@SuppressWarnings("unchecked")
Class<? extends T> clazz = (Class<? extends T>) configs.get(prefix + configName);
if (clazz != null && jaasContext.configurationEntries().size() != 1) {
String errorMessage = configName + " cannot be specified with multiple login modules in the JAAS context. " +
SaslConfigs.SASL_JAAS_CONFIG + " must be configured to override mechanism-specific configs.";
throw new ConfigException(errorMessage);
}
if (clazz == null)
clazz = defaultClass;
return clazz;
}
private static class LoginMetadata<T> {
final T configInfo;
final Class<? extends Login> loginClass;
final Class<? extends AuthenticateCallbackHandler> loginCallbackClass;
final Map<String, Object> saslConfigs;
LoginMetadata(T configInfo, Class<? extends Login> loginClass,
Class<? extends AuthenticateCallbackHandler> loginCallbackClass,
Map<String, ?> configs) {
this.configInfo = configInfo;
this.loginClass = loginClass;
this.loginCallbackClass = loginCallbackClass;
this.saslConfigs = new HashMap<>();
configs.entrySet().stream()
.filter(e -> e.getKey().startsWith("sasl."))
.forEach(e -> saslConfigs.put(e.getKey(), e.getValue())); // value may be null
}
@Override
public int hashCode() {
return Objects.hash(configInfo, loginClass, loginCallbackClass, saslConfigs);
}
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
LoginMetadata<?> loginMetadata = (LoginMetadata<?>) o;
return Objects.equals(configInfo, loginMetadata.configInfo) &&
Objects.equals(loginClass, loginMetadata.loginClass) &&
Objects.equals(loginCallbackClass, loginMetadata.loginCallbackClass) &&
Objects.equals(saslConfigs, loginMetadata.saslConfigs);
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.authenticator;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.NetworkClient;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.errors.IllegalSaslStateException;
import org.apache.kafka.common.errors.SaslAuthenticationException;
import org.apache.kafka.common.errors.UnsupportedSaslMechanismException;
import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion;
import org.apache.kafka.common.message.RequestHeaderData;
import org.apache.kafka.common.message.SaslAuthenticateRequestData;
import org.apache.kafka.common.message.SaslHandshakeRequestData;
import org.apache.kafka.common.network.Authenticator;
import org.apache.kafka.common.network.ByteBufferSend;
import org.apache.kafka.common.network.NetworkReceive;
import org.apache.kafka.common.network.ReauthenticationContext;
import org.apache.kafka.common.network.Send;
import org.apache.kafka.common.network.TransportLayer;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.types.SchemaException;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.ApiVersionsRequest;
import org.apache.kafka.common.requests.ApiVersionsResponse;
import org.apache.kafka.common.requests.RequestHeader;
import org.apache.kafka.common.requests.SaslAuthenticateRequest;
import org.apache.kafka.common.requests.SaslAuthenticateResponse;
import org.apache.kafka.common.requests.SaslHandshakeRequest;
import org.apache.kafka.common.requests.SaslHandshakeResponse;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.auth.KafkaPrincipalSerde;
import org.apache.kafka.common.security.kerberos.KerberosError;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import javax.security.auth.Subject;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import java.io.IOException;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.security.Principal;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Random;
import java.util.Set;
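/**
 * Client-side {@link Authenticator} that performs the SASL handshake and token exchange with a
 * broker, including periodic re-authentication of the channel. The state machine driving
 * {@link #authenticate()} is described by {@link SaslState}.
 */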
public class SaslClientAuthenticator implements Authenticator {
/**
* The internal state transitions for initial authentication of a channel are
* declared in order, starting with {@link #SEND_APIVERSIONS_REQUEST} and ending
* in either {@link #COMPLETE} or {@link #FAILED}.
* <p>
* Re-authentication of a channel starts with the state
* {@link #REAUTH_PROCESS_ORIG_APIVERSIONS_RESPONSE} and then flows to
* {@link #REAUTH_SEND_HANDSHAKE_REQUEST} followed by
* {@link #REAUTH_RECEIVE_HANDSHAKE_OR_OTHER_RESPONSE} and then
* {@link #REAUTH_INITIAL}; after that the flow joins the authentication flow
* at the {@link #INTERMEDIATE} state and ends at either {@link #COMPLETE} or
* {@link #FAILED}.
*/
public enum SaslState {
SEND_APIVERSIONS_REQUEST, // Initial state for authentication: client sends ApiVersionsRequest in this state when authenticating
RECEIVE_APIVERSIONS_RESPONSE, // Awaiting ApiVersionsResponse from server
SEND_HANDSHAKE_REQUEST, // Received ApiVersionsResponse, send SaslHandshake request
RECEIVE_HANDSHAKE_RESPONSE, // Awaiting SaslHandshake response from server when authenticating
INITIAL, // Initial authentication state starting SASL token exchange for configured mechanism, send first token
INTERMEDIATE, // Intermediate state during SASL token exchange, process challenges and send responses
CLIENT_COMPLETE, // Sent response to last challenge. If using SaslAuthenticate, wait for authentication status from server, else COMPLETE
COMPLETE, // Authentication sequence complete. If using SaslAuthenticate, this state implies successful authentication.
FAILED, // Failed authentication due to an error at some stage
REAUTH_PROCESS_ORIG_APIVERSIONS_RESPONSE, // Initial state for re-authentication: process ApiVersionsResponse from original authentication
REAUTH_SEND_HANDSHAKE_REQUEST, // Processed original ApiVersionsResponse, send SaslHandshake request as part of re-authentication
REAUTH_RECEIVE_HANDSHAKE_OR_OTHER_RESPONSE, // Awaiting SaslHandshake response from server when re-authenticating, and may receive other, in-flight responses sent prior to start of re-authentication as well
REAUTH_INITIAL, // Initial re-authentication state starting SASL token exchange for configured mechanism, send first token
}
private static final short DISABLE_KAFKA_SASL_AUTHENTICATE_HEADER = -1;
private static final Random RNG = new Random();
/**
* The reserved range of correlation ids for SASL requests.
*
* Note: there is a reason for this reserved range. The response of LIST_OFFSET is compatible with the response
* of SASL_HANDSHAKE, so we could miss a schema error when using the SASL_HANDSHAKE schema to parse a
* LIST_OFFSET response. For example, an IllegalStateException caused by a mismatched correlation id is thrown
* if the following steps happen:
* 1) send LIST_OFFSET
* 2) send SASL_HANDSHAKE
* 3) receive the LIST_OFFSET response
* 4) successfully use the SASL_HANDSHAKE schema to parse the LIST_OFFSET response
* 5) throw IllegalStateException due to the mismatched correlation id
* As a simple approach, we force SASL requests to use a reserved correlation id range that is separate from the
* range used by NetworkClient for Kafka requests. Hence, a response to a non-SASL request received during
* re-authentication is guaranteed to fail parsing (e.g. with a SchemaException or a correlation id mismatch)
* rather than being silently misinterpreted.
*/
public static final int MAX_RESERVED_CORRELATION_ID = Integer.MAX_VALUE;
/**
* We only expect one in-flight request at a time during authentication, so the small range is fine.
*/
public static final int MIN_RESERVED_CORRELATION_ID = MAX_RESERVED_CORRELATION_ID - 7;
/**
* @return true if the correlation id is reserved for SASL requests; false otherwise
*/
public static boolean isReserved(int correlationId) {
return correlationId >= MIN_RESERVED_CORRELATION_ID;
}
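// Worked example (follows from the constants above): MIN_RESERVED_CORRELATION_ID is
// Integer.MAX_VALUE - 7 = 2147483640, so exactly 8 ids (2147483640..2147483647) are reserved.
// nextCorrelationId() below always returns an id inside this window, re-entering it after
// integer overflow, so isReserved() holds for every SASL request header.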
private final Subject subject;
private final String servicePrincipal;
private final String host;
private final String node;
private final String mechanism;
private final TransportLayer transportLayer;
private final SaslClient saslClient;
private final Map<String, ?> configs;
private final String clientPrincipalName;
private final AuthenticateCallbackHandler callbackHandler;
private final Time time;
private final Logger log;
private final ReauthInfo reauthInfo;
// buffers used in `authenticate`
private NetworkReceive netInBuffer;
private Send netOutBuffer;
// Current SASL state
private SaslState saslState;
// Next SASL state to be set when outgoing writes associated with the current SASL state complete
private SaslState pendingSaslState;
// Correlation ID for the next request
private int correlationId;
// Request header for which response from the server is pending
private RequestHeader currentRequestHeader;
// Version of SaslAuthenticate request/responses
private short saslAuthenticateVersion;
// Version of SaslHandshake request/responses
private short saslHandshakeVersion;
public SaslClientAuthenticator(Map<String, ?> configs,
AuthenticateCallbackHandler callbackHandler,
String node,
Subject subject,
String servicePrincipal,
String host,
String mechanism,
boolean handshakeRequestEnable,
TransportLayer transportLayer,
Time time,
LogContext logContext) {
this.node = node;
this.subject = subject;
this.callbackHandler = callbackHandler;
this.host = host;
this.servicePrincipal = servicePrincipal;
this.mechanism = mechanism;
this.correlationId = 0;
this.transportLayer = transportLayer;
this.configs = configs;
this.saslAuthenticateVersion = DISABLE_KAFKA_SASL_AUTHENTICATE_HEADER;
this.time = time;
this.log = logContext.logger(getClass());
this.reauthInfo = new ReauthInfo();
try {
setSaslState(handshakeRequestEnable ? SaslState.SEND_APIVERSIONS_REQUEST : SaslState.INITIAL);
// determine client principal from subject for Kerberos to use as authorization id for the SaslClient.
// For other mechanisms, the authenticated principal (username for PLAIN and SCRAM) is used as
// authorization id. Hence the principal is not specified for creating the SaslClient.
if (mechanism.equals(SaslConfigs.GSSAPI_MECHANISM))
this.clientPrincipalName = firstPrincipal(subject);
else
this.clientPrincipalName = null;
saslClient = createSaslClient();
} catch (Exception e) {
throw new SaslAuthenticationException("Failed to configure SaslClientAuthenticator", e);
}
}
// visible for testing
SaslClient createSaslClient() {
try {
return Subject.doAs(subject, (PrivilegedExceptionAction<SaslClient>) () -> {
String[] mechs = {mechanism};
log.debug("Creating SaslClient: client={};service={};serviceHostname={};mechs={}",
clientPrincipalName, servicePrincipal, host, Arrays.toString(mechs));
SaslClient retvalSaslClient = Sasl.createSaslClient(mechs, clientPrincipalName, servicePrincipal, host, configs, callbackHandler);
if (retvalSaslClient == null) {
throw new SaslAuthenticationException("Failed to create SaslClient with mechanism " + mechanism);
}
return retvalSaslClient;
});
} catch (PrivilegedActionException e) {
throw new SaslAuthenticationException("Failed to create SaslClient with mechanism " + mechanism, e.getCause());
}
}
/**
* Sends an empty message to the server to initiate the authentication process. It then evaluates server challenges
* via `SaslClient.evaluateChallenge` and returns client responses until authentication succeeds or fails.
*
* The messages are sent and received as size-delimited bytes: each message consists of a 4-byte network-ordered
* size N followed by N bytes representing the opaque payload.
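* <p>
* A minimal sketch of this framing (illustrative only; the implementation uses
* {@code NetworkReceive} and {@code ByteBufferSend} rather than building buffers directly):
* <pre>{@code
* byte[] payload = saslToken;                   // opaque SASL token bytes
* ByteBuffer frame = ByteBuffer.allocate(4 + payload.length);
* frame.putInt(payload.length);                 // 4-byte network-ordered (big-endian) size N
* frame.put(payload);                           // N bytes of opaque payload
* frame.flip();                                 // ready to write to the transport layer
* }</pre>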
*/
@SuppressWarnings("fallthrough")
public void authenticate() throws IOException {
if (netOutBuffer != null && !flushNetOutBufferAndUpdateInterestOps())
return;
switch (saslState) {
case SEND_APIVERSIONS_REQUEST:
// Always use version 0 request since brokers treat requests with schema exceptions as GSSAPI tokens
ApiVersionsRequest apiVersionsRequest = new ApiVersionsRequest.Builder().build((short) 0);
send(apiVersionsRequest.toSend(nextRequestHeader(ApiKeys.API_VERSIONS, apiVersionsRequest.version())));
setSaslState(SaslState.RECEIVE_APIVERSIONS_RESPONSE);
break;
case RECEIVE_APIVERSIONS_RESPONSE:
ApiVersionsResponse apiVersionsResponse = (ApiVersionsResponse) receiveKafkaResponse();
if (apiVersionsResponse == null)
break;
else {
setSaslAuthenticateAndHandshakeVersions(apiVersionsResponse);
reauthInfo.apiVersionsResponseReceivedFromBroker = apiVersionsResponse;
setSaslState(SaslState.SEND_HANDSHAKE_REQUEST);
// Fall through to send handshake request with the latest supported version
}
case SEND_HANDSHAKE_REQUEST:
sendHandshakeRequest(saslHandshakeVersion);
setSaslState(SaslState.RECEIVE_HANDSHAKE_RESPONSE);
break;
case RECEIVE_HANDSHAKE_RESPONSE:
SaslHandshakeResponse handshakeResponse = (SaslHandshakeResponse) receiveKafkaResponse();
if (handshakeResponse == null)
break;
else {
handleSaslHandshakeResponse(handshakeResponse);
setSaslState(SaslState.INITIAL);
// Fall through and start SASL authentication using the configured client mechanism
}
case INITIAL:
sendInitialToken();
setSaslState(SaslState.INTERMEDIATE);
break;
case REAUTH_PROCESS_ORIG_APIVERSIONS_RESPONSE:
setSaslAuthenticateAndHandshakeVersions(reauthInfo.apiVersionsResponseFromOriginalAuthentication);
setSaslState(SaslState.REAUTH_SEND_HANDSHAKE_REQUEST); // Will set immediately
// Fall through to send handshake request with the latest supported version
case REAUTH_SEND_HANDSHAKE_REQUEST:
sendHandshakeRequest(saslHandshakeVersion);
setSaslState(SaslState.REAUTH_RECEIVE_HANDSHAKE_OR_OTHER_RESPONSE);
break;
case REAUTH_RECEIVE_HANDSHAKE_OR_OTHER_RESPONSE:
handshakeResponse = (SaslHandshakeResponse) receiveKafkaResponse();
if (handshakeResponse == null)
break;
handleSaslHandshakeResponse(handshakeResponse);
setSaslState(SaslState.REAUTH_INITIAL); // Will set immediately
/*
* Fall through and start SASL authentication using the configured client
* mechanism. Note that we have to either fall through or add a loop to enter
* the switch statement again. We will fall through to avoid adding the loop and
* therefore minimize the changes to authentication-related code due to the
* changes related to re-authentication.
*/
case REAUTH_INITIAL:
sendInitialToken();
setSaslState(SaslState.INTERMEDIATE);
break;
case INTERMEDIATE:
byte[] serverToken = receiveToken();
boolean noResponsesPending = serverToken != null && !sendSaslClientToken(serverToken, false);
// For versions without SASL_AUTHENTICATE header, SASL exchange may be complete after a token is sent to server.
// For versions with SASL_AUTHENTICATE header, server always sends a response to each SASL_AUTHENTICATE request.
if (saslClient.isComplete()) {
if (saslAuthenticateVersion == DISABLE_KAFKA_SASL_AUTHENTICATE_HEADER || noResponsesPending)
setSaslState(SaslState.COMPLETE);
else
setSaslState(SaslState.CLIENT_COMPLETE);
}
break;
case CLIENT_COMPLETE:
byte[] serverResponse = receiveToken();
if (serverResponse != null)
setSaslState(SaslState.COMPLETE);
break;
case COMPLETE:
break;
case FAILED:
// Should never get here since exception would have been propagated earlier
throw new IllegalStateException("SASL handshake has already failed");
}
}
private void sendHandshakeRequest(short version) throws IOException {
SaslHandshakeRequest handshakeRequest = createSaslHandshakeRequest(version);
send(handshakeRequest.toSend(nextRequestHeader(ApiKeys.SASL_HANDSHAKE, handshakeRequest.version())));
}
private void sendInitialToken() throws IOException {
sendSaslClientToken(new byte[0], true);
}
@Override
public void reauthenticate(ReauthenticationContext reauthenticationContext) throws IOException {
SaslClientAuthenticator previousSaslClientAuthenticator = (SaslClientAuthenticator) Objects
.requireNonNull(reauthenticationContext).previousAuthenticator();
ApiVersionsResponse apiVersionsResponseFromOriginalAuthentication = previousSaslClientAuthenticator.reauthInfo
.apiVersionsResponse();
previousSaslClientAuthenticator.close();
reauthInfo.reauthenticating(apiVersionsResponseFromOriginalAuthentication,
reauthenticationContext.reauthenticationBeginNanos());
NetworkReceive netInBufferFromChannel = reauthenticationContext.networkReceive();
netInBuffer = netInBufferFromChannel;
setSaslState(SaslState.REAUTH_PROCESS_ORIG_APIVERSIONS_RESPONSE); // Will set immediately
authenticate();
}
@Override
public Optional<NetworkReceive> pollResponseReceivedDuringReauthentication() {
return reauthInfo.pollResponseReceivedDuringReauthentication();
}
@Override
public Long clientSessionReauthenticationTimeNanos() {
return reauthInfo.clientSessionReauthenticationTimeNanos;
}
@Override
public Long reauthenticationLatencyMs() {
return reauthInfo.reauthenticationLatencyMs();
}
// visible for testing
int nextCorrelationId() {
if (!isReserved(correlationId))
correlationId = MIN_RESERVED_CORRELATION_ID;
return correlationId++;
}
private RequestHeader nextRequestHeader(ApiKeys apiKey, short version) {
String clientId = (String) configs.get(CommonClientConfigs.CLIENT_ID_CONFIG);
short requestApiKey = apiKey.id;
currentRequestHeader = new RequestHeader(
new RequestHeaderData().
setRequestApiKey(requestApiKey).
setRequestApiVersion(version).
setClientId(clientId).
setCorrelationId(nextCorrelationId()),
apiKey.requestHeaderVersion(version));
return currentRequestHeader;
}
// Visible to override for testing
protected SaslHandshakeRequest createSaslHandshakeRequest(short version) {
return new SaslHandshakeRequest.Builder(
new SaslHandshakeRequestData().setMechanism(mechanism)).build(version);
}
// Visible to override for testing
protected void setSaslAuthenticateAndHandshakeVersions(ApiVersionsResponse apiVersionsResponse) {
ApiVersion authenticateVersion = apiVersionsResponse.apiVersion(ApiKeys.SASL_AUTHENTICATE.id);
if (authenticateVersion != null) {
this.saslAuthenticateVersion = (short) Math.min(authenticateVersion.maxVersion(),
ApiKeys.SASL_AUTHENTICATE.latestVersion());
}
ApiVersion handshakeVersion = apiVersionsResponse.apiVersion(ApiKeys.SASL_HANDSHAKE.id);
if (handshakeVersion != null) {
this.saslHandshakeVersion = (short) Math.min(handshakeVersion.maxVersion(),
ApiKeys.SASL_HANDSHAKE.latestVersion());
}
}
private void setSaslState(SaslState saslState) {
if (netOutBuffer != null && !netOutBuffer.completed())
pendingSaslState = saslState;
else {
this.pendingSaslState = null;
this.saslState = saslState;
log.debug("Set SASL client state to {}", saslState);
if (saslState == SaslState.COMPLETE) {
reauthInfo.setAuthenticationEndAndSessionReauthenticationTimes(time.nanoseconds());
if (!reauthInfo.reauthenticating())
transportLayer.removeInterestOps(SelectionKey.OP_WRITE);
else
/*
* Re-authentication is triggered by a write, so we have to make sure that
* pending write is actually sent.
*/
transportLayer.addInterestOps(SelectionKey.OP_WRITE);
}
}
}
/**
* Sends a SASL client token to server if required. This may be an initial token to start
* SASL token exchange or response to a challenge from the server.
* @return true if a token was sent to the server
*/
private boolean sendSaslClientToken(byte[] serverToken, boolean isInitial) throws IOException {
if (!saslClient.isComplete()) {
byte[] saslToken = createSaslToken(serverToken, isInitial);
if (saslToken != null) {
ByteBuffer tokenBuf = ByteBuffer.wrap(saslToken);
Send send;
if (saslAuthenticateVersion == DISABLE_KAFKA_SASL_AUTHENTICATE_HEADER) {
send = ByteBufferSend.sizePrefixed(tokenBuf);
} else {
SaslAuthenticateRequestData data = new SaslAuthenticateRequestData()
.setAuthBytes(tokenBuf.array());
SaslAuthenticateRequest request = new SaslAuthenticateRequest.Builder(data).build(saslAuthenticateVersion);
send = request.toSend(nextRequestHeader(ApiKeys.SASL_AUTHENTICATE, saslAuthenticateVersion));
}
send(send);
return true;
}
}
return false;
}
private void send(Send send) throws IOException {
try {
netOutBuffer = send;
flushNetOutBufferAndUpdateInterestOps();
} catch (IOException e) {
setSaslState(SaslState.FAILED);
throw e;
}
}
private boolean flushNetOutBufferAndUpdateInterestOps() throws IOException {
boolean flushedCompletely = flushNetOutBuffer();
if (flushedCompletely) {
transportLayer.removeInterestOps(SelectionKey.OP_WRITE);
if (pendingSaslState != null)
setSaslState(pendingSaslState);
} else
transportLayer.addInterestOps(SelectionKey.OP_WRITE);
return flushedCompletely;
}
private byte[] receiveResponseOrToken() throws IOException {
if (netInBuffer == null) netInBuffer = new NetworkReceive(node);
netInBuffer.readFrom(transportLayer);
byte[] serverPacket = null;
if (netInBuffer.complete()) {
netInBuffer.payload().rewind();
serverPacket = new byte[netInBuffer.payload().remaining()];
netInBuffer.payload().get(serverPacket, 0, serverPacket.length);
netInBuffer = null; // reset the networkReceive as we read all the data.
}
return serverPacket;
}
public KafkaPrincipal principal() {
return new KafkaPrincipal(KafkaPrincipal.USER_TYPE, clientPrincipalName);
}
@Override
public Optional<KafkaPrincipalSerde> principalSerde() {
return Optional.empty();
}
public boolean complete() {
return saslState == SaslState.COMPLETE;
}
public void close() throws IOException {
if (saslClient != null)
saslClient.dispose();
}
private byte[] receiveToken() throws IOException {
if (saslAuthenticateVersion == DISABLE_KAFKA_SASL_AUTHENTICATE_HEADER) {
return receiveResponseOrToken();
} else {
SaslAuthenticateResponse response = (SaslAuthenticateResponse) receiveKafkaResponse();
if (response != null) {
Errors error = response.error();
if (error != Errors.NONE) {
setSaslState(SaslState.FAILED);
String errMsg = response.errorMessage();
throw errMsg == null ? error.exception() : error.exception(errMsg);
}
long sessionLifetimeMs = response.sessionLifetimeMs();
if (sessionLifetimeMs > 0L)
reauthInfo.positiveSessionLifetimeMs = sessionLifetimeMs;
return Utils.copyArray(response.saslAuthBytes());
} else
return null;
}
}
private byte[] createSaslToken(final byte[] saslToken, boolean isInitial) throws SaslException {
if (saslToken == null)
throw new IllegalSaslStateException("Error authenticating with the Kafka Broker: received a `null` saslToken.");
try {
if (isInitial && !saslClient.hasInitialResponse())
return saslToken;
else
return Subject.doAs(subject, (PrivilegedExceptionAction<byte[]>) () -> saslClient.evaluateChallenge(saslToken));
} catch (PrivilegedActionException e) {
String error = "An error: (" + e + ") occurred when evaluating SASL token received from the Kafka Broker.";
KerberosError kerberosError = KerberosError.fromException(e);
// Try to provide hints to users about what went wrong so they can fix their configuration.
if (kerberosError == KerberosError.SERVER_NOT_FOUND) {
error += " This may be caused by Java being unable to resolve the Kafka Broker's" +
" hostname correctly. You may want to try adding" +
" '-Dsun.net.spi.nameservice.provider.1=dns,sun' to your client's JVMFLAGS environment." +
" Users must configure FQDN of kafka brokers when authenticating using SASL and" +
" `socketChannel.socket().getInetAddress().getHostName()` must match the hostname in `principal/hostname@realm`";
}
//Unwrap the SaslException inside `PrivilegedActionException`
Throwable cause = e.getCause();
// Treat transient Kerberos errors as non-fatal SaslExceptions that are processed as I/O exceptions
// and all other failures as fatal SaslAuthenticationException.
if ((kerberosError != null && kerberosError.retriable()) || (kerberosError == null && KerberosError.isRetriableClientGssException(e))) {
error += " Kafka Client will retry.";
throw new SaslException(error, cause);
} else {
error += " Kafka Client will go to AUTHENTICATION_FAILED state.";
throw new SaslAuthenticationException(error, cause);
}
}
}
private boolean flushNetOutBuffer() throws IOException {
if (!netOutBuffer.completed()) {
netOutBuffer.writeTo(transportLayer);
}
return netOutBuffer.completed();
}
private AbstractResponse receiveKafkaResponse() throws IOException {
if (netInBuffer == null)
netInBuffer = new NetworkReceive(node);
NetworkReceive receive = netInBuffer;
try {
byte[] responseBytes = receiveResponseOrToken();
if (responseBytes == null)
return null;
else {
AbstractResponse response = NetworkClient.parseResponse(ByteBuffer.wrap(responseBytes), currentRequestHeader);
currentRequestHeader = null;
return response;
}
} catch (BufferUnderflowException | SchemaException | IllegalArgumentException e) {
/*
* Account for the fact that during re-authentication there may be responses
* arriving for requests that were sent in the past.
*/
if (reauthInfo.reauthenticating()) {
/*
* It didn't match the current request header, so it must be unrelated to
* re-authentication. Save it so it can be processed later.
*/
receive.payload().rewind();
reauthInfo.pendingAuthenticatedReceives.add(receive);
return null;
}
log.debug("Invalid SASL mechanism response, server may be expecting only GSSAPI tokens");
setSaslState(SaslState.FAILED);
throw new IllegalSaslStateException("Invalid SASL mechanism response, server may be expecting a different protocol", e);
}
}
private void handleSaslHandshakeResponse(SaslHandshakeResponse response) {
Errors error = response.error();
if (error != Errors.NONE)
setSaslState(SaslState.FAILED);
switch (error) {
case NONE:
break;
case UNSUPPORTED_SASL_MECHANISM:
throw new UnsupportedSaslMechanismException(String.format("Client SASL mechanism '%s' not enabled in the server, enabled mechanisms are %s",
mechanism, response.enabledMechanisms()));
case ILLEGAL_SASL_STATE:
throw new IllegalSaslStateException(String.format("Unexpected handshake request with client mechanism %s, enabled mechanisms are %s",
mechanism, response.enabledMechanisms()));
default:
throw new IllegalSaslStateException(String.format("Unknown error code %s, client mechanism is %s, enabled mechanisms are %s",
response.error(), mechanism, response.enabledMechanisms()));
}
}
/**
* Returns the first Principal from the Subject.
* @throws KafkaException if there are no Principals in the Subject.
*         During Kerberos re-login, the principal is reset on the Subject, so an exception
*         is thrown here to allow the connection to be retried after any configured backoff.
*/
public static String firstPrincipal(Subject subject) {
Set<Principal> principals = subject.getPrincipals();
synchronized (principals) {
Iterator<Principal> iterator = principals.iterator();
if (iterator.hasNext())
return iterator.next().getName();
else
throw new KafkaException("Principal could not be determined from Subject, this may be a transient failure due to Kerberos re-login");
}
}
/**
* Information related to re-authentication
*/
private class ReauthInfo {
public ApiVersionsResponse apiVersionsResponseFromOriginalAuthentication;
public long reauthenticationBeginNanos;
public List<NetworkReceive> pendingAuthenticatedReceives = new ArrayList<>();
public ApiVersionsResponse apiVersionsResponseReceivedFromBroker;
public Long positiveSessionLifetimeMs;
public long authenticationEndNanos;
public Long clientSessionReauthenticationTimeNanos;
public void reauthenticating(ApiVersionsResponse apiVersionsResponseFromOriginalAuthentication,
long reauthenticationBeginNanos) {
this.apiVersionsResponseFromOriginalAuthentication = Objects
.requireNonNull(apiVersionsResponseFromOriginalAuthentication);
this.reauthenticationBeginNanos = reauthenticationBeginNanos;
}
public boolean reauthenticating() {
return apiVersionsResponseFromOriginalAuthentication != null;
}
public ApiVersionsResponse apiVersionsResponse() {
return reauthenticating() ? apiVersionsResponseFromOriginalAuthentication
: apiVersionsResponseReceivedFromBroker;
}
/**
* Return the next NetworkReceive response, if any, that arrived during
* re-authentication but is unrelated to it. Such a response corresponds to a
* request sent prior to the beginning of re-authentication, while the channel
* was still successfully authenticated, whose response arrived during the
* re-authentication process.
*
* @return an Optional containing the unrelated response, or empty if there is none
*/
public Optional<NetworkReceive> pollResponseReceivedDuringReauthentication() {
if (pendingAuthenticatedReceives.isEmpty())
return Optional.empty();
return Optional.of(pendingAuthenticatedReceives.remove(0));
}
public void setAuthenticationEndAndSessionReauthenticationTimes(long nowNanos) {
authenticationEndNanos = nowNanos;
long sessionLifetimeMsToUse = 0;
if (positiveSessionLifetimeMs != null) {
// pick a random percentage between 85% and 95% for session re-authentication
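// e.g. with positiveSessionLifetimeMs = 60_000, pctToUse lies in [0.85, 0.95), so the
// session lifetime used below falls in [51_000, 57_000) ms (illustrative numbers)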
double pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount = 0.85;
double pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously = 0.10;
double pctToUse = pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount + RNG.nextDouble()
* pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously;
sessionLifetimeMsToUse = (long) (positiveSessionLifetimeMs * pctToUse);
clientSessionReauthenticationTimeNanos = authenticationEndNanos + 1000 * 1000 * sessionLifetimeMsToUse;
log.debug(
"Finished {} with session expiration in {} ms and session re-authentication on or after {} ms",
authenticationOrReauthenticationText(), positiveSessionLifetimeMs, sessionLifetimeMsToUse);
} else
log.debug("Finished {} with no session expiration and no session re-authentication",
authenticationOrReauthenticationText());
}
public Long reauthenticationLatencyMs() {
return reauthenticating()
? Math.round((authenticationEndNanos - reauthenticationBeginNanos) / 1000.0 / 1000.0)
: null;
}
private String authenticationOrReauthenticationText() {
return reauthenticating() ? "re-authentication" : "authentication";
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/authenticator/SaslClientCallbackHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.authenticator;
import java.security.AccessController;
import java.util.List;
import java.util.Map;
import javax.security.auth.Subject;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.sasl.AuthorizeCallback;
import javax.security.sasl.RealmCallback;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.security.auth.SaslExtensionsCallback;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.auth.SaslExtensions;
import org.apache.kafka.common.security.scram.ScramExtensionsCallback;
import org.apache.kafka.common.security.scram.internals.ScramMechanism;
/**
* Default callback handler for Sasl clients. The callbacks required for the SASL mechanism
* configured for the client should be supported by this callback handler. See
* <a href="https://docs.oracle.com/javase/8/docs/technotes/guides/security/sasl/sasl-refguide.html">Java SASL API</a>
* for the list of SASL callback handlers required for each SASL mechanism.
*
* For adding custom SASL extensions, a {@link SaslExtensions} may be added to the subject's public credentials
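* <p>
* A minimal sketch of attaching extensions (map contents are illustrative; the handler below
* only forwards them for non-GSSAPI mechanisms):
* <pre>{@code
* Map<String, String> extensionMap = new HashMap<>();
* extensionMap.put("traceId", "abc123");
* subject.getPublicCredentials().add(new SaslExtensions(extensionMap));
* }</pre>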
*/
public class SaslClientCallbackHandler implements AuthenticateCallbackHandler {
private String mechanism;
@Override
public void configure(Map<String, ?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries) {
this.mechanism = saslMechanism;
}
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
Subject subject = Subject.getSubject(AccessController.getContext());
for (Callback callback : callbacks) {
if (callback instanceof NameCallback) {
NameCallback nc = (NameCallback) callback;
if (subject != null && !subject.getPublicCredentials(String.class).isEmpty()) {
nc.setName(subject.getPublicCredentials(String.class).iterator().next());
} else
nc.setName(nc.getDefaultName());
} else if (callback instanceof PasswordCallback) {
if (subject != null && !subject.getPrivateCredentials(String.class).isEmpty()) {
char[] password = subject.getPrivateCredentials(String.class).iterator().next().toCharArray();
((PasswordCallback) callback).setPassword(password);
} else {
String errorMessage = "Could not login: the client is being asked for a password, but the Kafka" +
" client code does not currently support obtaining a password from the user.";
throw new UnsupportedCallbackException(callback, errorMessage);
}
} else if (callback instanceof RealmCallback) {
RealmCallback rc = (RealmCallback) callback;
rc.setText(rc.getDefaultText());
} else if (callback instanceof AuthorizeCallback) {
AuthorizeCallback ac = (AuthorizeCallback) callback;
String authId = ac.getAuthenticationID();
String authzId = ac.getAuthorizationID();
ac.setAuthorized(authId.equals(authzId));
if (ac.isAuthorized())
ac.setAuthorizedID(authzId);
} else if (callback instanceof ScramExtensionsCallback) {
if (ScramMechanism.isScram(mechanism) && subject != null && !subject.getPublicCredentials(Map.class).isEmpty()) {
@SuppressWarnings("unchecked")
Map<String, String> extensions = (Map<String, String>) subject.getPublicCredentials(Map.class).iterator().next();
((ScramExtensionsCallback) callback).extensions(extensions);
}
} else if (callback instanceof SaslExtensionsCallback) {
if (!SaslConfigs.GSSAPI_MECHANISM.equals(mechanism) &&
subject != null && !subject.getPublicCredentials(SaslExtensions.class).isEmpty()) {
SaslExtensions extensions = subject.getPublicCredentials(SaslExtensions.class).iterator().next();
((SaslExtensionsCallback) callback).extensions(extensions);
}
} else {
throw new UnsupportedCallbackException(callback, "Unrecognized SASL ClientCallback");
}
}
}
@Override
public void close() {
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/authenticator/SaslInternalConfigs.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.authenticator;
import org.apache.kafka.common.config.internals.BrokerSecurityConfigs;
public class SaslInternalConfigs {
/**
* The server (broker) specifies a positive session length in milliseconds to a
* SASL client when {@link BrokerSecurityConfigs#CONNECTIONS_MAX_REAUTH_MS} is
* positive as per <a href=
* "https://cwiki.apache.org/confluence/display/KAFKA/KIP-368%3A+Allow+SASL+Connections+to+Periodically+Re-Authenticate">KIP
* 368: Allow SASL Connections to Periodically Re-Authenticate</a>. The session
* length is the minimum of the configured value and any session length implied
* by the credential presented during authentication. The lifetime defined by
* the credential, in terms of milliseconds since the epoch, is available via a
* negotiated property on the SASL Server instance, and that value can be
* converted to a session length by subtracting the time at which authentication
* occurred. This constant defines the negotiated property key that is used to
* communicate the credential lifetime in milliseconds since the epoch.
*/
public static final String CREDENTIAL_LIFETIME_MS_SASL_NEGOTIATED_PROPERTY_KEY = "CREDENTIAL.LIFETIME.MS";
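// Illustrative sketch (not part of the upstream source): a worked example of the session
// length computation described in the Javadoc above, using hypothetical values. If the
// credential lifetime negotiated by the SaslServer is 1_700_000_060_000 ms since the epoch
// and authentication completed at 1_700_000_000_000 ms, the credential implies a session
// length of 60_000 ms; with connections.max.reauth.ms set to 45_000 the broker would use
// the minimum of the two:
//
//   long credentialLifetimeMs   = 1_700_000_060_000L; // from the negotiated property
//   long authenticationEndMs    = 1_700_000_000_000L; // when authentication completed
//   long connectionsMaxReauthMs = 45_000L;            // broker config (hypothetical)
//   long sessionLengthMs = Math.min(credentialLifetimeMs - authenticationEndMs,
//           connectionsMaxReauthMs);                  // = 45_000 ms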
private SaslInternalConfigs() {
// empty
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.authenticator;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.config.internals.BrokerSecurityConfigs;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.IllegalSaslStateException;
import org.apache.kafka.common.errors.InvalidRequestException;
import org.apache.kafka.common.errors.SaslAuthenticationException;
import org.apache.kafka.common.errors.UnsupportedSaslMechanismException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.SaslAuthenticateResponseData;
import org.apache.kafka.common.message.SaslHandshakeResponseData;
import org.apache.kafka.common.network.InvalidReceiveException;
import org.apache.kafka.common.network.Authenticator;
import org.apache.kafka.common.network.ByteBufferSend;
import org.apache.kafka.common.network.ChannelBuilders;
import org.apache.kafka.common.network.ChannelMetadataRegistry;
import org.apache.kafka.common.network.ClientInformation;
import org.apache.kafka.common.network.ListenerName;
import org.apache.kafka.common.network.NetworkReceive;
import org.apache.kafka.common.network.ReauthenticationContext;
import org.apache.kafka.common.network.Send;
import org.apache.kafka.common.network.SslTransportLayer;
import org.apache.kafka.common.network.TransportLayer;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.ApiVersionsRequest;
import org.apache.kafka.common.requests.ApiVersionsResponse;
import org.apache.kafka.common.requests.RequestAndSize;
import org.apache.kafka.common.requests.RequestContext;
import org.apache.kafka.common.requests.RequestHeader;
import org.apache.kafka.common.requests.SaslAuthenticateRequest;
import org.apache.kafka.common.requests.SaslAuthenticateResponse;
import org.apache.kafka.common.requests.SaslHandshakeRequest;
import org.apache.kafka.common.requests.SaslHandshakeResponse;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
import org.apache.kafka.common.security.auth.KafkaPrincipalBuilder;
import org.apache.kafka.common.security.auth.KafkaPrincipalSerde;
import org.apache.kafka.common.security.auth.SaslAuthenticationContext;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.common.security.kerberos.KerberosError;
import org.apache.kafka.common.security.kerberos.KerberosName;
import org.apache.kafka.common.security.kerberos.KerberosShortNamer;
import org.apache.kafka.common.security.scram.ScramLoginModule;
import org.apache.kafka.common.security.scram.internals.ScramMechanism;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.net.ssl.SSLSession;
import javax.security.auth.Subject;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
import java.io.Closeable;
import java.io.IOException;
import java.net.InetAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.function.Supplier;
public class SaslServerAuthenticator implements Authenticator {
private static final Logger LOG = LoggerFactory.getLogger(SaslServerAuthenticator.class);
/**
* The internal state transitions for initial authentication of a channel on the
* server side are declared in order, starting with {@link #INITIAL_REQUEST} and
* ending in either {@link #COMPLETE} or {@link #FAILED}.
* <p>
* Re-authentication of a channel on the server side starts with the state
* {@link #REAUTH_PROCESS_HANDSHAKE}. It may then flow to
* {@link #REAUTH_BAD_MECHANISM} before a transition to {@link #FAILED} if
* re-authentication is attempted with a mechanism different than the original
* one; otherwise it joins the authentication flow at the {@link #AUTHENTICATE}
* state and likewise ends at either {@link #COMPLETE} or {@link #FAILED}.
*/
private enum SaslState {
INITIAL_REQUEST, // May be GSSAPI token, SaslHandshake or ApiVersions for authentication
HANDSHAKE_OR_VERSIONS_REQUEST, // May be SaslHandshake or ApiVersions
HANDSHAKE_REQUEST, // After an ApiVersions request, next request must be SaslHandshake
AUTHENTICATE, // Authentication tokens (SaslHandshake v1 and above indicate SaslAuthenticate headers)
COMPLETE, // Authentication completed successfully
FAILED, // Authentication failed
REAUTH_PROCESS_HANDSHAKE, // Initial state for re-authentication, processes SASL handshake request
REAUTH_BAD_MECHANISM, // When re-authentication requested with wrong mechanism, generate exception
}
private final SecurityProtocol securityProtocol;
private final ListenerName listenerName;
private final String connectionId;
private final Map<String, Subject> subjects;
private final TransportLayer transportLayer;
private final List<String> enabledMechanisms;
private final Map<String, ?> configs;
private final KafkaPrincipalBuilder principalBuilder;
private final Map<String, AuthenticateCallbackHandler> callbackHandlers;
private final Map<String, Long> connectionsMaxReauthMsByMechanism;
private final Time time;
private final ReauthInfo reauthInfo;
private final ChannelMetadataRegistry metadataRegistry;
private final Supplier<ApiVersionsResponse> apiVersionSupplier;
// Current SASL state
private SaslState saslState = SaslState.INITIAL_REQUEST;
// Next SASL state to be set when outgoing writes associated with the current SASL state complete
private SaslState pendingSaslState = null;
// Exception that will be thrown by `authenticate()` when SaslState is set to FAILED after outbound writes complete
private AuthenticationException pendingException = null;
private SaslServer saslServer;
private String saslMechanism;
// buffers used in `authenticate`
private Integer saslAuthRequestMaxReceiveSize;
private NetworkReceive netInBuffer;
private Send netOutBuffer;
private Send authenticationFailureSend = null;
// flag indicating if sasl tokens are sent as Kafka SaslAuthenticate request/responses
private boolean enableKafkaSaslAuthenticateHeaders;
public SaslServerAuthenticator(Map<String, ?> configs,
Map<String, AuthenticateCallbackHandler> callbackHandlers,
String connectionId,
Map<String, Subject> subjects,
KerberosShortNamer kerberosNameParser,
ListenerName listenerName,
SecurityProtocol securityProtocol,
TransportLayer transportLayer,
Map<String, Long> connectionsMaxReauthMsByMechanism,
ChannelMetadataRegistry metadataRegistry,
Time time,
Supplier<ApiVersionsResponse> apiVersionSupplier) {
this.callbackHandlers = callbackHandlers;
this.connectionId = connectionId;
this.subjects = subjects;
this.listenerName = listenerName;
this.securityProtocol = securityProtocol;
this.enableKafkaSaslAuthenticateHeaders = false;
this.transportLayer = transportLayer;
this.connectionsMaxReauthMsByMechanism = connectionsMaxReauthMsByMechanism;
this.time = time;
this.reauthInfo = new ReauthInfo();
this.metadataRegistry = metadataRegistry;
this.apiVersionSupplier = apiVersionSupplier;
this.configs = configs;
@SuppressWarnings("unchecked")
List<String> enabledMechanisms = (List<String>) this.configs.get(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG);
if (enabledMechanisms == null || enabledMechanisms.isEmpty())
throw new IllegalArgumentException("No SASL mechanisms are enabled");
this.enabledMechanisms = new ArrayList<>(new HashSet<>(enabledMechanisms));
for (String mechanism : this.enabledMechanisms) {
if (!callbackHandlers.containsKey(mechanism))
throw new IllegalArgumentException("Callback handler not specified for SASL mechanism " + mechanism);
if (!subjects.containsKey(mechanism))
throw new IllegalArgumentException("Subject cannot be null for SASL mechanism " + mechanism);
LOG.trace("{} for mechanism={}: {}", BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS, mechanism,
connectionsMaxReauthMsByMechanism.get(mechanism));
}
// Note that the old principal builder does not support SASL, so we do not need to pass the
// authenticator or the transport layer
this.principalBuilder = ChannelBuilders.createPrincipalBuilder(configs, kerberosNameParser, null);
saslAuthRequestMaxReceiveSize = (Integer) configs.get(BrokerSecurityConfigs.SASL_SERVER_MAX_RECEIVE_SIZE_CONFIG);
if (saslAuthRequestMaxReceiveSize == null)
saslAuthRequestMaxReceiveSize = BrokerSecurityConfigs.DEFAULT_SASL_SERVER_MAX_RECEIVE_SIZE;
}
private void createSaslServer(String mechanism) throws IOException {
this.saslMechanism = mechanism;
Subject subject = subjects.get(mechanism);
final AuthenticateCallbackHandler callbackHandler = callbackHandlers.get(mechanism);
if (mechanism.equals(SaslConfigs.GSSAPI_MECHANISM)) {
saslServer = createSaslKerberosServer(callbackHandler, configs, subject);
} else {
try {
saslServer = Subject.doAs(subject, (PrivilegedExceptionAction<SaslServer>) () ->
Sasl.createSaslServer(saslMechanism, "kafka", serverAddress().getHostName(), configs, callbackHandler));
if (saslServer == null) {
throw new SaslException("Kafka Server failed to create a SaslServer to interact with a client during session authentication with server mechanism " + saslMechanism);
}
} catch (PrivilegedActionException e) {
throw new SaslException("Kafka Server failed to create a SaslServer to interact with a client during session authentication with server mechanism " + saslMechanism, e.getCause());
}
}
}
private SaslServer createSaslKerberosServer(final AuthenticateCallbackHandler saslServerCallbackHandler, final Map<String, ?> configs, Subject subject) throws IOException {
// The server is using a JAAS-authenticated subject: determine the service principal name and hostname from the Kafka server's subject.
final String servicePrincipal = SaslClientAuthenticator.firstPrincipal(subject);
KerberosName kerberosName;
try {
kerberosName = KerberosName.parse(servicePrincipal);
} catch (IllegalArgumentException e) {
throw new KafkaException("Principal has name with unexpected format " + servicePrincipal);
}
final String servicePrincipalName = kerberosName.serviceName();
final String serviceHostname = kerberosName.hostName();
LOG.debug("Creating SaslServer for {} with mechanism {}", kerberosName, saslMechanism);
try {
return Subject.doAs(subject, (PrivilegedExceptionAction<SaslServer>) () ->
Sasl.createSaslServer(saslMechanism, servicePrincipalName, serviceHostname, configs, saslServerCallbackHandler));
} catch (PrivilegedActionException e) {
throw new SaslException("Kafka Server failed to create a SaslServer to interact with a client during session authentication", e.getCause());
}
}
/**
* Evaluates client responses via `SaslServer.evaluateResponse` and returns the issued challenge to the client until
* authentication succeeds or fails.
*
* The messages are sent and received as size-delimited bytes consisting of a 4-byte network-ordered size N
* followed by N bytes representing the opaque payload.
*/
@SuppressWarnings("fallthrough")
@Override
public void authenticate() throws IOException {
if (saslState != SaslState.REAUTH_PROCESS_HANDSHAKE) {
if (netOutBuffer != null && !flushNetOutBufferAndUpdateInterestOps())
return;
if (saslServer != null && saslServer.isComplete()) {
setSaslState(SaslState.COMPLETE);
return;
}
// allocate on heap (as opposed to any socket server memory pool)
if (netInBuffer == null) netInBuffer = new NetworkReceive(saslAuthRequestMaxReceiveSize, connectionId);
try {
netInBuffer.readFrom(transportLayer);
} catch (InvalidReceiveException e) {
throw new SaslAuthenticationException("Failing SASL authentication due to invalid receive size", e);
}
if (!netInBuffer.complete())
return;
netInBuffer.payload().rewind();
}
byte[] clientToken = new byte[netInBuffer.payload().remaining()];
netInBuffer.payload().get(clientToken, 0, clientToken.length);
netInBuffer = null; // reset the networkReceive as we read all the data.
try {
switch (saslState) {
case REAUTH_PROCESS_HANDSHAKE:
case HANDSHAKE_OR_VERSIONS_REQUEST:
case HANDSHAKE_REQUEST:
handleKafkaRequest(clientToken);
break;
case REAUTH_BAD_MECHANISM:
throw new SaslAuthenticationException(reauthInfo.badMechanismErrorMessage);
case INITIAL_REQUEST:
if (handleKafkaRequest(clientToken))
break;
// For default GSSAPI, fall through to authenticate using the client token as the first GSSAPI packet.
// This is required for interoperability with 0.9.0.x clients, which do not send a handshake request.
case AUTHENTICATE:
handleSaslToken(clientToken);
// When the authentication exchange is complete and no more tokens are expected from the client,
// update SASL state. Current SASL state will be updated when outgoing writes to the client complete.
if (saslServer.isComplete())
setSaslState(SaslState.COMPLETE);
break;
default:
break;
}
} catch (AuthenticationException e) {
// Exception will be propagated after response is sent to client
setSaslState(SaslState.FAILED, e);
} catch (Exception e) {
// In the case of IOExceptions and other unexpected exceptions, fail immediately
saslState = SaslState.FAILED;
LOG.debug("Failed during {}: {}", reauthInfo.authenticationOrReauthenticationText(), e.getMessage());
throw e;
}
}
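// Illustrative sketch (not part of the upstream source): the Javadoc on authenticate()
// describes the wire framing used before SaslAuthenticate headers are enabled -- a 4-byte
// network-ordered (big-endian) size N followed by N opaque payload bytes. A minimal
// illustration, assuming a hypothetical byte[] payload:
//
//   ByteBuffer frame = ByteBuffer.allocate(4 + payload.length);
//   frame.putInt(payload.length); // ByteBuffer writes big-endian by default
//   frame.put(payload);
//   frame.flip();                 // frame is now ready to write to the transport layer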
@Override
public KafkaPrincipal principal() {
Optional<SSLSession> sslSession = transportLayer instanceof SslTransportLayer ?
Optional.of(((SslTransportLayer) transportLayer).sslSession()) : Optional.empty();
SaslAuthenticationContext context = new SaslAuthenticationContext(saslServer, securityProtocol,
clientAddress(), listenerName.value(), sslSession);
KafkaPrincipal principal = principalBuilder.build(context);
if (ScramMechanism.isScram(saslMechanism) && Boolean.parseBoolean((String) saslServer.getNegotiatedProperty(ScramLoginModule.TOKEN_AUTH_CONFIG))) {
principal.tokenAuthenticated(true);
}
return principal;
}
@Override
public Optional<KafkaPrincipalSerde> principalSerde() {
return principalBuilder instanceof KafkaPrincipalSerde ? Optional.of((KafkaPrincipalSerde) principalBuilder) : Optional.empty();
}
@Override
public boolean complete() {
return saslState == SaslState.COMPLETE;
}
@Override
public void handleAuthenticationFailure() throws IOException {
sendAuthenticationFailureResponse();
}
@Override
public void close() throws IOException {
if (principalBuilder instanceof Closeable)
Utils.closeQuietly((Closeable) principalBuilder, "principal builder");
if (saslServer != null)
saslServer.dispose();
}
@Override
public void reauthenticate(ReauthenticationContext reauthenticationContext) throws IOException {
NetworkReceive saslHandshakeReceive = reauthenticationContext.networkReceive();
if (saslHandshakeReceive == null)
throw new IllegalArgumentException(
"Invalid saslHandshakeReceive in server-side re-authentication context: null");
SaslServerAuthenticator previousSaslServerAuthenticator = (SaslServerAuthenticator) reauthenticationContext.previousAuthenticator();
reauthInfo.reauthenticating(previousSaslServerAuthenticator.saslMechanism,
previousSaslServerAuthenticator.principal(), reauthenticationContext.reauthenticationBeginNanos());
previousSaslServerAuthenticator.close();
netInBuffer = saslHandshakeReceive;
LOG.debug("Beginning re-authentication: {}", this);
netInBuffer.payload().rewind();
setSaslState(SaslState.REAUTH_PROCESS_HANDSHAKE);
authenticate();
}
@Override
public Long serverSessionExpirationTimeNanos() {
return reauthInfo.sessionExpirationTimeNanos;
}
@Override
public Long reauthenticationLatencyMs() {
return reauthInfo.reauthenticationLatencyMs();
}
@Override
public boolean connectedClientSupportsReauthentication() {
return reauthInfo.connectedClientSupportsReauthentication;
}
private void setSaslState(SaslState saslState) {
setSaslState(saslState, null);
}
private void setSaslState(SaslState saslState, AuthenticationException exception) {
if (netOutBuffer != null && !netOutBuffer.completed()) {
pendingSaslState = saslState;
pendingException = exception;
} else {
this.saslState = saslState;
LOG.debug("Set SASL server state to {} during {}", saslState, reauthInfo.authenticationOrReauthenticationText());
this.pendingSaslState = null;
this.pendingException = null;
if (exception != null)
throw exception;
}
}
private boolean flushNetOutBufferAndUpdateInterestOps() throws IOException {
boolean flushedCompletely = flushNetOutBuffer();
if (flushedCompletely) {
transportLayer.removeInterestOps(SelectionKey.OP_WRITE);
if (pendingSaslState != null)
setSaslState(pendingSaslState, pendingException);
} else
transportLayer.addInterestOps(SelectionKey.OP_WRITE);
return flushedCompletely;
}
private boolean flushNetOutBuffer() throws IOException {
if (!netOutBuffer.completed())
netOutBuffer.writeTo(transportLayer);
return netOutBuffer.completed();
}
private InetAddress serverAddress() {
return transportLayer.socketChannel().socket().getLocalAddress();
}
private InetAddress clientAddress() {
return transportLayer.socketChannel().socket().getInetAddress();
}
private void handleSaslToken(byte[] clientToken) throws IOException {
if (!enableKafkaSaslAuthenticateHeaders) {
byte[] response = saslServer.evaluateResponse(clientToken);
if (saslServer.isComplete()) {
reauthInfo.calcCompletionTimesAndReturnSessionLifetimeMs();
if (reauthInfo.reauthenticating())
reauthInfo.ensurePrincipalUnchanged(principal());
}
if (response != null) {
netOutBuffer = ByteBufferSend.sizePrefixed(ByteBuffer.wrap(response));
flushNetOutBufferAndUpdateInterestOps();
}
} else {
ByteBuffer requestBuffer = ByteBuffer.wrap(clientToken);
RequestHeader header = RequestHeader.parse(requestBuffer);
ApiKeys apiKey = header.apiKey();
short version = header.apiVersion();
RequestContext requestContext = new RequestContext(header, connectionId, clientAddress(),
KafkaPrincipal.ANONYMOUS, listenerName, securityProtocol, ClientInformation.EMPTY, false);
RequestAndSize requestAndSize = requestContext.parseRequest(requestBuffer);
if (apiKey != ApiKeys.SASL_AUTHENTICATE) {
IllegalSaslStateException e = new IllegalSaslStateException("Unexpected Kafka request of type " + apiKey + " during SASL authentication.");
buildResponseOnAuthenticateFailure(requestContext, requestAndSize.request.getErrorResponse(e));
throw e;
}
if (!apiKey.isVersionSupported(version)) {
// We cannot create an error response if the request version of SaslAuthenticate is not supported
// This should not normally occur since clients typically check supported versions using ApiVersionsRequest
throw new UnsupportedVersionException("Version " + version + " is not supported for apiKey " + apiKey);
}
/*
* The client sends multiple SASL_AUTHENTICATE requests, and the client is known
* to support the required version if any one of them indicates it supports that
* version.
*/
if (!reauthInfo.connectedClientSupportsReauthentication)
reauthInfo.connectedClientSupportsReauthentication = version > 0;
SaslAuthenticateRequest saslAuthenticateRequest = (SaslAuthenticateRequest) requestAndSize.request;
try {
byte[] responseToken = saslServer.evaluateResponse(
Utils.copyArray(saslAuthenticateRequest.data().authBytes()));
if (reauthInfo.reauthenticating() && saslServer.isComplete())
reauthInfo.ensurePrincipalUnchanged(principal());
// For versions with SASL_AUTHENTICATE header, send a response to SASL_AUTHENTICATE request even if token is empty.
byte[] responseBytes = responseToken == null ? new byte[0] : responseToken;
long sessionLifetimeMs = !saslServer.isComplete() ? 0L
: reauthInfo.calcCompletionTimesAndReturnSessionLifetimeMs();
sendKafkaResponse(requestContext, new SaslAuthenticateResponse(
new SaslAuthenticateResponseData()
.setErrorCode(Errors.NONE.code())
.setAuthBytes(responseBytes)
.setSessionLifetimeMs(sessionLifetimeMs)));
} catch (SaslAuthenticationException e) {
buildResponseOnAuthenticateFailure(requestContext,
new SaslAuthenticateResponse(
new SaslAuthenticateResponseData()
.setErrorCode(Errors.SASL_AUTHENTICATION_FAILED.code())
.setErrorMessage(e.getMessage())));
throw e;
} catch (SaslException e) {
KerberosError kerberosError = KerberosError.fromException(e);
if (kerberosError != null && kerberosError.retriable()) {
// Handle retriable Kerberos exceptions as I/O exceptions rather than authentication exceptions
throw e;
} else {
// DO NOT include error message from the `SaslException` in the client response since it may
// contain sensitive data like the existence of the user.
String errorMessage = "Authentication failed during "
+ reauthInfo.authenticationOrReauthenticationText()
+ " due to invalid credentials with SASL mechanism " + saslMechanism;
buildResponseOnAuthenticateFailure(requestContext, new SaslAuthenticateResponse(
new SaslAuthenticateResponseData()
.setErrorCode(Errors.SASL_AUTHENTICATION_FAILED.code())
.setErrorMessage(errorMessage)));
throw new SaslAuthenticationException(errorMessage, e);
}
}
}
}
private boolean handleKafkaRequest(byte[] requestBytes) throws IOException, AuthenticationException {
boolean isKafkaRequest = false;
String clientMechanism = null;
try {
ByteBuffer requestBuffer = ByteBuffer.wrap(requestBytes);
RequestHeader header = RequestHeader.parse(requestBuffer);
ApiKeys apiKey = header.apiKey();
// A valid Kafka request header was received. SASL authentication tokens are now expected only
// following a SaslHandshakeRequest since this is not a GSSAPI client token from a Kafka 0.9.0.x client.
if (saslState == SaslState.INITIAL_REQUEST)
setSaslState(SaslState.HANDSHAKE_OR_VERSIONS_REQUEST);
isKafkaRequest = true;
// Raise an error prior to parsing if the api cannot be handled at this layer. This avoids
// unnecessary exposure to some of the more complex schema types.
if (apiKey != ApiKeys.API_VERSIONS && apiKey != ApiKeys.SASL_HANDSHAKE)
throw new IllegalSaslStateException("Unexpected Kafka request of type " + apiKey + " during SASL handshake.");
LOG.debug("Handling Kafka request {} during {}", apiKey, reauthInfo.authenticationOrReauthenticationText());
RequestContext requestContext = new RequestContext(header, connectionId, clientAddress(),
KafkaPrincipal.ANONYMOUS, listenerName, securityProtocol, ClientInformation.EMPTY, false);
RequestAndSize requestAndSize = requestContext.parseRequest(requestBuffer);
if (apiKey == ApiKeys.API_VERSIONS)
handleApiVersionsRequest(requestContext, (ApiVersionsRequest) requestAndSize.request);
else
clientMechanism = handleHandshakeRequest(requestContext, (SaslHandshakeRequest) requestAndSize.request);
} catch (InvalidRequestException e) {
if (saslState == SaslState.INITIAL_REQUEST) {
// InvalidRequestException is thrown if the request is not in Kafka format or if the API key
// is invalid. For compatibility with 0.9.0.x where the first packet is a GSSAPI token
// starting with 0x60, revert to GSSAPI for both these exceptions.
if (LOG.isDebugEnabled()) {
StringBuilder tokenBuilder = new StringBuilder();
for (byte b : requestBytes) {
tokenBuilder.append(String.format("%02x", b));
if (tokenBuilder.length() >= 20)
break;
}
LOG.debug("Received client packet of length {} starting with bytes 0x{}, process as GSSAPI packet", requestBytes.length, tokenBuilder);
}
if (enabledMechanisms.contains(SaslConfigs.GSSAPI_MECHANISM)) {
LOG.debug("First client packet is not a SASL mechanism request, using default mechanism GSSAPI");
clientMechanism = SaslConfigs.GSSAPI_MECHANISM;
} else
throw new UnsupportedSaslMechanismException("Exception handling first SASL packet from client, GSSAPI is not supported by server", e);
} else
throw e;
}
if (clientMechanism != null && (!reauthInfo.reauthenticating()
|| reauthInfo.saslMechanismUnchanged(clientMechanism))) {
createSaslServer(clientMechanism);
setSaslState(SaslState.AUTHENTICATE);
}
return isKafkaRequest;
}
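// Editor's note (not part of the upstream source): the 0.9.0.x fallback above relies on the
// first client packet being a raw GSSAPI token rather than a Kafka request. Per RFC 2743, an
// initial GSS-API context token is DER-framed as [APPLICATION 0], so its first byte is 0x60,
// which is why such packets fail Kafka request-header parsing. A hypothetical check:
//
//   boolean looksLikeGssapiToken = requestBytes.length > 0 && (requestBytes[0] & 0xFF) == 0x60;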
private String handleHandshakeRequest(RequestContext context, SaslHandshakeRequest handshakeRequest) throws IOException, UnsupportedSaslMechanismException {
String clientMechanism = handshakeRequest.data().mechanism();
short version = context.header.apiVersion();
if (version >= 1)
this.enableKafkaSaslAuthenticateHeaders(true);
if (enabledMechanisms.contains(clientMechanism)) {
LOG.debug("Using SASL mechanism '{}' provided by client", clientMechanism);
sendKafkaResponse(context, new SaslHandshakeResponse(
new SaslHandshakeResponseData().setErrorCode(Errors.NONE.code()).setMechanisms(enabledMechanisms)));
return clientMechanism;
} else {
LOG.debug("SASL mechanism '{}' requested by client is not supported", clientMechanism);
buildResponseOnAuthenticateFailure(context, new SaslHandshakeResponse(
new SaslHandshakeResponseData().setErrorCode(Errors.UNSUPPORTED_SASL_MECHANISM.code()).setMechanisms(enabledMechanisms)));
throw new UnsupportedSaslMechanismException("Unsupported SASL mechanism " + clientMechanism);
}
}
// Visible to override for testing
protected void enableKafkaSaslAuthenticateHeaders(boolean flag) {
this.enableKafkaSaslAuthenticateHeaders = flag;
}
private void handleApiVersionsRequest(RequestContext context, ApiVersionsRequest apiVersionsRequest) throws IOException {
if (saslState != SaslState.HANDSHAKE_OR_VERSIONS_REQUEST)
throw new IllegalStateException("Unexpected ApiVersions request received during SASL authentication state " + saslState);
if (apiVersionsRequest.hasUnsupportedRequestVersion())
sendKafkaResponse(context, apiVersionsRequest.getErrorResponse(0, Errors.UNSUPPORTED_VERSION.exception()));
else if (!apiVersionsRequest.isValid())
sendKafkaResponse(context, apiVersionsRequest.getErrorResponse(0, Errors.INVALID_REQUEST.exception()));
else {
metadataRegistry.registerClientInformation(new ClientInformation(apiVersionsRequest.data().clientSoftwareName(),
apiVersionsRequest.data().clientSoftwareVersion()));
sendKafkaResponse(context, apiVersionSupplier.get());
setSaslState(SaslState.HANDSHAKE_REQUEST);
}
}
/**
* Build a {@link Send} response on {@link #authenticate()} failure. The actual response is sent out when
* {@link #sendAuthenticationFailureResponse()} is called.
*/
private void buildResponseOnAuthenticateFailure(RequestContext context, AbstractResponse response) {
authenticationFailureSend = context.buildResponseSend(response);
}
/**
* Send any authentication failure response that may have been previously built.
*/
private void sendAuthenticationFailureResponse() throws IOException {
if (authenticationFailureSend == null)
return;
sendKafkaResponse(authenticationFailureSend);
authenticationFailureSend = null;
}
private void sendKafkaResponse(RequestContext context, AbstractResponse response) throws IOException {
sendKafkaResponse(context.buildResponseSend(response));
}
private void sendKafkaResponse(Send send) throws IOException {
netOutBuffer = send;
flushNetOutBufferAndUpdateInterestOps();
}
/**
* Information related to re-authentication
*/
private class ReauthInfo {
public String previousSaslMechanism;
public KafkaPrincipal previousKafkaPrincipal;
public long reauthenticationBeginNanos;
public Long sessionExpirationTimeNanos;
public boolean connectedClientSupportsReauthentication;
public long authenticationEndNanos;
public String badMechanismErrorMessage;
public void reauthenticating(String previousSaslMechanism, KafkaPrincipal previousKafkaPrincipal,
long reauthenticationBeginNanos) {
this.previousSaslMechanism = Objects.requireNonNull(previousSaslMechanism);
this.previousKafkaPrincipal = Objects.requireNonNull(previousKafkaPrincipal);
this.reauthenticationBeginNanos = reauthenticationBeginNanos;
}
public boolean reauthenticating() {
return previousSaslMechanism != null;
}
public String authenticationOrReauthenticationText() {
return reauthenticating() ? "re-authentication" : "authentication";
}
public void ensurePrincipalUnchanged(KafkaPrincipal reauthenticatedKafkaPrincipal) throws SaslAuthenticationException {
if (!previousKafkaPrincipal.equals(reauthenticatedKafkaPrincipal)) {
throw new SaslAuthenticationException(String.format(
"Cannot change principals during re-authentication from %s.%s: %s.%s",
previousKafkaPrincipal.getPrincipalType(), previousKafkaPrincipal.getName(),
reauthenticatedKafkaPrincipal.getPrincipalType(), reauthenticatedKafkaPrincipal.getName()));
}
}
/*
* We define the REAUTH_BAD_MECHANISM state because the failed re-authentication
* metric does not get updated if we send back an error immediately upon the
* start of re-authentication.
*/
public boolean saslMechanismUnchanged(String clientMechanism) {
if (previousSaslMechanism.equals(clientMechanism))
return true;
badMechanismErrorMessage = String.format(
"SASL mechanism '%s' requested by client is not supported for re-authentication of mechanism '%s'",
clientMechanism, previousSaslMechanism);
LOG.debug(badMechanismErrorMessage);
setSaslState(SaslState.REAUTH_BAD_MECHANISM);
return false;
}
private long calcCompletionTimesAndReturnSessionLifetimeMs() {
long retvalSessionLifetimeMs = 0L;
long authenticationEndMs = time.milliseconds();
authenticationEndNanos = time.nanoseconds();
Long credentialExpirationMs = (Long) saslServer
.getNegotiatedProperty(SaslInternalConfigs.CREDENTIAL_LIFETIME_MS_SASL_NEGOTIATED_PROPERTY_KEY);
Long connectionsMaxReauthMs = connectionsMaxReauthMsByMechanism.get(saslMechanism);
boolean maxReauthSet = connectionsMaxReauthMs != null && connectionsMaxReauthMs > 0;
if (credentialExpirationMs != null || maxReauthSet) {
if (credentialExpirationMs == null)
retvalSessionLifetimeMs = zeroIfNegative(connectionsMaxReauthMs);
else if (!maxReauthSet)
retvalSessionLifetimeMs = zeroIfNegative(credentialExpirationMs - authenticationEndMs);
else
retvalSessionLifetimeMs = zeroIfNegative(Math.min(credentialExpirationMs - authenticationEndMs, connectionsMaxReauthMs));
sessionExpirationTimeNanos = authenticationEndNanos + 1000 * 1000 * retvalSessionLifetimeMs;
}
if (credentialExpirationMs != null) {
LOG.debug(
"Authentication complete; session max lifetime from broker config={} ms, credential expiration={} ({} ms); session expiration = {} ({} ms), sending {} ms to client",
connectionsMaxReauthMs, new Date(credentialExpirationMs),
credentialExpirationMs - authenticationEndMs,
new Date(authenticationEndMs + retvalSessionLifetimeMs), retvalSessionLifetimeMs,
retvalSessionLifetimeMs);
} else {
if (sessionExpirationTimeNanos != null)
LOG.debug(
"Authentication complete; session max lifetime from broker config={} ms, no credential expiration; session expiration = {} ({} ms), sending {} ms to client",
connectionsMaxReauthMs, new Date(authenticationEndMs + retvalSessionLifetimeMs),
retvalSessionLifetimeMs, retvalSessionLifetimeMs);
else
LOG.debug(
"Authentication complete; session max lifetime from broker config={} ms, no credential expiration; no session expiration, sending 0 ms to client",
connectionsMaxReauthMs);
}
return retvalSessionLifetimeMs;
}
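// Illustrative sketch (not part of the upstream source): a worked example of the arithmetic
// above, with hypothetical values. If credentialExpirationMs - authenticationEndMs = 90_000 ms
// and connectionsMaxReauthMs = 60_000 ms, the session lifetime is min(90_000, 60_000) =
// 60_000 ms, and the expiration instant is then recorded in nanoseconds:
//
//   long retvalSessionLifetimeMs = Math.min(90_000L, 60_000L); // = 60_000
//   long sessionExpirationTimeNanos = authenticationEndNanos
//           + 1000 * 1000 * retvalSessionLifetimeMs;           // ms -> ns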
public Long reauthenticationLatencyMs() {
if (!reauthenticating())
return null;
// record at least 1 ms if there is some latency
long latencyNanos = authenticationEndNanos - reauthenticationBeginNanos;
return latencyNanos == 0L ? 0L : Math.max(1L, Math.round(latencyNanos / 1000.0 / 1000.0));
}
private long zeroIfNegative(long value) {
return Math.max(0L, value);
}
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/authenticator/SaslServerCallbackHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.authenticator;
import java.util.List;
import java.util.Map;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.sasl.AuthorizeCallback;
import javax.security.sasl.RealmCallback;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Default callback handler for SASL servers. The callbacks required for all the SASL
* mechanisms enabled in the server should be supported by this callback handler. See
* <a href="https://docs.oracle.com/javase/8/docs/technotes/guides/security/sasl/sasl-refguide.html">Java SASL API</a>
* for the list of SASL callback handlers required for each SASL mechanism.
*/
public class SaslServerCallbackHandler implements AuthenticateCallbackHandler {
private static final Logger LOG = LoggerFactory.getLogger(SaslServerCallbackHandler.class);
private String mechanism;
@Override
public void configure(Map<String, ?> configs, String mechanism, List<AppConfigurationEntry> jaasConfigEntries) {
this.mechanism = mechanism;
}
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof RealmCallback)
handleRealmCallback((RealmCallback) callback);
else if (callback instanceof AuthorizeCallback && mechanism.equals(SaslConfigs.GSSAPI_MECHANISM))
handleAuthorizeCallback((AuthorizeCallback) callback);
else
throw new UnsupportedCallbackException(callback);
}
}
private void handleRealmCallback(RealmCallback rc) {
LOG.trace("Client supplied realm: {} ", rc.getDefaultText());
rc.setText(rc.getDefaultText());
}
private void handleAuthorizeCallback(AuthorizeCallback ac) {
String authenticationID = ac.getAuthenticationID();
String authorizationID = ac.getAuthorizationID();
LOG.info("Successfully authenticated client: authenticationID={}; authorizationID={}.",
authenticationID, authorizationID);
ac.setAuthorized(true);
ac.setAuthorizedID(authenticationID);
}
@Override
public void close() {
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/authenticator/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides some authentication mechanisms for securing Kafka clusters.
* <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
*/
package org.apache.kafka.common.security.authenticator; |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/kerberos/BadFormatString.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.kerberos;
import java.io.IOException;
public class BadFormatString extends IOException {
BadFormatString(String msg) {
super(msg);
}
BadFormatString(String msg, Throwable err) {
super(msg, err);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/kerberos/KerberosClientCallbackHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.kerberos;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.NameCallback;
import javax.security.auth.callback.PasswordCallback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.sasl.AuthorizeCallback;
import javax.security.sasl.RealmCallback;
import java.util.List;
import java.util.Map;
/**
* Callback handler for SASL/GSSAPI clients.
*/
public class KerberosClientCallbackHandler implements AuthenticateCallbackHandler {
@Override
public void configure(Map<String, ?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries) {
if (!saslMechanism.equals(SaslConfigs.GSSAPI_MECHANISM))
throw new IllegalStateException("Kerberos callback handler should only be used with GSSAPI");
}
@Override
public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
for (Callback callback : callbacks) {
if (callback instanceof NameCallback) {
NameCallback nc = (NameCallback) callback;
nc.setName(nc.getDefaultName());
} else if (callback instanceof PasswordCallback) {
String errorMessage = "Could not login: the client is being asked for a password, but the Kafka" +
" client code does not currently support obtaining a password from the user.";
errorMessage += " Make sure -Djava.security.auth.login.config property passed to JVM and" +
" the client is configured to use a ticket cache (using" +
" the JAAS configuration setting 'useTicketCache=true)'. Make sure you are using" +
" FQDN of the Kafka broker you are trying to connect to.";
throw new UnsupportedCallbackException(callback, errorMessage);
} else if (callback instanceof RealmCallback) {
RealmCallback rc = (RealmCallback) callback;
rc.setText(rc.getDefaultText());
} else if (callback instanceof AuthorizeCallback) {
AuthorizeCallback ac = (AuthorizeCallback) callback;
String authId = ac.getAuthenticationID();
String authzId = ac.getAuthorizationID();
ac.setAuthorized(authId.equals(authzId));
if (ac.isAuthorized())
ac.setAuthorizedID(authzId);
} else {
throw new UnsupportedCallbackException(callback, "Unrecognized SASL ClientCallback");
}
}
}
@Override
public void close() {
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/kerberos/KerberosError.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.kerberos;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.security.authenticator.SaslClientAuthenticator;
import org.apache.kafka.common.utils.Java;
import org.ietf.jgss.GSSException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.security.sasl.SaslClient;
import java.lang.reflect.Method;
/**
* Kerberos exceptions that may require special handling. The standard Kerberos error codes
* for these errors are retrieved using KrbException#errorCode() from the underlying Kerberos
* exception thrown during {@link SaslClient#evaluateChallenge(byte[])}.
*/
public enum KerberosError {
// (Mechanism level: Server not found in Kerberos database (7) - UNKNOWN_SERVER)
// This is not retriable, but is included here to add extra logging for this case.
SERVER_NOT_FOUND(7, false),
// (Mechanism level: Client not yet valid - try again later (21))
CLIENT_NOT_YET_VALID(21, true),
// (Mechanism level: Ticket not yet valid (33) - Ticket not yet valid)
// This could be a small timing window.
TICKET_NOT_YET_VALID(33, true),
// (Mechanism level: Request is a replay (34) - Request is a replay)
// Replay detection used to prevent DoS attacks can result in false positives, so retry on error.
REPLAY(34, true);
private static final Logger log = LoggerFactory.getLogger(SaslClientAuthenticator.class);
private static final Class<?> KRB_EXCEPTION_CLASS;
private static final Method KRB_EXCEPTION_RETURN_CODE_METHOD;
static {
try {
// different IBM JDK versions include different security implementations
if (Java.isIbmJdk() && canLoad("com.ibm.security.krb5.KrbException")) {
KRB_EXCEPTION_CLASS = Class.forName("com.ibm.security.krb5.KrbException");
} else if (Java.isIbmJdk() && canLoad("com.ibm.security.krb5.internal.KrbException")) {
KRB_EXCEPTION_CLASS = Class.forName("com.ibm.security.krb5.internal.KrbException");
} else {
KRB_EXCEPTION_CLASS = Class.forName("sun.security.krb5.KrbException");
}
KRB_EXCEPTION_RETURN_CODE_METHOD = KRB_EXCEPTION_CLASS.getMethod("returnCode");
} catch (Exception e) {
throw new KafkaException("Kerberos exceptions could not be initialized", e);
}
}
private static boolean canLoad(String clazz) {
try {
Class.forName(clazz);
return true;
} catch (Exception e) {
return false;
}
}
private final int errorCode;
private final boolean retriable;
KerberosError(int errorCode, boolean retriable) {
this.errorCode = errorCode;
this.retriable = retriable;
}
public boolean retriable() {
return retriable;
}
public static KerberosError fromException(Exception exception) {
Throwable cause = exception.getCause();
while (cause != null && !KRB_EXCEPTION_CLASS.isInstance(cause)) {
cause = cause.getCause();
}
if (cause == null)
return null;
else {
try {
Integer errorCode = (Integer) KRB_EXCEPTION_RETURN_CODE_METHOD.invoke(cause);
return fromErrorCode(errorCode);
} catch (Exception e) {
log.trace("Kerberos return code could not be determined from {} due to {}", exception, e);
return null;
}
}
}
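// Illustrative sketch (not part of the upstream source): typical usage, mirroring
// SaslServerAuthenticator.handleSaslToken() -- map a SaslException back to a KerberosError
// and decide whether the failure is transient. Hypothetical handling code:
//
//   try {
//       saslServer.evaluateResponse(token);
//   } catch (SaslException e) {
//       KerberosError error = KerberosError.fromException(e);
//       if (error != null && error.retriable())
//           throw e; // treat as an I/O-style, retriable failure
//       // otherwise fail authentication without leaking the underlying message
//   }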
private static KerberosError fromErrorCode(int errorCode) {
for (KerberosError error : values()) {
if (error.errorCode == errorCode)
return error;
}
return null;
}
/**
* Returns true if the exception should be handled as a transient failure on clients.
* We handle GSSException.NO_CRED as retriable on the client-side since this may
* occur during re-login if a client attempts to authenticate after logout, but
* before the subsequent login.
*/
public static boolean isRetriableClientGssException(Exception exception) {
Throwable cause = exception.getCause();
while (cause != null && !(cause instanceof GSSException)) {
cause = cause.getCause();
}
if (cause != null) {
GSSException gssException = (GSSException) cause;
return gssException.getMajor() == GSSException.NO_CRED;
}
return false;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/kerberos/KerberosLogin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.kerberos;
import javax.security.auth.kerberos.KerberosPrincipal;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;
import javax.security.auth.kerberos.KerberosTicket;
import javax.security.auth.Subject;
import org.apache.kafka.common.security.JaasContext;
import org.apache.kafka.common.security.JaasUtils;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.authenticator.AbstractLogin;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.utils.KafkaThread;
import org.apache.kafka.common.utils.Shell;
import org.apache.kafka.common.utils.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
/**
* This class is responsible for refreshing Kerberos credentials for
* logins for both Kafka client and server.
*/
public class KerberosLogin extends AbstractLogin {
private static final Logger log = LoggerFactory.getLogger(KerberosLogin.class);
private static final Random RNG = new Random();
private final Time time = Time.SYSTEM;
private Thread t;
private boolean isKrbTicket;
private boolean isUsingTicketCache;
private String principal;
// The login thread sleeps until the configured fraction of the time from the last
// refresh to the ticket's expiry (ticketRenewWindowFactor, 80% by default) has
// elapsed, at which point it wakes and tries to renew the ticket.
private double ticketRenewWindowFactor;
/**
* Percentage of random jitter added to the renewal time
*/
private double ticketRenewJitter;
// Regardless of the ticketRenewWindowFactor setting above and the ticket expiry time,
// the thread will not sleep between refresh attempts for less than minTimeBeforeRelogin
// (1 minute by default, configurable via sasl.kerberos.min.time.before.relogin).
private long minTimeBeforeRelogin;
private String kinitCmd;
private volatile Subject subject;
private LoginContext loginContext;
private String serviceName;
private long lastLogin;
@Override
public void configure(Map<String, ?> configs, String contextName, Configuration configuration,
AuthenticateCallbackHandler callbackHandler) {
super.configure(configs, contextName, configuration, callbackHandler);
this.ticketRenewWindowFactor = (Double) configs.get(SaslConfigs.SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR);
this.ticketRenewJitter = (Double) configs.get(SaslConfigs.SASL_KERBEROS_TICKET_RENEW_JITTER);
this.minTimeBeforeRelogin = (Long) configs.get(SaslConfigs.SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN);
this.kinitCmd = (String) configs.get(SaslConfigs.SASL_KERBEROS_KINIT_CMD);
this.serviceName = getServiceName(configs, contextName, configuration);
}
/**
* Performs login for each login module specified for the login context of this instance and starts the thread used
* to periodically re-login to the Kerberos Ticket Granting Server.
*/
@Override
public LoginContext login() throws LoginException {
this.lastLogin = currentElapsedTime();
loginContext = super.login();
subject = loginContext.getSubject();
isKrbTicket = !subject.getPrivateCredentials(KerberosTicket.class).isEmpty();
AppConfigurationEntry[] entries = configuration().getAppConfigurationEntry(contextName());
if (entries.length == 0) {
isUsingTicketCache = false;
principal = null;
} else {
// there will only be a single entry
AppConfigurationEntry entry = entries[0];
if (entry.getOptions().get("useTicketCache") != null) {
String val = (String) entry.getOptions().get("useTicketCache");
isUsingTicketCache = val.equals("true");
} else
isUsingTicketCache = false;
if (entry.getOptions().get("principal") != null)
principal = (String) entry.getOptions().get("principal");
else
principal = null;
}
if (!isKrbTicket) {
log.debug("[Principal={}]: It is not a Kerberos ticket", principal);
t = null;
// if no TGT, do not bother with ticket management.
return loginContext;
}
log.debug("[Principal={}]: It is a Kerberos ticket", principal);
// Refresh the Ticket Granting Ticket (TGT) periodically. How often to refresh is determined by the
// TGT's existing expiry date and the configured minTimeBeforeRelogin. For testing and development,
// you can decrease the interval of expiration of tickets (for example, to 3 minutes) by running:
// "modprinc -maxlife 3mins <principal>" in kadmin.
t = KafkaThread.daemon(String.format("kafka-kerberos-refresh-thread-%s", principal), () -> {
log.info("[Principal={}]: TGT refresh thread started.", principal);
while (true) { // renewal thread's main loop. if it exits from here, thread will exit.
KerberosTicket tgt = getTGT();
long now = currentWallTime();
long nextRefresh;
Date nextRefreshDate;
if (tgt == null) {
nextRefresh = now + minTimeBeforeRelogin;
nextRefreshDate = new Date(nextRefresh);
log.warn("[Principal={}]: No TGT found: will try again at {}", principal, nextRefreshDate);
} else {
nextRefresh = getRefreshTime(tgt);
long expiry = tgt.getEndTime().getTime();
Date expiryDate = new Date(expiry);
if (isUsingTicketCache && tgt.getRenewTill() != null && tgt.getRenewTill().getTime() < expiry) {
log.warn("The TGT cannot be renewed beyond the next expiry date: {}." +
"This process will not be able to authenticate new SASL connections after that " +
"time (for example, it will not be able to authenticate a new connection with a Kafka " +
"Broker). Ask your system administrator to either increase the " +
"'renew until' time by doing : 'modprinc -maxrenewlife {} ' within " +
"kadmin, or instead, to generate a keytab for {}. Because the TGT's " +
"expiry cannot be further extended by refreshing, exiting refresh thread now.",
expiryDate, principal, principal);
return;
}
// determine how long to sleep from looking at ticket's expiry.
// We should not allow the ticket to expire, but we should take into consideration
// minTimeBeforeRelogin. Will not sleep less than minTimeBeforeRelogin, unless doing so
// would cause ticket expiration.
if ((nextRefresh > expiry) || (minTimeBeforeRelogin > expiry - now)) {
// expiry is before next scheduled refresh.
log.info("[Principal={}]: Refreshing now because expiry is before next scheduled refresh time.", principal);
nextRefresh = now;
} else {
if (nextRefresh - now < minTimeBeforeRelogin) {
// next scheduled refresh is sooner than (now + MIN_TIME_BEFORE_LOGIN).
Date until = new Date(nextRefresh);
Date newUntil = new Date(now + minTimeBeforeRelogin);
log.warn("[Principal={}]: TGT refresh thread time adjusted from {} to {} since the former is sooner " +
"than the minimum refresh interval ({} seconds) from now.",
principal, until, newUntil, minTimeBeforeRelogin / 1000);
}
nextRefresh = Math.max(nextRefresh, now + minTimeBeforeRelogin);
}
nextRefreshDate = new Date(nextRefresh);
if (nextRefresh > expiry) {
log.error("[Principal={}]: Next refresh: {} is later than expiry {}. This may indicate a clock skew problem." +
"Check that this host and the KDC hosts' clocks are in sync. Exiting refresh thread.",
principal, nextRefreshDate, expiryDate);
return;
}
}
if (now < nextRefresh) {
Date until = new Date(nextRefresh);
log.info("[Principal={}]: TGT refresh sleeping until: {}", principal, until);
try {
Thread.sleep(nextRefresh - now);
} catch (InterruptedException ie) {
log.warn("[Principal={}]: TGT renewal thread has been interrupted and will exit.", principal);
return;
}
} else {
log.error("[Principal={}]: NextRefresh: {} is in the past: exiting refresh thread. Check"
+ " clock sync between this host and KDC - (KDC's clock is likely ahead of this host)."
+ " Manual intervention will be required for this client to successfully authenticate."
+ " Exiting refresh thread.", principal, nextRefreshDate);
return;
}
if (isUsingTicketCache) {
String kinitArgs = "-R";
int retry = 1;
while (retry >= 0) {
try {
log.debug("[Principal={}]: Running ticket cache refresh command: {} {}", principal, kinitCmd, kinitArgs);
Shell.execCommand(kinitCmd, kinitArgs);
break;
} catch (Exception e) {
if (retry > 0) {
log.warn("[Principal={}]: Error when trying to renew with TicketCache, but will retry ", principal, e);
--retry;
// sleep for 10 seconds
try {
Thread.sleep(10 * 1000);
} catch (InterruptedException ie) {
log.error("[Principal={}]: Interrupted while renewing TGT, exiting Login thread", principal);
return;
}
} else {
log.warn("[Principal={}]: Could not renew TGT due to problem running shell command: '{} {}'. " +
"Exiting refresh thread.", principal, kinitCmd, kinitArgs, e);
return;
}
}
}
}
try {
int retry = 1;
while (retry >= 0) {
try {
reLogin();
break;
} catch (LoginException le) {
if (retry > 0) {
log.warn("[Principal={}]: Error when trying to re-Login, but will retry ", principal, le);
--retry;
// sleep for 10 seconds.
try {
Thread.sleep(10 * 1000);
} catch (InterruptedException e) {
log.error("[Principal={}]: Interrupted during login retry after LoginException:", principal, le);
throw le;
}
} else {
log.error("[Principal={}]: Could not refresh TGT.", principal, le);
}
}
}
} catch (LoginException le) {
log.error("[Principal={}]: Failed to refresh TGT: refresh thread exiting now.", principal, le);
return;
}
}
});
t.start();
return loginContext;
}
@Override
public void close() {
if ((t != null) && (t.isAlive())) {
t.interrupt();
try {
t.join();
} catch (InterruptedException e) {
log.warn("[Principal={}]: Error while waiting for Login thread to shutdown.", principal, e);
Thread.currentThread().interrupt();
}
}
}
@Override
public Subject subject() {
return subject;
}
@Override
public String serviceName() {
return serviceName;
}
private static String getServiceName(Map<String, ?> configs, String contextName, Configuration configuration) {
List<AppConfigurationEntry> configEntries = Arrays.asList(configuration.getAppConfigurationEntry(contextName));
String jaasServiceName = JaasContext.configEntryOption(configEntries, JaasUtils.SERVICE_NAME, null);
String configServiceName = (String) configs.get(SaslConfigs.SASL_KERBEROS_SERVICE_NAME);
if (jaasServiceName != null && configServiceName != null && !jaasServiceName.equals(configServiceName)) {
String message = String.format("Conflicting serviceName values found in JAAS and Kafka configs " +
"value in JAAS file %s, value in Kafka config %s", jaasServiceName, configServiceName);
throw new IllegalArgumentException(message);
}
if (jaasServiceName != null)
return jaasServiceName;
if (configServiceName != null)
return configServiceName;
throw new IllegalArgumentException("No serviceName defined in either JAAS or Kafka config");
}
private long getRefreshTime(KerberosTicket tgt) {
long start = tgt.getStartTime().getTime();
long expires = tgt.getEndTime().getTime();
log.info("[Principal={}]: TGT valid starting at: {}", principal, tgt.getStartTime());
log.info("[Principal={}]: TGT expires: {}", principal, tgt.getEndTime());
long proposedRefresh = start + (long) ((expires - start) *
(ticketRenewWindowFactor + (ticketRenewJitter * RNG.nextDouble())));
if (proposedRefresh > expires)
// proposedRefresh is too far in the future: it's after ticket expires: simply return now.
return currentWallTime();
else
return proposedRefresh;
}
private KerberosTicket getTGT() {
Set<KerberosTicket> tickets = subject.getPrivateCredentials(KerberosTicket.class);
for (KerberosTicket ticket : tickets) {
KerberosPrincipal server = ticket.getServer();
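// By Kerberos convention, a TGT's server principal has the form
// krbtgt/<realm>@<realm>, which is what we match on here.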
if (server.getName().equals("krbtgt/" + server.getRealm() + "@" + server.getRealm())) {
log.debug("Found TGT with client principal '{}' and server principal '{}'.", ticket.getClient().getName(),
ticket.getServer().getName());
return ticket;
}
}
return null;
}
private boolean hasSufficientTimeElapsed() {
long now = currentElapsedTime();
if (now - lastLogin < minTimeBeforeRelogin) {
log.warn("[Principal={}]: Not attempting to re-login since the last re-login was attempted less than {} seconds before.",
principal, minTimeBeforeRelogin / 1000);
return false;
}
return true;
}
/**
* Re-login a principal. This method assumes that {@link #login()} has happened already.
* @throws javax.security.auth.login.LoginException on a failure
*/
protected void reLogin() throws LoginException {
if (!isKrbTicket) {
return;
}
if (loginContext == null) {
throw new LoginException("Login must be done first");
}
if (!hasSufficientTimeElapsed()) {
return;
}
synchronized (KerberosLogin.class) {
log.info("Initiating logout for {}", principal);
// register most recent relogin attempt
lastLogin = currentElapsedTime();
// Clear up the Kerberos state. But the tokens are not cleared! As per
// the Java Kerberos login module code, only the Kerberos credentials
// are cleared. If the previous logout succeeded but login failed, we shouldn't
// logout again since a duplicate logout causes an NPE from Java 9 onwards.
if (subject != null && !subject.getPrincipals().isEmpty()) {
logout();
}
// Login and also update the subject field of this instance to
// have the new credentials (pass it to the LoginContext constructor).
loginContext = new LoginContext(contextName(), subject, null, configuration());
log.info("Initiating re-login for {}", principal);
login(loginContext);
}
}
// Visibility to override for testing
protected void login(LoginContext loginContext) throws LoginException {
loginContext.login();
}
// Visibility to override for testing
protected void logout() throws LoginException {
loginContext.logout();
}
private long currentElapsedTime() {
return time.hiResClockMs();
}
private long currentWallTime() {
return time.milliseconds();
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/kerberos/KerberosName.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.kerberos;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class KerberosName {
/**
* A pattern that matches a Kerberos name with at most 3 components.
*/
private static final Pattern NAME_PARSER = Pattern.compile("([^/@]*)(/([^/@]*))?@([^/@]*)");
/** The first component of the name */
private final String serviceName;
/** The second component of the name. It may be null. */
private final String hostName;
/** The realm of the name. */
private final String realm;
/**
* Creates an instance of `KerberosName` with the provided parameters.
*/
public KerberosName(String serviceName, String hostName, String realm) {
if (serviceName == null)
throw new IllegalArgumentException("serviceName must not be null");
this.serviceName = serviceName;
this.hostName = hostName;
this.realm = realm;
}
/**
* Create a name from the full Kerberos principal name.
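* <p>
* For example (illustrative principal):
* <pre>{@code
* KerberosName name = KerberosName.parse("kafka/broker1.example.com@EXAMPLE.COM");
* // name.serviceName() == "kafka"
* // name.hostName()    == "broker1.example.com"
* // name.realm()       == "EXAMPLE.COM"
* }</pre>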
*/
public static KerberosName parse(String principalName) {
Matcher match = NAME_PARSER.matcher(principalName);
if (!match.matches()) {
if (principalName.contains("@")) {
throw new IllegalArgumentException("Malformed Kerberos name: " + principalName);
} else {
return new KerberosName(principalName, null, null);
}
} else {
return new KerberosName(match.group(1), match.group(3), match.group(4));
}
}
/**
* Put the name back together from the parts.
*/
@Override
public String toString() {
StringBuilder result = new StringBuilder();
result.append(serviceName);
if (hostName != null) {
result.append('/');
result.append(hostName);
}
if (realm != null) {
result.append('@');
result.append(realm);
}
return result.toString();
}
/**
* Get the first component of the name.
* @return the first section of the Kerberos principal name
*/
public String serviceName() {
return serviceName;
}
/**
* Get the second component of the name.
* @return the second section of the Kerberos principal name, and may be null
*/
public String hostName() {
return hostName;
}
/**
* Get the realm of the name.
* @return the realm of the name, may be null
*/
public String realm() {
return realm;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/kerberos/KerberosRule.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.kerberos;
import java.io.IOException;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* An encoding of a rule for translating kerberos names.
*/
class KerberosRule {
/**
* A pattern that matches a string without '$' and then a single
* parameter with $n.
*/
private static final Pattern PARAMETER_PATTERN = Pattern.compile("([^$]*)(\\$(\\d*))?");
/**
* A pattern that recognizes simple/non-simple names.
*/
private static final Pattern NON_SIMPLE_PATTERN = Pattern.compile("[/@]");
private final String defaultRealm;
private final boolean isDefault;
private final int numOfComponents;
private final String format;
private final Pattern match;
private final Pattern fromPattern;
private final String toPattern;
private final boolean repeat;
private final boolean toLowerCase;
private final boolean toUpperCase;
KerberosRule(String defaultRealm) {
this.defaultRealm = defaultRealm;
isDefault = true;
numOfComponents = 0;
format = null;
match = null;
fromPattern = null;
toPattern = null;
repeat = false;
toLowerCase = false;
toUpperCase = false;
}
KerberosRule(String defaultRealm, int numOfComponents, String format, String match, String fromPattern,
String toPattern, boolean repeat, boolean toLowerCase, boolean toUpperCase) {
this.defaultRealm = defaultRealm;
isDefault = false;
this.numOfComponents = numOfComponents;
this.format = format;
this.match = match == null ? null : Pattern.compile(match);
this.fromPattern =
fromPattern == null ? null : Pattern.compile(fromPattern);
this.toPattern = toPattern;
this.repeat = repeat;
this.toLowerCase = toLowerCase;
this.toUpperCase = toUpperCase;
}
@Override
public String toString() {
StringBuilder buf = new StringBuilder();
if (isDefault) {
buf.append("DEFAULT");
} else {
buf.append("RULE:[");
buf.append(numOfComponents);
buf.append(':');
buf.append(format);
buf.append(']');
if (match != null) {
buf.append('(');
buf.append(match);
buf.append(')');
}
if (fromPattern != null) {
buf.append("s/");
buf.append(fromPattern);
buf.append('/');
buf.append(toPattern);
buf.append('/');
if (repeat) {
buf.append('g');
}
}
if (toLowerCase) {
buf.append("/L");
}
if (toUpperCase) {
buf.append("/U");
}
}
return buf.toString();
}
/**
* Replace the numbered parameters of the form $n where n is from 0 to
* the length of params - 1. Normal text is copied directly and $n is replaced
* by the corresponding parameter.
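* <p>
* For example (illustrative values):
* {@code replaceParameters("$1@$0", new String[]{"EXAMPLE.COM", "kafka"})} returns {@code "kafka@EXAMPLE.COM"}.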
* @param format the string in which to replace the parameters
* @param params the list of parameters
* @return the generated string with the parameter references replaced.
* @throws BadFormatString if the format references a parameter index outside the range of the given params
*/
static String replaceParameters(String format,
String[] params) throws BadFormatString {
Matcher match = PARAMETER_PATTERN.matcher(format);
int start = 0;
StringBuilder result = new StringBuilder();
while (start < format.length() && match.find(start)) {
result.append(match.group(1));
String paramNum = match.group(3);
if (paramNum != null) {
try {
int num = Integer.parseInt(paramNum);
if (num < 0 || num >= params.length) {
throw new BadFormatString("index " + num + " from " + format +
" is outside of the valid range 0 to " +
(params.length - 1));
}
result.append(params[num]);
} catch (NumberFormatException nfe) {
throw new BadFormatString("bad format in username mapping in " +
paramNum, nfe);
}
}
start = match.end();
}
return result.toString();
}
/**
* Replace the matches of the from pattern in the base string with the value
* of the to string.
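* <p>
* For example (illustrative values):
* {@code replaceSubstitution("kafka@EXAMPLE.COM", Pattern.compile("@.*"), "", false)} returns {@code "kafka"}.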
* @param base the string to transform
* @param from the pattern to look for in the base string
* @param to the string to replace matches of the pattern with
* @param repeat whether the substitution should be repeated
* @return the base string with the substitution(s) applied
*/
static String replaceSubstitution(String base, Pattern from, String to,
boolean repeat) {
Matcher match = from.matcher(base);
if (repeat) {
return match.replaceAll(to);
} else {
return match.replaceFirst(to);
}
}
/**
* Try to apply this rule to the given name represented as a parameter
* array.
* @param params first element is the realm, second and later elements
* are the components of the name "a/b@FOO" -> {"FOO", "a", "b"}
* @return the short name if this rule applies or null
* @throws IOException if something is wrong with the rules
*/
String apply(String[] params) throws IOException {
String result = null;
if (isDefault) {
if (defaultRealm.equals(params[0])) {
result = params[1];
}
} else if (params.length - 1 == numOfComponents) {
String base = replaceParameters(format, params);
if (match == null || match.matcher(base).matches()) {
if (fromPattern == null) {
result = base;
} else {
result = replaceSubstitution(base, fromPattern, toPattern, repeat);
}
}
}
if (result != null && NON_SIMPLE_PATTERN.matcher(result).find()) {
throw new NoMatchingRule("Non-simple name " + result + " after auth_to_local rule " + this);
}
if (toLowerCase && result != null) {
result = result.toLowerCase(Locale.ENGLISH);
} else if (toUpperCase && result != null) {
result = result.toUpperCase(Locale.ENGLISH);
}
return result;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/kerberos/KerberosShortNamer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.kerberos;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* This class implements parsing and handling of Kerberos principal names. In
* particular, it splits them apart and translates them down into local
* operating system names.
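* <p>
* A usage sketch (the rule strings below are illustrative, not taken from any
* particular configuration):
* <pre>{@code
* KerberosShortNamer namer = KerberosShortNamer.fromUnparsedRules(
*     "EXAMPLE.COM",
*     Arrays.asList("RULE:[2:$1@$0](kafka@EXAMPLE.COM)s/@.*//", "DEFAULT"));
* String shortName = namer.shortName(KerberosName.parse("kafka/broker1@EXAMPLE.COM"));
* // shortName == "kafka"
* }</pre>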
*/
public class KerberosShortNamer {
/**
* A pattern for parsing an auth_to_local rule.
*/
private static final Pattern RULE_PARSER = Pattern.compile("((DEFAULT)|((RULE:\\[(\\d*):([^\\]]*)](\\(([^)]*)\\))?(s/([^/]*)/([^/]*)/(g)?)?/?(L|U)?)))");
/* Rules for the translation of the principal name into an operating system name */
private final List<KerberosRule> principalToLocalRules;
public KerberosShortNamer(List<KerberosRule> principalToLocalRules) {
this.principalToLocalRules = principalToLocalRules;
}
public static KerberosShortNamer fromUnparsedRules(String defaultRealm, List<String> principalToLocalRules) {
List<String> rules = principalToLocalRules == null ? Collections.singletonList("DEFAULT") : principalToLocalRules;
return new KerberosShortNamer(parseRules(defaultRealm, rules));
}
private static List<KerberosRule> parseRules(String defaultRealm, List<String> rules) {
List<KerberosRule> result = new ArrayList<>();
for (String rule : rules) {
Matcher matcher = RULE_PARSER.matcher(rule);
if (!matcher.lookingAt()) {
throw new IllegalArgumentException("Invalid rule: " + rule);
}
if (rule.length() != matcher.end())
throw new IllegalArgumentException("Invalid rule: `" + rule + "`, unmatched substring: `" + rule.substring(matcher.end()) + "`");
if (matcher.group(2) != null) {
result.add(new KerberosRule(defaultRealm));
} else {
result.add(new KerberosRule(defaultRealm,
Integer.parseInt(matcher.group(5)),
matcher.group(6),
matcher.group(8),
matcher.group(10),
matcher.group(11),
"g".equals(matcher.group(12)),
"L".equals(matcher.group(13)),
"U".equals(matcher.group(13))));
}
}
return result;
}
/**
* Get the translation of the principal name into an operating system
* user name.
* @return the short name
* @throws IOException if no rule applies to the given name or a rule produces a non-simple name
*/
public String shortName(KerberosName kerberosName) throws IOException {
String[] params;
if (kerberosName.hostName() == null) {
// if it is already simple, just return it
if (kerberosName.realm() == null)
return kerberosName.serviceName();
params = new String[]{kerberosName.realm(), kerberosName.serviceName()};
} else {
params = new String[]{kerberosName.realm(), kerberosName.serviceName(), kerberosName.hostName()};
}
for (KerberosRule r : principalToLocalRules) {
String result = r.apply(params);
if (result != null)
return result;
}
throw new NoMatchingRule("No rules apply to " + kerberosName + ", rules " + principalToLocalRules);
}
@Override
public String toString() {
return "KerberosShortNamer(principalToLocalRules = " + principalToLocalRules + ")";
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/kerberos/NoMatchingRule.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.kerberos;
import java.io.IOException;
public class NoMatchingRule extends IOException {
NoMatchingRule(String msg) {
super(msg);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/kerberos/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides an adapter for using Kerberos to secure Kafka clusters.
* <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
*/
package org.apache.kafka.common.security.kerberos; |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/OAuthBearerExtensionsValidatorCallback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer;
import org.apache.kafka.common.security.auth.SaslExtensions;
import javax.security.auth.callback.Callback;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
import static org.apache.kafka.common.utils.CollectionUtils.subtractMap;
/**
* A {@code Callback} for use by the {@code SaslServer} implementation when it
* needs to validate the SASL extensions for the OAUTHBEARER mechanism.
* <p>
* Callback handlers should use the {@link #valid(String)}
* method to communicate valid extensions back to the SASL server.
* Callback handlers should use the
* {@link #error(String, String)} method to communicate validation errors back to
* the SASL Server.
* As per RFC-7628 (https://tools.ietf.org/html/rfc7628#section-3.1), unknown extensions must be ignored by the server.
* The callback handler implementation should simply ignore unknown extensions,
* not calling {@link #error(String, String)} nor {@link #valid(String)}.
* Callback handlers should communicate other problems by raising an {@code IOException}.
* <p>
* The OAuth bearer token is provided in the callback for better context in extension validation.
* It is very important that token validation is done in its own {@link OAuthBearerValidatorCallback},
* regardless of the provided extensions, as extensions are inherently insecure.
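* <p>
* An illustrative handler fragment (the extension name {@code traceId} and its
* validation rule are hypothetical):
* <pre>{@code
* OAuthBearerExtensionsValidatorCallback callback = ...;
* String traceId = callback.inputExtensions().map().get("traceId");
* if (traceId != null) {
*     if (traceId.matches("[0-9a-f]+"))
*         callback.valid("traceId");
*     else
*         callback.error("traceId", "traceId must be lowercase hex");
* }
* }</pre>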
*/
public class OAuthBearerExtensionsValidatorCallback implements Callback {
private final OAuthBearerToken token;
private final SaslExtensions inputExtensions;
private final Map<String, String> validatedExtensions = new HashMap<>();
private final Map<String, String> invalidExtensions = new HashMap<>();
public OAuthBearerExtensionsValidatorCallback(OAuthBearerToken token, SaslExtensions extensions) {
this.token = Objects.requireNonNull(token);
this.inputExtensions = Objects.requireNonNull(extensions);
}
/**
* @return {@link OAuthBearerToken} the OAuth bearer token of the client
*/
public OAuthBearerToken token() {
return token;
}
/**
* @return {@link SaslExtensions} consisting of the unvalidated extension names and values that were sent by the client
*/
public SaslExtensions inputExtensions() {
return inputExtensions;
}
/**
* @return an unmodifiable {@link Map} consisting of the extension names and values that were validated and recognized by the server
*/
public Map<String, String> validatedExtensions() {
return Collections.unmodifiableMap(validatedExtensions);
}
/**
* @return An immutable {@link Map} consisting of the name->error messages of extensions which failed validation
*/
public Map<String, String> invalidExtensions() {
return Collections.unmodifiableMap(invalidExtensions);
}
/**
* @return An immutable {@link Map} consisting of the extensions that have neither been validated nor invalidated
*/
public Map<String, String> ignoredExtensions() {
return Collections.unmodifiableMap(subtractMap(subtractMap(inputExtensions.map(), invalidExtensions), validatedExtensions));
}
/**
* Validates a specific extension in the original {@code inputExtensions} map
* @param extensionName - the name of the extension which was validated
*/
public void valid(String extensionName) {
if (!inputExtensions.map().containsKey(extensionName))
throw new IllegalArgumentException(String.format("Extension %s was not found in the original extensions", extensionName));
validatedExtensions.put(extensionName, inputExtensions.map().get(extensionName));
}
/**
* Set the error value for a specific extension key-value pair if validation has failed
*
* @param invalidExtensionName
* the mandatory extension name which caused the validation failure
* @param errorMessage
* error message describing why the validation failed
*/
public void error(String invalidExtensionName, String errorMessage) {
if (Objects.requireNonNull(invalidExtensionName).isEmpty())
throw new IllegalArgumentException("extension name must not be empty");
this.invalidExtensions.put(invalidExtensionName, errorMessage);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer;
import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.sasl.SaslException;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.auth.SaslExtensions;
import org.apache.kafka.common.security.auth.SaslExtensionsCallback;
import org.apache.kafka.common.security.oauthbearer.internals.OAuthBearerClientInitialResponse;
import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenRetriever;
import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenRetrieverFactory;
import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidator;
import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidatorFactory;
import org.apache.kafka.common.security.oauthbearer.internals.secured.JaasOptionsUtils;
import org.apache.kafka.common.security.oauthbearer.internals.secured.ValidateException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p>
* <code>OAuthBearerLoginCallbackHandler</code> is an {@link AuthenticateCallbackHandler} that
* accepts {@link OAuthBearerTokenCallback} and {@link SaslExtensionsCallback} callbacks to
* perform the steps to request a JWT from an OAuth/OIDC provider using the
* <code>client_credentials</code> grant type. This grant type is commonly used for non-interactive
* "service accounts" where there is no user available to interactively supply credentials.
* </p>
*
* <p>
* The <code>OAuthBearerLoginCallbackHandler</code> is used on the client side to retrieve a JWT
* and the {@link OAuthBearerValidatorCallbackHandler} is used on the broker to validate the JWT
* that was sent to it by the client to allow access. Both the brokers and clients will need to
* be configured with their appropriate callback handlers and respective configuration for OAuth
* functionality to work.
* </p>
*
* <p>
* Note that while this callback handler class must be specified for a Kafka client that wants to
* use OAuth functionality, in the case of OAuth-based inter-broker communication, the callback
* handler must be used on the Kafka broker side as well.
* </p>
*
* <p>
* This {@link AuthenticateCallbackHandler} is enabled by specifying its class name in the Kafka
* configuration. For client use, specify the class name in the
* {@link org.apache.kafka.common.config.SaslConfigs#SASL_LOGIN_CALLBACK_HANDLER_CLASS}
* configuration like so:
*
* <code>
* sasl.login.callback.handler.class=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
* </code>
* </p>
*
* <p>
* If using OAuth login on the broker side (for inter-broker communication), the callback handler
* class will be specified with a listener-based property:
* <code>listener.name.<listener name>.oauthbearer.sasl.login.callback.handler.class</code> like so:
*
* <code>
* listener.name.<listener name>.oauthbearer.sasl.login.callback.handler.class=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler
* </code>
* </p>
*
* <p>
* The Kafka configuration must also include JAAS configuration which includes the following
* OAuth-specific options:
*
* <ul>
* <li><code>clientId</code>OAuth client ID (required)</li>
* <li><code>clientSecret</code>OAuth client secret (required)</li>
* <li><code>scope</code>OAuth scope (optional)</li>
* </ul>
* </p>
*
* <p>
* The JAAS configuration can also include any SSL options that are needed. The configuration
* options are the same as those specified by the configuration in
* {@link org.apache.kafka.common.config.SslConfigs#addClientSslSupport(ConfigDef)}.
* </p>
*
* <p>
* Here's an example of the JAAS configuration for a Kafka client:
*
* <code>
* sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required \
* clientId="foo" \
* clientSecret="bar" \
* scope="baz" \
* ssl.protocol="SSL" ;
* </code>
* </p>
*
* <p>
* The configuration option
* {@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL}
* is also required in order for the client to contact the OAuth/OIDC provider. For example:
*
* <code>
* sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token
* </code>
*
* Please see the OAuth/OIDC provider's documentation for the token endpoint URL.
* </p>
*
* <p>
* The following is a list of all the configuration options that are available for the login
* callback handler:
*
* <ul>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_LOGIN_CALLBACK_HANDLER_CLASS}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_LOGIN_CONNECT_TIMEOUT_MS}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_LOGIN_READ_TIMEOUT_MS}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_LOGIN_RETRY_BACKOFF_MS}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_LOGIN_RETRY_BACKOFF_MAX_MS}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_JAAS_CONFIG}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_SCOPE_CLAIM_NAME}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_SUB_CLAIM_NAME}</li>
* </ul>
* </p>
*/
public class OAuthBearerLoginCallbackHandler implements AuthenticateCallbackHandler {
private static final Logger log = LoggerFactory.getLogger(OAuthBearerLoginCallbackHandler.class);
public static final String CLIENT_ID_CONFIG = "clientId";
public static final String CLIENT_SECRET_CONFIG = "clientSecret";
public static final String SCOPE_CONFIG = "scope";
public static final String CLIENT_ID_DOC = "The OAuth/OIDC identity provider-issued " +
"client ID to uniquely identify the service account to use for authentication for " +
"this client. The value must be paired with a corresponding " + CLIENT_SECRET_CONFIG + " " +
"value and is provided to the OAuth provider using the OAuth " +
"clientcredentials grant type.";
public static final String CLIENT_SECRET_DOC = "The OAuth/OIDC identity provider-issued " +
"client secret serves a similar function as a password to the " + CLIENT_ID_CONFIG + " " +
"account and identifies the service account to use for authentication for " +
"this client. The value must be paired with a corresponding " + CLIENT_ID_CONFIG + " " +
"value and is provided to the OAuth provider using the OAuth " +
"clientcredentials grant type.";
public static final String SCOPE_DOC = "The (optional) HTTP/HTTPS login request to the " +
"token endpoint (" + SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL + ") may need to specify an " +
"OAuth \"scope\". If so, the " + SCOPE_CONFIG + " is used to provide the value to " +
"include with the login request.";
private static final String EXTENSION_PREFIX = "extension_";
private Map<String, Object> moduleOptions;
private AccessTokenRetriever accessTokenRetriever;
private AccessTokenValidator accessTokenValidator;
private boolean isInitialized = false;
@Override
public void configure(Map<String, ?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries) {
moduleOptions = JaasOptionsUtils.getOptions(saslMechanism, jaasConfigEntries);
AccessTokenRetriever accessTokenRetriever = AccessTokenRetrieverFactory.create(configs, saslMechanism, moduleOptions);
AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs, saslMechanism);
init(accessTokenRetriever, accessTokenValidator);
}
public void init(AccessTokenRetriever accessTokenRetriever, AccessTokenValidator accessTokenValidator) {
this.accessTokenRetriever = accessTokenRetriever;
this.accessTokenValidator = accessTokenValidator;
try {
this.accessTokenRetriever.init();
} catch (IOException e) {
throw new KafkaException("The OAuth login configuration encountered an error when initializing the AccessTokenRetriever", e);
}
isInitialized = true;
}
/*
* Package-visible for testing.
*/
AccessTokenRetriever getAccessTokenRetriever() {
return accessTokenRetriever;
}
@Override
public void close() {
if (accessTokenRetriever != null) {
try {
this.accessTokenRetriever.close();
} catch (IOException e) {
log.warn("The OAuth login configuration encountered an error when closing the AccessTokenRetriever", e);
}
}
}
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
checkInitialized();
for (Callback callback : callbacks) {
if (callback instanceof OAuthBearerTokenCallback) {
handleTokenCallback((OAuthBearerTokenCallback) callback);
} else if (callback instanceof SaslExtensionsCallback) {
handleExtensionsCallback((SaslExtensionsCallback) callback);
} else {
throw new UnsupportedCallbackException(callback);
}
}
}
private void handleTokenCallback(OAuthBearerTokenCallback callback) throws IOException {
checkInitialized();
String accessToken = accessTokenRetriever.retrieve();
try {
OAuthBearerToken token = accessTokenValidator.validate(accessToken);
callback.token(token);
} catch (ValidateException e) {
log.warn(e.getMessage(), e);
callback.error("invalid_token", e.getMessage(), null);
}
}
private void handleExtensionsCallback(SaslExtensionsCallback callback) {
checkInitialized();
Map<String, String> extensions = new HashMap<>();
for (Map.Entry<String, Object> configEntry : this.moduleOptions.entrySet()) {
String key = configEntry.getKey();
if (!key.startsWith(EXTENSION_PREFIX))
continue;
Object valueRaw = configEntry.getValue();
String value;
if (valueRaw instanceof String)
value = (String) valueRaw;
else
value = String.valueOf(valueRaw);
extensions.put(key.substring(EXTENSION_PREFIX.length()), value);
}
SaslExtensions saslExtensions = new SaslExtensions(extensions);
try {
OAuthBearerClientInitialResponse.validateExtensions(saslExtensions);
} catch (SaslException e) {
throw new ConfigException(e.getMessage());
}
callback.extensions(saslExtensions);
}
private void checkInitialized() {
if (!isInitialized)
throw new IllegalStateException(String.format("To use %s, first call the configure or init method", getClass().getSimpleName()));
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginModule.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import java.util.Objects;
import javax.security.auth.Subject;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.login.LoginException;
import javax.security.auth.spi.LoginModule;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.auth.Login;
import org.apache.kafka.common.security.auth.SaslExtensionsCallback;
import org.apache.kafka.common.security.auth.SaslExtensions;
import org.apache.kafka.common.security.oauthbearer.internals.OAuthBearerSaslClientProvider;
import org.apache.kafka.common.security.oauthbearer.internals.OAuthBearerSaslServerProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* The {@code LoginModule} for the SASL/OAUTHBEARER mechanism. When a client
* (whether a non-broker client or a broker when SASL/OAUTHBEARER is the
* inter-broker protocol) connects to Kafka the {@code OAuthBearerLoginModule}
* instance asks its configured {@link AuthenticateCallbackHandler}
* implementation to handle an instance of {@link OAuthBearerTokenCallback} and
* return an instance of {@link OAuthBearerToken}. A default, builtin
* {@link AuthenticateCallbackHandler} implementation creates an unsecured token
* as defined by these JAAS module options:
* <p>
* <table>
* <tr>
* <th>JAAS Module Option for Unsecured Token Retrieval</th>
* <th>Documentation</th>
* </tr>
* <tr>
* <td>{@code unsecuredLoginStringClaim_<claimname>="value"}</td>
* <td>Creates a {@code String} claim with the given name and value. Any valid
* claim name can be specified except '{@code iat}' and '{@code exp}' (these are
* automatically generated).</td>
* </tr>
* <tr>
* <td>{@code unsecuredLoginNumberClaim_<claimname>="value"}</td>
* <td>Creates a {@code Number} claim with the given name and value. Any valid
* claim name can be specified except '{@code iat}' and '{@code exp}' (these are
* automatically generated).</td>
* </tr>
* <tr>
* <td>{@code unsecuredLoginListClaim_<claimname>="value"}</td>
* <td>Creates a {@code String List} claim with the given name and values parsed
* from the given value where the first character is taken as the delimiter. For
* example: {@code unsecuredLoginListClaim_fubar="|value1|value2"}. Any valid
* claim name can be specified except '{@code iat}' and '{@code exp}' (these are
* automatically generated).</td>
* </tr>
* <tr>
* <td>{@code unsecuredLoginPrincipalClaimName}</td>
* <td>Set to a custom claim name if you wish the name of the {@code String}
* claim holding the principal name to be something other than
* '{@code sub}'.</td>
* </tr>
* <tr>
* <td>{@code unsecuredLoginLifetimeSeconds}</td>
* <td>Set to an integer value if the token expiration is to be set to something
* other than the default value of 3600 seconds (which is 1 hour). The
* '{@code exp}' claim will be set to reflect the expiration time.</td>
* </tr>
* <tr>
* <td>{@code unsecuredLoginScopeClaimName}</td>
* <td>Set to a custom claim name if you wish the name of the {@code String} or
* {@code String List} claim holding any token scope to be something other than
* '{@code scope}'.</td>
* </tr>
* </table>
* <p>
* <p>
* You can also add custom unsecured SASL extensions when using the default, builtin {@link AuthenticateCallbackHandler}
* implementation by using the configurable option {@code unsecuredLoginExtension_<extensionname>}. Note that there
* are validations for the key/values in order to conform to the SASL/OAUTHBEARER standard
* (https://tools.ietf.org/html/rfc7628#section-3.1), including the reserved key at
* {@link org.apache.kafka.common.security.oauthbearer.internals.OAuthBearerClientInitialResponse#AUTH_KEY}.
* The {@code OAuthBearerLoginModule} instance also asks its configured {@link AuthenticateCallbackHandler}
* implementation to handle an instance of {@link SaslExtensionsCallback} and return an instance of {@link SaslExtensions}.
* The configured callback handler does not need to handle this callback, though -- any {@code UnsupportedCallbackException}
* that is thrown is ignored, and no SASL extensions will be associated with the login.
* <p>
* Production use cases will require writing an implementation of
* {@link AuthenticateCallbackHandler} that can handle an instance of
* {@link OAuthBearerTokenCallback} and declaring it via either the
* {@code sasl.login.callback.handler.class} configuration option for a
* non-broker client or via the
* {@code listener.name.sasl_ssl.oauthbearer.sasl.login.callback.handler.class}
* configuration option for brokers (when SASL/OAUTHBEARER is the inter-broker
* protocol).
* <p>
* This class stores the retrieved {@link OAuthBearerToken} in the
* {@code Subject}'s private credentials where the {@code SaslClient} can
* retrieve it. An appropriate, builtin {@code SaslClient} implementation is
* automatically used and configured such that it can perform that retrieval.
* <p>
* Here is a typical, basic JAAS configuration for a client leveraging unsecured
* SASL/OAUTHBEARER authentication:
*
* <pre>
* KafkaClient {
* org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required
* unsecuredLoginStringClaim_sub="thePrincipalName";
* };
* </pre>
*
* An implementation of the {@link Login} interface specific to the
* {@code OAUTHBEARER} mechanism is automatically applied; it periodically
* refreshes any token before it expires so that the client can continue to make
* connections to brokers. The parameters that impact how the refresh algorithm
* operates are specified as part of the producer/consumer/broker configuration
* and are as follows. See the documentation for these properties elsewhere for
* details.
* <p>
* <table>
* <tr>
* <th>Producer/Consumer/Broker Configuration Property</th>
* </tr>
* <tr>
* <td>{@code sasl.login.refresh.window.factor}</td>
* </tr>
* <tr>
* <td>{@code sasl.login.refresh.window.jitter}</td>
* </tr>
* <tr>
* <td>{@code sasl.login.refresh.min.period.seconds}</td>
* </tr>
* <tr>
* <td>{@code sasl.login.refresh.min.buffer.seconds}</td>
* </tr>
* </table>
* <p>
* When a broker accepts a SASL/OAUTHBEARER connection the instance of the
* builtin {@code SaslServer} implementation asks its configured
* {@link AuthenticateCallbackHandler} implementation to handle an instance of
* {@link OAuthBearerValidatorCallback} constructed with the OAuth 2 Bearer
* Token's compact serialization and return an instance of
* {@link OAuthBearerToken} if the value validates. A default, builtin
* {@link AuthenticateCallbackHandler} implementation validates an unsecured
* token as defined by these JAAS module options:
* <p>
* <table>
* <tr>
* <th>JAAS Module Option for Unsecured Token Validation</th>
* <th>Documentation</th>
* </tr>
* <tr>
* <td>{@code unsecuredValidatorPrincipalClaimName="value"}</td>
* <td>Set to a non-empty value if you wish a particular {@code String} claim
* holding a principal name to be checked for existence; the default is to check
* for the existence of the '{@code sub}' claim.</td>
* </tr>
* <tr>
* <td>{@code unsecuredValidatorScopeClaimName="value"}</td>
* <td>Set to a custom claim name if you wish the name of the {@code String} or
* {@code String List} claim holding any token scope to be something other than
* '{@code scope}'.</td>
* </tr>
* <tr>
* <td>{@code unsecuredValidatorRequiredScope="value"}</td>
* <td>Set to a space-delimited list of scope values if you wish the
* {@code String/String List} claim holding the token scope to be checked to
* make sure it contains certain values.</td>
* </tr>
* <tr>
* <td>{@code unsecuredValidatorAllowableClockSkewMs="value"}</td>
* <td>Set to a positive integer value if you wish to allow up to some number of
* positive milliseconds of clock skew (the default is 0).</td>
* </tr>
* </table>
* <p>
* Here is a typical, basic JAAS configuration for a broker leveraging unsecured
* SASL/OAUTHBEARER validation:
*
* <pre>
* KafkaServer {
* org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required
* unsecuredLoginStringClaim_sub="thePrincipalName";
* };
* </pre>
*
* Production use cases will require writing an implementation of
* {@link AuthenticateCallbackHandler} that can handle an instance of
* {@link OAuthBearerValidatorCallback} and declaring it via the
* {@code listener.name.sasl_ssl.oauthbearer.sasl.server.callback.handler.class}
* broker configuration option.
* <p>
* The builtin {@code SaslServer} implementation for SASL/OAUTHBEARER in Kafka
* makes the instance of {@link OAuthBearerToken} available upon successful
* authentication via the negotiated property "{@code OAUTHBEARER.token}"; the
* token could be used in a custom authorizer (to authorize based on JWT claims
* rather than ACLs, for example).
* <p>
* This implementation's {@code logout()} method will log out the specific token
* that this instance logged in if its {@code Subject} instance is shared
* across multiple {@code LoginContext}s and there happen to be multiple tokens
* on the {@code Subject}. This functionality is useful because it means a new
* token with a longer lifetime can be created before a soon-to-expire token is
* actually logged out. Otherwise, if multiple simultaneous tokens were not
* supported like this, the soon-to-be expired token would have to be logged out
* first, and then if the new token could not be retrieved (maybe the
* authorization server is temporarily unavailable, for example) the client
* would be left without a token and would be unable to create new connections.
* Better to mitigate this possibility by leaving the existing token (which
* still has some lifetime left) in place until a new replacement token is
* actually retrieved. This implementation supports this.
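* <p>
* A login/commit/logout sketch using the standard JAAS API (the context name
* "KafkaClient" matches the JAAS example above; {@code subject},
* {@code callbackHandler} and {@code configuration} are assumed to be prepared
* by the caller):
* <pre>{@code
* LoginContext loginContext = new LoginContext("KafkaClient", subject, callbackHandler, configuration);
* loginContext.login();  // drives login() and then commit() on this module
* // ... use the Subject's private credentials ...
* loginContext.logout();
* }</pre>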
*
* @see SaslConfigs#SASL_LOGIN_REFRESH_WINDOW_FACTOR_DOC
* @see SaslConfigs#SASL_LOGIN_REFRESH_WINDOW_JITTER_DOC
* @see SaslConfigs#SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS_DOC
* @see SaslConfigs#SASL_LOGIN_REFRESH_BUFFER_SECONDS_DOC
*/
public class OAuthBearerLoginModule implements LoginModule {
/**
* Login state transitions:
* Initial state: NOT_LOGGED_IN
* login() : NOT_LOGGED_IN => LOGGED_IN_NOT_COMMITTED
* commit() : LOGGED_IN_NOT_COMMITTED => COMMITTED
* abort() : LOGGED_IN_NOT_COMMITTED => NOT_LOGGED_IN
* logout() : Any state => NOT_LOGGED_IN
*/
private enum LoginState {
NOT_LOGGED_IN,
LOGGED_IN_NOT_COMMITTED,
COMMITTED
}
/**
* The SASL Mechanism name for OAuth 2: {@code OAUTHBEARER}
*/
public static final String OAUTHBEARER_MECHANISM = "OAUTHBEARER";
private static final Logger log = LoggerFactory.getLogger(OAuthBearerLoginModule.class);
private static final SaslExtensions EMPTY_EXTENSIONS = new SaslExtensions(Collections.emptyMap());
private Subject subject = null;
private AuthenticateCallbackHandler callbackHandler = null;
private OAuthBearerToken tokenRequiringCommit = null;
private OAuthBearerToken myCommittedToken = null;
private SaslExtensions extensionsRequiringCommit = null;
private SaslExtensions myCommittedExtensions = null;
private LoginState loginState;
static {
OAuthBearerSaslClientProvider.initialize(); // not part of public API
OAuthBearerSaslServerProvider.initialize(); // not part of public API
}
@Override
public void initialize(Subject subject, CallbackHandler callbackHandler, Map<String, ?> sharedState,
Map<String, ?> options) {
this.subject = Objects.requireNonNull(subject);
if (!(Objects.requireNonNull(callbackHandler) instanceof AuthenticateCallbackHandler))
throw new IllegalArgumentException(String.format("Callback handler must be castable to %s: %s",
AuthenticateCallbackHandler.class.getName(), callbackHandler.getClass().getName()));
this.callbackHandler = (AuthenticateCallbackHandler) callbackHandler;
}
@Override
public boolean login() throws LoginException {
if (loginState == LoginState.LOGGED_IN_NOT_COMMITTED) {
if (tokenRequiringCommit != null)
throw new IllegalStateException(String.format(
"Already have an uncommitted token with private credential token count=%d", committedTokenCount()));
else
throw new IllegalStateException("Already logged in without a token");
}
if (loginState == LoginState.COMMITTED) {
if (myCommittedToken != null)
throw new IllegalStateException(String.format(
"Already have a committed token with private credential token count=%d; must login on another login context or logout here first before reusing the same login context",
committedTokenCount()));
else
throw new IllegalStateException("Login has already been committed without a token");
}
identifyToken();
if (tokenRequiringCommit != null)
identifyExtensions();
else
log.debug("Logged in without a token, this login cannot be used to establish client connections");
loginState = LoginState.LOGGED_IN_NOT_COMMITTED;
log.debug("Login succeeded; invoke commit() to commit it; current committed token count={}",
committedTokenCount());
return true;
}
private void identifyToken() throws LoginException {
OAuthBearerTokenCallback tokenCallback = new OAuthBearerTokenCallback();
try {
callbackHandler.handle(new Callback[] {tokenCallback});
} catch (IOException | UnsupportedCallbackException e) {
log.error(e.getMessage(), e);
throw new LoginException("An internal error occurred while retrieving token from callback handler");
}
tokenRequiringCommit = tokenCallback.token();
if (tokenCallback.errorCode() != null) {
log.info("Login failed: {} : {} (URI={})", tokenCallback.errorCode(), tokenCallback.errorDescription(),
tokenCallback.errorUri());
throw new LoginException(tokenCallback.errorDescription());
}
}
/**
* Attaches SASL extensions to the Subject
*/
private void identifyExtensions() throws LoginException {
SaslExtensionsCallback extensionsCallback = new SaslExtensionsCallback();
try {
callbackHandler.handle(new Callback[] {extensionsCallback});
extensionsRequiringCommit = extensionsCallback.extensions();
} catch (IOException e) {
log.error(e.getMessage(), e);
throw new LoginException("An internal error occurred while retrieving SASL extensions from callback handler");
} catch (UnsupportedCallbackException e) {
extensionsRequiringCommit = EMPTY_EXTENSIONS;
log.debug("CallbackHandler {} does not support SASL extensions. No extensions will be added", callbackHandler.getClass().getName());
}
if (extensionsRequiringCommit == null) {
log.error("SASL Extensions cannot be null. Check whether your callback handler is explicitly setting them as null.");
throw new LoginException("Extensions cannot be null.");
}
}
@Override
public boolean logout() {
if (loginState == LoginState.LOGGED_IN_NOT_COMMITTED)
throw new IllegalStateException(
"Cannot call logout() immediately after login(); need to first invoke commit() or abort()");
if (loginState != LoginState.COMMITTED) {
log.debug("Nothing here to log out");
return false;
}
if (myCommittedToken != null) {
log.trace("Logging out my token; current committed token count = {}", committedTokenCount());
for (Iterator<Object> iterator = subject.getPrivateCredentials().iterator(); iterator.hasNext(); ) {
Object privateCredential = iterator.next();
if (privateCredential == myCommittedToken) {
iterator.remove();
myCommittedToken = null;
break;
}
}
log.debug("Done logging out my token; committed token count is now {}", committedTokenCount());
} else
log.debug("No tokens to logout for this login");
if (myCommittedExtensions != null) {
log.trace("Logging out my extensions");
if (subject.getPublicCredentials().removeIf(e -> myCommittedExtensions == e))
myCommittedExtensions = null;
log.debug("Done logging out my extensions");
} else
log.debug("No extensions to logout for this login");
loginState = LoginState.NOT_LOGGED_IN;
return true;
}
@Override
public boolean commit() {
if (loginState != LoginState.LOGGED_IN_NOT_COMMITTED) {
log.debug("Nothing here to commit");
return false;
}
if (tokenRequiringCommit != null) {
log.trace("Committing my token; current committed token count = {}", committedTokenCount());
subject.getPrivateCredentials().add(tokenRequiringCommit);
myCommittedToken = tokenRequiringCommit;
tokenRequiringCommit = null;
log.debug("Done committing my token; committed token count is now {}", committedTokenCount());
} else
log.debug("No tokens to commit, this login cannot be used to establish client connections");
if (extensionsRequiringCommit != null) {
subject.getPublicCredentials().add(extensionsRequiringCommit);
myCommittedExtensions = extensionsRequiringCommit;
extensionsRequiringCommit = null;
}
loginState = LoginState.COMMITTED;
return true;
}
@Override
public boolean abort() {
if (loginState == LoginState.LOGGED_IN_NOT_COMMITTED) {
log.debug("Login aborted");
tokenRequiringCommit = null;
extensionsRequiringCommit = null;
loginState = LoginState.NOT_LOGGED_IN;
return true;
}
log.debug("Nothing here to abort");
return false;
}
private int committedTokenCount() {
return subject.getPrivateCredentials(OAuthBearerToken.class).size();
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/OAuthBearerToken.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer;
import java.util.Set;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* The <code>b64token</code> value as defined in
* <a href="https://tools.ietf.org/html/rfc6750#section-2.1">RFC 6750 Section
* 2.1</a> along with the token's specific scope and lifetime and principal
* name.
* <p>
* A network request would be required to re-hydrate an opaque token, and that
* could result in (for example) an {@code IOException}, but retrievers for
* various attributes ({@link #scope()}, {@link #lifetimeMs()}, etc.) declare no
* exceptions. Therefore, if a network request is required for any of these
* retriever methods, that request could be performed at construction time so
* that the various attributes can be reliably provided thereafter. For example,
* a constructor might declare {@code throws IOException} in such a case.
* Alternatively, the retrievers could throw unchecked exceptions.
* <p>
* This interface was introduced in 2.0.0 and, while it feels stable, it could
* evolve. We will try to evolve the API in a compatible manner (easier now that
* Java 7, which lacked default methods, no longer has to be supported), but we
* reserve the right to make breaking changes in minor releases, if necessary.
* We will update the {@code InterfaceStability} annotation and this notice once
* the API is considered stable.
*
* @see <a href="https://tools.ietf.org/html/rfc6749#section-1.4">RFC 6749
* Section 1.4</a> and
* <a href="https://tools.ietf.org/html/rfc6750#section-2.1">RFC 6750
* Section 2.1</a>
*/
@InterfaceStability.Evolving
public interface OAuthBearerToken {
/**
* The <code>b64token</code> value as defined in
* <a href="https://tools.ietf.org/html/rfc6750#section-2.1">RFC 6750 Section
* 2.1</a>
*
* @return <code>b64token</code> value as defined in
* <a href="https://tools.ietf.org/html/rfc6750#section-2.1">RFC 6750
* Section 2.1</a>
*/
String value();
/**
* The token's scope of access, as per
* <a href="https://tools.ietf.org/html/rfc6749#section-1.4">RFC 6749 Section
* 1.4</a>
*
* @return the token's (always non-null but potentially empty) scope of access,
* as per <a href="https://tools.ietf.org/html/rfc6749#section-1.4">RFC
* 6749 Section 1.4</a>. Note that all values in the returned set will
* be trimmed of preceding and trailing whitespace, and the result will
* never contain the empty string.
*/
Set<String> scope();
/**
* The token's lifetime, expressed as the number of milliseconds since the
* epoch, as per <a href="https://tools.ietf.org/html/rfc6749#section-1.4">RFC
* 6749 Section 1.4</a>
*
* @return the token's lifetime, expressed as the number of milliseconds since
* the epoch, as per
* <a href="https://tools.ietf.org/html/rfc6749#section-1.4">RFC 6749
* Section 1.4</a>.
*/
long lifetimeMs();
/**
* The name of the principal to which this credential applies
*
* @return the always non-null/non-empty principal name
*/
String principalName();
/**
* When the credential became valid, in terms of the number of milliseconds
* since the epoch, if known, otherwise null. An expiring credential may not
* necessarily indicate when it was created -- just when it expires -- so we
* need to support a null return value here.
*
* @return the time when the credential became valid, in terms of the number of
* milliseconds since the epoch, if known, otherwise null
*/
Long startTimeMs();
}
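/*
* Illustrative sketch only, not part of the Kafka API: a minimal, immutable
* implementation of this interface such as a test might use. All names below
* are hypothetical.
*/
class ExampleOAuthBearerToken implements OAuthBearerToken {
private final String value;
private final long lifetimeMs;
private final String principalName;
ExampleOAuthBearerToken(String value, long lifetimeMs, String principalName) {
this.value = value;
this.lifetimeMs = lifetimeMs;
this.principalName = principalName;
}
@Override
public String value() {
return value;
}
@Override
public Set<String> scope() {
// The contract requires a non-null set; empty means no scope was granted
return java.util.Collections.emptySet();
}
@Override
public long lifetimeMs() {
return lifetimeMs;
}
@Override
public String principalName() {
return principalName;
}
@Override
public Long startTimeMs() {
// Null is explicitly permitted when the creation time is unknown
return null;
}
}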
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/OAuthBearerTokenCallback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer;
import java.util.Objects;
import javax.security.auth.callback.Callback;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* A {@code Callback} for use by the {@code SaslClient} and {@code Login}
* implementations when they require an OAuth 2 bearer token. Callback handlers
* should use the {@link #error(String, String, String)} method to communicate
* errors returned by the authorization server as per
* <a href="https://tools.ietf.org/html/rfc6749#section-5.2">RFC 6749: The OAuth
* 2.0 Authorization Framework</a>. Callback handlers should communicate other
* problems by raising an {@code IOException}.
* <p>
* This class was introduced in 2.0.0 and, while it feels stable, it could
* evolve. We will try to evolve the API in a compatible manner, but we reserve
* the right to make breaking changes in minor releases, if necessary. We will
* update the {@code InterfaceStability} annotation and this notice once the API
* is considered stable.
*/
@InterfaceStability.Evolving
public class OAuthBearerTokenCallback implements Callback {
private OAuthBearerToken token = null;
private String errorCode = null;
private String errorDescription = null;
private String errorUri = null;
/**
* Return the (potentially null) token
*
* @return the (potentially null) token
*/
public OAuthBearerToken token() {
return token;
}
/**
* Return the optional (but always non-empty if not null) error code as per
* <a href="https://tools.ietf.org/html/rfc6749#section-5.2">RFC 6749: The OAuth
* 2.0 Authorization Framework</a>.
*
* @return the optional (but always non-empty if not null) error code
*/
public String errorCode() {
return errorCode;
}
/**
* Return the (potentially null) error description as per
* <a href="https://tools.ietf.org/html/rfc6749#section-5.2">RFC 6749: The OAuth
* 2.0 Authorization Framework</a>.
*
* @return the (potentially null) error description
*/
public String errorDescription() {
return errorDescription;
}
/**
* Return the (potentially null) error URI as per
* <a href="https://tools.ietf.org/html/rfc6749#section-5.2">RFC 6749: The OAuth
* 2.0 Authorization Framework</a>.
*
* @return the (potentially null) error URI
*/
public String errorUri() {
return errorUri;
}
/**
* Set the token. All error-related values are cleared.
*
* @param token
* the optional token to set
*/
public void token(OAuthBearerToken token) {
this.token = token;
this.errorCode = null;
this.errorDescription = null;
this.errorUri = null;
}
/**
* Set the error values as per
* <a href="https://tools.ietf.org/html/rfc6749#section-5.2">RFC 6749: The OAuth
* 2.0 Authorization Framework</a>. Any token is cleared.
*
* @param errorCode
* the mandatory error code to set
* @param errorDescription
* the optional error description to set
* @param errorUri
* the optional error URI to set
*/
public void error(String errorCode, String errorDescription, String errorUri) {
if (Objects.requireNonNull(errorCode).isEmpty())
throw new IllegalArgumentException("error code must not be empty");
this.errorCode = errorCode;
this.errorDescription = errorDescription;
this.errorUri = errorUri;
this.token = null;
}
}
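/*
* Illustrative sketch only: how a custom client callback handler might populate
* this callback, setting either a token on success or RFC 6749 error values on
* failure. The class and method names are hypothetical.
*/
class ExampleTokenCallbackUsage {
void populate(OAuthBearerTokenCallback callback, OAuthBearerToken retrieved) {
if (retrieved != null)
callback.token(retrieved); // also clears any previously set error values
else
callback.error("invalid_request", "no token could be retrieved", null);
}
}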
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallback.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer;
import java.util.Objects;
import javax.security.auth.callback.Callback;
import org.apache.kafka.common.annotation.InterfaceStability;
/**
* A {@code Callback} for use by the {@code SaslServer} implementation when it
* needs to provide an OAuth 2 bearer token compact serialization for
* validation. Callback handlers should use the
* {@link #error(String, String, String)} method to communicate errors back to
* the SASL Client as per
* <a href="https://tools.ietf.org/html/rfc6749#section-5.2">RFC 6749: The OAuth
* 2.0 Authorization Framework</a> and the <a href=
* "https://www.iana.org/assignments/oauth-parameters/oauth-parameters.xhtml#extensions-error">IANA
* OAuth Extensions Error Registry</a>. Callback handlers should communicate
* other problems by raising an {@code IOException}.
* <p>
* This class was introduced in 2.0.0 and, while it feels stable, it could
* evolve. We will try to evolve the API in a compatible manner, but we reserve
* the right to make breaking changes in minor releases, if necessary. We will
* update the {@code InterfaceStability} annotation and this notice once the API
* is considered stable.
*/
@InterfaceStability.Evolving
public class OAuthBearerValidatorCallback implements Callback {
private final String tokenValue;
private OAuthBearerToken token = null;
private String errorStatus = null;
private String errorScope = null;
private String errorOpenIDConfiguration = null;
/**
* Constructor
*
* @param tokenValue
* the mandatory/non-blank token value
*/
public OAuthBearerValidatorCallback(String tokenValue) {
if (Objects.requireNonNull(tokenValue).isEmpty())
throw new IllegalArgumentException("token value must not be empty");
this.tokenValue = tokenValue;
}
/**
* Return the (always non-null) token value
*
* @return the (always non-null) token value
*/
public String tokenValue() {
return tokenValue;
}
/**
* Return the (potentially null) token
*
* @return the (potentially null) token
*/
public OAuthBearerToken token() {
return token;
}
/**
* Return the (potentially null) error status value as per
* <a href="https://tools.ietf.org/html/rfc7628#section-3.2.2">RFC 7628: A Set
* of Simple Authentication and Security Layer (SASL) Mechanisms for OAuth</a>
* and the <a href=
* "https://www.iana.org/assignments/oauth-parameters/oauth-parameters.xhtml#extensions-error">IANA
* OAuth Extensions Error Registry</a>.
*
* @return the (potentially null) error status value
*/
public String errorStatus() {
return errorStatus;
}
/**
* Return the (potentially null) error scope value as per
* <a href="https://tools.ietf.org/html/rfc7628#section-3.2.2">RFC 7628: A Set
* of Simple Authentication and Security Layer (SASL) Mechanisms for OAuth</a>.
*
* @return the (potentially null) error scope value
*/
public String errorScope() {
return errorScope;
}
/**
* Return the (potentially null) error openid-configuration value as per
* <a href="https://tools.ietf.org/html/rfc7628#section-3.2.2">RFC 7628: A Set
* of Simple Authentication and Security Layer (SASL) Mechanisms for OAuth</a>.
*
* @return the (potentially null) error openid-configuration value
*/
public String errorOpenIDConfiguration() {
return errorOpenIDConfiguration;
}
/**
* Set the token. The token value is unchanged and is expected to match the
* provided token's value. All error values are cleared.
*
* @param token
* the mandatory token to set
*/
public void token(OAuthBearerToken token) {
this.token = Objects.requireNonNull(token);
this.errorStatus = null;
this.errorScope = null;
this.errorOpenIDConfiguration = null;
}
/**
* Set the error values as per
* <a href="https://tools.ietf.org/html/rfc7628#section-3.2.2">RFC 7628: A Set
* of Simple Authentication and Security Layer (SASL) Mechanisms for OAuth</a>.
* Any token is cleared.
*
* @param errorStatus
* the mandatory error status value from the <a href=
* "https://www.iana.org/assignments/oauth-parameters/oauth-parameters.xhtml#extensions-error">IANA
* OAuth Extensions Error Registry</a> to set
* @param errorScope
* the optional error scope value to set
* @param errorOpenIDConfiguration
* the optional error openid-configuration value to set
*/
public void error(String errorStatus, String errorScope, String errorOpenIDConfiguration) {
if (Objects.requireNonNull(errorStatus).isEmpty())
throw new IllegalArgumentException("error status must not be empty");
this.errorStatus = errorStatus;
this.errorScope = errorScope;
this.errorOpenIDConfiguration = errorOpenIDConfiguration;
this.token = null;
}
}
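/*
* Illustrative sketch only: the broker-side population pattern for this
* callback. The class and method names are hypothetical.
*/
class ExampleValidatorCallbackUsage {
void populate(OAuthBearerValidatorCallback callback, OAuthBearerToken validated) {
if (validated != null)
callback.token(validated); // success: all error values are cleared
else
// the error status must come from the IANA OAuth Extensions Error Registry
callback.error("invalid_token", null, null);
}
}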
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer;
import java.io.IOException;
import java.security.Key;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicInteger;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.login.AppConfigurationEntry;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidator;
import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidatorFactory;
import org.apache.kafka.common.security.oauthbearer.internals.secured.CloseableVerificationKeyResolver;
import org.apache.kafka.common.security.oauthbearer.internals.secured.JaasOptionsUtils;
import org.apache.kafka.common.security.oauthbearer.internals.secured.RefreshingHttpsJwksVerificationKeyResolver;
import org.apache.kafka.common.security.oauthbearer.internals.secured.ValidateException;
import org.apache.kafka.common.security.oauthbearer.internals.secured.VerificationKeyResolverFactory;
import org.jose4j.jws.JsonWebSignature;
import org.jose4j.jwx.JsonWebStructure;
import org.jose4j.lang.UnresolvableKeyException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p>
* <code>OAuthBearerValidatorCallbackHandler</code> is an {@link AuthenticateCallbackHandler} that
* accepts {@link OAuthBearerValidatorCallback} and {@link OAuthBearerExtensionsValidatorCallback}
* callbacks to implement OAuth/OIDC validation. This callback handler is intended only to be used
* on the Kafka broker side as it will receive a {@link OAuthBearerValidatorCallback} that includes
* the JWT provided by the Kafka client. That JWT is validated in terms of format, expiration,
* signature, and audience and issuer (if desired). This callback handler is the broker side of the
* OAuth functionality, whereas {@link OAuthBearerLoginCallbackHandler} is used by clients.
* </p>
*
* <p>
* This {@link AuthenticateCallbackHandler} is enabled in the broker configuration by setting the
* {@link org.apache.kafka.common.config.internals.BrokerSecurityConfigs#SASL_SERVER_CALLBACK_HANDLER_CLASS}
* like so:
*
* <code>
* listener.name.&lt;listener name&gt;.oauthbearer.sasl.server.callback.handler.class=org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler
* </code>
* </p>
*
* <p>
* The JAAS configuration for OAuth is also needed. If using OAuth for inter-broker communication,
* the options are those specified in {@link OAuthBearerLoginCallbackHandler}.
* </p>
*
* <p>
* The configuration option
* {@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_JWKS_ENDPOINT_URL}
* is also required in order to contact the OAuth/OIDC provider to retrieve the JWKS for use in
* JWT signature validation. For example:
*
* <code>
* listener.name.&lt;listener name&gt;.oauthbearer.sasl.oauthbearer.jwks.endpoint.url=https://example.com/oauth2/v1/keys
* </code>
*
* Please see the OAuth/OIDC providers documentation for the JWKS endpoint URL.
* </p>
*
* <p>
* The following is a list of all the configuration options that are available for the broker
* validation callback handler:
*
* <ul>
* <li>{@link org.apache.kafka.common.config.internals.BrokerSecurityConfigs#SASL_SERVER_CALLBACK_HANDLER_CLASS}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_JAAS_CONFIG}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_EXPECTED_AUDIENCE}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_EXPECTED_ISSUER}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_JWKS_ENDPOINT_URL}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_SCOPE_CLAIM_NAME}</li>
* <li>{@link org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_SUB_CLAIM_NAME}</li>
* </ul>
* </p>
*/
public class OAuthBearerValidatorCallbackHandler implements AuthenticateCallbackHandler {
private static final Logger log = LoggerFactory.getLogger(OAuthBearerValidatorCallbackHandler.class);
/**
* Because a {@link CloseableVerificationKeyResolver} instance can spawn threads and issue
* HTTP(S) calls ({@link RefreshingHttpsJwksVerificationKeyResolver}), we only want to create
* a new instance for each distinct set of configuration. Because multiple callback handler
* instances may share the same configuration, they all reuse that single resolver instance.
*/
private static final Map<VerificationKeyResolverKey, CloseableVerificationKeyResolver> VERIFICATION_KEY_RESOLVER_CACHE = new HashMap<>();
private CloseableVerificationKeyResolver verificationKeyResolver;
private AccessTokenValidator accessTokenValidator;
private boolean isInitialized = false;
@Override
public void configure(Map<String, ?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries) {
Map<String, Object> moduleOptions = JaasOptionsUtils.getOptions(saslMechanism, jaasConfigEntries);
CloseableVerificationKeyResolver verificationKeyResolver;
// Here's the logic which keeps our VerificationKeyResolvers down to a single instance.
synchronized (VERIFICATION_KEY_RESOLVER_CACHE) {
VerificationKeyResolverKey key = new VerificationKeyResolverKey(configs, moduleOptions);
verificationKeyResolver = VERIFICATION_KEY_RESOLVER_CACHE.computeIfAbsent(key, k ->
new RefCountingVerificationKeyResolver(VerificationKeyResolverFactory.create(configs, saslMechanism, moduleOptions)));
}
AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs, saslMechanism, verificationKeyResolver);
init(verificationKeyResolver, accessTokenValidator);
}
public void init(CloseableVerificationKeyResolver verificationKeyResolver, AccessTokenValidator accessTokenValidator) {
this.verificationKeyResolver = verificationKeyResolver;
this.accessTokenValidator = accessTokenValidator;
try {
verificationKeyResolver.init();
} catch (Exception e) {
throw new KafkaException("The OAuth validator configuration encountered an error when initializing the VerificationKeyResolver", e);
}
isInitialized = true;
}
@Override
public void close() {
if (verificationKeyResolver != null) {
try {
verificationKeyResolver.close();
} catch (Exception e) {
log.error(e.getMessage(), e);
}
}
}
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
checkInitialized();
for (Callback callback : callbacks) {
if (callback instanceof OAuthBearerValidatorCallback) {
handleValidatorCallback((OAuthBearerValidatorCallback) callback);
} else if (callback instanceof OAuthBearerExtensionsValidatorCallback) {
handleExtensionsValidatorCallback((OAuthBearerExtensionsValidatorCallback) callback);
} else {
throw new UnsupportedCallbackException(callback);
}
}
}
private void handleValidatorCallback(OAuthBearerValidatorCallback callback) {
checkInitialized();
OAuthBearerToken token;
try {
token = accessTokenValidator.validate(callback.tokenValue());
callback.token(token);
} catch (ValidateException e) {
log.warn(e.getMessage(), e);
callback.error("invalid_token", null, null);
}
}
private void handleExtensionsValidatorCallback(OAuthBearerExtensionsValidatorCallback extensionsValidatorCallback) {
checkInitialized();
extensionsValidatorCallback.inputExtensions().map().forEach((extensionName, v) -> extensionsValidatorCallback.valid(extensionName));
}
private void checkInitialized() {
if (!isInitialized)
throw new IllegalStateException(String.format("To use %s, first call the configure or init method", getClass().getSimpleName()));
}
/**
* <code>VerificationKeyResolverKey</code> is a simple structure which encapsulates the criteria for different
* sets of configuration. This will allow us to use this object as a key in a {@link Map}
* to keep a single instance per key.
*/
private static class VerificationKeyResolverKey {
private final Map<String, ?> configs;
private final Map<String, Object> moduleOptions;
public VerificationKeyResolverKey(Map<String, ?> configs, Map<String, Object> moduleOptions) {
this.configs = configs;
this.moduleOptions = moduleOptions;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
VerificationKeyResolverKey that = (VerificationKeyResolverKey) o;
return configs.equals(that.configs) && moduleOptions.equals(that.moduleOptions);
}
@Override
public int hashCode() {
return Objects.hash(configs, moduleOptions);
}
}
/**
* <code>RefCountingVerificationKeyResolver</code> allows us to share a single
* {@link CloseableVerificationKeyResolver} instance between multiple
* {@link AuthenticateCallbackHandler} instances and perform the lifecycle methods the
* appropriate number of times.
*/
private static class RefCountingVerificationKeyResolver implements CloseableVerificationKeyResolver {
private final CloseableVerificationKeyResolver delegate;
private final AtomicInteger count = new AtomicInteger(0);
public RefCountingVerificationKeyResolver(CloseableVerificationKeyResolver delegate) {
this.delegate = delegate;
}
@Override
public Key resolveKey(JsonWebSignature jws, List<JsonWebStructure> nestingContext) throws UnresolvableKeyException {
return delegate.resolveKey(jws, nestingContext);
}
@Override
public void init() throws IOException {
if (count.incrementAndGet() == 1)
delegate.init();
}
@Override
public void close() throws IOException {
if (count.decrementAndGet() == 0)
delegate.close();
}
}
}
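/*
* Illustrative sketch only: driving the handler directly, e.g. from a test,
* after configure() has been called. The class, method, and jwt parameter
* names are hypothetical.
*/
class ExampleValidatorHandlerUsage {
void validate(OAuthBearerValidatorCallbackHandler handler, String jwt) throws Exception {
OAuthBearerValidatorCallback callback = new OAuthBearerValidatorCallback(jwt);
handler.handle(new Callback[] {callback});
if (callback.token() == null)
throw new IllegalStateException("Validation failed with status: " + callback.errorStatus());
}
}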
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/package-info.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Provides a {@link javax.security.auth.spi.LoginModule} for using OAuth Bearer Token authentication with Kafka clusters.
*/
package org.apache.kafka.common.security.oauthbearer; |
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerClientInitialResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer.internals;
import org.apache.kafka.common.security.auth.SaslExtensions;
import org.apache.kafka.common.utils.Utils;
import javax.security.sasl.SaslException;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.Objects;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class OAuthBearerClientInitialResponse {
static final String SEPARATOR = "\u0001";
private static final String SASLNAME = "(?:[\\x01-\\x7F&&[^=,]]|=2C|=3D)+";
private static final String KEY = "[A-Za-z]+";
private static final String VALUE = "[\\x21-\\x7E \t\r\n]+";
private static final String KVPAIRS = String.format("(%s=%s%s)*", KEY, VALUE, SEPARATOR);
private static final Pattern AUTH_PATTERN = Pattern.compile("(?<scheme>[\\w]+)[ ]+(?<token>[-_\\.a-zA-Z0-9]+)");
private static final Pattern CLIENT_INITIAL_RESPONSE_PATTERN = Pattern.compile(
String.format("n,(a=(?<authzid>%s))?,%s(?<kvpairs>%s)%s", SASLNAME, SEPARATOR, KVPAIRS, SEPARATOR));
public static final String AUTH_KEY = "auth";
private final String tokenValue;
private final String authorizationId;
private SaslExtensions saslExtensions;
public static final Pattern EXTENSION_KEY_PATTERN = Pattern.compile(KEY);
public static final Pattern EXTENSION_VALUE_PATTERN = Pattern.compile(VALUE);
public OAuthBearerClientInitialResponse(byte[] response) throws SaslException {
String responseMsg = new String(response, StandardCharsets.UTF_8);
Matcher matcher = CLIENT_INITIAL_RESPONSE_PATTERN.matcher(responseMsg);
if (!matcher.matches())
throw new SaslException("Invalid OAUTHBEARER client first message");
String authzid = matcher.group("authzid");
this.authorizationId = authzid == null ? "" : authzid;
String kvPairs = matcher.group("kvpairs");
Map<String, String> properties = Utils.parseMap(kvPairs, "=", SEPARATOR);
String auth = properties.get(AUTH_KEY);
if (auth == null)
throw new SaslException("Invalid OAUTHBEARER client first message: 'auth' not specified");
properties.remove(AUTH_KEY);
SaslExtensions extensions = new SaslExtensions(properties);
validateExtensions(extensions);
this.saslExtensions = extensions;
Matcher authMatcher = AUTH_PATTERN.matcher(auth);
if (!authMatcher.matches())
throw new SaslException("Invalid OAUTHBEARER client first message: invalid 'auth' format");
if (!"bearer".equalsIgnoreCase(authMatcher.group("scheme"))) {
String msg = String.format("Invalid scheme in OAUTHBEARER client first message: %s",
matcher.group("scheme"));
throw new SaslException(msg);
}
this.tokenValue = authMatcher.group("token");
}
/**
* Constructor
*
* @param tokenValue
* the mandatory token value
* @param extensions
* the optional extensions
* @throws SaslException
* if any extension name or value fails to conform to the required
* regular expression as defined by the specification, or if the
* reserved {@code auth} appears as a key
*/
public OAuthBearerClientInitialResponse(String tokenValue, SaslExtensions extensions) throws SaslException {
this(tokenValue, "", extensions);
}
/**
* Constructor
*
* @param tokenValue
* the mandatory token value
* @param authorizationId
* the optional authorization ID
* @param extensions
* the optional extensions
* @throws SaslException
* if any extension name or value fails to conform to the required
* regular expression as defined by the specification, or if the
* reserved {@code auth} appears as a key
*/
public OAuthBearerClientInitialResponse(String tokenValue, String authorizationId, SaslExtensions extensions) throws SaslException {
this.tokenValue = Objects.requireNonNull(tokenValue, "token value must not be null");
this.authorizationId = authorizationId == null ? "" : authorizationId;
validateExtensions(extensions);
this.saslExtensions = extensions != null ? extensions : SaslExtensions.empty();
}
/**
* Return the always non-null extensions
*
* @return the always non-null extensions
*/
public SaslExtensions extensions() {
return saslExtensions;
}
public byte[] toBytes() {
String authzid = authorizationId.isEmpty() ? "" : "a=" + authorizationId;
String extensions = extensionsMessage();
if (extensions.length() > 0)
extensions = SEPARATOR + extensions;
String message = String.format("n,%s,%sauth=Bearer %s%s%s%s", authzid,
SEPARATOR, tokenValue, extensions, SEPARATOR, SEPARATOR);
return message.getBytes(StandardCharsets.UTF_8);
}
/**
* Return the always non-null token value
*
* @return the always non-null token value
*/
public String tokenValue() {
return tokenValue;
}
/**
* Return the always non-null authorization ID
*
* @return the always non-null authorization ID
*/
public String authorizationId() {
return authorizationId;
}
/**
* Validates that the given extensions conform to the standard. They should also not contain the reserved key name {@link OAuthBearerClientInitialResponse#AUTH_KEY}.
*
* @param extensions
* optional extensions to validate
* @throws SaslException
* if any extension name or value fails to conform to the required
* regular expression as defined by the specification, or if the
* reserved {@code auth} appears as a key
*
* @see <a href="https://tools.ietf.org/html/rfc7628#section-3.1">RFC 7628,
* Section 3.1</a>
*/
public static void validateExtensions(SaslExtensions extensions) throws SaslException {
if (extensions == null)
return;
if (extensions.map().containsKey(OAuthBearerClientInitialResponse.AUTH_KEY))
throw new SaslException("Extension name " + OAuthBearerClientInitialResponse.AUTH_KEY + " is invalid");
for (Map.Entry<String, String> entry : extensions.map().entrySet()) {
String extensionName = entry.getKey();
String extensionValue = entry.getValue();
if (!EXTENSION_KEY_PATTERN.matcher(extensionName).matches())
throw new SaslException("Extension name " + extensionName + " is invalid");
if (!EXTENSION_VALUE_PATTERN.matcher(extensionValue).matches())
throw new SaslException("Extension value (" + extensionValue + ") for extension " + extensionName + " is invalid");
}
}
/**
* Converts the SASLExtensions to an OAuth protocol-friendly string
*/
private String extensionsMessage() {
return Utils.mkString(saslExtensions.map(), "", "", "=", SEPARATOR);
}
}
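/*
* Illustrative sketch only: round-tripping the client first message through the
* wire format produced by toBytes(). The token value and authorization ID are
* placeholders.
*/
class ExampleClientInitialResponseUsage {
void roundTrip() throws SaslException {
OAuthBearerClientInitialResponse outbound =
new OAuthBearerClientInitialResponse("mF_9.B5f-4.1JqM", "user", null);
// Wire form: "n,a=user,\u0001auth=Bearer mF_9.B5f-4.1JqM\u0001\u0001"
byte[] wire = outbound.toBytes();
OAuthBearerClientInitialResponse inbound = new OAuthBearerClientInitialResponse(wire);
if (!inbound.tokenValue().equals(outbound.tokenValue()))
throw new IllegalStateException("round trip failed");
}
}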
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerRefreshingLogin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer.internals;
import java.util.Map;
import java.util.Set;
import javax.security.auth.Subject;
import javax.security.auth.login.Configuration;
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.auth.Login;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken;
import org.apache.kafka.common.security.oauthbearer.internals.expiring.ExpiringCredential;
import org.apache.kafka.common.security.oauthbearer.internals.expiring.ExpiringCredentialRefreshConfig;
import org.apache.kafka.common.security.oauthbearer.internals.expiring.ExpiringCredentialRefreshingLogin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class is responsible for refreshing logins for both Kafka client and
* server when the credential is an OAuth 2 bearer token communicated over
* SASL/OAUTHBEARER. An OAuth 2 bearer token has a limited lifetime, and an
* instance of this class periodically refreshes it so that the client can
* create new connections to brokers on an ongoing basis.
* <p>
* This class does not need to be explicitly set via the
* {@code sasl.login.class} client configuration property or the
* {@code listener.name.sasl_[plaintext|ssl].oauthbearer.sasl.login.class}
* broker configuration property when the SASL mechanism is OAUTHBEARER; it is
* automatically set by default in that case.
* <p>
* The parameters that impact how the refresh algorithm operates are specified
* as part of the producer/consumer/broker configuration and are as follows. See
* the documentation for these properties elsewhere for details.
* <table>
* <tr>
* <th>Producer/Consumer/Broker Configuration Property</th>
* </tr>
* <tr>
* <td>{@code sasl.login.refresh.window.factor}</td>
* </tr>
* <tr>
* <td>{@code sasl.login.refresh.window.jitter}</td>
* </tr>
* <tr>
* <td>{@code sasl.login.refresh.min.period.seconds}</td>
* </tr>
* <tr>
* <td>{@code sasl.login.refresh.min.buffer.seconds}</td>
* </tr>
* </table>
*
* @see OAuthBearerLoginModule
* @see SaslConfigs#SASL_LOGIN_REFRESH_WINDOW_FACTOR_DOC
* @see SaslConfigs#SASL_LOGIN_REFRESH_WINDOW_JITTER_DOC
* @see SaslConfigs#SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS_DOC
* @see SaslConfigs#SASL_LOGIN_REFRESH_BUFFER_SECONDS_DOC
*/
public class OAuthBearerRefreshingLogin implements Login {
private static final Logger log = LoggerFactory.getLogger(OAuthBearerRefreshingLogin.class);
private ExpiringCredentialRefreshingLogin expiringCredentialRefreshingLogin = null;
@Override
public void configure(Map<String, ?> configs, String contextName, Configuration configuration,
AuthenticateCallbackHandler loginCallbackHandler) {
/*
* Specify this class as the one to synchronize on so that only one OAuth 2
* Bearer Token is refreshed at a given time. Specify null if we don't mind
* multiple simultaneous refreshes. Refreshes happen on the order of minutes
* rather than seconds or milliseconds, and there are typically minutes of
* lifetime remaining when the refresh occurs, so serializing them seems
* reasonable.
*/
Class<OAuthBearerRefreshingLogin> classToSynchronizeOnPriorToRefresh = OAuthBearerRefreshingLogin.class;
expiringCredentialRefreshingLogin = new ExpiringCredentialRefreshingLogin(contextName, configuration,
new ExpiringCredentialRefreshConfig(configs, true), loginCallbackHandler,
classToSynchronizeOnPriorToRefresh) {
@Override
public ExpiringCredential expiringCredential() {
Set<OAuthBearerToken> privateCredentialTokens = expiringCredentialRefreshingLogin.subject()
.getPrivateCredentials(OAuthBearerToken.class);
if (privateCredentialTokens.isEmpty())
return null;
final OAuthBearerToken token = privateCredentialTokens.iterator().next();
if (log.isDebugEnabled())
log.debug("Found expiring credential with principal '{}'.", token.principalName());
return new ExpiringCredential() {
@Override
public String principalName() {
return token.principalName();
}
@Override
public Long startTimeMs() {
return token.startTimeMs();
}
@Override
public long expireTimeMs() {
return token.lifetimeMs();
}
@Override
public Long absoluteLastRefreshTimeMs() {
return null;
}
};
}
};
}
@Override
public void close() {
if (expiringCredentialRefreshingLogin != null)
expiringCredentialRefreshingLogin.close();
}
@Override
public Subject subject() {
return expiringCredentialRefreshingLogin != null ? expiringCredentialRefreshingLogin.subject() : null;
}
@Override
public String serviceName() {
return expiringCredentialRefreshingLogin != null ? expiringCredentialRefreshingLogin.serviceName() : null;
}
@Override
public synchronized LoginContext login() throws LoginException {
if (expiringCredentialRefreshingLogin != null)
return expiringCredentialRefreshingLogin.login();
throw new LoginException("Login was not configured properly");
}
}
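/*
* Illustrative configuration sketch for tuning the refresh behaviour described
* above; the values shown are believed to be the documented defaults, but
* consult SaslConfigs for the authoritative ones:
*
*   sasl.login.refresh.window.factor=0.8
*   sasl.login.refresh.window.jitter=0.05
*   sasl.login.refresh.min.period.seconds=60
*   sasl.login.refresh.buffer.seconds=300
*/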
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer.internals;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Map;
import java.util.Objects;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslClientFactory;
import javax.security.sasl.SaslException;
import org.apache.kafka.common.errors.IllegalSaslStateException;
import org.apache.kafka.common.security.auth.SaslExtensions;
import org.apache.kafka.common.security.auth.SaslExtensionsCallback;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerTokenCallback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* {@code SaslClient} implementation for SASL/OAUTHBEARER in Kafka. This
* implementation requires an instance of {@code AuthenticateCallbackHandler}
* that can handle an instance of {@link OAuthBearerTokenCallback} and return
* the {@link OAuthBearerToken} generated by the {@code login()} event on the
* {@code LoginContext}. Said handler can also optionally handle an instance of {@link SaslExtensionsCallback}
* to return any extensions generated by the {@code login()} event on the {@code LoginContext}.
*
* @see <a href="https://tools.ietf.org/html/rfc6750#section-2.1">RFC 6750,
* Section 2.1</a>
*
*/
public class OAuthBearerSaslClient implements SaslClient {
static final byte BYTE_CONTROL_A = (byte) 0x01;
private static final Logger log = LoggerFactory.getLogger(OAuthBearerSaslClient.class);
private final CallbackHandler callbackHandler;
enum State {
SEND_CLIENT_FIRST_MESSAGE, RECEIVE_SERVER_FIRST_MESSAGE, RECEIVE_SERVER_MESSAGE_AFTER_FAILURE, COMPLETE, FAILED
}
private State state;
public OAuthBearerSaslClient(AuthenticateCallbackHandler callbackHandler) {
this.callbackHandler = Objects.requireNonNull(callbackHandler);
setState(State.SEND_CLIENT_FIRST_MESSAGE);
}
public CallbackHandler callbackHandler() {
return callbackHandler;
}
@Override
public String getMechanismName() {
return OAuthBearerLoginModule.OAUTHBEARER_MECHANISM;
}
@Override
public boolean hasInitialResponse() {
return true;
}
@Override
public byte[] evaluateChallenge(byte[] challenge) throws SaslException {
try {
OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback();
switch (state) {
case SEND_CLIENT_FIRST_MESSAGE:
if (challenge != null && challenge.length != 0)
throw new SaslException("Expected empty challenge");
callbackHandler().handle(new Callback[] {callback});
SaslExtensions extensions = retrieveCustomExtensions();
setState(State.RECEIVE_SERVER_FIRST_MESSAGE);
return new OAuthBearerClientInitialResponse(callback.token().value(), extensions).toBytes();
case RECEIVE_SERVER_FIRST_MESSAGE:
if (challenge != null && challenge.length != 0) {
String jsonErrorResponse = new String(challenge, StandardCharsets.UTF_8);
if (log.isDebugEnabled())
log.debug("Sending %%x01 response to server after receiving an error: {}",
jsonErrorResponse);
setState(State.RECEIVE_SERVER_MESSAGE_AFTER_FAILURE);
return new byte[] {BYTE_CONTROL_A};
}
callbackHandler().handle(new Callback[] {callback});
if (log.isDebugEnabled())
log.debug("Successfully authenticated as {}", callback.token().principalName());
setState(State.COMPLETE);
return null;
default:
throw new IllegalSaslStateException("Unexpected challenge in Sasl client state " + state);
}
} catch (SaslException e) {
setState(State.FAILED);
throw e;
} catch (IOException | UnsupportedCallbackException e) {
setState(State.FAILED);
throw new SaslException(e.getMessage(), e);
}
}
@Override
public boolean isComplete() {
return state == State.COMPLETE;
}
@Override
public byte[] unwrap(byte[] incoming, int offset, int len) {
if (!isComplete())
throw new IllegalStateException("Authentication exchange has not completed");
return Arrays.copyOfRange(incoming, offset, offset + len);
}
@Override
public byte[] wrap(byte[] outgoing, int offset, int len) {
if (!isComplete())
throw new IllegalStateException("Authentication exchange has not completed");
return Arrays.copyOfRange(outgoing, offset, offset + len);
}
@Override
public Object getNegotiatedProperty(String propName) {
if (!isComplete())
throw new IllegalStateException("Authentication exchange has not completed");
return null;
}
@Override
public void dispose() {
}
private void setState(State state) {
log.debug("Setting SASL/{} client state to {}", OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, state);
this.state = state;
}
private SaslExtensions retrieveCustomExtensions() throws SaslException {
SaslExtensionsCallback extensionsCallback = new SaslExtensionsCallback();
try {
callbackHandler().handle(new Callback[] {extensionsCallback});
} catch (UnsupportedCallbackException e) {
log.debug("Extensions callback is not supported by client callback handler {}, no extensions will be added",
callbackHandler());
} catch (Exception e) {
throw new SaslException("SASL extensions could not be obtained", e);
}
return extensionsCallback.extensions();
}
public static class OAuthBearerSaslClientFactory implements SaslClientFactory {
@Override
public SaslClient createSaslClient(String[] mechanisms, String authorizationId, String protocol,
String serverName, Map<String, ?> props, CallbackHandler callbackHandler) {
String[] mechanismNamesCompatibleWithPolicy = getMechanismNames(props);
for (String mechanism : mechanisms) {
for (int i = 0; i < mechanismNamesCompatibleWithPolicy.length; i++) {
if (mechanismNamesCompatibleWithPolicy[i].equals(mechanism)) {
if (!(Objects.requireNonNull(callbackHandler) instanceof AuthenticateCallbackHandler))
throw new IllegalArgumentException(String.format(
"Callback handler must be castable to %s: %s",
AuthenticateCallbackHandler.class.getName(), callbackHandler.getClass().getName()));
return new OAuthBearerSaslClient((AuthenticateCallbackHandler) callbackHandler);
}
}
}
return null;
}
@Override
public String[] getMechanismNames(Map<String, ?> props) {
return OAuthBearerSaslServer.mechanismNamesCompatibleWithPolicy(props);
}
}
}
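/*
* Illustrative sketch only: obtaining this client through the standard Java
* SASL API once the OAUTHBEARER provider has been registered. The protocol and
* server name are placeholders.
*/
class ExampleSaslClientLookup {
SaslClient create(AuthenticateCallbackHandler handler) throws SaslException {
return javax.security.sasl.Sasl.createSaslClient(
new String[] {OAuthBearerLoginModule.OAUTHBEARER_MECHANISM},
null, "kafka", "broker.example.com", null, handler);
}
}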
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientCallbackHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer.internals;
import java.io.IOException;
import java.security.AccessController;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import javax.security.auth.Subject;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.auth.login.AppConfigurationEntry;
import org.apache.kafka.common.security.auth.SaslExtensionsCallback;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.auth.SaslExtensions;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerTokenCallback;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* An implementation of {@code AuthenticateCallbackHandler} that recognizes
* {@link OAuthBearerTokenCallback} and retrieves OAuth 2 Bearer Token that was
* created when the {@code OAuthBearerLoginModule} logged in by looking for an
* instance of {@link OAuthBearerToken} in the {@code Subject}'s private
* credentials. This class also recognizes {@link SaslExtensionsCallback} and retrieves any SASL extensions that were
* created when the {@code OAuthBearerLoginModule} logged in by looking for an instance of {@link SaslExtensions}
* in the {@code Subject}'s public credentials
* <p>
* Use of this class is configured automatically and does not need to be
* explicitly set via the {@code sasl.client.callback.handler.class}
* configuration property.
*/
public class OAuthBearerSaslClientCallbackHandler implements AuthenticateCallbackHandler {
private static final Logger log = LoggerFactory.getLogger(OAuthBearerSaslClientCallbackHandler.class);
private boolean configured = false;
/**
* Return true if this instance has been configured, otherwise false
*
* @return true if this instance has been configured, otherwise false
*/
public boolean configured() {
return configured;
}
@Override
public void configure(Map<String, ?> configs, String saslMechanism, List<AppConfigurationEntry> jaasConfigEntries) {
if (!OAuthBearerLoginModule.OAUTHBEARER_MECHANISM.equals(saslMechanism))
throw new IllegalArgumentException(String.format("Unexpected SASL mechanism: %s", saslMechanism));
configured = true;
}
@Override
public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException {
if (!configured())
throw new IllegalStateException("Callback handler not configured");
for (Callback callback : callbacks) {
if (callback instanceof OAuthBearerTokenCallback)
handleCallback((OAuthBearerTokenCallback) callback);
else if (callback instanceof SaslExtensionsCallback)
handleCallback((SaslExtensionsCallback) callback, Subject.getSubject(AccessController.getContext()));
else
throw new UnsupportedCallbackException(callback);
}
}
@Override
public void close() {
// empty
}
private void handleCallback(OAuthBearerTokenCallback callback) throws IOException {
if (callback.token() != null)
throw new IllegalArgumentException("Callback had a token already");
Subject subject = Subject.getSubject(AccessController.getContext());
Set<OAuthBearerToken> privateCredentials = subject != null
? subject.getPrivateCredentials(OAuthBearerToken.class)
: Collections.emptySet();
if (privateCredentials.size() == 0)
throw new IOException("No OAuth Bearer tokens in Subject's private credentials");
if (privateCredentials.size() == 1)
callback.token(privateCredentials.iterator().next());
else {
/*
* There a very small window of time upon token refresh (on the order of milliseconds)
* where both an old and a new token appear on the Subject's private credentials.
* Rather than implement a lock to eliminate this window, we will deal with it by
* checking for the existence of multiple tokens and choosing the one that has the
* longest lifetime. It is also possible that a bug could cause multiple tokens to
* exist (e.g. KAFKA-7902), so dealing with the unlikely possibility that occurs
* during normal operation also allows us to deal more robustly with potential bugs.
*/
SortedSet<OAuthBearerToken> sortedByLifetime =
new TreeSet<>(
new Comparator<OAuthBearerToken>() {
@Override
public int compare(OAuthBearerToken o1, OAuthBearerToken o2) {
return Long.compare(o1.lifetimeMs(), o2.lifetimeMs());
}
});
sortedByLifetime.addAll(privateCredentials);
log.warn("Found {} OAuth Bearer tokens in Subject's private credentials; the oldest expires at {}, will use the newest, which expires at {}",
sortedByLifetime.size(),
new Date(sortedByLifetime.first().lifetimeMs()),
new Date(sortedByLifetime.last().lifetimeMs()));
callback.token(sortedByLifetime.last());
}
}
/**
* Attaches the first {@link SaslExtensions} found in the public credentials of the Subject
*/
private static void handleCallback(SaslExtensionsCallback extensionsCallback, Subject subject) {
if (subject != null && !subject.getPublicCredentials(SaslExtensions.class).isEmpty()) {
SaslExtensions extensions = subject.getPublicCredentials(SaslExtensions.class).iterator().next();
extensionsCallback.extensions(extensions);
}
}
}
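/*
* Illustrative sketch only: the "longest remaining lifetime wins" selection
* policy described above, expressed with streams. The class name is
* hypothetical.
*/
class ExampleTokenSelection {
OAuthBearerToken newest(Set<OAuthBearerToken> tokens) {
return tokens.stream()
.max(Comparator.comparingLong(OAuthBearerToken::lifetimeMs))
.orElseThrow(IllegalStateException::new);
}
}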
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClientProvider.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer.internals;
import java.security.Provider;
import java.security.Security;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule;
import org.apache.kafka.common.security.oauthbearer.internals.OAuthBearerSaslClient.OAuthBearerSaslClientFactory;
public class OAuthBearerSaslClientProvider extends Provider {
private static final long serialVersionUID = 1L;
protected OAuthBearerSaslClientProvider() {
super("SASL/OAUTHBEARER Client Provider", 1.0, "SASL/OAUTHBEARER Client Provider for Kafka");
put("SaslClientFactory." + OAuthBearerLoginModule.OAUTHBEARER_MECHANISM,
OAuthBearerSaslClientFactory.class.getName());
}
public static void initialize() {
Security.addProvider(new OAuthBearerSaslClientProvider());
}
}
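/*
* Illustrative usage sketch: Kafka's security infrastructure normally performs
* this registration, but it can also be triggered directly.
*/
class ExampleProviderRegistration {
static void register() {
// Registers the SaslClientFactory for OAUTHBEARER with the JCA Security class
OAuthBearerSaslClientProvider.initialize();
}
}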
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServer.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.common.security.oauthbearer.internals;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Map;
import java.util.Objects;
import javax.security.auth.callback.Callback;
import javax.security.auth.callback.CallbackHandler;
import javax.security.auth.callback.UnsupportedCallbackException;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslException;
import javax.security.sasl.SaslServer;
import javax.security.sasl.SaslServerFactory;
import org.apache.kafka.common.errors.SaslAuthenticationException;
import org.apache.kafka.common.security.auth.SaslExtensions;
import org.apache.kafka.common.security.authenticator.SaslInternalConfigs;
import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerExtensionsValidatorCallback;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken;
import org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallback;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* {@code SaslServer} implementation for SASL/OAUTHBEARER in Kafka. An instance
* of {@link OAuthBearerToken} is available upon successful authentication via
* the negotiated property "{@code OAUTHBEARER.token}"; the token could be used
* in a custom authorizer (to authorize based on JWT claims rather than ACLs,
* for example).
*/
public class OAuthBearerSaslServer implements SaslServer {
private static final Logger log = LoggerFactory.getLogger(OAuthBearerSaslServer.class);
private static final String NEGOTIATED_PROPERTY_KEY_TOKEN = OAuthBearerLoginModule.OAUTHBEARER_MECHANISM + ".token";
private static final String INTERNAL_ERROR_ON_SERVER = "Authentication could not be performed due to an internal error on the server";
private final AuthenticateCallbackHandler callbackHandler;
private boolean complete;
private OAuthBearerToken tokenForNegotiatedProperty = null;
private String errorMessage = null;
private SaslExtensions extensions;
public OAuthBearerSaslServer(CallbackHandler callbackHandler) {
if (!(Objects.requireNonNull(callbackHandler) instanceof AuthenticateCallbackHandler))
throw new IllegalArgumentException(String.format("Callback handler must be castable to %s: %s",
AuthenticateCallbackHandler.class.getName(), callbackHandler.getClass().getName()));
this.callbackHandler = (AuthenticateCallbackHandler) callbackHandler;
}
/**
* @throws SaslAuthenticationException
* if access token cannot be validated
* <p>
* <b>Note:</b> This method may throw
* {@link SaslAuthenticationException} to provide custom error
* messages to clients. But care should be taken to avoid including
* any information in the exception message that should not be
* leaked to unauthenticated clients. It may be safer to throw
* {@link SaslException} in some cases so that a standard error
* message is returned to clients.
* </p>
*/
@Override
public byte[] evaluateResponse(byte[] response) throws SaslException, SaslAuthenticationException {
if (response.length == 1 && response[0] == OAuthBearerSaslClient.BYTE_CONTROL_A && errorMessage != null) {
log.debug("Received %x01 response from client after it received our error");
throw new SaslAuthenticationException(errorMessage);
}
errorMessage = null;
OAuthBearerClientInitialResponse clientResponse;
try {
clientResponse = new OAuthBearerClientInitialResponse(response);
} catch (SaslException e) {
log.debug(e.getMessage());
throw e;
}
return process(clientResponse.tokenValue(), clientResponse.authorizationId(), clientResponse.extensions());
}
@Override
public String getAuthorizationID() {
if (!complete)
throw new IllegalStateException("Authentication exchange has not completed");
return tokenForNegotiatedProperty.principalName();
}
@Override
public String getMechanismName() {
return OAuthBearerLoginModule.OAUTHBEARER_MECHANISM;
}
@Override
public Object getNegotiatedProperty(String propName) {
if (!complete)
throw new IllegalStateException("Authentication exchange has not completed");
if (NEGOTIATED_PROPERTY_KEY_TOKEN.equals(propName))
return tokenForNegotiatedProperty;
if (SaslInternalConfigs.CREDENTIAL_LIFETIME_MS_SASL_NEGOTIATED_PROPERTY_KEY.equals(propName))
return tokenForNegotiatedProperty.lifetimeMs();
return extensions.map().get(propName);
}
@Override
public boolean isComplete() {
return complete;
}
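/*
* SASL/OAUTHBEARER negotiates no security layer, so unwrap() and wrap()
* pass the bytes through unchanged.
*/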
@Override
public byte[] unwrap(byte[] incoming, int offset, int len) {
if (!complete)
throw new IllegalStateException("Authentication exchange has not completed");
return Arrays.copyOfRange(incoming, offset, offset + len);
}
@Override
public byte[] wrap(byte[] outgoing, int offset, int len) {
if (!complete)
throw new IllegalStateException("Authentication exchange has not completed");
return Arrays.copyOfRange(outgoing, offset, offset + len);
}
@Override
public void dispose() {
complete = false;
tokenForNegotiatedProperty = null;
extensions = null;
}
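/*
* Validates the access token through the configured callback handler. On
* validation failure the RFC 7628 JSON error body is returned so the client
* can reply with the %x01 abort byte; on success the token and any validated
* extensions are retained as negotiated properties.
*/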
private byte[] process(String tokenValue, String authorizationId, SaslExtensions extensions) throws SaslException {
OAuthBearerValidatorCallback callback = new OAuthBearerValidatorCallback(tokenValue);
try {
callbackHandler.handle(new Callback[] {callback});
} catch (IOException | UnsupportedCallbackException e) {
handleCallbackError(e);
}
OAuthBearerToken token = callback.token();
if (token == null) {
errorMessage = jsonErrorResponse(callback.errorStatus(), callback.errorScope(),
callback.errorOpenIDConfiguration());
log.debug(errorMessage);
return errorMessage.getBytes(StandardCharsets.UTF_8);
}
/*
* We support the client specifying an authorization ID as per the SASL
* specification, but it must match the principal name if it is specified.
*/
if (!authorizationId.isEmpty() && !authorizationId.equals(token.principalName()))
throw new SaslAuthenticationException(String.format(
"Authentication failed: Client requested an authorization id (%s) that is different from the token's principal name (%s)",
authorizationId, token.principalName()));
Map<String, String> validExtensions = processExtensions(token, extensions);
tokenForNegotiatedProperty = token;
this.extensions = new SaslExtensions(validExtensions);
complete = true;
log.debug("Successfully authenticate User={}", token.principalName());
return new byte[0];
}
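/*
* Lets the callback handler validate the client-supplied SASL extensions;
* handlers that do not support the callback contribute no extensions, which
* preserves backwards compatibility.
*/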
private Map<String, String> processExtensions(OAuthBearerToken token, SaslExtensions extensions) throws SaslException {
OAuthBearerExtensionsValidatorCallback extensionsCallback = new OAuthBearerExtensionsValidatorCallback(token, extensions);
try {
callbackHandler.handle(new Callback[] {extensionsCallback});
} catch (UnsupportedCallbackException e) {
// backwards compatibility - no extensions will be added
} catch (IOException e) {
handleCallbackError(e);
}
if (!extensionsCallback.invalidExtensions().isEmpty()) {
String errorMessage = String.format("Authentication failed: %d extensions are invalid! They are: %s",
extensionsCallback.invalidExtensions().size(),
Utils.mkString(extensionsCallback.invalidExtensions(), "", "", ": ", "; "));
log.debug(errorMessage);
throw new SaslAuthenticationException(errorMessage);
}
return extensionsCallback.validatedExtensions();
}
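/*
* Builds the RFC 7628 failure response body, e.g. (illustrative values only):
* {"status":"invalid_token", "scope":"kafka",
* "openid-configuration":"https://example.com/.well-known/openid-configuration"}
*/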
private static String jsonErrorResponse(String errorStatus, String errorScope, String errorOpenIDConfiguration) {
String jsonErrorResponse = String.format("{\"status\":\"%s\"", errorStatus);
if (errorScope != null)
jsonErrorResponse = String.format("%s, \"scope\":\"%s\"", jsonErrorResponse, errorScope);
if (errorOpenIDConfiguration != null)
jsonErrorResponse = String.format("%s, \"openid-configuration\":\"%s\"", jsonErrorResponse,
errorOpenIDConfiguration);
jsonErrorResponse = String.format("%s}", jsonErrorResponse);
return jsonErrorResponse;
}
private void handleCallbackError(Exception e) throws SaslException {
String msg = String.format("%s: %s", INTERNAL_ERROR_ON_SERVER, e.getMessage());
log.debug(msg, e);
throw new SaslException(msg);
}
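/*
* OAUTHBEARER transmits the bearer token in cleartext at the SASL layer
* (confidentiality is expected to come from TLS), so the mechanism is not
* advertised when the Sasl.POLICY_NOPLAINTEXT property is "true".
*/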
public static String[] mechanismNamesCompatibleWithPolicy(Map<String, ?> props) {
return props != null && "true".equals(String.valueOf(props.get(Sasl.POLICY_NOPLAINTEXT))) ? new String[] {}
: new String[] {OAuthBearerLoginModule.OAUTHBEARER_MECHANISM};
}
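/*
* JCA-style factory so the mechanism can be instantiated through the standard
* javax.security.sasl.Sasl.createSaslServer() lookup machinery.
*/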
public static class OAuthBearerSaslServerFactory implements SaslServerFactory {
@Override
public SaslServer createSaslServer(String mechanism, String protocol, String serverName, Map<String, ?> props,
CallbackHandler callbackHandler) {
String[] mechanismNamesCompatibleWithPolicy = getMechanismNames(props);
for (int i = 0; i < mechanismNamesCompatibleWithPolicy.length; i++) {
if (mechanismNamesCompatibleWithPolicy[i].equals(mechanism)) {
return new OAuthBearerSaslServer(callbackHandler);
}
}
return null;
}
@Override
public String[] getMechanismNames(Map<String, ?> props) {
return OAuthBearerSaslServer.mechanismNamesCompatibleWithPolicy(props);
}
}
}
|