index int64 | repo_id string | file_path string | content string |
|---|---|---|---|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages/Message.java | package art.starrynift.messages;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.List;
import java.util.Map;
/**
* Represents a Message within a thread.
* <p>
* https://platform.openai.com/docs/api-reference/messages/object
*/
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class Message {
/**
 * The identifier, which can be referenced in API endpoints.
 */
String id;
/**
 * The object type, which is always "thread.message".
 */
String object;
/**
 * The Unix timestamp (in seconds) for when the message was created.
 * NOTE(review): declared as int, so values past 2038 overflow; sibling class
 * Permission uses long for its timestamp — confirm whether this should match.
 */
@JsonProperty("created_at")
int createdAt;
/**
 * The thread ID that this message belongs to.
 */
@JsonProperty("thread_id")
String threadId;
/**
 * The entity that produced the message. One of "user" or "assistant".
 */
String role;
/**
 * The content of the message in an array of text and/or images.
 */
List<MessageContent> content;
/**
 * If applicable, the ID of the assistant that authored this message; null otherwise.
 */
@JsonProperty("assistant_id")
String assistantId;
/**
 * If applicable, the ID of the run associated with the authoring of this message; null otherwise.
 */
@JsonProperty("run_id")
String runId;
/**
 * A list of file IDs that the assistant should use.
 * Useful for tools like retrieval and code_interpreter that can access files.
 * A maximum of 10 files can be attached to a message.
 */
@JsonProperty("file_ids")
List<String> fileIds;
/**
 * Set of 16 key-value pairs that can be attached to an object.
 * This can be useful for storing additional information about the object in a structured format.
 * Keys can be a maximum of 64 characters long, and values can be a maximum of 512 characters long.
 */
Map<String, String> metadata;
} |
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages/MessageContent.java | package art.starrynift.messages;
import com.fasterxml.jackson.annotation.JsonProperty;
import art.starrynift.messages.content.ImageFile;
import art.starrynift.messages.content.Text;
import lombok.Data;
/**
* Represents the content of a message
* <p>
* https://platform.openai.com/docs/api-reference/messages/object
*/
@Data
public class MessageContent {
/**
 * The content type, either "text" or "image_file".
 * Exactly one of {@link #text} or {@link #imageFile} is populated, matching this type.
 */
String type;
/**
 * Text content of the message. Only present if type == "text"; null otherwise.
 */
Text text;
/**
 * The image content of a message. Only present if type == "image_file"; null otherwise.
 */
@JsonProperty("image_file")
ImageFile imageFile;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages/MessageFile.java | package art.starrynift.messages;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
* A list of files attached to a Message
* <p>
* https://platform.openai.com/docs/api-reference/messages/file-object
*/
@NoArgsConstructor
@AllArgsConstructor
@Data
public class MessageFile {
/**
 * The identifier, which can be referenced in API endpoints.
 */
String id;
/**
 * The object type, which is always "thread.message.file".
 */
String object;
/**
 * The Unix timestamp (in seconds) for when the message file was created.
 * NOTE(review): int overflows past 2038 — see the same pattern in Message.
 */
@JsonProperty("created_at")
int createdAt;
/**
 * The ID of the message that the File is attached to.
 */
@JsonProperty("message_id")
String messageId;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages/MessageRequest.java | package art.starrynift.messages;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.*;
import java.util.List;
import java.util.Map;
/**
* Creates a Message
* <p>
* https://platform.openai.com/docs/api-reference/messages/createMessage
*/
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class MessageRequest {
/**
 * The role of the entity that is creating the message.
 * Currently only "user" is supported; the builder defaults this to "user".
 */
@NonNull
@Builder.Default
String role = "user";
/**
 * The content of the message. Required (@NonNull).
 */
@NonNull
String content;
/**
 * A list of File IDs that the message should use. Optional; null is omitted
 * from the request and the API defaults to an empty list.
 * There can be a maximum of 10 files attached to a message.
 * Useful for tools like retrieval and code_interpreter that can access and use files.
 */
@JsonProperty("file_ids")
List<String> fileIds;
/**
 * Set of 16 key-value pairs that can be attached to an object. Optional.
 * This can be useful for storing additional information about the object in a structured format.
 * Keys can be a maximum of 64 characters long, and values can be a maximum of 512 characters long.
 */
Map<String, String> metadata;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages/ModifyMessageRequest.java | package art.starrynift.messages;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Map;
/**
* Modifies a Message
* <p>
* https://platform.openai.com/docs/api-reference/messages/modifyMessage
*/
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class ModifyMessageRequest {
/**
 * Set of 16 key-value pairs that can be attached to an object — the only
 * message attribute this request can modify.
 * This can be useful for storing additional information about the object in a structured format.
 * Keys can be a maximum of 64 characters long, and values can be a maximum of 512 characters long.
 */
Map<String, String> metadata;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages/content/Annotation.java | package art.starrynift.messages.content;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
* An annotation for a text Message
* <p>
* https://platform.openai.com/docs/api-reference/messages/object
*/
@Data
@NoArgsConstructor
@AllArgsConstructor
public class Annotation {
/**
 * The type of annotation, either "file_citation" or "file_path".
 */
String type;
/**
 * The text in the message content that needs to be replaced.
 */
String text;
/**
 * File citation details, only present when type == "file_citation"; null otherwise.
 */
@JsonProperty("file_citation")
FileCitation fileCitation;
/**
 * File path details, only present when type == "file_path"; null otherwise.
 */
@JsonProperty("file_path")
FilePath filePath;
/**
 * Start index of the annotated span — presumably an offset into the message
 * text that {@link #text} occupies; confirm against the Messages API docs.
 */
@JsonProperty("start_index")
int startIndex;
/**
 * End index of the annotated span (see note on startIndex).
 */
@JsonProperty("end_index")
int endIndex;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages/content/FileCitation.java | package art.starrynift.messages.content;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
* A citation within the message that points to a specific quote from a specific File associated with the
* assistant or the message. Generated when the assistant uses the "retrieval" tool to search files.
* <p>
* https://platform.openai.com/docs/api-reference/messages/object
*/
@Data
@NoArgsConstructor
@AllArgsConstructor
public class FileCitation {
/**
 * The ID of the specific File the citation is from.
 */
@JsonProperty("file_id")
String fileId;
/**
 * The specific quote in the file that the citation points to.
 */
String quote;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages/content/FilePath.java | package art.starrynift.messages.content;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
* A URL for the file that's generated when the assistant used the code_interpreter tool to generate a file.
* <p>
* https://platform.openai.com/docs/api-reference/messages/object
*/
@Data
@NoArgsConstructor
@AllArgsConstructor
public class FilePath {
/**
 * The ID of the file that was generated by the code_interpreter tool.
 */
@JsonProperty("file_id")
String fileId;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages/content/ImageFile.java | package art.starrynift.messages.content;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * References an image File in the content of a message.
 * <p>
 * https://platform.openai.com/docs/api-reference/messages/object
 */
@Data
@NoArgsConstructor
@AllArgsConstructor
public class ImageFile {
/**
 * The File ID of the image in the message content.
 */
@JsonProperty("file_id")
String fileId;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/messages/content/Text.java | package art.starrynift.messages.content;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.List;
/**
* The text content that is part of a message
* <p>
* https://platform.openai.com/docs/api-reference/messages/object
*/
@Data
@NoArgsConstructor
@AllArgsConstructor
public class Text {
/**
 * The data that makes up the text.
 */
String value;
/**
 * Text annotations (file citations / file paths) that show additional details.
 */
List<Annotation> annotations;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/model/Model.java | package art.starrynift.model;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
import java.util.List;
/**
* GPT model details
*
* https://beta.openai.com/docs/api-reference/models
*/
@Data
public class Model {
// NOTE(review): fields are public AND the class is @Data, so state is exposed
// both directly and via generated getters; callers may rely on either.
/**
 * An identifier for this model, used to specify the model when making completions, etc.
 */
public String id;
/**
 * The type of object returned, should be "model".
 */
public String object;
/**
 * The owner of the model, typically "openai".
 */
@JsonProperty("owned_by")
public String ownedBy;
/**
 * List of permissions for this model. No longer returned by OpenAI.
 */
@Deprecated
public List<Permission> permission;
/**
 * The root model that this and its parent (if applicable) are based on.
 */
public String root;
/**
 * The parent model that this is based on.
 */
public String parent;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/model/Permission.java | package art.starrynift.model;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
/**
* Model permissions
* I couldn't find documentation for the specific permissions, and I've elected to leave them undocumented rather than
* write something incorrect.
*
* https://beta.openai.com/docs/api-reference/models
*/
@Data
public class Permission {
/**
 * An identifier for this model permission.
 */
public String id;
/**
 * The type of object returned, should be "model_permission".
 */
public String object;
/**
 * The creation time in epoch seconds.
 */
public long created;
// The allow_* flags below are intentionally undocumented (see class javadoc);
// each maps one snake_case API field via @JsonProperty.
@JsonProperty("allow_create_engine")
public boolean allowCreateEngine;
@JsonProperty("allow_sampling")
public boolean allowSampling;
@JsonProperty("allow_log_probs")
public boolean allowLogProbs;
@JsonProperty("allow_search_indices")
public boolean allowSearchIndices;
@JsonProperty("allow_view")
public boolean allowView;
@JsonProperty("allow_fine_tuning")
public boolean allowFineTuning;
public String organization;
public String group;
@JsonProperty("is_blocking")
public boolean isBlocking;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/moderation/Moderation.java | package art.starrynift.moderation;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
/**
* An object containing the moderation data for a single input string
*
* https://beta.openai.com/docs/api-reference/moderations/create
*/
@Data
public class Moderation {
/**
 * Set to true if the model classifies the content as violating OpenAI's content policy, false otherwise.
 */
public boolean flagged;
/**
 * Object containing per-category binary content policy violation flags.
 * For each category, the value is true if the model flags the corresponding category as violated, false otherwise.
 */
public ModerationCategories categories;
/**
 * Object containing per-category raw scores output by the model, denoting the model's confidence that the
 * input violates the OpenAI's policy for the category.
 * The value is between 0 and 1, where higher values denote higher confidence.
 * The scores should not be interpreted as probabilities.
 */
@JsonProperty("category_scores")
public ModerationCategoryScores categoryScores;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/moderation/ModerationCategories.java | package art.starrynift.moderation;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
import java.util.List;
/**
* An object containing the flags for each moderation category
*
* https://beta.openai.com/docs/api-reference/moderations/create
*/
@Data
public class ModerationCategories {
// JSON keys containing '/' or '-' are not legal Java identifiers, so those
// fields are mapped explicitly with @JsonProperty.
public boolean hate;
@JsonProperty("hate/threatening")
public boolean hateThreatening;
@JsonProperty("self-harm")
public boolean selfHarm;
public boolean sexual;
@JsonProperty("sexual/minors")
public boolean sexualMinors;
public boolean violence;
@JsonProperty("violence/graphic")
public boolean violenceGraphic;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/moderation/ModerationCategoryScores.java | package art.starrynift.moderation;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.Data;
/**
* An object containing the scores for each moderation category
*
* https://beta.openai.com/docs/api-reference/moderations/create
*/
@Data
public class ModerationCategoryScores {
// Scores are confidences in [0, 1] (see Moderation.categoryScores); JSON keys
// containing '/' or '-' are mapped explicitly with @JsonProperty.
public double hate;
@JsonProperty("hate/threatening")
public double hateThreatening;
@JsonProperty("self-harm")
public double selfHarm;
public double sexual;
@JsonProperty("sexual/minors")
public double sexualMinors;
public double violence;
@JsonProperty("violence/graphic")
public double violenceGraphic;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/moderation/ModerationRequest.java | package art.starrynift.moderation;
import lombok.*;
/**
 * A request for OpenAI to detect if text violates OpenAI's content policy.
 *
 * https://beta.openai.com/docs/api-reference/moderations/create
 */
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class ModerationRequest {
/**
 * The input text to classify. Required (@NonNull).
 */
@NonNull
String input;
/**
 * The name of the model to use. Optional; the API defaults to text-moderation-stable.
 */
String model;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/moderation/ModerationResult.java | package art.starrynift.moderation;
import lombok.Data;
import java.util.List;
/**
* An object containing a response from the moderation api
*
* https://beta.openai.com/docs/api-reference/moderations/create
*/
@Data
public class ModerationResult {
/**
 * A unique id assigned to this moderation.
 */
public String id;
/**
 * The model used.
 */
public String model;
/**
 * A list of moderation scores, one entry per input string.
 */
public List<Moderation> results;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/CreateThreadAndRunRequest.java | package art.starrynift.runs;
import java.util.List;
import java.util.Map;
import javax.tools.Tool;
import com.fasterxml.jackson.annotation.JsonProperty;
import art.starrynift.threads.ThreadRequest;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class CreateThreadAndRunRequest {
/** The ID of the assistant to execute this run with. */
@JsonProperty("assistant_id")
private String assistantId;
/** The thread to create and run. */
private ThreadRequest thread;
/** Optional model override; null uses the assistant's configured model. */
private String model;
/** Optional instructions override for this run. */
private String instructions;
/**
 * Tools the assistant may use for this run.
 * NOTE(review): this resolves to javax.tools.Tool (the Java compiler SPI) via
 * the file's imports — almost certainly the wrong type; it should be the
 * SDK's assistant-tool class. Confirm and fix the import.
 */
private List<Tool> tools;
/** Set of up to 16 key-value metadata pairs to attach to the run. */
private Map<String, String> metadata;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/MessageCreation.java | package art.starrynift.runs;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * Run-step detail identifying the message created by a step
 * (the API's "message_creation" object — see StepDetails).
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class MessageCreation {
/** The ID of the message that was created by this run step. */
@JsonProperty("message_id")
String messageId;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/RequiredAction.java | package art.starrynift.runs;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * Action the caller must take before a Run can proceed
 * (the Run object's "required_action" field).
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class RequiredAction {
/** The action type; "submit_tool_outputs" is the variant modeled below. */
private String type;
/** Tool calls whose outputs must be submitted; present when type == "submit_tool_outputs". */
@JsonProperty("submit_tool_outputs")
private SubmitToolOutputs submitToolOutputs;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/Run.java | package art.starrynift.runs;
import java.util.List;
import java.util.Map;
import javax.tools.Tool;
import com.fasterxml.jackson.annotation.JsonProperty;
import art.starrynift.common.LastError;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * Represents an execution Run of an assistant on a thread.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class Run {
/** The identifier, which can be referenced in API endpoints. */
private String id;
/** The object type. */
private String object;
/** Unix timestamp (seconds) for when the run was created. */
@JsonProperty("created_at")
private Integer createdAt;
/** The ID of the thread being executed. */
@JsonProperty("thread_id")
private String threadId;
/** The ID of the assistant used for this run. */
@JsonProperty("assistant_id")
private String assistantId;
/** The run's lifecycle status (e.g. queued / in_progress / completed — confirm full set against API docs). */
private String status;
/** Details on the action required to continue the run; null if none. */
@JsonProperty("required_action")
private RequiredAction requiredAction;
/** The last error associated with this run; null if none. */
@JsonProperty("last_error")
private LastError lastError;
/** Unix timestamp (seconds) for when the run expires; null if not applicable. */
@JsonProperty("expires_at")
private Integer expiresAt;
/** Unix timestamp (seconds) for when the run was started; null if not started. */
@JsonProperty("started_at")
private Integer startedAt;
/** Unix timestamp (seconds) for when the run was cancelled; null if not cancelled. */
@JsonProperty("cancelled_at")
private Integer cancelledAt;
/** Unix timestamp (seconds) for when the run failed; null if it did not fail. */
@JsonProperty("failed_at")
private Integer failedAt;
/** Unix timestamp (seconds) for when the run completed; null if not completed. */
@JsonProperty("completed_at")
private Integer completedAt;
/** The model used for this run. */
private String model;
/** The instructions the assistant used for this run. */
private String instructions;
/**
 * Tools available for this run.
 * NOTE(review): resolves to javax.tools.Tool (compiler SPI) via the file's
 * imports — likely the wrong type; confirm and fix the import.
 */
private List<Tool> tools;
/** File IDs available to the tools for this run. */
@JsonProperty("file_ids")
private List<String> fileIds;
/** Set of up to 16 key-value metadata pairs attached to the run. */
private Map<String, String> metadata;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/RunCreateRequest.java | package art.starrynift.runs;
import java.util.List;
import java.util.Map;
import javax.tools.Tool;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * Request body for creating a Run on an existing thread.
 */
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class RunCreateRequest {
    /**
     * The ID of the assistant to use to execute this run. Required.
     * Mapped explicitly to the API's snake_case "assistant_id" — without this
     * annotation Jackson serialized the field as "assistantId", which every
     * sibling request/response class (Run, CreateThreadAndRunRequest) maps
     * as "assistant_id".
     */
    @JsonProperty("assistant_id")
    String assistantId;
    // Optional overrides — null fields fall back to the assistant's configuration.
    /** Model override for this run. */
    String model;
    /** Instructions override for this run. */
    String instructions;
    /**
     * Tools override for this run.
     * NOTE(review): resolves to javax.tools.Tool (compiler SPI) via the file's
     * imports — likely the wrong type; confirm and fix the import.
     */
    List<Tool> tools;
    /** Set of up to 16 key-value metadata pairs to attach to the run. */
    Map<String, String> metadata;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/RunImage.java | package art.starrynift.runs;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * An image file produced during a run (e.g. by the code_interpreter tool).
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class RunImage {
/** The File ID of the generated image. */
@JsonProperty("file_id")
private String fileId;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/RunStep.java | package art.starrynift.runs;
import java.util.Map;
import com.fasterxml.jackson.annotation.JsonProperty;
import art.starrynift.common.LastError;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * A single step in the execution of a Run.
 */
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class RunStep {
/** The identifier of the run step. */
private String id;
/** The object type. */
private String object;
/** Unix timestamp (seconds) for when the step was created. */
@JsonProperty("created_at")
private Integer createdAt;
/** The ID of the assistant associated with this step. */
@JsonProperty("assistant_id")
private String assistantId;
/** The ID of the thread being executed. */
@JsonProperty("thread_id")
private String threadId;
/** The ID of the run this step belongs to. */
@JsonProperty("run_id")
private String runId;
/** The step type, e.g. "message_creation" or "tool_calls" (see StepDetails). */
private String type;
/** The step's lifecycle status. */
private String status;
/** Type-specific details of the step. */
@JsonProperty("step_details")
private StepDetails stepDetails;
/** The last error associated with this step; null if none. */
@JsonProperty("last_error")
private LastError lastError;
/** Unix timestamp (seconds) for when the step expired; null if not applicable. */
@JsonProperty("expired_at")
private Integer expiredAt;
/** Unix timestamp (seconds) for when the step was cancelled; null if not cancelled. */
@JsonProperty("cancelled_at")
private Integer cancelledAt;
/** Unix timestamp (seconds) for when the step failed; null if it did not fail. */
@JsonProperty("failed_at")
private Integer failedAt;
/** Unix timestamp (seconds) for when the step completed; null if not completed. */
@JsonProperty("completed_at")
private Integer completedAt;
/** Set of up to 16 key-value metadata pairs attached to the step. */
private Map<String, String> metadata;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/StepDetails.java | package art.starrynift.runs;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.List;
/**
 * Type-specific payload of a RunStep: exactly one of messageCreation or
 * toolCalls is populated, matching {@link #type}.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class StepDetails {
/** The detail type, "message_creation" or "tool_calls". */
private String type;
/** Present when type == "message_creation"; null otherwise. */
@JsonProperty("message_creation")
private MessageCreation messageCreation;
/** Present when type == "tool_calls"; null otherwise. */
@JsonProperty("tool_calls")
private List<ToolCall> toolCalls;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/SubmitToolOutputRequestItem.java | package art.starrynift.runs;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * One tool-call result in a SubmitToolOutputsRequest.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class SubmitToolOutputRequestItem {
/** The ID of the tool call this output answers. */
@JsonProperty("tool_call_id")
private String toolCallId;
/** The output produced by running the tool. */
private String output;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/SubmitToolOutputs.java | package art.starrynift.runs;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.List;
/**
 * The "submit_tool_outputs" payload of a RequiredAction: the tool calls
 * whose outputs the caller must submit for the run to continue.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class SubmitToolOutputs {
/** The pending tool calls awaiting outputs. */
@JsonProperty("tool_calls")
List<ToolCall> toolCalls;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/SubmitToolOutputsRequest.java | package art.starrynift.runs;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.List;
/**
 * Request body for submitting tool outputs back to a waiting Run.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class SubmitToolOutputsRequest {
/** One item per tool call being answered. */
@JsonProperty("tool_outputs")
private List<SubmitToolOutputRequestItem> toolOutputs;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/ToolCall.java | package art.starrynift.runs;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Map;
/**
 * A tool invocation made during a run step. Exactly one of the
 * type-specific fields below is populated, matching {@link #type}.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class ToolCall {
/** The identifier of the tool call. */
private String id;
/** The tool type: "code_interpreter", "retrieval", or "function". */
private String type;
/** Present when type == "code_interpreter"; null otherwise. */
@JsonProperty("code_interpreter")
private ToolCallCodeInterpreter codeInterpreter;
/** Present when type == "retrieval"; null otherwise. */
private Map<String, String> retrieval;
/** Present when type == "function"; null otherwise. */
private ToolCallFunction function;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/ToolCallCodeInterpreter.java | package art.starrynift.runs;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.List;
/**
 * Details of a code_interpreter tool call: the code that was run and its outputs.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class ToolCallCodeInterpreter {
/** The code input passed to the interpreter. */
private String input;
/** The outputs (logs and/or images) produced by the interpreter. */
private List<ToolCallCodeInterpreterOutput> outputs;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/ToolCallCodeInterpreterOutput.java | package art.starrynift.runs;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * One output of a code_interpreter tool call. Exactly one of logs/image is
 * populated, matching {@link #type}.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class ToolCallCodeInterpreterOutput {
/** The output type, "logs" or "image". */
private String type;
/** Text log output; present when type == "logs". */
private String logs;
/** Image output; present when type == "image". */
private RunImage image;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/runs/ToolCallFunction.java | package art.starrynift.runs;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * Details of a function tool call made during a run step.
 */
@Data
@Builder
@NoArgsConstructor
@AllArgsConstructor
public class ToolCallFunction {
/** The name of the function that was called. */
private String name;
/** The arguments passed to the function, as a JSON string. */
private String arguments;
/** The function's output; null until the output has been submitted. */
private String output;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/AuthenticationInterceptor.java | package art.starrynift.service;
import okhttp3.Interceptor;
import okhttp3.Request;
import okhttp3.Response;
import java.io.IOException;
import java.util.Objects;
/**
 * OkHttp interceptor that attaches a Bearer authorization header, built from
 * the configured OpenAI token, to every outgoing request.
 */
public class AuthenticationInterceptor implements Interceptor {
    private final String headerValue;

    AuthenticationInterceptor(String token) {
        Objects.requireNonNull(token, "OpenAI token required");
        // Precompute the full header value once; it never changes per request.
        this.headerValue = "Bearer " + token;
    }

    @Override
    public Response intercept(Chain chain) throws IOException {
        Request authenticated =
                chain.request().newBuilder().header("Authorization", headerValue).build();
        return chain.proceed(authenticated);
    }
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/ChatCompletionRequestMixIn.java | package art.starrynift.service;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
import art.starrynift.completion.chat.ChatCompletionRequest;
/**
 * Jackson mix-in that attaches the custom (de)serializer for the polymorphic
 * "function_call" field of ChatCompletionRequest (string "none"/"auto" vs.
 * an object with a "name"), without modifying the model class itself.
 */
public abstract class ChatCompletionRequestMixIn {
@JsonSerialize(using = ChatCompletionRequestSerializerAndDeserializer.Serializer.class)
@JsonDeserialize(using = ChatCompletionRequestSerializerAndDeserializer.Deserializer.class)
abstract ChatCompletionRequest.ChatCompletionRequestFunctionCall getFunctionCall();
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/ChatCompletionRequestSerializerAndDeserializer.java | package art.starrynift.service;
import java.io.IOException;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.DeserializationContext;
import com.fasterxml.jackson.databind.JsonDeserializer;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.SerializerProvider;
import art.starrynift.completion.chat.ChatCompletionRequest;
/**
 * Custom Jackson (de)serialization for ChatCompletionRequest's "function_call"
 * field, which the API accepts either as the string "none"/"auto" or as an
 * object of the form {"name": "..."}.
 */
public class ChatCompletionRequestSerializerAndDeserializer {
public static class Serializer extends JsonSerializer<ChatCompletionRequest.ChatCompletionRequestFunctionCall> {
/**
 * Writes null for an absent name, a bare string for the sentinel values
 * "none"/"auto", and {"name": ...} for a concrete function name.
 */
@Override
public void serialize(ChatCompletionRequest.ChatCompletionRequestFunctionCall value, JsonGenerator gen, SerializerProvider serializers) throws IOException {
if (value == null || value.getName() == null) {
gen.writeNull();
} else if ("none".equals(value.getName()) || "auto".equals(value.getName())) {
gen.writeString(value.getName());
} else {
gen.writeStartObject();
gen.writeFieldName("name");
gen.writeString(value.getName());
gen.writeEndObject();
}
}
}
public static class Deserializer extends JsonDeserializer<ChatCompletionRequest.ChatCompletionRequestFunctionCall> {
/**
 * Reads either form back into a ChatCompletionRequestFunctionCall.
 * The token-advance order is load-bearing: for the object form it skips
 * the start token and the "name" key to land on the value token.
 * NOTE(review): assumes the object form contains exactly one key; extra
 * keys would desynchronize the parser — confirm the API never sends more.
 */
@Override
public ChatCompletionRequest.ChatCompletionRequestFunctionCall deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {
if (p.getCurrentToken().isStructStart()) {
p.nextToken(); //key
p.nextToken(); //value
}
return new ChatCompletionRequest.ChatCompletionRequestFunctionCall(p.getValueAsString());
}
}
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/ChatFunctionCallArgumentsSerializerAndDeserializer.java | package art.starrynift.service;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.core.JsonParseException;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.databind.*;
import com.fasterxml.jackson.databind.node.JsonNodeType;
import com.fasterxml.jackson.databind.node.TextNode;
import java.io.IOException;
/**
 * Custom Jackson (de)serialization for function-call arguments, which the API
 * transports as a JSON-encoded string but this SDK models as a JsonNode.
 */
public class ChatFunctionCallArgumentsSerializerAndDeserializer {
// Shared for re-parsing the embedded JSON string; ObjectMapper is thread-safe.
private final static ObjectMapper MAPPER = new ObjectMapper();
private ChatFunctionCallArgumentsSerializerAndDeserializer() {
}
public static class Serializer extends JsonSerializer<JsonNode> {
private Serializer() {
}
/**
 * Writes the node back out as a string: text nodes verbatim, anything
 * else pretty-printed JSON.
 */
@Override
public void serialize(JsonNode value, JsonGenerator gen, SerializerProvider serializers) throws IOException {
if (value == null) {
gen.writeNull();
} else {
gen.writeString(value instanceof TextNode ? value.asText() : value.toPrettyString());
}
}
}
public static class Deserializer extends JsonDeserializer<JsonNode> {
private Deserializer() {
}
/**
 * Reads the current value as a string and re-parses it as JSON; falls
 * back to reading the parser's own tree when the string is not itself
 * parseable JSON.
 */
@Override
public JsonNode deserialize(JsonParser p, DeserializationContext ctxt) throws IOException {
String json = p.getValueAsString();
if (json == null || p.currentToken() == JsonToken.VALUE_NULL) {
return null;
}
try {
JsonNode node = null;
try {
// The arguments may be double-encoded; try the embedded string first.
node = MAPPER.readTree(json);
} catch (JsonParseException ignored) {
// Not valid standalone JSON — fall through to reading the stream directly.
}
if (node == null || node.getNodeType() == JsonNodeType.MISSING) {
node = MAPPER.readTree(p);
}
return node;
} catch (Exception ex) {
// NOTE(review): swallows the failure and returns null after printing the
// stack trace; callers cannot distinguish "no arguments" from "bad
// arguments". Consider propagating a JsonProcessingException instead.
ex.printStackTrace();
return null;
}
}
}
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/ChatFunctionCallMixIn.java | package art.starrynift.service;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
/**
 * Jackson mix-in that attaches the string&lt;-&gt;JsonNode (de)serializer for a
 * chat function call's "arguments" field, without modifying the model class.
 */
public abstract class ChatFunctionCallMixIn {
@JsonSerialize(using = ChatFunctionCallArgumentsSerializerAndDeserializer.Serializer.class)
@JsonDeserialize(using = ChatFunctionCallArgumentsSerializerAndDeserializer.Deserializer.class)
abstract JsonNode getArguments();
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/ChatFunctionMixIn.java | package art.starrynift.service;
import com.fasterxml.jackson.databind.annotation.JsonSerialize;
/**
 * Jackson mix-in for {@code ChatFunction}: serializes the function's
 * parameter class as its generated JSON Schema rather than as a Class object.
 */
public abstract class ChatFunctionMixIn {

    @JsonSerialize(using = ChatFunctionParametersSerializer.class)
    abstract Class<?> getParametersClass();
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/ChatFunctionParametersSerializer.java | package art.starrynift.service;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.JsonSerializer;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializerProvider;
import com.kjetland.jackson.jsonSchema.JsonSchemaConfig;
import com.kjetland.jackson.jsonSchema.JsonSchemaGenerator;
import java.io.IOException;
/**
 * Serializes a function's parameter {@link Class} as a JSON Schema (draft 4)
 * object, so chat-function definitions carry a machine-readable description
 * of their expected arguments.
 */
public class ChatFunctionParametersSerializer extends JsonSerializer<Class<?>> {

    // Schema generation machinery is stateless and relatively expensive to
    // construct, so it is created once and shared by all serializer instances
    // instead of being rebuilt per instance.
    private static final ObjectMapper MAPPER = new ObjectMapper();
    private static final JsonSchemaConfig CONFIG = JsonSchemaConfig.vanillaJsonSchemaDraft4();
    private static final JsonSchemaGenerator SCHEMA_GENERATOR = new JsonSchemaGenerator(MAPPER, CONFIG);

    /**
     * Writes {@code value}'s JSON Schema, or JSON null when {@code value} is null.
     *
     * @throws RuntimeException (with cause) if schema generation fails
     */
    @Override
    public void serialize(Class<?> value, JsonGenerator gen, SerializerProvider serializers) throws IOException {
        if (value == null) {
            gen.writeNull();
            return;
        }
        try {
            JsonNode schema = SCHEMA_GENERATOR.generateJsonSchema(value);
            gen.writeObject(schema);
        } catch (Exception e) {
            throw new RuntimeException("Failed to generate JSON Schema", e);
        }
    }
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/ChatMessageAccumulator.java | package art.starrynift.service;
import art.starrynift.completion.chat.ChatFunctionCall;
import art.starrynift.completion.chat.ChatMessage;
/**
* Class that accumulates chat messages and provides utility methods for
* handling message chunks and function calls within a chat stream. This
* class is immutable.
*
*/
/**
 * Immutable pairing of a single streamed message chunk with the message
 * accumulated from the stream so far, plus convenience accessors for
 * function-call data.
 */
public class ChatMessageAccumulator {

    private final ChatMessage messageChunk;
    private final ChatMessage accumulatedMessage;

    /**
     * @param messageChunk       the chunk just received from the stream
     * @param accumulatedMessage the message accumulated so far
     */
    public ChatMessageAccumulator(ChatMessage messageChunk, ChatMessage accumulatedMessage) {
        this.messageChunk = messageChunk;
        this.accumulatedMessage = accumulatedMessage;
    }

    /**
     * @return true when the accumulated message carries a named function call.
     */
    public boolean isFunctionCall() {
        ChatFunctionCall call = getAccumulatedMessage().getFunctionCall();
        return call != null && call.getName() != null;
    }

    /**
     * @return true when the accumulated message is a plain chat message
     * (i.e. not a function call).
     */
    public boolean isChatMessage() {
        return !isFunctionCall();
    }

    /**
     * @return the raw chunk received from the stream.
     */
    public ChatMessage getMessageChunk() {
        return messageChunk;
    }

    /**
     * @return the message accumulated so far.
     */
    public ChatMessage getAccumulatedMessage() {
        return accumulatedMessage;
    }

    /**
     * Shorthand for {@code getMessageChunk().getFunctionCall()}.
     *
     * @return the function call carried by the chunk, possibly null.
     */
    public ChatFunctionCall getChatFunctionCallChunk() {
        return getMessageChunk().getFunctionCall();
    }

    /**
     * Shorthand for {@code getAccumulatedMessage().getFunctionCall()}.
     *
     * @return the function call accumulated so far, possibly null.
     */
    public ChatFunctionCall getAccumulatedChatFunctionCall() {
        return getAccumulatedMessage().getFunctionCall();
    }
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/DeepseekService.java | package art.starrynift.service;
import art.starrynift.*;
import art.starrynift.assistants.*;
import art.starrynift.audio.*;
import art.starrynift.billing.BillingUsage;
import art.starrynift.billing.Subscription;
import art.starrynift.client.DeepseekApi;
import art.starrynift.client.OpenAiApi;
import art.starrynift.completion.CompletionChunk;
import art.starrynift.completion.CompletionRequest;
import art.starrynift.completion.CompletionResult;
import art.starrynift.completion.chat.*;
import art.starrynift.edit.EditRequest;
import art.starrynift.edit.EditResult;
import art.starrynift.embedding.EmbeddingRequest;
import art.starrynift.embedding.EmbeddingResult;
import art.starrynift.file.File;
import art.starrynift.fine_tuning.FineTuningEvent;
import art.starrynift.fine_tuning.FineTuningJob;
import art.starrynift.fine_tuning.FineTuningJobRequest;
import art.starrynift.image.CreateImageEditRequest;
import art.starrynift.image.CreateImageRequest;
import art.starrynift.image.CreateImageVariationRequest;
import art.starrynift.image.ImageResult;
import art.starrynift.messages.Message;
import art.starrynift.messages.MessageFile;
import art.starrynift.messages.MessageRequest;
import art.starrynift.messages.ModifyMessageRequest;
import art.starrynift.model.Model;
import art.starrynift.moderation.ModerationRequest;
import art.starrynift.moderation.ModerationResult;
import art.starrynift.runs.*;
import art.starrynift.threads.ThreadRequest;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.PropertyNamingStrategy;
import com.fasterxml.jackson.databind.node.TextNode;
import io.reactivex.BackpressureStrategy;
import io.reactivex.Flowable;
import io.reactivex.Single;
import okhttp3.*;
import retrofit2.Call;
import retrofit2.HttpException;
import retrofit2.Retrofit;
import retrofit2.adapter.rxjava2.RxJava2CallAdapterFactory;
import retrofit2.converter.jackson.JacksonConverterFactory;
import javax.validation.constraints.NotNull;
import java.io.IOException;
import java.time.Duration;
import java.time.LocalDate;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
/**
 * High-level client for the Deepseek HTTP API (OpenAI-compatible surface).
 * Wraps a Retrofit-generated {@link DeepseekApi} and exposes blocking and
 * streaming (RxJava Flowable) variants of the endpoints.
 */
public class DeepseekService {

    private static final String BASE_URL = "https://api.deepseek.com/";
    private static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(10);
    // Shared, pre-configured mapper used for error parsing, SSE decoding and
    // query-parameter conversion.
    private static final ObjectMapper mapper = defaultObjectMapper();

    private final DeepseekApi api;
    private final ExecutorService executorService;

    /**
     * Creates a new DeepseekService that wraps DeepseekApi.
     *
     * @param token Deepseek API token string "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
     */
    public DeepseekService(final String token) {
        this(token, DEFAULT_TIMEOUT);
    }

    /**
     * Creates a new DeepseekService that wraps DeepseekApi.
     *
     * @param token   Deepseek API token string "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
     * @param timeout http read timeout, Duration.ZERO means no timeout
     */
    public DeepseekService(final String token, final Duration timeout) {
        ObjectMapper mapper = defaultObjectMapper();
        OkHttpClient client = defaultClient(token, timeout);
        Retrofit retrofit = defaultRetrofit(client, mapper);
        this.api = retrofit.create(DeepseekApi.class);
        this.executorService = client.dispatcher().executorService();
    }

    /**
     * Creates a new DeepseekService that wraps DeepseekApi.
     * Use this if you need more customization, but use
     * DeepseekService(api, executorService) if you use streaming and want to
     * shut down instantly.
     *
     * @param api DeepseekApi instance to use for all methods
     */
    public DeepseekService(final DeepseekApi api) {
        this.api = api;
        this.executorService = null;
    }

    /**
     * Creates a new DeepseekService that wraps DeepseekApi.
     * The ExecutorService must be the one you get from the client you created
     * the api with, otherwise shutdownExecutor() won't work.
     * <p>
     * Use this if you need more customization.
     *
     * @param api             DeepseekApi instance to use for all methods
     * @param executorService the ExecutorService from client.dispatcher().executorService()
     */
    public DeepseekService(final DeepseekApi api, final ExecutorService executorService) {
        this.api = api;
        this.executorService = executorService;
    }

    public List<Model> listModels() {
        return execute(api.listModels()).data;
    }

    public Model getModel(String modelId) {
        return execute(api.getModel(modelId));
    }

    public CompletionResult createCompletion(CompletionRequest request) {
        return execute(api.createCompletion(request));
    }

    public Flowable<CompletionChunk> streamCompletion(CompletionRequest request) {
        request.setStream(true);
        return stream(api.createCompletionStream(request), CompletionChunk.class);
    }

    public ChatCompletionResult createChatCompletion(ChatCompletionRequest request) {
        return execute(api.createChatCompletion(request));
    }

    public Flowable<ChatCompletionChunk> streamChatCompletion(ChatCompletionRequest request) {
        request.setStream(true);
        return stream(api.createChatCompletionStream(request), ChatCompletionChunk.class);
    }

    public EditResult createEdit(EditRequest request) {
        return execute(api.createEdit(request));
    }

    public EmbeddingResult createEmbeddings(EmbeddingRequest request) {
        return execute(api.createEmbeddings(request));
    }

    public List<File> listFiles() {
        return execute(api.listFiles()).data;
    }

    /**
     * Uploads a local file for the given purpose.
     *
     * @param purpose  the intended use of the file (e.g. "fine-tune")
     * @param filepath path to the local file; also used as the multipart file name
     */
    public File uploadFile(String purpose, String filepath) {
        java.io.File file = new java.io.File(filepath);
        RequestBody purposeBody = RequestBody.create(MultipartBody.FORM, purpose);
        // NOTE(review): "text" is not a valid type/subtype media type, so
        // MediaType.parse returns null and the part is sent without an explicit
        // Content-Type — confirm whether "text/plain" was intended.
        RequestBody fileBody = RequestBody.create(MediaType.parse("text"), file);
        MultipartBody.Part body = MultipartBody.Part.createFormData("file", filepath, fileBody);
        return execute(api.uploadFile(purposeBody, body));
    }

    public DeleteResult deleteFile(String fileId) {
        return execute(api.deleteFile(fileId));
    }

    public File retrieveFile(String fileId) {
        return execute(api.retrieveFile(fileId));
    }

    public ResponseBody retrieveFileContent(String fileId) {
        return execute(api.retrieveFileContent(fileId));
    }

    public FineTuningJob createFineTuningJob(FineTuningJobRequest request) {
        return execute(api.createFineTuningJob(request));
    }

    public List<FineTuningJob> listFineTuningJobs() {
        return execute(api.listFineTuningJobs()).data;
    }

    public FineTuningJob retrieveFineTuningJob(String fineTuningJobId) {
        return execute(api.retrieveFineTuningJob(fineTuningJobId));
    }

    public FineTuningJob cancelFineTuningJob(String fineTuningJobId) {
        return execute(api.cancelFineTuningJob(fineTuningJobId));
    }

    public List<FineTuningEvent> listFineTuningJobEvents(String fineTuningJobId) {
        return execute(api.listFineTuningJobEvents(fineTuningJobId)).data;
    }

    public CompletionResult createFineTuneCompletion(CompletionRequest request) {
        return execute(api.createFineTuneCompletion(request));
    }

    public DeleteResult deleteFineTune(String fineTuneId) {
        return execute(api.deleteFineTune(fineTuneId));
    }

    public ImageResult createImage(CreateImageRequest request) {
        return execute(api.createImage(request));
    }

    public ImageResult createImageEdit(CreateImageEditRequest request, String imagePath, String maskPath) {
        java.io.File image = new java.io.File(imagePath);
        java.io.File mask = null;
        if (maskPath != null) {
            mask = new java.io.File(maskPath);
        }
        return createImageEdit(request, image, mask);
    }

    /**
     * Edits an image according to the request; {@code mask} may be null.
     */
    public ImageResult createImageEdit(CreateImageEditRequest request, java.io.File image, java.io.File mask) {
        RequestBody imageBody = RequestBody.create(MediaType.parse("image"), image);
        MultipartBody.Builder builder = new MultipartBody.Builder()
                .setType(MediaType.get("multipart/form-data"))
                .addFormDataPart("prompt", request.getPrompt())
                .addFormDataPart("size", request.getSize())
                .addFormDataPart("response_format", request.getResponseFormat())
                .addFormDataPart("image", "image", imageBody);
        if (request.getN() != null) {
            builder.addFormDataPart("n", request.getN().toString());
        }
        if (mask != null) {
            RequestBody maskBody = RequestBody.create(MediaType.parse("image"), mask);
            builder.addFormDataPart("mask", "mask", maskBody);
        }
        if (request.getModel() != null) {
            builder.addFormDataPart("model", request.getModel());
        }
        return execute(api.createImageEdit(builder.build()));
    }

    public ImageResult createImageVariation(CreateImageVariationRequest request, String imagePath) {
        java.io.File image = new java.io.File(imagePath);
        return createImageVariation(request, image);
    }

    public ImageResult createImageVariation(CreateImageVariationRequest request, java.io.File image) {
        RequestBody imageBody = RequestBody.create(MediaType.parse("image"), image);
        MultipartBody.Builder builder = new MultipartBody.Builder()
                .setType(MediaType.get("multipart/form-data"))
                .addFormDataPart("size", request.getSize())
                .addFormDataPart("response_format", request.getResponseFormat())
                .addFormDataPart("image", "image", imageBody);
        if (request.getN() != null) {
            builder.addFormDataPart("n", request.getN().toString());
        }
        if (request.getModel() != null) {
            builder.addFormDataPart("model", request.getModel());
        }
        return execute(api.createImageVariation(builder.build()));
    }

    public TranscriptionResult createTranscription(CreateTranscriptionRequest request, String audioPath) {
        java.io.File audio = new java.io.File(audioPath);
        return createTranscription(request, audio);
    }

    public TranscriptionResult createTranscription(CreateTranscriptionRequest request, java.io.File audio) {
        RequestBody audioBody = RequestBody.create(MediaType.parse("audio"), audio);
        MultipartBody.Builder builder = new MultipartBody.Builder()
                .setType(MediaType.get("multipart/form-data"))
                .addFormDataPart("model", request.getModel())
                .addFormDataPart("file", audio.getName(), audioBody);
        if (request.getPrompt() != null) {
            builder.addFormDataPart("prompt", request.getPrompt());
        }
        if (request.getResponseFormat() != null) {
            builder.addFormDataPart("response_format", request.getResponseFormat());
        }
        if (request.getTemperature() != null) {
            builder.addFormDataPart("temperature", request.getTemperature().toString());
        }
        if (request.getLanguage() != null) {
            builder.addFormDataPart("language", request.getLanguage());
        }
        return execute(api.createTranscription(builder.build()));
    }

    public TranslationResult createTranslation(CreateTranslationRequest request, String audioPath) {
        java.io.File audio = new java.io.File(audioPath);
        return createTranslation(request, audio);
    }

    public TranslationResult createTranslation(CreateTranslationRequest request, java.io.File audio) {
        RequestBody audioBody = RequestBody.create(MediaType.parse("audio"), audio);
        MultipartBody.Builder builder = new MultipartBody.Builder()
                .setType(MediaType.get("multipart/form-data"))
                .addFormDataPart("model", request.getModel())
                .addFormDataPart("file", audio.getName(), audioBody);
        if (request.getPrompt() != null) {
            builder.addFormDataPart("prompt", request.getPrompt());
        }
        if (request.getResponseFormat() != null) {
            builder.addFormDataPart("response_format", request.getResponseFormat());
        }
        if (request.getTemperature() != null) {
            builder.addFormDataPart("temperature", request.getTemperature().toString());
        }
        return execute(api.createTranslation(builder.build()));
    }

    public ModerationResult createModeration(ModerationRequest request) {
        return execute(api.createModeration(request));
    }

    public ResponseBody createSpeech(CreateSpeechRequest request) {
        return execute(api.createSpeech(request));
    }

    public Assistant createAssistant(AssistantRequest request) {
        return execute(api.createAssistant(request));
    }

    public Assistant retrieveAssistant(String assistantId) {
        return execute(api.retrieveAssistant(assistantId));
    }

    public Assistant modifyAssistant(String assistantId, ModifyAssistantRequest request) {
        return execute(api.modifyAssistant(assistantId, request));
    }

    public DeleteResult deleteAssistant(String assistantId) {
        return execute(api.deleteAssistant(assistantId));
    }

    public OpenAiResponse<Assistant> listAssistants(ListSearchParameters params) {
        Map<String, Object> queryParameters = mapper.convertValue(params, new TypeReference<Map<String, Object>>() {
        });
        return execute(api.listAssistants(queryParameters));
    }

    public AssistantFile createAssistantFile(String assistantId, AssistantFileRequest fileRequest) {
        return execute(api.createAssistantFile(assistantId, fileRequest));
    }

    public AssistantFile retrieveAssistantFile(String assistantId, String fileId) {
        return execute(api.retrieveAssistantFile(assistantId, fileId));
    }

    public DeleteResult deleteAssistantFile(String assistantId, String fileId) {
        return execute(api.deleteAssistantFile(assistantId, fileId));
    }

    public OpenAiResponse<AssistantFile> listAssistantFiles(String assistantId, ListSearchParameters params) {
        Map<String, Object> queryParameters = mapper.convertValue(params, new TypeReference<Map<String, Object>>() {
        });
        return execute(api.listAssistantFiles(assistantId, queryParameters));
    }

    public Thread createThread(ThreadRequest request) {
        return execute(api.createThread(request));
    }

    public Thread retrieveThread(String threadId) {
        return execute(api.retrieveThread(threadId));
    }

    public Thread modifyThread(String threadId, ThreadRequest request) {
        return execute(api.modifyThread(threadId, request));
    }

    public DeleteResult deleteThread(String threadId) {
        return execute(api.deleteThread(threadId));
    }

    public Message createMessage(String threadId, MessageRequest request) {
        return execute(api.createMessage(threadId, request));
    }

    public Message retrieveMessage(String threadId, String messageId) {
        return execute(api.retrieveMessage(threadId, messageId));
    }

    public Message modifyMessage(String threadId, String messageId, ModifyMessageRequest request) {
        return execute(api.modifyMessage(threadId, messageId, request));
    }

    public OpenAiResponse<Message> listMessages(String threadId) {
        return execute(api.listMessages(threadId));
    }

    public OpenAiResponse<Message> listMessages(String threadId, ListSearchParameters params) {
        Map<String, Object> queryParameters = mapper.convertValue(params, new TypeReference<Map<String, Object>>() {
        });
        return execute(api.listMessages(threadId, queryParameters));
    }

    public MessageFile retrieveMessageFile(String threadId, String messageId, String fileId) {
        return execute(api.retrieveMessageFile(threadId, messageId, fileId));
    }

    public OpenAiResponse<MessageFile> listMessageFiles(String threadId, String messageId) {
        return execute(api.listMessageFiles(threadId, messageId));
    }

    public OpenAiResponse<MessageFile> listMessageFiles(String threadId, String messageId, ListSearchParameters params) {
        Map<String, Object> queryParameters = mapper.convertValue(params, new TypeReference<Map<String, Object>>() {
        });
        return execute(api.listMessageFiles(threadId, messageId, queryParameters));
    }

    public Run createRun(String threadId, RunCreateRequest runCreateRequest) {
        return execute(api.createRun(threadId, runCreateRequest));
    }

    public Run retrieveRun(String threadId, String runId) {
        return execute(api.retrieveRun(threadId, runId));
    }

    public Run modifyRun(String threadId, String runId, Map<String, String> metadata) {
        return execute(api.modifyRun(threadId, runId, metadata));
    }

    public OpenAiResponse<Run> listRuns(String threadId, ListSearchParameters listSearchParameters) {
        Map<String, String> search = new HashMap<>();
        if (listSearchParameters != null) {
            // Use the shared mapper and a typed reference (consistent with
            // listAssistants) instead of building a fresh ObjectMapper and doing
            // a raw Map.class conversion, which could leave non-String values in
            // a Map<String, String> at runtime.
            search = mapper.convertValue(listSearchParameters, new TypeReference<Map<String, String>>() {
            });
        }
        return execute(api.listRuns(threadId, search));
    }

    public Run submitToolOutputs(String threadId, String runId, SubmitToolOutputsRequest submitToolOutputsRequest) {
        return execute(api.submitToolOutputs(threadId, runId, submitToolOutputsRequest));
    }

    public Run cancelRun(String threadId, String runId) {
        return execute(api.cancelRun(threadId, runId));
    }

    public Run createThreadAndRun(CreateThreadAndRunRequest createThreadAndRunRequest) {
        return execute(api.createThreadAndRun(createThreadAndRunRequest));
    }

    public RunStep retrieveRunStep(String threadId, String runId, String stepId) {
        return execute(api.retrieveRunStep(threadId, runId, stepId));
    }

    public OpenAiResponse<RunStep> listRunSteps(String threadId, String runId, ListSearchParameters listSearchParameters) {
        Map<String, String> search = new HashMap<>();
        if (listSearchParameters != null) {
            // Same typed conversion as listRuns; see note there.
            search = mapper.convertValue(listSearchParameters, new TypeReference<Map<String, String>>() {
            });
        }
        return execute(api.listRunSteps(threadId, runId, search));
    }

    /**
     * Calls the Deepseek api, returns the response, and parses error messages
     * if the request fails.
     */
    public static <T> T execute(Single<T> apiCall) {
        try {
            return apiCall.blockingGet();
        } catch (HttpException e) {
            try {
                if (e.response() == null || e.response().errorBody() == null) {
                    throw e;
                }
                String errorBody = e.response().errorBody().string();
                OpenAiError error = mapper.readValue(errorBody, OpenAiError.class);
                throw new OpenAiHttpException(error, e, e.code());
            } catch (IOException ex) {
                // Couldn't parse the structured error; rethrow the raw HTTP error.
                throw e;
            }
        }
    }

    /**
     * Calls the Deepseek api and returns a Flowable of SSE for streaming,
     * omitting the last message.
     *
     * @param apiCall The api call
     */
    public static Flowable<SSE> stream(Call<ResponseBody> apiCall) {
        return stream(apiCall, false);
    }

    /**
     * Calls the Deepseek api and returns a Flowable of SSE for streaming.
     *
     * @param apiCall  The api call
     * @param emitDone If true the last message ([DONE]) is emitted
     */
    public static Flowable<SSE> stream(Call<ResponseBody> apiCall, boolean emitDone) {
        return Flowable.create(emitter -> apiCall.enqueue(new ResponseBodyCallback(emitter, emitDone)), BackpressureStrategy.BUFFER);
    }

    /**
     * Calls the Deepseek api and returns a Flowable of type T for streaming,
     * omitting the last message.
     *
     * @param apiCall The api call
     * @param cl      Class of type T to return
     */
    public static <T> Flowable<T> stream(Call<ResponseBody> apiCall, Class<T> cl) {
        return stream(apiCall).map(sse -> mapper.readValue(sse.getData(), cl));
    }

    /**
     * Shuts down the OkHttp ExecutorService.
     * The default behaviour of OkHttp's ExecutorService (ConnectionPool)
     * is to shut down after an idle timeout of 60s.
     * Call this method to shut down the ExecutorService immediately.
     */
    public void shutdownExecutor() {
        Objects.requireNonNull(this.executorService, "executorService must be set in order to shut down");
        this.executorService.shutdown();
    }

    public static OpenAiApi buildApi(String token, Duration timeout) {
        ObjectMapper mapper = defaultObjectMapper();
        OkHttpClient client = defaultClient(token, timeout);
        Retrofit retrofit = defaultRetrofit(client, mapper);
        return retrofit.create(OpenAiApi.class);
    }

    /**
     * Builds the mapper used throughout the service: lenient on unknown
     * properties, snake_case wire format, and mix-ins for the chat-function
     * serialization quirks.
     */
    public static ObjectMapper defaultObjectMapper() {
        ObjectMapper mapper = new ObjectMapper();
        mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
        mapper.setPropertyNamingStrategy(PropertyNamingStrategy.SNAKE_CASE);
        mapper.addMixIn(ChatFunction.class, ChatFunctionMixIn.class);
        mapper.addMixIn(ChatCompletionRequest.class, ChatCompletionRequestMixIn.class);
        mapper.addMixIn(ChatFunctionCall.class, ChatFunctionCallMixIn.class);
        return mapper;
    }

    public static OkHttpClient defaultClient(String token, Duration timeout) {
        return new OkHttpClient.Builder()
                .addInterceptor(new AuthenticationInterceptor(token))
                .connectionPool(new ConnectionPool(5, 1, TimeUnit.SECONDS))
                .readTimeout(timeout.toMillis(), TimeUnit.MILLISECONDS)
                .build();
    }

    public static Retrofit defaultRetrofit(OkHttpClient client, ObjectMapper mapper) {
        return new Retrofit.Builder()
                .baseUrl(BASE_URL)
                .client(client)
                .addConverterFactory(JacksonConverterFactory.create(mapper))
                .addCallAdapterFactory(RxJava2CallAdapterFactory.create())
                .build();
    }

    /**
     * Folds a stream of chat-completion chunks into accumulators that pair each
     * chunk with the message (content or function call) built up so far.
     * Function-call name/argument fragments are concatenated across chunks; on
     * the final chunk the accumulated argument text is parsed into a JSON node.
     */
    public Flowable<ChatMessageAccumulator> mapStreamToAccumulator(Flowable<ChatCompletionChunk> flowable) {
        ChatFunctionCall functionCall = new ChatFunctionCall(null, null);
        ChatMessage accumulatedMessage = new ChatMessage(ChatMessageRole.ASSISTANT.value(), null);
        return flowable.map(chunk -> {
            ChatMessage messageChunk = chunk.getChoices().get(0).getMessage();
            if (messageChunk.getFunctionCall() != null) {
                if (messageChunk.getFunctionCall().getName() != null) {
                    String namePart = messageChunk.getFunctionCall().getName();
                    functionCall.setName((functionCall.getName() == null ? "" : functionCall.getName()) + namePart);
                }
                if (messageChunk.getFunctionCall().getArguments() != null) {
                    // The arguments node is known non-null inside this branch.
                    String argumentsPart = messageChunk.getFunctionCall().getArguments().asText();
                    functionCall.setArguments(new TextNode((functionCall.getArguments() == null ? "" : functionCall.getArguments().asText()) + argumentsPart));
                }
                accumulatedMessage.setFunctionCall(functionCall);
            } else {
                accumulatedMessage.setContent((accumulatedMessage.getContent() == null ? "" : accumulatedMessage.getContent()) + (messageChunk.getContent() == null ? "" : messageChunk.getContent()));
            }
            if (chunk.getChoices().get(0).getFinishReason() != null) { // last chunk
                if (functionCall.getArguments() != null) {
                    // Re-parse the accumulated argument text into a proper JSON node.
                    functionCall.setArguments(mapper.readTree(functionCall.getArguments().asText()));
                    accumulatedMessage.setFunctionCall(functionCall);
                }
            }
            return new ChatMessageAccumulator(messageChunk, accumulatedMessage);
        });
    }

    /**
     * Account information inquiry: including total amount and other information.
     *
     * @return Account information.
     */
    public Subscription subscription() {
        // Routed through execute() so HTTP failures get the same structured
        // error parsing as every other endpoint.
        return execute(api.subscription());
    }

    /**
     * Account API consumption amount information inquiry.
     * Up to 100 days of inquiry.
     *
     * @param starDate start date of the query window (inclusive)
     * @param endDate  end date of the query window (inclusive)
     * @return Consumption amount information.
     */
    public BillingUsage billingUsage(@NotNull LocalDate starDate, @NotNull LocalDate endDate) {
        return execute(api.billingUsage(starDate, endDate));
    }
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/FunctionExecutor.java | package art.starrynift.service;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;
import com.fasterxml.jackson.databind.node.TextNode;
import art.starrynift.completion.chat.ChatFunction;
import art.starrynift.completion.chat.ChatFunctionCall;
import art.starrynift.completion.chat.ChatMessage;
import art.starrynift.completion.chat.ChatMessageRole;
public class FunctionExecutor {
private ObjectMapper MAPPER = new ObjectMapper();
private final Map<String, ChatFunction> FUNCTIONS = new HashMap<>();
public FunctionExecutor(List<ChatFunction> functions) {
setFunctions(functions);
}
public FunctionExecutor(List<ChatFunction> functions, ObjectMapper objectMapper) {
setFunctions(functions);
setObjectMapper(objectMapper);
}
public Optional<ChatMessage> executeAndConvertToMessageSafely(ChatFunctionCall call) {
try {
return Optional.ofNullable(executeAndConvertToMessage(call));
} catch (Exception ignored) {
return Optional.empty();
}
}
public ChatMessage executeAndConvertToMessageHandlingExceptions(ChatFunctionCall call) {
try {
return executeAndConvertToMessage(call);
} catch (Exception exception) {
exception.printStackTrace();
return convertExceptionToMessage(exception);
}
}
public ChatMessage convertExceptionToMessage(Exception exception) {
String error = exception.getMessage() == null ? exception.toString() : exception.getMessage();
return new ChatMessage(ChatMessageRole.FUNCTION.value(), "{\"error\": \"" + error + "\"}", "error");
}
public ChatMessage executeAndConvertToMessage(ChatFunctionCall call) {
return new ChatMessage(ChatMessageRole.FUNCTION.value(), executeAndConvertToJson(call).toPrettyString(), call.getName());
}
public JsonNode executeAndConvertToJson(ChatFunctionCall call) {
try {
Object execution = execute(call);
if (execution instanceof TextNode) {
JsonNode objectNode = MAPPER.readTree(((TextNode) execution).asText());
if (objectNode.isMissingNode())
return (JsonNode) execution;
return objectNode;
}
if (execution instanceof ObjectNode) {
return (JsonNode) execution;
}
if (execution instanceof String) {
JsonNode objectNode = MAPPER.readTree((String) execution);
if (objectNode.isMissingNode())
throw new RuntimeException("Parsing exception");
return objectNode;
}
return MAPPER.readValue(MAPPER.writeValueAsString(execution), JsonNode.class);
} catch (Exception e) {
throw new RuntimeException(e);
}
}
@SuppressWarnings("unchecked")
public <T> T execute(ChatFunctionCall call) {
ChatFunction function = FUNCTIONS.get(call.getName());
Object obj;
try {
JsonNode arguments = call.getArguments();
obj = MAPPER.readValue(arguments instanceof TextNode ? arguments.asText() : arguments.toPrettyString(), function.getParametersClass());
} catch (JsonProcessingException e) {
throw new RuntimeException(e);
}
return (T) function.getExecutor().apply(obj);
}
public List<ChatFunction> getFunctions() {
return new ArrayList<>(FUNCTIONS.values());
}
public void setFunctions(List<ChatFunction> functions) {
this.FUNCTIONS.clear();
functions.forEach(f -> this.FUNCTIONS.put(f.getName(), f));
}
public void setObjectMapper(ObjectMapper objectMapper) {
this.MAPPER = objectMapper;
}
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/OpenAiService.java | package art.starrynift.service;
import java.io.IOException;
import java.time.Duration;
import java.time.LocalDate;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import javax.validation.constraints.NotNull;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.PropertyNamingStrategy;
import com.fasterxml.jackson.databind.node.TextNode;
import art.starrynift.DeleteResult;
import art.starrynift.ListSearchParameters;
import art.starrynift.OpenAiError;
import art.starrynift.OpenAiHttpException;
import art.starrynift.OpenAiResponse;
import art.starrynift.assistants.Assistant;
import art.starrynift.assistants.AssistantFile;
import art.starrynift.assistants.AssistantFileRequest;
import art.starrynift.assistants.AssistantRequest;
import art.starrynift.assistants.ModifyAssistantRequest;
import art.starrynift.audio.CreateSpeechRequest;
import art.starrynift.audio.CreateTranscriptionRequest;
import art.starrynift.audio.CreateTranslationRequest;
import art.starrynift.audio.TranscriptionResult;
import art.starrynift.audio.TranslationResult;
import art.starrynift.billing.BillingUsage;
import art.starrynift.billing.Subscription;
import art.starrynift.client.OpenAiApi;
import art.starrynift.completion.CompletionChunk;
import art.starrynift.completion.CompletionRequest;
import art.starrynift.completion.CompletionResult;
import art.starrynift.completion.chat.ChatCompletionChunk;
import art.starrynift.completion.chat.ChatCompletionRequest;
import art.starrynift.completion.chat.ChatCompletionResult;
import art.starrynift.completion.chat.ChatFunction;
import art.starrynift.completion.chat.ChatFunctionCall;
import art.starrynift.completion.chat.ChatMessage;
import art.starrynift.completion.chat.ChatMessageRole;
import art.starrynift.edit.EditRequest;
import art.starrynift.edit.EditResult;
import art.starrynift.embedding.EmbeddingRequest;
import art.starrynift.embedding.EmbeddingResult;
import art.starrynift.file.File;
import art.starrynift.fine_tuning.FineTuningEvent;
import art.starrynift.fine_tuning.FineTuningJob;
import art.starrynift.fine_tuning.FineTuningJobRequest;
import art.starrynift.image.CreateImageEditRequest;
import art.starrynift.image.CreateImageRequest;
import art.starrynift.image.CreateImageVariationRequest;
import art.starrynift.image.ImageResult;
import art.starrynift.messages.Message;
import art.starrynift.messages.MessageFile;
import art.starrynift.messages.MessageRequest;
import art.starrynift.messages.ModifyMessageRequest;
import art.starrynift.model.Model;
import art.starrynift.moderation.ModerationRequest;
import art.starrynift.moderation.ModerationResult;
import art.starrynift.runs.CreateThreadAndRunRequest;
import art.starrynift.runs.Run;
import art.starrynift.runs.RunCreateRequest;
import art.starrynift.runs.RunStep;
import art.starrynift.runs.SubmitToolOutputsRequest;
import art.starrynift.threads.ThreadRequest;
import io.reactivex.BackpressureStrategy;
import io.reactivex.Flowable;
import io.reactivex.Single;
import okhttp3.ConnectionPool;
import okhttp3.MediaType;
import okhttp3.MultipartBody;
import okhttp3.OkHttpClient;
import okhttp3.RequestBody;
import okhttp3.ResponseBody;
import retrofit2.Call;
import retrofit2.HttpException;
import retrofit2.Retrofit;
import retrofit2.adapter.rxjava2.RxJava2CallAdapterFactory;
import retrofit2.converter.jackson.JacksonConverterFactory;
public class OpenAiService {
    private static final String BASE_URL = "https://api.openai.com/";
    private static final Duration DEFAULT_TIMEOUT = Duration.ofSeconds(10);
    // Shared, thread-safe mapper used for error-body parsing and SSE chunk deserialization.
    private static final ObjectMapper mapper = defaultObjectMapper();
    private final OpenAiApi api;
    // Executor backing OkHttp's dispatcher; null when the api instance was supplied
    // directly, in which case shutdownExecutor() cannot be used.
    private final ExecutorService executorService;
    /**
     * Creates a new OpenAiService that wraps OpenAiApi
     *
     * @param token OpenAi token string "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
     */
    public OpenAiService(final String token) {
        this(token, DEFAULT_TIMEOUT);
    }
    /**
     * Creates a new OpenAiService that wraps OpenAiApi
     *
     * @param token OpenAi token string "sk-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
     * @param timeout http read timeout, Duration.ZERO means no timeout
     */
    public OpenAiService(final String token, final Duration timeout) {
        ObjectMapper mapper = defaultObjectMapper();
        OkHttpClient client = defaultClient(token, timeout);
        Retrofit retrofit = defaultRetrofit(client, mapper);
        this.api = retrofit.create(OpenAiApi.class);
        // Keep a handle on OkHttp's executor so shutdownExecutor() can stop it eagerly.
        this.executorService = client.dispatcher().executorService();
    }
    /**
     * Creates a new OpenAiService that wraps OpenAiApi.
     * Use this if you need more customization, but use OpenAiService(api, executorService) if you use streaming and
     * want to shut down instantly
     *
     * @param api OpenAiApi instance to use for all methods
     */
    public OpenAiService(final OpenAiApi api) {
        this.api = api;
        this.executorService = null;
    }
    /**
     * Creates a new OpenAiService that wraps OpenAiApi.
     * The ExecutorService must be the one you get from the client you created the api with
     * otherwise shutdownExecutor() won't work.
     * <p>
     * Use this if you need more customization.
     *
     * @param api OpenAiApi instance to use for all methods
     * @param executorService the ExecutorService from client.dispatcher().executorService()
     */
    public OpenAiService(final OpenAiApi api, final ExecutorService executorService) {
        this.api = api;
        this.executorService = executorService;
    }
    // ---- Models, completions, edits, embeddings, file listing ----
    public List<Model> listModels() {
        return execute(api.listModels()).data;
    }
    public Model getModel(String modelId) {
        return execute(api.getModel(modelId));
    }
    public CompletionResult createCompletion(CompletionRequest request) {
        return execute(api.createCompletion(request));
    }
    /** Streams a completion as SSE chunks; note this mutates the request by forcing stream=true. */
    public Flowable<CompletionChunk> streamCompletion(CompletionRequest request) {
        request.setStream(true);
        return stream(api.createCompletionStream(request), CompletionChunk.class);
    }
    public ChatCompletionResult createChatCompletion(ChatCompletionRequest request) {
        return execute(api.createChatCompletion(request));
    }
    /** Streams a chat completion as SSE chunks; mutates the request by forcing stream=true. */
    public Flowable<ChatCompletionChunk> streamChatCompletion(ChatCompletionRequest request) {
        request.setStream(true);
        return stream(api.createChatCompletionStream(request), ChatCompletionChunk.class);
    }
    public EditResult createEdit(EditRequest request) {
        return execute(api.createEdit(request));
    }
    public EmbeddingResult createEmbeddings(EmbeddingRequest request) {
        return execute(api.createEmbeddings(request));
    }
    public List<File> listFiles() {
        return execute(api.listFiles()).data;
    }
public File uploadFile(String purpose, String filepath) {
java.io.File file = new java.io.File(filepath);
RequestBody purposeBody = RequestBody.create(MultipartBody.FORM, purpose);
RequestBody fileBody = RequestBody.create(MediaType.parse("text"), file);
MultipartBody.Part body = MultipartBody.Part.createFormData("file", filepath, fileBody);
return execute(api.uploadFile(purposeBody, body));
}
    // ---- Files, fine-tuning, image generation ----
    public DeleteResult deleteFile(String fileId) {
        return execute(api.deleteFile(fileId));
    }
    public File retrieveFile(String fileId) {
        return execute(api.retrieveFile(fileId));
    }
    /** Returns the raw file content; the caller is responsible for closing the ResponseBody. */
    public ResponseBody retrieveFileContent(String fileId) {
        return execute(api.retrieveFileContent(fileId));
    }
    public FineTuningJob createFineTuningJob(FineTuningJobRequest request) {
        return execute(api.createFineTuningJob(request));
    }
    public List<FineTuningJob> listFineTuningJobs() {
        return execute(api.listFineTuningJobs()).data;
    }
    public FineTuningJob retrieveFineTuningJob(String fineTuningJobId) {
        return execute(api.retrieveFineTuningJob(fineTuningJobId));
    }
    public FineTuningJob cancelFineTuningJob(String fineTuningJobId) {
        return execute(api.cancelFineTuningJob(fineTuningJobId));
    }
    public List<FineTuningEvent> listFineTuningJobEvents(String fineTuningJobId) {
        return execute(api.listFineTuningJobEvents(fineTuningJobId)).data;
    }
    public CompletionResult createFineTuneCompletion(CompletionRequest request) {
        return execute(api.createFineTuneCompletion(request));
    }
    public DeleteResult deleteFineTune(String fineTuneId) {
        return execute(api.deleteFineTune(fineTuneId));
    }
    public ImageResult createImage(CreateImageRequest request) {
        return execute(api.createImage(request));
    }
    /** Edits an image given local file paths; maskPath may be null. */
    public ImageResult createImageEdit(CreateImageEditRequest request, String imagePath, String maskPath) {
        java.io.File image = new java.io.File(imagePath);
        java.io.File mask = null;
        if (maskPath != null) {
            mask = new java.io.File(maskPath);
        }
        return createImageEdit(request, image, mask);
    }
    /**
     * Edits an image via multipart upload. The mask part is optional.
     * NOTE(review): prompt, size and response_format are added unconditionally —
     * addFormDataPart throws on a null value, so confirm callers always populate them.
     */
    public ImageResult createImageEdit(CreateImageEditRequest request, java.io.File image, java.io.File mask) {
        RequestBody imageBody = RequestBody.create(MediaType.parse("image"), image);
        MultipartBody.Builder builder = new MultipartBody.Builder()
                .setType(MediaType.get("multipart/form-data"))
                .addFormDataPart("prompt", request.getPrompt())
                .addFormDataPart("size", request.getSize())
                .addFormDataPart("response_format", request.getResponseFormat())
                .addFormDataPart("image", "image", imageBody);
        if (request.getN() != null) {
            builder.addFormDataPart("n", request.getN().toString());
        }
        if (mask != null) {
            RequestBody maskBody = RequestBody.create(MediaType.parse("image"), mask);
            builder.addFormDataPart("mask", "mask", maskBody);
        }
        if (request.getModel() != null) {
            builder.addFormDataPart("model", request.getModel());
        }
        return execute(api.createImageEdit(builder.build()));
    }
    /** Creates image variations given a local file path. */
    public ImageResult createImageVariation(CreateImageVariationRequest request, String imagePath) {
        java.io.File image = new java.io.File(imagePath);
        return createImageVariation(request, image);
    }
    /**
     * Creates image variations via multipart upload.
     * NOTE(review): size and response_format are added unconditionally — confirm non-null.
     */
    public ImageResult createImageVariation(CreateImageVariationRequest request, java.io.File image) {
        RequestBody imageBody = RequestBody.create(MediaType.parse("image"), image);
        MultipartBody.Builder builder = new MultipartBody.Builder()
                .setType(MediaType.get("multipart/form-data"))
                .addFormDataPart("size", request.getSize())
                .addFormDataPart("response_format", request.getResponseFormat())
                .addFormDataPart("image", "image", imageBody);
        if (request.getN() != null) {
            builder.addFormDataPart("n", request.getN().toString());
        }
        if (request.getModel() != null) {
            builder.addFormDataPart("model", request.getModel());
        }
        return execute(api.createImageVariation(builder.build()));
    }
    // ---- Audio, moderation, speech ----
    /** Transcribes audio given a local file path. */
    public TranscriptionResult createTranscription(CreateTranscriptionRequest request, String audioPath) {
        java.io.File audio = new java.io.File(audioPath);
        return createTranscription(request, audio);
    }
    /** Transcribes an audio file via multipart upload; optional fields are added only when set. */
    public TranscriptionResult createTranscription(CreateTranscriptionRequest request, java.io.File audio) {
        RequestBody audioBody = RequestBody.create(MediaType.parse("audio"), audio);
        MultipartBody.Builder builder = new MultipartBody.Builder()
                .setType(MediaType.get("multipart/form-data"))
                .addFormDataPart("model", request.getModel())
                .addFormDataPart("file", audio.getName(), audioBody);
        if (request.getPrompt() != null) {
            builder.addFormDataPart("prompt", request.getPrompt());
        }
        if (request.getResponseFormat() != null) {
            builder.addFormDataPart("response_format", request.getResponseFormat());
        }
        if (request.getTemperature() != null) {
            builder.addFormDataPart("temperature", request.getTemperature().toString());
        }
        if (request.getLanguage() != null) {
            builder.addFormDataPart("language", request.getLanguage());
        }
        return execute(api.createTranscription(builder.build()));
    }
    /** Translates audio to English given a local file path. */
    public TranslationResult createTranslation(CreateTranslationRequest request, String audioPath) {
        java.io.File audio = new java.io.File(audioPath);
        return createTranslation(request, audio);
    }
    /** Translates an audio file via multipart upload; optional fields are added only when set. */
    public TranslationResult createTranslation(CreateTranslationRequest request, java.io.File audio) {
        RequestBody audioBody = RequestBody.create(MediaType.parse("audio"), audio);
        MultipartBody.Builder builder = new MultipartBody.Builder()
                .setType(MediaType.get("multipart/form-data"))
                .addFormDataPart("model", request.getModel())
                .addFormDataPart("file", audio.getName(), audioBody);
        if (request.getPrompt() != null) {
            builder.addFormDataPart("prompt", request.getPrompt());
        }
        if (request.getResponseFormat() != null) {
            builder.addFormDataPart("response_format", request.getResponseFormat());
        }
        if (request.getTemperature() != null) {
            builder.addFormDataPart("temperature", request.getTemperature().toString());
        }
        return execute(api.createTranslation(builder.build()));
    }
    public ModerationResult createModeration(ModerationRequest request) {
        return execute(api.createModeration(request));
    }
    /** Generates speech audio; the caller is responsible for closing the ResponseBody. */
    public ResponseBody createSpeech(CreateSpeechRequest request) {
        return execute(api.createSpeech(request));
    }
    // ---- Assistants, threads, messages ----
    public Assistant createAssistant(AssistantRequest request) {
        return execute(api.createAssistant(request));
    }
    public Assistant retrieveAssistant(String assistantId) {
        return execute(api.retrieveAssistant(assistantId));
    }
    public Assistant modifyAssistant(String assistantId, ModifyAssistantRequest request) {
        return execute(api.modifyAssistant(assistantId, request));
    }
    public DeleteResult deleteAssistant(String assistantId) {
        return execute(api.deleteAssistant(assistantId));
    }
    /** Lists assistants; search params are flattened into query parameters via Jackson. */
    public OpenAiResponse<Assistant> listAssistants(ListSearchParameters params) {
        Map<String, Object> queryParameters = mapper.convertValue(params, new TypeReference<Map<String, Object>>() {
        });
        return execute(api.listAssistants(queryParameters));
    }
    public AssistantFile createAssistantFile(String assistantId, AssistantFileRequest fileRequest) {
        return execute(api.createAssistantFile(assistantId, fileRequest));
    }
    public AssistantFile retrieveAssistantFile(String assistantId, String fileId) {
        return execute(api.retrieveAssistantFile(assistantId, fileId));
    }
    public DeleteResult deleteAssistantFile(String assistantId, String fileId) {
        return execute(api.deleteAssistantFile(assistantId, fileId));
    }
    public OpenAiResponse<AssistantFile> listAssistantFiles(String assistantId, ListSearchParameters params) {
        Map<String, Object> queryParameters = mapper.convertValue(params, new TypeReference<Map<String, Object>>() {
        });
        return execute(api.listAssistantFiles(assistantId, queryParameters));
    }
    public Thread createThread(ThreadRequest request) {
        return execute(api.createThread(request));
    }
    public Thread retrieveThread(String threadId) {
        return execute(api.retrieveThread(threadId));
    }
    public Thread modifyThread(String threadId, ThreadRequest request) {
        return execute(api.modifyThread(threadId, request));
    }
    public DeleteResult deleteThread(String threadId) {
        return execute(api.deleteThread(threadId));
    }
    public Message createMessage(String threadId, MessageRequest request) {
        return execute(api.createMessage(threadId, request));
    }
    public Message retrieveMessage(String threadId, String messageId) {
        return execute(api.retrieveMessage(threadId, messageId));
    }
    public Message modifyMessage(String threadId, String messageId, ModifyMessageRequest request) {
        return execute(api.modifyMessage(threadId, messageId, request));
    }
    public OpenAiResponse<Message> listMessages(String threadId) {
        return execute(api.listMessages(threadId));
    }
    public OpenAiResponse<Message> listMessages(String threadId, ListSearchParameters params) {
        Map<String, Object> queryParameters = mapper.convertValue(params, new TypeReference<Map<String, Object>>() {
        });
        return execute(api.listMessages(threadId, queryParameters));
    }
    public MessageFile retrieveMessageFile(String threadId, String messageId, String fileId) {
        return execute(api.retrieveMessageFile(threadId, messageId, fileId));
    }
    public OpenAiResponse<MessageFile> listMessageFiles(String threadId, String messageId) {
        return execute(api.listMessageFiles(threadId, messageId));
    }
    public OpenAiResponse<MessageFile> listMessageFiles(String threadId, String messageId, ListSearchParameters params) {
        Map<String, Object> queryParameters = mapper.convertValue(params, new TypeReference<Map<String, Object>>() {
        });
        return execute(api.listMessageFiles(threadId, messageId, queryParameters));
    }
    // ---- Runs ----
    public Run createRun(String threadId, RunCreateRequest runCreateRequest) {
        return execute(api.createRun(threadId, runCreateRequest));
    }
    public Run retrieveRun(String threadId, String runId) {
        return execute(api.retrieveRun(threadId, runId));
    }
    public Run modifyRun(String threadId, String runId, Map<String, String> metadata) {
        return execute(api.modifyRun(threadId, runId, metadata));
    }
public OpenAiResponse<Run> listRuns(String threadId, ListSearchParameters listSearchParameters) {
Map<String, String> search = new HashMap<>();
if (listSearchParameters != null) {
ObjectMapper mapper = defaultObjectMapper();
search = mapper.convertValue(listSearchParameters, Map.class);
}
return execute(api.listRuns(threadId, search));
}
    /** Submits tool call outputs for a run that is waiting on required_action. */
    public Run submitToolOutputs(String threadId, String runId, SubmitToolOutputsRequest submitToolOutputsRequest) {
        return execute(api.submitToolOutputs(threadId, runId, submitToolOutputsRequest));
    }
    public Run cancelRun(String threadId, String runId) {
        return execute(api.cancelRun(threadId, runId));
    }
    /** Creates a thread and immediately starts a run on it in one call. */
    public Run createThreadAndRun(CreateThreadAndRunRequest createThreadAndRunRequest) {
        return execute(api.createThreadAndRun(createThreadAndRunRequest));
    }
    public RunStep retrieveRunStep(String threadId, String runId, String stepId) {
        return execute(api.retrieveRunStep(threadId, runId, stepId));
    }
public OpenAiResponse<RunStep> listRunSteps(String threadId, String runId, ListSearchParameters listSearchParameters) {
Map<String, String> search = new HashMap<>();
if (listSearchParameters != null) {
ObjectMapper mapper = defaultObjectMapper();
search = mapper.convertValue(listSearchParameters, Map.class);
}
return execute(api.listRunSteps(threadId, runId, search));
}
    /**
     * Calls the Open AI api, returns the response, and parses error messages if the request fails
     */
    public static <T> T execute(Single<T> apiCall) {
        try {
            // blockingGet() makes every service method synchronous.
            return apiCall.blockingGet();
        } catch (HttpException e) {
            try {
                if (e.response() == null || e.response().errorBody() == null) {
                    throw e;
                }
                String errorBody = e.response().errorBody().string();
                // Re-throw with the structured OpenAI error attached; keeps the original
                // HttpException as the cause.
                OpenAiError error = mapper.readValue(errorBody, OpenAiError.class);
                throw new OpenAiHttpException(error, e, e.code());
            } catch (IOException ex) {
                // couldn't parse OpenAI error — fall back to the raw HttpException
                throw e;
            }
        }
    }
    /**
     * Calls the Open AI api and returns a Flowable of SSE for streaming
     * omitting the last message.
     *
     * @param apiCall The api call
     */
    public static Flowable<SSE> stream(Call<ResponseBody> apiCall) {
        return stream(apiCall, false);
    }
    /**
     * Calls the Open AI api and returns a Flowable of SSE for streaming.
     *
     * @param apiCall The api call
     * @param emitDone If true the last message ([DONE]) is emitted
     */
    public static Flowable<SSE> stream(Call<ResponseBody> apiCall, boolean emitDone) {
        // BUFFER: events queue up if the subscriber is slower than the network.
        return Flowable.create(emitter -> apiCall.enqueue(new ResponseBodyCallback(emitter, emitDone)), BackpressureStrategy.BUFFER);
    }
    /**
     * Calls the Open AI api and returns a Flowable of type T for streaming
     * omitting the last message.
     *
     * @param apiCall The api call
     * @param cl Class of type T to return
     */
    public static <T> Flowable<T> stream(Call<ResponseBody> apiCall, Class<T> cl) {
        return stream(apiCall).map(sse -> mapper.readValue(sse.getData(), cl));
    }
    /**
     * Shuts down the OkHttp ExecutorService.
     * The default behaviour of OkHttp's ExecutorService (ConnectionPool)
     * is to shut down after an idle timeout of 60s.
     * Call this method to shut down the ExecutorService immediately.
     */
    public void shutdownExecutor() {
        Objects.requireNonNull(this.executorService, "executorService must be set in order to shut down");
        this.executorService.shutdown();
    }
    /** Builds a ready-to-use OpenAiApi with the default client, mapper and retrofit setup. */
    public static OpenAiApi buildApi(String token, Duration timeout) {
        ObjectMapper mapper = defaultObjectMapper();
        OkHttpClient client = defaultClient(token, timeout);
        Retrofit retrofit = defaultRetrofit(client, mapper);
        return retrofit.create(OpenAiApi.class);
    }
    /**
     * Default mapper: lenient on unknown properties, omits nulls, snake_case wire names,
     * and mix-ins for the chat-function types.
     */
    public static ObjectMapper defaultObjectMapper() {
        ObjectMapper mapper = new ObjectMapper();
        mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
        mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
        mapper.setPropertyNamingStrategy(PropertyNamingStrategy.SNAKE_CASE);
        mapper.addMixIn(ChatFunction.class, ChatFunctionMixIn.class);
        mapper.addMixIn(ChatCompletionRequest.class, ChatCompletionRequestMixIn.class);
        mapper.addMixIn(ChatFunctionCall.class, ChatFunctionCallMixIn.class);
        return mapper;
    }
    /** Default client: bearer-token interceptor, small connection pool, caller-chosen read timeout. */
    public static OkHttpClient defaultClient(String token, Duration timeout) {
        return new OkHttpClient.Builder()
                .addInterceptor(new AuthenticationInterceptor(token))
                .connectionPool(new ConnectionPool(5, 1, TimeUnit.SECONDS))
                .readTimeout(timeout.toMillis(), TimeUnit.MILLISECONDS)
                .build();
    }
    /** Default retrofit: Jackson converter plus the RxJava2 call adapter for Single/Flowable. */
    public static Retrofit defaultRetrofit(OkHttpClient client, ObjectMapper mapper) {
        return new Retrofit.Builder()
                .baseUrl(BASE_URL)
                .client(client)
                .addConverterFactory(JacksonConverterFactory.create(mapper))
                .addCallAdapterFactory(RxJava2CallAdapterFactory.create())
                .build();
    }
public Flowable<ChatMessageAccumulator> mapStreamToAccumulator(Flowable<ChatCompletionChunk> flowable) {
ChatFunctionCall functionCall = new ChatFunctionCall(null, null);
ChatMessage accumulatedMessage = new ChatMessage(ChatMessageRole.ASSISTANT.value(), null);
return flowable.map(chunk -> {
ChatMessage messageChunk = chunk.getChoices().get(0).getMessage();
if (messageChunk.getFunctionCall() != null) {
if (messageChunk.getFunctionCall().getName() != null) {
String namePart = messageChunk.getFunctionCall().getName();
functionCall.setName((functionCall.getName() == null ? "" : functionCall.getName()) + namePart);
}
if (messageChunk.getFunctionCall().getArguments() != null) {
String argumentsPart = messageChunk.getFunctionCall().getArguments() == null ? "" : messageChunk.getFunctionCall().getArguments().asText();
functionCall.setArguments(new TextNode((functionCall.getArguments() == null ? "" : functionCall.getArguments().asText()) + argumentsPart));
}
accumulatedMessage.setFunctionCall(functionCall);
} else {
accumulatedMessage.setContent((accumulatedMessage.getContent() == null ? "" : accumulatedMessage.getContent()).toString() + (messageChunk.getContent() == null ? "" : messageChunk.getContent()));
}
if (chunk.getChoices().get(0).getFinishReason() != null) { // last
if (functionCall.getArguments() != null) {
functionCall.setArguments(mapper.readTree(functionCall.getArguments().asText()));
accumulatedMessage.setFunctionCall(functionCall);
}
}
return new ChatMessageAccumulator(messageChunk, accumulatedMessage);
});
}
/**
* Account information inquiry: including total amount and other information.
*
* @return Account information.
*/
public Subscription subscription() {
Single<Subscription> subscription = api.subscription();
return subscription.blockingGet();
}
/**
* Account API consumption amount information inquiry.
* Up to 100 days of inquiry.
*
* @param starDate
* @param endDate
* @return Consumption amount information.
*/
public BillingUsage billingUsage(@NotNull LocalDate starDate, @NotNull LocalDate endDate) {
Single<BillingUsage> billingUsage = api.billingUsage(starDate, endDate);
return billingUsage.blockingGet();
}
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/ResponseBodyCallback.java | package art.starrynift.service;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import com.fasterxml.jackson.databind.ObjectMapper;
import art.starrynift.OpenAiError;
import art.starrynift.OpenAiHttpException;
import io.reactivex.FlowableEmitter;
import okhttp3.ResponseBody;
import retrofit2.Call;
import retrofit2.Callback;
import retrofit2.HttpException;
import retrofit2.Response;
/**
* Callback to parse Server Sent Events (SSE) from raw InputStream and
* emit the events with io.reactivex.FlowableEmitter to allow streaming of
* SSE.
*/
public class ResponseBodyCallback implements Callback<ResponseBody> {
    private static final ObjectMapper mapper = OpenAiService.defaultObjectMapper();
    // Emitter the parsed SSE events are pushed into.
    private FlowableEmitter<SSE> emitter;
    // When true, the terminal [DONE] event is emitted to subscribers as well.
    private boolean emitDone;
    public ResponseBodyCallback(FlowableEmitter<SSE> emitter, boolean emitDone) {
        this.emitter = emitter;
        this.emitDone = emitDone;
    }
    @Override
    public void onResponse(Call<ResponseBody> call, Response<ResponseBody> response) {
        BufferedReader reader = null;
        try {
            if (!response.isSuccessful()) {
                HttpException e = new HttpException(response);
                ResponseBody errorBody = response.errorBody();
                if (errorBody == null) {
                    throw e;
                } else {
                    // Parse the structured OpenAI error and surface it through onFailure
                    // (thrown here, caught by the catch-all below).
                    OpenAiError error = mapper.readValue(
                            errorBody.string(),
                            OpenAiError.class
                    );
                    throw new OpenAiHttpException(error, e, e.code());
                }
            }
            InputStream in = response.body().byteStream();
            reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8));
            String line;
            SSE sse = null;
            while (!emitter.isCancelled() && (line = reader.readLine()) != null) {
                if (line.startsWith("data:")) {
                    String data = line.substring(5).trim();
                    sse = new SSE(data);
                } else if (line.equals("") && sse != null) {
                    // Blank line terminates an event; emit the buffered data.
                    if (sse.isDone()) {
                        if (emitDone) {
                            emitter.onNext(sse);
                        }
                        break;
                    }
                    emitter.onNext(sse);
                    sse = null;
                } else {
                    // NOTE(review): any other line (e.g. SSE comment ":" or "event:" fields)
                    // is treated as a protocol error here — confirm the API never sends them.
                    throw new SSEFormatException("Invalid sse format! " + line);
                }
            }
            emitter.onComplete();
        } catch (Throwable t) {
            // Funnel every failure (HTTP error, parse error, I/O error) into onFailure.
            onFailure(call, t);
        } finally {
            if (reader != null) {
                try {
                    reader.close();
                } catch (IOException e) {
                    // do nothing
                }
            }
        }
    }
    @Override
    public void onFailure(Call<ResponseBody> call, Throwable t) {
        emitter.onError(t);
    }
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/SSE.java | package art.starrynift.service;
/**
 * Minimal representation of a single Server Sent Event payload.
 */
public class SSE {
    private static final String DONE_DATA = "[DONE]";
    private final String data;

    public SSE(String data) {
        this.data = data;
    }

    /** @return the raw event payload (the text after "data:") */
    public String getData() {
        return this.data;
    }

    /** Serializes the event back to its wire format: {@code data: <payload>\n\n}. */
    public byte[] toBytes() {
        return ("data: " + this.data + "\n\n").getBytes();
    }

    /** @return true when this event is the terminal [DONE] marker (case-insensitive) */
    public boolean isDone() {
        return DONE_DATA.equalsIgnoreCase(this.data);
    }
}
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/service/SSEFormatException.java | package art.starrynift.service;
/**
 * Exception indicating a SSE format error.
 * NOTE(review): extends Throwable rather than Exception; existing catch sites
 * (e.g. the catch-all in ResponseBodyCallback) rely on this, so it is left as-is.
 */
public class SSEFormatException extends Throwable{
    public SSEFormatException(String msg){
        super(msg);
    }
}
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/threads/Thread.java | package art.starrynift.threads;
import com.fasterxml.jackson.annotation.JsonProperty;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.NoArgsConstructor;
import java.util.Map;
/**
 * Represents a Thread with an assistant.
 * Accessors, equals/hashCode and constructors are generated by Lombok.
 * <p>
 * https://platform.openai.com/docs/api-reference/threads/object
 */
@NoArgsConstructor
@AllArgsConstructor
@Data
public class Thread {
    /**
     * The identifier, which can be referenced in API endpoints.
     */
    String id;
    /**
     * The object type, which is always thread.
     */
    String object;
    /**
     * The Unix timestamp (in seconds) for when the thread was created.
     */
    @JsonProperty("created_at")
    int createdAt;
    /**
     * Set of 16 key-value pairs that can be attached to an object.
     * This can be useful for storing additional information about the object in a structured format.
     * Keys can be a maximum of 64 characters long, and values can be a maximum of 512 characters long.
     */
    Map<String, String> metadata;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/threads/ThreadRequest.java | package art.starrynift.threads;
import java.util.List;
import java.util.Map;
import art.starrynift.messages.MessageRequest;
import lombok.AllArgsConstructor;
import lombok.Builder;
import lombok.Data;
import lombok.NoArgsConstructor;
/**
 * Creates a thread. Builder, accessors and constructors are generated by Lombok.
 * <p>
 * https://platform.openai.com/docs/api-reference/threads/createThread
 */
@Builder
@NoArgsConstructor
@AllArgsConstructor
@Data
public class ThreadRequest {
    /**
     * A list of messages to start the thread with. Optional.
     */
    List<MessageRequest> messages;
    /**
     * Set of 16 key-value pairs that can be attached to an object.
     * This can be useful for storing additional information about the object in a structured format.
     * Keys can be a maximum of 64 characters long, and values can be a maximum of 512 characters long.
     */
    Map<String, String> metadata;
}
|
0 | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift | java-sources/ai/starrynift/aisdk/starry-ai-sdk/1.0.3/art/starrynift/utils/TikTokensUtil.java | package art.starrynift.utils;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import com.knuddels.jtokkit.Encodings;
import com.knuddels.jtokkit.api.Encoding;
import com.knuddels.jtokkit.api.EncodingRegistry;
import com.knuddels.jtokkit.api.EncodingType;
import com.knuddels.jtokkit.api.ModelType;
import art.starrynift.completion.chat.ChatMessage;
import lombok.AllArgsConstructor;
import lombok.Getter;
/**
 * Token calculation tool class backed by jtokkit encodings.
 * All members are static; the class is not instantiable.
 */
public class TikTokensUtil {
    /**
     * Model name corresponds to Encoding
     */
    private static final Map<String, Encoding> modelMap = new HashMap<>();
    /**
     * Registry instance
     */
    private static final EncodingRegistry registry = Encodings.newDefaultEncodingRegistry();
    static {
        // Register every model jtokkit knows about, then alias the model variants
        // jtokkit has no dedicated entry for onto their base encodings.
        for (ModelType modelType : ModelType.values()) {
            modelMap.put(modelType.getName(), registry.getEncodingForModel(modelType));
        }
        modelMap.put(ModelEnum.GPT_3_5_TURBO_0301.getName(), registry.getEncodingForModel(ModelType.GPT_3_5_TURBO));
        modelMap.put(ModelEnum.GPT_4_32K.getName(), registry.getEncodingForModel(ModelType.GPT_4));
        modelMap.put(ModelEnum.GPT_4_32K_0314.getName(), registry.getEncodingForModel(ModelType.GPT_4));
        modelMap.put(ModelEnum.GPT_4_0314.getName(), registry.getEncodingForModel(ModelType.GPT_4));
        modelMap.put(ModelEnum.GPT_4_1106_preview.getName(), registry.getEncodingForModel(ModelType.GPT_4));
    }
    /** Static utility class; prevent instantiation. */
    private TikTokensUtil() {
    }
    /**
     * Get encoding array through Encoding and text.
     * Blank/null text yields an empty list rather than an error.
     *
     * @param enc Encoding type
     * @param text Text information
     * @return Encoding array
     */
    public static List<Integer> encode(Encoding enc, String text) {
        return isBlank(text) ? new ArrayList<>() : enc.encode(text);
    }
    /**
     * Calculate tokens of text information through Encoding.
     *
     * @param enc Encoding type
     * @param text Text information
     * @return Number of tokens
     */
    public static int tokens(Encoding enc, String text) {
        return encode(enc, text).size();
    }
    /**
     * Reverse calculate text information through Encoding and encoded array
     *
     * @param enc Encoding
     * @param encoded Encoding array
     * @return Text information corresponding to the encoding array.
     */
    public static String decode(Encoding enc, List<Integer> encoded) {
        return enc.decode(encoded);
    }
    /**
     * Get an Encoding object by Encoding type
     *
     * @param encodingType the jtokkit encoding type
     * @return Encoding
     */
    public static Encoding getEncoding(EncodingType encodingType) {
        Encoding enc = registry.getEncoding(encodingType);
        return enc;
    }
    /**
     * Obtain the encoding array by encoding; blank text yields an empty list.
     *
     * @param text the text to tokenize
     * @return Encoding array
     */
    public static List<Integer> encode(EncodingType encodingType, String text) {
        if (isBlank(text)) {
            return new ArrayList<>();
        }
        Encoding enc = getEncoding(encodingType);
        List<Integer> encoded = enc.encode(text);
        return encoded;
    }
    /**
     * Compute the tokens of the specified string through EncodingType.
     *
     * @param encodingType the jtokkit encoding type
     * @param text the text to count
     * @return Number of tokens
     */
    public static int tokens(EncodingType encodingType, String text) {
        return encode(encodingType, text).size();
    }
    /**
     * Reverse the encoded array to get the string text using EncodingType and the encoded array.
     *
     * @param encodingType the jtokkit encoding type
     * @param encoded the token ids to decode
     * @return The string corresponding to the encoding array.
     */
    public static String decode(EncodingType encodingType, List<Integer> encoded) {
        Encoding enc = getEncoding(encodingType);
        return enc.decode(encoded);
    }
    /**
     * Get an Encoding object by model name.
     *
     * @param modelName the model identifier, e.g. "gpt-4"
     * @return Encoding, or null when the model name is not registered in modelMap
     */
    public static Encoding getEncoding(String modelName) {
        return modelMap.get(modelName);
    }
    /**
     * Get the encoded array by model name using encode.
     * Returns an empty list for blank text or an unknown model name.
     *
     * @param modelName the model identifier
     * @param text Text information
     * @return Encoding array
     */
    public static List<Integer> encode(String modelName, String text) {
        if (isBlank(text)) {
            return new ArrayList<>();
        }
        Encoding enc = getEncoding(modelName);
        if (Objects.isNull(enc)) {
            // Unknown model: degrade gracefully to "no tokens".
            return new ArrayList<>();
        }
        List<Integer> encoded = enc.encode(text);
        return encoded;
    }
    /**
     * Calculate the tokens of a specified string by model name.
     *
     * @param modelName the model identifier
     * @param text the text to count
     * @return Number of tokens (0 for blank text or unknown model)
     */
    public static int tokens(String modelName, String text) {
        return encode(modelName, text).size();
    }
public static int tokens(String modelName, List<ChatMessage> messages) {
Encoding encoding = getEncoding(modelName);
int tokensPerMessage = 0;
int tokensPerName = 0;
//3.5统一处理
if (modelName.equals("gpt-3.5-turbo-0301") || modelName.equals("gpt-3.5-turbo")) {
tokensPerMessage = 4;
tokensPerName = -1;
}
//4.0统一处理
if (modelName.equals("gpt-4") || modelName.equals("gpt-4-0314")) {
tokensPerMessage = 3;
tokensPerName = 1;
}
int sum = 0;
for (ChatMessage msg : messages) {
sum += tokensPerMessage;
sum += tokens(encoding, msg.getContent().toString());
sum += tokens(encoding, msg.getRole());
sum += tokens(encoding, msg.getName());
if (isNotBlank(msg.getName())) {
sum += tokensPerName;
}
}
sum += 3;
return sum;
}
/**
* Reverse the string text through the model name and the encoded array.
*
* @param modelName
* @param encoded
* @return
*/
public static String decode(String modelName, List<Integer> encoded) {
Encoding enc = getEncoding(modelName);
return enc.decode(encoded);
}
/**
* Obtain the modelType.
*
* @param name
* @return
*/
public static ModelType getModelTypeByName(String name) {
    // The 0301 snapshot maps onto the generic 3.5-turbo type.
    if (ModelEnum.GPT_3_5_TURBO_0301.getName().equals(name)) {
        return ModelType.GPT_3_5_TURBO;
    }
    // All GPT-4 variants (32k and dated snapshots included) share one type.
    boolean isGpt4 = ModelEnum.GPT_4.getName().equals(name)
            || ModelEnum.GPT_4_32K.getName().equals(name)
            || ModelEnum.GPT_4_32K_0314.getName().equals(name)
            || ModelEnum.GPT_4_0314.getName().equals(name);
    if (isGpt4) {
        return ModelType.GPT_4;
    }
    // Otherwise try an exact match against the known model types.
    for (ModelType candidate : ModelType.values()) {
        if (candidate.getName().equals(name)) {
            return candidate;
        }
    }
    return null;
}
@Getter
@AllArgsConstructor
public enum ModelEnum {
    /**
     * gpt-3.5-turbo
     */
    GPT_3_5_TURBO("gpt-3.5-turbo"),
    /**
     * Temporary model, not recommended for use.
     */
    GPT_3_5_TURBO_0301("gpt-3.5-turbo-0301"),
    /**
     * GPT4.0
     */
    GPT_4("gpt-4"),
    /**
     * Temporary model, not recommended for use.
     */
    GPT_4_0314("gpt-4-0314"),
    /**
     * GPT4.0 extra-long (32k) context window.
     */
    GPT_4_32K("gpt-4-32k"),
    /**
     * Temporary model, not recommended for use.
     */
    GPT_4_32K_0314("gpt-4-32k-0314"),
    /**
     * Temporary model, not recommended for use.
     */
    GPT_4_1106_preview("gpt-4-1106-preview");
    /**
     * The model name as accepted by the OpenAI API.
     * Declared final: enum constant state must be immutable.
     */
    private final String name;
}
public static boolean isBlankChar(int c) {
    // Standard whitespace checks first, then a few special code points:
    // 65279 = U+FEFF (BOM), 8234 = U+202A (LRE control), 0 = NUL,
    // 12644 = U+3164 (Hangul filler), 10240 = U+2800 (Braille blank),
    // 6158 = U+180E (Mongolian vowel separator).
    if (Character.isWhitespace(c) || Character.isSpaceChar(c)) {
        return true;
    }
    switch (c) {
        case 65279:
        case 8234:
        case 0:
        case 12644:
        case 10240:
        case 6158:
            return true;
        default:
            return false;
    }
}
public static boolean isBlankChar(char c) {
    // Widen to int and delegate to the code-point variant.
    int codePoint = c;
    return isBlankChar(codePoint);
}
public static boolean isNotBlank(CharSequence str) {
    // True when the sequence contains at least one non-blank character.
    boolean blank = isBlank(str);
    return !blank;
}
public static boolean isBlank(CharSequence str) {
    // null and empty sequences count as blank.
    if (str == null || str.length() == 0) {
        return true;
    }
    // Blank only if every character is considered a blank character.
    for (int i = 0, n = str.length(); i < n; i++) {
        if (!isBlankChar(str.charAt(i))) {
            return false;
        }
    }
    return true;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/ApiVersions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.requests.ProduceRequest;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
/**
* Maintains node api versions for access outside of NetworkClient (which is where the information is derived).
* The pattern is akin to the use of {@link Metadata} for topic metadata.
*
* NOTE: This class is intended for INTERNAL usage only within Kafka.
*/
public class ApiVersions {

    private final Map<String, NodeApiVersions> nodeApiVersions = new HashMap<>();
    private byte maxUsableProduceMagic = RecordBatch.CURRENT_MAGIC_VALUE;

    /** Records (or replaces) the API versions for a node and refreshes the cached produce magic. */
    public synchronized void update(String nodeId, NodeApiVersions nodeApiVersions) {
        this.nodeApiVersions.put(nodeId, nodeApiVersions);
        this.maxUsableProduceMagic = computeMaxUsableProduceMagic();
    }

    /** Forgets a node's API versions and refreshes the cached produce magic. */
    public synchronized void remove(String nodeId) {
        this.nodeApiVersions.remove(nodeId);
        this.maxUsableProduceMagic = computeMaxUsableProduceMagic();
    }

    /** Returns the known API versions for the node, or null if none recorded. */
    public synchronized NodeApiVersions get(String nodeId) {
        return this.nodeApiVersions.get(nodeId);
    }

    private byte computeMaxUsableProduceMagic() {
        // Pick a magic version that every known broker supports, to reduce the
        // chance that messages must be converted when they are ready to be sent.
        // Nodes with no PRODUCE api (Raft controllers) are skipped.
        byte magic = RecordBatch.CURRENT_MAGIC_VALUE;
        for (NodeApiVersions versions : this.nodeApiVersions.values()) {
            if (versions.apiVersion(ApiKeys.PRODUCE) == null)
                continue;
            byte required = ProduceRequest.requiredMagicForVersion(versions.latestUsableVersion(ApiKeys.PRODUCE));
            if (required < magic)
                magic = required;
        }
        return magic;
    }

    /** Returns the cached minimum produce magic usable across all known brokers. */
    public synchronized byte maxUsableProduceMagic() {
        return maxUsableProduceMagic;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/ClientDnsLookup.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import java.util.Locale;
public enum ClientDnsLookup {

    USE_ALL_DNS_IPS("use_all_dns_ips"),
    RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY("resolve_canonical_bootstrap_servers_only");

    /** The configuration string exactly as it appears in client configs. */
    private final String clientDnsLookup;

    ClientDnsLookup(String clientDnsLookup) {
        this.clientDnsLookup = clientDnsLookup;
    }

    /**
     * Parses a config value (e.g. "use_all_dns_ips") into the matching constant.
     */
    public static ClientDnsLookup forConfig(String config) {
        // Config values are the lower-cased constant names, so upper-case then look up.
        return valueOf(config.toUpperCase(Locale.ROOT));
    }

    @Override
    public String toString() {
        return clientDnsLookup;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/ClientRequest.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.message.RequestHeaderData;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.requests.AbstractRequest;
import org.apache.kafka.common.requests.RequestHeader;
/**
* A request being sent to the server. This holds both the network send as well as the client-level metadata.
*/
public final class ClientRequest {

    private final String destination;
    private final AbstractRequest.Builder<?> requestBuilder;
    private final int correlationId;
    private final String clientId;
    private final long createdTimeMs;
    private final boolean expectResponse;
    private final int requestTimeoutMs;
    private final RequestCompletionHandler callback;

    /**
     * @param destination The brokerId to send the request to
     * @param requestBuilder The builder for the request to make
     * @param correlationId The correlation id for this client request
     * @param clientId The client ID to use for the header
     * @param createdTimeMs The unix timestamp in milliseconds at which this request was created
     * @param expectResponse Should we expect a response message or is this request complete once it is sent?
     * @param requestTimeoutMs The request timeout in milliseconds
     * @param callback A callback to execute when the response has been received (or null if no callback is necessary)
     */
    public ClientRequest(String destination,
                         AbstractRequest.Builder<?> requestBuilder,
                         int correlationId,
                         String clientId,
                         long createdTimeMs,
                         boolean expectResponse,
                         int requestTimeoutMs,
                         RequestCompletionHandler callback) {
        this.destination = destination;
        this.requestBuilder = requestBuilder;
        this.correlationId = correlationId;
        this.clientId = clientId;
        this.createdTimeMs = createdTimeMs;
        this.expectResponse = expectResponse;
        this.requestTimeoutMs = requestTimeoutMs;
        this.callback = callback;
    }

    /** The API key of the request being built. */
    public ApiKeys apiKey() {
        return requestBuilder.apiKey();
    }

    /**
     * Builds the request header for the given API version, carrying the client id
     * and correlation id of this request.
     */
    public RequestHeader makeHeader(short version) {
        ApiKeys api = apiKey();
        RequestHeaderData data = new RequestHeaderData()
                .setRequestApiKey(api.id)
                .setRequestApiVersion(version)
                .setClientId(clientId)
                .setCorrelationId(correlationId);
        return new RequestHeader(data, api.requestHeaderVersion(version));
    }

    public boolean expectResponse() {
        return expectResponse;
    }

    public AbstractRequest.Builder<?> requestBuilder() {
        return requestBuilder;
    }

    public String destination() {
        return destination;
    }

    public RequestCompletionHandler callback() {
        return callback;
    }

    public long createdTimeMs() {
        return createdTimeMs;
    }

    public int correlationId() {
        return correlationId;
    }

    public int requestTimeoutMs() {
        return requestTimeoutMs;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("ClientRequest(");
        sb.append("expectResponse=").append(expectResponse);
        sb.append(", callback=").append(callback);
        sb.append(", destination=").append(destination);
        sb.append(", correlationId=").append(correlationId);
        sb.append(", clientId=").append(clientId);
        sb.append(", createdTimeMs=").append(createdTimeMs);
        sb.append(", requestBuilder=").append(requestBuilder);
        sb.append(")");
        return sb.toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/ClientResponse.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.RequestHeader;
/**
* A response from the server. Contains both the body of the response as well as the correlated request
* metadata that was originally sent.
*/
public class ClientResponse {

    private final RequestHeader requestHeader;
    private final RequestCompletionHandler callback;
    private final String destination;
    private final long receivedTimeMs;
    private final long latencyMs;
    private final boolean disconnected;
    private final boolean timedOut;
    private final UnsupportedVersionException versionMismatch;
    private final AuthenticationException authenticationException;
    private final AbstractResponse responseBody;

    /**
     * Convenience constructor for responses that were not disconnected by a timeout.
     *
     * @param requestHeader The header of the corresponding request
     * @param callback The callback to be invoked
     * @param destination The node the corresponding request was sent to
     * @param createdTimeMs The unix timestamp when the corresponding request was created
     * @param receivedTimeMs The unix timestamp when this response was received
     * @param disconnected Whether the client disconnected before fully reading a response
     * @param versionMismatch Whether there was a version mismatch that prevented sending the request
     * @param responseBody The response contents (or null) if we disconnected, no response was expected,
     *                     or if there was a version mismatch
     */
    public ClientResponse(RequestHeader requestHeader,
                          RequestCompletionHandler callback,
                          String destination,
                          long createdTimeMs,
                          long receivedTimeMs,
                          boolean disconnected,
                          UnsupportedVersionException versionMismatch,
                          AuthenticationException authenticationException,
                          AbstractResponse responseBody) {
        // Delegate with timedOut = false.
        this(requestHeader,
             callback,
             destination,
             createdTimeMs,
             receivedTimeMs,
             disconnected,
             false,
             versionMismatch,
             authenticationException,
             responseBody);
    }

    /**
     * @param requestHeader The header of the corresponding request
     * @param callback The callback to be invoked
     * @param destination The node the corresponding request was sent to
     * @param createdTimeMs The unix timestamp when the corresponding request was created
     * @param receivedTimeMs The unix timestamp when this response was received
     * @param disconnected Whether the client disconnected before fully reading a response
     * @param timedOut Whether the client was disconnected because of a timeout; when true,
     *                 {@code disconnected} must also be true or an {@link IllegalStateException} is thrown
     * @param versionMismatch Whether there was a version mismatch that prevented sending the request
     * @param responseBody The response contents (or null) if we disconnected, no response was expected,
     *                     or if there was a version mismatch
     */
    public ClientResponse(RequestHeader requestHeader,
                          RequestCompletionHandler callback,
                          String destination,
                          long createdTimeMs,
                          long receivedTimeMs,
                          boolean disconnected,
                          boolean timedOut,
                          UnsupportedVersionException versionMismatch,
                          AuthenticationException authenticationException,
                          AbstractResponse responseBody) {
        // A timeout implies a disconnect; reject the inconsistent combination up front.
        if (!disconnected && timedOut)
            throw new IllegalStateException("The client response can't be in the state of connected, yet timed out");
        this.requestHeader = requestHeader;
        this.callback = callback;
        this.destination = destination;
        this.receivedTimeMs = receivedTimeMs;
        this.latencyMs = receivedTimeMs - createdTimeMs;
        this.disconnected = disconnected;
        this.timedOut = timedOut;
        this.versionMismatch = versionMismatch;
        this.authenticationException = authenticationException;
        this.responseBody = responseBody;
    }

    public long receivedTimeMs() {
        return receivedTimeMs;
    }

    public boolean wasDisconnected() {
        return disconnected;
    }

    public boolean wasTimedOut() {
        return timedOut;
    }

    public UnsupportedVersionException versionMismatch() {
        return versionMismatch;
    }

    public AuthenticationException authenticationException() {
        return authenticationException;
    }

    public RequestHeader requestHeader() {
        return requestHeader;
    }

    public String destination() {
        return destination;
    }

    public AbstractResponse responseBody() {
        return responseBody;
    }

    public boolean hasResponse() {
        return responseBody != null;
    }

    public long requestLatencyMs() {
        return latencyMs;
    }

    /** Invokes the completion callback, if one was supplied. */
    public void onComplete() {
        if (callback != null)
            callback.onComplete(this);
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("ClientResponse(");
        sb.append("receivedTimeMs=").append(receivedTimeMs);
        sb.append(", latencyMs=").append(latencyMs);
        sb.append(", disconnected=").append(disconnected);
        sb.append(", timedOut=").append(timedOut);
        sb.append(", requestHeader=").append(requestHeader);
        sb.append(", responseBody=").append(responseBody);
        sb.append(")");
        return sb.toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/ClientUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.network.ChannelBuilder;
import org.apache.kafka.common.network.ChannelBuilders;
import org.apache.kafka.common.security.JaasContext;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;
import static org.apache.kafka.common.utils.Utils.getHost;
import static org.apache.kafka.common.utils.Utils.getPort;
public final class ClientUtils {
    private static final Logger log = LoggerFactory.getLogger(ClientUtils.class);

    // Utility class: not instantiable.
    private ClientUtils() {
    }

    /**
     * Parses and validates bootstrap server URLs, interpreting the DNS lookup
     * mode from its raw configuration string.
     *
     * @param urls the configured bootstrap server URLs ("host:port")
     * @param clientDnsLookupConfig the client.dns.lookup config value
     * @return the resolved socket addresses
     */
    public static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls, String clientDnsLookupConfig) {
        return parseAndValidateAddresses(urls, ClientDnsLookup.forConfig(clientDnsLookupConfig));
    }

    /**
     * Parses "host:port" bootstrap URLs into resolved socket addresses.
     *
     * @param urls the configured bootstrap server URLs; null/empty entries are skipped
     * @param clientDnsLookup controls whether canonical hostnames are resolved
     * @return the list of resolvable addresses
     * @throws ConfigException if a URL is malformed, a host is unknown, or nothing resolves
     */
    public static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls, ClientDnsLookup clientDnsLookup) {
        List<InetSocketAddress> addresses = new ArrayList<>();
        for (String url : urls) {
            if (url != null && !url.isEmpty()) {
                try {
                    String host = getHost(url);
                    Integer port = getPort(url);
                    if (host == null || port == null)
                        throw new ConfigException("Invalid url in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url);
                    if (clientDnsLookup == ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY) {
                        // Resolve every IP for the host and add one address per canonical hostname.
                        InetAddress[] inetAddresses = InetAddress.getAllByName(host);
                        for (InetAddress inetAddress : inetAddresses) {
                            String resolvedCanonicalName = inetAddress.getCanonicalHostName();
                            InetSocketAddress address = new InetSocketAddress(resolvedCanonicalName, port);
                            if (address.isUnresolved()) {
                                // Unresolvable canonical names are logged and skipped, not fatal.
                                log.warn("Couldn't resolve server {} from {} as DNS resolution of the canonical hostname {} failed for {}", url, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, resolvedCanonicalName, host);
                            } else {
                                addresses.add(address);
                            }
                        }
                    } else {
                        // Default mode: resolve the host exactly as given.
                        InetSocketAddress address = new InetSocketAddress(host, port);
                        if (address.isUnresolved()) {
                            log.warn("Couldn't resolve server {} from {} as DNS resolution failed for {}", url, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, host);
                        } else {
                            addresses.add(address);
                        }
                    }
                } catch (IllegalArgumentException e) {
                    // Thrown for out-of-range or malformed port values.
                    // NOTE(review): the original cause is intentionally dropped here.
                    throw new ConfigException("Invalid port in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url);
                } catch (UnknownHostException e) {
                    throw new ConfigException("Unknown host in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url);
                }
            }
        }
        // At least one address must have resolved, otherwise the client can never connect.
        if (addresses.isEmpty())
            throw new ConfigException("No resolvable bootstrap urls given in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG);
        return addresses;
    }

    /**
     * Create a new channel builder from the provided configuration.
     *
     * @param config client configs
     * @param time the time implementation
     * @param logContext the logging context
     *
     * @return configured ChannelBuilder based on the configs.
     */
    public static ChannelBuilder createChannelBuilder(AbstractConfig config, Time time, LogContext logContext) {
        SecurityProtocol securityProtocol = SecurityProtocol.forName(config.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG));
        String clientSaslMechanism = config.getString(SaslConfigs.SASL_MECHANISM);
        return ChannelBuilders.clientChannelBuilder(securityProtocol, JaasContext.Type.CLIENT, config, null,
            clientSaslMechanism, time, true, logContext);
    }

    /**
     * Resolves a host through the given resolver, keeping only addresses of the
     * same IP family as the first result (see {@link #filterPreferredAddresses}).
     *
     * @throws UnknownHostException if the host cannot be resolved
     */
    static List<InetAddress> resolve(String host, HostResolver hostResolver) throws UnknownHostException {
        InetAddress[] addresses = hostResolver.resolve(host);
        List<InetAddress> result = filterPreferredAddresses(addresses);
        if (log.isDebugEnabled())
            log.debug("Resolved host {} as {}", host, result.stream().map(i -> i.getHostAddress()).collect(Collectors.joining(",")));
        return result;
    }

    /**
     * Return a list containing the first address in `allAddresses` and subsequent addresses
     * that are a subtype of the first address.
     *
     * The outcome is that all returned addresses are either IPv4 or IPv6 (InetAddress has two
     * subclasses: Inet4Address and Inet6Address).
     */
    static List<InetAddress> filterPreferredAddresses(InetAddress[] allAddresses) {
        List<InetAddress> preferredAddresses = new ArrayList<>();
        // The first address fixes the preferred family; later addresses of a
        // different family are dropped.
        Class<? extends InetAddress> clazz = null;
        for (InetAddress address : allAddresses) {
            if (clazz == null) {
                clazz = address.getClass();
            }
            if (clazz.isInstance(address)) {
                preferredAddresses.add(address);
            }
        }
        return preferredAddresses;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/ClusterConnectionStates.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.utils.ExponentialBackoff;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
* The state of our connection to each node in the cluster.
*
*/
final class ClusterConnectionStates {
final static int RECONNECT_BACKOFF_EXP_BASE = 2;
final static double RECONNECT_BACKOFF_JITTER = 0.2;
final static int CONNECTION_SETUP_TIMEOUT_EXP_BASE = 2;
final static double CONNECTION_SETUP_TIMEOUT_JITTER = 0.2;
private final Map<String, NodeConnectionState> nodeState;
private final Logger log;
private final HostResolver hostResolver;
private Set<String> connectingNodes;
private ExponentialBackoff reconnectBackoff;
private ExponentialBackoff connectionSetupTimeout;
public ClusterConnectionStates(long reconnectBackoffMs, long reconnectBackoffMaxMs,
                               long connectionSetupTimeoutMs, long connectionSetupTimeoutMaxMs,
                               LogContext logContext, HostResolver hostResolver) {
    this.log = logContext.logger(ClusterConnectionStates.class);
    // Exponential backoff for reconnect attempts: base grows by powers of 2
    // with 20% jitter, capped at reconnectBackoffMaxMs.
    this.reconnectBackoff = new ExponentialBackoff(
        reconnectBackoffMs,
        RECONNECT_BACKOFF_EXP_BASE,
        reconnectBackoffMaxMs,
        RECONNECT_BACKOFF_JITTER);
    // Same exponential scheme for the connection setup timeout.
    this.connectionSetupTimeout = new ExponentialBackoff(
        connectionSetupTimeoutMs,
        CONNECTION_SETUP_TIMEOUT_EXP_BASE,
        connectionSetupTimeoutMaxMs,
        CONNECTION_SETUP_TIMEOUT_JITTER);
    this.nodeState = new HashMap<>();
    this.connectingNodes = new HashSet<>();
    this.hostResolver = hostResolver;
}
/**
* Return true iff we can currently initiate a new connection. This will be the case if we are not
* connected and haven't been connected for at least the minimum reconnection backoff period.
* @param id the connection id to check
* @param now the current time in ms
* @return true if we can initiate a new connection
*/
public boolean canConnect(String id, long now) {
    NodeConnectionState state = nodeState.get(id);
    // An unknown node has never been attempted, so we may connect right away.
    if (state == null)
        return true;
    // Otherwise we must be disconnected and past the reconnect backoff window.
    boolean backoffElapsed = now - state.lastConnectAttemptMs >= state.reconnectBackoffMs;
    return state.state.isDisconnected() && backoffElapsed;
}
/**
* Return true if we are disconnected from the given node and can't re-establish a connection yet.
* @param id the connection to check
* @param now the current time in ms
*/
public boolean isBlackedOut(String id, long now) {
    NodeConnectionState state = nodeState.get(id);
    if (state == null)
        return false;
    // Blacked out = disconnected and still inside the reconnect backoff window.
    return state.state.isDisconnected()
            && now - state.lastConnectAttemptMs < state.reconnectBackoffMs;
}
/**
* Returns the number of milliseconds to wait, based on the connection state, before attempting to send data. When
* disconnected, this respects the reconnect backoff time. When connecting, return a delay based on the connection timeout.
* When connected, wait indefinitely (i.e. until a wakeup).
* @param id the connection to check
* @param now the current time in ms
*/
public long connectionDelay(String id, long now) {
    NodeConnectionState state = nodeState.get(id);
    if (state == null)
        return 0;
    // While connecting, the delay is bounded by the connection setup timeout.
    if (state.state == ConnectionState.CONNECTING)
        return connectionSetupTimeoutMs(id);
    // While disconnected, respect whatever remains of the reconnect backoff.
    if (state.state.isDisconnected()) {
        long waited = now - state.lastConnectAttemptMs;
        long remaining = state.reconnectBackoffMs - waited;
        return remaining > 0 ? remaining : 0;
    }
    // Connected: wait indefinitely; other events (connection or data acked)
    // will cause a wakeup once data can be sent.
    return Long.MAX_VALUE;
}
/**
* Return true if a specific connection establishment is currently underway
* @param id The id of the node to check
*/
public boolean isConnecting(String id) {
    NodeConnectionState state = nodeState.get(id);
    if (state == null)
        return false;
    return state.state == ConnectionState.CONNECTING;
}
/**
* Check whether a connection is either being established or awaiting API version information.
* @param id The id of the node to check
* @return true if the node is either connecting or has connected and is awaiting API versions, false otherwise
*/
public boolean isPreparingConnection(String id) {
    NodeConnectionState state = nodeState.get(id);
    if (state == null)
        return false;
    // "Preparing" covers both the TCP connect and the API-versions handshake.
    ConnectionState s = state.state;
    return s == ConnectionState.CONNECTING || s == ConnectionState.CHECKING_API_VERSIONS;
}
/**
* Enter the connecting state for the given connection, moving to a new resolved address if necessary.
* @param id the id of the connection
* @param now the current time in ms
* @param host the host of the connection, to be resolved internally if needed
*/
public void connecting(String id, long now, String host) {
    NodeConnectionState connectionState = nodeState.get(id);
    if (connectionState != null && connectionState.host().equals(host)) {
        // Known node, unchanged hostname: reuse the existing state.
        connectionState.lastConnectAttemptMs = now;
        connectionState.state = ConnectionState.CONNECTING;
        // Move to next resolved address, or if addresses are exhausted, mark node to be re-resolved
        connectionState.moveToNextAddress();
        connectingNodes.add(id);
        return;
    } else if (connectionState != null) {
        log.info("Hostname for node {} changed from {} to {}.", id, connectionState.host(), host);
    }
    // Create a new NodeConnectionState if nodeState does not already contain one
    // for the specified id or if the hostname associated with the node id changed.
    // Both backoffs start from their base value (backoff(0)).
    nodeState.put(id, new NodeConnectionState(ConnectionState.CONNECTING, now,
        reconnectBackoff.backoff(0), connectionSetupTimeout.backoff(0), host, hostResolver));
    connectingNodes.add(id);
}
/**
* Returns a resolved address for the given connection, resolving it if necessary.
* @param id the id of the connection
* @throws UnknownHostException if the address was not resolvable
*/
public InetAddress currentAddress(String id) throws UnknownHostException {
    // Delegate to the node state, which resolves the address if necessary.
    NodeConnectionState state = nodeState(id);
    return state.currentAddress();
}
/**
* Enter the disconnected state for the given node.
* @param id the connection we have disconnected
* @param now the current time in ms
*/
public void disconnected(String id, long now) {
    NodeConnectionState nodeState = nodeState(id);
    nodeState.lastConnectAttemptMs = now;
    // Every disconnect counts towards the reconnect backoff.
    updateReconnectBackoff(nodeState);
    if (nodeState.state == ConnectionState.CONNECTING) {
        // The connect attempt itself failed: grow the setup timeout and stop
        // tracking the node as connecting.
        updateConnectionSetupTimeout(nodeState);
        connectingNodes.remove(id);
    } else {
        resetConnectionSetupTimeout(nodeState);
        if (nodeState.state.isConnected()) {
            // If a connection had previously been established, clear the addresses to trigger a new DNS resolution
            // because the node IPs may have changed
            nodeState.clearAddresses();
        }
    }
    nodeState.state = ConnectionState.DISCONNECTED;
}
/**
* Indicate that the connection is throttled until the specified deadline.
* @param id the connection to be throttled
* @param throttleUntilTimeMs the throttle deadline in milliseconds
*/
public void throttle(String id, long throttleUntilTimeMs) {
    NodeConnectionState state = nodeState.get(id);
    if (state == null)
        return;
    // Only extend the deadline; the throttle deadline must never regress.
    if (state.throttleUntilTimeMs < throttleUntilTimeMs)
        state.throttleUntilTimeMs = throttleUntilTimeMs;
}
/**
* Return the remaining throttling delay in milliseconds if throttling is in progress. Return 0, otherwise.
* @param id the connection to check
* @param now the current time in ms
*/
public long throttleDelayMs(String id, long now) {
    NodeConnectionState state = nodeState.get(id);
    // No state, or the deadline has already passed: no throttling delay.
    if (state == null || state.throttleUntilTimeMs <= now)
        return 0;
    return state.throttleUntilTimeMs - now;
}
/**
* Return the number of milliseconds to wait, based on the connection state and the throttle time, before
* attempting to send data. If the connection has been established but being throttled, return throttle delay.
* Otherwise, return connection delay.
* @param id the connection to check
* @param now the current time in ms
*/
public long pollDelayMs(String id, long now) {
    long throttleDelayMs = throttleDelayMs(id, now);
    // A connected-but-throttled node waits out the throttle; otherwise use the
    // regular connection delay.
    return (isConnected(id) && throttleDelayMs > 0) ? throttleDelayMs : connectionDelay(id, now);
}
/**
* Enter the checking_api_versions state for the given node.
* @param id the connection identifier
*/
public void checkingApiVersions(String id) {
    NodeConnectionState state = nodeState(id);
    state.state = ConnectionState.CHECKING_API_VERSIONS;
    // The TCP connection succeeded: reset the setup timeout and stop counting
    // the node as connecting.
    resetConnectionSetupTimeout(state);
    connectingNodes.remove(id);
}
/**
* Enter the ready state for the given node.
* @param id the connection identifier
*/
public void ready(String id) {
    NodeConnectionState state = nodeState(id);
    state.state = ConnectionState.READY;
    // A successful connection clears any earlier auth failure and resets both backoffs.
    state.authenticationException = null;
    resetReconnectBackoff(state);
    resetConnectionSetupTimeout(state);
    connectingNodes.remove(id);
}
/**
* Enter the authentication failed state for the given node.
* @param id the connection identifier
* @param now the current time in ms
* @param exception the authentication exception
*/
public void authenticationFailed(String id, long now, AuthenticationException exception) {
    NodeConnectionState state = nodeState(id);
    state.authenticationException = exception;
    state.state = ConnectionState.AUTHENTICATION_FAILED;
    state.lastConnectAttemptMs = now;
    // A failed authentication counts as a failed attempt for backoff purposes.
    updateReconnectBackoff(state);
}
/**
* Return true if the connection is in the READY state and currently not throttled.
*
* @param id the connection identifier
* @param now the current time in ms
*/
public boolean isReady(String id, long now) {
    NodeConnectionState state = nodeState.get(id);
    return isReady(state, now);
}
private boolean isReady(NodeConnectionState state, long now) {
    if (state == null)
        return false;
    // Ready means in the READY state with any throttle deadline already expired.
    return state.state == ConnectionState.READY && state.throttleUntilTimeMs <= now;
}
/**
 * Return true if there is at least one node with connection in the READY state and not throttled. Returns false
 * otherwise.
 *
 * @param now the current time in ms
 */
public boolean hasReadyNodes(long now) {
    // Short-circuits on the first ready, unthrottled connection.
    return nodeState.values().stream().anyMatch(state -> isReady(state, now));
}
/**
 * Return true if the connection has been established
 * @param id The id of the node to check
 */
public boolean isConnected(String id) {
    NodeConnectionState state = nodeState.get(id);
    if (state == null)
        return false;
    return state.state.isConnected();
}
/**
 * Return true if the connection has been disconnected
 * @param id The id of the node to check
 */
public boolean isDisconnected(String id) {
    NodeConnectionState state = nodeState.get(id);
    if (state == null)
        return false;
    return state.state.isDisconnected();
}
/**
 * Return authentication exception if an authentication error occurred
 * @param id The id of the node to check
 */
public AuthenticationException authenticationException(String id) {
    NodeConnectionState state = nodeState.get(id);
    if (state == null)
        return null;
    return state.authenticationException;
}
/**
 * Resets the failure count for a node and sets the reconnect backoff to the base
 * value configured via reconnect.backoff.ms
 *
 * @param nodeState The node state object to update
 */
private void resetReconnectBackoff(NodeConnectionState nodeState) {
    nodeState.failedAttempts = 0;
    // backoff(0) yields the configured base backoff with no exponential scaling.
    nodeState.reconnectBackoffMs = reconnectBackoff.backoff(0);
}
/**
 * Resets the failure count for a node and sets the connection setup timeout to the base
 * value configured via socket.connection.setup.timeout.ms
 *
 * @param nodeState The node state object to update
 */
private void resetConnectionSetupTimeout(NodeConnectionState nodeState) {
    nodeState.failedConnectAttempts = 0;
    // backoff(0) yields the configured base timeout with no exponential scaling.
    nodeState.connectionSetupTimeoutMs = connectionSetupTimeout.backoff(0);
}
/**
 * Increment the failure counter, update the node reconnect backoff exponentially,
 * and record the current timestamp.
 * The delay is reconnect.backoff.ms * 2**(failures - 1) * (+/- 20% random jitter)
 * Up to a (pre-jitter) maximum of reconnect.backoff.max.ms
 *
 * @param nodeState The node state object to update
 */
private void updateReconnectBackoff(NodeConnectionState nodeState) {
    // Note the ordering: the backoff is computed from the count BEFORE the
    // increment, which realizes the 2**(failures - 1) formula above.
    nodeState.reconnectBackoffMs = reconnectBackoff.backoff(nodeState.failedAttempts);
    nodeState.failedAttempts++;
}
/**
 * Increment the failure counter and update the node connection setup timeout exponentially.
 * The delay is socket.connection.setup.timeout.ms * 2**(failures) * (+/- 20% random jitter)
 * Up to a (pre-jitter) maximum of reconnect.backoff.max.ms
 *
 * @param nodeState The node state object to update
 */
private void updateConnectionSetupTimeout(NodeConnectionState nodeState) {
    // Unlike updateReconnectBackoff, the counter is incremented BEFORE the backoff
    // computation, matching the 2**(failures) formula documented above.
    nodeState.failedConnectAttempts++;
    nodeState.connectionSetupTimeoutMs = connectionSetupTimeout.backoff(nodeState.failedConnectAttempts);
}
/**
 * Remove the given node from the tracked connection states. The main difference between this and `disconnected`
 * is the impact on `connectionDelay`: it will be 0 after this call whereas `reconnectBackoffMs` will be taken
 * into account after `disconnected` is called.
 *
 * @param id the connection to remove
 */
public void remove(String id) {
    // Drop all tracking for the node so a later connect starts from a clean slate.
    connectingNodes.remove(id);
    nodeState.remove(id);
}
/**
 * Get the state of a given connection.
 * @param id the id of the connection
 * @return the state of our connection
 */
public ConnectionState connectionState(String id) {
    // Throws IllegalStateException for untracked ids (see nodeState(String)).
    return nodeState(id).state;
}
/**
 * Get the state of a given node.
 * @param id the connection to fetch the state for
 */
private NodeConnectionState nodeState(String id) {
    NodeConnectionState found = this.nodeState.get(id);
    if (found == null) {
        throw new IllegalStateException("No entry found for connection " + id);
    }
    return found;
}
/**
 * Get the id set of nodes which are in CONNECTING state
 */
// package private for testing only
Set<String> connectingNodes() {
    // Returns the live internal set, not a copy.
    return this.connectingNodes;
}
/**
 * Get the timestamp of the latest connection attempt of a given node
 * @param id the connection to fetch the state for
 */
public long lastConnectAttemptMs(String id) {
    NodeConnectionState state = this.nodeState.get(id);
    if (state == null)
        return 0;
    return state.lastConnectAttemptMs;
}
/**
 * Get the current socket connection setup timeout of the given node.
 * The base value is defined via socket.connection.setup.timeout.
 * @param id the connection to fetch the state for
 */
public long connectionSetupTimeoutMs(String id) {
    // nodeState(id) throws IllegalStateException for untracked ids.
    return this.nodeState(id).connectionSetupTimeoutMs;
}
/**
 * Test if the connection to the given node has reached its timeout
 * @param id the connection to fetch the state for
 * @param now the current time in ms
 */
public boolean isConnectionSetupTimeout(String id, long now) {
    NodeConnectionState state = this.nodeState(id);
    if (state.state != ConnectionState.CONNECTING)
        throw new IllegalStateException("Node " + id + " is not in connecting state");
    long elapsedMs = now - lastConnectAttemptMs(id);
    return elapsedMs > connectionSetupTimeoutMs(id);
}
/**
 * Return the List of nodes whose connection setup has timed out.
 * @param now the current time in ms
 */
public List<String> nodesWithConnectionSetupTimeout(long now) {
    // NOTE(review): isConnectionSetupTimeout throws unless the node is CONNECTING;
    // presumably connectingNodes only ever contains CONNECTING nodes — confirm.
    return connectingNodes.stream()
        .filter(id -> isConnectionSetupTimeout(id, now))
        .collect(Collectors.toList());
}
/**
 * The state of our connection to a node.
 */
private static class NodeConnectionState {
    // Current lifecycle state of the connection (see ConnectionState).
    ConnectionState state;
    // Last authentication failure, if any; cleared when the connection becomes ready.
    AuthenticationException authenticationException;
    // Timestamp (ms) of the most recent connection attempt.
    long lastConnectAttemptMs;
    // Consecutive failed connection attempts; drives the exponential reconnect backoff.
    long failedAttempts;
    // Consecutive failed socket-setup attempts; drives the exponential setup timeout.
    long failedConnectAttempts;
    long reconnectBackoffMs;
    long connectionSetupTimeoutMs;
    // Connection is being throttled if current time < throttleUntilTimeMs.
    long throttleUntilTimeMs;
    // Resolved addresses for `host`; an empty list means "re-resolve on next currentAddress() call".
    private List<InetAddress> addresses;
    // Index of the currently selected entry in `addresses`; -1 while unresolved.
    private int addressIndex;
    private final String host;
    private final HostResolver hostResolver;

    private NodeConnectionState(ConnectionState state, long lastConnectAttempt, long reconnectBackoffMs,
                                long connectionSetupTimeoutMs, String host, HostResolver hostResolver) {
        this.state = state;
        this.addresses = Collections.emptyList();
        this.addressIndex = -1;
        this.authenticationException = null;
        this.lastConnectAttemptMs = lastConnectAttempt;
        this.failedAttempts = 0;
        this.reconnectBackoffMs = reconnectBackoffMs;
        this.connectionSetupTimeoutMs = connectionSetupTimeoutMs;
        this.throttleUntilTimeMs = 0;
        this.host = host;
        this.hostResolver = hostResolver;
    }

    public String host() {
        return host;
    }

    /**
     * Fetches the current selected IP address for this node, resolving {@link #host()} if necessary.
     * @return the selected address
     * @throws UnknownHostException if resolving {@link #host()} fails
     */
    private InetAddress currentAddress() throws UnknownHostException {
        if (addresses.isEmpty()) {
            // (Re-)initialize list
            addresses = ClientUtils.resolve(host, hostResolver);
            addressIndex = 0;
        }
        return addresses.get(addressIndex);
    }

    /**
     * Jumps to the next available resolved address for this node. If no other addresses are available, marks the
     * list to be refreshed on the next {@link #currentAddress()} call.
     */
    private void moveToNextAddress() {
        if (addresses.isEmpty())
            return; // Avoid div0. List will initialize on next currentAddress() call
        addressIndex = (addressIndex + 1) % addresses.size();
        if (addressIndex == 0)
            addresses = Collections.emptyList(); // Exhausted list. Re-resolve on next currentAddress() call
    }

    /**
     * Clears the resolved addresses in order to trigger re-resolving on the next {@link #currentAddress()} call.
     */
    private void clearAddresses() {
        addresses = Collections.emptyList();
    }

    public String toString() {
        return "NodeState(" + state + ", " + lastConnectAttemptMs + ", " + failedAttempts + ", " + throttleUntilTimeMs + ")";
    }
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/CommonClientConfigs.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.metrics.JmxReporter;
import org.apache.kafka.common.metrics.MetricsReporter;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Configurations shared by Kafka client applications: producer, consumer, connect, etc.
 */
public class CommonClientConfigs {
    private static final Logger log = LoggerFactory.getLogger(CommonClientConfigs.class);

    /*
     * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE.
     */

    // ---- Bootstrap / connection configs ----
    public static final String BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers";
    public static final String BOOTSTRAP_SERVERS_DOC = "A list of host/port pairs to use for establishing the initial connection to the Kafka cluster. The client will make use of all servers irrespective of which servers are specified here for bootstrapping—this list only impacts the initial hosts used to discover the full set of servers. This list should be in the form "
        + "<code>host1:port1,host2:port2,...</code>. Since these servers are just used for the initial connection to "
        + "discover the full cluster membership (which may change dynamically), this list need not contain the full set of "
        + "servers (you may want more than one, though, in case a server is down).";
    public static final String CLIENT_DNS_LOOKUP_CONFIG = "client.dns.lookup";
    public static final String CLIENT_DNS_LOOKUP_DOC = "Controls how the client uses DNS lookups. "
        + "If set to <code>use_all_dns_ips</code>, connect to each returned IP "
        + "address in sequence until a successful connection is established. "
        + "After a disconnection, the next IP is used. Once all IPs have been "
        + "used once, the client resolves the IP(s) from the hostname again "
        + "(both the JVM and the OS cache DNS name lookups, however). "
        + "If set to <code>resolve_canonical_bootstrap_servers_only</code>, "
        + "resolve each bootstrap address into a list of canonical names. After "
        + "the bootstrap phase, this behaves the same as <code>use_all_dns_ips</code>.";
    public static final String METADATA_MAX_AGE_CONFIG = "metadata.max.age.ms";
    public static final String METADATA_MAX_AGE_DOC = "The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.";
    public static final String SEND_BUFFER_CONFIG = "send.buffer.bytes";
    public static final String SEND_BUFFER_DOC = "The size of the TCP send buffer (SO_SNDBUF) to use when sending data. If the value is -1, the OS default will be used.";
    public static final int SEND_BUFFER_LOWER_BOUND = -1;
    public static final String RECEIVE_BUFFER_CONFIG = "receive.buffer.bytes";
    public static final String RECEIVE_BUFFER_DOC = "The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. If the value is -1, the OS default will be used.";
    public static final int RECEIVE_BUFFER_LOWER_BOUND = -1;
    public static final String CLIENT_ID_CONFIG = "client.id";
    public static final String CLIENT_ID_DOC = "An id string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included in server-side request logging.";
    public static final String CLIENT_RACK_CONFIG = "client.rack";
    public static final String CLIENT_RACK_DOC = "A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config 'broker.rack'";
    public static final String DEFAULT_CLIENT_RACK = "";

    // ---- Retry / backoff configs ----
    public static final String RECONNECT_BACKOFF_MS_CONFIG = "reconnect.backoff.ms";
    public static final String RECONNECT_BACKOFF_MS_DOC = "The base amount of time to wait before attempting to reconnect to a given host. This avoids repeatedly connecting to a host in a tight loop. This backoff applies to all connection attempts by the client to a broker.";
    public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG = "reconnect.backoff.max.ms";
    public static final String RECONNECT_BACKOFF_MAX_MS_DOC = "The maximum amount of time in milliseconds to wait when reconnecting to a broker that has repeatedly failed to connect. If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms.";
    public static final String RETRIES_CONFIG = "retries";
    public static final String RETRIES_DOC = "Setting a value greater than zero will cause the client to resend any request that fails with a potentially transient error." +
        " It is recommended to set the value to either zero or `MAX_VALUE` and use corresponding timeout parameters to control how long a client should retry a request.";
    public static final String RETRY_BACKOFF_MS_CONFIG = "retry.backoff.ms";
    public static final String RETRY_BACKOFF_MS_DOC = "The amount of time to wait before attempting to retry a failed request to a given topic partition. This avoids repeatedly sending requests in a tight loop under some failure scenarios.";

    // ---- Metrics configs ----
    public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = "metrics.sample.window.ms";
    public static final String METRICS_SAMPLE_WINDOW_MS_DOC = "The window of time a metrics sample is computed over.";
    public static final String METRICS_NUM_SAMPLES_CONFIG = "metrics.num.samples";
    public static final String METRICS_NUM_SAMPLES_DOC = "The number of samples maintained to compute metrics.";
    public static final String METRICS_RECORDING_LEVEL_CONFIG = "metrics.recording.level";
    public static final String METRICS_RECORDING_LEVEL_DOC = "The highest recording level for metrics.";
    public static final String METRIC_REPORTER_CLASSES_CONFIG = "metric.reporters";
    public static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters. Implementing the <code>org.apache.kafka.common.metrics.MetricsReporter</code> interface allows plugging in classes that will be notified of new metric creation. The JmxReporter is always included to register JMX statistics.";
    public static final String METRICS_CONTEXT_PREFIX = "metrics.context.";
    @Deprecated
    public static final String AUTO_INCLUDE_JMX_REPORTER_CONFIG = "auto.include.jmx.reporter";
    public static final String AUTO_INCLUDE_JMX_REPORTER_DOC = "Deprecated. Whether to automatically include JmxReporter even if it's not listed in <code>metric.reporters</code>. This configuration will be removed in Kafka 4.0, users should instead include <code>org.apache.kafka.common.metrics.JmxReporter</code> in <code>metric.reporters</code> in order to enable the JmxReporter.";

    // ---- Security configs ----
    public static final String SECURITY_PROTOCOL_CONFIG = "security.protocol";
    public static final String SECURITY_PROTOCOL_DOC = "Protocol used to communicate with brokers. Valid values are: " +
        Utils.join(SecurityProtocol.names(), ", ") + ".";
    public static final String DEFAULT_SECURITY_PROTOCOL = "PLAINTEXT";

    // ---- Socket / request timeout configs ----
    public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG = "socket.connection.setup.timeout.ms";
    public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MS_DOC = "The amount of time the client will wait for the socket connection to be established. If the connection is not built before the timeout elapses, clients will close the socket channel.";
    public static final Long DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MS = 10 * 1000L;
    public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG = "socket.connection.setup.timeout.max.ms";
    public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_DOC = "The maximum amount of time the client will wait for the socket connection to be established. The connection setup timeout will increase exponentially for each consecutive connection failure up to this maximum. To avoid connection storms, a randomization factor of 0.2 will be applied to the timeout resulting in a random range between 20% below and 20% above the computed value.";
    public static final Long DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS = 30 * 1000L;
    public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG = "connections.max.idle.ms";
    public static final String CONNECTIONS_MAX_IDLE_MS_DOC = "Close idle connections after the number of milliseconds specified by this config.";
    public static final String REQUEST_TIMEOUT_MS_CONFIG = "request.timeout.ms";
    public static final String REQUEST_TIMEOUT_MS_DOC = "The configuration controls the maximum amount of time the client will wait "
        + "for the response of a request. If the response is not received before the timeout "
        + "elapses the client will resend the request if necessary or fail the request if "
        + "retries are exhausted.";

    // ---- List serde configs ----
    public static final String DEFAULT_LIST_KEY_SERDE_INNER_CLASS = "default.list.key.serde.inner";
    public static final String DEFAULT_LIST_KEY_SERDE_INNER_CLASS_DOC = "Default inner class of list serde for key that implements the <code>org.apache.kafka.common.serialization.Serde</code> interface. "
        + "This configuration will be read if and only if <code>default.key.serde</code> configuration is set to <code>org.apache.kafka.common.serialization.Serdes.ListSerde</code>";
    public static final String DEFAULT_LIST_VALUE_SERDE_INNER_CLASS = "default.list.value.serde.inner";
    public static final String DEFAULT_LIST_VALUE_SERDE_INNER_CLASS_DOC = "Default inner class of list serde for value that implements the <code>org.apache.kafka.common.serialization.Serde</code> interface. "
        + "This configuration will be read if and only if <code>default.value.serde</code> configuration is set to <code>org.apache.kafka.common.serialization.Serdes.ListSerde</code>";
    public static final String DEFAULT_LIST_KEY_SERDE_TYPE_CLASS = "default.list.key.serde.type";
    public static final String DEFAULT_LIST_KEY_SERDE_TYPE_CLASS_DOC = "Default class for key that implements the <code>java.util.List</code> interface. "
        + "This configuration will be read if and only if <code>default.key.serde</code> configuration is set to <code>org.apache.kafka.common.serialization.Serdes.ListSerde</code> "
        + "Note when list serde class is used, one needs to set the inner serde class that implements the <code>org.apache.kafka.common.serialization.Serde</code> interface via '"
        + DEFAULT_LIST_KEY_SERDE_INNER_CLASS + "'";
    public static final String DEFAULT_LIST_VALUE_SERDE_TYPE_CLASS = "default.list.value.serde.type";
    public static final String DEFAULT_LIST_VALUE_SERDE_TYPE_CLASS_DOC = "Default class for value that implements the <code>java.util.List</code> interface. "
        + "This configuration will be read if and only if <code>default.value.serde</code> configuration is set to <code>org.apache.kafka.common.serialization.Serdes.ListSerde</code> "
        + "Note when list serde class is used, one needs to set the inner serde class that implements the <code>org.apache.kafka.common.serialization.Serde</code> interface via '"
        + DEFAULT_LIST_VALUE_SERDE_INNER_CLASS + "'";

    // ---- Group membership configs ----
    public static final String GROUP_ID_CONFIG = "group.id";
    public static final String GROUP_ID_DOC = "A unique string that identifies the consumer group this consumer belongs to. This property is required if the consumer uses either the group management functionality by using <code>subscribe(topic)</code> or the Kafka-based offset management strategy.";
    public static final String GROUP_INSTANCE_ID_CONFIG = "group.instance.id";
    public static final String GROUP_INSTANCE_ID_DOC = "A unique identifier of the consumer instance provided by the end user. "
        + "Only non-empty strings are permitted. If set, the consumer is treated as a static member, "
        + "which means that only one instance with this ID is allowed in the consumer group at any time. "
        + "This can be used in combination with a larger session timeout to avoid group rebalances caused by transient unavailability "
        + "(e.g. process restarts). If not set, the consumer will join the group as a dynamic member, which is the traditional behavior.";
    public static final String MAX_POLL_INTERVAL_MS_CONFIG = "max.poll.interval.ms";
    public static final String MAX_POLL_INTERVAL_MS_DOC = "The maximum delay between invocations of poll() when using "
        + "consumer group management. This places an upper bound on the amount of time that the consumer can be idle "
        + "before fetching more records. If poll() is not called before expiration of this timeout, then the consumer "
        + "is considered failed and the group will rebalance in order to reassign the partitions to another member. "
        + "For consumers using a non-null <code>group.instance.id</code> which reach this timeout, partitions will not be immediately reassigned. "
        + "Instead, the consumer will stop sending heartbeats and partitions will be reassigned "
        + "after expiration of <code>session.timeout.ms</code>. This mirrors the behavior of a static consumer which has shutdown.";
    public static final String REBALANCE_TIMEOUT_MS_CONFIG = "rebalance.timeout.ms";
    public static final String REBALANCE_TIMEOUT_MS_DOC = "The maximum allowed time for each worker to join the group "
        + "once a rebalance has begun. This is basically a limit on the amount of time needed for all tasks to "
        + "flush any pending data and commit offsets. If the timeout is exceeded, then the worker will be removed "
        + "from the group, which will cause offset commit failures.";
    public static final String SESSION_TIMEOUT_MS_CONFIG = "session.timeout.ms";
    public static final String SESSION_TIMEOUT_MS_DOC = "The timeout used to detect client failures when using "
        + "Kafka's group management facility. The client sends periodic heartbeats to indicate its liveness "
        + "to the broker. If no heartbeats are received by the broker before the expiration of this session timeout, "
        + "then the broker will remove this client from the group and initiate a rebalance. Note that the value "
        + "must be in the allowable range as configured in the broker configuration by <code>group.min.session.timeout.ms</code> "
        + "and <code>group.max.session.timeout.ms</code>.";
    public static final String HEARTBEAT_INTERVAL_MS_CONFIG = "heartbeat.interval.ms";
    public static final String HEARTBEAT_INTERVAL_MS_DOC = "The expected time between heartbeats to the consumer "
        + "coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the "
        + "consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. "
        + "The value must be set lower than <code>session.timeout.ms</code>, but typically should be set no higher "
        + "than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances.";
    public static final String DEFAULT_API_TIMEOUT_MS_CONFIG = "default.api.timeout.ms";
    public static final String DEFAULT_API_TIMEOUT_MS_DOC = "Specifies the timeout (in milliseconds) for client APIs. " +
        "This configuration is used as the default timeout for all client operations that do not specify a <code>timeout</code> parameter.";

    /**
     * Postprocess the configuration so that exponential backoff is disabled when reconnect backoff
     * is explicitly configured but the maximum reconnect backoff is not explicitly configured.
     *
     * @param config The config object.
     * @param parsedValues The parsedValues as provided to postProcessParsedConfig.
     *
     * @return The new values which have been set as described in postProcessParsedConfig.
     */
    public static Map<String, Object> postProcessReconnectBackoffConfigs(AbstractConfig config,
                                                                         Map<String, Object> parsedValues) {
        HashMap<String, Object> rval = new HashMap<>();
        Map<String, Object> originalConfig = config.originals();
        if ((!originalConfig.containsKey(RECONNECT_BACKOFF_MAX_MS_CONFIG)) &&
            originalConfig.containsKey(RECONNECT_BACKOFF_MS_CONFIG)) {
            log.debug("Disabling exponential reconnect backoff because {} is set, but {} is not.",
                RECONNECT_BACKOFF_MS_CONFIG, RECONNECT_BACKOFF_MAX_MS_CONFIG);
            // Pinning max backoff to the base backoff makes the "exponential" curve flat.
            rval.put(RECONNECT_BACKOFF_MAX_MS_CONFIG, parsedValues.get(RECONNECT_BACKOFF_MS_CONFIG));
        }
        return rval;
    }

    /**
     * Validates that a SASL mechanism is configured whenever the configured security
     * protocol enables SASL.
     *
     * @param config the parsed client configuration
     * @throws ConfigException if the security protocol is SASL_PLAINTEXT or SASL_SSL and
     *                         <code>sasl.mechanism</code> is null or empty
     */
    public static void postValidateSaslMechanismConfig(AbstractConfig config) {
        SecurityProtocol securityProtocol = SecurityProtocol.forName(config.getString(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG));
        String clientSaslMechanism = config.getString(SaslConfigs.SASL_MECHANISM);
        if (securityProtocol == SecurityProtocol.SASL_PLAINTEXT || securityProtocol == SecurityProtocol.SASL_SSL) {
            if (clientSaslMechanism == null || clientSaslMechanism.isEmpty()) {
                throw new ConfigException(SaslConfigs.SASL_MECHANISM, null, "When the " + CommonClientConfigs.SECURITY_PROTOCOL_CONFIG +
                    " configuration enables SASL, mechanism must be non-null and non-empty string.");
            }
        }
    }

    /**
     * Instantiates the configured <code>metric.reporters</code> without overriding the client id.
     */
    public static List<MetricsReporter> metricsReporters(AbstractConfig config) {
        return metricsReporters(Collections.emptyMap(), config);
    }

    /**
     * Instantiates the configured <code>metric.reporters</code>, passing the given client id
     * to each reporter's configuration.
     */
    public static List<MetricsReporter> metricsReporters(String clientId, AbstractConfig config) {
        return metricsReporters(Collections.singletonMap(CommonClientConfigs.CLIENT_ID_CONFIG, clientId), config);
    }

    /**
     * Instantiates the configured <code>metric.reporters</code>. A {@link JmxReporter} is appended
     * when <code>auto.include.jmx.reporter</code> is true and no JmxReporter was configured explicitly.
     */
    public static List<MetricsReporter> metricsReporters(Map<String, Object> clientIdOverride, AbstractConfig config) {
        List<MetricsReporter> reporters = config.getConfiguredInstances(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG,
            MetricsReporter.class, clientIdOverride);
        if (config.getBoolean(CommonClientConfigs.AUTO_INCLUDE_JMX_REPORTER_CONFIG) &&
            reporters.stream().noneMatch(r -> JmxReporter.class.equals(r.getClass()))) {
            JmxReporter jmxReporter = new JmxReporter();
            jmxReporter.configure(config.originals(clientIdOverride));
            reporters.add(jmxReporter);
        }
        return reporters;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/ConnectionState.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
/**
 * The lifecycle states of a connection to a node.
 *
 * <ul>
 *   <li>DISCONNECTED: connection has not been successfully established yet</li>
 *   <li>CONNECTING: connection is under progress</li>
 *   <li>CHECKING_API_VERSIONS: connection has been established and api versions check is in progress.
 *       Failure of this check will cause connection to close</li>
 *   <li>READY: connection is ready to send requests</li>
 *   <li>AUTHENTICATION_FAILED: connection failed due to an authentication error</li>
 * </ul>
 */
public enum ConnectionState {
    DISCONNECTED, CONNECTING, CHECKING_API_VERSIONS, READY, AUTHENTICATION_FAILED;

    /** True for the two "no usable socket" states. */
    public boolean isDisconnected() {
        return this == DISCONNECTED || this == AUTHENTICATION_FAILED;
    }

    /** True once a socket is established (the API version check may still be pending). */
    public boolean isConnected() {
        return this == READY || this == CHECKING_API_VERSIONS;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/DefaultHostResolver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import java.net.InetAddress;
import java.net.UnknownHostException;
public class DefaultHostResolver implements HostResolver {

    /**
     * Resolves a host name to all of its addresses using the JDK's built-in resolver.
     *
     * @param host the host name to resolve
     * @return every address the name resolves to
     * @throws UnknownHostException if the name cannot be resolved
     */
    @Override
    public InetAddress[] resolve(String host) throws UnknownHostException {
        InetAddress[] resolved = InetAddress.getAllByName(host);
        return resolved;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/FetchSessionHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.FetchMetadata;
import org.apache.kafka.common.requests.FetchRequest.PartitionData;
import org.apache.kafka.common.requests.FetchResponse;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import static org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID;
/**
* FetchSessionHandler maintains the fetch session state for connecting to a broker.
*
* Using the protocol outlined by KIP-227, clients can create incremental fetch sessions.
* These sessions allow the client to fetch information about a set of partition over
* and over, without explicitly enumerating all the partitions in the request and the
* response.
*
* FetchSessionHandler tracks the partitions which are in the session. It also
* determines which partitions need to be included in each fetch request, and what
* the attached fetch session metadata should be for each request. The corresponding
* class on the receiving broker side is FetchManager.
*/
public class FetchSessionHandler {
private final Logger log;
// The id of the broker node this handler maintains a fetch session with.
private final int node;
/**
* The metadata for the next fetch request.
*/
private FetchMetadata nextMetadata = FetchMetadata.INITIAL;
/**
* Create a handler for the fetch session with the given broker node.
*/
public FetchSessionHandler(LogContext logContext, int node) {
this.log = logContext.logger(FetchSessionHandler.class);
this.node = node;
}
// visible for testing
public int sessionId() {
return nextMetadata.sessionId();
}
/**
* All of the partitions which exist in the fetch request session.
*/
private LinkedHashMap<TopicPartition, PartitionData> sessionPartitions =
new LinkedHashMap<>(0);
/**
* All of the topic names mapped to topic ids for topics which exist in the fetch request session.
*/
private Map<Uuid, String> sessionTopicNames = new HashMap<>(0);
/**
* Get the topic id to topic name mapping for the current session.
*/
public Map<Uuid, String> sessionTopicNames() {
return sessionTopicNames;
}
public static class FetchRequestData {
/**
* The partitions to send in the fetch request.
*/
private final Map<TopicPartition, PartitionData> toSend;
/**
* The partitions to send in the request's "forget" list.
*/
private final List<TopicIdPartition> toForget;
/**
* The partitions whose topic ID changed. They are sent in the request's "forget"
* list if the version is >= 13, and re-sent in the fetch with the new topic ID.
*/
private final List<TopicIdPartition> toReplace;
/**
* All of the partitions which exist in the fetch request session.
*/
private final Map<TopicPartition, PartitionData> sessionPartitions;
/**
* The metadata to use in this fetch request.
*/
private final FetchMetadata metadata;
/**
* A boolean indicating whether we have a topic ID for every topic in the request so that we can send a request that
* uses topic IDs
*/
private final boolean canUseTopicIds;
FetchRequestData(Map<TopicPartition, PartitionData> toSend,
List<TopicIdPartition> toForget,
List<TopicIdPartition> toReplace,
Map<TopicPartition, PartitionData> sessionPartitions,
FetchMetadata metadata,
boolean canUseTopicIds) {
this.toSend = toSend;
this.toForget = toForget;
this.toReplace = toReplace;
this.sessionPartitions = sessionPartitions;
this.metadata = metadata;
this.canUseTopicIds = canUseTopicIds;
}
/**
* Get the set of partitions to send in this fetch request.
*/
public Map<TopicPartition, PartitionData> toSend() {
return toSend;
}
/**
* Get a list of partitions to forget in this fetch request.
*/
public List<TopicIdPartition> toForget() {
return toForget;
}
/**
* Get a list of partitions whose topic ID changed and should be replaced in this fetch request.
*/
public List<TopicIdPartition> toReplace() {
return toReplace;
}
/**
* Get the full set of partitions involved in this fetch request.
*/
public Map<TopicPartition, PartitionData> sessionPartitions() {
return sessionPartitions;
}
/**
* Get the session metadata to use in this fetch request.
*/
public FetchMetadata metadata() {
return metadata;
}
/**
* True if every topic in the request has a topic ID, so the request may be sent using topic IDs.
*/
public boolean canUseTopicIds() {
return canUseTopicIds;
}
@Override
public String toString() {
StringBuilder bld;
if (metadata.isFull()) {
bld = new StringBuilder("FullFetchRequest(toSend=(");
String prefix = "";
for (TopicPartition partition : toSend.keySet()) {
bld.append(prefix);
bld.append(partition);
prefix = ", ";
}
} else {
bld = new StringBuilder("IncrementalFetchRequest(toSend=(");
String prefix = "";
for (TopicPartition partition : toSend.keySet()) {
bld.append(prefix);
bld.append(partition);
prefix = ", ";
}
bld.append("), toForget=(");
prefix = "";
for (TopicIdPartition partition : toForget) {
bld.append(prefix);
bld.append(partition);
prefix = ", ";
}
bld.append("), toReplace=(");
prefix = "";
for (TopicIdPartition partition : toReplace) {
bld.append(prefix);
bld.append(partition);
prefix = ", ";
}
bld.append("), implied=(");
prefix = "";
// "Implied" partitions are in the session but not explicitly re-sent in this request.
for (TopicPartition partition : sessionPartitions.keySet()) {
if (!toSend.containsKey(partition)) {
bld.append(prefix);
bld.append(partition);
prefix = ", ";
}
}
}
if (canUseTopicIds) {
bld.append("), canUseTopicIds=True");
} else {
bld.append("), canUseTopicIds=False");
}
bld.append(")");
return bld.toString();
}
}
public class Builder {
/**
* The next partitions which we want to fetch.
*
* It is important to maintain the insertion order of this list by using a LinkedHashMap rather
* than a regular Map.
*
* One reason is that when dealing with FULL fetch requests, if there is not enough response
* space to return data from all partitions, the server will only return data from partitions
* early in this list.
*
* Another reason is because we make use of the list ordering to optimize the preparation of
* incremental fetch requests (see below).
*/
private LinkedHashMap<TopicPartition, PartitionData> next;
// Topic id -> topic name for every partition added to this builder with a non-zero topic id.
private Map<Uuid, String> topicNames;
// Whether build() should hand out a defensive copy of sessionPartitions or the live map.
private final boolean copySessionPartitions;
// Count of added partitions lacking a topic id; if non-zero, the request cannot use topic IDs.
private int partitionsWithoutTopicIds = 0;
Builder() {
this.next = new LinkedHashMap<>();
this.topicNames = new HashMap<>();
this.copySessionPartitions = true;
}
Builder(int initialSize, boolean copySessionPartitions) {
this.next = new LinkedHashMap<>(initialSize);
this.topicNames = new HashMap<>();
this.copySessionPartitions = copySessionPartitions;
}
/**
* Mark that we want data from this partition in the upcoming fetch.
*/
public void add(TopicPartition topicPartition, PartitionData data) {
next.put(topicPartition, data);
// topicIds should not change between adding partitions and building, so we can use putIfAbsent
if (data.topicId.equals(Uuid.ZERO_UUID)) {
partitionsWithoutTopicIds++;
} else {
topicNames.putIfAbsent(data.topicId, topicPartition.topic());
}
}
/**
* Build the data for the next fetch request. As a side effect this updates the
* enclosing handler's session state (sessionPartitions and sessionTopicNames),
* and consumes the builder ('next' is set to null afterwards).
*/
public FetchRequestData build() {
boolean canUseTopicIds = partitionsWithoutTopicIds == 0;
if (nextMetadata.isFull()) {
if (log.isDebugEnabled()) {
log.debug("Built full fetch {} for node {} with {}.",
nextMetadata, node, topicPartitionsToLogString(next.keySet()));
}
// A full request sends everything: the session becomes exactly the set of added partitions.
sessionPartitions = next;
next = null;
// Only add topic IDs to the session if we are using topic IDs.
if (canUseTopicIds) {
sessionTopicNames = topicNames;
} else {
sessionTopicNames = Collections.emptyMap();
}
Map<TopicPartition, PartitionData> toSend =
Collections.unmodifiableMap(new LinkedHashMap<>(sessionPartitions));
return new FetchRequestData(toSend, Collections.emptyList(), Collections.emptyList(), toSend, nextMetadata, canUseTopicIds);
}
List<TopicIdPartition> added = new ArrayList<>();
List<TopicIdPartition> removed = new ArrayList<>();
List<TopicIdPartition> altered = new ArrayList<>();
List<TopicIdPartition> replaced = new ArrayList<>();
// Walk the existing session, diffing it against 'next' to classify each partition.
for (Iterator<Entry<TopicPartition, PartitionData>> iter =
sessionPartitions.entrySet().iterator(); iter.hasNext(); ) {
Entry<TopicPartition, PartitionData> entry = iter.next();
TopicPartition topicPartition = entry.getKey();
PartitionData prevData = entry.getValue();
PartitionData nextData = next.remove(topicPartition);
if (nextData != null) {
// We basically check if the new partition had the same topic ID. If not,
// we add it to the "replaced" set. If the request is version 13 or higher, the replaced
// partition will be forgotten. In any case, we will send the new partition in the request.
if (!prevData.topicId.equals(nextData.topicId)
&& !prevData.topicId.equals(Uuid.ZERO_UUID)
&& !nextData.topicId.equals(Uuid.ZERO_UUID)) {
// Re-add the replaced partition to the end of 'next'
next.put(topicPartition, nextData);
entry.setValue(nextData);
replaced.add(new TopicIdPartition(prevData.topicId, topicPartition));
} else if (!prevData.equals(nextData)) {
// Re-add the altered partition to the end of 'next'
next.put(topicPartition, nextData);
entry.setValue(nextData);
altered.add(new TopicIdPartition(nextData.topicId, topicPartition));
}
} else {
// Remove this partition from the session.
iter.remove();
// Indicate that we no longer want to listen to this partition.
removed.add(new TopicIdPartition(prevData.topicId, topicPartition));
// If we do not have this topic ID in the builder or the session, we can not use topic IDs.
if (canUseTopicIds && prevData.topicId.equals(Uuid.ZERO_UUID))
canUseTopicIds = false;
}
}
// Add any new partitions to the session.
for (Entry<TopicPartition, PartitionData> entry : next.entrySet()) {
TopicPartition topicPartition = entry.getKey();
PartitionData nextData = entry.getValue();
if (sessionPartitions.containsKey(topicPartition)) {
// In the previous loop, all the partitions which existed in both sessionPartitions
// and next were moved to the end of next, or removed from next. Therefore,
// once we hit one of them, we know there are no more unseen entries to look
// at in next.
break;
}
sessionPartitions.put(topicPartition, nextData);
added.add(new TopicIdPartition(nextData.topicId, topicPartition));
}
// Add topic IDs to session if we can use them. If an ID is inconsistent, we will handle in the receiving broker.
// If we switched from using topic IDs to not using them (or vice versa), that error will also be handled in the receiving broker.
if (canUseTopicIds) {
sessionTopicNames = topicNames;
} else {
sessionTopicNames = Collections.emptyMap();
}
if (log.isDebugEnabled()) {
log.debug("Built incremental fetch {} for node {}. Added {}, altered {}, removed {}, " +
"replaced {} out of {}", nextMetadata, node, topicIdPartitionsToLogString(added),
topicIdPartitionsToLogString(altered), topicIdPartitionsToLogString(removed),
topicIdPartitionsToLogString(replaced), topicPartitionsToLogString(sessionPartitions.keySet()));
}
// At this point 'next' holds only the partitions that actually need to be sent.
Map<TopicPartition, PartitionData> toSend = Collections.unmodifiableMap(next);
Map<TopicPartition, PartitionData> curSessionPartitions = copySessionPartitions
? Collections.unmodifiableMap(new LinkedHashMap<>(sessionPartitions))
: Collections.unmodifiableMap(sessionPartitions);
next = null;
return new FetchRequestData(toSend,
Collections.unmodifiableList(removed),
Collections.unmodifiableList(replaced),
curSessionPartitions,
nextMetadata,
canUseTopicIds);
}
}
/**
* Create a builder for the next fetch request, using the default initial size and
* making a defensive copy of the session partitions in the built request data.
*/
public Builder newBuilder() {
return new Builder();
}
/** A builder that allows for presizing the PartitionData hashmap, and avoiding making a
* secondary copy of the sessionPartitions, in cases where this is not necessary.
* This builder is primarily for use by the Replica Fetcher
* @param size the initial size of the PartitionData hashmap
* @param copySessionPartitions boolean denoting whether the builder should make a deep copy of
* session partitions
*/
public Builder newBuilder(int size, boolean copySessionPartitions) {
return new Builder(size, copySessionPartitions);
}
/**
 * Render a collection of partitions for logging: the full list at trace level,
 * just the count otherwise.
 */
private String topicPartitionsToLogString(Collection<TopicPartition> partitions) {
    return log.isTraceEnabled()
        ? "(" + Utils.join(partitions, ", ") + ")"
        : String.format("%d partition(s)", partitions.size());
}

/**
 * Render a collection of topic-id partitions for logging: the full list at trace
 * level, just the count otherwise.
 */
private String topicIdPartitionsToLogString(Collection<TopicIdPartition> partitions) {
    return log.isTraceEnabled()
        ? "(" + Utils.join(partitions, ", ") + ")"
        : String.format("%d partition(s)", partitions.size());
}
/**
 * Return missing items which are expected to be in a particular set, but which are not.
 *
 * @param toFind The items to look for.
 * @param toSearch The set of items to search.
 * @return Empty set if all items were found; some of the missing ones in a set, if not.
 */
static <T> Set<T> findMissing(Set<T> toFind, Set<T> toSearch) {
    // Copying into a LinkedHashSet preserves toFind's iteration order;
    // removeAll then drops everything that was actually found.
    Set<T> missing = new LinkedHashSet<>(toFind);
    missing.removeAll(toSearch);
    return missing;
}
/**
* Verify that a full fetch response contains all the partitions in the fetch session.
*
* @param topicPartitions The topicPartitions from the FetchResponse.
* @param ids The topic IDs from the FetchResponse.
* @param version The version of the FetchResponse.
* @return null if the full fetch response partitions are valid; human-readable problem description otherwise.
*/
String verifyFullFetchResponsePartitions(Set<TopicPartition> topicPartitions, Set<Uuid> ids, short version) {
StringBuilder bld = new StringBuilder();
// Response partitions that are not part of the session.
Set<TopicPartition> extra =
findMissing(topicPartitions, sessionPartitions.keySet());
// Session partitions that the full response failed to include.
Set<TopicPartition> omitted =
findMissing(sessionPartitions.keySet(), topicPartitions);
// Topic IDs are only present in responses of version 13 or later.
Set<Uuid> extraIds = new HashSet<>();
if (version >= 13) {
extraIds = findMissing(ids, sessionTopicNames.keySet());
}
if (!omitted.isEmpty()) {
bld.append("omittedPartitions=(").append(Utils.join(omitted, ", ")).append("), ");
}
if (!extra.isEmpty()) {
bld.append("extraPartitions=(").append(Utils.join(extra, ", ")).append("), ");
}
if (!extraIds.isEmpty()) {
bld.append("extraIds=(").append(Utils.join(extraIds, ", ")).append("), ");
}
if ((!omitted.isEmpty()) || (!extra.isEmpty()) || (!extraIds.isEmpty())) {
bld.append("response=(").append(Utils.join(topicPartitions, ", ")).append(")");
return bld.toString();
}
return null;
}
/**
 * Verify that the partitions in an incremental fetch response are contained in the session.
 *
 * @param topicPartitions The topicPartitions from the FetchResponse.
 * @param ids The topic IDs from the FetchResponse.
 * @param version The version of the FetchResponse.
 * @return null if the incremental fetch response partitions are valid; human-readable problem description otherwise.
 */
String verifyIncrementalFetchResponsePartitions(Set<TopicPartition> topicPartitions, Set<Uuid> ids, short version) {
    // Response partitions that are not part of the current session.
    Set<TopicPartition> unexpectedPartitions = findMissing(topicPartitions, sessionPartitions.keySet());
    // Topic IDs only appear in responses of version 13 or later.
    Set<Uuid> unexpectedIds = version >= 13
        ? findMissing(ids, sessionTopicNames.keySet())
        : new HashSet<>();
    if (unexpectedPartitions.isEmpty() && unexpectedIds.isEmpty()) {
        return null;
    }
    StringBuilder problem = new StringBuilder();
    if (!unexpectedPartitions.isEmpty())
        problem.append("extraPartitions=(").append(Utils.join(unexpectedPartitions, ", ")).append("), ");
    if (!unexpectedIds.isEmpty())
        problem.append("extraIds=(").append(Utils.join(unexpectedIds, ", ")).append("), ");
    problem.append("response=(").append(Utils.join(topicPartitions, ", ")).append(")");
    return problem.toString();
}
/**
* Create a string describing the partitions in a FetchResponse.
*
* @param topicPartitions The topicPartitions from the FetchResponse.
* @return The string to log.
*/
private String responseDataToLogString(Set<TopicPartition> topicPartitions) {
// Below trace level only report counts rather than enumerating every partition.
if (!log.isTraceEnabled()) {
// "Implied" partitions are in the session but absent from this response.
int implied = sessionPartitions.size() - topicPartitions.size();
if (implied > 0) {
return String.format(" with %d response partition(s), %d implied partition(s)",
topicPartitions.size(), implied);
} else {
return String.format(" with %d response partition(s)",
topicPartitions.size());
}
}
StringBuilder bld = new StringBuilder();
bld.append(" with response=(").
append(Utils.join(topicPartitions, ", ")).
append(")");
// prefix/suffix trick: the "implied=(" header and closing ")" are only emitted
// if at least one implied partition is found.
String prefix = ", implied=(";
String suffix = "";
for (TopicPartition partition : sessionPartitions.keySet()) {
if (!topicPartitions.contains(partition)) {
bld.append(prefix);
bld.append(partition);
prefix = ", ";
suffix = ")";
}
}
bld.append(suffix);
return bld.toString();
}
/**
* Handle the fetch response.
*
* On success, nextMetadata is advanced (starting, continuing, or resetting the
* incremental session as indicated by the broker's session id). On failure, the
* metadata is reset so the next request either starts over or closes the session.
*
* @param response The response.
* @param version The version of the request.
* @return True if the response is well-formed; false if it can't be processed
* because of missing or unexpected partitions.
*/
public boolean handleResponse(FetchResponse response, short version) {
if (response.error() != Errors.NONE) {
log.info("Node {} was unable to process the fetch request with {}: {}.",
node, nextMetadata, response.error());
// A missing session id means the broker no longer knows the session: start fresh.
// Any other error closes the existing session and attempts to create a new one.
if (response.error() == Errors.FETCH_SESSION_ID_NOT_FOUND) {
nextMetadata = FetchMetadata.INITIAL;
} else {
nextMetadata = nextMetadata.nextCloseExistingAttemptNew();
}
return false;
}
Set<TopicPartition> topicPartitions = response.responseData(sessionTopicNames, version).keySet();
if (nextMetadata.isFull()) {
if (topicPartitions.isEmpty() && response.throttleTimeMs() > 0) {
// Normally, an empty full fetch response would be invalid. However, KIP-219
// specifies that if the broker wants to throttle the client, it will respond
// to a full fetch request with an empty response and a throttleTimeMs
// value set. We don't want to log this with a warning, since it's not an error.
// However, the empty full fetch response can't be processed, so it's still appropriate
// to return false here.
if (log.isDebugEnabled()) {
// Fixed grammar in the log message ("a empty" -> "an empty").
log.debug("Node {} sent an empty full fetch response to indicate that this " +
"client should be throttled for {} ms.", node, response.throttleTimeMs());
}
nextMetadata = FetchMetadata.INITIAL;
return false;
}
String problem = verifyFullFetchResponsePartitions(topicPartitions, response.topicIds(), version);
if (problem != null) {
log.info("Node {} sent an invalid full fetch response with {}", node, problem);
nextMetadata = FetchMetadata.INITIAL;
return false;
} else if (response.sessionId() == INVALID_SESSION_ID) {
// The broker declined to create an incremental session.
if (log.isDebugEnabled())
log.debug("Node {} sent a full fetch response{}", node, responseDataToLogString(topicPartitions));
nextMetadata = FetchMetadata.INITIAL;
return true;
} else {
// The server created a new incremental fetch session.
if (log.isDebugEnabled())
log.debug("Node {} sent a full fetch response that created a new incremental " +
"fetch session {}{}", node, response.sessionId(), responseDataToLogString(topicPartitions));
nextMetadata = FetchMetadata.newIncremental(response.sessionId());
return true;
}
} else {
String problem = verifyIncrementalFetchResponsePartitions(topicPartitions, response.topicIds(), version);
if (problem != null) {
log.info("Node {} sent an invalid incremental fetch response with {}", node, problem);
nextMetadata = nextMetadata.nextCloseExistingAttemptNew();
return false;
} else if (response.sessionId() == INVALID_SESSION_ID) {
// The incremental fetch session was closed by the server.
if (log.isDebugEnabled())
log.debug("Node {} sent an incremental fetch response closing session {}{}",
node, nextMetadata.sessionId(), responseDataToLogString(topicPartitions));
nextMetadata = FetchMetadata.INITIAL;
return true;
} else {
// The incremental fetch session was continued by the server.
// We don't have to do anything special here to support KIP-219, since an empty incremental
// fetch request is perfectly valid.
if (log.isDebugEnabled())
log.debug("Node {} sent an incremental fetch response with throttleTimeMs = {} " +
"for session {}{}", node, response.throttleTimeMs(), response.sessionId(),
responseDataToLogString(topicPartitions));
nextMetadata = nextMetadata.nextIncremental();
return true;
}
}
}
/**
* The client will initiate the session close on next fetch request.
*/
public void notifyClose() {
log.debug("Set the metadata for next fetch request to close the existing session ID={}", nextMetadata.sessionId());
nextMetadata = nextMetadata.nextCloseExisting();
}
/**
* Handle an error sending the prepared request.
*
* When a network error occurs, we close any existing fetch session on our next request,
* and try to create a new session.
*
* @param t The exception.
*/
public void handleError(Throwable t) {
// The trailing throwable argument makes SLF4J log the stack trace.
log.info("Error sending fetch request {} to node {}:", nextMetadata, node, t);
nextMetadata = nextMetadata.nextCloseExistingAttemptNew();
}
/**
* Get the fetch request session's partitions.
*/
public Set<TopicPartition> sessionTopicPartitions() {
return sessionPartitions.keySet();
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/GroupRebalanceConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.requests.JoinGroupRequest;
import java.util.Locale;
import java.util.Optional;
/**
 * Class to extract group rebalance related configs.
 */
public class GroupRebalanceConfig {
    public enum ProtocolType {
        CONSUMER,
        CONNECT;

        @Override
        public String toString() {
            // name() equals the default Enum toString; render it lower-case, locale-independently.
            return name().toLowerCase(Locale.ROOT);
        }
    }

    public final int sessionTimeoutMs;
    public final int rebalanceTimeoutMs;
    public final int heartbeatIntervalMs;
    public final String groupId;
    public final Optional<String> groupInstanceId;
    public final long retryBackoffMs;
    public final boolean leaveGroupOnClose;

    /**
     * Extract the rebalance-related settings from the given config for the given protocol.
     */
    public GroupRebalanceConfig(AbstractConfig config, ProtocolType protocolType) {
        final boolean isConsumer = protocolType == ProtocolType.CONSUMER;
        this.sessionTimeoutMs = config.getInt(CommonClientConfigs.SESSION_TIMEOUT_MS_CONFIG);
        // Consumer and Connect use different config names for defining rebalance timeout
        this.rebalanceTimeoutMs = isConsumer
            ? config.getInt(CommonClientConfigs.MAX_POLL_INTERVAL_MS_CONFIG)
            : config.getInt(CommonClientConfigs.REBALANCE_TIMEOUT_MS_CONFIG);
        this.heartbeatIntervalMs = config.getInt(CommonClientConfigs.HEARTBEAT_INTERVAL_MS_CONFIG);
        this.groupId = config.getString(CommonClientConfigs.GROUP_ID_CONFIG);
        // Static membership is only introduced in consumer API.
        String instanceId = isConsumer
            ? config.getString(CommonClientConfigs.GROUP_INSTANCE_ID_CONFIG)
            : null;
        if (instanceId != null) {
            JoinGroupRequest.validateGroupInstanceId(instanceId);
        }
        this.groupInstanceId = Optional.ofNullable(instanceId);
        this.retryBackoffMs = config.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
        // Internal leave group config is only defined in Consumer.
        this.leaveGroupOnClose = isConsumer
            ? config.getBoolean("internal.leave.group.on.close")
            : true;
    }

    // For testing purpose.
    public GroupRebalanceConfig(final int sessionTimeoutMs,
                                final int rebalanceTimeoutMs,
                                final int heartbeatIntervalMs,
                                String groupId,
                                Optional<String> groupInstanceId,
                                long retryBackoffMs,
                                boolean leaveGroupOnClose) {
        this.sessionTimeoutMs = sessionTimeoutMs;
        this.rebalanceTimeoutMs = rebalanceTimeoutMs;
        this.heartbeatIntervalMs = heartbeatIntervalMs;
        this.groupId = groupId;
        this.groupInstanceId = groupInstanceId;
        this.retryBackoffMs = retryBackoffMs;
        this.leaveGroupOnClose = leaveGroupOnClose;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/HostResolver.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import java.net.InetAddress;
import java.net.UnknownHostException;
/**
 * A pluggable strategy for resolving a host name to one or more addresses.
 */
public interface HostResolver {
/**
 * Resolve the given host name.
 *
 * @param host the host name to resolve
 * @return the addresses the host resolves to
 * @throws UnknownHostException if the host cannot be resolved
 */
InetAddress[] resolve(String host) throws UnknownHostException;
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/InFlightRequests.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * The set of requests which have been sent or are being sent but haven't yet received a response
 */
final class InFlightRequests {
    /** Maximum number of unacknowledged requests allowed on a single connection. */
    private final int maxInFlightRequestsPerConnection;
    /** Per-destination queues; the head holds the most recently sent request, the tail the oldest. */
    private final Map<String, Deque<NetworkClient.InFlightRequest>> requests = new HashMap<>();
    /** Thread safe total number of in flight requests. */
    private final AtomicInteger inFlightRequestCount = new AtomicInteger(0);

    public InFlightRequests(int maxInFlightRequestsPerConnection) {
        this.maxInFlightRequestsPerConnection = maxInFlightRequestsPerConnection;
    }

    /**
     * Add the given request to the queue for the connection it was directed to
     */
    public void add(NetworkClient.InFlightRequest request) {
        // computeIfAbsent replaces the get/null-check/put sequence with a single lookup.
        this.requests.computeIfAbsent(request.destination, dest -> new ArrayDeque<>())
            .addFirst(request);
        inFlightRequestCount.incrementAndGet();
    }

    /**
     * Get the request queue for the given node
     *
     * @throws IllegalStateException if there are no in-flight requests for the node
     */
    private Deque<NetworkClient.InFlightRequest> requestQueue(String node) {
        Deque<NetworkClient.InFlightRequest> reqs = requests.get(node);
        if (reqs == null || reqs.isEmpty())
            throw new IllegalStateException("There are no in-flight requests for node " + node);
        return reqs;
    }

    /**
     * Get the oldest request (the one that will be completed next) for the given node
     */
    public NetworkClient.InFlightRequest completeNext(String node) {
        // The oldest request lives at the tail of the deque.
        NetworkClient.InFlightRequest inFlightRequest = requestQueue(node).pollLast();
        inFlightRequestCount.decrementAndGet();
        return inFlightRequest;
    }

    /**
     * Get the last request we sent to the given node (but don't remove it from the queue)
     * @param node The node id
     */
    public NetworkClient.InFlightRequest lastSent(String node) {
        return requestQueue(node).peekFirst();
    }

    /**
     * Complete the last request that was sent to a particular node.
     * @param node The node the request was sent to
     * @return The request
     */
    public NetworkClient.InFlightRequest completeLastSent(String node) {
        NetworkClient.InFlightRequest inFlightRequest = requestQueue(node).pollFirst();
        inFlightRequestCount.decrementAndGet();
        return inFlightRequest;
    }

    /**
     * Can we send more requests to this node?
     *
     * @param node Node in question
     * @return true iff we have no requests still being sent to the given node
     */
    public boolean canSendMore(String node) {
        Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
        // The head is the most recently sent request: its send must have completed, and
        // the queue must be under the per-connection limit, before another may be queued.
        return queue == null || queue.isEmpty() ||
            (queue.peekFirst().send.completed() && queue.size() < this.maxInFlightRequestsPerConnection);
    }

    /**
     * Return the number of in-flight requests directed at the given node
     * @param node The node
     * @return The request count.
     */
    public int count(String node) {
        Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
        return queue == null ? 0 : queue.size();
    }

    /**
     * Return true if there is no in-flight request directed at the given node and false otherwise
     */
    public boolean isEmpty(String node) {
        Deque<NetworkClient.InFlightRequest> queue = requests.get(node);
        return queue == null || queue.isEmpty();
    }

    /**
     * Count all in-flight requests for all nodes. This method is thread safe, but may lag the actual count.
     */
    public int count() {
        return inFlightRequestCount.get();
    }

    /**
     * Return true if there is no in-flight request and false otherwise
     */
    public boolean isEmpty() {
        for (Deque<NetworkClient.InFlightRequest> deque : this.requests.values()) {
            if (!deque.isEmpty())
                return false;
        }
        return true;
    }

    /**
     * Clear out all the in-flight requests for the given node and return them
     *
     * @param node The node
     * @return All the in-flight requests for that node that have been removed
     */
    public Iterable<NetworkClient.InFlightRequest> clearAll(String node) {
        // A single remove() replaces the previous get-then-remove double lookup.
        final Deque<NetworkClient.InFlightRequest> clearedRequests = requests.remove(node);
        if (clearedRequests == null)
            return Collections.emptyList();
        inFlightRequestCount.getAndAdd(-clearedRequests.size());
        // descendingIterator yields tail-to-head, i.e. oldest request first.
        return clearedRequests::descendingIterator;
    }

    /**
     * Check whether the given queue holds a request whose elapsed time since send
     * exceeds its request timeout. (Primitive boolean return; the boxed Boolean
     * served no purpose.)
     */
    private boolean hasExpiredRequest(long now, Deque<NetworkClient.InFlightRequest> deque) {
        for (NetworkClient.InFlightRequest request : deque) {
            if (request.timeElapsedSinceSendMs(now) > request.requestTimeoutMs)
                return true;
        }
        return false;
    }

    /**
     * Returns a list of nodes with pending in-flight request, that need to be timed out
     *
     * @param now current time in milliseconds
     * @return list of nodes
     */
    public List<String> nodesWithTimedOutRequests(long now) {
        List<String> nodeIds = new ArrayList<>();
        for (Map.Entry<String, Deque<NetworkClient.InFlightRequest>> requestEntry : requests.entrySet()) {
            if (hasExpiredRequest(now, requestEntry.getValue()))
                nodeIds.add(requestEntry.getKey());
        }
        return nodeIds;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/KafkaClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.requests.AbstractRequest;
import java.io.Closeable;
import java.util.List;
/**
* The interface for {@link NetworkClient}
*/
public interface KafkaClient extends Closeable {
/**
* Check if we are currently ready to send another request to the given node but don't attempt to connect if we
* aren't.
*
* @param node The node to check
* @param now The current timestamp
*/
boolean isReady(Node node, long now);
/**
* Initiate a connection to the given node (if necessary), and return true if already connected. The readiness of a
* node will change only when poll is invoked.
*
* @param node The node to connect to.
* @param now The current time
* @return true iff we are ready to immediately initiate the sending of another request to the given node.
*/
boolean ready(Node node, long now);
    /**
     * Return the number of milliseconds to wait, based on the connection state, before attempting to send data. When
     * disconnected, this respects the reconnect backoff time. When connecting or connected, this handles slow/stalled
     * connections.
     *
     * @param node The node to check
     * @param now The current timestamp
     * @return The number of milliseconds to wait.
     */
    long connectionDelay(Node node, long now);

    /**
     * Return the number of milliseconds to wait, based on the connection state and the throttle time, before
     * attempting to send data. If the connection has been established but is being throttled, return the throttle
     * delay. Otherwise, return the connection delay.
     *
     * @param node the connection to check
     * @param now the current time in ms
     * @return the number of milliseconds to wait
     */
    long pollDelayMs(Node node, long now);

    /**
     * Check if the connection of the node has failed, based on the connection state. Such connection failures are
     * usually transient and can be resumed in the next {@link #ready(org.apache.kafka.common.Node, long)} }
     * call, but there are cases where transient failures need to be caught and re-acted upon.
     *
     * @param node the node to check
     * @return true iff the connection has failed and the node is disconnected
     */
    boolean connectionFailed(Node node);

    /**
     * Check if authentication to this node has failed, based on the connection state. Authentication failures are
     * propagated without any retries.
     *
     * @param node the node to check
     * @return an AuthenticationException iff authentication has failed, null otherwise
     */
    AuthenticationException authenticationException(Node node);

    /**
     * Queue up the given request for sending. Requests can only be sent on ready connections.
     *
     * @param request The request
     * @param now The current timestamp
     */
    void send(ClientRequest request, long now);

    /**
     * Do actual reads and writes from sockets.
     *
     * @param timeout The maximum amount of time to wait for responses in ms, must be non-negative. The implementation
     *                is free to use a lower value if appropriate (common reasons for this are a lower request or
     *                metadata update timeout)
     * @param now The current time in ms
     * @return The list of responses that completed during this poll
     * @throws IllegalStateException If a request is sent to an unready node
     */
    List<ClientResponse> poll(long timeout, long now);

    /**
     * Disconnects the connection to a particular node, if there is one.
     * Any pending ClientRequests for this connection will receive disconnections.
     *
     * @param nodeId The id of the node
     */
    void disconnect(String nodeId);

    /**
     * Closes the connection to a particular node (if there is one).
     * All requests on the connection will be cleared. ClientRequest callbacks will not be invoked
     * for the cleared requests, nor will they be returned from poll().
     *
     * @param nodeId The id of the node
     */
    void close(String nodeId);

    /**
     * Choose the node with the fewest outstanding requests. This method will prefer a node with an existing connection,
     * but will potentially choose a node for which we don't yet have a connection if all existing connections are in
     * use.
     *
     * @param now The current time in ms
     * @return The node with the fewest in-flight requests.
     */
    Node leastLoadedNode(long now);

    /**
     * The number of currently in-flight requests for which we have not yet returned a response.
     *
     * @return the total number of in-flight requests across all nodes
     */
    int inFlightRequestCount();

    /**
     * Return true if there is at least one in-flight request and false otherwise.
     */
    boolean hasInFlightRequests();

    /**
     * Get the total in-flight requests for a particular node.
     *
     * @param nodeId The id of the node
     * @return the number of in-flight requests for that node
     */
    int inFlightRequestCount(String nodeId);

    /**
     * Return true if there is at least one in-flight request for a particular node and false otherwise.
     */
    boolean hasInFlightRequests(String nodeId);

    /**
     * Return true if there is at least one node with connection in the READY state and not throttled. Returns false
     * otherwise.
     *
     * @param now the current time
     * @return true iff some node is connected, ready, and unthrottled
     */
    boolean hasReadyNodes(long now);

    /**
     * Wake up the client if it is currently blocked waiting for I/O.
     */
    void wakeup();

    /**
     * Create a new ClientRequest.
     *
     * @param nodeId the node to send to
     * @param requestBuilder the request builder to use
     * @param createdTimeMs the time in milliseconds to use as the creation time of the request
     * @param expectResponse true iff we expect a response
     * @return the newly created request
     */
    ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder,
                                   long createdTimeMs, boolean expectResponse);

    /**
     * Create a new ClientRequest.
     *
     * @param nodeId the node to send to
     * @param requestBuilder the request builder to use
     * @param createdTimeMs the time in milliseconds to use as the creation time of the request
     * @param expectResponse true iff we expect a response
     * @param requestTimeoutMs Upper bound time in milliseconds to await a response before disconnecting the socket and
     *                         cancelling the request. The request may get cancelled sooner if the socket disconnects
     *                         for any reason including if another pending request to the same node timed out first.
     * @param callback the callback to invoke when we get a response
     * @return the newly created request
     */
    ClientRequest newClientRequest(String nodeId,
                                   AbstractRequest.Builder<?> requestBuilder,
                                   long createdTimeMs,
                                   boolean expectResponse,
                                   int requestTimeoutMs,
                                   RequestCompletionHandler callback);

    /**
     * Initiates shutdown of this client. This method may be invoked from another thread while this
     * client is being polled. No further requests may be sent using the client. The current poll()
     * will be terminated using wakeup(). The client should be explicitly shutdown using {@link #close()}
     * after poll returns. Note that {@link #close()} should not be invoked concurrently while polling.
     */
    void initiateClose();

    /**
     * Returns true if the client is still active. Returns false if {@link #initiateClose()} or {@link #close()}
     * was invoked for this client.
     */
    boolean active();
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/ManualMetadataUpdater.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.requests.RequestHeader;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
/**
* A simple implementation of `MetadataUpdater` that returns the cluster nodes set via the constructor or via
* `setNodes`.
*
* This is useful in cases where automatic metadata updates are not required. An example is controller/broker
* communication.
*
* This class is not thread-safe!
*/
public class ManualMetadataUpdater implements MetadataUpdater {

    // The fixed node list handed out by fetchNodes(); replaced wholesale via setNodes().
    private List<Node> nodes;

    /** Creates an updater with an initially empty node list. */
    public ManualMetadataUpdater() {
        this.nodes = new ArrayList<>(0);
    }

    /**
     * Creates an updater that will serve the given nodes.
     *
     * @param nodes the cluster nodes to return from {@link #fetchNodes()}
     */
    public ManualMetadataUpdater(List<Node> nodes) {
        this.nodes = nodes;
    }

    /**
     * Replaces the node list served by this updater. Not thread-safe.
     *
     * @param nodes the new cluster nodes
     */
    public void setNodes(List<Node> nodes) {
        this.nodes = nodes;
    }

    /** Returns a defensive copy of the configured nodes. */
    @Override
    public List<Node> fetchNodes() {
        return new ArrayList<>(nodes);
    }

    /** Manual metadata never becomes due for an automatic update. */
    @Override
    public boolean isUpdateDue(long now) {
        return false;
    }

    /** No automatic updates are performed, so wait "forever". */
    @Override
    public long maybeUpdate(long now) {
        return Long.MAX_VALUE;
    }

    @Override
    public void handleServerDisconnect(long now, String nodeId, Optional<AuthenticationException> maybeAuthException) {
        // We don't fail the broker on failures. There should be sufficient information from
        // the NetworkClient logs to indicate the reason for the failure.
    }

    @Override
    public void handleFailedRequest(long now, Optional<KafkaException> maybeFatalException) {
        // Intentionally a no-op: failures are not tracked here.
    }

    @Override
    public void handleSuccessfulResponse(RequestHeader requestHeader, long now, MetadataResponse response) {
        // Intentionally a no-op: responses never change the manually supplied node list.
    }

    @Override
    public void close() {
        // Nothing to release.
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/Metadata.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.errors.InvalidMetadataException;
import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.errors.TopicAuthorizationException;
import org.apache.kafka.common.internals.ClusterResourceListeners;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.MetadataRequest;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;
import java.io.Closeable;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.function.Supplier;
import static org.apache.kafka.common.record.RecordBatch.NO_PARTITION_LEADER_EPOCH;
/**
* A class encapsulating some of the logic around metadata.
* <p>
* This class is shared by the client thread (for partitioning) and the background sender thread.
*
 * Metadata is maintained for only a subset of topics, which can be added to over time. Requesting metadata for a
 * topic that we do not yet have any metadata for will trigger a metadata update.
* <p>
* If topic expiry is enabled for the metadata, any topic that has not been used within the expiry interval
* is removed from the metadata refresh set after an update. Consumers disable topic expiry since they explicitly
* manage topics while producers rely on topic expiry to limit the refresh set.
*/
public class Metadata implements Closeable {
    private final Logger log;
    // Minimum time between refresh attempts, to avoid busy polling when updates fail.
    private final long refreshBackoffMs;
    // Maximum age of the current metadata before a full refresh is forced.
    private final long metadataExpireMs;
    private int updateVersion; // bumped on every metadata response
    private int requestVersion; // bumped on every new topic addition
    private long lastRefreshMs; // time of the last refresh attempt, successful or not
    private long lastSuccessfulRefreshMs; // time of the last fully successful refresh
    // Fatal error (e.g. authentication failure) to throw to the caller; cleared once thrown.
    private KafkaException fatalException;
    private Set<String> invalidTopics;
    private Set<String> unauthorizedTopics;
    private MetadataCache cache = MetadataCache.empty();
    private boolean needFullUpdate;    // a full (all active topics) update has been requested
    private boolean needPartialUpdate; // an update covering newly added topics has been requested
    private final ClusterResourceListeners clusterResourceListeners;
    private boolean isClosed;
    // Highest leader epoch seen per partition; used to discard stale metadata responses.
    private final Map<TopicPartition, Integer> lastSeenLeaderEpochs;

    /**
     * Create a new Metadata instance
     *
     * @param refreshBackoffMs The minimum amount of time that must expire between metadata refreshes to avoid busy
     *                         polling
     * @param metadataExpireMs The maximum amount of time that metadata can be retained without refresh
     * @param logContext Log context corresponding to the containing client
     * @param clusterResourceListeners List of ClusterResourceListeners which will receive metadata updates.
     */
    public Metadata(long refreshBackoffMs,
                    long metadataExpireMs,
                    LogContext logContext,
                    ClusterResourceListeners clusterResourceListeners) {
        this.log = logContext.logger(Metadata.class);
        this.refreshBackoffMs = refreshBackoffMs;
        this.metadataExpireMs = metadataExpireMs;
        this.lastRefreshMs = 0L;
        this.lastSuccessfulRefreshMs = 0L;
        this.requestVersion = 0;
        this.updateVersion = 0;
        this.needFullUpdate = false;
        this.needPartialUpdate = false;
        this.clusterResourceListeners = clusterResourceListeners;
        this.isClosed = false;
        this.lastSeenLeaderEpochs = new HashMap<>();
        this.invalidTopics = Collections.emptySet();
        this.unauthorizedTopics = Collections.emptySet();
    }

    /**
     * Get the current cluster info without blocking
     */
    public synchronized Cluster fetch() {
        return cache.cluster();
    }

    /**
     * Return the next time when the current cluster info can be updated (i.e., backoff time has elapsed).
     *
     * @param nowMs current time in ms
     * @return remaining time in ms till the cluster info can be updated again
     */
    public synchronized long timeToAllowUpdate(long nowMs) {
        return Math.max(this.lastRefreshMs + this.refreshBackoffMs - nowMs, 0);
    }

    /**
     * The next time to update the cluster info is the maximum of the time the current info will expire and the time the
     * current info can be updated (i.e. backoff time has elapsed); If an update has been requested then the expiry time
     * is now
     *
     * @param nowMs current time in ms
     * @return remaining time in ms till updating the cluster info
     */
    public synchronized long timeToNextUpdate(long nowMs) {
        long timeToExpire = updateRequested() ? 0 : Math.max(this.lastSuccessfulRefreshMs + this.metadataExpireMs - nowMs, 0);
        return Math.max(timeToExpire, timeToAllowUpdate(nowMs));
    }

    public long metadataExpireMs() {
        return this.metadataExpireMs;
    }

    /**
     * Request an update of the current cluster metadata info, return the current updateVersion before the update
     */
    public synchronized int requestUpdate() {
        this.needFullUpdate = true;
        return this.updateVersion;
    }

    /**
     * Request a partial metadata update covering newly added topics.
     *
     * @return the current updateVersion before the update
     */
    public synchronized int requestUpdateForNewTopics() {
        // Reset the last refresh time so the partial update can proceed immediately,
        // bypassing the refresh backoff.
        this.lastRefreshMs = 0;
        this.needPartialUpdate = true;
        this.requestVersion++;
        return this.updateVersion;
    }

    /**
     * Request an update for the partition metadata iff we have seen a newer leader epoch. This is called by the client
     * any time it handles a response from the broker that includes leader epoch, except for UpdateMetadata which
     * follows a different code path ({@link #update}).
     *
     * @param topicPartition the partition whose epoch was observed; must not be null
     * @param leaderEpoch the newly observed leader epoch; must be non-negative
     * @return true if we updated the last seen epoch, false otherwise
     */
    public synchronized boolean updateLastSeenEpochIfNewer(TopicPartition topicPartition, int leaderEpoch) {
        Objects.requireNonNull(topicPartition, "TopicPartition cannot be null");
        if (leaderEpoch < 0)
            throw new IllegalArgumentException("Invalid leader epoch " + leaderEpoch + " (must be non-negative)");
        Integer oldEpoch = lastSeenLeaderEpochs.get(topicPartition);
        log.trace("Determining if we should replace existing epoch {} with new epoch {} for partition {}", oldEpoch, leaderEpoch, topicPartition);
        final boolean updated;
        if (oldEpoch == null) {
            // No previously tracked epoch for this partition; do not start tracking from here.
            log.debug("Not replacing null epoch with new epoch {} for partition {}", leaderEpoch, topicPartition);
            updated = false;
        } else if (leaderEpoch > oldEpoch) {
            log.debug("Updating last seen epoch from {} to {} for partition {}", oldEpoch, leaderEpoch, topicPartition);
            lastSeenLeaderEpochs.put(topicPartition, leaderEpoch);
            updated = true;
        } else {
            log.debug("Not replacing existing epoch {} with new epoch {} for partition {}", oldEpoch, leaderEpoch, topicPartition);
            updated = false;
        }
        // A newer epoch implies our cached metadata is stale, so schedule a full update.
        this.needFullUpdate = this.needFullUpdate || updated;
        return updated;
    }

    public Optional<Integer> lastSeenLeaderEpoch(TopicPartition topicPartition) {
        return Optional.ofNullable(lastSeenLeaderEpochs.get(topicPartition));
    }

    /**
     * Check whether an update has been explicitly requested.
     *
     * @return true if an update was requested, false otherwise
     */
    public synchronized boolean updateRequested() {
        return this.needFullUpdate || this.needPartialUpdate;
    }

    /**
     * Return the cached partition info if it exists and a newer leader epoch isn't known about.
     */
    synchronized Optional<MetadataResponse.PartitionMetadata> partitionMetadataIfCurrent(TopicPartition topicPartition) {
        Integer epoch = lastSeenLeaderEpochs.get(topicPartition);
        Optional<MetadataResponse.PartitionMetadata> partitionMetadata = cache.partitionMetadata(topicPartition);
        if (epoch == null) {
            // old cluster format (no epochs)
            return partitionMetadata;
        } else {
            // Only return the cached metadata if it matches the epoch we last saw.
            return partitionMetadata.filter(metadata ->
                    metadata.leaderEpoch.orElse(NO_PARTITION_LEADER_EPOCH).equals(epoch));
        }
    }

    /**
     * @return a mapping from topic names to topic IDs for all topics with valid IDs in the cache
     */
    public synchronized Map<String, Uuid> topicIds() {
        return cache.topicIds();
    }

    public synchronized LeaderAndEpoch currentLeader(TopicPartition topicPartition) {
        Optional<MetadataResponse.PartitionMetadata> maybeMetadata = partitionMetadataIfCurrent(topicPartition);
        if (!maybeMetadata.isPresent())
            return new LeaderAndEpoch(Optional.empty(), Optional.ofNullable(lastSeenLeaderEpochs.get(topicPartition)));
        MetadataResponse.PartitionMetadata partitionMetadata = maybeMetadata.get();
        Optional<Integer> leaderEpochOpt = partitionMetadata.leaderEpoch;
        Optional<Node> leaderNodeOpt = partitionMetadata.leaderId.flatMap(cache::nodeById);
        return new LeaderAndEpoch(leaderNodeOpt, leaderEpochOpt);
    }

    /**
     * Seed the cache with the bootstrap addresses and schedule a full update.
     */
    public synchronized void bootstrap(List<InetSocketAddress> addresses) {
        this.needFullUpdate = true;
        this.updateVersion += 1;
        this.cache = MetadataCache.bootstrap(addresses);
    }

    /**
     * Update metadata assuming the current request version.
     *
     * For testing only.
     */
    public synchronized void updateWithCurrentRequestVersion(MetadataResponse response, boolean isPartialUpdate, long nowMs) {
        this.update(this.requestVersion, response, isPartialUpdate, nowMs);
    }

    /**
     * Updates the cluster metadata. If topic expiry is enabled, expiry time
     * is set for topics if required and expired topics are removed from the metadata.
     *
     * @param requestVersion The request version corresponding to the update response, as provided by
     *     {@link #newMetadataRequestAndVersion(long)}.
     * @param response metadata response received from the broker
     * @param isPartialUpdate whether the metadata request was for a subset of the active topics
     * @param nowMs current time in milliseconds
     */
    public synchronized void update(int requestVersion, MetadataResponse response, boolean isPartialUpdate, long nowMs) {
        Objects.requireNonNull(response, "Metadata response cannot be null");
        if (isClosed())
            throw new IllegalStateException("Update requested after metadata close");
        // If topics were added after this request was issued, another partial update is still needed.
        this.needPartialUpdate = requestVersion < this.requestVersion;
        this.lastRefreshMs = nowMs;
        this.updateVersion += 1;
        if (!isPartialUpdate) {
            this.needFullUpdate = false;
            this.lastSuccessfulRefreshMs = nowMs;
        }
        String previousClusterId = cache.clusterResource().clusterId();
        this.cache = handleMetadataResponse(response, isPartialUpdate, nowMs);
        Cluster cluster = cache.cluster();
        maybeSetMetadataError(cluster);
        // Drop tracked epochs for topics we no longer retain.
        this.lastSeenLeaderEpochs.keySet().removeIf(tp -> !retainTopic(tp.topic(), false, nowMs));
        String newClusterId = cache.clusterResource().clusterId();
        if (!Objects.equals(previousClusterId, newClusterId)) {
            log.info("Cluster ID: {}", newClusterId);
        }
        clusterResourceListeners.onUpdate(cache.clusterResource());
        log.debug("Updated cluster metadata updateVersion {} to {}", this.updateVersion, this.cache);
    }

    // Record any invalid/unauthorized-topic errors reported in the latest cluster view.
    private void maybeSetMetadataError(Cluster cluster) {
        clearRecoverableErrors();
        checkInvalidTopics(cluster);
        checkUnauthorizedTopics(cluster);
    }

    private void checkInvalidTopics(Cluster cluster) {
        if (!cluster.invalidTopics().isEmpty()) {
            log.error("Metadata response reported invalid topics {}", cluster.invalidTopics());
            invalidTopics = new HashSet<>(cluster.invalidTopics());
        }
    }

    private void checkUnauthorizedTopics(Cluster cluster) {
        if (!cluster.unauthorizedTopics().isEmpty()) {
            log.error("Topic authorization failed for topics {}", cluster.unauthorizedTopics());
            unauthorizedTopics = new HashSet<>(cluster.unauthorizedTopics());
        }
    }

    /**
     * Transform a MetadataResponse into a new MetadataCache instance.
     */
    private MetadataCache handleMetadataResponse(MetadataResponse metadataResponse, boolean isPartialUpdate, long nowMs) {
        // All encountered topics.
        Set<String> topics = new HashSet<>();
        // Topic/partition state to be passed to the new metadata cache.
        Set<String> internalTopics = new HashSet<>();
        Set<String> unauthorizedTopics = new HashSet<>();
        Set<String> invalidTopics = new HashSet<>();
        List<MetadataResponse.PartitionMetadata> partitions = new ArrayList<>();
        Map<String, Uuid> topicIds = new HashMap<>();
        Map<String, Uuid> oldTopicIds = cache.topicIds();
        for (MetadataResponse.TopicMetadata metadata : metadataResponse.topicMetadata()) {
            String topicName = metadata.topic();
            Uuid topicId = metadata.topicId();
            topics.add(topicName);
            // We can only reason about topic ID changes when both IDs are valid, so keep oldId null unless the new metadata contains a topic ID
            Uuid oldTopicId = null;
            if (!Uuid.ZERO_UUID.equals(topicId)) {
                topicIds.put(topicName, topicId);
                oldTopicId = oldTopicIds.get(topicName);
            } else {
                topicId = null;
            }
            if (!retainTopic(topicName, metadata.isInternal(), nowMs))
                continue;
            if (metadata.isInternal())
                internalTopics.add(topicName);
            if (metadata.error() == Errors.NONE) {
                for (MetadataResponse.PartitionMetadata partitionMetadata : metadata.partitionMetadata()) {
                    // Even if the partition's metadata includes an error, we need to handle
                    // the update to catch new epochs
                    updateLatestMetadata(partitionMetadata, metadataResponse.hasReliableLeaderEpochs(), topicId, oldTopicId)
                        .ifPresent(partitions::add);
                    if (partitionMetadata.error.exception() instanceof InvalidMetadataException) {
                        log.debug("Requesting metadata update for partition {} due to error {}",
                                partitionMetadata.topicPartition, partitionMetadata.error);
                        requestUpdate();
                    }
                }
            } else {
                if (metadata.error().exception() instanceof InvalidMetadataException) {
                    log.debug("Requesting metadata update for topic {} due to error {}", topicName, metadata.error());
                    requestUpdate();
                }
                if (metadata.error() == Errors.INVALID_TOPIC_EXCEPTION)
                    invalidTopics.add(topicName);
                else if (metadata.error() == Errors.TOPIC_AUTHORIZATION_FAILED)
                    unauthorizedTopics.add(topicName);
            }
        }
        Map<Integer, Node> nodes = metadataResponse.brokersById();
        if (isPartialUpdate)
            // Merge into the existing cache, keeping topics not covered by this partial response.
            return this.cache.mergeWith(metadataResponse.clusterId(), nodes, partitions,
                unauthorizedTopics, invalidTopics, internalTopics, metadataResponse.controller(), topicIds,
                (topic, isInternal) -> !topics.contains(topic) && retainTopic(topic, isInternal, nowMs));
        else
            return new MetadataCache(metadataResponse.clusterId(), nodes, partitions,
                unauthorizedTopics, invalidTopics, internalTopics, metadataResponse.controller(), topicIds);
    }

    /**
     * Compute the latest partition metadata to cache given ordering by leader epochs (if both
     * available and reliable) and whether the topic ID changed.
     */
    private Optional<MetadataResponse.PartitionMetadata> updateLatestMetadata(
            MetadataResponse.PartitionMetadata partitionMetadata,
            boolean hasReliableLeaderEpoch,
            Uuid topicId,
            Uuid oldTopicId) {
        TopicPartition tp = partitionMetadata.topicPartition;
        if (hasReliableLeaderEpoch && partitionMetadata.leaderEpoch.isPresent()) {
            int newEpoch = partitionMetadata.leaderEpoch.get();
            Integer currentEpoch = lastSeenLeaderEpochs.get(tp);
            if (currentEpoch == null) {
                // We have no previous info, so we can just insert the new epoch info
                log.debug("Setting the last seen epoch of partition {} to {} since the last known epoch was undefined.",
                        tp, newEpoch);
                lastSeenLeaderEpochs.put(tp, newEpoch);
                return Optional.of(partitionMetadata);
            } else if (topicId != null && !topicId.equals(oldTopicId)) {
                // If the new topic ID is valid and different from the last seen topic ID, update the metadata.
                // Between the time that a topic is deleted and re-created, the client may lose track of the
                // corresponding topicId (i.e. `oldTopicId` will be null). In this case, when we discover the new
                // topicId, we allow the corresponding leader epoch to override the last seen value.
                log.info("Resetting the last seen epoch of partition {} to {} since the associated topicId changed from {} to {}",
                        tp, newEpoch, oldTopicId, topicId);
                lastSeenLeaderEpochs.put(tp, newEpoch);
                return Optional.of(partitionMetadata);
            } else if (newEpoch >= currentEpoch) {
                // If the received leader epoch is at least the same as the previous one, update the metadata
                log.debug("Updating last seen epoch for partition {} from {} to epoch {} from new metadata", tp, currentEpoch, newEpoch);
                lastSeenLeaderEpochs.put(tp, newEpoch);
                return Optional.of(partitionMetadata);
            } else {
                // Otherwise ignore the new metadata and use the previously cached info
                log.debug("Got metadata for an older epoch {} (current is {}) for partition {}, not updating", newEpoch, currentEpoch, tp);
                return cache.partitionMetadata(tp);
            }
        } else {
            // Handle old cluster formats as well as error responses where leader and epoch are missing
            lastSeenLeaderEpochs.remove(tp);
            return Optional.of(partitionMetadata.withoutLeaderEpoch());
        }
    }

    /**
     * If any non-retriable exceptions were encountered during metadata update, clear and throw the exception.
     * This is used by the consumer to propagate any fatal exceptions or topic exceptions for any of the topics
     * in the consumer's Metadata.
     */
    public synchronized void maybeThrowAnyException() {
        clearErrorsAndMaybeThrowException(this::recoverableException);
    }

    /**
     * If any fatal exceptions were encountered during metadata update, throw the exception. This is used by
     * the producer to abort waiting for metadata if there were fatal exceptions (e.g. authentication failures)
     * in the last metadata update.
     */
    protected synchronized void maybeThrowFatalException() {
        KafkaException metadataException = this.fatalException;
        if (metadataException != null) {
            // Clear before throwing so the same fatal error is reported only once.
            fatalException = null;
            throw metadataException;
        }
    }

    /**
     * If any non-retriable exceptions were encountered during metadata update, throw exception if the exception
     * is fatal or related to the specified topic. All exceptions from the last metadata update are cleared.
     * This is used by the producer to propagate topic metadata errors for send requests.
     */
    public synchronized void maybeThrowExceptionForTopic(String topic) {
        clearErrorsAndMaybeThrowException(() -> recoverableExceptionForTopic(topic));
    }

    // Throws the fatal exception if present, otherwise any recoverable exception from the
    // supplier; all recorded errors are cleared either way.
    private void clearErrorsAndMaybeThrowException(Supplier<KafkaException> recoverableExceptionSupplier) {
        KafkaException metadataException = Optional.ofNullable(fatalException).orElseGet(recoverableExceptionSupplier);
        fatalException = null;
        clearRecoverableErrors();
        if (metadataException != null)
            throw metadataException;
    }

    // We may be able to recover from this exception if metadata for this topic is no longer needed
    private KafkaException recoverableException() {
        if (!unauthorizedTopics.isEmpty())
            return new TopicAuthorizationException(unauthorizedTopics);
        else if (!invalidTopics.isEmpty())
            return new InvalidTopicException(invalidTopics);
        else
            return null;
    }

    // Like recoverableException(), but scoped to a single topic.
    private KafkaException recoverableExceptionForTopic(String topic) {
        if (unauthorizedTopics.contains(topic))
            return new TopicAuthorizationException(Collections.singleton(topic));
        else if (invalidTopics.contains(topic))
            return new InvalidTopicException(Collections.singleton(topic));
        else
            return null;
    }

    private void clearRecoverableErrors() {
        invalidTopics = Collections.emptySet();
        unauthorizedTopics = Collections.emptySet();
    }

    /**
     * Record an attempt to update the metadata that failed. We need to keep track of this
     * to avoid retrying immediately.
     */
    public synchronized void failedUpdate(long now) {
        this.lastRefreshMs = now;
    }

    /**
     * Propagate a fatal error which affects the ability to fetch metadata for the cluster.
     * Two examples are authentication and unsupported version exceptions.
     *
     * @param exception The fatal exception
     */
    public synchronized void fatalError(KafkaException exception) {
        this.fatalException = exception;
    }

    /**
     * @return The current metadata updateVersion
     */
    public synchronized int updateVersion() {
        return this.updateVersion;
    }

    /**
     * The last time metadata was successfully updated.
     */
    public synchronized long lastSuccessfulUpdate() {
        return this.lastSuccessfulRefreshMs;
    }

    /**
     * Close this metadata instance to indicate that metadata updates are no longer possible.
     */
    @Override
    public synchronized void close() {
        this.isClosed = true;
    }

    /**
     * Check if this metadata instance has been closed. See {@link #close()} for more information.
     *
     * @return True if this instance has been closed; false otherwise
     */
    public synchronized boolean isClosed() {
        return this.isClosed;
    }

    /**
     * Build the next metadata request to send, preferring a partial (new-topics-only) request
     * when no full update is required and the current metadata has not expired.
     *
     * @param nowMs current time in ms
     * @return the request builder together with the request version and partial-update flag
     */
    public synchronized MetadataRequestAndVersion newMetadataRequestAndVersion(long nowMs) {
        MetadataRequest.Builder request = null;
        boolean isPartialUpdate = false;
        // Perform a partial update only if a full update hasn't been requested, and the last successful
        // update is still within the metadata expiry window.
        if (!this.needFullUpdate && this.lastSuccessfulRefreshMs + this.metadataExpireMs > nowMs) {
            request = newMetadataRequestBuilderForNewTopics();
            isPartialUpdate = true;
        }
        if (request == null) {
            // Partial updates are unsupported (subclass returned null) or not applicable; do a full update.
            request = newMetadataRequestBuilder();
            isPartialUpdate = false;
        }
        return new MetadataRequestAndVersion(request, requestVersion, isPartialUpdate);
    }

    /**
     * Constructs and returns a metadata request builder for fetching cluster data and all active topics.
     *
     * @return the constructed non-null metadata builder
     */
    protected MetadataRequest.Builder newMetadataRequestBuilder() {
        return MetadataRequest.Builder.allTopics();
    }

    /**
     * Constructs and returns a metadata request builder for fetching cluster data and any uncached topics,
     * otherwise null if the functionality is not supported.
     *
     * @return the constructed metadata builder, or null if not supported
     */
    protected MetadataRequest.Builder newMetadataRequestBuilderForNewTopics() {
        return null;
    }

    /**
     * Whether metadata for the given topic should be retained. Subclasses override this to
     * implement topic expiry; the base implementation retains everything.
     */
    protected boolean retainTopic(String topic, boolean isInternal, long nowMs) {
        return true;
    }

    /**
     * A metadata request builder paired with the request version it was built under and
     * whether it represents a partial (new-topics-only) update.
     */
    public static class MetadataRequestAndVersion {
        public final MetadataRequest.Builder requestBuilder;
        public final int requestVersion;
        public final boolean isPartialUpdate;

        private MetadataRequestAndVersion(MetadataRequest.Builder requestBuilder,
                                          int requestVersion,
                                          boolean isPartialUpdate) {
            this.requestBuilder = requestBuilder;
            this.requestVersion = requestVersion;
            this.isPartialUpdate = isPartialUpdate;
        }
    }

    /**
     * Represents current leader state known in metadata. It is possible that we know the leader, but not the
     * epoch if the metadata is received from a broker which does not support a sufficient Metadata API version.
     * It is also possible that we know of the leader epoch, but not the leader when it is derived
     * from an external source (e.g. a committed offset).
     */
    public static class LeaderAndEpoch {
        private static final LeaderAndEpoch NO_LEADER_OR_EPOCH = new LeaderAndEpoch(Optional.empty(), Optional.empty());

        public final Optional<Node> leader;
        public final Optional<Integer> epoch;

        public LeaderAndEpoch(Optional<Node> leader, Optional<Integer> epoch) {
            this.leader = Objects.requireNonNull(leader);
            this.epoch = Objects.requireNonNull(epoch);
        }

        public static LeaderAndEpoch noLeaderOrEpoch() {
            return NO_LEADER_OR_EPOCH;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            LeaderAndEpoch that = (LeaderAndEpoch) o;
            if (!leader.equals(that.leader)) return false;
            return epoch.equals(that.epoch);
        }

        @Override
        public int hashCode() {
            int result = leader.hashCode();
            result = 31 * result + epoch.hashCode();
            return result;
        }

        @Override
        public String toString() {
            return "LeaderAndEpoch{" +
                    "leader=" + leader +
                    ", epoch=" + epoch.map(Number::toString).orElse("absent") +
                    '}';
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/MetadataCache.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.ClusterResource;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.requests.MetadataResponse.PartitionMetadata;
import java.net.InetSocketAddress;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.BiPredicate;
import java.util.function.Predicate;
import java.util.stream.Collectors;
/**
* An internal mutable cache of nodes, topics, and partitions in the Kafka cluster. This keeps an up-to-date Cluster
* instance which is optimized for read access.
*/
public class MetadataCache {
// Cluster id from the most recent metadata response; null for the bootstrap/empty caches.
private final String clusterId;
// Broker id -> Node for every currently known broker.
private final Map<Integer, Node> nodes;
// Topics the principal was not authorized to describe.
private final Set<String> unauthorizedTopics;
// Topics whose names the broker reported as invalid.
private final Set<String> invalidTopics;
// Topics flagged as internal in the metadata response.
private final Set<String> internalTopics;
// Controller node from the metadata response; may be null (see bootstrap()/empty()).
private final Node controller;
// Per-partition metadata keyed by topic-partition; source of truth for computeClusterView().
private final Map<TopicPartition, PartitionMetadata> metadataByPartition;
// Topic name -> topic ID mapping from the metadata response.
private final Map<String, Uuid> topicIds;
// Read-optimized view; set either in the constructor (when supplied) or by computeClusterView().
private Cluster clusterInstance;
/**
 * Creates a cache from metadata response contents, computing the Cluster view eagerly.
 */
MetadataCache(String clusterId,
Map<Integer, Node> nodes,
Collection<PartitionMetadata> partitions,
Set<String> unauthorizedTopics,
Set<String> invalidTopics,
Set<String> internalTopics,
Node controller,
Map<String, Uuid> topicIds) {
this(clusterId, nodes, partitions, unauthorizedTopics, invalidTopics, internalTopics, controller, topicIds, null);
}
/**
 * @param clusterInstance a pre-built Cluster view to reuse, or null to compute one
 *                        from {@code partitions}
 */
private MetadataCache(String clusterId,
Map<Integer, Node> nodes,
Collection<PartitionMetadata> partitions,
Set<String> unauthorizedTopics,
Set<String> invalidTopics,
Set<String> internalTopics,
Node controller,
Map<String, Uuid> topicIds,
Cluster clusterInstance) {
this.clusterId = clusterId;
this.nodes = nodes;
this.unauthorizedTopics = unauthorizedTopics;
this.invalidTopics = invalidTopics;
this.internalTopics = internalTopics;
this.controller = controller;
this.topicIds = topicIds;
this.metadataByPartition = new HashMap<>(partitions.size());
for (PartitionMetadata p : partitions) {
this.metadataByPartition.put(p.topicPartition, p);
}
// Build the Cluster view now unless the caller supplied one (bootstrap()/empty() do).
if (clusterInstance == null) {
computeClusterView();
} else {
this.clusterInstance = clusterInstance;
}
}
/** @return the partition's metadata, or empty if the partition is unknown to this cache */
Optional<PartitionMetadata> partitionMetadata(TopicPartition topicPartition) {
return Optional.ofNullable(metadataByPartition.get(topicPartition));
}
/** @return the topic name to topic ID mapping held by this cache */
Map<String, Uuid> topicIds() {
return topicIds;
}
/** @return the node with the given broker id, or empty if unknown */
Optional<Node> nodeById(int id) {
return Optional.ofNullable(nodes.get(id));
}
/** @return the cached read-optimized Cluster view (never null after construction) */
Cluster cluster() {
if (clusterInstance == null) {
throw new IllegalStateException("Cached Cluster instance should not be null, but was.");
} else {
return clusterInstance;
}
}
/** @return a ClusterResource wrapping this cache's cluster id */
ClusterResource clusterResource() {
return new ClusterResource(clusterId);
}
/**
* Merges the metadata cache's contents with the provided metadata, returning a new metadata cache. The provided
* metadata is presumed to be more recent than the cache's metadata, and therefore all overlapping metadata will
* be overridden.
*
* @param newClusterId the new cluster Id
* @param newNodes the new set of nodes
* @param addPartitions partitions to add
* @param addUnauthorizedTopics unauthorized topics to add
* @param addInvalidTopics invalid topics to add
* @param addInternalTopics internal topics to add
* @param newController the new controller node
* @param topicIds the mapping from topic name to topic ID from the MetadataResponse
* @param retainTopic returns whether a topic's metadata should be retained
* @return the merged metadata cache
*/
MetadataCache mergeWith(String newClusterId,
Map<Integer, Node> newNodes,
Collection<PartitionMetadata> addPartitions,
Set<String> addUnauthorizedTopics,
Set<String> addInvalidTopics,
Set<String> addInternalTopics,
Node newController,
Map<String, Uuid> topicIds,
BiPredicate<String, Boolean> retainTopic) {
Predicate<String> shouldRetainTopic = topic -> retainTopic.test(topic, internalTopics.contains(topic));
Map<TopicPartition, PartitionMetadata> newMetadataByPartition = new HashMap<>(addPartitions.size());
// We want the most recent topic ID. We start with the previous ID stored for retained topics and then
// update with newest information from the MetadataResponse. We always take the latest state, removing existing
// topic IDs if the latest state contains the topic name but not a topic ID.
Map<String, Uuid> newTopicIds = topicIds.entrySet().stream()
.filter(entry -> shouldRetainTopic.test(entry.getKey()))
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
for (PartitionMetadata partition : addPartitions) {
newMetadataByPartition.put(partition.topicPartition, partition);
Uuid id = topicIds.get(partition.topic());
if (id != null)
newTopicIds.put(partition.topic(), id);
else
// Remove if the latest metadata does not have a topic ID
newTopicIds.remove(partition.topic());
}
// Keep pre-existing partition entries for retained topics; putIfAbsent ensures the
// newer entries added above are never overwritten by stale ones.
for (Map.Entry<TopicPartition, PartitionMetadata> entry : metadataByPartition.entrySet()) {
if (shouldRetainTopic.test(entry.getKey().topic())) {
newMetadataByPartition.putIfAbsent(entry.getKey(), entry.getValue());
}
}
Set<String> newUnauthorizedTopics = fillSet(addUnauthorizedTopics, unauthorizedTopics, shouldRetainTopic);
Set<String> newInvalidTopics = fillSet(addInvalidTopics, invalidTopics, shouldRetainTopic);
Set<String> newInternalTopics = fillSet(addInternalTopics, internalTopics, shouldRetainTopic);
return new MetadataCache(newClusterId, newNodes, newMetadataByPartition.values(), newUnauthorizedTopics,
newInvalidTopics, newInternalTopics, newController, newTopicIds);
}
/**
* Copies {@code baseSet} and adds all non-existent elements in {@code fillSet} such that {@code predicate} is true.
* In other words, all elements of {@code baseSet} will be contained in the result, with additional non-overlapping
* elements in {@code fillSet} where the predicate is true.
*
* @param baseSet the base elements for the resulting set
* @param fillSet elements to be filled into the resulting set
* @param predicate tested against the fill set to determine whether elements should be added to the base set
* @return a new mutable set combining {@code baseSet} with the matching fill elements
*/
private static <T> Set<T> fillSet(Set<T> baseSet, Set<T> fillSet, Predicate<T> predicate) {
Set<T> result = new HashSet<>(baseSet);
for (T element : fillSet) {
if (predicate.test(element)) {
result.add(element);
}
}
return result;
}
// Materializes the read-optimized Cluster view from the current partition metadata and nodes.
private void computeClusterView() {
List<PartitionInfo> partitionInfos = metadataByPartition.values()
.stream()
.map(metadata -> MetadataResponse.toPartitionInfo(metadata, nodes))
.collect(Collectors.toList());
this.clusterInstance = new Cluster(clusterId, nodes.values(), partitionInfos, unauthorizedTopics,
invalidTopics, internalTopics, controller, topicIds);
}
/**
 * Creates a pre-metadata cache seeded only with the bootstrap addresses. Bootstrap nodes
 * are assigned synthetic negative ids (-1, -2, ...) so they cannot collide with real broker ids.
 */
static MetadataCache bootstrap(List<InetSocketAddress> addresses) {
Map<Integer, Node> nodes = new HashMap<>();
int nodeId = -1;
for (InetSocketAddress address : addresses) {
nodes.put(nodeId, new Node(nodeId, address.getHostString(), address.getPort()));
nodeId--;
}
return new MetadataCache(null, nodes, Collections.emptyList(),
Collections.emptySet(), Collections.emptySet(), Collections.emptySet(),
null, Collections.emptyMap(), Cluster.bootstrap(addresses));
}
/** @return a cache with no nodes, topics or partitions */
static MetadataCache empty() {
return new MetadataCache(null, Collections.emptyMap(), Collections.emptyList(),
Collections.emptySet(), Collections.emptySet(), Collections.emptySet(), null, Collections.emptyMap(), Cluster.empty());
}
@Override
public String toString() {
return "MetadataCache{" +
"clusterId='" + clusterId + '\'' +
", nodes=" + nodes +
", partitions=" + metadataByPartition.values() +
", controller=" + controller +
'}';
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/MetadataUpdater.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.requests.RequestHeader;
import java.io.Closeable;
import java.util.List;
import java.util.Optional;
/**
* The interface used by `NetworkClient` to request cluster metadata info to be updated and to retrieve the cluster nodes
* from such metadata. This is an internal class.
* <p>
* This class is not thread-safe!
*/
public interface MetadataUpdater extends Closeable {
/**
* Gets the current cluster info without blocking.
*
* @return the currently known cluster nodes
*/
List<Node> fetchNodes();
/**
* Returns true if an update to the cluster metadata info is due.
*
* @param now current time in milliseconds
*/
boolean isUpdateDue(long now);
/**
* Starts a cluster metadata update if needed and possible. Returns the time until the metadata update (which would
* be 0 if an update has been started as a result of this call).
*
* If the implementation relies on `NetworkClient` to send requests, `handleSuccessfulResponse` will be
* invoked after the metadata response is received.
*
* The semantics of `needed` and `possible` are implementation-dependent and may take into account a number of
* factors like node availability, how long since the last metadata update, etc.
*
* @param now current time in milliseconds
* @return the time in milliseconds until the next metadata update
*/
long maybeUpdate(long now);
/**
* Handle a server disconnect.
*
* This provides a mechanism for the `MetadataUpdater` implementation to use the NetworkClient instance for its own
* requests with special handling for disconnections of such requests.
*
* @param now Current time in milliseconds
* @param nodeId The id of the node that disconnected
* @param maybeAuthException Optional authentication error
*/
void handleServerDisconnect(long now, String nodeId, Optional<AuthenticationException> maybeAuthException);
/**
* Handle a metadata request failure.
*
* @param now Current time in milliseconds
* @param maybeFatalException Optional fatal error (e.g. {@link UnsupportedVersionException})
*/
void handleFailedRequest(long now, Optional<KafkaException> maybeFatalException);
/**
* Handle responses for metadata requests.
*
* This provides a mechanism for the `MetadataUpdater` implementation to use the NetworkClient instance for its own
* requests with special handling for completed receives of such requests.
*
* @param requestHeader header of the metadata request this response answers
* @param now current time in milliseconds
* @param metadataResponse the received metadata response
*/
void handleSuccessfulResponse(RequestHeader requestHeader, long now, MetadataResponse metadataResponse);
/**
* Close this updater.
*/
@Override
void close();
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/NetworkClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.AuthenticationException;
import org.apache.kafka.common.errors.DisconnectException;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.network.ChannelState;
import org.apache.kafka.common.network.NetworkSend;
import org.apache.kafka.common.network.NetworkReceive;
import org.apache.kafka.common.network.Selectable;
import org.apache.kafka.common.network.Send;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.protocol.types.SchemaException;
import org.apache.kafka.common.requests.AbstractRequest;
import org.apache.kafka.common.requests.AbstractResponse;
import org.apache.kafka.common.requests.ApiVersionsRequest;
import org.apache.kafka.common.requests.ApiVersionsResponse;
import org.apache.kafka.common.requests.CorrelationIdMismatchException;
import org.apache.kafka.common.requests.MetadataRequest;
import org.apache.kafka.common.requests.MetadataResponse;
import org.apache.kafka.common.requests.RequestHeader;
import org.apache.kafka.common.security.authenticator.SaslClientAuthenticator;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Random;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;
/**
* A network client for asynchronous request/response network i/o. This is an internal class used to implement the
* user-facing producer and consumer clients.
* <p>
* This class is not thread-safe!
*/
public class NetworkClient implements KafkaClient {
// Lifecycle of the client; transitions only move forward (see initiateClose()/close()).
private enum State {
// Open for business: requests may be sent and polled.
ACTIVE,
// initiateClose() has been called; shutdown has begun.
CLOSING,
// close() completed: the selector and metadata updater have been closed.
CLOSED
}
/* logger created from the supplied LogContext */
private final Logger log;
/* the selector used to perform network i/o */
private final Selectable selector;
/* drives cluster metadata refreshes; either caller-supplied or an internal DefaultMetadataUpdater */
private final MetadataUpdater metadataUpdater;
/* randomness used to vary the starting offset when scanning nodes in leastLoadedNode */
private final Random randOffset;
/* the state of each node's connection */
private final ClusterConnectionStates connectionStates;
/* the set of requests currently being sent or awaiting a response */
private final InFlightRequests inFlightRequests;
/* the socket send buffer size in bytes */
private final int socketSendBuffer;
/* the socket receive size buffer in bytes */
private final int socketReceiveBuffer;
/* the client id used to identify this client in requests to the server */
private final String clientId;
/* the current correlation id to use when sending requests to servers */
private int correlation;
/* default timeout for individual requests to await acknowledgement from servers */
private final int defaultRequestTimeoutMs;
/* time in ms to wait before retrying to create connection to a server */
private final long reconnectBackoffMs;
/* clock abstraction; all timing decisions go through this */
private final Time time;
/**
* True if we should send an ApiVersionRequest when first connecting to a broker.
*/
private final boolean discoverBrokerVersions;
/* per-node broker API version information, consulted when choosing a request version in doSend */
private final ApiVersions apiVersions;
/* nodes for which an ApiVersionsRequest still needs to be sent, with the builder to use */
private final Map<String, ApiVersionsRequest.Builder> nodesNeedingApiVersionsFetch = new HashMap<>();
/* responses for requests aborted before reaching the wire (e.g. unsupported version); drained in poll() */
private final List<ClientResponse> abortedSends = new LinkedList<>();
/* sensor recording broker throttle times; may be null when metrics are not wired up */
private final Sensor throttleTimeSensor;
/* lifecycle state: ACTIVE -> CLOSING -> CLOSED */
private final AtomicReference<State> state;
/**
 * Convenience constructor taking a Metadata instance and no throttle-time sensor.
 * Delegates to the sensor-accepting overload with a null sensor.
 */
public NetworkClient(Selectable selector,
Metadata metadata,
String clientId,
int maxInFlightRequestsPerConnection,
long reconnectBackoffMs,
long reconnectBackoffMax,
int socketSendBuffer,
int socketReceiveBuffer,
int defaultRequestTimeoutMs,
long connectionSetupTimeoutMs,
long connectionSetupTimeoutMaxMs,
Time time,
boolean discoverBrokerVersions,
ApiVersions apiVersions,
LogContext logContext) {
this(selector,
metadata,
clientId,
maxInFlightRequestsPerConnection,
reconnectBackoffMs,
reconnectBackoffMax,
socketSendBuffer,
socketReceiveBuffer,
defaultRequestTimeoutMs,
connectionSetupTimeoutMs,
connectionSetupTimeoutMaxMs,
time,
discoverBrokerVersions,
apiVersions,
null,
logContext);
}
/**
 * Convenience constructor taking a Metadata instance and a throttle-time sensor.
 * Uses the default host resolver; the primary constructor builds an internal
 * DefaultMetadataUpdater around {@code metadata} (null metadataUpdater argument).
 */
public NetworkClient(Selectable selector,
Metadata metadata,
String clientId,
int maxInFlightRequestsPerConnection,
long reconnectBackoffMs,
long reconnectBackoffMax,
int socketSendBuffer,
int socketReceiveBuffer,
int defaultRequestTimeoutMs,
long connectionSetupTimeoutMs,
long connectionSetupTimeoutMaxMs,
Time time,
boolean discoverBrokerVersions,
ApiVersions apiVersions,
Sensor throttleTimeSensor,
LogContext logContext) {
this(null,
metadata,
selector,
clientId,
maxInFlightRequestsPerConnection,
reconnectBackoffMs,
reconnectBackoffMax,
socketSendBuffer,
socketReceiveBuffer,
defaultRequestTimeoutMs,
connectionSetupTimeoutMs,
connectionSetupTimeoutMaxMs,
time,
discoverBrokerVersions,
apiVersions,
throttleTimeSensor,
logContext,
new DefaultHostResolver());
}
/**
 * Convenience constructor taking an externally managed MetadataUpdater
 * (no Metadata instance, no throttle-time sensor, default host resolver).
 */
public NetworkClient(Selectable selector,
MetadataUpdater metadataUpdater,
String clientId,
int maxInFlightRequestsPerConnection,
long reconnectBackoffMs,
long reconnectBackoffMax,
int socketSendBuffer,
int socketReceiveBuffer,
int defaultRequestTimeoutMs,
long connectionSetupTimeoutMs,
long connectionSetupTimeoutMaxMs,
Time time,
boolean discoverBrokerVersions,
ApiVersions apiVersions,
LogContext logContext) {
this(metadataUpdater,
null,
selector,
clientId,
maxInFlightRequestsPerConnection,
reconnectBackoffMs,
reconnectBackoffMax,
socketSendBuffer,
socketReceiveBuffer,
defaultRequestTimeoutMs,
connectionSetupTimeoutMs,
connectionSetupTimeoutMaxMs,
time,
discoverBrokerVersions,
apiVersions,
null,
logContext,
new DefaultHostResolver());
}
/**
 * Primary constructor. If {@code metadataUpdater} is null, {@code metadata} must be non-null
 * and an internal DefaultMetadataUpdater is created around it; otherwise the supplied updater
 * is used as-is and {@code metadata} is ignored.
 */
public NetworkClient(MetadataUpdater metadataUpdater,
Metadata metadata,
Selectable selector,
String clientId,
int maxInFlightRequestsPerConnection,
long reconnectBackoffMs,
long reconnectBackoffMax,
int socketSendBuffer,
int socketReceiveBuffer,
int defaultRequestTimeoutMs,
long connectionSetupTimeoutMs,
long connectionSetupTimeoutMaxMs,
Time time,
boolean discoverBrokerVersions,
ApiVersions apiVersions,
Sensor throttleTimeSensor,
LogContext logContext,
HostResolver hostResolver) {
/* It would be better if we could pass `DefaultMetadataUpdater` from the public constructor, but it's not
* possible because `DefaultMetadataUpdater` is an inner class and it can only be instantiated after the
* super constructor is invoked.
*/
if (metadataUpdater == null) {
if (metadata == null)
throw new IllegalArgumentException("`metadata` must not be null");
this.metadataUpdater = new DefaultMetadataUpdater(metadata);
} else {
this.metadataUpdater = metadataUpdater;
}
this.selector = selector;
this.clientId = clientId;
this.inFlightRequests = new InFlightRequests(maxInFlightRequestsPerConnection);
this.connectionStates = new ClusterConnectionStates(
reconnectBackoffMs, reconnectBackoffMax,
connectionSetupTimeoutMs, connectionSetupTimeoutMaxMs, logContext, hostResolver);
this.socketSendBuffer = socketSendBuffer;
this.socketReceiveBuffer = socketReceiveBuffer;
this.correlation = 0;
this.randOffset = new Random();
this.defaultRequestTimeoutMs = defaultRequestTimeoutMs;
this.reconnectBackoffMs = reconnectBackoffMs;
this.time = time;
this.discoverBrokerVersions = discoverBrokerVersions;
this.apiVersions = apiVersions;
this.throttleTimeSensor = throttleTimeSensor;
this.log = logContext.logger(NetworkClient.class);
this.state = new AtomicReference<>(State.ACTIVE);
}
/**
 * Begin connecting to the given node, return true if we are already connected and ready to send to that node.
 *
 * @param node The node to check
 * @param now The current timestamp
 * @return True if we are ready to send to the given node
 */
@Override
public boolean ready(Node node, long now) {
    if (node.isEmpty())
        throw new IllegalArgumentException("Cannot connect to empty node " + node);
    boolean alreadyReady = isReady(node, now);
    if (!alreadyReady && connectionStates.canConnect(node.idString(), now)) {
        // Not connected yet but the backoff window allows a new attempt: start one now.
        initiateConnect(node, now);
    }
    return alreadyReady;
}
// Visible for testing
boolean canConnect(Node node, long now) {
    // Delegate to the connection-state tracker, keyed by the node's id string.
    String connectionId = node.idString();
    return connectionStates.canConnect(connectionId, now);
}
/**
 * Disconnects the connection to a particular node, if there is one.
 * Any pending ClientRequests for this connection will receive disconnections.
 *
 * @param nodeId The id of the node
 */
@Override
public void disconnect(String nodeId) {
    // Nothing to do if the node is already disconnected.
    if (connectionStates.isDisconnected(nodeId)) {
        log.debug("Client requested disconnect from node {}, which is already disconnected", nodeId);
        return;
    }
    log.info("Client requested disconnect from node {}", nodeId);
    selector.close(nodeId);
    long disconnectTimeMs = time.milliseconds();
    // Fail any in-flight requests to this node; their responses are queued on
    // abortedSends and report a disconnect (timedOut = false).
    cancelInFlightRequests(nodeId, disconnectTimeMs, abortedSends, false);
    connectionStates.disconnected(nodeId, disconnectTimeMs);
}
/**
 * Clears every in-flight request for the given node. External requests produce a
 * ClientResponse (timed-out or disconnected, per {@code timedOut}) appended to
 * {@code responses} when that collection is non-null; internal metadata requests
 * instead notify the metadata updater of the failure.
 *
 * @param nodeId node whose in-flight requests are cancelled
 * @param now current time in milliseconds
 * @param responses collection receiving responses for external requests; may be null to drop them
 * @param timedOut whether the cancellation is due to a request timeout (vs. a disconnect)
 */
private void cancelInFlightRequests(String nodeId,
long now,
Collection<ClientResponse> responses,
boolean timedOut) {
Iterable<InFlightRequest> inFlightRequests = this.inFlightRequests.clearAll(nodeId);
for (InFlightRequest request : inFlightRequests) {
// Debug level includes the request body itself; info level logs only the metadata.
if (log.isDebugEnabled()) {
log.debug("Cancelled in-flight {} request with correlation id {} due to node {} being disconnected " +
"(elapsed time since creation: {}ms, elapsed time since send: {}ms, request timeout: {}ms): {}",
request.header.apiKey(), request.header.correlationId(), nodeId,
request.timeElapsedSinceCreateMs(now), request.timeElapsedSinceSendMs(now),
request.requestTimeoutMs, request.request);
} else {
log.info("Cancelled in-flight {} request with correlation id {} due to node {} being disconnected " +
"(elapsed time since creation: {}ms, elapsed time since send: {}ms, request timeout: {}ms)",
request.header.apiKey(), request.header.correlationId(), nodeId,
request.timeElapsedSinceCreateMs(now), request.timeElapsedSinceSendMs(now),
request.requestTimeoutMs);
}
if (!request.isInternalRequest) {
if (responses != null) {
ClientResponse clientResponse;
if (timedOut)
clientResponse = request.timedOut(now);
else
clientResponse = request.disconnected(now);
responses.add(clientResponse);
}
} else if (request.header.apiKey() == ApiKeys.METADATA) {
metadataUpdater.handleFailedRequest(now, Optional.empty());
}
}
}
/**
 * Closes the connection to a particular node (if there is one).
 * All requests on the connection will be cleared. ClientRequest callbacks will not be invoked
 * for the cleared requests, nor will they be returned from poll().
 *
 * @param nodeId The id of the node
 */
@Override
public void close(String nodeId) {
    log.info("Client requested connection close from node {}", nodeId);
    selector.close(nodeId);
    long closeTimeMs = time.milliseconds();
    // null responses: cancelled requests are dropped silently (no callbacks, not returned from poll()).
    cancelInFlightRequests(nodeId, closeTimeMs, null, false);
    connectionStates.remove(nodeId);
}
/**
 * Returns the number of milliseconds to wait, based on the connection state, before attempting to send data. When
 * disconnected, this respects the reconnect backoff time. When connecting or connected, this handles slow/stalled
 * connections.
 *
 * @param node The node to check
 * @param now The current timestamp
 * @return The number of milliseconds to wait.
 */
@Override
public long connectionDelay(Node node, long now) {
    String connectionId = node.idString();
    return connectionStates.connectionDelay(connectionId, now);
}
// Return the remaining throttling delay in milliseconds if throttling is in progress. Return 0, otherwise.
// This is for testing.
public long throttleDelayMs(Node node, long now) {
    String connectionId = node.idString();
    return connectionStates.throttleDelayMs(connectionId, now);
}
/**
 * Return the poll delay in milliseconds based on both connection and throttle delay.
 * @param node the connection to check
 * @param now the current time in ms
 */
@Override
public long pollDelayMs(Node node, long now) {
    String connectionId = node.idString();
    return connectionStates.pollDelayMs(connectionId, now);
}
/**
 * Check if the connection of the node has failed, based on the connection state. Such connection failure are
 * usually transient and can be resumed in the next {@link #ready(org.apache.kafka.common.Node, long)} }
 * call, but there are cases where transient failures needs to be caught and re-acted upon.
 *
 * @param node the node to check
 * @return true iff the connection has failed and the node is disconnected
 */
@Override
public boolean connectionFailed(Node node) {
    String connectionId = node.idString();
    return connectionStates.isDisconnected(connectionId);
}
/**
 * Check if authentication to this node has failed, based on the connection state. Authentication failures are
 * propagated without any retries.
 *
 * @param node the node to check
 * @return an AuthenticationException iff authentication has failed, null otherwise
 */
@Override
public AuthenticationException authenticationException(Node node) {
    String connectionId = node.idString();
    return connectionStates.authenticationException(connectionId);
}
/**
 * Check if the node with the given id is ready to send more requests.
 *
 * @param node The node
 * @param now The current time in ms
 * @return true if the node is ready
 */
@Override
public boolean isReady(Node node, long now) {
    // A pending metadata refresh takes priority: report every node as not ready
    // so the metadata request goes out first.
    boolean metadataRefreshDue = metadataUpdater.isUpdateDue(now);
    return !metadataRefreshDue && canSendRequest(node.idString(), now);
}
/**
 * Are we connected and ready and able to send more requests to the given connection?
 *
 * @param node The node
 * @param now the current timestamp
 */
private boolean canSendRequest(String node, long now) {
    // Guard clauses preserve the original short-circuit order:
    // connection ready, then channel ready, then in-flight capacity.
    if (!connectionStates.isReady(node, now))
        return false;
    if (!selector.isChannelReady(node))
        return false;
    return inFlightRequests.canSendMore(node);
}
/**
 * Queue up the given request for sending. Requests can only be sent out to ready nodes.
 * @param request The request
 * @param now The current timestamp
 */
@Override
public void send(ClientRequest request, long now) {
    // External callers always take the validated (non-internal) path.
    doSend(request, false, now);
}
// package-private for testing
void sendInternalMetadataRequest(MetadataRequest.Builder builder, String nodeConnectionId, long now) {
    // Internal requests bypass the readiness validation performed for external sends.
    ClientRequest metadataRequest = newClientRequest(nodeConnectionId, builder, now, true);
    doSend(metadataRequest, true, now);
}
/**
 * Resolves the request version against the destination node's known API versions and hands the
 * built request to the wire-level doSend overload. Requests whose required version cannot be
 * satisfied are aborted locally (queued on abortedSends or reported to the metadata updater)
 * instead of being sent over the wire.
 *
 * @param clientRequest the request to send
 * @param isInternalRequest true when the request originated inside the NetworkClient itself
 * @param now current time in milliseconds
 */
private void doSend(ClientRequest clientRequest, boolean isInternalRequest, long now) {
ensureActive();
String nodeId = clientRequest.destination();
if (!isInternalRequest) {
// If this request came from outside the NetworkClient, validate
// that we can send data. If the request is internal, we trust
// that internal code has done this validation. Validation
// will be slightly different for some internal requests (for
// example, ApiVersionsRequests can be sent prior to being in
// READY state.)
if (!canSendRequest(nodeId, now))
throw new IllegalStateException("Attempt to send a request to node " + nodeId + " which is not ready.");
}
AbstractRequest.Builder<?> builder = clientRequest.requestBuilder();
try {
NodeApiVersions versionInfo = apiVersions.get(nodeId);
short version;
// Note: if versionInfo is null, we have no server version information. This would be
// the case when sending the initial ApiVersionRequest which fetches the version
// information itself. It is also the case when discoverBrokerVersions is set to false.
if (versionInfo == null) {
version = builder.latestAllowedVersion();
if (discoverBrokerVersions && log.isTraceEnabled())
log.trace("No version information found when sending {} with correlation id {} to node {}. " +
"Assuming version {}.", clientRequest.apiKey(), clientRequest.correlationId(), nodeId, version);
} else {
version = versionInfo.latestUsableVersion(clientRequest.apiKey(), builder.oldestAllowedVersion(),
builder.latestAllowedVersion());
}
// The call to build may also throw UnsupportedVersionException, if there are essential
// fields that cannot be represented in the chosen version.
doSend(clientRequest, isInternalRequest, now, builder.build(version));
} catch (UnsupportedVersionException unsupportedVersionException) {
// If the version is not supported, skip sending the request over the wire.
// Instead, simply add it to the local queue of aborted requests.
log.debug("Version mismatch when attempting to send {} with correlation id {} to {}", builder,
clientRequest.correlationId(), clientRequest.destination(), unsupportedVersionException);
ClientResponse clientResponse = new ClientResponse(clientRequest.makeHeader(builder.latestAllowedVersion()),
clientRequest.callback(), clientRequest.destination(), now, now,
false, unsupportedVersionException, null, null);
// Aborted external requests surface through poll(); aborted internal metadata
// requests are reported directly to the metadata updater.
if (!isInternalRequest)
abortedSends.add(clientResponse);
else if (clientRequest.apiKey() == ApiKeys.METADATA)
metadataUpdater.handleFailedRequest(now, Optional.of(unsupportedVersionException));
}
}
/**
 * Wire-level send: builds the request header and Send, records the request as in-flight,
 * then hands the bytes to the selector.
 */
private void doSend(ClientRequest clientRequest, boolean isInternalRequest, long now, AbstractRequest request) {
    String destination = clientRequest.destination();
    RequestHeader header = clientRequest.makeHeader(request.version());
    if (log.isDebugEnabled()) {
        log.debug("Sending {} request with header {} and timeout {} to node {}: {}",
            clientRequest.apiKey(), header, clientRequest.requestTimeoutMs(), destination, request);
    }
    Send send = request.toSend(header);
    // Track the request until its response (or a disconnect) arrives.
    InFlightRequest inFlightRequest = new InFlightRequest(
        clientRequest, header, isInternalRequest, request, send, now);
    this.inFlightRequests.add(inFlightRequest);
    selector.send(new NetworkSend(destination, send));
}
/**
* Do actual reads and writes to sockets.
*
* @param timeout The maximum amount of time to wait (in ms) for responses if there are none immediately,
* must be non-negative. The actual timeout will be the minimum of timeout, request timeout and
* metadata timeout
* @param now The current time in milliseconds
* @return The list of responses received
*/
@Override
public List<ClientResponse> poll(long timeout, long now) {
ensureActive();
if (!abortedSends.isEmpty()) {
// If there are aborted sends because of unsupported version exceptions or disconnects,
// handle them immediately without waiting for Selector#poll.
List<ClientResponse> responses = new ArrayList<>();
handleAbortedSends(responses);
completeResponses(responses);
return responses;
}
// Give the metadata updater a chance to refresh; the time until the next update
// bounds how long the selector may block below.
long metadataTimeout = metadataUpdater.maybeUpdate(now);
try {
this.selector.poll(Utils.min(timeout, metadataTimeout, defaultRequestTimeoutMs));
} catch (IOException e) {
log.error("Unexpected error during I/O", e);
}
// process completed actions
long updatedNow = this.time.milliseconds();
List<ClientResponse> responses = new ArrayList<>();
handleCompletedSends(responses, updatedNow);
handleCompletedReceives(responses, updatedNow);
handleDisconnections(responses, updatedNow);
handleConnections();
handleInitiateApiVersionRequests(updatedNow);
handleTimedOutConnections(responses, updatedNow);
handleTimedOutRequests(responses, updatedNow);
// Invoke callbacks only after all handlers have populated the response list.
completeResponses(responses);
return responses;
}
/**
 * Invokes each response's completion callback, isolating failures so that one
 * throwing callback cannot prevent the remaining responses from completing.
 */
private void completeResponses(List<ClientResponse> responses) {
    responses.forEach(response -> {
        try {
            response.onComplete();
        } catch (Exception e) {
            log.error("Uncaught error in request completion:", e);
        }
    });
}
/**
 * Get the number of in-flight requests across all nodes.
 */
@Override
public int inFlightRequestCount() {
    return inFlightRequests.count();
}
/** @return true if any request to any node is still awaiting a response */
@Override
public boolean hasInFlightRequests() {
    return !inFlightRequests.isEmpty();
}
/**
 * Get the number of in-flight requests for a given node
 */
@Override
public int inFlightRequestCount(String node) {
    return inFlightRequests.count(node);
}
/** @return true if any request to the given node is still awaiting a response */
@Override
public boolean hasInFlightRequests(String node) {
    return !inFlightRequests.isEmpty(node);
}
/** @return true if at least one node's connection is ready at the given time */
@Override
public boolean hasReadyNodes(long now) {
    return connectionStates.hasReadyNodes(now);
}
/**
 * Interrupt the client if it is blocked waiting on I/O.
 */
@Override
public void wakeup() {
    selector.wakeup();
}
/**
 * Begin shutting the client down. Only the first caller performs the
 * ACTIVE -> CLOSING transition; it also wakes any thread blocked in poll().
 */
@Override
public void initiateClose() {
    boolean transitioned = state.compareAndSet(State.ACTIVE, State.CLOSING);
    if (transitioned)
        wakeup();
}
/** @return true while the client is in the ACTIVE state */
@Override
public boolean active() {
    return State.ACTIVE == state.get();
}
/**
 * Throw a {@link DisconnectException} if this client has begun closing or has
 * been closed.
 */
private void ensureActive() {
    if (active())
        return;
    throw new DisconnectException("NetworkClient is no longer active, state is " + state);
}
/**
 * Close the network client, releasing the selector (and its sockets) and the
 * metadata updater exactly once.
 * <p>
 * The first CAS moves a still-ACTIVE client to CLOSING (a no-op if initiateClose
 * already ran); the second CAS is the single winner gate for resource release, so
 * a repeated close() call only logs a warning.
 */
@Override
public void close() {
    state.compareAndSet(State.ACTIVE, State.CLOSING);
    if (state.compareAndSet(State.CLOSING, State.CLOSED)) {
        this.selector.close();
        this.metadataUpdater.close();
    } else {
        log.warn("Attempting to close NetworkClient that has already been closed.");
    }
}
/**
 * Choose the node with the fewest outstanding requests which is at least eligible for connection. This method will
 * prefer a node with an existing connection, but will potentially choose a node for which we don't yet have a
 * connection if all existing connections are in use. If no connection exists, this method will prefer a node
 * with least recent connection attempts. This method will never choose a node for which there is no
 * existing connection and from which we have disconnected within the reconnect backoff period, or an active
 * connection which is being throttled.
 *
 * @return The node with the fewest in-flight requests.
 */
@Override
public Node leastLoadedNode(long now) {
    List<Node> nodes = this.metadataUpdater.fetchNodes();
    if (nodes.isEmpty())
        throw new IllegalStateException("There are no nodes in the Kafka cluster");
    // Candidates are tracked in three tiers, preferred in this order:
    //   foundReady      - connected and sendable, fewest in-flight requests
    //   foundConnecting - a connection attempt already underway
    //   foundCanConnect - no connection yet, but eligible to connect (oldest attempt wins)
    int inflight = Integer.MAX_VALUE;
    Node foundConnecting = null;
    Node foundCanConnect = null;
    Node foundReady = null;
    // Start the scan at a random position so repeated calls do not always favor
    // the same node when several are equally loaded.
    int offset = this.randOffset.nextInt(nodes.size());
    for (int i = 0; i < nodes.size(); i++) {
        int idx = (offset + i) % nodes.size();
        Node node = nodes.get(idx);
        if (canSendRequest(node.idString(), now)) {
            int currInflight = this.inFlightRequests.count(node.idString());
            if (currInflight == 0) {
                // if we find an established connection with no in-flight requests we can stop right away
                log.trace("Found least loaded node {} connected with no in-flight requests", node);
                return node;
            } else if (currInflight < inflight) {
                // otherwise if this is the best we have found so far, record that
                inflight = currInflight;
                foundReady = node;
            }
        } else if (connectionStates.isPreparingConnection(node.idString())) {
            foundConnecting = node;
        } else if (canConnect(node, now)) {
            // Prefer the node whose last connection attempt is the furthest in the past.
            if (foundCanConnect == null ||
                    this.connectionStates.lastConnectAttemptMs(foundCanConnect.idString()) >
                            this.connectionStates.lastConnectAttemptMs(node.idString())) {
                foundCanConnect = node;
            }
        } else {
            // Node is in reconnect backoff, throttled, or otherwise unusable right now.
            log.trace("Removing node {} from least loaded node selection since it is neither ready " +
                    "for sending or connecting", node);
        }
    }
    // We prefer established connections if possible. Otherwise, we will wait for connections
    // which are being established before connecting to new nodes.
    if (foundReady != null) {
        log.trace("Found least loaded node {} with {} inflight requests", foundReady, inflight);
        return foundReady;
    } else if (foundConnecting != null) {
        log.trace("Found least loaded connecting node {}", foundConnecting);
        return foundConnecting;
    } else if (foundCanConnect != null) {
        log.trace("Found least loaded node {} with no active connection", foundCanConnect);
        return foundCanConnect;
    } else {
        log.trace("Least loaded node selection failed to find an available node");
        return null;
    }
}
/**
 * Parse a response buffer against the header of the request it answers.
 *
 * @param responseBuffer the raw response payload
 * @param requestHeader  the header of the request this response corresponds to
 * @return the parsed response
 * @throws SchemaException if the buffer is truncated, or if a correlation-id mismatch
 *         shows the response cannot belong to a SASL request
 */
public static AbstractResponse parseResponse(ByteBuffer responseBuffer, RequestHeader requestHeader) {
    try {
        return AbstractResponse.parseResponse(responseBuffer, requestHeader);
    } catch (BufferUnderflowException e) {
        throw new SchemaException("Buffer underflow while parsing response for request with header " + requestHeader, e);
    } catch (CorrelationIdMismatchException e) {
        // SASL authentication uses a reserved correlation-id range. If the request's id
        // is reserved but the response's id is not, the response cannot be the answer to
        // the SASL request, so surface a clearer error than a bare mismatch.
        if (SaslClientAuthenticator.isReserved(requestHeader.correlationId())
                && !SaslClientAuthenticator.isReserved(e.responseCorrelationId()))
            throw new SchemaException("The response is unrelated to Sasl request since its correlation id is "
                    + e.responseCorrelationId() + " and the reserved range for Sasl request is [ "
                    + SaslClientAuthenticator.MIN_RESERVED_CORRELATION_ID + ","
                    + SaslClientAuthenticator.MAX_RESERVED_CORRELATION_ID + "]");
        else {
            throw e;
        }
    }
}
/**
 * Post process disconnection of a node, treating the disconnection as a
 * non-timeout one.
 *
 * @param responses The list of responses to update
 * @param nodeId Id of the node to be disconnected
 * @param now The current time
 * @param disconnectState The state of the disconnected channel
 */
private void processDisconnection(List<ClientResponse> responses, String nodeId, long now, ChannelState disconnectState) {
    // Delegate to the full variant with timedOut = false.
    processDisconnection(responses, nodeId, now, disconnectState, false);
}
/**
 * Post process disconnection of a node caused by a timeout (request or connection
 * setup). The channel is reported as a local close and the in-flight requests are
 * marked as timed out rather than merely disconnected.
 *
 * @param responses The list of responses to update
 * @param nodeId Id of the node to be disconnected
 * @param now The current time
 */
private void processTimeoutDisconnection(List<ClientResponse> responses, String nodeId, long now) {
    processDisconnection(responses, nodeId, now, ChannelState.LOCAL_CLOSE, true);
}
/**
 * Post process disconnection of a node: update the connection state, drop any cached
 * API-version information, log the cause, fail the node's in-flight requests, and
 * notify the metadata updater.
 *
 * @param responses The list of responses to update
 * @param nodeId Id of the node to be disconnected
 * @param now The current time
 * @param disconnectState The state of the disconnected channel
 * @param timedOut {@code true} if the connection is disconnected because of a timeout (request or connection)
 */
private void processDisconnection(List<ClientResponse> responses,
                                  String nodeId,
                                  long now,
                                  ChannelState disconnectState,
                                  boolean timedOut) {
    connectionStates.disconnected(nodeId, now);
    // The broker's supported API versions may change across reconnects, so forget
    // what we knew and cancel any pending version fetch for this node.
    apiVersions.remove(nodeId);
    nodesNeedingApiVersionsFetch.remove(nodeId);
    switch (disconnectState.state()) {
        case AUTHENTICATION_FAILED:
            AuthenticationException exception = disconnectState.exception();
            connectionStates.authenticationFailed(nodeId, now, exception);
            log.error("Connection to node {} ({}) failed authentication due to: {}", nodeId,
                    disconnectState.remoteAddress(), exception.getMessage());
            break;
        case AUTHENTICATE:
            log.warn("Connection to node {} ({}) terminated during authentication. This may happen " +
                    "due to any of the following reasons: (1) Authentication failed due to invalid " +
                    "credentials with brokers older than 1.0.0, (2) Firewall blocking Kafka TLS " +
                    "traffic (eg it may only allow HTTPS traffic), (3) Transient network issue.",
                    nodeId, disconnectState.remoteAddress());
            break;
        case NOT_CONNECTED:
            log.warn("Connection to node {} ({}) could not be established. Broker may not be available.", nodeId, disconnectState.remoteAddress());
            break;
        default:
            break; // Disconnections in other states are logged at debug level in Selector
    }
    // Fail every request still outstanding on this connection, adding responses
    // (marked disconnected or timed out) for those the caller must hear about.
    cancelInFlightRequests(nodeId, now, responses, timedOut);
    metadataUpdater.handleServerDisconnect(now, nodeId, Optional.ofNullable(disconnectState.exception()));
}
/**
 * Iterate over all the inflight requests and expire any requests that have exceeded the
 * configured requestTimeout. The connection to the node associated with the request will
 * be terminated and will be treated as a disconnection.
 *
 * @param responses The list of responses to update
 * @param now The current time
 */
private void handleTimedOutRequests(List<ClientResponse> responses, long now) {
    for (String nodeId : this.inFlightRequests.nodesWithTimedOutRequests(now)) {
        // Sever the connection; the outstanding requests are failed as part of
        // the timeout-disconnection processing below.
        this.selector.close(nodeId);
        log.info("Disconnecting from node {} due to request timeout.", nodeId);
        processTimeoutDisconnection(responses, nodeId, now);
    }
}
/**
 * Drain the responses that were generated for sends aborted before reaching the wire
 * into the given list, emptying the aborted-send buffer.
 *
 * @param responses The list of responses to update
 */
private void handleAbortedSends(List<ClientResponse> responses) {
    responses.addAll(abortedSends);
    abortedSends.clear();
}
/**
 * Handle socket channel connection timeout. The timeout will hit iff a connection
 * stays at the ConnectionState.CONNECTING state longer than the timeout value,
 * as indicated by ClusterConnectionStates.NodeConnectionState.
 *
 * @param responses The list of responses to update
 * @param now The current time
 */
private void handleTimedOutConnections(List<ClientResponse> responses, long now) {
    for (String nodeId : connectionStates.nodesWithConnectionSetupTimeout(now)) {
        // Abandon the half-open socket and fail the node as a timeout disconnection.
        this.selector.close(nodeId);
        log.info(
                "Disconnecting from node {} due to socket connection setup timeout. " +
                "The timeout value is {} ms.",
                nodeId,
                connectionStates.connectionSetupTimeoutMs(nodeId));
        processTimeoutDisconnection(responses, nodeId, now);
    }
}
/**
 * Handle any completed request send. In particular if no response is expected consider the request complete.
 *
 * @param responses The list of responses to update
 * @param now The current time
 */
private void handleCompletedSends(List<ClientResponse> responses, long now) {
    for (NetworkSend completedSend : this.selector.completedSends()) {
        String destination = completedSend.destinationId();
        InFlightRequest lastSent = this.inFlightRequests.lastSent(destination);
        if (lastSent.expectResponse)
            continue;
        // Fire-and-forget request: it is complete as soon as its bytes are flushed.
        this.inFlightRequests.completeLastSent(destination);
        responses.add(lastSent.completed(null, now));
    }
}
/**
 * If a response from a node includes a non-zero throttle delay and client-side throttling has been enabled for
 * the connection to the node, throttle the connection for the specified delay.
 *
 * @param response the response
 * @param apiVersion the API version of the response
 * @param nodeId the id of the node
 * @param now The current time
 */
private void maybeThrottle(AbstractResponse response, short apiVersion, String nodeId, long now) {
    int throttleTimeMs = response.throttleTimeMs();
    boolean clientSideThrottling = throttleTimeMs > 0 && response.shouldClientThrottle(apiVersion);
    if (clientSideThrottling) {
        long throttleUntilMs = now + throttleTimeMs;
        connectionStates.throttle(nodeId, throttleUntilMs);
        log.trace("Connection to node {} is throttled for {} ms until timestamp {}", nodeId, throttleTimeMs,
                throttleUntilMs);
    }
}
/**
 * Handle any completed receives and update the response list with the responses received.
 *
 * @param responses The list of responses to update
 * @param now The current time
 */
private void handleCompletedReceives(List<ClientResponse> responses, long now) {
    for (NetworkReceive receive : this.selector.completedReceives()) {
        String source = receive.source();
        // Responses on a connection arrive in request order, so the oldest
        // in-flight request for this node is the one being completed here.
        InFlightRequest req = inFlightRequests.completeNext(source);
        AbstractResponse response = parseResponse(receive.payload(), req.header);
        if (throttleTimeSensor != null)
            throttleTimeSensor.record(response.throttleTimeMs(), now);
        if (log.isDebugEnabled()) {
            log.debug("Received {} response from node {} for request with header {}: {}",
                req.header.apiKey(), req.destination, req.header, response);
        }
        // If the received response includes a throttle delay, throttle the connection.
        maybeThrottle(response, req.header.apiVersion(), req.destination, now);
        // Internal metadata and ApiVersions responses are consumed by the client itself;
        // every other response is returned to the caller.
        if (req.isInternalRequest && response instanceof MetadataResponse)
            metadataUpdater.handleSuccessfulResponse(req.header, now, (MetadataResponse) response);
        else if (req.isInternalRequest && response instanceof ApiVersionsResponse)
            handleApiVersionsResponse(responses, req, now, (ApiVersionsResponse) response);
        else
            responses.add(req.completed(response, now));
    }
}
/**
 * Process an ApiVersions response from a node. On success, record the node's supported
 * versions and mark the connection ready. On UNSUPPORTED_VERSION, retry with the highest
 * ApiVersions version the broker advertised (or version 0 if none); on any other error,
 * disconnect from the node.
 *
 * @param responses the list of responses to update if the node is disconnected
 * @param req the in-flight ApiVersions request being answered
 * @param now the current time
 * @param apiVersionsResponse the response received from the node
 */
private void handleApiVersionsResponse(List<ClientResponse> responses,
        InFlightRequest req, long now, ApiVersionsResponse apiVersionsResponse) {
    final String node = req.destination;
    if (apiVersionsResponse.data().errorCode() != Errors.NONE.code()) {
        // Version 0 cannot be downgraded further, and errors other than
        // UNSUPPORTED_VERSION are not retriable here: drop the connection.
        if (req.request.version() == 0 || apiVersionsResponse.data().errorCode() != Errors.UNSUPPORTED_VERSION.code()) {
            log.warn("Received error {} from node {} when making an ApiVersionsRequest with correlation id {}. Disconnecting.",
                Errors.forCode(apiVersionsResponse.data().errorCode()), node, req.header.correlationId());
            this.selector.close(node);
            processDisconnection(responses, node, now, ChannelState.LOCAL_CLOSE);
        } else {
            // Starting from Apache Kafka 2.4, ApiKeys field is populated with the supported versions of
            // the ApiVersionsRequest when an UNSUPPORTED_VERSION error is returned.
            // If not provided, the client falls back to version 0.
            short maxApiVersion = 0;
            if (apiVersionsResponse.data().apiKeys().size() > 0) {
                ApiVersion apiVersion = apiVersionsResponse.data().apiKeys().find(ApiKeys.API_VERSIONS.id);
                if (apiVersion != null) {
                    maxApiVersion = apiVersion.maxVersion();
                }
            }
            // Queue a retry at the downgraded version; it is sent by handleInitiateApiVersionRequests.
            nodesNeedingApiVersionsFetch.put(node, new ApiVersionsRequest.Builder(maxApiVersion));
        }
        return;
    }
    NodeApiVersions nodeVersionInfo = new NodeApiVersions(
        apiVersionsResponse.data().apiKeys(),
        apiVersionsResponse.data().supportedFeatures(),
        apiVersionsResponse.data().zkMigrationReady());
    apiVersions.update(node, nodeVersionInfo);
    // The connection is only usable for regular requests once versions are known.
    this.connectionStates.ready(node);
    log.debug("Node {} has finalized features epoch: {}, finalized features: {}, supported features: {}, ZK migration ready: {}, API versions: {}.",
        node, apiVersionsResponse.data().finalizedFeaturesEpoch(), apiVersionsResponse.data().finalizedFeatures(),
        apiVersionsResponse.data().supportedFeatures(), apiVersionsResponse.data().zkMigrationReady(), nodeVersionInfo);
}
/**
 * Handle any disconnected connections
 *
 * @param responses The list of responses that completed with the disconnection
 * @param now The current time
 */
private void handleDisconnections(List<ClientResponse> responses, long now) {
    this.selector.disconnected().forEach((node, channelState) -> {
        log.info("Node {} disconnected.", node);
        processDisconnection(responses, node, now, channelState);
    });
}
/**
 * Record any newly completed connections
 */
private void handleConnections() {
    for (String node : this.selector.connected()) {
        // The TCP connection is established, but the channel may not be usable yet:
        // with SSL, for example, the handshake happens after connect, so callers must
        // still check isChannelReady before sending on this connection.
        if (!discoverBrokerVersions) {
            this.connectionStates.ready(node);
            log.debug("Completed connection to node {}. Ready.", node);
        } else {
            nodesNeedingApiVersionsFetch.put(node, new ApiVersionsRequest.Builder());
            log.debug("Completed connection to node {}. Fetching API versions.", node);
        }
    }
}
/**
 * Send a queued ApiVersionsRequest to every node whose channel has become ready and
 * which can accept another request, removing each node from the pending set once the
 * request is queued.
 *
 * @param now the current time
 */
private void handleInitiateApiVersionRequests(long now) {
    Iterator<Map.Entry<String, ApiVersionsRequest.Builder>> iter = nodesNeedingApiVersionsFetch.entrySet().iterator();
    while (iter.hasNext()) {
        Map.Entry<String, ApiVersionsRequest.Builder> entry = iter.next();
        String node = entry.getKey();
        if (selector.isChannelReady(node) && inFlightRequests.canSendMore(node)) {
            log.debug("Initiating API versions fetch from node {}.", node);
            // We transition the connection to the CHECKING_API_VERSIONS state only when
            // the ApiVersionsRequest is queued up to be sent out. Without this, the client
            // could remain in the CHECKING_API_VERSIONS state forever if the channel
            // never becomes ready.
            this.connectionStates.checkingApiVersions(node);
            ApiVersionsRequest.Builder apiVersionRequestBuilder = entry.getValue();
            ClientRequest clientRequest = newClientRequest(node, apiVersionRequestBuilder, now, true);
            doSend(clientRequest, true, now);
            iter.remove();
        }
    }
}
/**
 * Initiate a connection to the given node
 * @param node the node to connect to
 * @param now current time in epoch milliseconds
 */
private void initiateConnect(Node node, long now) {
    String nodeConnectionId = node.idString();
    try {
        // Mark the node CONNECTING first; currentAddress() then resolves (or rotates
        // through) the node's addresses based on that connection state.
        connectionStates.connecting(nodeConnectionId, now, node.host());
        InetAddress address = connectionStates.currentAddress(nodeConnectionId);
        log.debug("Initiating connection to node {} using address {}", node, address);
        selector.connect(nodeConnectionId,
            new InetSocketAddress(address, node.port()),
            this.socketSendBuffer,
            this.socketReceiveBuffer);
    } catch (IOException e) {
        log.warn("Error connecting to node {}", node, e);
        // Attempt failed, we'll try again after the backoff
        connectionStates.disconnected(nodeConnectionId, now);
        // Notify metadata updater of the connection failure
        metadataUpdater.handleServerDisconnect(now, nodeConnectionId, Optional.empty());
    }
}
/**
 * Default {@link MetadataUpdater} implementation: decides when a metadata refresh is
 * due, picks the least-loaded node to ask, tracks whether a fetch is currently in
 * progress, and applies successful responses to the shared {@code Metadata} instance.
 * <p>
 * Non-static: it uses the enclosing client's connection state, timeouts and send path.
 */
class DefaultMetadataUpdater implements MetadataUpdater {
    /* the current cluster metadata */
    private final Metadata metadata;
    // Defined if there is a request in progress, null otherwise
    private InProgressData inProgress;
    DefaultMetadataUpdater(Metadata metadata) {
        this.metadata = metadata;
        this.inProgress = null;
    }
    @Override
    public List<Node> fetchNodes() {
        return metadata.fetch().nodes();
    }
    @Override
    public boolean isUpdateDue(long now) {
        // An update is due only if none is in flight and the metadata's own
        // refresh timer has expired.
        return !hasFetchInProgress() && this.metadata.timeToNextUpdate(now) == 0;
    }
    private boolean hasFetchInProgress() {
        return inProgress != null;
    }
    @Override
    public long maybeUpdate(long now) {
        // should we update our metadata?
        long timeToNextMetadataUpdate = metadata.timeToNextUpdate(now);
        // While a fetch is in flight, wait up to the request timeout before trying again.
        long waitForMetadataFetch = hasFetchInProgress() ? defaultRequestTimeoutMs : 0;
        long metadataTimeout = Math.max(timeToNextMetadataUpdate, waitForMetadataFetch);
        if (metadataTimeout > 0) {
            return metadataTimeout;
        }
        // Beware that the behavior of this method and the computation of timeouts for poll() are
        // highly dependent on the behavior of leastLoadedNode.
        Node node = leastLoadedNode(now);
        if (node == null) {
            log.debug("Give up sending metadata request since no node is available");
            return reconnectBackoffMs;
        }
        return maybeUpdate(now, node);
    }
    @Override
    public void handleServerDisconnect(long now, String destinationId, Optional<AuthenticationException> maybeFatalException) {
        Cluster cluster = metadata.fetch();
        // 'processDisconnection' generates warnings for misconfigured bootstrap server configuration
        // resulting in 'Connection Refused' and misconfigured security resulting in authentication failures.
        // The warning below handles the case where a connection to a broker was established, but was disconnected
        // before metadata could be obtained.
        if (cluster.isBootstrapConfigured()) {
            int nodeId = Integer.parseInt(destinationId);
            Node node = cluster.nodeById(nodeId);
            if (node != null)
                log.warn("Bootstrap broker {} disconnected", node);
        }
        // If we have a disconnect while an update is due, we treat it as a failed update
        // so that we can backoff properly
        if (isUpdateDue(now))
            handleFailedRequest(now, Optional.empty());
        maybeFatalException.ifPresent(metadata::fatalError);
        // The disconnect may be the result of stale metadata, so request an update
        metadata.requestUpdate();
    }
    @Override
    public void handleFailedRequest(long now, Optional<KafkaException> maybeFatalException) {
        maybeFatalException.ifPresent(metadata::fatalError);
        metadata.failedUpdate(now);
        // Clear the in-progress marker so a new fetch can be attempted.
        inProgress = null;
    }
    @Override
    public void handleSuccessfulResponse(RequestHeader requestHeader, long now, MetadataResponse response) {
        // If any partition has leader with missing listeners, log up to ten of these partitions
        // for diagnosing broker configuration issues.
        // This could be a transient issue if listeners were added dynamically to brokers.
        List<TopicPartition> missingListenerPartitions = response.topicMetadata().stream().flatMap(topicMetadata ->
            topicMetadata.partitionMetadata().stream()
                .filter(partitionMetadata -> partitionMetadata.error == Errors.LISTENER_NOT_FOUND)
                .map(partitionMetadata -> new TopicPartition(topicMetadata.topic(), partitionMetadata.partition())))
            .collect(Collectors.toList());
        if (!missingListenerPartitions.isEmpty()) {
            int count = missingListenerPartitions.size();
            log.warn("{} partitions have leader brokers without a matching listener, including {}",
                count, missingListenerPartitions.subList(0, Math.min(10, count)));
        }
        // Check if any topic's metadata failed to get updated
        Map<String, Errors> errors = response.errors();
        if (!errors.isEmpty())
            log.warn("Error while fetching metadata with correlation id {} : {}", requestHeader.correlationId(), errors);
        // When talking to the startup phase of a broker, it is possible to receive an empty metadata set, which
        // we should retry later.
        if (response.brokers().isEmpty()) {
            log.trace("Ignoring empty metadata response with correlation id {}.", requestHeader.correlationId());
            this.metadata.failedUpdate(now);
        } else {
            // NOTE(review): assumes inProgress is non-null here, i.e. a successful response
            // only arrives while a fetch is marked in progress — TODO confirm.
            this.metadata.update(inProgress.requestVersion, response, inProgress.isPartialUpdate, now);
        }
        inProgress = null;
    }
    @Override
    public void close() {
        this.metadata.close();
    }
    /**
     * Return true if there's at least one connection establishment is currently underway
     */
    private boolean isAnyNodeConnecting() {
        for (Node node : fetchNodes()) {
            if (connectionStates.isConnecting(node.idString())) {
                return true;
            }
        }
        return false;
    }
    /**
     * Add a metadata request to the list of sends if we can make one
     */
    private long maybeUpdate(long now, Node node) {
        String nodeConnectionId = node.idString();
        if (canSendRequest(nodeConnectionId, now)) {
            Metadata.MetadataRequestAndVersion requestAndVersion = metadata.newMetadataRequestAndVersion(now);
            MetadataRequest.Builder metadataRequest = requestAndVersion.requestBuilder;
            log.debug("Sending metadata request {} to node {}", metadataRequest, node);
            sendInternalMetadataRequest(metadataRequest, nodeConnectionId, now);
            inProgress = new InProgressData(requestAndVersion.requestVersion, requestAndVersion.isPartialUpdate);
            return defaultRequestTimeoutMs;
        }
        // If there's any connection establishment underway, wait until it completes. This prevents
        // the client from unnecessarily connecting to additional nodes while a previous connection
        // attempt has not been completed.
        if (isAnyNodeConnecting()) {
            // Strictly the timeout we should return here is "connect timeout", but as we don't
            // have such application level configuration, using reconnect backoff instead.
            return reconnectBackoffMs;
        }
        if (connectionStates.canConnect(nodeConnectionId, now)) {
            // We don't have a connection to this node right now, make one
            log.debug("Initialize connection to node {} for sending metadata request", node);
            initiateConnect(node, now);
            return reconnectBackoffMs;
        }
        // connected, but can't send more OR connecting
        // In either case, we just need to wait for a network event to let us know the selected
        // connection might be usable again.
        return Long.MAX_VALUE;
    }
    /**
     * Snapshot of the version/partial-update flags of the metadata request currently in flight.
     */
    public class InProgressData {
        public final int requestVersion;
        public final boolean isPartialUpdate;
        private InProgressData(int requestVersion, boolean isPartialUpdate) {
            this.requestVersion = requestVersion;
            this.isPartialUpdate = isPartialUpdate;
        }
    }
}
/**
 * Create a new client request using the default request timeout and no completion
 * callback.
 *
 * @param nodeId the destination node id
 * @param requestBuilder builder for the request to send
 * @param createdTimeMs the time the request was created
 * @param expectResponse whether a response is expected for this request
 */
@Override
public ClientRequest newClientRequest(String nodeId,
                                      AbstractRequest.Builder<?> requestBuilder,
                                      long createdTimeMs,
                                      boolean expectResponse) {
    return newClientRequest(nodeId, requestBuilder, createdTimeMs, expectResponse, defaultRequestTimeoutMs, null);
}
// visible for testing
/**
 * Return the next correlation id, skipping over the range reserved for SASL
 * authentication so regular requests never collide with the SASL handshake ids.
 */
int nextCorrelationId() {
    if (SaslClientAuthenticator.isReserved(correlation)) {
        // the numeric overflow is fine as negative values is acceptable
        correlation = SaslClientAuthenticator.MAX_RESERVED_CORRELATION_ID + 1;
    }
    return correlation++;
}
/**
 * Create a new client request with an explicit timeout and completion callback,
 * assigning it the next available correlation id.
 *
 * @param nodeId the destination node id
 * @param requestBuilder builder for the request to send
 * @param createdTimeMs the time the request was created
 * @param expectResponse whether a response is expected for this request
 * @param requestTimeoutMs how long to wait before considering the request timed out
 * @param callback invoked when the request completes (may be null)
 */
@Override
public ClientRequest newClientRequest(String nodeId,
                                      AbstractRequest.Builder<?> requestBuilder,
                                      long createdTimeMs,
                                      boolean expectResponse,
                                      int requestTimeoutMs,
                                      RequestCompletionHandler callback) {
    return new ClientRequest(nodeId, requestBuilder, nextCorrelationId(), clientId, createdTimeMs, expectResponse,
        requestTimeoutMs, callback);
}
/**
 * Whether this client fetches each broker's supported API versions after connecting.
 */
public boolean discoverBrokerVersions() {
    return discoverBrokerVersions;
}
/**
 * Bookkeeping record for a request that has been sent (or queued for send) but not yet
 * completed: the header, destination, callback, the raw request/send, and the
 * timestamps needed for timeout accounting.
 */
static class InFlightRequest {
    final RequestHeader header;
    final String destination;
    final RequestCompletionHandler callback;
    final boolean expectResponse;
    final AbstractRequest request;
    final boolean isInternalRequest; // used to flag requests which are initiated internally by NetworkClient
    final Send send;
    final long sendTimeMs;
    final long createdTimeMs;
    final long requestTimeoutMs;
    /**
     * Convenience constructor that pulls timeout, creation time, destination,
     * callback and expectation flags out of the given {@link ClientRequest}.
     */
    public InFlightRequest(ClientRequest clientRequest,
                           RequestHeader header,
                           boolean isInternalRequest,
                           AbstractRequest request,
                           Send send,
                           long sendTimeMs) {
        this(header,
            clientRequest.requestTimeoutMs(),
            clientRequest.createdTimeMs(),
            clientRequest.destination(),
            clientRequest.callback(),
            clientRequest.expectResponse(),
            isInternalRequest,
            request,
            send,
            sendTimeMs);
    }
    public InFlightRequest(RequestHeader header,
                           int requestTimeoutMs,
                           long createdTimeMs,
                           String destination,
                           RequestCompletionHandler callback,
                           boolean expectResponse,
                           boolean isInternalRequest,
                           AbstractRequest request,
                           Send send,
                           long sendTimeMs) {
        this.header = header;
        this.requestTimeoutMs = requestTimeoutMs;
        this.createdTimeMs = createdTimeMs;
        this.destination = destination;
        this.callback = callback;
        this.expectResponse = expectResponse;
        this.isInternalRequest = isInternalRequest;
        this.request = request;
        this.send = send;
        this.sendTimeMs = sendTimeMs;
    }
    /** Milliseconds elapsed since the request was sent (never negative). */
    public long timeElapsedSinceSendMs(long currentTimeMs) {
        return Math.max(0, currentTimeMs - sendTimeMs);
    }
    /** Milliseconds elapsed since the request was created (never negative). */
    public long timeElapsedSinceCreateMs(long currentTimeMs) {
        return Math.max(0, currentTimeMs - createdTimeMs);
    }
    /** Build the ClientResponse for a normally completed request. */
    public ClientResponse completed(AbstractResponse response, long timeMs) {
        return new ClientResponse(header, callback, destination, createdTimeMs, timeMs,
            false, null, null, response);
    }
    /** Build the ClientResponse for a request that hit its timeout. */
    public ClientResponse timedOut(long timeMs) {
        // A timed out request is considered disconnected as well
        return new ClientResponse(header, callback, destination, createdTimeMs, timeMs,
            true, true, null, null, null);
    }
    /** Build the ClientResponse for a request failed by a disconnection. */
    public ClientResponse disconnected(long timeMs) {
        return new ClientResponse(header, callback, destination, createdTimeMs, timeMs,
            true, null, null, null);
    }
    @Override
    public String toString() {
        return "InFlightRequest(header=" + header +
            ", destination=" + destination +
            ", expectResponse=" + expectResponse +
            ", createdTimeMs=" + createdTimeMs +
            ", sendTimeMs=" + sendTimeMs +
            ", isInternalRequest=" + isInternalRequest +
            ", request=" + request +
            ", callback=" + callback +
            ", send=" + send + ")";
    }
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/NetworkClientUtils.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.errors.DisconnectException;
import org.apache.kafka.common.utils.Time;
import java.io.IOException;
import java.util.List;
/**
* Provides additional utilities for {@link NetworkClient} (e.g. to implement blocking behaviour).
*/
/**
 * Provides additional utilities for {@link NetworkClient} (e.g. to implement blocking behaviour).
 */
public final class NetworkClientUtils {

    private NetworkClientUtils() {}

    /**
     * Checks whether the node is currently connected, first calling `client.poll` to ensure that any pending
     * disconnects have been processed.
     *
     * This method can be used to check the status of a connection prior to calling the blocking version to be able
     * to tell whether the latter completed a new connection.
     *
     * @param client the client to check
     * @param node the node whose connection status is queried
     * @param currentTime the current time in milliseconds
     * @return true if the node is connected and ready to receive requests
     */
    public static boolean isReady(KafkaClient client, Node node, long currentTime) {
        client.poll(0, currentTime);
        return client.isReady(node, currentTime);
    }

    /**
     * Invokes `client.poll` to discard pending disconnects, followed by `client.ready` and 0 or more `client.poll`
     * invocations until the connection to `node` is ready, the timeoutMs expires or the connection fails.
     *
     * It returns `true` if the call completes normally or `false` if the timeoutMs expires. If the connection fails,
     * an `IOException` is thrown instead. Note that if the `NetworkClient` has been configured with a positive
     * connection timeoutMs, it is possible for this method to raise an `IOException` for a previous connection which
     * has recently disconnected. If authentication to the node fails, an `AuthenticationException` is thrown.
     *
     * This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`, use it with
     * care.
     *
     * @param client the client to use
     * @param node the node to await readiness for
     * @param time the time implementation used to read the clock
     * @param timeoutMs maximum time to wait; 0 performs a single non-blocking check
     * @return true if the node became ready within the timeout
     * @throws IllegalArgumentException if timeoutMs is negative
     * @throws IOException if the connection to the node fails
     */
    public static boolean awaitReady(KafkaClient client, Node node, Time time, long timeoutMs) throws IOException {
        if (timeoutMs < 0) {
            // A timeout of 0 is valid and means "check once without blocking"; only
            // negative values are rejected, so the message must not claim "> 0".
            throw new IllegalArgumentException("Timeout needs to be non-negative");
        }
        long startTime = time.milliseconds();

        if (isReady(client, node, startTime) || client.ready(node, startTime))
            return true;

        long attemptStartTime = time.milliseconds();
        while (!client.isReady(node, attemptStartTime) && attemptStartTime - startTime < timeoutMs) {
            if (client.connectionFailed(node)) {
                throw new IOException("Connection to " + node + " failed.");
            }
            long pollTimeout = timeoutMs - (attemptStartTime - startTime); // initialize in this order to avoid overflow
            client.poll(pollTimeout, attemptStartTime);
            if (client.authenticationException(node) != null)
                throw client.authenticationException(node);
            attemptStartTime = time.milliseconds();
        }
        return client.isReady(node, attemptStartTime);
    }

    /**
     * Invokes `client.send` followed by 1 or more `client.poll` invocations until a response is received or a
     * disconnection happens (which can happen for a number of reasons including a request timeout).
     *
     * In case of a disconnection, an `IOException` is thrown.
     * If shutdown is initiated on the client during this method, an IOException is thrown.
     *
     * This method is useful for implementing blocking behaviour on top of the non-blocking `NetworkClient`, use it with
     * care.
     *
     * @param client the client to use
     * @param request the request to send
     * @param time the time implementation used to read the clock
     * @return the response matched to the request by correlation id
     * @throws IOException on disconnection or client shutdown before the response arrives
     */
    public static ClientResponse sendAndReceive(KafkaClient client, ClientRequest request, Time time) throws IOException {
        try {
            client.send(request, time.milliseconds());
            while (client.active()) {
                List<ClientResponse> responses = client.poll(Long.MAX_VALUE, time.milliseconds());
                for (ClientResponse response : responses) {
                    // Only the response whose correlation id matches the request is ours;
                    // unrelated responses (e.g. for earlier requests) are skipped.
                    if (response.requestHeader().correlationId() == request.correlationId()) {
                        if (response.wasDisconnected()) {
                            throw new IOException("Connection to " + response.destination() + " was disconnected before the response was read");
                        }
                        if (response.versionMismatch() != null) {
                            throw response.versionMismatch();
                        }
                        return response;
                    }
                }
            }
            throw new IOException("Client was shutdown before response was read");
        } catch (DisconnectException e) {
            if (client.active())
                throw e;
            else
                throw new IOException("Client was shutdown before response was read");
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/NodeApiVersions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.feature.SupportedVersionRange;
import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion;
import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKey;
import org.apache.kafka.common.protocol.ApiKeys;
import org.apache.kafka.common.requests.ApiVersionsResponse;
import org.apache.kafka.common.utils.Utils;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.TreeMap;
/**
* An internal class which represents the API versions supported by a particular node.
*/
public class NodeApiVersions {
// A map of the usable versions of each API, keyed by the ApiKeys instance
private final Map<ApiKeys, ApiVersion> supportedVersions = new EnumMap<>(ApiKeys.class);
// List of APIs which the broker supports, but which are unknown to the client
private final List<ApiVersion> unknownApis = new ArrayList<>();
// Immutable map of feature name -> supported version range, as advertised by the node.
private final Map<String, SupportedVersionRange> supportedFeatures;
// Whether the node reported ZK-to-KRaft migration as enabled in its ApiVersions response.
private final boolean zkMigrationEnabled;
/**
* Create a NodeApiVersions object with the current ApiVersions.
*
* @return A new NodeApiVersions object.
*/
public static NodeApiVersions create() {
return create(Collections.emptyList());
}
/**
* Create a NodeApiVersions object.
*
* @param overrides API versions to override. Any ApiVersion not specified here will be set to the current client
* value.
* @return A new NodeApiVersions object.
*/
public static NodeApiVersions create(Collection<ApiVersion> overrides) {
List<ApiVersion> apiVersions = new LinkedList<>(overrides);
// Fill in every ZK-broker API that was not explicitly overridden with the client's own version range.
for (ApiKeys apiKey : ApiKeys.zkBrokerApis()) {
boolean exists = false;
for (ApiVersion apiVersion : apiVersions) {
if (apiVersion.apiKey() == apiKey.id) {
exists = true;
break;
}
}
if (!exists) apiVersions.add(ApiVersionsResponse.toApiVersion(apiKey));
}
return new NodeApiVersions(apiVersions, Collections.emptyList(), false);
}
/**
* Create a NodeApiVersions object with a single ApiKey. It is mainly used in tests.
*
* @param apiKey ApiKey's id.
* @param minVersion ApiKey's minimum version.
* @param maxVersion ApiKey's maximum version.
* @return A new NodeApiVersions object.
*/
public static NodeApiVersions create(short apiKey, short minVersion, short maxVersion) {
return create(Collections.singleton(new ApiVersion()
.setApiKey(apiKey)
.setMinVersion(minVersion)
.setMaxVersion(maxVersion)));
}
/**
* Build from the data carried in a node's ApiVersions response.
* Versions for API keys known to this client go into {@code supportedVersions};
* unrecognized keys (from newer brokers) are kept separately in {@code unknownApis}.
*
* @param nodeApiVersions the API version ranges advertised by the node
* @param nodeSupportedFeatures the features (with version ranges) advertised by the node
* @param zkMigrationEnabled whether the node reported ZK migration as enabled
*/
public NodeApiVersions(Collection<ApiVersion> nodeApiVersions, Collection<SupportedFeatureKey> nodeSupportedFeatures, boolean zkMigrationEnabled) {
for (ApiVersion nodeApiVersion : nodeApiVersions) {
if (ApiKeys.hasId(nodeApiVersion.apiKey())) {
ApiKeys nodeApiKey = ApiKeys.forId(nodeApiVersion.apiKey());
supportedVersions.put(nodeApiKey, nodeApiVersion);
} else {
// Newer brokers may support ApiKeys we don't know about
unknownApis.add(nodeApiVersion);
}
}
Map<String, SupportedVersionRange> supportedFeaturesBuilder = new HashMap<>();
for (SupportedFeatureKey supportedFeature : nodeSupportedFeatures) {
supportedFeaturesBuilder.put(supportedFeature.name(),
new SupportedVersionRange(supportedFeature.minVersion(), supportedFeature.maxVersion()));
}
// Wrap so callers of supportedFeatures() cannot mutate our state.
this.supportedFeatures = Collections.unmodifiableMap(supportedFeaturesBuilder);
this.zkMigrationEnabled = zkMigrationEnabled;
}
/**
* Return the most recent version supported by both the node and the local software.
*
* @throws UnsupportedVersionException if there is no overlap (see the two-arg overload)
*/
public short latestUsableVersion(ApiKeys apiKey) {
return latestUsableVersion(apiKey, apiKey.oldestVersion(), apiKey.latestVersion());
}
/**
* Get the latest version supported by the broker within an allowed range of versions
*
* @param apiKey the API to look up
* @param oldestAllowedVersion lower bound (inclusive) the caller will accept
* @param latestAllowedVersion upper bound (inclusive) the caller will accept
* @throws UnsupportedVersionException if the broker does not support the API at all,
*         or its supported range does not intersect the allowed range
*/
public short latestUsableVersion(ApiKeys apiKey, short oldestAllowedVersion, short latestAllowedVersion) {
if (!supportedVersions.containsKey(apiKey))
throw new UnsupportedVersionException("The broker does not support " + apiKey);
ApiVersion supportedVersion = supportedVersions.get(apiKey);
// Intersect the broker's range with the caller's allowed range; the max of the overlap wins.
Optional<ApiVersion> intersectVersion = ApiVersionsResponse.intersect(supportedVersion,
new ApiVersion()
.setApiKey(apiKey.id)
.setMinVersion(oldestAllowedVersion)
.setMaxVersion(latestAllowedVersion));
if (intersectVersion.isPresent())
return intersectVersion.get().maxVersion();
else
throw new UnsupportedVersionException("The broker does not support " + apiKey +
" with version in range [" + oldestAllowedVersion + "," + latestAllowedVersion + "]. The supported" +
" range is [" + supportedVersion.minVersion() + "," + supportedVersion.maxVersion() + "].");
}
/**
* Convert the object to a string with no linebreaks.<p/>
* <p>
* This toString method is relatively expensive, so avoid calling it unless debug logging is turned on.
*/
@Override
public String toString() {
return toString(false);
}
/**
* Convert the object to a string.
*
* @param lineBreaks True if we should add a linebreak after each api.
*/
public String toString(boolean lineBreaks) {
// The apiVersion collection may not be in sorted order. We put it into
// a TreeMap before printing it out to ensure that we always print in
// ascending order.
TreeMap<Short, String> apiKeysText = new TreeMap<>();
for (ApiVersion supportedVersion : this.supportedVersions.values())
apiKeysText.put(supportedVersion.apiKey(), apiVersionToText(supportedVersion));
for (ApiVersion apiVersion : unknownApis)
apiKeysText.put(apiVersion.apiKey(), apiVersionToText(apiVersion));
// Also handle the case where some apiKey types are not specified at all in the given ApiVersions,
// which may happen when the remote is too old.
for (ApiKeys apiKey : ApiKeys.zkBrokerApis()) {
if (!apiKeysText.containsKey(apiKey.id)) {
StringBuilder bld = new StringBuilder();
bld.append(apiKey.name).append("(").
append(apiKey.id).append("): ").append("UNSUPPORTED");
apiKeysText.put(apiKey.id, bld.toString());
}
}
String separator = lineBreaks ? ",\n\t" : ", ";
StringBuilder bld = new StringBuilder();
bld.append("(");
if (lineBreaks)
bld.append("\n\t");
bld.append(Utils.join(apiKeysText.values(), separator));
if (lineBreaks)
bld.append("\n");
bld.append(")");
return bld.toString();
}
// Render one ApiVersion as e.g. "Produce(0): 0 to 9 [usable: 9]". For unknown API keys the
// usability suffix is omitted (apiKey stays null, so the final block is skipped).
private String apiVersionToText(ApiVersion apiVersion) {
StringBuilder bld = new StringBuilder();
ApiKeys apiKey = null;
if (ApiKeys.hasId(apiVersion.apiKey())) {
apiKey = ApiKeys.forId(apiVersion.apiKey());
bld.append(apiKey.name).append("(").append(apiKey.id).append("): ");
} else {
bld.append("UNKNOWN(").append(apiVersion.apiKey()).append("): ");
}
if (apiVersion.minVersion() == apiVersion.maxVersion()) {
bld.append(apiVersion.minVersion());
} else {
bld.append(apiVersion.minVersion()).append(" to ").append(apiVersion.maxVersion());
}
if (apiKey != null) {
// Known key: callers only reach here for entries taken from supportedVersions,
// so this lookup is expected to be non-null.
ApiVersion supportedVersion = supportedVersions.get(apiKey);
if (apiKey.latestVersion() < supportedVersion.minVersion()) {
bld.append(" [unusable: node too new]");
} else if (supportedVersion.maxVersion() < apiKey.oldestVersion()) {
bld.append(" [unusable: node too old]");
} else {
short latestUsableVersion = Utils.min(apiKey.latestVersion(), supportedVersion.maxVersion());
bld.append(" [usable: ").append(latestUsableVersion).append("]");
}
}
return bld.toString();
}
/**
* Get the version information for a given API.
*
* @param apiKey The api key to lookup
* @return The api version information from the broker or null if it is unsupported
*/
public ApiVersion apiVersion(ApiKeys apiKey) {
return supportedVersions.get(apiKey);
}
// NOTE: returns the internal EnumMap without copying; callers should treat it as read-only.
public Map<ApiKeys, ApiVersion> allSupportedApiVersions() {
return supportedVersions;
}
// Immutable map of feature name -> version range advertised by the node.
public Map<String, SupportedVersionRange> supportedFeatures() {
return supportedFeatures;
}
// Whether the node reported ZK-to-KRaft migration as enabled.
public boolean zkMigrationEnabled() {
return zkMigrationEnabled;
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/RequestCompletionHandler.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
/**
* A callback interface for attaching an action to be executed when a request is complete and the corresponding response
* has been received. This handler will also be invoked if there is a disconnection while handling the request.
*/
public interface RequestCompletionHandler {
/**
* Invoked once the request has completed: either the corresponding response was
* received, or the connection was lost while the request was in flight (in which
* case the supplied response reflects the disconnection).
*
* @param response the completed response for the request
*/
void onComplete(ClientResponse response);
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/StaleMetadataException.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients;
import org.apache.kafka.common.errors.InvalidMetadataException;
/**
* Thrown when current metadata cannot be used. This is often used as a way to trigger a metadata
* update before retrying another operation.
*
* Note: this is not a public API.
*/
public class StaleMetadataException extends InvalidMetadataException {
private static final long serialVersionUID = 1L;
/** Creates the exception with no detail message. */
public StaleMetadataException() {}
/**
* Creates the exception with the given detail message.
*
* @param message the detail message
*/
public StaleMetadataException(String message) {
super(message);
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AbortTransactionOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
@InterfaceStability.Evolving
public class AbortTransactionOptions extends AbstractOptions<AbortTransactionOptions> {

    /**
     * Returns a human-readable description of these options, including the configured
     * timeout (rendered as {@code null} when the admin client default applies).
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("AbortTransactionOptions(");
        sb.append("timeoutMs=").append(timeoutMs);
        return sb.append(')').toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AbortTransactionResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
* The result of {@link Admin#abortTransaction(AbortTransactionSpec, AbortTransactionOptions)}.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class AbortTransactionResult {

    // One future per topic partition targeted by the abort request.
    private final Map<TopicPartition, KafkaFuture<Void>> futures;

    AbortTransactionResult(Map<TopicPartition, KafkaFuture<Void>> futures) {
        this.futures = futures;
    }

    /**
     * Get a future which completes when the transaction specified by {@link AbortTransactionSpec}
     * in the respective call to {@link Admin#abortTransaction(AbortTransactionSpec, AbortTransactionOptions)}
     * returns successfully or fails due to an error or timeout.
     *
     * @return the future
     */
    public KafkaFuture<Void> all() {
        KafkaFuture<?>[] pending = futures.values().toArray(new KafkaFuture[0]);
        return KafkaFuture.allOf(pending);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AbortTransactionSpec.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Objects;
@InterfaceStability.Evolving
public class AbortTransactionSpec {

    // Identity of the hanging transaction to abort.
    private final TopicPartition topicPartition;
    private final long producerId;
    private final short producerEpoch;
    private final int coordinatorEpoch;

    /**
     * @param topicPartition the partition the transaction writes to
     * @param producerId the producer id of the transaction
     * @param producerEpoch the epoch of the producer
     * @param coordinatorEpoch the epoch of the transaction coordinator
     */
    public AbortTransactionSpec(
        TopicPartition topicPartition,
        long producerId,
        short producerEpoch,
        int coordinatorEpoch
    ) {
        this.topicPartition = topicPartition;
        this.producerId = producerId;
        this.producerEpoch = producerEpoch;
        this.coordinatorEpoch = coordinatorEpoch;
    }

    /** @return the partition the transaction writes to */
    public TopicPartition topicPartition() {
        return topicPartition;
    }

    /** @return the producer id of the transaction */
    public long producerId() {
        return producerId;
    }

    /** @return the epoch of the producer */
    public short producerEpoch() {
        return producerEpoch;
    }

    /** @return the epoch of the transaction coordinator */
    public int coordinatorEpoch() {
        return coordinatorEpoch;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        AbortTransactionSpec other = (AbortTransactionSpec) o;
        return Objects.equals(topicPartition, other.topicPartition)
            && producerId == other.producerId
            && producerEpoch == other.producerEpoch
            && coordinatorEpoch == other.coordinatorEpoch;
    }

    @Override
    public int hashCode() {
        return Objects.hash(topicPartition, producerId, producerEpoch, coordinatorEpoch);
    }

    @Override
    public String toString() {
        return String.format(
            "AbortTransactionSpec(topicPartition=%s, producerId=%d, producerEpoch=%d, coordinatorEpoch=%d)",
            topicPartition, producerId, producerEpoch, coordinatorEpoch);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AbstractOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
/*
* This class implements the common APIs that are shared by Options classes for various AdminClient commands
*/
public abstract class AbstractOptions<T extends AbstractOptions> {

    // null means "fall back to the AdminClient's default api timeout".
    protected Integer timeoutMs = null;

    /**
     * The timeout in milliseconds for this operation or {@code null} if the default api timeout for the
     * AdminClient should be used.
     *
     * @return the configured timeout, possibly {@code null}
     */
    public Integer timeoutMs() {
        return timeoutMs;
    }

    /**
     * Set the timeout in milliseconds for this operation or {@code null} if the default api timeout for the
     * AdminClient should be used.
     *
     * @param timeoutMs the timeout in milliseconds, or {@code null} for the default
     * @return this options instance, to allow call chaining
     */
    @SuppressWarnings("unchecked")
    public T timeoutMs(Integer timeoutMs) {
        this.timeoutMs = timeoutMs;
        return (T) this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/Admin.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.ElectionType;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionReplica;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.acl.AclBindingFilter;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.errors.FeatureUpdateFailedException;
import org.apache.kafka.common.quota.ClientQuotaAlteration;
import org.apache.kafka.common.quota.ClientQuotaFilter;
import org.apache.kafka.common.requests.LeaveGroupResponse;
import java.time.Duration;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Properties;
import java.util.Set;
/**
* The administrative client for Kafka, which supports managing and inspecting topics, brokers, configurations and ACLs.
* <p>
* Instances returned from the {@code create} methods of this interface are guaranteed to be thread safe.
* However, the {@link KafkaFuture KafkaFutures} returned from request methods are executed
* by a single thread so it is important that any code which executes on that thread when they complete
* (using {@link KafkaFuture#thenApply(KafkaFuture.Function)}, for example) doesn't block
* for too long. If necessary, processing of results should be passed to another thread.
* <p>
* The operations exposed by Admin follow a consistent pattern:
* <ul>
* <li>Admin instances should be created using {@link Admin#create(Properties)} or {@link Admin#create(Map)}</li>
* <li>Each operation typically has two overloaded methods, one which uses a default set of options and an
* overloaded method where the last parameter is an explicit options object.
* <li>The operation method's first parameter is a {@code Collection} of items to perform
* the operation on. Batching multiple requests into a single call is more efficient and should be
* preferred over multiple calls to the same method.
* <li>The operation methods execute asynchronously.
* <li>Each {@code xxx} operation method returns an {@code XxxResult} class with methods which expose
* {@link KafkaFuture} for accessing the result(s) of the operation.
* <li>Typically an {@code all()} method is provided for getting the overall success/failure of the batch and a
* {@code values()} method provided access to each item in a request batch.
* Other methods may also be provided.
* <li>For synchronous behaviour use {@link KafkaFuture#get()}
* </ul>
* <p>
* Here is a simple example of using an Admin client instance to create a new topic:
* <pre>
* {@code
* Properties props = new Properties();
* props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
*
* try (Admin admin = Admin.create(props)) {
* String topicName = "my-topic";
* int partitions = 12;
* short replicationFactor = 3;
* // Create a compacted topic
* CreateTopicsResult result = admin.createTopics(Collections.singleton(
* new NewTopic(topicName, partitions, replicationFactor)
* .configs(Collections.singletonMap(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT))));
*
* // Call values() to get the result for a specific topic
* KafkaFuture<Void> future = result.values().get(topicName);
*
* // Call get() to block until the topic creation is complete or has failed
* // if creation failed the ExecutionException wraps the underlying cause.
* future.get();
* }
* }
* </pre>
*
* <h3>Bootstrap and balancing</h3>
* <p>
* The {@code bootstrap.servers} config in the {@code Map} or {@code Properties} passed
* to {@link Admin#create(Properties)} is only used for discovering the brokers in the cluster,
* which the client will then connect to as needed.
* As such, it is sufficient to include only two or three broker addresses to cope with the possibility of brokers
* being unavailable.
* <p>
* Different operations necessitate requests being sent to different nodes in the cluster. For example
* {@link #createTopics(Collection)} communicates with the controller, but {@link #describeTopics(Collection)}
* can talk to any broker. When the recipient does not matter the instance will try to use the broker with the
* fewest outstanding requests.
* <p>
* The client will transparently retry certain errors which are usually transient.
* For example if the request for {@code createTopics()} get sent to a node which was not the controller
* the metadata would be refreshed and the request re-sent to the controller.
*
* <h3>Broker Compatibility</h3>
* <p>
* The minimum broker version required is 0.10.0.0. Methods with stricter requirements will specify the minimum broker
* version required.
* <p>
* This client was introduced in 0.11.0.0 and the API is still evolving. We will try to evolve the API in a compatible
* manner, but we reserve the right to make breaking changes in minor releases, if necessary. We will update the
* {@code InterfaceStability} annotation and this notice once the API is considered stable.
* <p>
*/
@InterfaceStability.Evolving
public interface Admin extends AutoCloseable {
/**
* Create a new Admin with the given configuration.
*
* @param props The configuration.
* @return The new KafkaAdminClient.
*/
static Admin create(Properties props) {
// Second constructor arg presumably enables logging of the resolved config — confirm against AdminClientConfig.
return KafkaAdminClient.createInternal(new AdminClientConfig(props, true), null);
}
/**
* Create a new Admin with the given configuration.
*
* @param conf The configuration.
* @return The new KafkaAdminClient.
*/
static Admin create(Map<String, Object> conf) {
// Map-based variant of create(Properties); the trailing null args are optional overrides left unset —
// TODO confirm their meaning against KafkaAdminClient.createInternal.
return KafkaAdminClient.createInternal(new AdminClientConfig(conf, true), null, null);
}
/**
* Close the Admin and release all associated resources.
* <p>
* See {@link #close(Duration)}
*/
@Override
default void close() {
// Long.MAX_VALUE millis: effectively wait indefinitely for in-flight operations to complete.
close(Duration.ofMillis(Long.MAX_VALUE));
}
/**
* Close the Admin client and release all associated resources.
* <p>
* The close operation has a grace period during which current operations will be allowed to
* complete, specified by the given duration.
* New operations will not be accepted during the grace period. Once the grace period is over,
* all operations that have not yet been completed will be aborted with a {@link org.apache.kafka.common.errors.TimeoutException}.
*
* @param timeout The time to use for the wait time.
*/
void close(Duration timeout);
/**
* Create a batch of new topics with the default options.
* <p>
* This is a convenience method for {@link #createTopics(Collection, CreateTopicsOptions)} with default options.
* See the overload for more details.
* <p>
* This operation is supported by brokers with version 0.10.1.0 or higher.
*
* @param newTopics The new topics to create.
* @return The CreateTopicsResult.
*/
default CreateTopicsResult createTopics(Collection<NewTopic> newTopics) {
// Convenience overload: delegates with default CreateTopicsOptions.
return createTopics(newTopics, new CreateTopicsOptions());
}
/**
* Create a batch of new topics.
* <p>
* This operation is not transactional so it may succeed for some topics while fail for others.
* <p>
* It may take several seconds after {@link CreateTopicsResult} returns
* success for all the brokers to become aware that the topics have been created.
* During this time, {@link #listTopics()} and {@link #describeTopics(Collection)}
* may not return information about the new topics.
* <p>
* This operation is supported by brokers with version 0.10.1.0 or higher. The validateOnly option is supported
* from version 0.10.2.0.
*
* @param newTopics The new topics to create.
* @param options The options to use when creating the new topics.
* @return The CreateTopicsResult.
*/
CreateTopicsResult createTopics(Collection<NewTopic> newTopics, CreateTopicsOptions options);
/**
* This is a convenience method for {@link #deleteTopics(TopicCollection, DeleteTopicsOptions)}
* with default options. See the overload for more details.
* <p>
* This operation is supported by brokers with version 0.10.1.0 or higher.
*
* @param topics The topic names to delete.
* @return The DeleteTopicsResult.
*/
default DeleteTopicsResult deleteTopics(Collection<String> topics) {
// Wraps the names in a TopicCollection and uses default delete options.
return deleteTopics(TopicCollection.ofTopicNames(topics), new DeleteTopicsOptions());
}
/**
* This is a convenience method for {@link #deleteTopics(TopicCollection, DeleteTopicsOptions)}
* with default options. See the overload for more details.
* <p>
* This operation is supported by brokers with version 0.10.1.0 or higher.
*
* @param topics The topic names to delete.
* @param options The options to use when deleting the topics.
* @return The DeleteTopicsResult.
*/
default DeleteTopicsResult deleteTopics(Collection<String> topics, DeleteTopicsOptions options) {
// Name-based variant: converts to a TopicCollection and forwards the caller's options.
return deleteTopics(TopicCollection.ofTopicNames(topics), options);
}
/**
* This is a convenience method for {@link #deleteTopics(TopicCollection, DeleteTopicsOptions)}
* with default options. See the overload for more details.
* <p>
* When using topic IDs, this operation is supported by brokers with inter-broker protocol 2.8 or higher.
* When using topic names, this operation is supported by brokers with version 0.10.1.0 or higher.
*
* @param topics The topics to delete.
* @return The DeleteTopicsResult.
*/
default DeleteTopicsResult deleteTopics(TopicCollection topics) {
// Convenience overload: delegates with default DeleteTopicsOptions.
return deleteTopics(topics, new DeleteTopicsOptions());
}
/**
* Delete a batch of topics.
* <p>
* This operation is not transactional so it may succeed for some topics while fail for others.
* <p>
* It may take several seconds after the {@link DeleteTopicsResult} returns
* success for all the brokers to become aware that the topics are gone.
* During this time, {@link #listTopics()} and {@link #describeTopics(Collection)}
* may continue to return information about the deleted topics.
* <p>
* If delete.topic.enable is false on the brokers, deleteTopics will mark
* the topics for deletion, but not actually delete them. The futures will
* return successfully in this case.
* <p>
* When using topic IDs, this operation is supported by brokers with inter-broker protocol 2.8 or higher.
* When using topic names, this operation is supported by brokers with version 0.10.1.0 or higher.
*
* @param topics The topics to delete.
* @param options The options to use when deleting the topics.
* @return The DeleteTopicsResult.
*/
DeleteTopicsResult deleteTopics(TopicCollection topics, DeleteTopicsOptions options);
/**
* List the topics available in the cluster with the default options.
* <p>
* This is a convenience method for {@link #listTopics(ListTopicsOptions)} with default options.
* See the overload for more details.
*
* @return The ListTopicsResult.
*/
default ListTopicsResult listTopics() {
// Convenience overload: delegates with default ListTopicsOptions.
return listTopics(new ListTopicsOptions());
}
/**
* List the topics available in the cluster.
*
* @param options The options to use when listing the topics.
* @return The ListTopicsResult.
*/
ListTopicsResult listTopics(ListTopicsOptions options);
/**
* Describe some topics in the cluster, with the default options.
* <p>
* This is a convenience method for {@link #describeTopics(Collection, DescribeTopicsOptions)} with
* default options. See the overload for more details.
*
* @param topicNames The names of the topics to describe.
* @return The DescribeTopicsResult.
*/
default DescribeTopicsResult describeTopics(Collection<String> topicNames) {
// Convenience overload: delegates with default DescribeTopicsOptions.
return describeTopics(topicNames, new DescribeTopicsOptions());
}
/**
* Describe some topics in the cluster.
*
* @param topicNames The names of the topics to describe.
* @param options The options to use when describing the topic.
* @return The DescribeTopicsResult.
*/
default DescribeTopicsResult describeTopics(Collection<String> topicNames, DescribeTopicsOptions options) {
// Name-based variant: wraps the names in a TopicCollection and forwards the caller's options.
return describeTopics(TopicCollection.ofTopicNames(topicNames), options);
}
/**
* This is a convenience method for {@link #describeTopics(TopicCollection, DescribeTopicsOptions)}
* with default options. See the overload for more details.
* <p>
* When using topic IDs, this operation is supported by brokers with version 3.1.0 or higher.
*
* @param topics The topics to describe.
* @return The DescribeTopicsResult.
*/
default DescribeTopicsResult describeTopics(TopicCollection topics) {
// Convenience overload: delegates with default DescribeTopicsOptions.
return describeTopics(topics, new DescribeTopicsOptions());
}
/**
* Describe some topics in the cluster.
*
* When using topic IDs, this operation is supported by brokers with version 3.1.0 or higher.
*
* @param topics The topics to describe.
* @param options The options to use when describing the topics.
* @return The DescribeTopicsResult.
*/
DescribeTopicsResult describeTopics(TopicCollection topics, DescribeTopicsOptions options);
/**
* Get information about the nodes in the cluster, using the default options.
* <p>
* This is a convenience method for {@link #describeCluster(DescribeClusterOptions)} with default options.
* See the overload for more details.
*
* @return The DescribeClusterResult.
*/
default DescribeClusterResult describeCluster() {
// Convenience overload: delegates with default DescribeClusterOptions.
return describeCluster(new DescribeClusterOptions());
}
/**
* Get information about the nodes in the cluster.
*
* @param options The options to use when getting information about the cluster.
* @return The DescribeClusterResult.
*/
DescribeClusterResult describeCluster(DescribeClusterOptions options);
/**
* This is a convenience method for {@link #describeAcls(AclBindingFilter, DescribeAclsOptions)} with
* default options. See the overload for more details.
* <p>
* This operation is supported by brokers with version 0.11.0.0 or higher.
*
* @param filter The filter to use.
* @return The DescribeAclsResult.
*/
default DescribeAclsResult describeAcls(AclBindingFilter filter) {
// Convenience overload: delegates with default DescribeAclsOptions.
return describeAcls(filter, new DescribeAclsOptions());
}
/**
 * Lists access control lists (ACLs) according to the supplied filter.
 * <p>
 * Note: it may take some time for changes made by {@code createAcls} or {@code deleteAcls} to be reflected
 * in the output of {@code describeAcls}.
 * <p>
 * This operation is supported by brokers with version 0.11.0.0 or higher.
 *
 * @param filter The filter to use.
 * @param options The options to use when listing the ACLs.
 * @return The DescribeAclsResult.
 */
DescribeAclsResult describeAcls(AclBindingFilter filter, DescribeAclsOptions options);
/**
 * Creates the given access control lists (ACLs), using the default options.
 * <p>
 * Convenience overload of {@link #createAcls(Collection, CreateAclsOptions)};
 * see that method for the full contract.
 * <p>
 * This operation is supported by brokers with version 0.11.0.0 or higher.
 *
 * @param acls The ACLs to create
 * @return The CreateAclsResult.
 */
default CreateAclsResult createAcls(Collection<AclBinding> acls) {
    CreateAclsOptions defaultOptions = new CreateAclsOptions();
    return createAcls(acls, defaultOptions);
}
/**
 * Creates access control lists (ACLs) which are bound to specific resources.
 * <p>
 * This operation is not transactional, so it may succeed for some ACLs while failing for others.
 * <p>
 * If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
 * no changes will be made.
 * <p>
 * This operation is supported by brokers with version 0.11.0.0 or higher.
 *
 * @param acls The ACLs to create
 * @param options The options to use when creating the ACLs.
 * @return The CreateAclsResult.
 */
CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options);
/**
 * Deletes access control lists (ACLs) matching the supplied filters, using the default options.
 * <p>
 * Convenience overload of {@link #deleteAcls(Collection, DeleteAclsOptions)};
 * see that method for the full contract.
 * <p>
 * This operation is supported by brokers with version 0.11.0.0 or higher.
 *
 * @param filters The filters to use.
 * @return The DeleteAclsResult.
 */
default DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters) {
    DeleteAclsOptions defaultOptions = new DeleteAclsOptions();
    return deleteAcls(filters, defaultOptions);
}
/**
 * Deletes access control lists (ACLs) according to the supplied filters.
 * <p>
 * This operation is not transactional, so it may succeed for some ACLs while failing for others.
 * <p>
 * This operation is supported by brokers with version 0.11.0.0 or higher.
 *
 * @param filters The filters to use.
 * @param options The options to use when deleting the ACLs.
 * @return The DeleteAclsResult.
 */
DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options);
/**
 * Get the configuration for the specified resources, using the default options.
 * <p>
 * Convenience overload of {@link #describeConfigs(Collection, DescribeConfigsOptions)};
 * see that method for the full contract.
 * <p>
 * This operation is supported by brokers with version 0.11.0.0 or higher.
 *
 * @param resources The resources (topic and broker resource types are currently supported)
 * @return The DescribeConfigsResult
 */
default DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources) {
    DescribeConfigsOptions defaultOptions = new DescribeConfigsOptions();
    return describeConfigs(resources, defaultOptions);
}
/**
 * Get the configuration for the specified resources.
 * <p>
 * The returned configuration includes default values, and the isDefault() method can be used to distinguish them
 * from user supplied values.
 * <p>
 * The value of config entries where isSensitive() is true is always {@code null} so that sensitive information
 * is not disclosed.
 * <p>
 * Config entries where isReadOnly() is true cannot be updated.
 * <p>
 * This operation is supported by brokers with version 0.11.0.0 or higher.
 *
 * @param resources The resources (topic and broker resource types are currently supported)
 * @param options The options to use when describing configs
 * @return The DescribeConfigsResult
 */
DescribeConfigsResult describeConfigs(Collection<ConfigResource> resources, DescribeConfigsOptions options);
/**
 * Update the configuration for the specified resources, using the default options.
 * <p>
 * Convenience overload of {@link #alterConfigs(Map, AlterConfigsOptions)};
 * see that method for the full contract.
 * <p>
 * This operation is supported by brokers with version 0.11.0.0 or higher.
 *
 * @param configs The resources with their configs (topic is the only resource type with configs that can
 *                be updated currently)
 * @return The AlterConfigsResult
 * @deprecated Since 2.3. Use {@link #incrementalAlterConfigs(Map)}.
 */
@Deprecated
default AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs) {
    AlterConfigsOptions defaultOptions = new AlterConfigsOptions();
    return alterConfigs(configs, defaultOptions);
}
/**
 * Update the configuration for the specified resources.
 * <p>
 * Updates are not transactional, so they may succeed for some resources while failing for others. The configs for
 * a particular resource are updated atomically.
 * <p>
 * This operation is supported by brokers with version 0.11.0.0 or higher.
 *
 * @param configs The resources with their configs (topic is the only resource type with configs that can
 *                be updated currently)
 * @param options The options to use when altering configs
 * @return The AlterConfigsResult
 * @deprecated Since 2.3. Use {@link #incrementalAlterConfigs(Map, AlterConfigsOptions)}.
 */
@Deprecated
AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, AlterConfigsOptions options);
/**
 * Incrementally update the configuration for the specified resources, using the default options.
 * <p>
 * Convenience overload of {@link #incrementalAlterConfigs(Map, AlterConfigsOptions)};
 * see that method for the full contract.
 * <p>
 * This operation is supported by brokers with version 2.3.0 or higher.
 *
 * @param configs The resources with their configs
 * @return The AlterConfigsResult
 */
default AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource, Collection<AlterConfigOp>> configs) {
    AlterConfigsOptions defaultOptions = new AlterConfigsOptions();
    return incrementalAlterConfigs(configs, defaultOptions);
}
/**
 * Incrementally update the configuration for the specified resources.
 * <p>
 * Updates are not transactional, so they may succeed for some resources while failing for others. The configs for
 * a particular resource are updated atomically.
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from
 * the returned {@link AlterConfigsResult}:
 * <ul>
 *   <li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
 *   if the authenticated user didn't have alter access to the cluster.</li>
 *   <li>{@link org.apache.kafka.common.errors.TopicAuthorizationException}
 *   if the authenticated user didn't have alter access to the Topic.</li>
 *   <li>{@link org.apache.kafka.common.errors.UnknownTopicOrPartitionException}
 *   if the Topic doesn't exist.</li>
 *   <li>{@link org.apache.kafka.common.errors.InvalidRequestException}
 *   if the request details are invalid. e.g., a configuration key was specified more than once for a resource</li>
 * </ul>
 * <p>
 * This operation is supported by brokers with version 2.3.0 or higher.
 *
 * @param configs The resources with their configs
 * @param options The options to use when altering configs
 * @return The AlterConfigsResult
 */
AlterConfigsResult incrementalAlterConfigs(Map<ConfigResource,
    Collection<AlterConfigOp>> configs, AlterConfigsOptions options);
/**
 * Change the log directory for the specified replicas, using the default options.
 * <p>
 * If a replica does not exist on the broker yet, the result shows REPLICA_NOT_AVAILABLE for it and the
 * replica will be created in the given log directory when it is created later. If the replica already
 * exists, it is moved to the given log directory if it is not already there. For detailed results,
 * inspect the returned {@link AlterReplicaLogDirsResult} instance.
 * <p>
 * This operation is not transactional, so it may succeed for some replicas while failing for others.
 * <p>
 * Convenience overload of {@link #alterReplicaLogDirs(Map, AlterReplicaLogDirsOptions)};
 * see that method for the full contract.
 * <p>
 * This operation is supported by brokers with version 1.1.0 or higher.
 *
 * @param replicaAssignment The replicas with their log directory absolute path
 * @return The AlterReplicaLogDirsResult
 */
default AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment) {
    AlterReplicaLogDirsOptions defaultOptions = new AlterReplicaLogDirsOptions();
    return alterReplicaLogDirs(replicaAssignment, defaultOptions);
}
/**
 * Change the log directory for the specified replicas. If the replica does not exist on the broker, the result
 * shows REPLICA_NOT_AVAILABLE for the given replica and the replica will be created in the given log directory on the
 * broker when it is created later. If the replica already exists on the broker, the replica will be moved to the given
 * log directory if it is not already there. For detailed result, inspect the returned {@link AlterReplicaLogDirsResult} instance.
 * <p>
 * This operation is not transactional, so it may succeed for some replicas while failing for others.
 * <p>
 * This operation is supported by brokers with version 1.1.0 or higher.
 *
 * @param replicaAssignment The replicas with their log directory absolute path
 * @param options The options to use when changing replica dir
 * @return The AlterReplicaLogDirsResult
 */
AlterReplicaLogDirsResult alterReplicaLogDirs(Map<TopicPartitionReplica, String> replicaAssignment,
                                              AlterReplicaLogDirsOptions options);
/**
 * Query the information of all log directories on the given set of brokers, using the default options.
 * <p>
 * Convenience overload of {@link #describeLogDirs(Collection, DescribeLogDirsOptions)};
 * see that method for the full contract.
 * <p>
 * This operation is supported by brokers with version 1.0.0 or higher.
 *
 * @param brokers A list of brokers
 * @return The DescribeLogDirsResult
 */
default DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers) {
    DescribeLogDirsOptions defaultOptions = new DescribeLogDirsOptions();
    return describeLogDirs(brokers, defaultOptions);
}
/**
 * Query the information of all log directories on the given set of brokers.
 * <p>
 * This operation is supported by brokers with version 1.0.0 or higher.
 *
 * @param brokers A list of brokers
 * @param options The options to use when querying log dir info
 * @return The DescribeLogDirsResult
 */
DescribeLogDirsResult describeLogDirs(Collection<Integer> brokers, DescribeLogDirsOptions options);
/**
 * Query the replica log directory information for the specified replicas, using the default options.
 * <p>
 * Convenience overload of {@link #describeReplicaLogDirs(Collection, DescribeReplicaLogDirsOptions)};
 * see that method for the full contract.
 * <p>
 * This operation is supported by brokers with version 1.0.0 or higher.
 *
 * @param replicas The replicas to query
 * @return The DescribeReplicaLogDirsResult
 */
default DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas) {
    DescribeReplicaLogDirsOptions defaultOptions = new DescribeReplicaLogDirsOptions();
    return describeReplicaLogDirs(replicas, defaultOptions);
}
/**
 * Query the replica log directory information for the specified replicas.
 * <p>
 * This operation is supported by brokers with version 1.0.0 or higher.
 *
 * @param replicas The replicas to query
 * @param options The options to use when querying replica log dir info
 * @return The DescribeReplicaLogDirsResult
 */
DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection<TopicPartitionReplica> replicas, DescribeReplicaLogDirsOptions options);
/**
 * Increase the number of partitions of the topics given as the keys of {@code newPartitions}
 * according to the corresponding values, using the default options. <strong>If partitions are increased
 * for a topic that has a key, the partition logic or ordering of the messages will be affected.</strong>
 * <p>
 * Convenience overload of {@link #createPartitions(Map, CreatePartitionsOptions)};
 * see that method for the full contract.
 *
 * @param newPartitions The topics which should have new partitions created, and corresponding parameters
 *                      for the created partitions.
 * @return The CreatePartitionsResult.
 */
default CreatePartitionsResult createPartitions(Map<String, NewPartitions> newPartitions) {
    CreatePartitionsOptions defaultOptions = new CreatePartitionsOptions();
    return createPartitions(newPartitions, defaultOptions);
}
/**
 * Increase the number of partitions of the topics given as the keys of {@code newPartitions}
 * according to the corresponding values. <strong>If partitions are increased for a topic that has a key,
 * the partition logic or ordering of the messages will be affected.</strong>
 * <p>
 * This operation is not transactional, so it may succeed for some topics while failing for others.
 * <p>
 * It may take several seconds after this method returns
 * success for all the brokers to become aware that the partitions have been created.
 * During this time, {@link #describeTopics(Collection)}
 * may not return information about the new partitions.
 * <p>
 * This operation is supported by brokers with version 1.0.0 or higher.
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from the
 * {@link CreatePartitionsResult#values() values()} method of the returned {@link CreatePartitionsResult}
 * <ul>
 *   <li>{@link org.apache.kafka.common.errors.AuthorizationException}
 *   if the authenticated user is not authorized to alter the topic</li>
 *   <li>{@link org.apache.kafka.common.errors.TimeoutException}
 *   if the request was not completed within the given {@link CreatePartitionsOptions#timeoutMs()}.</li>
 *   <li>{@link org.apache.kafka.common.errors.ReassignmentInProgressException}
 *   if a partition reassignment is currently in progress</li>
 *   <li>{@link org.apache.kafka.common.errors.BrokerNotAvailableException}
 *   if the requested {@link NewPartitions#assignments()} contain a broker that is currently unavailable.</li>
 *   <li>{@link org.apache.kafka.common.errors.InvalidReplicationFactorException}
 *   if no {@link NewPartitions#assignments()} are given and it is impossible for the broker to assign
 *   replicas with the topic's replication factor.</li>
 *   <li>Subclasses of {@link org.apache.kafka.common.KafkaException}
 *   if the request is invalid in some way.</li>
 * </ul>
 *
 * @param newPartitions The topics which should have new partitions created, and corresponding parameters
 *                      for the created partitions.
 * @param options The options to use when creating the new partitions.
 * @return The CreatePartitionsResult.
 */
CreatePartitionsResult createPartitions(Map<String, NewPartitions> newPartitions,
                                        CreatePartitionsOptions options);
/**
 * Delete records whose offset is smaller than the given offset of the corresponding partition,
 * using the default options.
 * <p>
 * Convenience overload of {@link #deleteRecords(Map, DeleteRecordsOptions)};
 * see that method for the full contract.
 * <p>
 * This operation is supported by brokers with version 0.11.0.0 or higher.
 *
 * @param recordsToDelete The topic partitions and related offsets from which records deletion starts.
 * @return The DeleteRecordsResult.
 */
default DeleteRecordsResult deleteRecords(Map<TopicPartition, RecordsToDelete> recordsToDelete) {
    DeleteRecordsOptions defaultOptions = new DeleteRecordsOptions();
    return deleteRecords(recordsToDelete, defaultOptions);
}
/**
 * Delete records whose offset is smaller than the given offset of the corresponding partition.
 * <p>
 * This operation is supported by brokers with version 0.11.0.0 or higher.
 *
 * @param recordsToDelete The topic partitions and related offsets from which records deletion starts.
 * @param options The options to use when deleting records.
 * @return The DeleteRecordsResult.
 */
DeleteRecordsResult deleteRecords(Map<TopicPartition, RecordsToDelete> recordsToDelete,
                                  DeleteRecordsOptions options);
/**
 * Create a Delegation Token, using the default options.
 * <p>
 * Convenience overload of {@link #createDelegationToken(CreateDelegationTokenOptions)};
 * see that method for the full contract.
 *
 * @return The CreateDelegationTokenResult.
 */
default CreateDelegationTokenResult createDelegationToken() {
    CreateDelegationTokenOptions defaultOptions = new CreateDelegationTokenOptions();
    return createDelegationToken(defaultOptions);
}
/**
 * Create a Delegation Token.
 * <p>
 * This operation is supported by brokers with version 1.1.0 or higher.
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from the
 * {@link CreateDelegationTokenResult#delegationToken() delegationToken()} method of the returned {@link CreateDelegationTokenResult}
 * <ul>
 *   <li>{@link org.apache.kafka.common.errors.UnsupportedByAuthenticationException}
 *   If the request sent on PLAINTEXT/1-way SSL channels or delegation token authenticated channels.</li>
 *   <li>{@link org.apache.kafka.common.errors.InvalidPrincipalTypeException}
 *   if the renewers principal type is not supported.</li>
 *   <li>{@link org.apache.kafka.common.errors.DelegationTokenDisabledException}
 *   if the delegation token feature is disabled.</li>
 *   <li>{@link org.apache.kafka.common.errors.TimeoutException}
 *   if the request was not completed within the given {@link CreateDelegationTokenOptions#timeoutMs()}.</li>
 * </ul>
 *
 * @param options The options to use when creating delegation token.
 * @return The CreateDelegationTokenResult.
 */
CreateDelegationTokenResult createDelegationToken(CreateDelegationTokenOptions options);
/**
 * Renew a Delegation Token, using the default options.
 * <p>
 * Convenience overload of {@link #renewDelegationToken(byte[], RenewDelegationTokenOptions)};
 * see that method for the full contract.
 *
 * @param hmac HMAC of the Delegation token
 * @return The RenewDelegationTokenResult.
 */
default RenewDelegationTokenResult renewDelegationToken(byte[] hmac) {
    RenewDelegationTokenOptions defaultOptions = new RenewDelegationTokenOptions();
    return renewDelegationToken(hmac, defaultOptions);
}
/**
 * Renew a Delegation Token.
 * <p>
 * This operation is supported by brokers with version 1.1.0 or higher.
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from the
 * {@link RenewDelegationTokenResult#expiryTimestamp() expiryTimestamp()} method of the returned {@link RenewDelegationTokenResult}
 * <ul>
 *   <li>{@link org.apache.kafka.common.errors.UnsupportedByAuthenticationException}
 *   If the request sent on PLAINTEXT/1-way SSL channels or delegation token authenticated channels.</li>
 *   <li>{@link org.apache.kafka.common.errors.DelegationTokenDisabledException}
 *   if the delegation token feature is disabled.</li>
 *   <li>{@link org.apache.kafka.common.errors.DelegationTokenNotFoundException}
 *   if the delegation token is not found on server.</li>
 *   <li>{@link org.apache.kafka.common.errors.DelegationTokenOwnerMismatchException}
 *   if the authenticated user is not owner/renewer of the token.</li>
 *   <li>{@link org.apache.kafka.common.errors.DelegationTokenExpiredException}
 *   if the delegation token is expired.</li>
 *   <li>{@link org.apache.kafka.common.errors.TimeoutException}
 *   if the request was not completed within the given {@link RenewDelegationTokenOptions#timeoutMs()}.</li>
 * </ul>
 *
 * @param hmac HMAC of the Delegation token
 * @param options The options to use when renewing delegation token.
 * @return The RenewDelegationTokenResult.
 */
RenewDelegationTokenResult renewDelegationToken(byte[] hmac, RenewDelegationTokenOptions options);
/**
 * Expire a Delegation Token immediately, using the default options.
 * <p>
 * Convenience overload of {@link #expireDelegationToken(byte[], ExpireDelegationTokenOptions)};
 * see that method for the full contract.
 *
 * @param hmac HMAC of the Delegation token
 * @return The ExpireDelegationTokenResult.
 */
default ExpireDelegationTokenResult expireDelegationToken(byte[] hmac) {
    ExpireDelegationTokenOptions defaultOptions = new ExpireDelegationTokenOptions();
    return expireDelegationToken(hmac, defaultOptions);
}
/**
 * Expire a Delegation Token.
 * <p>
 * This operation is supported by brokers with version 1.1.0 or higher.
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from the
 * {@link ExpireDelegationTokenResult#expiryTimestamp() expiryTimestamp()} method of the returned {@link ExpireDelegationTokenResult}
 * <ul>
 *   <li>{@link org.apache.kafka.common.errors.UnsupportedByAuthenticationException}
 *   If the request sent on PLAINTEXT/1-way SSL channels or delegation token authenticated channels.</li>
 *   <li>{@link org.apache.kafka.common.errors.DelegationTokenDisabledException}
 *   if the delegation token feature is disabled.</li>
 *   <li>{@link org.apache.kafka.common.errors.DelegationTokenNotFoundException}
 *   if the delegation token is not found on server.</li>
 *   <li>{@link org.apache.kafka.common.errors.DelegationTokenOwnerMismatchException}
 *   if the authenticated user is not owner/renewer of the requested token.</li>
 *   <li>{@link org.apache.kafka.common.errors.DelegationTokenExpiredException}
 *   if the delegation token is expired.</li>
 *   <li>{@link org.apache.kafka.common.errors.TimeoutException}
 *   if the request was not completed within the given {@link ExpireDelegationTokenOptions#timeoutMs()}.</li>
 * </ul>
 *
 * @param hmac HMAC of the Delegation token
 * @param options The options to use when expiring delegation token.
 * @return The ExpireDelegationTokenResult.
 */
ExpireDelegationTokenResult expireDelegationToken(byte[] hmac, ExpireDelegationTokenOptions options);
/**
 * Describe the Delegation Tokens, using the default options.
 * <p>
 * Returns all tokens owned by the user plus any tokens on which the user has Describe permission.
 * Convenience overload of {@link #describeDelegationToken(DescribeDelegationTokenOptions)};
 * see that method for the full contract.
 *
 * @return The DescribeDelegationTokenResult.
 */
default DescribeDelegationTokenResult describeDelegationToken() {
    DescribeDelegationTokenOptions defaultOptions = new DescribeDelegationTokenOptions();
    return describeDelegationToken(defaultOptions);
}
/**
 * Describe the Delegation Tokens.
 * <p>
 * This operation is supported by brokers with version 1.1.0 or higher.
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from the
 * {@link DescribeDelegationTokenResult#delegationTokens() delegationTokens()} method of the returned {@link DescribeDelegationTokenResult}
 * <ul>
 *   <li>{@link org.apache.kafka.common.errors.UnsupportedByAuthenticationException}
 *   If the request sent on PLAINTEXT/1-way SSL channels or delegation token authenticated channels.</li>
 *   <li>{@link org.apache.kafka.common.errors.DelegationTokenDisabledException}
 *   if the delegation token feature is disabled.</li>
 *   <li>{@link org.apache.kafka.common.errors.TimeoutException}
 *   if the request was not completed within the given {@link DescribeDelegationTokenOptions#timeoutMs()}.</li>
 * </ul>
 *
 * @param options The options to use when describing delegation tokens.
 * @return The DescribeDelegationTokenResult.
 */
DescribeDelegationTokenResult describeDelegationToken(DescribeDelegationTokenOptions options);
/**
 * Describe some group IDs in the cluster.
 *
 * @param groupIds The IDs of the groups to describe.
 * @param options The options to use when describing the groups.
 * @return The DescribeConsumerGroupsResult.
 */
DescribeConsumerGroupsResult describeConsumerGroups(Collection<String> groupIds,
                                                    DescribeConsumerGroupsOptions options);
/**
 * Describe some group IDs in the cluster, using the default options.
 * <p>
 * Convenience overload of {@link #describeConsumerGroups(Collection, DescribeConsumerGroupsOptions)};
 * see that method for the full contract.
 *
 * @param groupIds The IDs of the groups to describe.
 * @return The DescribeConsumerGroupsResult.
 */
default DescribeConsumerGroupsResult describeConsumerGroups(Collection<String> groupIds) {
    // Delegate to the options-taking overload with default options.
    return describeConsumerGroups(groupIds, new DescribeConsumerGroupsOptions());
}
/**
 * List the consumer groups available in the cluster.
 *
 * @param options The options to use when listing the consumer groups.
 * @return The ListConsumerGroupsResult.
 */
ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options);
/**
 * List the consumer groups available in the cluster, using the default options.
 * <p>
 * Convenience overload of {@link #listConsumerGroups(ListConsumerGroupsOptions)};
 * see that method for the full contract.
 *
 * @return The ListConsumerGroupsResult.
 */
default ListConsumerGroupsResult listConsumerGroups() {
    // Delegate to the options-taking overload with default options.
    return listConsumerGroups(new ListConsumerGroupsOptions());
}
/**
 * List the consumer group offsets available in the cluster for the given group.
 *
 * @param groupId The ID of the consumer group whose committed offsets will be listed.
 * @param options The options to use when listing the consumer group offsets. Any topic partitions
 *                set in the options are carried over to the group spec used for the lookup.
 * @return The ListConsumerGroupOffsetsResult
 */
default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(String groupId, ListConsumerGroupOffsetsOptions options) {
    @SuppressWarnings("deprecation")
    ListConsumerGroupOffsetsSpec groupSpec = new ListConsumerGroupOffsetsSpec()
        .topicPartitions(options.topicPartitions());
    // We can use the provided options with the batched API, which uses topic partitions from
    // the group spec and ignores any topic partitions set in the options.
    return listConsumerGroupOffsets(Collections.singletonMap(groupId, groupSpec), options);
}
/**
 * List the consumer group offsets available in the cluster with the default options.
 * <p>
 * This is a convenience method for {@link #listConsumerGroupOffsets(Map, ListConsumerGroupOffsetsOptions)}
 * to list offsets of all partitions of one group with default options.
 *
 * @param groupId The group ID whose committed offsets should be listed.
 * @return The ListConsumerGroupOffsetsResult.
 */
default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(String groupId) {
    return listConsumerGroupOffsets(groupId, new ListConsumerGroupOffsetsOptions());
}
/**
 * List the consumer group offsets available in the cluster for the specified consumer groups.
 *
 * @param groupSpecs Map of consumer group ids to a spec that specifies the topic partitions of the group to list offsets for.
 * @param options The options to use when listing the consumer group offsets.
 * @return The ListConsumerGroupOffsetsResult.
 */
ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec> groupSpecs, ListConsumerGroupOffsetsOptions options);
/**
 * List the consumer group offsets available in the cluster for the specified groups with the default options.
 * <p>
 * This is a convenience overload of
 * {@link #listConsumerGroupOffsets(Map, ListConsumerGroupOffsetsOptions)} using default options.
 *
 * @param groupSpecs Map of consumer group ids to a spec that specifies the topic partitions of the group to list offsets for.
 * @return The ListConsumerGroupOffsetsResult.
 */
default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map<String, ListConsumerGroupOffsetsSpec> groupSpecs) {
    ListConsumerGroupOffsetsOptions defaultOptions = new ListConsumerGroupOffsetsOptions();
    return listConsumerGroupOffsets(groupSpecs, defaultOptions);
}
/**
 * Delete consumer groups from the cluster.
 *
 * @param groupIds The IDs of the groups to delete.
 * @param options The options to use when deleting a consumer group.
 * @return The DeleteConsumerGroupsResult.
 */
DeleteConsumerGroupsResult deleteConsumerGroups(Collection<String> groupIds, DeleteConsumerGroupsOptions options);
/**
 * Delete consumer groups from the cluster with the default options.
 *
 * @param groupIds The IDs of the groups to delete.
 * @return The DeleteConsumerGroupsResult.
 */
default DeleteConsumerGroupsResult deleteConsumerGroups(Collection<String> groupIds) {
    return deleteConsumerGroups(groupIds, new DeleteConsumerGroupsOptions());
}
/**
 * Delete committed offsets for a set of partitions in a consumer group. This will
 * succeed at the partition level only if the group is not actively subscribed
 * to the corresponding topic.
 *
 * @param groupId The ID of the group whose committed offsets should be deleted.
 * @param partitions The topic partitions for which to delete committed offsets.
 * @param options The options to use when deleting offsets in a consumer group.
 * @return The DeleteConsumerGroupOffsetsResult.
 */
DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId,
    Set<TopicPartition> partitions,
    DeleteConsumerGroupOffsetsOptions options);
/**
 * Delete committed offsets for a set of partitions in a consumer group with the default
 * options. This will succeed at the partition level only if the group is not actively
 * subscribed to the corresponding topic.
 *
 * @param groupId The ID of the group whose committed offsets should be deleted.
 * @param partitions The topic partitions for which to delete committed offsets.
 * @return The DeleteConsumerGroupOffsetsResult.
 */
default DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, Set<TopicPartition> partitions) {
    return deleteConsumerGroupOffsets(groupId, partitions, new DeleteConsumerGroupOffsetsOptions());
}
/**
 * Elect a replica as leader for topic partitions, using the default options.
 * <p>
 * This is a convenience overload of {@link #electLeaders(ElectionType, Set, ElectLeadersOptions)};
 * see that method for the full contract.
 *
 * @param electionType The type of election to conduct.
 * @param partitions The topics and partitions for which to conduct elections.
 * @return The ElectLeadersResult.
 */
default ElectLeadersResult electLeaders(ElectionType electionType, Set<TopicPartition> partitions) {
    ElectLeadersOptions defaultOptions = new ElectLeadersOptions();
    return electLeaders(electionType, partitions, defaultOptions);
}
/**
 * Elect a replica as leader for the given {@code partitions}, or for all partitions if the argument
 * to {@code partitions} is null.
 * <p>
 * This operation is not transactional so it may succeed for some partitions while fail for others.
 * <p>
 * It may take several seconds after this method returns success for all the brokers in the cluster
 * to become aware that the partitions have new leaders. During this time,
 * {@link #describeTopics(Collection)} may not return information about the partitions'
 * new leaders.
 * <p>
 * This operation is supported by brokers with version 2.2.0 or later if preferred election is used;
 * otherwise the brokers must be 2.4.0 or higher.
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the future obtained
 * from the returned {@link ElectLeadersResult}:
 * <ul>
 * <li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
 * if the authenticated user didn't have alter access to the cluster.</li>
 * <li>{@link org.apache.kafka.common.errors.UnknownTopicOrPartitionException}
 * if the topic or partition did not exist within the cluster.</li>
 * <li>{@link org.apache.kafka.common.errors.InvalidTopicException}
 * if the topic was already queued for deletion.</li>
 * <li>{@link org.apache.kafka.common.errors.NotControllerException}
 * if the request was sent to a broker that was not the controller for the cluster.</li>
 * <li>{@link org.apache.kafka.common.errors.TimeoutException}
 * if the request timed out before the election was complete.</li>
 * <li>{@link org.apache.kafka.common.errors.LeaderNotAvailableException}
 * if the preferred leader was not alive or not in the ISR.</li>
 * </ul>
 *
 * @param electionType The type of election to conduct.
 * @param partitions The topics and partitions for which to conduct elections.
 * @param options The options to use when electing the leaders.
 * @return The ElectLeadersResult.
 */
ElectLeadersResult electLeaders(
    ElectionType electionType,
    Set<TopicPartition> partitions,
    ElectLeadersOptions options);
/**
 * Change the reassignments for one or more partitions.
 * Providing an empty Optional (e.g. via {@link Optional#empty()}) will <b>revert</b> the reassignment for the associated partition.
 * <p>
 * This is a convenience method for {@link #alterPartitionReassignments(Map, AlterPartitionReassignmentsOptions)}
 * with default options. See the overload for more details.
 *
 * @param reassignments The reassignments to add, modify, or remove. See {@link NewPartitionReassignment}.
 * @return The result.
 */
default AlterPartitionReassignmentsResult alterPartitionReassignments(
    Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments) {
    return alterPartitionReassignments(reassignments, new AlterPartitionReassignmentsOptions());
}
/**
 * Change the reassignments for one or more partitions.
 * Providing an empty Optional (e.g. via {@link Optional#empty()}) will <b>revert</b> the reassignment for the associated partition.
 *
 * <p>The following exceptions can be anticipated when calling {@code get()} on the futures obtained from
 * the returned {@code AlterPartitionReassignmentsResult}:</p>
 * <ul>
 * <li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
 * If the authenticated user didn't have alter access to the cluster.</li>
 * <li>{@link org.apache.kafka.common.errors.UnknownTopicOrPartitionException}
 * If the topic or partition does not exist within the cluster.</li>
 * <li>{@link org.apache.kafka.common.errors.TimeoutException}
 * If the request timed out before the controller could record the new assignments.</li>
 * <li>{@link org.apache.kafka.common.errors.InvalidReplicaAssignmentException}
 * If the specified assignment was not valid.</li>
 * <li>{@link org.apache.kafka.common.errors.NoReassignmentInProgressException}
 * If there was an attempt to cancel a reassignment for a partition which was not being reassigned.</li>
 * </ul>
 *
 * @param reassignments The reassignments to add, modify, or remove. See {@link NewPartitionReassignment}.
 * @param options The options to use.
 * @return The result.
 */
AlterPartitionReassignmentsResult alterPartitionReassignments(
    Map<TopicPartition, Optional<NewPartitionReassignment>> reassignments,
    AlterPartitionReassignmentsOptions options);
/**
 * List all of the current partition reassignments.
 * <p>
 * This is a convenience method for {@link #listPartitionReassignments(ListPartitionReassignmentsOptions)}
 * with default options. See the overload for more details.
 *
 * @return The result.
 */
default ListPartitionReassignmentsResult listPartitionReassignments() {
    return listPartitionReassignments(new ListPartitionReassignmentsOptions());
}
/**
 * List the current reassignments for the given partitions.
 * <p>
 * This is a convenience method for {@link #listPartitionReassignments(Set, ListPartitionReassignmentsOptions)}
 * with default options. See the overload for more details.
 *
 * @param partitions The topic partitions to list reassignments for.
 * @return The result.
 */
default ListPartitionReassignmentsResult listPartitionReassignments(Set<TopicPartition> partitions) {
    return listPartitionReassignments(partitions, new ListPartitionReassignmentsOptions());
}
/**
 * List the current reassignments for the given partitions.
 *
 * <p>The following exceptions can be anticipated when calling {@code get()} on the futures obtained from
 * the returned {@code ListPartitionReassignmentsResult}:</p>
 * <ul>
 * <li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
 * If the authenticated user doesn't have alter access to the cluster.</li>
 * <li>{@link org.apache.kafka.common.errors.UnknownTopicOrPartitionException}
 * If a given topic or partition does not exist.</li>
 * <li>{@link org.apache.kafka.common.errors.TimeoutException}
 * If the request timed out before the controller could list the current reassignments.</li>
 * </ul>
 *
 * @param partitions The topic partitions to list reassignments for; must not be null.
 *                   To list reassignments for all partitions, use {@link #listPartitionReassignments(ListPartitionReassignmentsOptions)}.
 * @param options The options to use.
 * @return The result.
 */
default ListPartitionReassignmentsResult listPartitionReassignments(
    Set<TopicPartition> partitions,
    ListPartitionReassignmentsOptions options) {
    // Optional.of throws NullPointerException for a null argument, so a null partitions set
    // fails here rather than meaning "all partitions"; use the Options-only overload for that.
    return listPartitionReassignments(Optional.of(partitions), options);
}
/**
 * List all of the current partition reassignments.
 *
 * <p>The following exceptions can be anticipated when calling {@code get()} on the futures obtained from
 * the returned {@code ListPartitionReassignmentsResult}:</p>
 * <ul>
 * <li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
 * If the authenticated user doesn't have alter access to the cluster.</li>
 * <li>{@link org.apache.kafka.common.errors.UnknownTopicOrPartitionException}
 * If a given topic or partition does not exist.</li>
 * <li>{@link org.apache.kafka.common.errors.TimeoutException}
 * If the request timed out before the controller could list the current reassignments.</li>
 * </ul>
 *
 * @param options The options to use.
 * @return The result.
 */
default ListPartitionReassignmentsResult listPartitionReassignments(ListPartitionReassignmentsOptions options) {
    // An empty Optional asks the broker for every partition in the cluster.
    Optional<Set<TopicPartition>> allPartitions = Optional.empty();
    return listPartitionReassignments(allPartitions, options);
}
/**
 * List the current partition reassignments, either for the given partitions or for all
 * partitions in the cluster.
 *
 * @param partitions the partitions we want to get reassignment for, or an empty optional if we want to get the reassignments for all partitions in the cluster
 * @param options The options to use.
 * @return The result.
 */
ListPartitionReassignmentsResult listPartitionReassignments(Optional<Set<TopicPartition>> partitions,
    ListPartitionReassignmentsOptions options);
/**
 * Remove members from the consumer group by given member identities.
 * <p>
 * For possible error codes, refer to {@link LeaveGroupResponse}.
 *
 * @param groupId The ID of the group to remove members from.
 * @param options The options carrying the identities of the members to remove.
 * @return The MembershipChangeResult.
 */
RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(String groupId, RemoveMembersFromConsumerGroupOptions options);
/**
 * <p>Alters offsets for the specified group. In order to succeed, the group must be empty.
 *
 * <p>This is a convenience method for {@link #alterConsumerGroupOffsets(String, Map, AlterConsumerGroupOffsetsOptions)} with default options.
 * See the overload for more details.
 *
 * @param groupId The group for which to alter offsets.
 * @param offsets A map of offsets by partition with associated metadata.
 * @return The AlterConsumerGroupOffsetsResult.
 */
default AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, Map<TopicPartition, OffsetAndMetadata> offsets) {
    return alterConsumerGroupOffsets(groupId, offsets, new AlterConsumerGroupOffsetsOptions());
}
/**
 * <p>Alters offsets for the specified group. In order to succeed, the group must be empty.
 *
 * <p>This operation is not transactional so it may succeed for some partitions while fail for others.
 *
 * @param groupId The group for which to alter offsets.
 * @param offsets A map of offsets by partition with associated metadata. Partitions not specified in the map are ignored.
 * @param options The options to use when altering the offsets.
 * @return The AlterConsumerGroupOffsetsResult.
 */
AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, Map<TopicPartition, OffsetAndMetadata> offsets, AlterConsumerGroupOffsetsOptions options);
/**
 * <p>List offsets for the specified partitions and OffsetSpec. This operation enables to find
 * the beginning offset, end offset as well as the offset matching a timestamp in partitions.
 *
 * <p>This is a convenience method for {@link #listOffsets(Map, ListOffsetsOptions)}
 * with default options.
 *
 * @param topicPartitionOffsets The mapping from partition to the OffsetSpec to look up.
 * @return The ListOffsetsResult.
 */
default ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets) {
    return listOffsets(topicPartitionOffsets, new ListOffsetsOptions());
}
/**
 * <p>List offsets for the specified partitions. This operation enables to find
 * the beginning offset, end offset as well as the offset matching a timestamp in partitions.
 *
 * @param topicPartitionOffsets The mapping from partition to the OffsetSpec to look up.
 * @param options The options to use when retrieving the offsets.
 * @return The ListOffsetsResult.
 */
ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets, ListOffsetsOptions options);
/**
 * Describes all entities matching the provided filter that have at least one client quota configuration
 * value defined.
 * <p>
 * This is a convenience overload of {@link #describeClientQuotas(ClientQuotaFilter, DescribeClientQuotasOptions)}
 * using default options; see that method for the full contract.
 * <p>
 * This operation is supported by brokers with version 2.6.0 or higher.
 *
 * @param filter the filter to apply to match entities
 * @return the DescribeClientQuotasResult containing the result
 */
default DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter) {
    DescribeClientQuotasOptions defaultOptions = new DescribeClientQuotasOptions();
    return describeClientQuotas(filter, defaultOptions);
}
/**
 * Describes all entities matching the provided filter that have at least one client quota configuration
 * value defined.
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the future from the
 * returned {@link DescribeClientQuotasResult}:
 * <ul>
 * <li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
 * If the authenticated user didn't have describe access to the cluster.</li>
 * <li>{@link org.apache.kafka.common.errors.InvalidRequestException}
 * If the request details are invalid. e.g., an invalid entity type was specified.</li>
 * <li>{@link org.apache.kafka.common.errors.TimeoutException}
 * If the request timed out before the describe could finish.</li>
 * </ul>
 * <p>
 * This operation is supported by brokers with version 2.6.0 or higher.
 *
 * @param filter the filter to apply to match entities
 * @param options the options to use
 * @return the DescribeClientQuotasResult containing the result
 */
DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options);
/**
 * Alters client quota configurations with the specified alterations.
 * <p>
 * This is a convenience overload of {@link #alterClientQuotas(Collection, AlterClientQuotasOptions)}
 * using default options; see that method for the full contract.
 * <p>
 * This operation is supported by brokers with version 2.6.0 or higher.
 *
 * @param entries the alterations to perform
 * @return the AlterClientQuotasResult containing the result
 */
default AlterClientQuotasResult alterClientQuotas(Collection<ClientQuotaAlteration> entries) {
    AlterClientQuotasOptions defaultOptions = new AlterClientQuotasOptions();
    return alterClientQuotas(entries, defaultOptions);
}
/**
 * Alters client quota configurations with the specified alterations.
 * <p>
 * Alterations for a single entity are atomic, but across entities is not guaranteed. The resulting
 * per-entity error code should be evaluated to resolve the success or failure of all updates.
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from
 * the returned {@link AlterClientQuotasResult}:
 * <ul>
 * <li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
 * If the authenticated user didn't have alter access to the cluster.</li>
 * <li>{@link org.apache.kafka.common.errors.InvalidRequestException}
 * If the request details are invalid. e.g., a configuration key was specified more than once for an entity.</li>
 * <li>{@link org.apache.kafka.common.errors.TimeoutException}
 * If the request timed out before the alterations could finish. It cannot be guaranteed whether the update
 * succeeded or not.</li>
 * </ul>
 * <p>
 * This operation is supported by brokers with version 2.6.0 or higher.
 *
 * @param entries the alterations to perform
 * @param options the options to use when altering the quotas
 * @return the AlterClientQuotasResult containing the result
 */
AlterClientQuotasResult alterClientQuotas(Collection<ClientQuotaAlteration> entries, AlterClientQuotasOptions options);
/**
 * Describe all SASL/SCRAM credentials.
 *
 * <p>This is a convenience method for {@link #describeUserScramCredentials(List, DescribeUserScramCredentialsOptions)}
 *
 * @return The DescribeUserScramCredentialsResult.
 */
default DescribeUserScramCredentialsResult describeUserScramCredentials() {
    // A null user list means "describe credentials for all users" (see the List overload).
    return describeUserScramCredentials(null, new DescribeUserScramCredentialsOptions());
}
/**
 * Describe SASL/SCRAM credentials for the given users.
 *
 * <p>This is a convenience overload of
 * {@link #describeUserScramCredentials(List, DescribeUserScramCredentialsOptions)} using default options.
 *
 * @param users the users for which credentials are to be described; all users' credentials are described if null
 *              or empty.
 * @return The DescribeUserScramCredentialsResult.
 */
default DescribeUserScramCredentialsResult describeUserScramCredentials(List<String> users) {
    DescribeUserScramCredentialsOptions defaultOptions = new DescribeUserScramCredentialsOptions();
    return describeUserScramCredentials(users, defaultOptions);
}
/**
 * Describe SASL/SCRAM credentials.
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the futures from the
 * returned {@link DescribeUserScramCredentialsResult}:
 * <ul>
 * <li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
 * If the authenticated user didn't have describe access to the cluster.</li>
 * <li>{@link org.apache.kafka.common.errors.ResourceNotFoundException}
 * If the user did not exist or had no SCRAM credentials.</li>
 * <li>{@link org.apache.kafka.common.errors.DuplicateResourceException}
 * If the user was requested to be described more than once in the original request.</li>
 * <li>{@link org.apache.kafka.common.errors.TimeoutException}
 * If the request timed out before the describe operation could finish.</li>
 * </ul>
 * <p>
 * This operation is supported by brokers with version 2.7.0 or higher.
 *
 * @param users the users for which credentials are to be described; all users' credentials are described if null
 *              or empty.
 * @param options The options to use when describing the credentials
 * @return The DescribeUserScramCredentialsResult.
 */
DescribeUserScramCredentialsResult describeUserScramCredentials(List<String> users, DescribeUserScramCredentialsOptions options);
/**
 * Alter SASL/SCRAM credentials for the given users.
 *
 * <p>This is a convenience overload of
 * {@link #alterUserScramCredentials(List, AlterUserScramCredentialsOptions)} using default options.
 *
 * @param alterations the alterations to be applied
 * @return The AlterUserScramCredentialsResult.
 */
default AlterUserScramCredentialsResult alterUserScramCredentials(List<UserScramCredentialAlteration> alterations) {
    AlterUserScramCredentialsOptions defaultOptions = new AlterUserScramCredentialsOptions();
    return alterUserScramCredentials(alterations, defaultOptions);
}
/**
 * Alter SASL/SCRAM credentials.
 *
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on any of the futures from the
 * returned {@link AlterUserScramCredentialsResult}:
 * <ul>
 * <li>{@link org.apache.kafka.common.errors.NotControllerException}
 * If the request is not sent to the Controller broker.</li>
 * <li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
 * If the authenticated user didn't have alter access to the cluster.</li>
 * <li>{@link org.apache.kafka.common.errors.UnsupportedByAuthenticationException}
 * If the user authenticated with a delegation token.</li>
 * <li>{@link org.apache.kafka.common.errors.UnsupportedSaslMechanismException}
 * If the requested SCRAM mechanism is unrecognized or otherwise unsupported.</li>
 * <li>{@link org.apache.kafka.common.errors.UnacceptableCredentialException}
 * If the username is empty or the requested number of iterations is too small or too large.</li>
 * <li>{@link org.apache.kafka.common.errors.TimeoutException}
 * If the request timed out before the describe could finish.</li>
 * </ul>
 * <p>
 * This operation is supported by brokers with version 2.7.0 or higher.
 *
 * @param alterations the alterations to be applied
 * @param options The options to use when altering the credentials
 * @return The AlterUserScramCredentialsResult.
 */
AlterUserScramCredentialsResult alterUserScramCredentials(List<UserScramCredentialAlteration> alterations,
    AlterUserScramCredentialsOptions options);
/**
 * Describes finalized as well as supported features.
 * <p>
 * This is a convenience overload of {@link #describeFeatures(DescribeFeaturesOptions)} using default
 * options; see that method for the full contract.
 *
 * @return the {@link DescribeFeaturesResult} containing the result
 */
default DescribeFeaturesResult describeFeatures() {
    DescribeFeaturesOptions defaultOptions = new DescribeFeaturesOptions();
    return describeFeatures(defaultOptions);
}
/**
 * Describes finalized as well as supported features. The request is issued to any random
 * broker.
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the future from the
 * returned {@link DescribeFeaturesResult}:
 * <ul>
 * <li>{@link org.apache.kafka.common.errors.TimeoutException}
 * If the request timed out before the describe operation could finish.</li>
 * </ul>
 *
 * @param options the options to use
 * @return the {@link DescribeFeaturesResult} containing the result
 */
DescribeFeaturesResult describeFeatures(DescribeFeaturesOptions options);
/**
 * Applies specified updates to finalized features. This operation is not transactional so some
 * updates may succeed while the rest may fail.
 * <p>
 * The API takes in a map of finalized feature names to {@link FeatureUpdate} that needs to be
 * applied. Each entry in the map specifies the finalized feature to be added or updated or
 * deleted, along with the new max feature version level value. This request is issued only to
 * the controller since the API is only served by the controller. The return value contains an
 * error code for each supplied {@link FeatureUpdate}, and the code indicates if the update
 * succeeded or failed in the controller.
 * <ul>
 * <li>Downgrade of feature version level is not a regular operation/intent. It is only allowed
 * in the controller if the {@link FeatureUpdate} has the allowDowngrade flag set. Setting this
 * flag conveys user intent to attempt downgrade of a feature max version level. Note that
 * despite the allowDowngrade flag being set, certain downgrades may be rejected by the
 * controller if it is deemed impossible.</li>
 * <li>Deletion of a finalized feature version is not a regular operation/intent. It could be
 * done by setting the allowDowngrade flag to true in the {@link FeatureUpdate}, and, setting
 * the max version level to a value less than 1.</li>
 * </ul>
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the futures
 * obtained from the returned {@link UpdateFeaturesResult}:
 * <ul>
 * <li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
 * If the authenticated user didn't have alter access to the cluster.</li>
 * <li>{@link org.apache.kafka.common.errors.InvalidRequestException}
 * If the request details are invalid. e.g., a non-existing finalized feature is attempted
 * to be deleted or downgraded.</li>
 * <li>{@link org.apache.kafka.common.errors.TimeoutException}
 * If the request timed out before the updates could finish. It cannot be guaranteed whether
 * the updates succeeded or not.</li>
 * <li>{@link FeatureUpdateFailedException}
 * This means there was an unexpected error encountered when the update was applied on
 * the controller. There is no guarantee on whether the update succeeded or failed. The best
 * way to find out is to issue a {@link Admin#describeFeatures(DescribeFeaturesOptions)}
 * request.</li>
 * </ul>
 * <p>
 * This operation is supported by brokers with version 2.7.0 or higher.
 *
 * @param featureUpdates the map of finalized feature name to {@link FeatureUpdate}
 * @param options the options to use
 * @return the {@link UpdateFeaturesResult} containing the result
 */
UpdateFeaturesResult updateFeatures(Map<String, FeatureUpdate> featureUpdates, UpdateFeaturesOptions options);
/**
 * Describes the state of the metadata quorum.
 * <p>
 * This is a convenience overload of {@link #describeMetadataQuorum(DescribeMetadataQuorumOptions)}
 * using default options; see that method for the full contract.
 *
 * @return the {@link DescribeMetadataQuorumResult} containing the result
 */
default DescribeMetadataQuorumResult describeMetadataQuorum() {
    DescribeMetadataQuorumOptions defaultOptions = new DescribeMetadataQuorumOptions();
    return describeMetadataQuorum(defaultOptions);
}
/**
 * Describes the state of the metadata quorum.
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the futures obtained from
 * the returned {@code DescribeMetadataQuorumResult}:
 * <ul>
 * <li>{@link org.apache.kafka.common.errors.ClusterAuthorizationException}
 * If the authenticated user didn't have {@code DESCRIBE} access to the cluster.</li>
 * <li>{@link org.apache.kafka.common.errors.TimeoutException}
 * If the request timed out before the quorum state could be described.</li>
 * </ul>
 *
 * @param options The {@link DescribeMetadataQuorumOptions} to use when describing the quorum.
 * @return the {@link DescribeMetadataQuorumResult} containing the result
 */
DescribeMetadataQuorumResult describeMetadataQuorum(DescribeMetadataQuorumOptions options);
/**
 * Unregister a broker.
 * <p>
 * This operation does not have any effect on partition assignments. It is supported
 * only on Kafka clusters which use Raft to store metadata, rather than ZooKeeper.
 * <p>
 * This is a convenience overload of {@link #unregisterBroker(int, UnregisterBrokerOptions)}
 * using default options.
 *
 * @param brokerId the broker id to unregister.
 * @return the {@link UnregisterBrokerResult} containing the result
 */
@InterfaceStability.Unstable
default UnregisterBrokerResult unregisterBroker(int brokerId) {
    UnregisterBrokerOptions defaultOptions = new UnregisterBrokerOptions();
    return unregisterBroker(brokerId, defaultOptions);
}
/**
 * Unregister a broker.
 * <p>
 * This operation does not have any effect on partition assignments. It is supported
 * only on Kafka clusters which use Raft to store metadata, rather than ZooKeeper.
 * <p>
 * The following exceptions can be anticipated when calling {@code get()} on the future from the
 * returned {@link UnregisterBrokerResult}:
 * <ul>
 * <li>{@link org.apache.kafka.common.errors.TimeoutException}
 * If the request timed out before the describe operation could finish.</li>
 * <li>{@link org.apache.kafka.common.errors.UnsupportedVersionException}
 * If the software is too old to support the unregistration API, or if the
 * cluster is not using Raft to store metadata.</li>
 * </ul>
 *
 * @param brokerId the broker id to unregister.
 * @param options the options to use.
 *
 * @return the {@link UnregisterBrokerResult} containing the result
 */
@InterfaceStability.Unstable
UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options);
/**
 * Describe producer state on a set of topic partitions, using the default options. See
 * {@link #describeProducers(Collection, DescribeProducersOptions)} for more details.
 *
 * @param partitions The set of partitions to query
 * @return The result
 */
default DescribeProducersResult describeProducers(Collection<TopicPartition> partitions) {
    DescribeProducersOptions defaultOptions = new DescribeProducersOptions();
    return describeProducers(partitions, defaultOptions);
}
/**
 * Describe active producer state on a set of topic partitions. Unless a specific broker
 * is requested through {@link DescribeProducersOptions#brokerId(int)}, this will
 * query the partition leader to find the producer state.
 *
 * @param partitions The set of partitions to query
 * @param options Options to control the method behavior
 * @return The result
 */
DescribeProducersResult describeProducers(Collection<TopicPartition> partitions, DescribeProducersOptions options);
/**
 * Describe the state of a set of transactional IDs, using the default options. See
 * {@link #describeTransactions(Collection, DescribeTransactionsOptions)} for more details.
 *
 * @param transactionalIds The set of transactional IDs to query
 * @return The result
 */
default DescribeTransactionsResult describeTransactions(Collection<String> transactionalIds) {
    DescribeTransactionsOptions defaultOptions = new DescribeTransactionsOptions();
    return describeTransactions(transactionalIds, defaultOptions);
}
/**
 * Describe the state of a set of transactional IDs from the respective transaction coordinators,
 * which are dynamically discovered.
 *
 * @param transactionalIds The set of transactional IDs to query
 * @param options Options to control the method behavior
 * @return The result
 */
DescribeTransactionsResult describeTransactions(Collection<String> transactionalIds, DescribeTransactionsOptions options);
/**
 * Forcefully abort a transaction which is open on a topic partition, using the default
 * options. See {@link #abortTransaction(AbortTransactionSpec, AbortTransactionOptions)}
 * for more details.
 *
 * @param spec The transaction specification including topic partition and producer details
 * @return The result
 */
default AbortTransactionResult abortTransaction(AbortTransactionSpec spec) {
    AbortTransactionOptions defaultOptions = new AbortTransactionOptions();
    return abortTransaction(spec, defaultOptions);
}
/**
 * Forcefully abort a transaction which is open on a topic partition. This will
 * send a `WriteTxnMarkers` request to the partition leader in order to abort the
 * transaction. This requires administrative privileges.
 *
 * @param spec The transaction specification including topic partition and producer details
 * @param options Options to control the method behavior (including filters)
 * @return The result
 */
AbortTransactionResult abortTransaction(AbortTransactionSpec spec, AbortTransactionOptions options);
/**
 * List active transactions in the cluster, using the default options. See
 * {@link #listTransactions(ListTransactionsOptions)} for more details.
 *
 * @return The result
 */
default ListTransactionsResult listTransactions() {
    ListTransactionsOptions defaultOptions = new ListTransactionsOptions();
    return listTransactions(defaultOptions);
}
/**
 * List active transactions in the cluster. This will query all potential transaction
 * coordinators in the cluster and collect the state of all transactions. Users
 * should typically attempt to reduce the size of the result set using
 * {@link ListTransactionsOptions#filterProducerIds(Collection)} or
 * {@link ListTransactionsOptions#filterStates(Collection)}.
 *
 * @param options Options to control the method behavior (including filters)
 * @return The result
 */
ListTransactionsResult listTransactions(ListTransactionsOptions options);
/**
 * Fence out all active producers that use any of the provided transactional IDs, with the default options.
 * <p>
 * This is a convenience overload of {@link #fenceProducers(Collection, FenceProducersOptions)}
 * using default options; see that method for the full contract.
 *
 * @param transactionalIds The IDs of the producers to fence.
 * @return The FenceProducersResult.
 */
default FenceProducersResult fenceProducers(Collection<String> transactionalIds) {
    FenceProducersOptions defaultOptions = new FenceProducersOptions();
    return fenceProducers(transactionalIds, defaultOptions);
}
/**
 * Fence out all active producers that use any of the provided transactional IDs.
 *
 * @param transactionalIds The IDs of the producers to fence.
 * @param options The options to use when fencing the producers.
 * @return The FenceProducersResult.
 */
FenceProducersResult fenceProducers(Collection<String> transactionalIds,
    FenceProducersOptions options);
/**
 * Get the metrics kept by the adminClient.
 *
 * @return the metrics kept by this client, keyed by metric name
 */
Map<MetricName, ? extends Metric> metrics();
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AdminClient.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Map;
import java.util.Properties;
/**
* The base class for in-built admin clients.
*
* Client code should use the newer {@link Admin} interface in preference to this class.
*
* This class may be removed in a later release, but has not be marked as deprecated to avoid unnecessary noise.
*/
public abstract class AdminClient implements Admin {

    /**
     * Create a new Admin with the given configuration.
     * <p>
     * Delegates to {@link Admin#create(Properties)}; the returned instance is the
     * concrete in-built client, so the downcast to {@code AdminClient} is safe.
     *
     * @param props The configuration.
     * @return The new KafkaAdminClient.
     */
    public static AdminClient create(Properties props) {
        Admin admin = Admin.create(props);
        return (AdminClient) admin;
    }

    /**
     * Create a new Admin with the given configuration.
     * <p>
     * Delegates to {@link Admin#create(Map)}; the returned instance is the
     * concrete in-built client, so the downcast to {@code AdminClient} is safe.
     *
     * @param conf The configuration.
     * @return The new KafkaAdminClient.
     */
    public static AdminClient create(Map<String, Object> conf) {
        Admin admin = Admin.create(conf);
        return (AdminClient) admin;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AdminClientConfig.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.clients.ClientDnsLookup;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.config.SecurityConfig;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.common.utils.Utils;
import java.util.Map;
import java.util.Set;
import static org.apache.kafka.common.config.ConfigDef.Range.atLeast;
import static org.apache.kafka.common.config.ConfigDef.Range.between;
import static org.apache.kafka.common.config.ConfigDef.ValidString.in;
/**
* The AdminClient configuration class, which also contains constants for configuration entry names.
*/
public class AdminClientConfig extends AbstractConfig {

    // The full definition of every admin-client config key; populated once in the
    // static initializer below and shared by all instances.
    private static final ConfigDef CONFIG;

    /**
     * <code>bootstrap.servers</code>
     */
    public static final String BOOTSTRAP_SERVERS_CONFIG = CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG;
    private static final String BOOTSTRAP_SERVERS_DOC = CommonClientConfigs.BOOTSTRAP_SERVERS_DOC;

    /**
     * <code>client.dns.lookup</code>
     */
    public static final String CLIENT_DNS_LOOKUP_CONFIG = CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG;
    private static final String CLIENT_DNS_LOOKUP_DOC = CommonClientConfigs.CLIENT_DNS_LOOKUP_DOC;

    /**
     * <code>reconnect.backoff.ms</code>
     */
    public static final String RECONNECT_BACKOFF_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG;
    private static final String RECONNECT_BACKOFF_MS_DOC = CommonClientConfigs.RECONNECT_BACKOFF_MS_DOC;

    /**
     * <code>reconnect.backoff.max.ms</code>
     */
    public static final String RECONNECT_BACKOFF_MAX_MS_CONFIG = CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG;
    private static final String RECONNECT_BACKOFF_MAX_MS_DOC = CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_DOC;

    /**
     * <code>retry.backoff.ms</code>
     */
    public static final String RETRY_BACKOFF_MS_CONFIG = CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG;
    // Doc string is defined locally (not taken from CommonClientConfigs) for this key.
    private static final String RETRY_BACKOFF_MS_DOC = "The amount of time to wait before attempting to " +
            "retry a failed request. This avoids repeatedly sending requests in a tight loop under " +
            "some failure scenarios.";

    /** <code>socket.connection.setup.timeout.ms</code> */
    public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG;

    /** <code>socket.connection.setup.timeout.max.ms</code> */
    public static final String SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG = CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG;

    /** <code>connections.max.idle.ms</code> */
    public static final String CONNECTIONS_MAX_IDLE_MS_CONFIG = CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG;
    private static final String CONNECTIONS_MAX_IDLE_MS_DOC = CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_DOC;

    /** <code>request.timeout.ms</code> */
    public static final String REQUEST_TIMEOUT_MS_CONFIG = CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG;
    private static final String REQUEST_TIMEOUT_MS_DOC = CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC;

    /** <code>client.id</code> */
    public static final String CLIENT_ID_CONFIG = CommonClientConfigs.CLIENT_ID_CONFIG;
    private static final String CLIENT_ID_DOC = CommonClientConfigs.CLIENT_ID_DOC;

    /** <code>metadata.max.age.ms</code> */
    public static final String METADATA_MAX_AGE_CONFIG = CommonClientConfigs.METADATA_MAX_AGE_CONFIG;
    private static final String METADATA_MAX_AGE_DOC = CommonClientConfigs.METADATA_MAX_AGE_DOC;

    /** <code>send.buffer.bytes</code> */
    public static final String SEND_BUFFER_CONFIG = CommonClientConfigs.SEND_BUFFER_CONFIG;
    private static final String SEND_BUFFER_DOC = CommonClientConfigs.SEND_BUFFER_DOC;

    /** <code>receive.buffer.bytes</code> */
    public static final String RECEIVE_BUFFER_CONFIG = CommonClientConfigs.RECEIVE_BUFFER_CONFIG;
    private static final String RECEIVE_BUFFER_DOC = CommonClientConfigs.RECEIVE_BUFFER_DOC;

    /** <code>metric.reporters</code> */
    public static final String METRIC_REPORTER_CLASSES_CONFIG = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG;
    private static final String METRIC_REPORTER_CLASSES_DOC = CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC;

    // Deprecated upstream; retained here because existing configs may still set it.
    @Deprecated
    public static final String AUTO_INCLUDE_JMX_REPORTER_CONFIG = CommonClientConfigs.AUTO_INCLUDE_JMX_REPORTER_CONFIG;
    public static final String AUTO_INCLUDE_JMX_REPORTER_DOC = CommonClientConfigs.AUTO_INCLUDE_JMX_REPORTER_DOC;

    /** <code>metrics.num.samples</code> */
    public static final String METRICS_NUM_SAMPLES_CONFIG = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG;
    private static final String METRICS_NUM_SAMPLES_DOC = CommonClientConfigs.METRICS_NUM_SAMPLES_DOC;

    /** <code>metrics.sample.window.ms</code> */
    public static final String METRICS_SAMPLE_WINDOW_MS_CONFIG = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG;
    private static final String METRICS_SAMPLE_WINDOW_MS_DOC = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_DOC;

    /** <code>metrics.recording.level</code> */
    public static final String METRICS_RECORDING_LEVEL_CONFIG = CommonClientConfigs.METRICS_RECORDING_LEVEL_CONFIG;

    /** <code>security.protocol</code> */
    public static final String SECURITY_PROTOCOL_CONFIG = CommonClientConfigs.SECURITY_PROTOCOL_CONFIG;
    public static final String DEFAULT_SECURITY_PROTOCOL = CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL;
    private static final String SECURITY_PROTOCOL_DOC = CommonClientConfigs.SECURITY_PROTOCOL_DOC;
    private static final String METRICS_RECORDING_LEVEL_DOC = CommonClientConfigs.METRICS_RECORDING_LEVEL_DOC;

    /** <code>retries</code> */
    public static final String RETRIES_CONFIG = CommonClientConfigs.RETRIES_CONFIG;

    /** <code>default.api.timeout.ms</code> */
    public static final String DEFAULT_API_TIMEOUT_MS_CONFIG = CommonClientConfigs.DEFAULT_API_TIMEOUT_MS_CONFIG;

    /**
     * <code>security.providers</code>
     */
    public static final String SECURITY_PROVIDERS_CONFIG = SecurityConfig.SECURITY_PROVIDERS_CONFIG;
    private static final String SECURITY_PROVIDERS_DOC = SecurityConfig.SECURITY_PROVIDERS_DOC;

    // Build the full ConfigDef once. Defaults and validators here define the
    // admin client's behavior; SSL/SASL keys are appended via the withClient*Support calls.
    static {
        CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG,
                                        Type.LIST,
                                        Importance.HIGH,
                                        BOOTSTRAP_SERVERS_DOC)
                                .define(CLIENT_ID_CONFIG, Type.STRING, "", Importance.MEDIUM, CLIENT_ID_DOC)
                                .define(METADATA_MAX_AGE_CONFIG, Type.LONG, 5 * 60 * 1000, atLeast(0), Importance.LOW, METADATA_MAX_AGE_DOC)
                                .define(SEND_BUFFER_CONFIG, Type.INT, 128 * 1024, atLeast(CommonClientConfigs.SEND_BUFFER_LOWER_BOUND), Importance.MEDIUM, SEND_BUFFER_DOC)
                                .define(RECEIVE_BUFFER_CONFIG, Type.INT, 64 * 1024, atLeast(CommonClientConfigs.RECEIVE_BUFFER_LOWER_BOUND), Importance.MEDIUM, RECEIVE_BUFFER_DOC)
                                .define(RECONNECT_BACKOFF_MS_CONFIG,
                                        Type.LONG,
                                        50L,
                                        atLeast(0L),
                                        Importance.LOW,
                                        RECONNECT_BACKOFF_MS_DOC)
                                .define(RECONNECT_BACKOFF_MAX_MS_CONFIG,
                                        Type.LONG,
                                        1000L,
                                        atLeast(0L),
                                        Importance.LOW,
                                        RECONNECT_BACKOFF_MAX_MS_DOC)
                                .define(RETRY_BACKOFF_MS_CONFIG,
                                        Type.LONG,
                                        100L,
                                        atLeast(0L),
                                        Importance.LOW,
                                        RETRY_BACKOFF_MS_DOC)
                                .define(REQUEST_TIMEOUT_MS_CONFIG,
                                        Type.INT,
                                        30000,
                                        atLeast(0),
                                        Importance.MEDIUM,
                                        REQUEST_TIMEOUT_MS_DOC)
                                .define(SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG,
                                        Type.LONG,
                                        CommonClientConfigs.DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MS,
                                        Importance.MEDIUM,
                                        CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_DOC)
                                .define(SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG,
                                        Type.LONG,
                                        CommonClientConfigs.DEFAULT_SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS,
                                        Importance.MEDIUM,
                                        CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_DOC)
                                .define(CONNECTIONS_MAX_IDLE_MS_CONFIG,
                                        Type.LONG,
                                        5 * 60 * 1000,
                                        Importance.MEDIUM,
                                        CONNECTIONS_MAX_IDLE_MS_DOC)
                                .define(RETRIES_CONFIG,
                                        Type.INT,
                                        Integer.MAX_VALUE,
                                        between(0, Integer.MAX_VALUE),
                                        Importance.LOW,
                                        CommonClientConfigs.RETRIES_DOC)
                                .define(DEFAULT_API_TIMEOUT_MS_CONFIG,
                                        Type.INT,
                                        60000,
                                        atLeast(0),
                                        Importance.MEDIUM,
                                        CommonClientConfigs.DEFAULT_API_TIMEOUT_MS_DOC)
                                .define(METRICS_SAMPLE_WINDOW_MS_CONFIG,
                                        Type.LONG,
                                        30000,
                                        atLeast(0),
                                        Importance.LOW,
                                        METRICS_SAMPLE_WINDOW_MS_DOC)
                                .define(METRICS_NUM_SAMPLES_CONFIG, Type.INT, 2, atLeast(1), Importance.LOW, METRICS_NUM_SAMPLES_DOC)
                                .define(METRIC_REPORTER_CLASSES_CONFIG, Type.LIST, "", Importance.LOW, METRIC_REPORTER_CLASSES_DOC)
                                .define(METRICS_RECORDING_LEVEL_CONFIG,
                                        Type.STRING,
                                        Sensor.RecordingLevel.INFO.toString(),
                                        in(Sensor.RecordingLevel.INFO.toString(), Sensor.RecordingLevel.DEBUG.toString(), Sensor.RecordingLevel.TRACE.toString()),
                                        Importance.LOW,
                                        METRICS_RECORDING_LEVEL_DOC)
                                .define(AUTO_INCLUDE_JMX_REPORTER_CONFIG,
                                        Type.BOOLEAN,
                                        true,
                                        Importance.LOW,
                                        AUTO_INCLUDE_JMX_REPORTER_DOC)
                                .define(CLIENT_DNS_LOOKUP_CONFIG,
                                        Type.STRING,
                                        ClientDnsLookup.USE_ALL_DNS_IPS.toString(),
                                        in(ClientDnsLookup.USE_ALL_DNS_IPS.toString(),
                                           ClientDnsLookup.RESOLVE_CANONICAL_BOOTSTRAP_SERVERS_ONLY.toString()),
                                        Importance.MEDIUM,
                                        CLIENT_DNS_LOOKUP_DOC)
                                // security support
                                .define(SECURITY_PROVIDERS_CONFIG,
                                        Type.STRING,
                                        null,
                                        Importance.LOW,
                                        SECURITY_PROVIDERS_DOC)
                                .define(SECURITY_PROTOCOL_CONFIG,
                                        Type.STRING,
                                        DEFAULT_SECURITY_PROTOCOL,
                                        // case-insensitive so e.g. "ssl" and "SSL" both work
                                        ConfigDef.CaseInsensitiveValidString
                                                .in(Utils.enumOptions(SecurityProtocol.class)),
                                        Importance.MEDIUM,
                                        SECURITY_PROTOCOL_DOC)
                                .withClientSslSupport()
                                .withClientSaslSupport();
    }

    /**
     * Runs after parsing: validates the SASL mechanism and reconciles the
     * reconnect-backoff pair (max vs. base) before the values are exposed.
     */
    @Override
    protected Map<String, Object> postProcessParsedConfig(final Map<String, Object> parsedValues) {
        CommonClientConfigs.postValidateSaslMechanismConfig(this);
        return CommonClientConfigs.postProcessReconnectBackoffConfigs(this, parsedValues);
    }

    // Public constructor: does not log the parsed configuration.
    public AdminClientConfig(Map<?, ?> props) {
        this(props, false);
    }

    // "admin" is the config prefix used when resolving client overrides.
    protected AdminClientConfig(Map<?, ?> props, boolean doLog) {
        super(CONFIG, props, doLog, "admin");
    }

    /**
     * Returns the names of all configuration keys known to the admin client.
     */
    public static Set<String> configNames() {
        return CONFIG.names();
    }

    /**
     * Returns a defensive copy of the admin client's ConfigDef.
     */
    public static ConfigDef configDef() {
        return new ConfigDef(CONFIG);
    }

    // Used at build time to generate the HTML documentation table for these configs.
    public static void main(String[] args) {
        System.out.println(CONFIG.toHtml(4, config -> "adminclientconfigs_" + config));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AlterClientQuotasOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
* Options for {@link Admin#alterClientQuotas(Collection, AlterClientQuotasOptions)}.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class AlterClientQuotasOptions extends AbstractOptions<AlterClientQuotasOptions> {

    private boolean validateOnly = false;

    /**
     * Sets whether the request should be validated without altering the configs.
     *
     * @param validateOnly if true the alteration is only validated, not applied
     * @return this options object, to allow chaining
     */
    public AlterClientQuotasOptions validateOnly(boolean validateOnly) {
        this.validateOnly = validateOnly;
        return this;
    }

    /**
     * Returns whether the request should be validated without altering the configs.
     */
    public boolean validateOnly() {
        return validateOnly;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AlterClientQuotasResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.quota.ClientQuotaEntity;
import java.util.Collection;
import java.util.Map;
/**
* The result of the {@link Admin#alterClientQuotas(Collection, AlterClientQuotasOptions)} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class AlterClientQuotasResult {

    // One future per altered quota entity.
    private final Map<ClientQuotaEntity, KafkaFuture<Void>> futures;

    /**
     * Maps an entity to its alteration result.
     *
     * @param futures maps entity to its alteration result
     */
    public AlterClientQuotasResult(Map<ClientQuotaEntity, KafkaFuture<Void>> futures) {
        this.futures = futures;
    }

    /**
     * Returns a map from quota entity to a future which can be used to check the status of the operation.
     */
    public Map<ClientQuotaEntity, KafkaFuture<Void>> values() {
        return futures;
    }

    /**
     * Returns a future which succeeds only if all quota alterations succeed.
     */
    public KafkaFuture<Void> all() {
        Collection<KafkaFuture<Void>> outcomes = futures.values();
        return KafkaFuture.allOf(outcomes.toArray(new KafkaFuture[0]));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AlterConfigOp.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
import java.util.function.Function;
import java.util.stream.Collectors;
/**
* A class representing a alter configuration entry containing name, value and operation type.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class AlterConfigOp {

    /**
     * The type of alteration to apply to a configuration entry.
     */
    public enum OpType {
        /**
         * Set the value of the configuration entry.
         */
        SET((byte) 0),
        /**
         * Revert the configuration entry to the default value (possibly null).
         */
        DELETE((byte) 1),
        /**
         * (For list-type configuration entries only.) Add the specified values to the
         * current value of the configuration entry. If the configuration value has not been set,
         * adds to the default value.
         */
        APPEND((byte) 2),
        /**
         * (For list-type configuration entries only.) Removes the specified values from the current
         * value of the configuration entry. It is legal to remove values that are not currently in the
         * configuration entry. Removing all entries from the current configuration value leaves an empty
         * list and does NOT revert to the default value of the entry.
         */
        SUBTRACT((byte) 3);

        // Lookup table from wire id to constant, built once at class-load time.
        private static final Map<Byte, OpType> OP_TYPES = Collections.unmodifiableMap(
                Arrays.stream(values()).collect(Collectors.toMap(op -> op.id, op -> op))
        );

        private final byte id;

        OpType(final byte id) {
            this.id = id;
        }

        /** The numeric id of this op type as sent on the wire. */
        public byte id() {
            return id;
        }

        /** Resolves a wire id back to its OpType, or null if unknown. */
        public static OpType forId(final byte id) {
            return OP_TYPES.get(id);
        }
    }

    private final ConfigEntry configEntry;
    private final OpType opType;

    public AlterConfigOp(ConfigEntry configEntry, OpType operationType) {
        this.configEntry = configEntry;
        this.opType = operationType;
    }

    /** The configuration entry (name and, where relevant, value) being altered. */
    public ConfigEntry configEntry() {
        return configEntry;
    }

    /** The kind of alteration to perform. */
    public OpType opType() {
        return opType;
    }

    @Override
    public boolean equals(final Object o) {
        if (this == o)
            return true;
        if (o == null)
            return false;
        if (getClass() != o.getClass())
            return false;
        final AlterConfigOp other = (AlterConfigOp) o;
        return opType == other.opType && Objects.equals(configEntry, other.configEntry);
    }

    @Override
    public int hashCode() {
        return Objects.hash(opType, configEntry);
    }

    @Override
    public String toString() {
        return "AlterConfigOp{" +
            "opType=" + opType +
            ", configEntry=" + configEntry +
            '}';
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AlterConfigsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
* Options for {@link Admin#incrementalAlterConfigs(Map)} and {@link Admin#alterConfigs(Map)}.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class AlterConfigsOptions extends AbstractOptions<AlterConfigsOptions> {

    private boolean validateOnly = false;

    /**
     * Set the timeout in milliseconds for this operation or {@code null} if the default api timeout for the
     * AdminClient should be used.
     */
    // Overridden here (not just inherited) to keep binary compatibility with 0.11.
    public AlterConfigsOptions timeoutMs(Integer timeoutMs) {
        this.timeoutMs = timeoutMs;
        return this;
    }

    /**
     * Set to true if the request should be validated without altering the configs.
     *
     * @return this options object, to allow chaining
     */
    public AlterConfigsOptions validateOnly(boolean validateOnly) {
        this.validateOnly = validateOnly;
        return this;
    }

    /**
     * Return true if the request should be validated without altering the configs.
     */
    public boolean shouldValidateOnly() {
        return validateOnly;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AlterConfigsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.config.ConfigResource;
import java.util.Map;
/**
* The result of the {@link Admin#alterConfigs(Map)} call.
*
* The API of this class is evolving, see {@link Admin} for details.
*/
@InterfaceStability.Evolving
public class AlterConfigsResult {

    // One future per altered resource.
    private final Map<ConfigResource, KafkaFuture<Void>> futures;

    AlterConfigsResult(Map<ConfigResource, KafkaFuture<Void>> futures) {
        this.futures = futures;
    }

    /**
     * Return a map from resources to futures which can be used to check the status of the operation on each resource.
     */
    public Map<ConfigResource, KafkaFuture<Void>> values() {
        return futures;
    }

    /**
     * Return a future which succeeds only if all the alter configs operations succeed.
     */
    public KafkaFuture<Void> all() {
        Collection<KafkaFuture<Void>> outcomes = futures.values();
        return KafkaFuture.allOf(outcomes.toArray(new KafkaFuture[0]));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AlterConsumerGroupOffsetsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
 * Options for the {@link AdminClient#alterConsumerGroupOffsets(String, Map, AlterConsumerGroupOffsetsOptions)} call.
 *
 * The API of this class is evolving, see {@link AdminClient} for details.
 */
@InterfaceStability.Evolving
public class AlterConsumerGroupOffsetsOptions extends AbstractOptions<AlterConsumerGroupOffsetsOptions> {
    // Intentionally empty: only the generic options inherited from AbstractOptions
    // (such as the operation timeout) apply to this call.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AlterConsumerGroupOffsetsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.internals.KafkaFutureImpl;
import org.apache.kafka.common.protocol.Errors;
/**
* The result of the {@link AdminClient#alterConsumerGroupOffsets(String, Map)} call.
*
* The API of this class is evolving, see {@link AdminClient} for details.
*/
@InterfaceStability.Evolving
public class AlterConsumerGroupOffsetsResult {
private final KafkaFuture<Map<TopicPartition, Errors>> future;
AlterConsumerGroupOffsetsResult(KafkaFuture<Map<TopicPartition, Errors>> future) {
this.future = future;
}
/**
* Return a future which can be used to check the result for a given partition.
*/
public KafkaFuture<Void> partitionResult(final TopicPartition partition) {
final KafkaFutureImpl<Void> result = new KafkaFutureImpl<>();
this.future.whenComplete((topicPartitions, throwable) -> {
if (throwable != null) {
result.completeExceptionally(throwable);
} else if (!topicPartitions.containsKey(partition)) {
result.completeExceptionally(new IllegalArgumentException(
"Alter offset for partition \"" + partition + "\" was not attempted"));
} else {
final Errors error = topicPartitions.get(partition);
if (error == Errors.NONE) {
result.complete(null);
} else {
result.completeExceptionally(error.exception());
}
}
});
return result;
}
/**
* Return a future which succeeds if all the alter offsets succeed.
*/
public KafkaFuture<Void> all() {
return this.future.thenApply(topicPartitionErrorsMap -> {
List<TopicPartition> partitionsFailed = topicPartitionErrorsMap.entrySet()
.stream()
.filter(e -> e.getValue() != Errors.NONE)
.map(Map.Entry::getKey)
.collect(Collectors.toList());
for (Errors error : topicPartitionErrorsMap.values()) {
if (error != Errors.NONE) {
throw error.exception(
"Failed altering consumer group offsets for the following partitions: " + partitionsFailed);
}
}
return null;
});
}
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AlterPartitionReassignmentsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
 * Options for {@link AdminClient#alterPartitionReassignments(Map, AlterPartitionReassignmentsOptions)}
 *
 * The API of this class is evolving. See {@link AdminClient} for details.
 */
@InterfaceStability.Evolving
public class AlterPartitionReassignmentsOptions extends AbstractOptions<AlterPartitionReassignmentsOptions> {
    // Intentionally empty: only the generic options inherited from AbstractOptions
    // (such as the operation timeout) apply to this call.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AlterPartitionReassignmentsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
* The result of {@link AdminClient#alterPartitionReassignments(Map, AlterPartitionReassignmentsOptions)}.
*
* The API of this class is evolving. See {@link AdminClient} for details.
*/
@InterfaceStability.Evolving
public class AlterPartitionReassignmentsResult {

    // One future per partition whose reassignment was requested.
    private final Map<TopicPartition, KafkaFuture<Void>> futures;

    AlterPartitionReassignmentsResult(Map<TopicPartition, KafkaFuture<Void>> futures) {
        this.futures = futures;
    }

    /**
     * Return a map from partitions to futures which can be used to check the status of the reassignment.
     *
     * Possible error codes:
     *
     * INVALID_REPLICA_ASSIGNMENT (39) - if the specified replica assignment was not valid -- for example, if it included negative numbers, repeated numbers, or specified a broker ID that the controller was not aware of.
     * NO_REASSIGNMENT_IN_PROGRESS (85) - if the request wants to cancel reassignments but none exist
     * UNKNOWN (-1)
     *
     */
    public Map<TopicPartition, KafkaFuture<Void>> values() {
        return futures;
    }

    /**
     * Return a future which succeeds only if all the reassignments were successfully initiated.
     */
    public KafkaFuture<Void> all() {
        Collection<KafkaFuture<Void>> outcomes = futures.values();
        return KafkaFuture.allOf(outcomes.toArray(new KafkaFuture[0]));
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AlterReplicaLogDirsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Map;
/**
 * Options for {@link Admin#alterReplicaLogDirs(Map, AlterReplicaLogDirsOptions)}.
 */
@InterfaceStability.Evolving
public class AlterReplicaLogDirsOptions extends AbstractOptions<AlterReplicaLogDirsOptions> {
    // Intentionally empty: only the generic options inherited from AbstractOptions
    // (e.g. the request timeout) apply to alterReplicaLogDirs at this time.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AlterReplicaLogDirsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Map;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartitionReplica;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.errors.ClusterAuthorizationException;
import org.apache.kafka.common.errors.InvalidTopicException;
import org.apache.kafka.common.errors.KafkaStorageException;
import org.apache.kafka.common.errors.LogDirNotFoundException;
import org.apache.kafka.common.errors.ReplicaNotAvailableException;
import org.apache.kafka.common.errors.UnknownServerException;
/**
 * The result of {@link Admin#alterReplicaLogDirs(Map, AlterReplicaLogDirsOptions)}.
 *
 * To retrieve the detailed result per specified {@link TopicPartitionReplica}, use {@link #values()}. To retrieve the
 * overall result only, use {@link #all()}.
 */
@InterfaceStability.Evolving
public class AlterReplicaLogDirsResult {

    // Per-replica movement outcome, keyed by the replica the caller asked to move.
    private final Map<TopicPartitionReplica, KafkaFuture<Void>> movementFutures;

    AlterReplicaLogDirsResult(Map<TopicPartitionReplica, KafkaFuture<Void>> futures) {
        this.movementFutures = futures;
    }

    /**
     * Return a map from {@link TopicPartitionReplica} to {@link KafkaFuture} which holds the status of individual
     * replica movement.
     *
     * To check the result of individual replica movement, call {@link KafkaFuture#get()} from the value contained
     * in the returned map. If there is no error, it will return silently; if not, an {@link Exception} will be thrown
     * like the following:
     *
     * <ul>
     *   <li>{@link CancellationException}: The task was canceled.</li>
     *   <li>{@link InterruptedException}: Interrupted while joining I/O thread.</li>
     *   <li>{@link ExecutionException}: Execution failed with the following causes:</li>
     *   <ul>
     *     <li>{@link ClusterAuthorizationException}: Authorization failed. (CLUSTER_AUTHORIZATION_FAILED, 31)</li>
     *     <li>{@link InvalidTopicException}: The specified topic name is too long. (INVALID_TOPIC_EXCEPTION, 17)</li>
     *     <li>{@link LogDirNotFoundException}: The specified log directory is not found in the broker. (LOG_DIR_NOT_FOUND, 57)</li>
     *     <li>{@link ReplicaNotAvailableException}: The replica does not exist on the broker. (REPLICA_NOT_AVAILABLE, 9)</li>
     *     <li>{@link KafkaStorageException}: Disk error occurred. (KAFKA_STORAGE_ERROR, 56)</li>
     *     <li>{@link UnknownServerException}: Unknown. (UNKNOWN_SERVER_ERROR, -1)</li>
     *   </ul>
     * </ul>
     */
    public Map<TopicPartitionReplica, KafkaFuture<Void>> values() {
        return movementFutures;
    }

    /**
     * Return a {@link KafkaFuture} which succeeds on {@link KafkaFuture#get()} if all the replica movement have succeeded.
     * if not, it throws an {@link Exception} described in {@link #values()} method.
     */
    public KafkaFuture<Void> all() {
        KafkaFuture<?>[] outstanding = movementFutures.values().toArray(new KafkaFuture[0]);
        return KafkaFuture.allOf(outstanding);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AlterUserScramCredentialsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.List;
/**
 * Options for {@link AdminClient#alterUserScramCredentials(List, AlterUserScramCredentialsOptions)}
 *
 * The API of this class is evolving. See {@link AdminClient} for details.
 */
@InterfaceStability.Evolving
public class AlterUserScramCredentialsOptions extends AbstractOptions<AlterUserScramCredentialsOptions> {
    // Intentionally empty: only the generic options inherited from AbstractOptions
    // (e.g. the request timeout) apply to this operation at this time.
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/AlterUserScramCredentialsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
/**
 * The result of the {@link Admin#alterUserScramCredentials(List)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class AlterUserScramCredentialsResult {

    // Immutable view mapping user name -> future for that user's credential alteration.
    private final Map<String, KafkaFuture<Void>> perUserFutures;

    /**
     * @param futures the required map from user names to futures representing the results of the alteration(s)
     *                for each user
     */
    public AlterUserScramCredentialsResult(Map<String, KafkaFuture<Void>> futures) {
        Map<String, KafkaFuture<Void>> checked = Objects.requireNonNull(futures);
        this.perUserFutures = Collections.unmodifiableMap(checked);
    }

    /**
     * Return a map from user names to futures, which can be used to check the status of the alteration(s)
     * for each user.
     */
    public Map<String, KafkaFuture<Void>> values() {
        return perUserFutures;
    }

    /**
     * Return a future which succeeds only if all the user SCRAM credential alterations succeed.
     */
    public KafkaFuture<Void> all() {
        KafkaFuture<?>[] pending = perUserFutures.values().toArray(new KafkaFuture[0]);
        return KafkaFuture.allOf(pending);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/Config.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
/**
 * A configuration object containing the configuration entries for a resource.
 * <p>
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class Config {

    // Entries keyed by config name so get(String) is a direct lookup.
    private final Map<String, ConfigEntry> entries = new HashMap<>();

    /**
     * Create a configuration instance with the provided entries.
     */
    public Config(Collection<ConfigEntry> entries) {
        entries.forEach(entry -> this.entries.put(entry.name(), entry));
    }

    /**
     * Configuration entries for a resource.
     */
    public Collection<ConfigEntry> entries() {
        return Collections.unmodifiableCollection(entries.values());
    }

    /**
     * Get the configuration entry with the provided name or null if there isn't one.
     */
    public ConfigEntry get(String name) {
        return entries.get(name);
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        return entries.equals(((Config) o).entries);
    }

    @Override
    public int hashCode() {
        return entries.hashCode();
    }

    @Override
    public String toString() {
        return "Config(entries=" + entries.values() + ")";
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ConfigEntry.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
/**
 * A class representing a configuration entry containing name, value and additional metadata.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class ConfigEntry {
    private final String name;
    private final String value;
    private final ConfigSource source;
    private final boolean isSensitive;
    private final boolean isReadOnly;
    private final List<ConfigSynonym> synonyms;
    private final ConfigType type;
    private final String documentation;

    /**
     * Create a configuration entry with the provided values.
     * <p>
     * Source defaults to {@link ConfigSource#UNKNOWN}, sensitive/read-only to {@code false},
     * synonyms to an empty list, type to {@link ConfigType#UNKNOWN} and documentation to {@code null}.
     *
     * @param name the non-null config name
     * @param value the config value or null
     */
    public ConfigEntry(String name, String value) {
        this(name, value, ConfigSource.UNKNOWN, false, false,
                Collections.emptyList(), ConfigType.UNKNOWN, null);
    }

    /**
     * Create a configuration with the provided values.
     *
     * @param name the non-null config name
     * @param value the config value or null
     * @param source the source of this config entry
     * @param isSensitive whether the config value is sensitive, the broker never returns the value if it is sensitive
     * @param isReadOnly whether the config is read-only and cannot be updated
     * @param synonyms Synonym configs in order of precedence
     * @param type the data type of this config, or {@link ConfigType#UNKNOWN} if not known
     * @param documentation the human-readable documentation for this config, or null
     * @throws NullPointerException if {@code name} is null
     */
    public ConfigEntry(String name,
                       String value,
                       ConfigSource source,
                       boolean isSensitive,
                       boolean isReadOnly,
                       List<ConfigSynonym> synonyms,
                       ConfigType type,
                       String documentation) {
        Objects.requireNonNull(name, "name should not be null");
        this.name = name;
        this.value = value;
        this.source = source;
        this.isSensitive = isSensitive;
        this.isReadOnly = isReadOnly;
        this.synonyms = synonyms;
        this.type = type;
        this.documentation = documentation;
    }

    /**
     * Return the config name.
     */
    public String name() {
        return name;
    }

    /**
     * Return the value or null. Null is returned if the config is unset or if isSensitive is true.
     */
    public String value() {
        return value;
    }

    /**
     * Return the source of this configuration entry.
     */
    public ConfigSource source() {
        return source;
    }

    /**
     * Return whether the config value is the default or if it's been explicitly set.
     */
    public boolean isDefault() {
        return source == ConfigSource.DEFAULT_CONFIG;
    }

    /**
     * Return whether the config value is sensitive. The value is always set to null by the broker if the config value
     * is sensitive.
     */
    public boolean isSensitive() {
        return isSensitive;
    }

    /**
     * Return whether the config is read-only and cannot be updated.
     */
    public boolean isReadOnly() {
        return isReadOnly;
    }

    /**
     * Returns all config values that may be used as the value of this config along with their source,
     * in the order of precedence. The list starts with the value returned in this ConfigEntry.
     * The list is empty if synonyms were not requested using {@link DescribeConfigsOptions#includeSynonyms(boolean)}
     */
    public List<ConfigSynonym> synonyms() {
        return synonyms;
    }

    /**
     * Return the config data type.
     */
    public ConfigType type() {
        return type;
    }

    /**
     * Return the config documentation.
     */
    public String documentation() {
        return documentation;
    }

    // Equality covers every field; uses getClass() (not instanceof) so subclasses never
    // compare equal to a plain ConfigEntry.
    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        ConfigEntry that = (ConfigEntry) o;
        return this.name.equals(that.name) &&
            Objects.equals(this.value, that.value) &&
            this.isSensitive == that.isSensitive &&
            this.isReadOnly == that.isReadOnly &&
            Objects.equals(this.source, that.source) &&
            Objects.equals(this.synonyms, that.synonyms) &&
            Objects.equals(this.type, that.type) &&
            Objects.equals(this.documentation, that.documentation);
    }

    // Hand-rolled 31-prime hash over the same fields as equals(). Note the booleans
    // contribute 1/0 here (not Boolean.hashCode's 1231/1237), so this is NOT
    // interchangeable with Objects.hash over the raw fields.
    @Override
    public int hashCode() {
        final int prime = 31;
        int result = 1;
        result = prime * result + name.hashCode();
        result = prime * result + Objects.hashCode(value);
        result = prime * result + (isSensitive ? 1 : 0);
        result = prime * result + (isReadOnly ? 1 : 0);
        result = prime * result + Objects.hashCode(source);
        result = prime * result + Objects.hashCode(synonyms);
        result = prime * result + Objects.hashCode(type);
        result = prime * result + Objects.hashCode(documentation);
        return result;
    }

    /**
     * Override toString to redact sensitive value.
     * WARNING, user should be responsible to set the correct "isSensitive" field for each config entry.
     */
    @Override
    public String toString() {
        return "ConfigEntry(" +
            "name=" + name +
            ", value=" + (isSensitive ? "Redacted" : value) +
            ", source=" + source +
            ", isSensitive=" + isSensitive +
            ", isReadOnly=" + isReadOnly +
            ", synonyms=" + synonyms +
            ", type=" + type +
            ", documentation=" + documentation +
            ")";
    }

    /**
     * Data type of configuration entry.
     */
    public enum ConfigType {
        UNKNOWN,
        BOOLEAN,
        STRING,
        INT,
        SHORT,
        LONG,
        DOUBLE,
        LIST,
        CLASS,
        PASSWORD
    }

    /**
     * Source of configuration entries.
     */
    public enum ConfigSource {
        DYNAMIC_TOPIC_CONFIG, // dynamic topic config that is configured for a specific topic
        DYNAMIC_BROKER_LOGGER_CONFIG, // dynamic broker logger config that is configured for a specific broker
        DYNAMIC_BROKER_CONFIG, // dynamic broker config that is configured for a specific broker
        DYNAMIC_DEFAULT_BROKER_CONFIG, // dynamic broker config that is configured as default for all brokers in the cluster
        STATIC_BROKER_CONFIG, // static broker config provided as broker properties at start up (e.g. server.properties file)
        DEFAULT_CONFIG, // built-in default configuration for configs that have a default value
        UNKNOWN // source unknown e.g. in the ConfigEntry used for alter requests where source is not set
    }

    /**
     * Class representing a configuration synonym of a {@link ConfigEntry}.
     */
    public static class ConfigSynonym {
        private final String name;
        private final String value;
        private final ConfigSource source;

        /**
         * Create a configuration synonym with the provided values.
         * Package-private: instances are created internally, not by API users.
         *
         * @param name Configuration name (this may be different from the name of the associated {@link ConfigEntry}
         * @param value Configuration value
         * @param source {@link ConfigSource} of this configuration
         */
        ConfigSynonym(String name, String value, ConfigSource source) {
            this.name = name;
            this.value = value;
            this.source = source;
        }

        /**
         * Returns the name of this configuration.
         */
        public String name() {
            return name;
        }

        /**
         * Returns the value of this configuration, which may be null if the configuration is sensitive.
         */
        public String value() {
            return value;
        }

        /**
         * Returns the source of this configuration.
         */
        public ConfigSource source() {
            return source;
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            ConfigSynonym that = (ConfigSynonym) o;
            return Objects.equals(name, that.name) && Objects.equals(value, that.value) && source == that.source;
        }

        @Override
        public int hashCode() {
            return Objects.hash(name, value, source);
        }

        @Override
        public String toString() {
            return "ConfigSynonym(" +
                "name=" + name +
                ", value=" + value +
                ", source=" + source +
                ")";
        }
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ConsumerGroupDescription.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.ConsumerGroupState;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.acl.AclOperation;
import org.apache.kafka.common.utils.Utils;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Objects;
import java.util.Set;
/**
 * A detailed description of a single consumer group in the cluster.
 */
public class ConsumerGroupDescription {

    private final String groupId;
    private final boolean isSimpleConsumerGroup;
    private final Collection<MemberDescription> members;
    private final String partitionAssignor;
    private final ConsumerGroupState state;
    private final Node coordinator;
    private final Set<AclOperation> authorizedOperations;

    public ConsumerGroupDescription(String groupId,
                                    boolean isSimpleConsumerGroup,
                                    Collection<MemberDescription> members,
                                    String partitionAssignor,
                                    ConsumerGroupState state,
                                    Node coordinator) {
        this(groupId, isSimpleConsumerGroup, members, partitionAssignor, state, coordinator, Collections.emptySet());
    }

    public ConsumerGroupDescription(String groupId,
                                    boolean isSimpleConsumerGroup,
                                    Collection<MemberDescription> members,
                                    String partitionAssignor,
                                    ConsumerGroupState state,
                                    Node coordinator,
                                    Set<AclOperation> authorizedOperations) {
        // Nullable string args are normalized to "", nullable members to an empty list.
        if (groupId == null) {
            this.groupId = "";
        } else {
            this.groupId = groupId;
        }
        this.isSimpleConsumerGroup = isSimpleConsumerGroup;
        if (members == null) {
            this.members = Collections.emptyList();
        } else {
            // Defensive copy, exposed read-only through members().
            this.members = Collections.unmodifiableList(new ArrayList<>(members));
        }
        if (partitionAssignor == null) {
            this.partitionAssignor = "";
        } else {
            this.partitionAssignor = partitionAssignor;
        }
        this.state = state;
        this.coordinator = coordinator;
        this.authorizedOperations = authorizedOperations;
    }

    @Override
    public boolean equals(final Object o) {
        if (this == o)
            return true;
        if (o == null || getClass() != o.getClass())
            return false;
        final ConsumerGroupDescription that = (ConsumerGroupDescription) o;
        return isSimpleConsumerGroup == that.isSimpleConsumerGroup
            && Objects.equals(groupId, that.groupId)
            && Objects.equals(members, that.members)
            && Objects.equals(partitionAssignor, that.partitionAssignor)
            && state == that.state
            && Objects.equals(coordinator, that.coordinator)
            && Objects.equals(authorizedOperations, that.authorizedOperations);
    }

    @Override
    public int hashCode() {
        return Objects.hash(groupId, isSimpleConsumerGroup, members, partitionAssignor, state, coordinator, authorizedOperations);
    }

    /**
     * The id of the consumer group.
     */
    public String groupId() {
        return groupId;
    }

    /**
     * If consumer group is simple or not.
     */
    public boolean isSimpleConsumerGroup() {
        return isSimpleConsumerGroup;
    }

    /**
     * A list of the members of the consumer group.
     */
    public Collection<MemberDescription> members() {
        return members;
    }

    /**
     * The consumer group partition assignor.
     */
    public String partitionAssignor() {
        return partitionAssignor;
    }

    /**
     * The consumer group state, or UNKNOWN if the state is too new for us to parse.
     */
    public ConsumerGroupState state() {
        return state;
    }

    /**
     * The consumer group coordinator, or null if the coordinator is not known.
     */
    public Node coordinator() {
        return coordinator;
    }

    /**
     * authorizedOperations for this group, or null if that information is not known.
     */
    public Set<AclOperation> authorizedOperations() {
        return authorizedOperations;
    }

    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder("(groupId=").append(groupId);
        sb.append(", isSimpleConsumerGroup=").append(isSimpleConsumerGroup);
        sb.append(", members=").append(Utils.join(members, ","));
        sb.append(", partitionAssignor=").append(partitionAssignor);
        sb.append(", state=").append(state);
        sb.append(", coordinator=").append(coordinator);
        sb.append(", authorizedOperations=").append(authorizedOperations);
        sb.append(")");
        return sb.toString();
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/ConsumerGroupListing.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.Objects;
import java.util.Optional;
import org.apache.kafka.common.ConsumerGroupState;
/**
 * A listing of a consumer group in the cluster.
 */
public class ConsumerGroupListing {

    private final String groupId;
    private final boolean isSimpleConsumerGroup;
    private final Optional<ConsumerGroupState> state;

    /**
     * Create an instance with the specified parameters.
     *
     * @param groupId Group Id
     * @param isSimpleConsumerGroup If consumer group is simple or not.
     */
    public ConsumerGroupListing(String groupId, boolean isSimpleConsumerGroup) {
        this(groupId, isSimpleConsumerGroup, Optional.empty());
    }

    /**
     * Create an instance with the specified parameters.
     *
     * @param groupId Group Id
     * @param isSimpleConsumerGroup If consumer group is simple or not.
     * @param state The state of the consumer group
     * @throws NullPointerException if {@code state} is null (use {@link Optional#empty()} instead)
     */
    public ConsumerGroupListing(String groupId, boolean isSimpleConsumerGroup, Optional<ConsumerGroupState> state) {
        this.groupId = groupId;
        this.isSimpleConsumerGroup = isSimpleConsumerGroup;
        this.state = Objects.requireNonNull(state);
    }

    /**
     * Consumer Group Id
     */
    public String groupId() {
        return groupId;
    }

    /**
     * If Consumer Group is simple or not.
     */
    public boolean isSimpleConsumerGroup() {
        return isSimpleConsumerGroup;
    }

    /**
     * Consumer Group state
     */
    public Optional<ConsumerGroupState> state() {
        return state;
    }

    @Override
    public String toString() {
        return "(" +
            "groupId='" + groupId + '\'' +
            ", isSimpleConsumerGroup=" + isSimpleConsumerGroup +
            ", state=" + state +
            ')';
    }

    @Override
    public int hashCode() {
        return Objects.hash(groupId, isSimpleConsumerGroup, state);
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj)
            return true;
        if (obj == null || getClass() != obj.getClass())
            return false;
        ConsumerGroupListing that = (ConsumerGroupListing) obj;
        return isSimpleConsumerGroup == that.isSimpleConsumerGroup
            && Objects.equals(groupId, that.groupId)
            && Objects.equals(state, that.state);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/CreateAclsOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
/**
 * Options for {@link Admin#createAcls(Collection)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class CreateAclsOptions extends AbstractOptions<CreateAclsOptions> {
    /**
     * Set the timeout in milliseconds for this operation or {@code null} if the default api timeout for the
     * AdminClient should be used.
     *
     * @param timeoutMs the timeout in milliseconds, or {@code null} for the AdminClient default
     * @return this options instance, to allow call chaining
     */
    // This method is retained to keep binary compatibility with 0.11
    public CreateAclsOptions timeoutMs(Integer timeoutMs) {
        this.timeoutMs = timeoutMs;
        return this;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/CreateAclsResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.acl.AclBinding;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.Map;
/**
 * The result of the {@link Admin#createAcls(Collection)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class CreateAclsResult {

    // Outcome of each requested ACL creation, keyed by the binding that was submitted.
    private final Map<AclBinding, KafkaFuture<Void>> creationFutures;

    CreateAclsResult(Map<AclBinding, KafkaFuture<Void>> futures) {
        this.creationFutures = futures;
    }

    /**
     * Return a map from ACL bindings to futures which can be used to check the status of the creation of each ACL
     * binding.
     */
    public Map<AclBinding, KafkaFuture<Void>> values() {
        return creationFutures;
    }

    /**
     * Return a future which succeeds only if all the ACL creations succeed.
     */
    public KafkaFuture<Void> all() {
        KafkaFuture<?>[] pending = creationFutures.values().toArray(new KafkaFuture[0]);
        return KafkaFuture.allOf(pending);
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/CreateDelegationTokenOptions.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import java.util.LinkedList;
import java.util.List;
import java.util.Optional;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.security.auth.KafkaPrincipal;
/**
 * Options for {@link Admin#createDelegationToken(CreateDelegationTokenOptions)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class CreateDelegationTokenOptions extends AbstractOptions<CreateDelegationTokenOptions> {

    // -1 is the "not set" sentinel for the maximum token lifetime.
    private long maxLifeTimeMs = -1;
    private List<KafkaPrincipal> renewers = new LinkedList<>();
    private KafkaPrincipal owner = null;

    /** Set the principals permitted to renew the token. */
    public CreateDelegationTokenOptions renewers(List<KafkaPrincipal> renewers) {
        this.renewers = renewers;
        return this;
    }

    /** The principals permitted to renew the token (empty by default). */
    public List<KafkaPrincipal> renewers() {
        return renewers;
    }

    /** Set the principal that will own the token. */
    public CreateDelegationTokenOptions owner(KafkaPrincipal owner) {
        this.owner = owner;
        return this;
    }

    /** The token owner, present only if one was explicitly set. */
    public Optional<KafkaPrincipal> owner() {
        return Optional.ofNullable(owner);
    }

    /**
     * Set the maximum lifetime of the token in milliseconds.
     * (The lowercase "l" in the method name is retained for API compatibility.)
     */
    public CreateDelegationTokenOptions maxlifeTimeMs(long maxLifeTimeMs) {
        this.maxLifeTimeMs = maxLifeTimeMs;
        return this;
    }

    /** The requested maximum token lifetime in milliseconds, or -1 if not set. */
    public long maxlifeTimeMs() {
        return maxLifeTimeMs;
    }
}
|
0 | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients | java-sources/ai/superstream/kafka-clients/3.5.118/org/apache/kafka/clients/admin/CreateDelegationTokenResult.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.clients.admin;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import org.apache.kafka.common.security.token.delegation.DelegationToken;
/**
 * The result of the {@link KafkaAdminClient#createDelegationToken(CreateDelegationTokenOptions)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class CreateDelegationTokenResult {

    // Future completed with the newly created token once the broker responds.
    private final KafkaFuture<DelegationToken> tokenFuture;

    CreateDelegationTokenResult(KafkaFuture<DelegationToken> delegationToken) {
        this.tokenFuture = delegationToken;
    }

    /**
     * Returns a future which yields a delegation token
     */
    public KafkaFuture<DelegationToken> delegationToken() {
        return tokenFuture;
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.