id stringlengths 29 30 | content stringlengths 152 2.6k |
|---|---|
codereview_new_java_data_8399 |
* limitations under the License.
*/
/**
- * Reusable implementations of cache primitives
*/
package org.apache.kafka.common.cache;
\ No newline at end of file
```suggestion
* Reusable implementations of cache primitives.
* <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
```
* limitations under the License.
*/
/**
+ * Provides reusable implementations of cache primitives.
+ * <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
*/
package org.apache.kafka.common.cache;
\ No newline at end of file |
codereview_new_java_data_8400 |
* limitations under the License.
*/
/**
- * Mechanisms for compressing data handled by Kafka
*/
package org.apache.kafka.common.compress;
\ No newline at end of file
```suggestion
* Mechanisms for compressing data handled by Kafka.
* <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
```
* limitations under the License.
*/
/**
+ * Provides mechanisms for compressing data handled by Kafka.
+ * <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
*/
package org.apache.kafka.common.compress;
\ No newline at end of file |
codereview_new_java_data_8401 |
* limitations under the License.
*/
/**
- * Standard mechanisms for defining, parsing, validating, and documenting user-configurable parameters
*/
package org.apache.kafka.common.config;
\ No newline at end of file
```suggestion
* Provides common mechanisms for defining, parsing, validating, and documenting user-configurable parameters.
```
* limitations under the License.
*/
/**
+ * Provides common mechanisms for defining, parsing, validating, and documenting user-configurable parameters.
*/
package org.apache.kafka.common.config;
\ No newline at end of file |
codereview_new_java_data_8402 |
* limitations under the License.
*/
/**
- * Pluggable interface and implementations for late-binding in configuration values
*/
package org.apache.kafka.common.config.provider;
\ No newline at end of file
```suggestion
* Provides a pluggable interface and some implementations for late-binding in configuration values.
```
* limitations under the License.
*/
/**
+ * Provides a pluggable interface and some implementations for late-binding in configuration values.
*/
package org.apache.kafka.common.config.provider;
\ No newline at end of file |
codereview_new_java_data_8403 |
* limitations under the License.
*/
/**
- * Custom non-primitive types of configuration properties
*/
package org.apache.kafka.common.config.types;
\ No newline at end of file
```suggestion
* Provides custom non-primitive types of configuration properties.
* <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
```
* limitations under the License.
*/
/**
+ * Provides custom non-primitive types of configuration properties.
+ * <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
*/
package org.apache.kafka.common.config.types;
\ No newline at end of file |
codereview_new_java_data_8404 |
* limitations under the License.
*/
/**
- * Errors that are thrown by Kafka Clients
*/
package org.apache.kafka.common.errors;
\ No newline at end of file
```suggestion
* Provides common exception classes.
```
* limitations under the License.
*/
/**
+ * Provides common exception classes.
*/
package org.apache.kafka.common.errors;
\ No newline at end of file |
codereview_new_java_data_8405 |
* limitations under the License.
*/
/**
- * Provides API for application defined metadata attached to Kafka records.
*/
package org.apache.kafka.common.header;
\ No newline at end of file
```suggestion
* Provides API for application-defined metadata attached to Kafka records.
```
* limitations under the License.
*/
/**
+ * Provides API for application-defined metadata attached to Kafka records.
*/
package org.apache.kafka.common.header;
\ No newline at end of file |
codereview_new_java_data_8406 |
* limitations under the License.
*/
/**
- * Provides mechanism for sending to and receiving data from remote machines.
* <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
*/
package org.apache.kafka.common.network;
\ No newline at end of file
```suggestion
* Provides the network API used by the Kafka clients.
```
* limitations under the License.
*/
/**
+ * Provides the network API used by the Kafka clients
* <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
*/
package org.apache.kafka.common.network;
\ No newline at end of file |
codereview_new_java_data_8407 |
* limitations under the License.
*/
/**
- * Provide classes representing a single record in a Kafka topic and/or partition.
* <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
*/
package org.apache.kafka.common.record;
\ No newline at end of file
```suggestion
* Provides the low-level representation of records and record batches used by clients and servers.
```
* limitations under the License.
*/
/**
+ * Provides the low-level representation of records and record batches used by clients and servers.
* <strong>This package is not a supported Kafka API; the implementation may change without warning between minor or patch releases.</strong>
*/
package org.apache.kafka.common.record;
\ No newline at end of file |
codereview_new_java_data_8408 |
* limitations under the License.
*/
/**
- * Provides pluggable interface for implementing Kafka authentication mechanisms.
*/
package org.apache.kafka.common.security.auth;
\ No newline at end of file
```suggestion
* Provides pluggable interfaces for implementing Kafka authentication mechanisms.
```
* limitations under the License.
*/
/**
+ * Provides pluggable interfaces for implementing Kafka authentication mechanisms.
*/
package org.apache.kafka.common.security.auth;
\ No newline at end of file |
codereview_new_java_data_8409 |
* limitations under the License.
*/
/**
- * Provides adaptor for using OAuth Bearer Token Authentication for securing Kafka clusters.
*/
package org.apache.kafka.common.security.oauthbearer;
\ No newline at end of file
```suggestion
* Provides a {@code LoginModule} for using OAuth Bearer Token authentication with Kafka clusters.
```
* limitations under the License.
*/
/**
+ * Provides a {@link javax.security.auth.spi.LoginModule} for using OAuth Bearer Token authentication with Kafka clusters.
*/
package org.apache.kafka.common.security.oauthbearer;
\ No newline at end of file |
codereview_new_java_data_8410 |
* limitations under the License.
*/
/**
- * Provides pluggable interface for authorization policies.
*/
package org.apache.kafka.server.policy;
\ No newline at end of file
```suggestion
* Provides pluggable interfaces for expressing policies on topics and configs.
```
* limitations under the License.
*/
/**
+ * Provides pluggable interfaces for expressing policies on topics and configs.
*/
package org.apache.kafka.server.policy;
\ No newline at end of file |
codereview_new_java_data_8411 | void createPartitions(CreatePartitionsTopic topic,
isrs.add(isr);
}
} else {
- partitionAssignments = clusterControl.replicaPlacer().place(new PlacementSpec(
- startPartitionId,
- additional,
- replicationFactor
- ), clusterDescriber).assignments();
- isrs = partitionAssignments.stream().map(x -> x.replicas()).collect(Collectors.toList());
}
int partitionId = startPartitionId;
for (int i = 0; i < partitionAssignments.size(); i++) {
How about:
```java
isrs = partitionAssignments.stream().map(PartitionAssignment::replicas).collect(Collectors.toList());
```
void createPartitions(CreatePartitionsTopic topic,
isrs.add(isr);
}
} else {
+ partitionAssignments = clusterControl.replicaPlacer().place(
+ new PlacementSpec(startPartitionId, additional, replicationFactor),
+ clusterDescriber
+ ).assignments();
+ isrs = partitionAssignments.stream().map(PartitionAssignment::replicas).collect(Collectors.toList());
}
int partitionId = startPartitionId;
for (int i = 0; i < partitionAssignments.size(); i++) { |
codereview_new_java_data_8412 | void createPartitions(CreatePartitionsTopic topic,
isrs.add(isr);
}
} else {
- partitionAssignments = clusterControl.replicaPlacer().place(new PlacementSpec(
- startPartitionId,
- additional,
- replicationFactor
- ), clusterDescriber).assignments();
- isrs = partitionAssignments.stream().map(x -> x.replicas()).collect(Collectors.toList());
}
int partitionId = startPartitionId;
for (int i = 0; i < partitionAssignments.size(); i++) {
I think we can improve the formatting with something like:
```java
partitionAssignments = clusterControl.replicaPlacer().place(
new PlacementSpec(...),
clusterDescriber
).assignments()
```
void createPartitions(CreatePartitionsTopic topic,
isrs.add(isr);
}
} else {
+ partitionAssignments = clusterControl.replicaPlacer().place(
+ new PlacementSpec(startPartitionId, additional, replicationFactor),
+ clusterDescriber
+ ).assignments();
+ isrs = partitionAssignments.stream().map(PartitionAssignment::replicas).collect(Collectors.toList());
}
int partitionId = startPartitionId;
for (int i = 0; i < partitionAssignments.size(); i++) { |
codereview_new_java_data_8413 | public TopicAssignment place(
for (int partition = 0; partition < placement.numPartitions(); partition++) {
placements.add(rackList.place(placement.numReplicas()));
}
- return new TopicAssignment(placements.stream().map(x -> new PartitionAssignment(x)).collect(Collectors.toList()));
}
}
How about:
```java
return new TopicAssignment(
placements.stream().map(PartitionAssignment::new).collect(Collectors.toList())
);
```
public TopicAssignment place(
for (int partition = 0; partition < placement.numPartitions(); partition++) {
placements.add(rackList.place(placement.numReplicas()));
}
+ return new TopicAssignment(
+ placements.stream().map(PartitionAssignment::new).collect(Collectors.toList())
+ );
}
} |
codereview_new_java_data_8418 | public void tick() {
} else if (now >= next.at) {
requests.pollFirst();
} else {
- scheduledTick = Math.min(scheduledTick, next.at);
break;
}
We will only reach this line if `scheduledTick` is `Long.MAX_VALUE`:
```suggestion
scheduledTick = next.at;
```
public void tick() {
} else if (now >= next.at) {
requests.pollFirst();
} else {
+ scheduledTick = next.at;
break;
}
|
codereview_new_java_data_8419 |
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.utils.ExponentialBackoff;
-// Visible for testing
class RequestState {
final static int RECONNECT_BACKOFF_EXP_BASE = 2;
final static double RECONNECT_BACKOFF_JITTER = 0.2;
We can get rid of these comments. A general-purpose javadoc would be good though.
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.utils.ExponentialBackoff;
class RequestState {
final static int RECONNECT_BACKOFF_EXP_BASE = 2;
final static double RECONNECT_BACKOFF_JITTER = 0.2; |
codereview_new_java_data_8420 | public FindCoordinatorResponse(FindCoordinatorResponseData data) {
this.data = data;
}
- public Optional<Coordinator> getCoordinatorByKey(String key) {
Objects.requireNonNull(key);
if (this.data.coordinators().isEmpty()) {
// version <= 3
nit: usually we drop `get` from method names
public FindCoordinatorResponse(FindCoordinatorResponseData data) {
this.data = data;
}
+ public Optional<Coordinator> coordinatorByKey(String key) {
Objects.requireNonNull(key);
if (this.data.coordinators().isEmpty()) {
// version <= 3 |
codereview_new_java_data_8421 | public String toString() {
return type + " ApplicationEvent";
}
public enum Type {
- NOOP,
- COMMIT,
}
}
Perhaps we can leave this for a follow-up since we are not implementing the COMMIT type here.
public String toString() {
return type + " ApplicationEvent";
}
public enum Type {
+ NOOP, COMMIT,
}
} |
codereview_new_java_data_8422 |
*/
package org.apache.kafka.migration;
public class ZkControllerState {
public static final ZkControllerState EMPTY = new ZkControllerState(-1, -1, -1);
Can you add a comment explaining this state as well?
*/
package org.apache.kafka.migration;
+/**
+ * The ZooKeeper state of the KRaft controller when it is elected as the active controller.
+ */
public class ZkControllerState {
public static final ZkControllerState EMPTY = new ZkControllerState(-1, -1, -1);
|
codereview_new_java_data_8424 | public LeaveGroupResponse(LeaveGroupResponseData data, short version) {
" can only contain one member, got " + data.members().size() + " members.");
}
- this.data = new LeaveGroupResponseData()
- .setErrorCode(getError(Errors.forCode(data.errorCode()), data.members()).code());
}
}
This seems to be normalizing the response? Seems like we just need to set the error code, but its done in a way that's a little confusing. Why do we need to getError when we have the data.errorCode?
Is it because it can be the top level error OR the single member error?
public LeaveGroupResponse(LeaveGroupResponseData data, short version) {
" can only contain one member, got " + data.members().size() + " members.");
}
+ Errors topLevelError = Errors.forCode(data.errorCode());
+ short errorCode = getError(topLevelError, data.members()).code();
+ this.data = new LeaveGroupResponseData().setErrorCode(errorCode);
}
}
|
codereview_new_java_data_8433 | public Connect startConnect(Map<String, String> workerProps) {
// herder is stopped. This is easier than having to track and own the lifecycle ourselves.
DistributedHerder herder = new DistributedHerder(config, time, worker,
kafkaClusterId, statusBackingStore, configBackingStore,
- advertisedUrl.toString(), restClient, connectorClientConfigOverridePolicy, sharedAdmin, restClient);
final Connect connect = new Connect(herder, rest);
log.info("Kafka Connect distributed worker initialization took {}ms", time.hiResClockMs() - initStart);
Isn't it a bit of an antipattern to have to pass in the client as part of the `uponShutdown` list? Is there any case where we'd want to instantiate a `DistributedHerder` with a client, but not have that client be shut down at the same time as the herder?
I'm wondering if we can just automatically close the client in `DistributedHerder::stop`.
public Connect startConnect(Map<String, String> workerProps) {
// herder is stopped. This is easier than having to track and own the lifecycle ourselves.
DistributedHerder herder = new DistributedHerder(config, time, worker,
kafkaClusterId, statusBackingStore, configBackingStore,
+ advertisedUrl.toString(), restClient, connectorClientConfigOverridePolicy, sharedAdmin);
final Connect connect = new Connect(herder, rest);
log.info("Kafka Connect distributed worker initialization took {}ms", time.hiResClockMs() - initStart); |
codereview_new_java_data_8434 | public class DistributedHerderTest {
public void setUp() throws Exception {
time = new MockTime();
metrics = new MockConnectMetrics(time);
- restClient = PowerMock.createMock(RestClient.class);
worker = PowerMock.createMock(Worker.class);
EasyMock.expect(worker.isSinkConnector(CONN1)).andStubReturn(Boolean.TRUE);
AutoCloseable uponShutdown = () -> shutdownCalled.countDown();
Do we need this? The `restClient` field is already annotated with `@Mock`.
public class DistributedHerderTest {
public void setUp() throws Exception {
time = new MockTime();
metrics = new MockConnectMetrics(time);
worker = PowerMock.createMock(Worker.class);
EasyMock.expect(worker.isSinkConnector(CONN1)).andStubReturn(Boolean.TRUE);
AutoCloseable uponShutdown = () -> shutdownCalled.countDown(); |
codereview_new_java_data_8435 | ConfigInfos validateConnectorConfig(Map<String, String> connectorProps, boolean
throw new BadRequestException("Connector config " + connectorProps + " contains no connector type");
Connector connector = getConnector(connType);
- org.apache.kafka.connect.health.ConnectorType connectorType;
ClassLoader connectorLoader = plugins().connectorLoader(connType);
- ConfigDef enrichedConfigDef;
- Map<String, ConfigValue> validatedConnectorConfig;
try (LoaderSwap loaderSwap = plugins().withClassLoader(connectorLoader)) {
if (connector instanceof SourceConnector) {
connectorType = org.apache.kafka.connect.health.ConnectorType.SOURCE;
enrichedConfigDef = ConnectorConfig.enrich(plugins(), SourceConnectorConfig.configDef(), connectorProps, false);
Nit: can we move these inside the `try` block?
ConfigInfos validateConnectorConfig(Map<String, String> connectorProps, boolean
throw new BadRequestException("Connector config " + connectorProps + " contains no connector type");
Connector connector = getConnector(connType);
ClassLoader connectorLoader = plugins().connectorLoader(connType);
try (LoaderSwap loaderSwap = plugins().withClassLoader(connectorLoader)) {
+ org.apache.kafka.connect.health.ConnectorType connectorType;
+ ConfigDef enrichedConfigDef;
+ Map<String, ConfigValue> validatedConnectorConfig;
if (connector instanceof SourceConnector) {
connectorType = org.apache.kafka.connect.health.ConnectorType.SOURCE;
enrichedConfigDef = ConnectorConfig.enrich(plugins(), SourceConnectorConfig.configDef(), connectorProps, false); |
codereview_new_java_data_8436 | private void handleProduceResponse(ClientResponse response, Map<TopicPartition,
RequestHeader requestHeader = response.requestHeader();
int correlationId = requestHeader.correlationId();
if (response.wasTimedOut()) {
- log.trace("Cancelled request with header {} due to node {} being disconnected due to timeout",
requestHeader, response.destination());
for (ProducerBatch batch : batches.values())
completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.REQUEST_TIMED_OUT, String.format("Disconnected from node %s due to timeout", response.destination())),
very nit: `due to the last request timed out`? just to distinguish from connection timeout.
private void handleProduceResponse(ClientResponse response, Map<TopicPartition,
RequestHeader requestHeader = response.requestHeader();
int correlationId = requestHeader.correlationId();
if (response.wasTimedOut()) {
+ log.trace("Cancelled request with header {} due to the last request to node {} timed out",
requestHeader, response.destination());
for (ProducerBatch batch : batches.values())
completeBatch(batch, new ProduceResponse.PartitionResponse(Errors.REQUEST_TIMED_OUT, String.format("Disconnected from node %s due to timeout", response.destination())), |
codereview_new_java_data_8437 | public void handle(SyncGroupResponse syncResponse,
}
/**
- * Discover the current coordinator for the group. Sends a GroupMetadata request to
* one of the brokers. The returned future should be polled to get the result of the request.
* @return A request future which indicates the completion of the metadata request
*/
The javadoc above also mention the GroupMetadata request. Can we update that as well?
public void handle(SyncGroupResponse syncResponse,
}
/**
+ * Discover the current coordinator for the group. Sends a FindCoordinator request to
* one of the brokers. The returned future should be polled to get the result of the request.
* @return A request future which indicates the completion of the metadata request
*/ |
codereview_new_java_data_8438 | private <VR, KO, VO> KTable<K, VR> doJoinOnForeignKey(final KTable<KO, VO> forei
Objects.requireNonNull(tableJoined, "tableJoined can't be null");
Objects.requireNonNull(materialized, "materialized can't be null");
-
//Old values are a useful optimization. The old values from the foreignKeyTable table are compared to the new values,
//such that identical values do not cause a prefixScan. PrefixScan and propagation can be expensive and should
//not be done needlessly.
nit: accidental extra line?
private <VR, KO, VO> KTable<K, VR> doJoinOnForeignKey(final KTable<KO, VO> forei
Objects.requireNonNull(tableJoined, "tableJoined can't be null");
Objects.requireNonNull(materialized, "materialized can't be null");
//Old values are a useful optimization. The old values from the foreignKeyTable table are compared to the new values,
//such that identical values do not cause a prefixScan. PrefixScan and propagation can be expensive and should
//not be done needlessly. |
codereview_new_java_data_8439 | protected void stopServices() {
// Timeout for herderExecutor to gracefully terminate is set to a value to accommodate
// reading to the end of the config topic + successfully attempting to stop all connectors and tasks and a buffer of 10s
- private long getHerderExecutorTimeoutMs() {
return this.workerSyncTimeoutMs +
config.getInt(DistributedConfig.WORKER_SYNC_TIMEOUT_MS_CONFIG) +
Worker.CONNECTOR_GRACEFUL_SHUTDOWN_TIMEOUT_MS + 10000;
Nit: We don't use `get` in this code base
```suggestion
private long herderExecutorTimeoutMs() {
```
Also, is there a reason this has to be a separate method at all? It's only called in one place right now and the logic isn't really complicated enough to warrant isolated unit testing.
protected void stopServices() {
// Timeout for herderExecutor to gracefully terminate is set to a value to accommodate
// reading to the end of the config topic + successfully attempting to stop all connectors and tasks and a buffer of 10s
+ private long herderExecutorTimeoutMs() {
return this.workerSyncTimeoutMs +
config.getInt(DistributedConfig.WORKER_SYNC_TIMEOUT_MS_CONFIG) +
Worker.CONNECTOR_GRACEFUL_SHUTDOWN_TIMEOUT_MS + 10000; |
codereview_new_java_data_8440 | protected void stopServices() {
// reading to the end of the config topic + successfully attempting to stop all connectors and tasks and a buffer of 10s
private long herderExecutorTimeoutMs() {
return this.workerSyncTimeoutMs +
- config.getInt(DistributedConfig.WORKER_SYNC_TIMEOUT_MS_CONFIG) +
Worker.CONNECTOR_GRACEFUL_SHUTDOWN_TIMEOUT_MS + 10000;
}
Wait a minute, is this right? Why are we using the sync timeout twice here?
protected void stopServices() {
// reading to the end of the config topic + successfully attempting to stop all connectors and tasks and a buffer of 10s
private long herderExecutorTimeoutMs() {
return this.workerSyncTimeoutMs +
+ config.getLong(DistributedConfig.TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG) +
Worker.CONNECTOR_GRACEFUL_SHUTDOWN_TIMEOUT_MS + 10000;
}
|
codereview_new_java_data_8442 | public Future<Void> set(final Map<ByteBuffer, ByteBuffer> values, final Callback
return producerCallback;
}
- protected final Callback<ConsumerRecord<byte[], byte[]>> consumedCallback = new Callback<ConsumerRecord<byte[], byte[]>>() {
- @Override
- public void onCompletion(Throwable error, ConsumerRecord<byte[], byte[]> record) {
- ByteBuffer key = record.key() != null ? ByteBuffer.wrap(record.key()) : null;
- ByteBuffer value = record.value() != null ? ByteBuffer.wrap(record.value()) : null;
- data.put(key, value);
}
};
I know this isn't a new bug introduced by this change, but we should probably add error-handling logic here:
```suggestion
if (error != null) {
log.error("Failed to read from offsets topic", error);
return;
}
ByteBuffer key = record.key() != null ? ByteBuffer.wrap(record.key()) : null;
```
public Future<Void> set(final Map<ByteBuffer, ByteBuffer> values, final Callback
return producerCallback;
}
+ protected final Callback<ConsumerRecord<byte[], byte[]>> consumedCallback = (error, record) -> {
+ if (error != null) {
+ log.error("Failed to read from the offsets topic", error);
+ return;
+ }
+
+ ByteBuffer key = record.key() != null ? ByteBuffer.wrap(record.key()) : null;
+
+ if (record.value() == null) {
+ data.remove(key);
+ } else {
+ data.put(key, ByteBuffer.wrap(record.value()));
}
};
|
codereview_new_java_data_8443 | private enum SourceSink {
@Before
public void setup() {
worker = PowerMock.createMock(Worker.class);
- String[] methodNames = new String[]{"connectorType"/*, "validateConnectorConfig"*/, "buildRestartPlan", "recordRestarting"};
herder = PowerMock.createPartialMock(StandaloneHerder.class, methodNames,
worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, new MemoryConfigBackingStore(transformer), noneConnectorClientConfigOverridePolicy);
createCallback = new FutureCallback<>();
While we're in the neighborhood?
```suggestion
String[] methodNames = new String[]{"connectorType", "buildRestartPlan", "recordRestarting"};
```
private enum SourceSink {
@Before
public void setup() {
worker = PowerMock.createMock(Worker.class);
+ String[] methodNames = new String[]{"connectorType", "buildRestartPlan", "recordRestarting"};
herder = PowerMock.createPartialMock(StandaloneHerder.class, methodNames,
worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, new MemoryConfigBackingStore(transformer), noneConnectorClientConfigOverridePolicy);
createCallback = new FutureCallback<>(); |
codereview_new_java_data_8452 | public void commitSync(final Duration timeout) {
* @return The set of topics currently subscribed to
*/
public Set<String> subscription() {
- return Collections.unmodifiableSet(new HashSet<>(this.subscriptions.subscription()));
}
/**
Why create a new, intermediate `HashSet` instead of just:
```suggestion
return Collections.unmodifiableSet(this.subscriptions.subscription());
```
public void commitSync(final Duration timeout) {
* @return The set of topics currently subscribed to
*/
public Set<String> subscription() {
+ return Collections.unmodifiableSet(this.subscriptions.subscription());
}
/** |
codereview_new_java_data_8453 | public void commitSync(final Duration timeout) {
* @return The set of topics currently subscribed to
*/
public Set<String> subscription() {
- return Collections.unmodifiableSet(new HashSet<>(this.subscriptions.subscription()));
}
/**
```suggestion
Set<String> subscription() {
```
I don't think this method should be `public`, should it? Let's do what we can to stick to the `Consumer` API.
public void commitSync(final Duration timeout) {
* @return The set of topics currently subscribed to
*/
public Set<String> subscription() {
+ return Collections.unmodifiableSet(this.subscriptions.subscription());
}
/** |
codereview_new_java_data_8454 | public void testRefreshTopicPartitionsTopicOnTargetFirst() throws Exception {
}
@Test
- public void testIsCycleWithNullUpstreamTopic() throws Exception {
class BadReplicationPolicy extends DefaultReplicationPolicy {
@Override
public String upstreamTopic(String topic) {
What about `CustomReplicationPolicy` as it's not really a bad policy, `null` is allowed
public void testRefreshTopicPartitionsTopicOnTargetFirst() throws Exception {
}
@Test
+ public void testIsCycleWithNullUpstreamTopic() {
class BadReplicationPolicy extends DefaultReplicationPolicy {
@Override
public String upstreamTopic(String topic) { |
codereview_new_java_data_8455 | public void shouldNotTransitToStandbyAgainAfterStandbyTaskFailed() throws Except
mkEntry(task1.id(), task1),
mkEntry(task2.id(), task2)
);
- final TaskCorruptedException taskCorruptedException =
- new TaskCorruptedException(mkSet(task1.id()));
- final ExceptionAndTasks expectedExceptionAndTasks =
- new ExceptionAndTasks(mkSet(task1), taskCorruptedException);
when(changelogReader.allChangelogsCompleted()).thenReturn(false);
doThrow(taskCorruptedException).doNothing().when(changelogReader).restore(updatingTasks);
nit:
```suggestion
final ExceptionAndTasks expectedExceptionAndTasks = new ExceptionAndTasks(mkSet(task1), taskCorruptedException);
```
public void shouldNotTransitToStandbyAgainAfterStandbyTaskFailed() throws Except
mkEntry(task1.id(), task1),
mkEntry(task2.id(), task2)
);
+ final TaskCorruptedException taskCorruptedException = new TaskCorruptedException(mkSet(task1.id()));
+ final ExceptionAndTasks expectedExceptionAndTasks = new ExceptionAndTasks(mkSet(task1), taskCorruptedException);
when(changelogReader.allChangelogsCompleted()).thenReturn(false);
doThrow(taskCorruptedException).doNothing().when(changelogReader).restore(updatingTasks);
|
codereview_new_java_data_8456 | default void beginShutdown() {}
* Returns the snapshot id of the latest snapshot, if it exists. If a snapshot doesn't exists, returns an
* {@link Optional#empty()}.
*
- * @return the snapshot of the latest snaphost, if it exists
*/
Optional<OffsetAndEpoch> latestSnapshotId();
}
%s/the snapshot of/ the id of/
default void beginShutdown() {}
* Returns the snapshot id of the latest snapshot, if it exists. If a snapshot doesn't exists, returns an
* {@link Optional#empty()}.
*
+ * @return the id of the latest snaphost, if it exists
*/
Optional<OffsetAndEpoch> latestSnapshotId();
} |
codereview_new_java_data_8459 | private void handleTaskMigrated(final TaskMigratedException e) {
subscribeConsumer();
}
- public long getCacheSize() {
- return cacheResizeSize.get();
- }
-
private void subscribeConsumer() {
if (topologyMetadata.usesPatternSubscription()) {
mainConsumer.subscribe(topologyMetadata.sourceTopicPattern(), rebalanceListener);
Unfortunately I don't think we can reliably use this variable to read out the current cache size, since we erase it immediately after reading it out to resize the cache. We might need to introduce a new variable if we need the thread's cache size
private void handleTaskMigrated(final TaskMigratedException e) {
subscribeConsumer();
}
private void subscribeConsumer() {
if (topologyMetadata.usesPatternSubscription()) {
mainConsumer.subscribe(topologyMetadata.sourceTopicPattern(), rebalanceListener); |
codereview_new_java_data_8460 | public BuiltInPartitioner(LogContext logContext, String topic, int stickyBatchSi
this.log = logContext.logger(BuiltInPartitioner.class);
this.topic = topic;
if (stickyBatchSize < 1) {
- throw new IllegalArgumentException("stickyBatchSize must at least 1");
}
this.stickyBatchSize = stickyBatchSize;
}
You should include the value of `stickyBatchSize` in the message.
public BuiltInPartitioner(LogContext logContext, String topic, int stickyBatchSi
this.log = logContext.logger(BuiltInPartitioner.class);
this.topic = topic;
if (stickyBatchSize < 1) {
+ throw new IllegalArgumentException("stickyBatchSize must be >= 1 but got " + stickyBatchSize);
}
this.stickyBatchSize = stickyBatchSize;
} |
codereview_new_java_data_8461 | public RecordAccumulator(LogContext logContext,
this.closed = false;
this.flushesInProgress = new AtomicInteger(0);
this.appendsInProgress = new AtomicInteger(0);
- // As per Kafka producer configuration documentation batch.size may be set to 0 to explicitly disable
- // batching which in practice actually means using a batch size of 1.
- this.batchSize = Math.max(1, batchSize);
this.compression = compression;
this.lingerMs = lingerMs;
this.retryBackoffMs = retryBackoffMs;
Would it make sense to do this in `KafkaProducer` when the config is retrieved?
public RecordAccumulator(LogContext logContext,
this.closed = false;
this.flushesInProgress = new AtomicInteger(0);
this.appendsInProgress = new AtomicInteger(0);
+ this.batchSize = batchSize;
this.compression = compression;
this.lingerMs = lingerMs;
this.retryBackoffMs = retryBackoffMs; |
codereview_new_java_data_8463 | public String toString() {
"topics=" + topics +
(userData == null ? "" : ", userDataSize=" + userData.remaining()) +
", ownedPartitions=" + ownedPartitions +
- ", groupInstanceId=" + (groupInstanceId.map(String::toString).orElse("null")) +
- ", generationId=" + (generationId.orElse(-1)) +
")";
}
}
nit: Are the parenthesis needed here?
public String toString() {
"topics=" + topics +
(userData == null ? "" : ", userDataSize=" + userData.remaining()) +
", ownedPartitions=" + ownedPartitions +
+ ", groupInstanceId=" + groupInstanceId.map(String::toString).orElse("null") +
+ ", generationId=" + generationId.orElse(-1) +
")";
}
} |
codereview_new_java_data_8464 | private boolean allSubscriptionsEqual(Set<String> allTopics,
// visible for testing
MemberData memberDataFromSubscription(Subscription subscription) {
- if (!subscription.ownedPartitions().isEmpty() && subscription.generationId().isPresent()) {
// In ConsumerProtocolSubscription v2 or higher, we don't need to deserialize the byte buffer
// and take from fields directly
return new MemberData(subscription.ownedPartitions(), subscription.generationId());
I wonder if this is 100% correct. Could we have a case where subscription v3 is used but no owned partitions are reported by the member? For instance, this could happen if the group has more members than partitions.
private boolean allSubscriptionsEqual(Set<String> allTopics,
// visible for testing
MemberData memberDataFromSubscription(Subscription subscription) {
+ if (subscription.generationId().isPresent()) {
// In ConsumerProtocolSubscription v2 or higher, we don't need to deserialize the byte buffer
// and take from fields directly
return new MemberData(subscription.ownedPartitions(), subscription.generationId()); |
codereview_new_java_data_8465 | public static String[] enumOptions(Class<? extends Enum<?>> enumClass) {
* @return string value of a given timestamp in the format "yyyy-MM-dd HH:mm:ss,SSS"
*/
public static String toLogDateTimeFormat(long timestamp) {
- final DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss,SSS");
return Instant.ofEpochMilli(timestamp).atZone(ZoneId.systemDefault()).format(dateTimeFormatter);
}
Would it be possible to add the timezone to the pattern? For example, assuming the system default is UTC+02:00, the time could contain +02:00.
public static String[] enumOptions(Class<? extends Enum<?>> enumClass) {
* @return string value of a given timestamp in the format "yyyy-MM-dd HH:mm:ss,SSS"
*/
public static String toLogDateTimeFormat(long timestamp) {
+ final DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss,SSS XXX");
return Instant.ofEpochMilli(timestamp).atZone(ZoneId.systemDefault()).format(dateTimeFormatter);
}
|
codereview_new_java_data_8466 | public void shouldUpgradeWithTopologyOptimizationOff() throws Exception {
@Test
@SuppressWarnings("unchecked")
- public void shouldUpgradeWithTopologyOptimizationOn() throws Exception {
final StreamsBuilder streamsBuilderOld = new StreamsBuilder();
final KStream<String, String> leftOld = streamsBuilderOld.stream(
In what sense is this an upgrade? It looks like a restart.
public void shouldUpgradeWithTopologyOptimizationOff() throws Exception {
@Test
@SuppressWarnings("unchecked")
+ public void shouldRestartWithTopologyOptimizationOn() throws Exception {
final StreamsBuilder streamsBuilderOld = new StreamsBuilder();
final KStream<String, String> leftOld = streamsBuilderOld.stream( |
codereview_new_java_data_8467 |
import java.io.Closeable;
/**
- * The {@code EventHandler} constructs a thread that runs {@code BackgroundThreadRunnable} to handle network requests
- * and responses.
*/
public interface BackgroundThreadRunnable extends Runnable, Closeable {
}
Using `{@link EventHandler}` is a little nicer when navigating via JavaDoc, FWIW.
import java.io.Closeable;
/**
+ * The {@link org.apache.kafka.clients.consumer.internals.events.EventHandler} constructs a thread that runs
+ * {@code BackgroundThreadRunnable} to handle network requests and responses.
*/
public interface BackgroundThreadRunnable extends Runnable, Closeable {
} |
codereview_new_java_data_8468 |
*/
package org.apache.kafka.clients.consumer.internals.events;
/**
* The event is NoOp. This is intentionally left here for demonstration purpose.
*/
public class NoopApplicationEvent extends ApplicationEvent {
public final String message;
- public NoopApplicationEvent(String message) {
- super(EventType.NOOP);
this.message = message;
}
@Override
I think we should move this class to the test package instead of the main package to separate it from other production, non-testing code.
*/
package org.apache.kafka.clients.consumer.internals.events;
+import org.apache.kafka.clients.consumer.internals.NoopBackgroundEvent;
+
+import java.util.concurrent.BlockingQueue;
+
/**
* The event is NoOp. This is intentionally left here for demonstration purpose.
*/
public class NoopApplicationEvent extends ApplicationEvent {
public final String message;
+ private final BlockingQueue<BackgroundEvent> backgroundEventQueue;
+ public NoopApplicationEvent(
+ BlockingQueue<BackgroundEvent> backgroundEventQueue,
+ String message) {
this.message = message;
+ this.backgroundEventQueue = backgroundEventQueue;
+ }
+
+ @Override
+ public boolean process() {
+ return backgroundEventQueue.add(new NoopBackgroundEvent(message));
}
@Override |
codereview_new_java_data_8469 |
public class NoopBackgroundEvent extends BackgroundEvent {
public final String message;
- public NoopBackgroundEvent(String message) {
super(EventType.NOOP);
this.message = message;
}
Ditto here, I think we should move it to test package, not main package.
public class NoopBackgroundEvent extends BackgroundEvent {
public final String message;
+ public NoopBackgroundEvent(final String message) {
super(EventType.NOOP);
this.message = message;
} |
codereview_new_java_data_8470 | public ConsumerRecords<K, V> poll(final Duration timeout) {
processEvent(backgroundEvent.get(), timeout); // might trigger callbacks or handle exceptions
}
}
-
- maybeSendFetches(); // send new fetches
} while (time.timer(timeout).notExpired());
} catch (Exception e) {
throw new RuntimeException(e);
I would think this would be handled by the background thread.
public ConsumerRecords<K, V> poll(final Duration timeout) {
processEvent(backgroundEvent.get(), timeout); // might trigger callbacks or handle exceptions
}
}
} while (time.timer(timeout).notExpired());
} catch (Exception e) {
throw new RuntimeException(e); |
codereview_new_java_data_8471 | public int hashCode() {
*/
Set<Task> getUpdatingTasks();
- /**
- * Gets active tasks that are managed by the state updater.
- *
- * The state updater manages all active tasks that were added with the {@link StateUpdater#add(Task)} and that have
- * not been removed from the state updater with one of the following methods:
- * <ul>
- * <li>{@link StateUpdater#drainRestoredActiveTasks(Duration)}</li>
- * <li>{@link StateUpdater#drainRemovedTasks()}</li>
- * <li>{@link StateUpdater#drainExceptionsAndFailedTasks()}</li>
- * </ul>
- *
- * @return set of all tasks managed by the state updater
- */
- Set<StreamTask> getActiveTasks();
-
/**
* Returns if the state updater restores active tasks.
*
Could you please also remove `getActiveTasks()` from the interface?
public int hashCode() {
*/
Set<Task> getUpdatingTasks();
/**
* Returns if the state updater restores active tasks.
* |
codereview_new_java_data_8472 | public class WorkerConfig extends AbstractConfig {
public static final String OFFSET_COMMIT_INTERVAL_MS_CONFIG = "offset.flush.interval.ms";
private static final String OFFSET_COMMIT_INTERVAL_MS_DOC
- = "Interval at which to try committing offsets for source tasks.";
public static final long OFFSET_COMMIT_INTERVAL_MS_DEFAULT = 60000L;
public static final String OFFSET_COMMIT_TIMEOUT_MS_CONFIG = "offset.flush.timeout.ms";
The offset commit interval also affects sink tasks; see here:
https://github.com/apache/kafka/blob/352c71ffb5d825c4a88454c12b9fa66c1750add3/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java#L212
We can probably revert this change.
public class WorkerConfig extends AbstractConfig {
public static final String OFFSET_COMMIT_INTERVAL_MS_CONFIG = "offset.flush.interval.ms";
private static final String OFFSET_COMMIT_INTERVAL_MS_DOC
+ = "Interval at which to try committing offsets for tasks.";
public static final long OFFSET_COMMIT_INTERVAL_MS_DEFAULT = 60000L;
public static final String OFFSET_COMMIT_TIMEOUT_MS_CONFIG = "offset.flush.timeout.ms"; |
codereview_new_java_data_8473 | public class StandaloneConfig extends WorkerConfig {
* <code>offset.storage.file.filename</code>
*/
public static final String OFFSET_STORAGE_FILE_FILENAME_CONFIG = "offset.storage.file.filename";
- private static final String OFFSET_STORAGE_FILE_FILENAME_DOC = "File to store source task offsets";
static {
CONFIG = baseConfigDef()
To remain consistent with the description for the `offset.storage.topic`, we should probably use the term "connector" instead of "task":
```suggestion
private static final String OFFSET_STORAGE_FILE_FILENAME_DOC = "File to store source connector offsets";
```
public class StandaloneConfig extends WorkerConfig {
* <code>offset.storage.file.filename</code>
*/
public static final String OFFSET_STORAGE_FILE_FILENAME_CONFIG = "offset.storage.file.filename";
+ private static final String OFFSET_STORAGE_FILE_FILENAME_DOC = "File to store source connector offsets";
static {
CONFIG = baseConfigDef() |
codereview_new_java_data_8475 | public ApiKeys apiKey() {
return apiKey;
}
public abstract int throttleTimeMs();
- public abstract void setThrottleTimeMs(int throttleTimeMs);
public String toString() {
return data().toString();
How about `maybeSetThrottleTimeMs`, since not all response schemas support it?
public ApiKeys apiKey() {
return apiKey;
}
+ /**
+ * Get the throttle time in milliseconds. If the response schema does not
+ * support this field, then 0 will be returned.
+ */
public abstract int throttleTimeMs();
+ /**
+ * Set the throttle time in the response if the schema supports it. Otherwise,
+ * this is a no-op.
+ *
+ * @param throttleTimeMs The throttle time in milliseconds
+ */
+ public abstract void maybeSetThrottleTimeMs(int throttleTimeMs);
public String toString() {
return data().toString(); |
codereview_new_java_data_8476 | public boolean equals(Object o) {
}
/**
- * Return the metadata for the next error response.
*/
public FetchMetadata nextCloseExisting() {
return new FetchMetadata(sessionId, INITIAL_EPOCH);
}
FYI reviewer:
The server handles this message in `FetchSession.scala`; see https://github.com/apache/kafka/blob/trunk/core/src/main/scala/kafka/server/FetchSession.scala#L785
public boolean equals(Object o) {
}
/**
+ * Return the metadata for the next request. The metadata is set to indicate that the client wants to close the
+ * existing session.
*/
public FetchMetadata nextCloseExisting() {
+ return new FetchMetadata(sessionId, FINAL_EPOCH);
+ }
+
+ /**
+ * Return the metadata for the next request. The metadata is set to indicate that the client wants to close the
+ * existing session and create a new one if possible.
+ */
+ public FetchMetadata nextCloseExistingAttemptNew() {
return new FetchMetadata(sessionId, INITIAL_EPOCH);
}
|
codereview_new_java_data_8477 | public boolean equals(Object o) {
}
/**
- * Return the metadata for the next error response.
*/
public FetchMetadata nextCloseExisting() {
return new FetchMetadata(sessionId, INITIAL_EPOCH);
}
I checked KIP-227 and found nothing about the meaning of `$ID FINAL_EPOCH` in the `FetchRequest Metadata meaning` section, but I checked the test code and confirmed that `$ID FINAL_EPOCH` is intended to close the session.
https://github.com/apache/kafka/blob/921885d31ff9604a498a9d0d872dc141715a28f1/core/src/test/scala/unit/kafka/server/FetchSessionTest.scala#L495-L504
public boolean equals(Object o) {
}
/**
+ * Return the metadata for the next request. The metadata is set to indicate that the client wants to close the
+ * existing session.
*/
public FetchMetadata nextCloseExisting() {
+ return new FetchMetadata(sessionId, FINAL_EPOCH);
+ }
+
+ /**
+ * Return the metadata for the next request. The metadata is set to indicate that the client wants to close the
+ * existing session and create a new one if possible.
+ */
+ public FetchMetadata nextCloseExistingAttemptNew() {
return new FetchMetadata(sessionId, INITIAL_EPOCH);
}
|
codereview_new_java_data_8478 | public void testNodeIfOffline() {
@Test
public void testNodeIfOnlineNonExistentTopicPartition() {
- Node node0 = new Node(0, "localhost", 9092);
-
- MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 2, Collections.emptyMap(), Collections.emptyMap(), _tp -> 99,
- (error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) ->
- new MetadataResponse.PartitionMetadata(error, partition, Optional.of(node0.id()), leaderEpoch,
- Collections.singletonList(node0.id()), Collections.emptyList(),
- Collections.emptyList()), ApiKeys.METADATA.latestVersion(), Collections.emptyMap());
- metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);
- metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L);
TopicPartition tp = new TopicPartition("topic-1", 0);
assertEquals(metadata.fetch().nodeIfOnline(tp, 0), Optional.empty());
}
Could we use `RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap())`? It seems to me that we would end up with the same result because of the empty maps.
public void testNodeIfOffline() {
@Test
public void testNodeIfOnlineNonExistentTopicPartition() {
+ MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap());
+ metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L);
TopicPartition tp = new TopicPartition("topic-1", 0);
+ assertEquals(metadata.fetch().nodeById(0).id(), 0);
+ assertNull(metadata.fetch().partition(tp));
assertEquals(metadata.fetch().nodeIfOnline(tp, 0), Optional.empty());
}
|
codereview_new_java_data_8479 | public void testNodeIfOffline() {
@Test
public void testNodeIfOnlineNonExistentTopicPartition() {
- Node node0 = new Node(0, "localhost", 9092);
-
- MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 2, Collections.emptyMap(), Collections.emptyMap(), _tp -> 99,
- (error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) ->
- new MetadataResponse.PartitionMetadata(error, partition, Optional.of(node0.id()), leaderEpoch,
- Collections.singletonList(node0.id()), Collections.emptyList(),
- Collections.emptyList()), ApiKeys.METADATA.latestVersion(), Collections.emptyMap());
- metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);
- metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L);
TopicPartition tp = new TopicPartition("topic-1", 0);
assertEquals(metadata.fetch().nodeIfOnline(tp, 0), Optional.empty());
}
Is this one really required?
public void testNodeIfOffline() {
@Test
public void testNodeIfOnlineNonExistentTopicPartition() {
+ MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap());
+ metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L);
TopicPartition tp = new TopicPartition("topic-1", 0);
+ assertEquals(metadata.fetch().nodeById(0).id(), 0);
+ assertNull(metadata.fetch().partition(tp));
assertEquals(metadata.fetch().nodeIfOnline(tp, 0), Optional.empty());
}
|
codereview_new_java_data_8480 | public void testNodeIfOffline() {
@Test
public void testNodeIfOnlineNonExistentTopicPartition() {
- Node node0 = new Node(0, "localhost", 9092);
-
- MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 2, Collections.emptyMap(), Collections.emptyMap(), _tp -> 99,
- (error, partition, leader, leaderEpoch, replicas, isr, offlineReplicas) ->
- new MetadataResponse.PartitionMetadata(error, partition, Optional.of(node0.id()), leaderEpoch,
- Collections.singletonList(node0.id()), Collections.emptyList(),
- Collections.emptyList()), ApiKeys.METADATA.latestVersion(), Collections.emptyMap());
- metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);
- metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L);
TopicPartition tp = new TopicPartition("topic-1", 0);
assertEquals(metadata.fetch().nodeIfOnline(tp, 0), Optional.empty());
}
nit: Could we also assert that `metadata.fetch().nodeById` returns our node? I would also assert that `partition(tp)` returns null.
public void testNodeIfOffline() {
@Test
public void testNodeIfOnlineNonExistentTopicPartition() {
+ MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(2, Collections.emptyMap());
+ metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L);
TopicPartition tp = new TopicPartition("topic-1", 0);
+ assertEquals(metadata.fetch().nodeById(0).id(), 0);
+ assertNull(metadata.fetch().partition(tp));
assertEquals(metadata.fetch().nodeIfOnline(tp, 0), Optional.empty());
}
|
codereview_new_java_data_8481 | static void createCompactedTopic(String topicName, short partitions, short repli
}
if (cause instanceof UnsupportedVersionException) {
log.debug("Unable to create topic '{}' since the brokers do not support the CreateTopics API." +
- " Falling back to assume topic exist or will be auto-created by the broker.",
topicName);
}
if (cause instanceof ClusterAuthorizationException) {
This method handles a single topic at a time so we can drop `(s)` after `topic`. Same below
static void createCompactedTopic(String topicName, short partitions, short repli
}
if (cause instanceof UnsupportedVersionException) {
log.debug("Unable to create topic '{}' since the brokers do not support the CreateTopics API." +
+ " Falling back to assume topic exists or will be auto-created by the broker.",
topicName);
}
if (cause instanceof ClusterAuthorizationException) { |
codereview_new_java_data_8482 | static void createCompactedTopic(String topicName, short partitions, short repli
}
if (cause instanceof UnsupportedVersionException) {
log.debug("Unable to create topic '{}' since the brokers do not support the CreateTopics API." +
- " Falling back to assume topic exist or will be auto-created by the broker.",
topicName);
}
if (cause instanceof ClusterAuthorizationException) {
Should it be `exists`?
static void createCompactedTopic(String topicName, short partitions, short repli
}
if (cause instanceof UnsupportedVersionException) {
log.debug("Unable to create topic '{}' since the brokers do not support the CreateTopics API." +
+ " Falling back to assume topic exists or will be auto-created by the broker.",
topicName);
}
if (cause instanceof ClusterAuthorizationException) { |
codereview_new_java_data_8483 |
import java.util.Set;
/**
- * ForwardingAdmin is the default value of `forwarding.admin.class` in MM2.
- * MM2 users who wish to use customized behaviour Admin; they can extend the ForwardingAdmin and override some behaviours
- * without need to provide a whole implementation of Admin.
- * The class must have a contractor that accept configuration (Map<String, Object> config) to configure
- * {@link KafkaAdminClient} and any other needed resource management clients.
*/
public class ForwardingAdmin implements Admin {
private final Admin delegate;
This class needs some javadoc to explain that this is the default `forwarding.admin.class` used by MM2, and that it is intended that users wanting customized behaviour extend this class and override the methods they need.
import java.util.Set;
/**
+ * {@code ForwardingAdmin} is the default value of {@code forwarding.admin.class} in MM2.
+ * Users who wish to customize the MM2 behaviour for the creation of topics and access control lists can extend this
+ * class without needing to provide a whole implementation of {@code Admin}.
+ * The class must have a constructor with signature {@code (Map<String, Object> config)} for configuring
+ * a decorated {@link KafkaAdminClient} and any other clients needed for external resource management.
*/
public class ForwardingAdmin implements Admin {
private final Admin delegate; |
codereview_new_java_data_8484 |
import java.util.Set;
/**
- * ForwardingAdmin is the default value of `forwarding.admin.class` in MM2.
- * MM2 users who wish to use customized behaviour Admin; they can extend the ForwardingAdmin and override some behaviours
- * without need to provide a whole implementation of Admin.
- * The class must have a contractor that accept configuration (Map<String, Object> config) to configure
- * {@link KafkaAdminClient} and any other needed resource management clients.
*/
public class ForwardingAdmin implements Admin {
private final Admin delegate;
I guess this could be `Admin.create`, not need for explicit dependency on `AdminClient`.
import java.util.Set;
/**
+ * {@code ForwardingAdmin} is the default value of {@code forwarding.admin.class} in MM2.
+ * Users who wish to customize the MM2 behaviour for the creation of topics and access control lists can extend this
+ * class without needing to provide a whole implementation of {@code Admin}.
+ * The class must have a constructor with signature {@code (Map<String, Object> config)} for configuring
+ * a decorated {@link KafkaAdminClient} and any other clients needed for external resource management.
*/
public class ForwardingAdmin implements Admin {
private final Admin delegate; |
codereview_new_java_data_8485 | public class MirrorClientConfig extends AbstractConfig {
DefaultReplicationPolicy.SEPARATOR_DEFAULT;
public static final String FORWARDING_ADMIN_CLASS = "forwarding.admin.class";
- public static final String FORWARDING_ADMIN_CLASS_DOC = "Class which extends ForwardingAdmin to define custom cluster resource management (topics, configs, etc).";
public static final Class<?> FORWARDING_ADMIN_CLASS_DEFAULT = ForwardingAdmin.class;
public static final String ADMIN_CLIENT_PREFIX = "admin.";
public static final String CONSUMER_CLIENT_PREFIX = "consumer.";
I wonder if we should mention the constructor signature here as well?
public class MirrorClientConfig extends AbstractConfig {
DefaultReplicationPolicy.SEPARATOR_DEFAULT;
public static final String FORWARDING_ADMIN_CLASS = "forwarding.admin.class";
+ public static final String FORWARDING_ADMIN_CLASS_DOC = "Class which extends ForwardingAdmin to define custom cluster resource management (topics, configs, etc). " +
+ "The class must have a contractor that accept configuration (Map<String, Object> config) to configure KafkaAdminClient and any other needed clients.";
public static final Class<?> FORWARDING_ADMIN_CLASS_DEFAULT = ForwardingAdmin.class;
public static final String ADMIN_CLIENT_PREFIX = "admin.";
public static final String CONSUMER_CLIENT_PREFIX = "consumer."; |
codereview_new_java_data_8486 | public ForwardingAdmin(Map<String, Object> configs) {
@Override
public void close(Duration timeout) {
- delegate.close();
}
@Override
Should we pass the timeout to the delegate?
public ForwardingAdmin(Map<String, Object> configs) {
@Override
public void close(Duration timeout) {
+ delegate.close(timeout);
}
@Override |
codereview_new_java_data_8487 |
/**
* {@code ForwardingAdmin} is the default value of {@code forwarding.admin.class} in MirrorMaker.
* Users who wish to customize the MirrorMaker behaviour for the creation of topics and access control lists can extend this
- * class without needing to provide a whole implementation of {@code Admin}.
* The class must have a constructor with signature {@code (Map<String, Object> config)} for configuring
* a decorated {@link KafkaAdminClient} and any other clients needed for external resource management.
*/
```suggestion
* class without needing to provide a whole implementation of {@code Admin}.
```
/**
* {@code ForwardingAdmin} is the default value of {@code forwarding.admin.class} in MirrorMaker.
* Users who wish to customize the MirrorMaker behaviour for the creation of topics and access control lists can extend this
+ * class without needing to provide a whole implementation of {@code Admin}.
* The class must have a constructor with signature {@code (Map<String, Object> config)} for configuring
* a decorated {@link KafkaAdminClient} and any other clients needed for external resource management.
*/ |
codereview_new_java_data_8488 | public class MirrorClientConfig extends AbstractConfig {
public static final String FORWARDING_ADMIN_CLASS = "forwarding.admin.class";
public static final String FORWARDING_ADMIN_CLASS_DOC = "Class which extends ForwardingAdmin to define custom cluster resource management (topics, configs, etc). " +
- "The class must have a constructor that accept configuration (Map<String, Object> config) to configure KafkaAdminClient and any other needed clients.";
public static final Class<?> FORWARDING_ADMIN_CLASS_DEFAULT = ForwardingAdmin.class;
public static final String ADMIN_CLIENT_PREFIX = "admin.";
public static final String CONSUMER_CLIENT_PREFIX = "consumer.";
```suggestion
"The class must have a constructor with signature <code>(Map<String, Object> config)</code> that is used to configure a KafkaAdminClient and may also be used to configure clients for external systems if necessary.";
```
public class MirrorClientConfig extends AbstractConfig {
public static final String FORWARDING_ADMIN_CLASS = "forwarding.admin.class";
public static final String FORWARDING_ADMIN_CLASS_DOC = "Class which extends ForwardingAdmin to define custom cluster resource management (topics, configs, etc). " +
+ "The class must have a constructor with signature <code>(Map<String, Object> config)</code> that is used to configure a KafkaAdminClient and may also be used to configure clients for external systems if necessary.";
public static final Class<?> FORWARDING_ADMIN_CLASS_DEFAULT = ForwardingAdmin.class;
public static final String ADMIN_CLIENT_PREFIX = "admin.";
public static final String CONSUMER_CLIENT_PREFIX = "consumer."; |
codereview_new_java_data_8489 |
/** Internal utility methods. */
final class MirrorUtils {
- private static final Logger log = LoggerFactory.getLogger(MirrorCheckpointTask.class);
// utility class
private MirrorUtils() {}
`MirrorCheckpointTask` -> `MirrorUtils`
/** Internal utility methods. */
final class MirrorUtils {
+ private static final Logger log = LoggerFactory.getLogger(MirrorUtils.class);
// utility class
private MirrorUtils() {} |
codereview_new_java_data_8490 | public void onCompletion(RecordMetadata metadata, Exception exception) {
assertEquals(partition1, partition.get());
assertEquals(1, mockRandom.get());
- // Produce large record, we switched to next partition by previous produce, but
- // for this produce the switch would be disabled because of incomplete batch.
accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS,
callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster);
assertEquals(partition2, partition.get());
To be precise, the previous produce didn't switch to the next partition. The produce of this record forces the closing of the current batch, which causes the switch to the next partition.
public void onCompletion(RecordMetadata metadata, Exception exception) {
assertEquals(partition1, partition.get());
assertEquals(1, mockRandom.get());
+ // Produce large record, we should switch to next partition as we complete
+ // previous batch and exceeded sticky limit.
accum.append(topic, RecordMetadata.UNKNOWN_PARTITION, 0L, null, largeValue, Record.EMPTY_HEADERS,
callbacks, maxBlockTimeMs, false, time.milliseconds(), cluster);
assertEquals(partition2, partition.get()); |
codereview_new_java_data_8491 | public void testPollRedelivery() throws Exception {
assertTaskMetricValue("running-ratio", 1.0);
assertTaskMetricValue("batch-size-max", 1.0);
assertTaskMetricValue("batch-size-avg", 0.5);
-
- assertEquals(workerCurrentOffsets, Whitebox.<Map<TopicPartition, OffsetAndMetadata>>getInternalState(workerTask, "currentOffsets"));
-
sinkTaskContext.getValue().requestCommit();
time.sleep(10000L);
workerTask.iteration();
Do we have to probe internal fields to verify this change? Couldn't we examine the offsets given to `SinkTask::preCommit` or `Consumer::commitAsync` instead?
public void testPollRedelivery() throws Exception {
assertTaskMetricValue("running-ratio", 1.0);
assertTaskMetricValue("batch-size-max", 1.0);
assertTaskMetricValue("batch-size-avg", 0.5);
+
sinkTaskContext.getValue().requestCommit();
time.sleep(10000L);
workerTask.iteration(); |
codereview_new_java_data_8492 |
* Write an arbitrary set of metadata records into a Kafka metadata log batch format.
*
* This is similar to the binary format used for metadata snapshot files, but the log epoch
- * and initial offset are set to zero.
*/
public class BatchFileWriter implements AutoCloseable {
private final FileChannel channel;
We should document that this class adds the two control records automatically
* Write an arbitrary set of metadata records into a Kafka metadata log batch format.
*
* This is similar to the binary format used for metadata snapshot files, but the log epoch
+ * and initial offset are set to zero. This type includes a SnapshotHeaderRecord record in the
+ * first batch and a SnapshotFooterRecord record in the last batch.
*/
public class BatchFileWriter implements AutoCloseable {
private final FileChannel channel; |
codereview_new_java_data_8493 | ClusterAssignment performTaskAssignment(
log.debug("Skipping revocations in the current round with a delay of {}ms. Next scheduled rebalance:{}",
delay, scheduledRebalance);
} else {
- log.debug("Revoking assignments as scheduled.rebalance.max.delay.ms is set to 0");
revoke(toRevoke, toExplicitlyRevoke);
}
} else if (!toExplicitlyRevoke.isEmpty()) {
maybe: `Revoking assignments immediately since scheduled.rebalance.max.delay.ms is set to 0`
ClusterAssignment performTaskAssignment(
log.debug("Skipping revocations in the current round with a delay of {}ms. Next scheduled rebalance:{}",
delay, scheduledRebalance);
} else {
+ log.debug("Revoking assignments immediately since scheduled.rebalance.max.delay.ms is set to 0");
revoke(toRevoke, toExplicitlyRevoke);
}
} else if (!toExplicitlyRevoke.isEmpty()) { |
codereview_new_java_data_8494 | public int hashCode() {
/**
* Returns if the state updater restores active tasks.
*
- * The state updater restores active tasks if at least one active task was added with the {@link StateUpdater#add(Task)}
- * and the active task was not removed from the state updater with one of the following methods:
* <ul>
* <li>{@link StateUpdater#drainRestoredActiveTasks(Duration)}</li>
* <li>{@link StateUpdater#drainRemovedTasks()}</li>
Is this asking whether it is _currently_ restoring an active task? It reads as if it is asking whether it is able to do so. I guess I am not sure what this method is for.
public int hashCode() {
/**
* Returns if the state updater restores active tasks.
*
+ * The state updater restores active tasks if at least one active task was added with {@link StateUpdater#add(Task)},
+ * the task is not paused, and the task was not removed from the state updater with one of the following methods:
* <ul>
* <li>{@link StateUpdater#drainRestoredActiveTasks(Duration)}</li>
* <li>{@link StateUpdater#drainRemovedTasks()}</li> |
codereview_new_java_data_8495 | public static String RESTART_KEY(String connectorName) {
@Deprecated
public KafkaConfigBackingStore(Converter converter, DistributedConfig config, WorkerConfigTransformer configTransformer) {
- this(converter, config, configTransformer, null, "connect-distributed");
}
public KafkaConfigBackingStore(Converter converter, DistributedConfig config, WorkerConfigTransformer configTransformer, Supplier<TopicAdmin> adminSupplier, String clientIdBase) {
Should this be `connect-distributed-`?
public static String RESTART_KEY(String connectorName) {
@Deprecated
public KafkaConfigBackingStore(Converter converter, DistributedConfig config, WorkerConfigTransformer configTransformer) {
+ this(converter, config, configTransformer, null, "connect-distributed-");
}
public KafkaConfigBackingStore(Converter converter, DistributedConfig config, WorkerConfigTransformer configTransformer, Supplier<TopicAdmin> adminSupplier, String clientIdBase) { |
codereview_new_java_data_8496 | public class KafkaStatusBackingStore implements StatusBackingStore {
@Deprecated
public KafkaStatusBackingStore(Time time, Converter converter) {
- this(time, converter, null, "connect-distributed");
}
public KafkaStatusBackingStore(Time time, Converter converter, Supplier<TopicAdmin> topicAdminSupplier, String clientIdBase) {
Should this be `connect-distributed-`?
public class KafkaStatusBackingStore implements StatusBackingStore {
@Deprecated
public KafkaStatusBackingStore(Time time, Converter converter) {
+ this(time, converter, null, "connect-distributed-");
}
public KafkaStatusBackingStore(Time time, Converter converter, Supplier<TopicAdmin> topicAdminSupplier, String clientIdBase) { |
codereview_new_java_data_8499 |
class MetadataVersionTest {
@Test
- public void testFeatureLevel() {
- int i = 0;
- while (i < MetadataVersion.VERSIONS.length &&
- MetadataVersion.VERSIONS[i].featureLevel() < 0) {
- i++;
}
- int j = 1;
- while (i < MetadataVersion.VERSIONS.length) {
- assertEquals(j, MetadataVersion.VERSIONS[i].featureLevel());
- i++;
- j++;
}
}
nit: The new version of this test seems more obscure than the original. It seems more straightforward to assert 1) all versions below IBP_3_0_IV1 have -1 feature level, and 2) that all subsequent version increment feature level by 1. Also, it would be helpful to have a better name for the test so that we don't have to decipher what it's trying to do. For example, perhaps `testSequentialFeatureLevel` or something like that.
class MetadataVersionTest {
@Test
+ public void testKRaftFeatureLevelsBefore3_0_IV1() {
+ for (int i = 0; i < MetadataVersion.IBP_3_0_IV1.ordinal(); i++) {
+ assertEquals(-1, MetadataVersion.VERSIONS[i].featureLevel());
}
+ }
+
+ @Test
+ public void testKRaftFeatureLevelsAtAndAfter3_0_IV1() {
+ for (int i = MetadataVersion.IBP_3_0_IV1.ordinal(); i < MetadataVersion.VERSIONS.length; i++) {
+ int expectedLevel = i - MetadataVersion.IBP_3_0_IV1.ordinal() + 1;
+ assertEquals(expectedLevel, MetadataVersion.VERSIONS[i].featureLevel());
}
}
|
codereview_new_java_data_8500 | private void renounce() {
snapshotRegistry.revertToSnapshot(lastCommittedOffset);
authorizer.ifPresent(a -> a.loadSnapshot(aclControlManager.idToAcl()));
} else {
- log.warn("Unable to find last committed offset {} in snapshot registry; resetting " +
- "to empty state.", lastCommittedOffset);
resetToEmptyState();
authorizer.ifPresent(a -> a.loadSnapshot(Collections.emptyMap()));
needToCompleteAuthorizerLoad = authorizer.isPresent();
Is not having a snapshot for the committed offset an expected or unexpected state?
private void renounce() {
snapshotRegistry.revertToSnapshot(lastCommittedOffset);
authorizer.ifPresent(a -> a.loadSnapshot(aclControlManager.idToAcl()));
} else {
+ log.info("Unable to find last committed offset {} in snapshot registry; resetting " +
+ "to empty state.", lastCommittedOffset);
resetToEmptyState();
authorizer.ifPresent(a -> a.loadSnapshot(Collections.emptyMap()));
needToCompleteAuthorizerLoad = authorizer.isPresent(); |
codereview_new_java_data_8501 |
import org.slf4j.Logger;
public class LogReplayTracker {
public static class Builder {
private LogContext logContext = null;
Brief javadoc would be nice. Should probably mention this is not thread safe
import org.slf4j.Logger;
+/**
+ * The LogReplayTracker manages state associated with replaying the metadata log, such as whether
+ * we have seen any records and whether we have seen any metadata version records. It is accessed
+ * solely from the quorum controller thread.
+ */
public class LogReplayTracker {
public static class Builder {
private LogContext logContext = null; |
codereview_new_java_data_8502 | public boolean hasNext() {
public List<ApiMessageAndVersion> next() {
// Write the metadata.version first
if (!wroteVersion) {
- if (!metadataVersion.isLessThan(minimumBootstrapVersion)) {
wroteVersion = true;
return Collections.singletonList(new ApiMessageAndVersion(new FeatureLevelRecord()
.setName(MetadataVersion.FEATURE_NAME)
nit: i think this check reads easier as `metadataVersion.isAtLeast(minimumBootstrapVersion)`
public boolean hasNext() {
public List<ApiMessageAndVersion> next() {
// Write the metadata.version first
if (!wroteVersion) {
+ if (metadataVersion.isAtLeast(minimumBootstrapVersion)) {
wroteVersion = true;
return Collections.singletonList(new ApiMessageAndVersion(new FeatureLevelRecord()
.setName(MetadataVersion.FEATURE_NAME) |
codereview_new_java_data_8503 |
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
-@RunWith(MockitoJUnitRunner.class)
public class RootResourceTest {
@Mock private Herder herder;
To align this test with others we are currenclt migrating, can you please use `MockitoJUnitRunner.StrictStubs.class` instead of the `MockitoJUnitRunner.class`?
Mockito docs explaning what `StrictStubs` does:
- https://javadoc.io/doc/org.mockito/mockito-core/latest/org/mockito/junit/MockitoJUnitRunner.StrictStubs.html
- https://javadoc.io/doc/org.mockito/mockito-core/latest/org/mockito/quality/Strictness.html#STRICT_STUBS
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
+@RunWith(MockitoJUnitRunner.StrictStubs.class)
public class RootResourceTest {
@Mock private Herder herder; |
codereview_new_java_data_8507 | public class KStreamPrintTest {
private Processor<Integer, String, Void, Void> printProcessor;
@Mock
- ProcessorContext<Void, Void> processorContext;
@Before
public void setUp() {
[optional] Can you please double check if we can set `private` access modifier to the `ProcessorContext<Void, Void> processorContext;` ?
public class KStreamPrintTest {
private Processor<Integer, String, Void, Void> printProcessor;
@Mock
+ private ProcessorContext<Void, Void> processorContext;
@Before
public void setUp() { |
codereview_new_java_data_8512 | void backoff(int attempt, long deadline) {
if (delay > errorMaxDelayInMillis) {
delay = ThreadLocalRandom.current().nextLong(errorMaxDelayInMillis);
}
- if (delay + time.milliseconds() > deadline) {
- delay = deadline - time.milliseconds();
}
log.debug("Sleeping for up to {} millis", delay);
try {
Can this ever lead to negative delays? If so, we should add a test for that case (possibly adding in a call to, e.g., `time.sleep(1)` in the test case to simulate a little bit of time passing between checks) and add logic here to set a minimum delay of zero.
void backoff(int attempt, long deadline) {
if (delay > errorMaxDelayInMillis) {
delay = ThreadLocalRandom.current().nextLong(errorMaxDelayInMillis);
}
+ long currentTime = time.milliseconds();
+ if (delay + currentTime > deadline) {
+ delay = deadline - currentTime;
}
log.debug("Sleeping for up to {} millis", delay);
try { |
codereview_new_java_data_8514 | private void addToResetList(final TopicPartition partition, final Set<TopicParti
partitions.add(partition);
}
/**
* Try to commit all active tasks owned by this thread.
*
Note that adding this public method does not require a KIP because this class is not part of the public API at https://kafka.apache.org/32/javadoc/allclasses-index.html
private void addToResetList(final TopicPartition partition, final Set<TopicParti
partitions.add(partition);
}
+ // This method is added for usage in tests where mocking the underlying native call is not possible.
+ public boolean isThreadAlive() {
+ return isAlive();
+ }
+
/**
* Try to commit all active tasks owned by this thread.
* |
codereview_new_java_data_8515 | public void shouldRecordStatisticsBasedMetrics() {
verify(statisticsToAdd1, times(17)).getAndResetTickerCount(isA(TickerType.class));
verify(statisticsToAdd2, times(17)).getAndResetTickerCount(isA(TickerType.class));
- verify(statisticsToAdd2, times(2)).getHistogramData(isA(HistogramType.class));
verify(statisticsToAdd2, times(2)).getHistogramData(isA(HistogramType.class));
verify(bytesWrittenToDatabaseSensor).record(expectedBytesWrittenToDatabaseSensor, now);
verify(bytesReadFromDatabaseSensor).record(expectedBytesReadFromDatabaseSensor, now);
I think this should be `statisticsToAdd1` instead of `statisticsToAdd2`. Otherwise we would verify twice the same calls - here and in the next line.
public void shouldRecordStatisticsBasedMetrics() {
verify(statisticsToAdd1, times(17)).getAndResetTickerCount(isA(TickerType.class));
verify(statisticsToAdd2, times(17)).getAndResetTickerCount(isA(TickerType.class));
+ verify(statisticsToAdd1, times(2)).getHistogramData(isA(HistogramType.class));
verify(statisticsToAdd2, times(2)).getHistogramData(isA(HistogramType.class));
verify(bytesWrittenToDatabaseSensor).record(expectedBytesWrittenToDatabaseSensor, now);
verify(bytesReadFromDatabaseSensor).record(expectedBytesReadFromDatabaseSensor, now); |
codereview_new_java_data_8516 | public static <T> RecordsSnapshotReader<T> of(
RawSnapshotReader snapshot,
RecordSerde<T> serde,
BufferSupplier bufferSupplier,
- int maxBatchSize
) {
return new RecordsSnapshotReader<>(
snapshot.snapshotId(),
- new RecordsIterator<>(snapshot.records(), serde, bufferSupplier, maxBatchSize)
);
}
If you do this here, that means you have to read the snapshot twice, right?
public static <T> RecordsSnapshotReader<T> of(
RawSnapshotReader snapshot,
RecordSerde<T> serde,
BufferSupplier bufferSupplier,
+ int maxBatchSize,
+ boolean doCrcValidation
) {
return new RecordsSnapshotReader<>(
snapshot.snapshotId(),
+ new RecordsIterator<>(snapshot.records(), serde, bufferSupplier, maxBatchSize, doCrcValidation)
);
}
|
codereview_new_java_data_8518 |
*/
public class FileStreamSinkConnector extends SinkConnector {
- static final String FILE_CONFIG = "file";
static final ConfigDef CONFIG_DEF = new ConfigDef()
.define(FILE_CONFIG, Type.STRING, null, Importance.HIGH, "Destination filename. If not specified, the standard output will be used");
(These should be reverted too)
*/
public class FileStreamSinkConnector extends SinkConnector {
+ public static final String FILE_CONFIG = "file";
static final ConfigDef CONFIG_DEF = new ConfigDef()
.define(FILE_CONFIG, Type.STRING, null, Importance.HIGH, "Destination filename. If not specified, the standard output will be used");
|
codereview_new_java_data_8519 | public void testTaskClass() {
@Test
public void testConnectorConfigsPropagateToTaskConfigs() {
sinkProperties.put("transforms", "insert");
connector.start(sinkProperties);
List<Map<String, String>> taskConfigs = connector.taskConfigs(1);
Might be worth adding a comment here (and in the `start` implementations for each connector) on why we do this and/or containing a reference to the Jira ticket for KAFKA-13809?
public void testTaskClass() {
@Test
public void testConnectorConfigsPropagateToTaskConfigs() {
+ // This is required so that updates in transforms/converters/clients configs get reflected
+ // in tasks without manual restarts of the tasks (see https://issues.apache.org/jira/browse/KAFKA-13809)
sinkProperties.put("transforms", "insert");
connector.start(sinkProperties);
List<Map<String, String>> taskConfigs = connector.taskConfigs(1); |
codereview_new_java_data_8520 | public class FileStreamSourceConnector extends SourceConnector {
.define(FILE_CONFIG, Type.STRING, null, Importance.HIGH, "Source filename. If not specified, the standard input will be used")
.define(TOPIC_CONFIG, Type.STRING, ConfigDef.NO_DEFAULT_VALUE, new ConfigDef.NonEmptyString(), Importance.HIGH, "The topic to publish data to")
.define(TASK_BATCH_SIZE_CONFIG, Type.INT, DEFAULT_TASK_BATCH_SIZE, Importance.LOW,
- "The maximum number of records the Source task can read from file one time");
private Map<String, String> props;
```suggestion
"The maximum number of records the source task can read from the file each time it is polled");
```
public class FileStreamSourceConnector extends SourceConnector {
.define(FILE_CONFIG, Type.STRING, null, Importance.HIGH, "Source filename. If not specified, the standard input will be used")
.define(TOPIC_CONFIG, Type.STRING, ConfigDef.NO_DEFAULT_VALUE, new ConfigDef.NonEmptyString(), Importance.HIGH, "The topic to publish data to")
.define(TASK_BATCH_SIZE_CONFIG, Type.INT, DEFAULT_TASK_BATCH_SIZE, Importance.LOW,
+ "The maximum number of records the source task can read from the file each time it is polled");
private Map<String, String> props;
|
codereview_new_java_data_8521 |
/**
- * A metadata fault.
*/
public class MetadataFaultException extends RuntimeException {
public MetadataFaultException(String message, Throwable cause) {
Can we elaborate on when it's expected to use this exception? Is it just when applying records?
/**
+ * A fault that we encountered while we replayed cluster metadata.
*/
public class MetadataFaultException extends RuntimeException {
public MetadataFaultException(String message, Throwable cause) { |
codereview_new_java_data_8522 | void syncGroupOffset(String consumerGroupId, Map<TopicPartition, OffsetAndMetada
} else {
log.error("Unable to sync offsets for consumer group {}.", consumerGroupId, throwable);
}
}
});
- log.trace("sync-ed the offset for consumer group: {} with {} number of offset entries",
- consumerGroupId, offsetToSync.size());
}
}
Should we update this log message since it's not guaranteed that the sync will have completed by this point, or that it will even complete successfully at all?
Or alternatively, could we keep the message as-is, but move it into an `else` block in the callback we pass to `whenComplete`?
void syncGroupOffset(String consumerGroupId, Map<TopicPartition, OffsetAndMetada
} else {
log.error("Unable to sync offsets for consumer group {}.", consumerGroupId, throwable);
}
+ } else {
+ log.trace("Sync-ed {} offsets for consumer group {}.", offsetToSync.size(), consumerGroupId);
}
});
}
}
|
codereview_new_java_data_8523 | protected void finalOffsetCommit(boolean failed) {
log.debug("Skipping final offset commit as task has failed");
return;
} else if (isCancelled()) {
- log.debug("Skipping final offset commit as task has been cancelled and its producer has already been closed");
return;
}
Is it necessarily true that the producer is already closed, given that the closure on cancellation is actually delegated to an executor?
protected void finalOffsetCommit(boolean failed) {
log.debug("Skipping final offset commit as task has failed");
return;
} else if (isCancelled()) {
+ log.debug("Skipping final offset commit as task has been cancelled");
return;
}
|
codereview_new_java_data_8527 | public CompletableFuture<List<CreatePartitionsTopicResult>> createPartitions(
});
}
- // TODO: Figure out the reason as to why is a snapshot starting here? Who is calling beginWritingSnapshot()?
@Override
public CompletableFuture<Long> beginWritingSnapshot() {
CompletableFuture<Long> future = new CompletableFuture<>();
This method is used in tests, please also remove this comment.
public CompletableFuture<List<CreatePartitionsTopicResult>> createPartitions(
});
}
@Override
public CompletableFuture<Long> beginWritingSnapshot() {
CompletableFuture<Long> future = new CompletableFuture<>(); |
codereview_new_java_data_8531 |
import static org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitForEmptyConsumerGroup;
@Category({IntegrationTest.class})
-public class KafkaStreamsCloseOptionsIntegrationTest {
@Rule
public Timeout globalTimeout = Timeout.seconds(600);
@Rule
I'm not wild about this IT as written. I copied from the `AbstractResetIntegrationTest` and I'd be happy to hear a suggestion on how to make a more minimal test.
import static org.apache.kafka.streams.integration.utils.IntegrationTestUtils.waitForEmptyConsumerGroup;
@Category({IntegrationTest.class})
+public class KafkaStreamsCloseOptionsIntegrationTest { //extends AbstractResetIntegrationTest {
@Rule
public Timeout globalTimeout = Timeout.seconds(600);
@Rule |
codereview_new_java_data_8532 | public void testCloseOptions() throws Exception {
IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(2));
IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
- streams.close(new CloseOptions().leaveGroup(true));
waitForEmptyConsumerGroup(adminClient, appID, 0);
}
I think we should pass in a timeout via `CloseOptions` to overwrite default MAX_VALUE?
public void testCloseOptions() throws Exception {
IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(2));
IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
+ streams.close(new CloseOptions().leaveGroup(true).timeout(Duration.ofMillis(1000)));
waitForEmptyConsumerGroup(adminClient, appID, 0);
}
|
codereview_new_java_data_8533 | public synchronized boolean close(final Duration timeout) throws IllegalArgument
* @throws IllegalArgumentException if {@code timeout} can't be represented as {@code long milliseconds}
*/
public synchronized boolean close(final CloseOptions options) throws IllegalArgumentException {
- Objects.requireNonNull(options);
final String msgPrefix = prepareMillisCheckFailMsgPrefix(options.timeout, "timeout");
final long timeoutMs = validateMillisecondDuration(options.timeout, msgPrefix);
if (timeoutMs < 0) {
```suggestion
Objects.requireNonNull(options, "options cannot be null");
```
public synchronized boolean close(final Duration timeout) throws IllegalArgument
* @throws IllegalArgumentException if {@code timeout} can't be represented as {@code long milliseconds}
*/
public synchronized boolean close(final CloseOptions options) throws IllegalArgumentException {
+ Objects.requireNonNull(options, "options cannot be null");
final String msgPrefix = prepareMillisCheckFailMsgPrefix(options.timeout, "timeout");
final long timeoutMs = validateMillisecondDuration(options.timeout, msgPrefix);
if (timeoutMs < 0) { |
codereview_new_java_data_8534 | public void testCloseOptions() throws Exception {
IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(2));
IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
- streams.close(new CloseOptions().leaveGroup(true).timeout(Duration.ofMillis(1000)));
waitForEmptyConsumerGroup(adminClient, appID, 0);
}
```suggestion
streams.close(new CloseOptions().leaveGroup(true).timeout(Duration.ofMillis(30_000L)));
```
public void testCloseOptions() throws Exception {
IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(2));
IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
+ streams.close(new CloseOptions().leaveGroup(true).timeout(Duration.ofMillis(30_000L)));
waitForEmptyConsumerGroup(adminClient, appID, 0);
}
|
codereview_new_java_data_8535 | public void testCloseOptions() throws Exception {
// RUN
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
- IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(2));
IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
streams.close(new CloseOptions().leaveGroup(true).timeout(Duration.ofMillis(30_000L)));
```suggestion
IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(30));
```
public void testCloseOptions() throws Exception {
// RUN
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
+ IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(30));
IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
streams.close(new CloseOptions().leaveGroup(true).timeout(Duration.ofMillis(30_000L))); |
codereview_new_java_data_8536 | public void testCloseOptions() throws Exception {
// RUN
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
- IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(2));
IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
streams.close(new CloseOptions().leaveGroup(true).timeout(Duration.ofMillis(30_000L)));
```suggestion
streams.close(new CloseOptions().leaveGroup(true).timeout(Duration. ofSeconds(30)));
```
public void testCloseOptions() throws Exception {
// RUN
streams = new KafkaStreams(setupTopologyWithoutIntermediateUserTopic(), streamsConfig);
+ IntegrationTestUtils.startApplicationAndWaitUntilRunning(singletonList(streams), Duration.ofSeconds(30));
IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(resultConsumerConfig, OUTPUT_TOPIC, 10);
streams.close(new CloseOptions().leaveGroup(true).timeout(Duration.ofMillis(30_000L))); |
codereview_new_java_data_8538 | public Set<Characteristics> characteristics() {
}
@SafeVarargs
- public static <E> Set<E> union(final Supplier<Set<E>> constructor, final Collection<E>... set) {
final Set<E> result = constructor.get();
- for (final Collection<E> s : set) {
result.addAll(s);
}
return result;
This seems a bit weird to me. I see that you changed this because `Tasks#allTaskIds()` returns a `Collection` instead of a `Set`. I think it is fine to let `Tasks#allTaskIds()` return a `Set`. The other reason you changed that is that is `Tasks#allTasks()` computing its result from two collections. Those two collections can be transformed to sets. Maybe we should also consider to change the return type of `allTasks()` to a set.
public Set<Characteristics> characteristics() {
}
@SafeVarargs
+ public static <E> Set<E> union(final Supplier<Set<E>> constructor, final Set<E>... set) {
final Set<E> result = constructor.get();
+ for (final Set<E> s : set) {
result.addAll(s);
}
return result; |
codereview_new_java_data_8539 | public static void validateGroupInstanceId(String id) {
/**
* Ensures that the provided {@code reason} remains within a range of 255 chars.
* @param reason This is the reason that is sent to the broker over the wire
- * as a part of {@code JoinGroupRequest}, {@code LeaveGroupRequest}
- * or {@code RemoveMembersFromConsumerGroupOptions} messages.
* @return a provided reason as is or truncated reason if it exceeds the 255 chars threshold.
*/
public static String maybeTruncateReason(final String reason) {
nit: We can remove this line because, in the end, we also send a LeaveGroupRequest in this case.
public static void validateGroupInstanceId(String id) {
/**
* Ensures that the provided {@code reason} remains within a range of 255 chars.
* @param reason This is the reason that is sent to the broker over the wire
+ * as a part of {@code JoinGroupRequest} or {@code LeaveGroupRequest}.
* @return a provided reason as is or truncated reason if it exceeds the 255 chars threshold.
*/
public static String maybeTruncateReason(final String reason) { |
codereview_new_java_data_8540 | public static TaskAndAction createPauseTask(final TaskId taskId) {
}
public static TaskAndAction createResumeTask(final TaskId taskId) {
- Objects.requireNonNull(taskId, "Task ID of task to pause is null!");
return new TaskAndAction(null, taskId, Action.RESUME);
}
Could you please add a unit test for this?
public static TaskAndAction createPauseTask(final TaskId taskId) {
}
public static TaskAndAction createResumeTask(final TaskId taskId) {
+ Objects.requireNonNull(taskId, "Task ID of task to resume is null!");
return new TaskAndAction(null, taskId, Action.RESUME);
}
|
codereview_new_java_data_8541 | public TopologyConfig(final String topologyName, final StreamsConfig globalAppCo
if (isTopologyOverride(MAX_TASK_IDLE_MS_CONFIG, topologyOverrides)) {
maxTaskIdleMs = getLong(MAX_TASK_IDLE_MS_CONFIG);
- log.info("Topology {} is overridding {} to {}", topologyName, MAX_TASK_IDLE_MS_CONFIG, maxTaskIdleMs);
} else {
maxTaskIdleMs = globalAppConfigs.getLong(MAX_TASK_IDLE_MS_CONFIG);
}
nit: typo in overriding.. I think that's how it was originally. Comment can be ignored :)
public TopologyConfig(final String topologyName, final StreamsConfig globalAppCo
if (isTopologyOverride(MAX_TASK_IDLE_MS_CONFIG, topologyOverrides)) {
maxTaskIdleMs = getLong(MAX_TASK_IDLE_MS_CONFIG);
+ log.info("Topology {} is overriding {} to {}", topologyName, MAX_TASK_IDLE_MS_CONFIG, maxTaskIdleMs);
} else {
maxTaskIdleMs = globalAppConfigs.getLong(MAX_TASK_IDLE_MS_CONFIG);
} |
codereview_new_java_data_8543 | void runOnce() {
log.info("Buffered records size {} bytes falls below {}. Resuming all the paused partitions {} in the consumer",
bufferSize, maxBufferSizeBytes.get(), pausedPartitions);
mainConsumer.resume(pausedPartitions);
}
}
Should `pausedPartitions` be cleared somewhere? After this line?
void runOnce() {
log.info("Buffered records size {} bytes falls below {}. Resuming all the paused partitions {} in the consumer",
bufferSize, maxBufferSizeBytes.get(), pausedPartitions);
mainConsumer.resume(pausedPartitions);
+ pausedPartitions.clear();
}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.