diff --git a/build.gradle b/build.gradle index 954e920d7..68dbbf513 100644 --- a/build.gradle +++ b/build.gradle @@ -2301,6 +2301,7 @@ project(':jmh-benchmarks') { // jmh requires jopt 4.x while `core` depends on 5.0, they are not binary compatible exclude group: 'net.sf.jopt-simple', module: 'jopt-simple' } + implementation project(':server-common') implementation project(':clients') implementation project(':metadata') implementation project(':streams') diff --git a/checkstyle/suppressions.xml b/checkstyle/suppressions.xml index a2502c8f0..99493bf77 100644 --- a/checkstyle/suppressions.xml +++ b/checkstyle/suppressions.xml @@ -301,6 +301,10 @@ files="metadata[\\/]src[\\/](generated|generated-test)[\\/].+.java$"/> + + latestSupportedFeatures, + Features finalizedFeatures, + long finalizedFeaturesEpoch, + NodeApiVersions controllerApiVersions, + ListenerType listenerType + ) { + ApiVersionCollection apiKeys; + if (controllerApiVersions != null) { + apiKeys = intersectForwardableApis( + listenerType, minRecordVersion, controllerApiVersions.allSupportedApiVersions()); + } else { + apiKeys = filterApis(minRecordVersion, listenerType); + } + + return createApiVersionsResponse( + throttleTimeMs, + apiKeys, + latestSupportedFeatures, + finalizedFeatures, + finalizedFeaturesEpoch + ); + } + public static ApiVersionsResponse createApiVersionsResponse( int throttleTimeMs, ApiVersionCollection apiVersions, diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ApiVersionsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ApiVersionsResponseTest.java index 2c9b1e8fa..15f3cd240 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/ApiVersionsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/ApiVersionsResponseTest.java @@ -17,10 +17,18 @@ package org.apache.kafka.common.requests; +import java.util.HashSet; +import org.apache.kafka.common.feature.Features; +import org.apache.kafka.common.feature.FinalizedVersionRange; +import org.apache.kafka.common.feature.SupportedVersionRange; import org.apache.kafka.common.message.ApiMessageType; +import org.apache.kafka.common.message.ApiMessageType.ListenerType; import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion; import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionCollection; +import org.apache.kafka.common.message.ApiVersionsResponseData.FinalizedFeatureKey; +import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKey; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.record.RecordVersion; import org.apache.kafka.common.utils.Utils; import org.junit.jupiter.api.Test; @@ -102,6 +110,92 @@ public void shouldHaveCommonlyAgreedApiVersionResponseWithControllerOnForwardabl ApiKeys.JOIN_GROUP.latestVersion(), commonResponse); } + @Test + public void shouldCreateApiResponseOnlyWithKeysSupportedByMagicValue() { + ApiVersionsResponse response = ApiVersionsResponse.createApiVersionsResponse( + 10, + RecordVersion.V1, + Features.emptySupportedFeatures(), + Features.emptyFinalizedFeatures(), + ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, + null, + ListenerType.ZK_BROKER + ); + verifyApiKeysForMagic(response, RecordBatch.MAGIC_VALUE_V1); + assertEquals(10, response.throttleTimeMs()); + assertTrue(response.data().supportedFeatures().isEmpty()); + assertTrue(response.data().finalizedFeatures().isEmpty()); + 
assertEquals(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, response.data().finalizedFeaturesEpoch()); + } + + @Test + public void shouldReturnFeatureKeysWhenMagicIsCurrentValueAndThrottleMsIsDefaultThrottle() { + ApiVersionsResponse response = ApiVersionsResponse.createApiVersionsResponse( + 10, + RecordVersion.V1, + Features.supportedFeatures( + Utils.mkMap(Utils.mkEntry("feature", new SupportedVersionRange((short) 1, (short) 4)))), + Features.finalizedFeatures( + Utils.mkMap(Utils.mkEntry("feature", new FinalizedVersionRange((short) 2, (short) 3)))), + 10L, + null, + ListenerType.ZK_BROKER + ); + + verifyApiKeysForMagic(response, RecordBatch.MAGIC_VALUE_V1); + assertEquals(10, response.throttleTimeMs()); + assertEquals(1, response.data().supportedFeatures().size()); + SupportedFeatureKey sKey = response.data().supportedFeatures().find("feature"); + assertNotNull(sKey); + assertEquals(1, sKey.minVersion()); + assertEquals(4, sKey.maxVersion()); + assertEquals(1, response.data().finalizedFeatures().size()); + FinalizedFeatureKey fKey = response.data().finalizedFeatures().find("feature"); + assertNotNull(fKey); + assertEquals(2, fKey.minVersionLevel()); + assertEquals(3, fKey.maxVersionLevel()); + assertEquals(10, response.data().finalizedFeaturesEpoch()); + } + + @Test + public void shouldReturnAllKeysWhenMagicIsCurrentValueAndThrottleMsIsDefaultThrottle() { + ApiVersionsResponse response = ApiVersionsResponse.createApiVersionsResponse( + AbstractResponse.DEFAULT_THROTTLE_TIME, + RecordVersion.current(), + Features.emptySupportedFeatures(), + Features.emptyFinalizedFeatures(), + ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, + null, + ListenerType.ZK_BROKER + ); + assertEquals(new HashSet(ApiKeys.zkBrokerApis()), apiKeysInResponse(response)); + assertEquals(AbstractResponse.DEFAULT_THROTTLE_TIME, response.throttleTimeMs()); + assertTrue(response.data().supportedFeatures().isEmpty()); + assertTrue(response.data().finalizedFeatures().isEmpty()); + assertEquals(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, response.data().finalizedFeaturesEpoch()); + } + + @Test + public void testMetadataQuorumApisAreDisabled() { + ApiVersionsResponse response = ApiVersionsResponse.createApiVersionsResponse( + AbstractResponse.DEFAULT_THROTTLE_TIME, + RecordVersion.current(), + Features.emptySupportedFeatures(), + Features.emptyFinalizedFeatures(), + ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, + null, + ListenerType.ZK_BROKER + ); + + // Ensure that APIs needed for the KRaft mode are not exposed through ApiVersions until we are ready for them + HashSet exposedApis = apiKeysInResponse(response); + assertFalse(exposedApis.contains(ApiKeys.ENVELOPE)); + assertFalse(exposedApis.contains(ApiKeys.VOTE)); + assertFalse(exposedApis.contains(ApiKeys.BEGIN_QUORUM_EPOCH)); + assertFalse(exposedApis.contains(ApiKeys.END_QUORUM_EPOCH)); + assertFalse(exposedApis.contains(ApiKeys.DESCRIBE_QUORUM)); + } + @Test public void testIntersect() { assertFalse(ApiVersionsResponse.intersect(null, null).isPresent()); @@ -145,4 +239,18 @@ private void verifyVersions(short forwardableAPIKey, assertEquals(expectedVersionsForForwardableAPI, commonResponse.find(forwardableAPIKey)); } + private void verifyApiKeysForMagic(ApiVersionsResponse response, Byte maxMagic) { + for (ApiVersion version : response.data().apiKeys()) { + assertTrue(ApiKeys.forId(version.apiKey()).minRequiredInterBrokerMagic <= maxMagic); + } + } + + private HashSet apiKeysInResponse(ApiVersionsResponse apiVersions) { + HashSet apiKeys = 
new HashSet<>(); + for (ApiVersion version : apiVersions.data().apiKeys()) { + apiKeys.add(ApiKeys.forId(version.apiKey())); + } + return apiKeys; + } + } diff --git a/core/src/main/java/kafka/server/builders/LogManagerBuilder.java b/core/src/main/java/kafka/server/builders/LogManagerBuilder.java index 3ebe7fa8f..6b6bd919f 100644 --- a/core/src/main/java/kafka/server/builders/LogManagerBuilder.java +++ b/core/src/main/java/kafka/server/builders/LogManagerBuilder.java @@ -17,7 +17,6 @@ package kafka.server.builders; -import kafka.api.ApiVersion; import kafka.log.CleanerConfig; import kafka.log.LogConfig; import kafka.log.LogManager; @@ -26,6 +25,7 @@ import kafka.server.metadata.ConfigRepository; import kafka.utils.Scheduler; import org.apache.kafka.common.utils.Time; +import org.apache.kafka.server.common.MetadataVersion; import scala.collection.JavaConverters; import java.io.File; @@ -46,7 +46,7 @@ public class LogManagerBuilder { private long retentionCheckMs = 1000L; private int maxTransactionTimeoutMs = 15 * 60 * 1000; private int maxPidExpirationMs = 60000; - private ApiVersion interBrokerProtocolVersion = ApiVersion.latestVersion(); + private MetadataVersion interBrokerProtocolVersion = MetadataVersion.latest(); private Scheduler scheduler = null; private BrokerTopicStats brokerTopicStats = null; private LogDirFailureChannel logDirFailureChannel = null; @@ -113,7 +113,7 @@ public LogManagerBuilder setMaxPidExpirationMs(int maxPidExpirationMs) { return this; } - public LogManagerBuilder setInterBrokerProtocolVersion(ApiVersion interBrokerProtocolVersion) { + public LogManagerBuilder setInterBrokerProtocolVersion(MetadataVersion interBrokerProtocolVersion) { this.interBrokerProtocolVersion = interBrokerProtocolVersion; return this; } diff --git a/core/src/main/scala/kafka/cluster/Partition.scala b/core/src/main/scala/kafka/cluster/Partition.scala index 0d1e5de0c..126aa71e7 100755 --- a/core/src/main/scala/kafka/cluster/Partition.scala +++ b/core/src/main/scala/kafka/cluster/Partition.scala @@ -20,7 +20,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock import java.util.Optional import java.util.concurrent.CompletableFuture -import kafka.api.{ApiVersion, LeaderAndIsr} +import kafka.api.LeaderAndIsr import kafka.common.UnexpectedAppendOffsetException import kafka.controller.{KafkaController, StateChangeLogger} import kafka.log._ @@ -42,6 +42,7 @@ import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED import org.apache.kafka.common.utils.Time import org.apache.kafka.common.{IsolationLevel, TopicPartition, Uuid} import org.apache.kafka.metadata.LeaderRecoveryState +import org.apache.kafka.server.common.MetadataVersion import scala.collection.{Map, Seq} import scala.jdk.CollectionConverters._ @@ -232,7 +233,7 @@ case class CommittedPartitionState( */ class Partition(val topicPartition: TopicPartition, val replicaLagTimeMaxMs: Long, - interBrokerProtocolVersion: ApiVersion, + interBrokerProtocolVersion: MetadataVersion, localBrokerId: Int, time: Time, alterPartitionListener: AlterPartitionListener, diff --git a/core/src/main/scala/kafka/controller/ControllerChannelManager.scala b/core/src/main/scala/kafka/controller/ControllerChannelManager.scala index cc079582b..d900a7cce 100755 --- a/core/src/main/scala/kafka/controller/ControllerChannelManager.scala +++ b/core/src/main/scala/kafka/controller/ControllerChannelManager.scala @@ -38,6 +38,7 @@ import org.apache.kafka.common.security.JaasContext import org.apache.kafka.common.security.auth.SecurityProtocol 
import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.common.{KafkaException, Node, Reconfigurable, TopicPartition, Uuid} +import org.apache.kafka.server.common.MetadataVersion._ import scala.jdk.CollectionConverters._ import scala.collection.mutable.HashMap @@ -390,7 +391,7 @@ abstract class AbstractControllerBrokerRequestBatch(config: KafkaConfig, .setRemovingReplicas(replicaAssignment.removingReplicas.map(Integer.valueOf).asJava) .setIsNew(isNew || alreadyNew) - if (config.interBrokerProtocolVersion >= KAFKA_3_2_IV0) { + if (config.interBrokerProtocolVersion.isAtLeast(IBP_3_2_IV0)) { partitionState.setLeaderRecoveryState(leaderAndIsr.leaderRecoveryState.value) } @@ -460,12 +461,12 @@ abstract class AbstractControllerBrokerRequestBatch(config: KafkaConfig, private def sendLeaderAndIsrRequest(controllerEpoch: Int, stateChangeLog: StateChangeLogger): Unit = { val leaderAndIsrRequestVersion: Short = - if (config.interBrokerProtocolVersion >= KAFKA_3_2_IV0) 6 - else if (config.interBrokerProtocolVersion >= KAFKA_2_8_IV1) 5 - else if (config.interBrokerProtocolVersion >= KAFKA_2_4_IV1) 4 - else if (config.interBrokerProtocolVersion >= KAFKA_2_4_IV0) 3 - else if (config.interBrokerProtocolVersion >= KAFKA_2_2_IV0) 2 - else if (config.interBrokerProtocolVersion >= KAFKA_1_0_IV0) 1 + if (config.interBrokerProtocolVersion.isAtLeast(IBP_3_2_IV0)) 6 + else if (config.interBrokerProtocolVersion.isAtLeast(IBP_2_8_IV1)) 5 + else if (config.interBrokerProtocolVersion.isAtLeast(IBP_2_4_IV1)) 4 + else if (config.interBrokerProtocolVersion.isAtLeast(IBP_2_4_IV0)) 3 + else if (config.interBrokerProtocolVersion.isAtLeast(IBP_2_2_IV0)) 2 + else if (config.interBrokerProtocolVersion.isAtLeast(IBP_1_0_IV0)) 1 else 0 leaderAndIsrRequestMap.forKeyValue { (broker, leaderAndIsrPartitionStates) => @@ -511,13 +512,13 @@ abstract class AbstractControllerBrokerRequestBatch(config: KafkaConfig, val partitionStates = updateMetadataRequestPartitionInfoMap.values.toBuffer val updateMetadataRequestVersion: Short = - if (config.interBrokerProtocolVersion >= KAFKA_2_8_IV1) 7 - else if (config.interBrokerProtocolVersion >= KAFKA_2_4_IV1) 6 - else if (config.interBrokerProtocolVersion >= KAFKA_2_2_IV0) 5 - else if (config.interBrokerProtocolVersion >= KAFKA_1_0_IV0) 4 - else if (config.interBrokerProtocolVersion >= KAFKA_0_10_2_IV0) 3 - else if (config.interBrokerProtocolVersion >= KAFKA_0_10_0_IV1) 2 - else if (config.interBrokerProtocolVersion >= KAFKA_0_9_0) 1 + if (config.interBrokerProtocolVersion.isAtLeast(IBP_2_8_IV1)) 7 + else if (config.interBrokerProtocolVersion.isAtLeast(IBP_2_4_IV1)) 6 + else if (config.interBrokerProtocolVersion.isAtLeast(IBP_2_2_IV0)) 5 + else if (config.interBrokerProtocolVersion.isAtLeast(IBP_1_0_IV0)) 4 + else if (config.interBrokerProtocolVersion.isAtLeast(IBP_0_10_2_IV0)) 3 + else if (config.interBrokerProtocolVersion.isAtLeast(IBP_0_10_0_IV1)) 2 + else if (config.interBrokerProtocolVersion.isAtLeast(IBP_0_9_0)) 1 else 0 val liveBrokers = controllerContext.liveOrShuttingDownBrokers.iterator.map { broker => @@ -567,9 +568,9 @@ abstract class AbstractControllerBrokerRequestBatch(config: KafkaConfig, private def sendStopReplicaRequests(controllerEpoch: Int, stateChangeLog: StateChangeLogger): Unit = { val traceEnabled = stateChangeLog.isTraceEnabled val stopReplicaRequestVersion: Short = - if (config.interBrokerProtocolVersion >= KAFKA_2_6_IV0) 3 - else if (config.interBrokerProtocolVersion >= KAFKA_2_4_IV1) 2 - else if (config.interBrokerProtocolVersion >= 
KAFKA_2_2_IV0) 1 + if (config.interBrokerProtocolVersion.isAtLeast(IBP_2_6_IV0)) 3 + else if (config.interBrokerProtocolVersion.isAtLeast(IBP_2_4_IV1)) 2 + else if (config.interBrokerProtocolVersion.isAtLeast(IBP_2_2_IV0)) 1 else 0 def responseCallback(brokerId: Int, isPartitionDeleted: TopicPartition => Boolean) diff --git a/core/src/main/scala/kafka/controller/KafkaController.scala b/core/src/main/scala/kafka/controller/KafkaController.scala index bf83ab9d6..baac815b8 100755 --- a/core/src/main/scala/kafka/controller/KafkaController.scala +++ b/core/src/main/scala/kafka/controller/KafkaController.scala @@ -331,36 +331,36 @@ class KafkaController(val config: KafkaConfig, * This method sets up the FeatureZNode with enabled status, which means that the finalized * features stored in the FeatureZNode are active. The enabled status should be written by the * controller to the FeatureZNode only when the broker IBP config is greater than or equal to - * KAFKA_2_7_IV0. + * IBP_2_7_IV0. * * There are multiple cases handled here: * * 1. New cluster bootstrap: * A new Kafka cluster (i.e. it is deployed first time) is almost always started with IBP config - * setting greater than or equal to KAFKA_2_7_IV0. We would like to start the cluster with all + * setting greater than or equal to IBP_2_7_IV0. We would like to start the cluster with all * the possible supported features finalized immediately. Assuming this is the case, the * controller will start up and notice that the FeatureZNode is absent in the new cluster, * it will then create a FeatureZNode (with enabled status) containing the entire list of * supported features as its finalized features. * - * 2. Broker binary upgraded, but IBP config set to lower than KAFKA_2_7_IV0: - * Imagine there was an existing Kafka cluster with IBP config less than KAFKA_2_7_IV0, and the + * 2. Broker binary upgraded, but IBP config set to lower than IBP_2_7_IV0: + * Imagine there was an existing Kafka cluster with IBP config less than IBP_2_7_IV0, and the * broker binary has now been upgraded to a newer version that supports the feature versioning - * system (KIP-584). But the IBP config is still set to lower than KAFKA_2_7_IV0, and may be + * system (KIP-584). But the IBP config is still set to lower than IBP_2_7_IV0, and may be * set to a higher value later. In this case, we want to start with no finalized features and * allow the user to finalize them whenever they are ready i.e. in the future whenever the - * user sets IBP config to be greater than or equal to KAFKA_2_7_IV0, then the user could start + * user sets IBP config to be greater than or equal to IBP_2_7_IV0, then the user could start * finalizing the features. This process ensures we do not enable all the possible features * immediately after an upgrade, which could be harmful to Kafka. * This is how we handle such a case: - * - Before the IBP config upgrade (i.e. IBP config set to less than KAFKA_2_7_IV0), the + * - Before the IBP config upgrade (i.e. IBP config set to less than IBP_2_7_IV0), the * controller will start up and check if the FeatureZNode is absent. * - If the node is absent, it will react by creating a FeatureZNode with disabled status * and empty finalized features. * - Otherwise, if a node already exists in enabled status then the controller will just * flip the status to disabled and clear the finalized features. * - After the IBP config upgrade (i.e. 
IBP config set to greater than or equal to - * KAFKA_2_7_IV0), when the controller starts up it will check if the FeatureZNode exists + * IBP_2_7_IV0), when the controller starts up it will check if the FeatureZNode exists * and whether it is disabled. * - If the node is in disabled status, the controller won’t upgrade all features immediately. * Instead it will just switch the FeatureZNode status to enabled status. This lets the @@ -368,17 +368,17 @@ class KafkaController(val config: KafkaConfig, * - Otherwise, if a node already exists in enabled status then the controller will leave * the node umodified. * - * 3. Broker binary upgraded, with existing cluster IBP config >= KAFKA_2_7_IV0: - * Imagine there was an existing Kafka cluster with IBP config >= KAFKA_2_7_IV0, and the broker - * binary has just been upgraded to a newer version (that supports IBP config KAFKA_2_7_IV0 and + * 3. Broker binary upgraded, with existing cluster IBP config >= IBP_2_7_IV0: + * Imagine there was an existing Kafka cluster with IBP config >= IBP_2_7_IV0, and the broker + * binary has just been upgraded to a newer version (that supports IBP config IBP_2_7_IV0 and * higher). The controller will start up and find that a FeatureZNode is already present with * enabled status and existing finalized features. In such a case, the controller leaves the node * unmodified. * * 4. Broker downgrade: * Imagine that a Kafka cluster exists already and the IBP config is greater than or equal to - * KAFKA_2_7_IV0. Then, the user decided to downgrade the cluster by setting IBP config to a - * value less than KAFKA_2_7_IV0. This means the user is also disabling the feature versioning + * IBP_2_7_IV0. Then, the user decided to downgrade the cluster by setting IBP config to a + * value less than IBP_2_7_IV0. This means the user is also disabling the feature versioning * system (KIP-584). In this case, when the controller starts up with the lower IBP config, it * will switch the FeatureZNode status to disabled with empty features. */ @@ -413,14 +413,14 @@ class KafkaController(val config: KafkaConfig, * Sets up the FeatureZNode with disabled status. This status means the feature versioning system * (KIP-584) is disabled, and, the finalized features stored in the FeatureZNode are not relevant. * This status should be written by the controller to the FeatureZNode only when the broker - * IBP config is less than KAFKA_2_7_IV0. + * IBP config is less than IBP_2_7_IV0. * * NOTE: * 1. When this method returns, existing finalized features (if any) will be cleared from the * FeatureZNode. * 2. This method, unlike enableFeatureVersioning() need not wait for the FinalizedFeatureCache * to be updated, because, such updates to the cache (via FinalizedFeatureChangeListener) - * are disabled when IBP config is < than KAFKA_2_7_IV0. + * are disabled when IBP config is < than IBP_2_7_IV0. 
*/ private def disableFeatureVersioning(): Unit = { val newNode = FeatureZNode(FeatureZNodeStatus.Disabled, Features.emptyFinalizedFeatures()) diff --git a/core/src/main/scala/kafka/controller/PartitionStateMachine.scala b/core/src/main/scala/kafka/controller/PartitionStateMachine.scala index 3316e0579..71b163a2e 100755 --- a/core/src/main/scala/kafka/controller/PartitionStateMachine.scala +++ b/core/src/main/scala/kafka/controller/PartitionStateMachine.scala @@ -16,7 +16,6 @@ */ package kafka.controller -import kafka.api.KAFKA_3_2_IV0 import kafka.api.LeaderAndIsr import kafka.common.StateChangeFailedException import kafka.controller.Election._ @@ -28,8 +27,10 @@ import kafka.zk.KafkaZkClient.UpdateLeaderAndIsrResult import kafka.zk.TopicPartitionStateZNode import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.ControllerMovedException +import org.apache.kafka.server.common.MetadataVersion.IBP_3_2_IV0 import org.apache.zookeeper.KeeperException import org.apache.zookeeper.KeeperException.Code + import scala.collection.{Map, Seq, mutable} abstract class PartitionStateMachine(controllerContext: ControllerContext) extends Logging { @@ -132,7 +133,7 @@ class ZkPartitionStateMachine(config: KafkaConfig, controllerBrokerRequestBatch: ControllerBrokerRequestBatch) extends PartitionStateMachine(controllerContext) { - private val isLeaderRecoverySupported = config.interBrokerProtocolVersion >= KAFKA_3_2_IV0 + private val isLeaderRecoverySupported = config.interBrokerProtocolVersion.isAtLeast(IBP_3_2_IV0) private val controllerId = config.brokerId this.logIdent = s"[PartitionStateMachine controllerId=$controllerId] " diff --git a/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala b/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala index 24f9ad5fe..e1bb7ccf2 100644 --- a/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala +++ b/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala @@ -26,7 +26,6 @@ import java.util.concurrent.locks.ReentrantLock import java.util.concurrent.{ConcurrentHashMap, TimeUnit} import com.yammer.metrics.core.Gauge -import kafka.api.{ApiVersion, KAFKA_0_10_1_IV0, KAFKA_2_1_IV0, KAFKA_2_1_IV1, KAFKA_2_3_IV0} import kafka.common.OffsetAndMetadata import kafka.internals.generated.{GroupMetadataValue, OffsetCommitKey, OffsetCommitValue, GroupMetadataKey => GroupMetadataKeyData} import kafka.log.AppendOrigin @@ -47,13 +46,15 @@ import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.requests.{OffsetCommitRequest, OffsetFetchResponse} import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.common.{KafkaException, MessageFormatter, TopicPartition} +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.{IBP_0_10_1_IV0, IBP_2_1_IV0, IBP_2_1_IV1, IBP_2_3_IV0} import scala.collection._ import scala.collection.mutable.ArrayBuffer import scala.jdk.CollectionConverters._ class GroupMetadataManager(brokerId: Int, - interBrokerProtocolVersion: ApiVersion, + interBrokerProtocolVersion: MetadataVersion, config: OffsetConfig, val replicaManager: ReplicaManager, time: Time, @@ -1074,14 +1075,14 @@ object GroupMetadataManager { * Generates the payload for offset commit message from given offset and metadata * * @param offsetAndMetadata consumer's current offset and metadata - * @param apiVersion the api version + * @param metadataVersion the api version * @return payload 
for offset commit message */ def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata, - apiVersion: ApiVersion): Array[Byte] = { + metadataVersion: MetadataVersion): Array[Byte] = { val version = - if (apiVersion < KAFKA_2_1_IV0 || offsetAndMetadata.expireTimestamp.nonEmpty) 1.toShort - else if (apiVersion < KAFKA_2_1_IV1) 2.toShort + if (metadataVersion.isLessThan(IBP_2_1_IV0) || offsetAndMetadata.expireTimestamp.nonEmpty) 1.toShort + else if (metadataVersion.isLessThan(IBP_2_1_IV1)) 2.toShort else 3.toShort MessageUtil.toVersionPrefixedBytes(version, new OffsetCommitValue() .setOffset(offsetAndMetadata.offset) @@ -1099,17 +1100,17 @@ object GroupMetadataManager { * * @param groupMetadata current group metadata * @param assignment the assignment for the rebalancing generation - * @param apiVersion the api version + * @param metadataVersion the api version * @return payload for offset commit message */ def groupMetadataValue(groupMetadata: GroupMetadata, assignment: Map[String, Array[Byte]], - apiVersion: ApiVersion): Array[Byte] = { + metadataVersion: MetadataVersion): Array[Byte] = { val version = - if (apiVersion < KAFKA_0_10_1_IV0) 0.toShort - else if (apiVersion < KAFKA_2_1_IV0) 1.toShort - else if (apiVersion < KAFKA_2_3_IV0) 2.toShort + if (metadataVersion.isLessThan(IBP_0_10_1_IV0)) 0.toShort + else if (metadataVersion.isLessThan(IBP_2_1_IV0)) 1.toShort + else if (metadataVersion.isLessThan(IBP_2_3_IV0)) 2.toShort else 3.toShort MessageUtil.toVersionPrefixedBytes(version, new GroupMetadataValue() diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala index 62c70d911..94baf0f97 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala @@ -19,7 +19,7 @@ package kafka.coordinator.transaction import java.util import java.util.concurrent.{BlockingQueue, ConcurrentHashMap, LinkedBlockingQueue} -import kafka.api.KAFKA_2_8_IV0 + import kafka.common.{InterBrokerSendThread, RequestAndCompletionHandler} import kafka.metrics.KafkaMetricsGroup import kafka.server.{KafkaConfig, MetadataCache, RequestLocal} @@ -34,6 +34,7 @@ import org.apache.kafka.common.requests.{TransactionResult, WriteTxnMarkersReque import org.apache.kafka.common.security.JaasContext import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.common.{Node, Reconfigurable, TopicPartition} +import org.apache.kafka.server.common.MetadataVersion.IBP_2_8_IV0 import scala.collection.{concurrent, immutable} import scala.jdk.CollectionConverters._ @@ -147,7 +148,7 @@ class TransactionMarkerChannelManager( private val transactionsWithPendingMarkers = new ConcurrentHashMap[String, PendingCompleteTxn] val writeTxnMarkersRequestVersion: Short = - if (config.interBrokerProtocolVersion >= KAFKA_2_8_IV0) 1 + if (config.interBrokerProtocolVersion.isAtLeast(IBP_2_8_IV0)) 1 else 0 newGauge("UnknownDestinationQueueSize", () => markersQueueForUnknownBroker.totalNumMarkers) diff --git a/core/src/main/scala/kafka/log/LogConfig.scala b/core/src/main/scala/kafka/log/LogConfig.scala index 3f70f8832..7b008fe26 100755 --- a/core/src/main/scala/kafka/log/LogConfig.scala +++ b/core/src/main/scala/kafka/log/LogConfig.scala @@ -17,7 +17,6 @@ package kafka.log -import kafka.api.{ApiVersion, ApiVersionValidator, KAFKA_3_0_IV1} import kafka.log.LogConfig.configDef 
import kafka.message.BrokerCompressionCodec import kafka.server.{KafkaConfig, ThrottledReplicaListValidator} @@ -29,9 +28,12 @@ import org.apache.kafka.common.record.{LegacyRecord, RecordVersion, TimestampTyp import org.apache.kafka.common.utils.{ConfigUtils, Utils} import org.apache.kafka.metadata.ConfigSynonym import org.apache.kafka.metadata.ConfigSynonym.{HOURS_TO_MILLISECONDS, MINUTES_TO_MILLISECONDS} - import java.util.Arrays.asList import java.util.{Collections, Locale, Properties} + +import org.apache.kafka.server.common.{MetadataVersion, MetadataVersionValidator} +import org.apache.kafka.server.common.MetadataVersion._ + import scala.annotation.nowarn import scala.collection.{Map, mutable} import scala.jdk.CollectionConverters._ @@ -103,7 +105,7 @@ case class LogConfig(props: java.util.Map[_, _], overriddenConfigs: Set[String] /* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details */ @deprecated("3.0") - val messageFormatVersion = ApiVersion(getString(LogConfig.MessageFormatVersionProp)) + val messageFormatVersion = MetadataVersion.fromVersionString(getString(LogConfig.MessageFormatVersionProp)) val messageTimestampType = TimestampType.forName(getString(LogConfig.MessageTimestampTypeProp)) val messageTimestampDifferenceMaxMs = getLong(LogConfig.MessageTimestampDifferenceMaxMsProp).longValue @@ -157,7 +159,7 @@ case class LogConfig(props: java.util.Map[_, _], overriddenConfigs: Set[String] def remoteLogConfig = _remoteLogConfig @nowarn("cat=deprecation") - def recordVersion = messageFormatVersion.recordVersion + def recordVersion = messageFormatVersion.highestSupportedRecordVersion def randomSegmentJitter: Long = if (segmentJitterMs == 0) 0 else Utils.abs(scala.util.Random.nextInt()) % math.min(segmentJitterMs, segmentMs) @@ -367,7 +369,7 @@ object LogConfig { MEDIUM, CompressionTypeDoc, KafkaConfig.CompressionTypeProp) .define(PreAllocateEnableProp, BOOLEAN, Defaults.PreAllocateEnable, MEDIUM, PreAllocateEnableDoc, KafkaConfig.LogPreAllocateProp) - .define(MessageFormatVersionProp, STRING, Defaults.MessageFormatVersion, ApiVersionValidator, MEDIUM, MessageFormatVersionDoc, + .define(MessageFormatVersionProp, STRING, Defaults.MessageFormatVersion, new MetadataVersionValidator(), MEDIUM, MessageFormatVersionDoc, KafkaConfig.LogMessageFormatVersionProp) .define(MessageTimestampTypeProp, STRING, Defaults.MessageTimestampType, in("CreateTime", "LogAppendTime"), MEDIUM, MessageTimestampTypeDoc, KafkaConfig.LogMessageTimestampTypeProp) @@ -560,17 +562,17 @@ object LogConfig { logProps } - def shouldIgnoreMessageFormatVersion(interBrokerProtocolVersion: ApiVersion): Boolean = - interBrokerProtocolVersion >= KAFKA_3_0_IV1 + def shouldIgnoreMessageFormatVersion(interBrokerProtocolVersion: MetadataVersion): Boolean = + interBrokerProtocolVersion.isAtLeast(IBP_3_0_IV1) class MessageFormatVersion(messageFormatVersionString: String, interBrokerProtocolVersionString: String) { - val messageFormatVersion = ApiVersion(messageFormatVersionString) - private val interBrokerProtocolVersion = ApiVersion(interBrokerProtocolVersionString) + val messageFormatVersion = MetadataVersion.fromVersionString(messageFormatVersionString) + private val interBrokerProtocolVersion = MetadataVersion.fromVersionString(interBrokerProtocolVersionString) def shouldIgnore: Boolean = shouldIgnoreMessageFormatVersion(interBrokerProtocolVersion) def shouldWarn: Boolean = - interBrokerProtocolVersion >= KAFKA_3_0_IV1 && messageFormatVersion.recordVersion.precedes(RecordVersion.V2) + 
interBrokerProtocolVersion.isAtLeast(IBP_3_0_IV1) && messageFormatVersion.highestSupportedRecordVersion.precedes(RecordVersion.V2) @nowarn("cat=deprecation") def topicWarningMessage(topicName: String): String = { diff --git a/core/src/main/scala/kafka/log/LogManager.scala b/core/src/main/scala/kafka/log/LogManager.scala index b81f6a928..4023d2734 100755 --- a/core/src/main/scala/kafka/log/LogManager.scala +++ b/core/src/main/scala/kafka/log/LogManager.scala @@ -17,13 +17,12 @@ package kafka.log -import kafka.api.ApiVersion import kafka.log.LogConfig.MessageFormatVersion - import java.io._ import java.nio.file.Files import java.util.concurrent._ import java.util.concurrent.atomic.AtomicInteger + import kafka.metrics.KafkaMetricsGroup import kafka.server.checkpoints.OffsetCheckpointFile import kafka.server.metadata.ConfigRepository @@ -38,8 +37,10 @@ import scala.collection._ import scala.collection.mutable.ArrayBuffer import scala.util.{Failure, Success, Try} import kafka.utils.Implicits._ - import java.util.Properties + +import org.apache.kafka.server.common.MetadataVersion + import scala.annotation.nowarn /** @@ -65,7 +66,7 @@ class LogManager(logDirs: Seq[File], val retentionCheckMs: Long, val maxTransactionTimeoutMs: Int, val maxPidExpirationMs: Int, - interBrokerProtocolVersion: ApiVersion, + interBrokerProtocolVersion: MetadataVersion, scheduler: Scheduler, brokerTopicStats: BrokerTopicStats, logDirFailureChannel: LogDirFailureChannel, diff --git a/core/src/main/scala/kafka/log/LogValidator.scala b/core/src/main/scala/kafka/log/LogValidator.scala index 0949c1110..74ea83297 100644 --- a/core/src/main/scala/kafka/log/LogValidator.scala +++ b/core/src/main/scala/kafka/log/LogValidator.scala @@ -17,7 +17,7 @@ package kafka.log import java.nio.ByteBuffer -import kafka.api.{ApiVersion, KAFKA_2_1_IV0} + import kafka.common.{LongRef, RecordValidationException} import kafka.message.{CompressionCodec, NoCompressionCodec, ZStdCompressionCodec} import kafka.server.{BrokerTopicStats, RequestLocal} @@ -29,6 +29,8 @@ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.ProduceResponse.RecordError import org.apache.kafka.common.utils.Time +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.IBP_2_1_IV0 import scala.collection.{Seq, mutable} import scala.jdk.CollectionConverters._ @@ -94,7 +96,7 @@ private[log] object LogValidator extends Logging { timestampDiffMaxMs: Long, partitionLeaderEpoch: Int, origin: AppendOrigin, - interBrokerProtocolVersion: ApiVersion, + interBrokerProtocolVersion: MetadataVersion, brokerTopicStats: BrokerTopicStats, requestLocal: RequestLocal): ValidationAndOffsetAssignResult = { if (sourceCodec == NoCompressionCodec && targetCodec == NoCompressionCodec) { @@ -365,11 +367,11 @@ private[log] object LogValidator extends Logging { timestampDiffMaxMs: Long, partitionLeaderEpoch: Int, origin: AppendOrigin, - interBrokerProtocolVersion: ApiVersion, + interBrokerProtocolVersion: MetadataVersion, brokerTopicStats: BrokerTopicStats, requestLocal: RequestLocal): ValidationAndOffsetAssignResult = { - if (targetCodec == ZStdCompressionCodec && interBrokerProtocolVersion < KAFKA_2_1_IV0) + if (targetCodec == ZStdCompressionCodec && interBrokerProtocolVersion.isLessThan(IBP_2_1_IV0)) throw new UnsupportedCompressionTypeException("Produce requests to inter.broker.protocol.version < 2.1 broker " + "are not allowed to use ZStandard compression") diff 
--git a/core/src/main/scala/kafka/log/UnifiedLog.scala b/core/src/main/scala/kafka/log/UnifiedLog.scala index 7b81523cf..e84b3238e 100644 --- a/core/src/main/scala/kafka/log/UnifiedLog.scala +++ b/core/src/main/scala/kafka/log/UnifiedLog.scala @@ -18,12 +18,11 @@ package kafka.log import com.yammer.metrics.core.MetricName - import java.io.{File, IOException} import java.nio.file.Files import java.util.Optional import java.util.concurrent.TimeUnit -import kafka.api.{ApiVersion, KAFKA_0_10_0_IV0} + import kafka.common.{LongRef, OffsetsOutOfOrderException, UnexpectedAppendOffsetException} import kafka.log.AppendOrigin.RaftLeader import kafka.message.{BrokerCompressionCodec, CompressionCodec, NoCompressionCodec} @@ -41,6 +40,8 @@ import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_ import org.apache.kafka.common.requests.ProduceResponse.RecordError import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.common.{InvalidRecordException, KafkaException, TopicPartition, Uuid} +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.IBP_0_10_0_IV0 import scala.annotation.nowarn import scala.jdk.CollectionConverters._ @@ -717,7 +718,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, def appendAsLeader(records: MemoryRecords, leaderEpoch: Int, origin: AppendOrigin = AppendOrigin.Client, - interBrokerProtocolVersion: ApiVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion: MetadataVersion = MetadataVersion.latest, requestLocal: RequestLocal = RequestLocal.NoCaching): LogAppendInfo = { val validateAndAssignOffsets = origin != AppendOrigin.RaftLeader append(records, origin, interBrokerProtocolVersion, validateAndAssignOffsets, leaderEpoch, Some(requestLocal), ignoreRecordSize = false) @@ -733,7 +734,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, def appendAsFollower(records: MemoryRecords): LogAppendInfo = { append(records, origin = AppendOrigin.Replication, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, validateAndAssignOffsets = false, leaderEpoch = -1, None, @@ -761,7 +762,7 @@ class UnifiedLog(@volatile var logStartOffset: Long, */ private def append(records: MemoryRecords, origin: AppendOrigin, - interBrokerProtocolVersion: ApiVersion, + interBrokerProtocolVersion: MetadataVersion, validateAndAssignOffsets: Boolean, leaderEpoch: Int, requestLocal: Option[RequestLocal], @@ -1225,12 +1226,12 @@ class UnifiedLog(@volatile var logStartOffset: Long, maybeHandleIOException(s"Error while fetching offset by timestamp for $topicPartition in dir ${dir.getParent}") { debug(s"Searching offset for timestamp $targetTimestamp") - if (config.messageFormatVersion < KAFKA_0_10_0_IV0 && + if (config.messageFormatVersion.isLessThan(IBP_0_10_0_IV0) && targetTimestamp != ListOffsetsRequest.EARLIEST_TIMESTAMP && targetTimestamp != ListOffsetsRequest.LATEST_TIMESTAMP) throw new UnsupportedForMessageFormatException(s"Cannot search offsets based on timestamp because message format version " + s"for partition $topicPartition is ${config.messageFormatVersion} which is earlier than the minimum " + - s"required version $KAFKA_0_10_0_IV0") + s"required version $IBP_0_10_0_IV0") // For the earliest and latest, we do not need to return the timestamp. 
if (targetTimestamp == ListOffsetsRequest.EARLIEST_TIMESTAMP) { diff --git a/core/src/main/scala/kafka/security/authorizer/AclAuthorizer.scala b/core/src/main/scala/kafka/security/authorizer/AclAuthorizer.scala index e2ac9737f..0c2b6f619 100644 --- a/core/src/main/scala/kafka/security/authorizer/AclAuthorizer.scala +++ b/core/src/main/scala/kafka/security/authorizer/AclAuthorizer.scala @@ -20,7 +20,6 @@ import java.{lang, util} import java.util.concurrent.{CompletableFuture, CompletionStage} import com.typesafe.scalalogging.Logger -import kafka.api.KAFKA_2_0_IV1 import kafka.security.authorizer.AclEntry.ResourceSeparator import kafka.server.{KafkaConfig, KafkaServer} import kafka.utils._ @@ -37,6 +36,7 @@ import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.utils.{SecurityUtils, Time} import org.apache.kafka.server.authorizer.AclDeleteResult.AclBindingDeleteResult import org.apache.kafka.server.authorizer._ +import org.apache.kafka.server.common.MetadataVersion.IBP_2_0_IV1 import org.apache.zookeeper.client.ZKClientConfig import scala.annotation.nowarn @@ -182,7 +182,7 @@ class AclAuthorizer extends Authorizer with Logging { metricGroup = "kafka.security", metricType = "AclAuthorizer", createChrootIfNecessary = true) zkClient.createAclPaths() - extendedAclSupport = kafkaConfig.interBrokerProtocolVersion >= KAFKA_2_0_IV1 + extendedAclSupport = kafkaConfig.interBrokerProtocolVersion.isAtLeast(IBP_2_0_IV1) // Start change listeners first and then populate the cache so that there is no timing window // between loading cache and processing change notifications. @@ -207,7 +207,7 @@ class AclAuthorizer extends Authorizer with Logging { try { if (!extendedAclSupport && aclBinding.pattern.patternType == PatternType.PREFIXED) { throw new UnsupportedVersionException(s"Adding ACLs on prefixed resource patterns requires " + - s"${KafkaConfig.InterBrokerProtocolVersionProp} of $KAFKA_2_0_IV1 or greater") + s"${KafkaConfig.InterBrokerProtocolVersionProp} of $IBP_2_0_IV1 or greater") } validateAclBinding(aclBinding) true diff --git a/core/src/main/scala/kafka/server/AbstractFetcherThread.scala b/core/src/main/scala/kafka/server/AbstractFetcherThread.scala index 507d6bb95..8aa3f1e03 100755 --- a/core/src/main/scala/kafka/server/AbstractFetcherThread.scala +++ b/core/src/main/scala/kafka/server/AbstractFetcherThread.scala @@ -557,11 +557,11 @@ abstract class AbstractFetcherThread(name: String, * For each topic partition, the offset to truncate to is calculated based on leader's returned * epoch and offset: * -- If the leader replied with undefined epoch offset, we must use the high watermark. This can - * happen if 1) the leader is still using message format older than KAFKA_0_11_0; 2) the follower + * happen if 1) the leader is still using message format older than IBP_0_11_0; 2) the follower * requested leader epoch < the first leader epoch known to the leader. * -- If the leader replied with the valid offset but undefined leader epoch, we truncate to * leader's offset if it is lower than follower's Log End Offset. This may happen if the - * leader is on the inter-broker protocol version < KAFKA_2_0_IV0 + * leader is on the inter-broker protocol version < IBP_2_0_IV0 * -- If the leader replied with leader epoch not known to the follower, we truncate to the * end offset of the largest epoch that is smaller than the epoch the leader replied with, and * send OffsetsForLeaderEpochRequest with that leader epoch. 
In a more rare case, where the @@ -584,7 +584,7 @@ abstract class AbstractFetcherThread(name: String, s"The initial fetch offset ${partitionStates.stateValue(tp).fetchOffset} will be used for truncation.") OffsetTruncationState(partitionStates.stateValue(tp).fetchOffset, truncationCompleted = true) } else if (leaderEpochOffset.leaderEpoch == UNDEFINED_EPOCH) { - // either leader or follower or both use inter-broker protocol version < KAFKA_2_0_IV0 + // either leader or follower or both use inter-broker protocol version < IBP_2_0_IV0 // (version 0 of OffsetForLeaderEpoch request/response) warn(s"Leader or replica is on protocol version where leader epoch is not considered in the OffsetsForLeaderEpoch response. " + s"The leader's offset ${leaderEpochOffset.endOffset} will be used for truncation in $tp.") diff --git a/core/src/main/scala/kafka/server/AlterPartitionManager.scala b/core/src/main/scala/kafka/server/AlterPartitionManager.scala index 8f5e4438c..d26cd542a 100644 --- a/core/src/main/scala/kafka/server/AlterPartitionManager.scala +++ b/core/src/main/scala/kafka/server/AlterPartitionManager.scala @@ -20,8 +20,6 @@ import java.util import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.{CompletableFuture, ConcurrentHashMap, TimeUnit} -import kafka.api.ApiVersion -import kafka.api.KAFKA_3_2_IV0 import kafka.api.LeaderAndIsr import kafka.metrics.KafkaMetricsGroup import kafka.utils.{KafkaScheduler, Logging, Scheduler} @@ -35,6 +33,7 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{AlterPartitionRequest, AlterPartitionResponse} import org.apache.kafka.common.utils.Time import org.apache.kafka.metadata.LeaderRecoveryState +import org.apache.kafka.server.common.MetadataVersion import scala.collection.mutable import scala.collection.mutable.ListBuffer @@ -122,7 +121,7 @@ class DefaultAlterPartitionManager( val time: Time, val brokerId: Int, val brokerEpochSupplier: () => Long, - ibpVersion: ApiVersion + ibpVersion: MetadataVersion ) extends AlterPartitionManager with Logging with KafkaMetricsGroup { // Used to allow only one pending ISR update per partition (visible for testing) @@ -234,7 +233,7 @@ class DefaultAlterPartitionManager( .setNewIsr(item.leaderAndIsr.isr.map(Integer.valueOf).asJava) .setPartitionEpoch(item.leaderAndIsr.partitionEpoch) - if (ibpVersion >= KAFKA_3_2_IV0) { + if (ibpVersion.isAtLeast(MetadataVersion.IBP_3_2_IV0)) { partitionData.setLeaderRecoveryState(item.leaderAndIsr.leaderRecoveryState.value) } diff --git a/core/src/main/scala/kafka/server/ApiVersionManager.scala b/core/src/main/scala/kafka/server/ApiVersionManager.scala index e3d62c6ac..61a59ec2c 100644 --- a/core/src/main/scala/kafka/server/ApiVersionManager.scala +++ b/core/src/main/scala/kafka/server/ApiVersionManager.scala @@ -16,12 +16,13 @@ */ package kafka.server -import kafka.api.ApiVersion import kafka.network import kafka.network.RequestChannel +import org.apache.kafka.common.feature.Features import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.common.requests.ApiVersionsResponse +import org.apache.kafka.server.common.MetadataVersion import scala.jdk.CollectionConverters._ @@ -69,7 +70,7 @@ class SimpleApiVersionManager( class DefaultApiVersionManager( val listenerType: ListenerType, - interBrokerProtocolVersion: ApiVersion, + interBrokerProtocolVersion: MetadataVersion, forwardingManager: Option[ForwardingManager], features: BrokerFeatures, featureCache: 
FinalizedFeatureCache @@ -81,19 +82,21 @@ class DefaultApiVersionManager( val controllerApiVersions = forwardingManager.flatMap(_.controllerApiVersions) finalizedFeaturesOpt match { - case Some(finalizedFeatures) => ApiVersion.apiVersionsResponse( + case Some(finalizedFeatures) => ApiVersionsResponse.createApiVersionsResponse( throttleTimeMs, - interBrokerProtocolVersion.recordVersion, + interBrokerProtocolVersion.highestSupportedRecordVersion, supportedFeatures, finalizedFeatures.features, finalizedFeatures.epoch, - controllerApiVersions, + controllerApiVersions.orNull, listenerType) - case None => ApiVersion.apiVersionsResponse( + case None => ApiVersionsResponse.createApiVersionsResponse( throttleTimeMs, - interBrokerProtocolVersion.recordVersion, + interBrokerProtocolVersion.highestSupportedRecordVersion, supportedFeatures, - controllerApiVersions, + Features.emptyFinalizedFeatures, + ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, + controllerApiVersions.orNull, listenerType) } } diff --git a/core/src/main/scala/kafka/server/ConfigHandler.scala b/core/src/main/scala/kafka/server/ConfigHandler.scala index ca6a42b82..13be87287 100644 --- a/core/src/main/scala/kafka/server/ConfigHandler.scala +++ b/core/src/main/scala/kafka/server/ConfigHandler.scala @@ -105,7 +105,7 @@ class TopicConfigHandler(private val logManager: LogManager, kafkaConfig: KafkaC if (messageFormatVersion.shouldWarn) warn(messageFormatVersion.topicWarningMessage(topic)) Some(LogConfig.MessageFormatVersionProp) - } else if (kafkaConfig.interBrokerProtocolVersion < messageFormatVersion.messageFormatVersion) { + } else if (kafkaConfig.interBrokerProtocolVersion.isLessThan(messageFormatVersion.messageFormatVersion)) { warn(s"Topic configuration ${LogConfig.MessageFormatVersionProp} is ignored for `$topic` because `$versionString` " + s"is higher than what is allowed by the inter-broker protocol version `${kafkaConfig.interBrokerProtocolVersionString}`") Some(LogConfig.MessageFormatVersionProp) diff --git a/core/src/main/scala/kafka/server/ControllerServer.scala b/core/src/main/scala/kafka/server/ControllerServer.scala index 498402370..ba38aa178 100644 --- a/core/src/main/scala/kafka/server/ControllerServer.scala +++ b/core/src/main/scala/kafka/server/ControllerServer.scala @@ -22,7 +22,6 @@ import java.util.OptionalLong import java.util.concurrent.locks.ReentrantLock import java.util.concurrent.{CompletableFuture, TimeUnit} -import kafka.api.KAFKA_3_2_IV0 import kafka.cluster.Broker.ServerInfo import kafka.metrics.{KafkaMetricsGroup, LinuxIoMetricsCollector} import kafka.network.{DataPlaneAcceptor, SocketServer} @@ -43,6 +42,7 @@ import org.apache.kafka.raft.RaftConfig import org.apache.kafka.raft.RaftConfig.AddressSpec import org.apache.kafka.server.authorizer.Authorizer import org.apache.kafka.server.common.ApiMessageAndVersion +import org.apache.kafka.server.common.MetadataVersion.IBP_3_2_IV0 import org.apache.kafka.common.config.ConfigException import org.apache.kafka.metadata.authorizer.ClusterMetadataAuthorizer import org.apache.kafka.server.metrics.KafkaYammerMetrics @@ -177,7 +177,7 @@ class ControllerServer( setQuorumFeatures(quorumFeatures). setDefaultReplicationFactor(config.defaultReplicationFactor.toShort). setDefaultNumPartitions(config.numPartitions.intValue()). - setIsLeaderRecoverySupported(config.interBrokerProtocolVersion >= KAFKA_3_2_IV0). + setIsLeaderRecoverySupported(config.interBrokerProtocolVersion.isAtLeast(IBP_3_2_IV0)). 
setSessionTimeoutNs(TimeUnit.NANOSECONDS.convert(config.brokerSessionTimeoutMs.longValue(), TimeUnit.MILLISECONDS)). setSnapshotMaxNewRecordBytes(config.metadataSnapshotMaxNewRecordBytes). diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index 5793a87dc..84c14e069 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -18,7 +18,7 @@ package kafka.server import kafka.admin.AdminUtils -import kafka.api.{ApiVersion, ElectLeadersRequestOps, KAFKA_0_11_0_IV0, KAFKA_2_3_IV0} +import kafka.api.ElectLeadersRequestOps import kafka.common.OffsetAndMetadata import kafka.controller.ReplicaAssignment import kafka.coordinator.group._ @@ -79,6 +79,9 @@ import java.util.concurrent.ConcurrentHashMap import java.util.concurrent.atomic.AtomicInteger import java.util.{Collections, Optional} +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.{IBP_0_11_0_IV0, IBP_2_3_IV0} + import scala.annotation.nowarn import scala.collection.{Map, Seq, Set, immutable, mutable} import scala.jdk.CollectionConverters._ @@ -430,7 +433,7 @@ class KafkaApis(val requestChannel: RequestChannel, .setTopics(responseTopicList) .setThrottleTimeMs(requestThrottleMs) )) - } else if (offsetCommitRequest.data.groupInstanceId != null && config.interBrokerProtocolVersion < KAFKA_2_3_IV0) { + } else if (offsetCommitRequest.data.groupInstanceId != null && config.interBrokerProtocolVersion.isLessThan(IBP_2_3_IV0)) { // Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic // until we are sure that all brokers support it. If static group being loaded by an older coordinator, it will discard // the group.instance.id field, so static members could accidentally become "dynamic", which leads to wrong states. @@ -1664,7 +1667,7 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendResponseMaybeThrottle(request, createResponse) } - if (joinGroupRequest.data.groupInstanceId != null && config.interBrokerProtocolVersion < KAFKA_2_3_IV0) { + if (joinGroupRequest.data.groupInstanceId != null && config.interBrokerProtocolVersion.isLessThan(IBP_2_3_IV0)) { // Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic // until we are sure that all brokers support it. If static group being loaded by an older coordinator, it will discard // the group.instance.id field, so static members could accidentally become "dynamic", which leads to wrong states. @@ -1718,7 +1721,7 @@ class KafkaApis(val requestChannel: RequestChannel, )) } - if (syncGroupRequest.data.groupInstanceId != null && config.interBrokerProtocolVersion < KAFKA_2_3_IV0) { + if (syncGroupRequest.data.groupInstanceId != null && config.interBrokerProtocolVersion.isLessThan(IBP_2_3_IV0)) { // Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic // until we are sure that all brokers support it. If static group being loaded by an older coordinator, it will discard // the group.instance.id field, so static members could accidentally become "dynamic", which leads to wrong states. 
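The KafkaApis hunks here all make the same mechanical substitution: ordering comparisons on the old Scala `ApiVersion` objects become explicit `isAtLeast`/`isLessThan` calls on the Java `MetadataVersion` enum from `server-common`. A minimal sketch of the pattern, assuming only calls visible in this diff (the object and method names below are illustrative, not part of the patch):

```scala
import org.apache.kafka.server.common.MetadataVersion
import org.apache.kafka.server.common.MetadataVersion.{IBP_2_3_IV0, IBP_3_2_IV0}

object IbpGuardSketch {
  // Old form: config.interBrokerProtocolVersion >= KAFKA_3_2_IV0
  // New form: the same guard expressed against the Java MetadataVersion enum
  def leaderRecoverySupported(ibp: MetadataVersion): Boolean =
    ibp.isAtLeast(IBP_3_2_IV0)

  // Old form: config.interBrokerProtocolVersion < KAFKA_2_3_IV0
  // New form: isLessThan replaces the '<' ordering comparison
  def staticMembershipSupported(ibp: MetadataVersion): Boolean =
    !ibp.isLessThan(IBP_2_3_IV0)
}
```

Usage mirrors the call sites in the surrounding hunks, e.g. `IbpGuardSketch.staticMembershipSupported(MetadataVersion.latest)`.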
@@ -1791,7 +1794,7 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendResponseMaybeThrottle(request, createResponse) } - if (heartbeatRequest.data.groupInstanceId != null && config.interBrokerProtocolVersion < KAFKA_2_3_IV0) { + if (heartbeatRequest.data.groupInstanceId != null && config.interBrokerProtocolVersion.isLessThan(IBP_2_3_IV0)) { // Only enable static membership when IBP >= 2.3, because it is not safe for the broker to use the static member logic // until we are sure that all brokers support it. If static group being loaded by an older coordinator, it will discard // the group.instance.id field, so static members could accidentally become "dynamic", which leads to wrong states. @@ -2229,7 +2232,7 @@ class KafkaApis(val requestChannel: RequestChannel, } def handleEndTxnRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = { - ensureInterBrokerVersion(KAFKA_0_11_0_IV0) + ensureInterBrokerVersion(IBP_0_11_0_IV0) val endTxnRequest = request.body[EndTxnRequest] val transactionalId = endTxnRequest.data.transactionalId @@ -2270,7 +2273,7 @@ class KafkaApis(val requestChannel: RequestChannel, } def handleWriteTxnMarkersRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = { - ensureInterBrokerVersion(KAFKA_0_11_0_IV0) + ensureInterBrokerVersion(IBP_0_11_0_IV0) authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) val writeTxnMarkersRequest = request.body[WriteTxnMarkersRequest] val errors = new ConcurrentHashMap[java.lang.Long, util.Map[TopicPartition, Errors]]() @@ -2375,13 +2378,13 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendResponseExemptThrottle(request, new WriteTxnMarkersResponse(errors)) } - def ensureInterBrokerVersion(version: ApiVersion): Unit = { - if (config.interBrokerProtocolVersion < version) + def ensureInterBrokerVersion(version: MetadataVersion): Unit = { + if (config.interBrokerProtocolVersion.isLessThan(version)) throw new UnsupportedVersionException(s"inter.broker.protocol.version: ${config.interBrokerProtocolVersion.version} is less than the required version: ${version.version}") } def handleAddPartitionToTxnRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = { - ensureInterBrokerVersion(KAFKA_0_11_0_IV0) + ensureInterBrokerVersion(IBP_0_11_0_IV0) val addPartitionsToTxnRequest = request.body[AddPartitionsToTxnRequest] val transactionalId = addPartitionsToTxnRequest.data.transactionalId val partitionsToAdd = addPartitionsToTxnRequest.partitions.asScala @@ -2444,7 +2447,7 @@ class KafkaApis(val requestChannel: RequestChannel, } def handleAddOffsetsToTxnRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = { - ensureInterBrokerVersion(KAFKA_0_11_0_IV0) + ensureInterBrokerVersion(IBP_0_11_0_IV0) val addOffsetsToTxnRequest = request.body[AddOffsetsToTxnRequest] val transactionalId = addOffsetsToTxnRequest.data.transactionalId val groupId = addOffsetsToTxnRequest.data.groupId @@ -2494,7 +2497,7 @@ class KafkaApis(val requestChannel: RequestChannel, } def handleTxnOffsetCommitRequest(request: RequestChannel.Request, requestLocal: RequestLocal): Unit = { - ensureInterBrokerVersion(KAFKA_0_11_0_IV0) + ensureInterBrokerVersion(IBP_0_11_0_IV0) val header = request.header val txnOffsetCommitRequest = request.body[TxnOffsetCommitRequest] diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 1dd162613..7186dcdc7 100755 --- 
a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -19,7 +19,7 @@ package kafka.server import java.util import java.util.{Collections, Locale, Properties} -import kafka.api.{ApiVersion, ApiVersionValidator, KAFKA_0_10_0_IV1, KAFKA_2_1_IV0, KAFKA_2_7_IV0, KAFKA_2_8_IV0, KAFKA_3_0_IV1} + import kafka.cluster.EndPoint import kafka.coordinator.group.OffsetConfig import kafka.coordinator.transaction.{TransactionLog, TransactionStateManager} @@ -47,6 +47,8 @@ import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuild import org.apache.kafka.common.utils.Utils import org.apache.kafka.raft.RaftConfig import org.apache.kafka.server.authorizer.Authorizer +import org.apache.kafka.server.common.{MetadataVersion, MetadataVersionValidator} +import org.apache.kafka.server.common.MetadataVersion._ import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.zookeeper.client.ZKClientConfig @@ -141,7 +143,7 @@ object Defaults { /* See `TopicConfig.MESSAGE_FORMAT_VERSION_CONFIG` for details */ @deprecated("3.0") - val LogMessageFormatVersion = KAFKA_3_0_IV1.version + val LogMessageFormatVersion = IBP_3_0_IV1.version val LogMessageTimestampType = "CreateTime" val LogMessageTimestampDifferenceMaxMs = Long.MaxValue @@ -172,7 +174,7 @@ object Defaults { val LeaderImbalanceCheckIntervalSeconds = 300 val UncleanLeaderElectionEnable = false val InterBrokerSecurityProtocol = SecurityProtocol.PLAINTEXT.toString - val InterBrokerProtocolVersion = ApiVersion.latestVersion.toString + val InterBrokerProtocolVersion = MetadataVersion.latest.version /** ********* Controlled shutdown configuration ***********/ val ControlledShutdownMaxRetries = 3 @@ -841,8 +843,8 @@ object KafkaConfig { val LogFlushOffsetCheckpointIntervalMsDoc = "The frequency with which we update the persistent record of the last flush which acts as the log recovery point" val LogFlushStartOffsetCheckpointIntervalMsDoc = "The frequency with which we update the persistent record of log start offset" val LogPreAllocateEnableDoc = "Should pre allocate file when create new segment? If you are using Kafka on Windows, you probably need to set it to true." - val LogMessageFormatVersionDoc = "Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. " + - "Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the " + + val LogMessageFormatVersionDoc = "Specify the message format version the broker will use to append messages to the logs. The value should be a valid MetadataVersion. " + + "Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check MetadataVersion for more details. By setting a particular message format version, the " + "user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly " + "will cause consumers with older versions to break as they will receive messages with a format that they don't understand." @@ -907,7 +909,7 @@ object KafkaConfig { "properties at the same time." val InterBrokerProtocolVersionDoc = "Specify which version of the inter-broker protocol will be used.\n" + " This is typically bumped after all brokers were upgraded to a new version.\n" + - " Example of some valid values are: 0.8.0, 0.8.1, 0.8.1.1, 0.8.2, 0.8.2.0, 0.8.2.1, 0.9.0.0, 0.9.0.1 Check ApiVersion for the full list." 
+ " Example of some valid values are: 0.8.0, 0.8.1, 0.8.1.1, 0.8.2, 0.8.2.0, 0.8.2.1, 0.9.0.0, 0.9.0.1 Check MetadataVersion for the full list." val InterBrokerListenerNameDoc = s"Name of listener used for communication between brokers. If this is unset, the listener name is defined by $InterBrokerSecurityProtocolProp. " + s"It is an error to set this and $InterBrokerSecurityProtocolProp properties at the same time." val ReplicaSelectorClassDoc = "The fully qualified class name that implements ReplicaSelector. This is used by the broker to find the preferred read replica. By default, we use an implementation that returns the leader." @@ -1202,7 +1204,7 @@ object KafkaConfig { .define(NumRecoveryThreadsPerDataDirProp, INT, Defaults.NumRecoveryThreadsPerDataDir, atLeast(1), HIGH, NumRecoveryThreadsPerDataDirDoc) .define(AutoCreateTopicsEnableProp, BOOLEAN, Defaults.AutoCreateTopicsEnable, HIGH, AutoCreateTopicsEnableDoc) .define(MinInSyncReplicasProp, INT, Defaults.MinInSyncReplicas, atLeast(1), HIGH, MinInSyncReplicasDoc) - .define(LogMessageFormatVersionProp, STRING, Defaults.LogMessageFormatVersion, ApiVersionValidator, MEDIUM, LogMessageFormatVersionDoc) + .define(LogMessageFormatVersionProp, STRING, Defaults.LogMessageFormatVersion, new MetadataVersionValidator(), MEDIUM, LogMessageFormatVersionDoc) .define(LogMessageTimestampTypeProp, STRING, Defaults.LogMessageTimestampType, in("CreateTime", "LogAppendTime"), MEDIUM, LogMessageTimestampTypeDoc) .define(LogMessageTimestampDifferenceMaxMsProp, LONG, Defaults.LogMessageTimestampDifferenceMaxMs, atLeast(0), MEDIUM, LogMessageTimestampDifferenceMaxMsDoc) .define(CreateTopicPolicyClassNameProp, CLASS, null, LOW, CreateTopicPolicyClassNameDoc) @@ -1230,7 +1232,7 @@ object KafkaConfig { .define(LeaderImbalanceCheckIntervalSecondsProp, LONG, Defaults.LeaderImbalanceCheckIntervalSeconds, atLeast(1), HIGH, LeaderImbalanceCheckIntervalSecondsDoc) .define(UncleanLeaderElectionEnableProp, BOOLEAN, Defaults.UncleanLeaderElectionEnable, HIGH, UncleanLeaderElectionEnableDoc) .define(InterBrokerSecurityProtocolProp, STRING, Defaults.InterBrokerSecurityProtocol, MEDIUM, InterBrokerSecurityProtocolDoc) - .define(InterBrokerProtocolVersionProp, STRING, Defaults.InterBrokerProtocolVersion, ApiVersionValidator, MEDIUM, InterBrokerProtocolVersionDoc) + .define(InterBrokerProtocolVersionProp, STRING, Defaults.InterBrokerProtocolVersion, new MetadataVersionValidator(), MEDIUM, InterBrokerProtocolVersionDoc) .define(InterBrokerListenerNameProp, STRING, null, MEDIUM, InterBrokerListenerNameDoc) .define(ReplicaSelectorClassProp, STRING, null, MEDIUM, ReplicaSelectorClassDoc) @@ -1712,7 +1714,7 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami def minInSyncReplicas = getInt(KafkaConfig.MinInSyncReplicasProp) def logPreAllocateEnable: java.lang.Boolean = getBoolean(KafkaConfig.LogPreAllocateProp) - // We keep the user-provided String as `ApiVersion.apply` can choose a slightly different version (eg if `0.10.0` + // We keep the user-provided String as `MetadataVersion.fromVersionString` can choose a slightly different version (eg if `0.10.0` // is passed, `0.10.0-IV0` may be picked) @nowarn("cat=deprecation") private val logMessageFormatVersionString = getString(KafkaConfig.LogMessageFormatVersionProp) @@ -1721,8 +1723,8 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami @deprecated("3.0") lazy val logMessageFormatVersion = if (LogConfig.shouldIgnoreMessageFormatVersion(interBrokerProtocolVersion)) - 
ApiVersion(Defaults.LogMessageFormatVersion) - else ApiVersion(logMessageFormatVersionString) + MetadataVersion.fromVersionString(Defaults.LogMessageFormatVersion) + else MetadataVersion.fromVersionString(logMessageFormatVersionString) def logMessageTimestampType = TimestampType.forName(getString(KafkaConfig.LogMessageTimestampTypeProp)) def logMessageTimestampDifferenceMaxMs: Long = getLong(KafkaConfig.LogMessageTimestampDifferenceMaxMsProp) @@ -1749,10 +1751,10 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami val leaderImbalanceCheckIntervalSeconds: Long = getLong(KafkaConfig.LeaderImbalanceCheckIntervalSecondsProp) def uncleanLeaderElectionEnable: java.lang.Boolean = getBoolean(KafkaConfig.UncleanLeaderElectionEnableProp) - // We keep the user-provided String as `ApiVersion.apply` can choose a slightly different version (eg if `0.10.0` + // We keep the user-provided String as `MetadataVersion.fromVersionString` can choose a slightly different version (eg if `0.10.0` // is passed, `0.10.0-IV0` may be picked) val interBrokerProtocolVersionString = getString(KafkaConfig.InterBrokerProtocolVersionProp) - val interBrokerProtocolVersion = ApiVersion(interBrokerProtocolVersionString) + val interBrokerProtocolVersion = MetadataVersion.fromVersionString(interBrokerProtocolVersionString) /** ********* Controlled shutdown configuration ***********/ val controlledShutdownMaxRetries = getInt(KafkaConfig.ControlledShutdownMaxRetriesProp) @@ -1760,7 +1762,7 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami val controlledShutdownEnable = getBoolean(KafkaConfig.ControlledShutdownEnableProp) /** ********* Feature configuration ***********/ - def isFeatureVersioningSupported = interBrokerProtocolVersion >= KAFKA_2_7_IV0 + def isFeatureVersioningSupported = interBrokerProtocolVersion.isFeatureVersioningSupported() /** ********* Group coordinator configuration ***********/ val groupMinSessionTimeoutMs = getInt(KafkaConfig.GroupMinSessionTimeoutMsProp) @@ -1812,7 +1814,7 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami def controlPlaneListenerName = getControlPlaneListenerNameAndSecurityProtocol.map { case (listenerName, _) => listenerName } def controlPlaneSecurityProtocol = getControlPlaneListenerNameAndSecurityProtocol.map { case (_, securityProtocol) => securityProtocol } def saslMechanismInterBrokerProtocol = getString(KafkaConfig.SaslMechanismInterBrokerProtocolProp) - val saslInterBrokerHandshakeRequestEnable = interBrokerProtocolVersion >= KAFKA_0_10_0_IV1 + val saslInterBrokerHandshakeRequestEnable = interBrokerProtocolVersion.isSaslInterBrokerHandshakeRequestEnabled() /** ********* DelegationToken Configuration **************/ val delegationTokenSecretKey = Option(getPassword(KafkaConfig.DelegationTokenSecretKeyProp)) @@ -1993,7 +1995,7 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami // Topic IDs are used with all self-managed quorum clusters and ZK cluster with IBP greater than or equal to 2.8 def usesTopicId: Boolean = - usesSelfManagedQuorum || interBrokerProtocolVersion >= KAFKA_2_8_IV0 + usesSelfManagedQuorum || interBrokerProtocolVersion.isTopicIdsSupported() validateValues() @@ -2157,15 +2159,15 @@ class KafkaConfig private(doLog: Boolean, val props: java.util.Map[_, _], dynami if (messageFormatVersion.shouldWarn) warn(messageFormatVersion.brokerWarningMessage) - val recordVersion = logMessageFormatVersion.recordVersion - 
require(interBrokerProtocolVersion.recordVersion.value >= recordVersion.value, + val recordVersion = logMessageFormatVersion.highestSupportedRecordVersion + require(interBrokerProtocolVersion.highestSupportedRecordVersion().value >= recordVersion.value, s"log.message.format.version $logMessageFormatVersionString can only be used when inter.broker.protocol.version " + - s"is set to version ${ApiVersion.minSupportedFor(recordVersion).shortVersion} or higher") + s"is set to version ${MetadataVersion.minSupportedFor(recordVersion).shortVersion} or higher") if (offsetsTopicCompressionCodec == ZStdCompressionCodec) - require(interBrokerProtocolVersion.recordVersion.value >= KAFKA_2_1_IV0.recordVersion.value, + require(interBrokerProtocolVersion.highestSupportedRecordVersion().value >= IBP_2_1_IV0.highestSupportedRecordVersion().value, "offsets.topic.compression.codec zstd can only be used when inter.broker.protocol.version " + - s"is set to version ${KAFKA_2_1_IV0.shortVersion} or higher") + s"is set to version ${IBP_2_1_IV0.shortVersion} or higher") val interBrokerUsesSasl = interBrokerSecurityProtocol == SecurityProtocol.SASL_PLAINTEXT || interBrokerSecurityProtocol == SecurityProtocol.SASL_SSL require(!interBrokerUsesSasl || saslInterBrokerHandshakeRequestEnable || saslMechanismInterBrokerProtocol == SaslConfigs.GSSAPI_MECHANISM, diff --git a/core/src/main/scala/kafka/server/KafkaServer.scala b/core/src/main/scala/kafka/server/KafkaServer.scala index 78ec415c3..5cd16ce16 100755 --- a/core/src/main/scala/kafka/server/KafkaServer.scala +++ b/core/src/main/scala/kafka/server/KafkaServer.scala @@ -22,7 +22,6 @@ import java.net.{InetAddress, SocketTimeoutException} import java.util.concurrent._ import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} -import kafka.api.{KAFKA_0_9_0, KAFKA_2_2_IV0, KAFKA_2_4_IV1} import kafka.cluster.{Broker, EndPoint} import kafka.common.{GenerateBrokerIdException, InconsistentBrokerIdException, InconsistentClusterIdException} import kafka.controller.KafkaController @@ -50,6 +49,7 @@ import org.apache.kafka.common.utils.{AppInfoParser, LogContext, Time, Utils} import org.apache.kafka.common.{Endpoint, Node} import org.apache.kafka.metadata.BrokerState import org.apache.kafka.server.authorizer.Authorizer +import org.apache.kafka.server.common.MetadataVersion._ import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.zookeeper.client.ZKClientConfig @@ -627,9 +627,9 @@ class KafkaServer( // send the controlled shutdown request val controlledShutdownApiVersion: Short = - if (config.interBrokerProtocolVersion < KAFKA_0_9_0) 0 - else if (config.interBrokerProtocolVersion < KAFKA_2_2_IV0) 1 - else if (config.interBrokerProtocolVersion < KAFKA_2_4_IV1) 2 + if (config.interBrokerProtocolVersion.isLessThan(IBP_0_9_0)) 0 + else if (config.interBrokerProtocolVersion.isLessThan(IBP_2_2_IV0)) 1 + else if (config.interBrokerProtocolVersion.isLessThan(IBP_2_4_IV1)) 2 else 3 val controlledShutdownRequest = new ControlledShutdownRequest.Builder( diff --git a/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala b/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala index 57d89dc3d..b598c397c 100644 --- a/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala +++ b/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala @@ -20,7 +20,6 @@ package kafka.server import java.util.Collections import java.util.Optional -import kafka.api._ import kafka.cluster.BrokerEndPoint import kafka.log.{LeaderOffsetIncremented, LogAppendInfo} import 
kafka.server.AbstractFetcherThread.ReplicaFetch @@ -38,6 +37,7 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.MemoryRecords import org.apache.kafka.common.requests._ import org.apache.kafka.common.utils.{LogContext, Time} +import org.apache.kafka.server.common.MetadataVersion._ import scala.jdk.CollectionConverters._ import scala.collection.{Map, mutable} @@ -72,44 +72,44 @@ class ReplicaFetcherThread(name: String, // Visible for testing private[server] val fetchRequestVersion: Short = - if (brokerConfig.interBrokerProtocolVersion >= KAFKA_3_1_IV0) 13 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_7_IV1) 12 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_3_IV1) 11 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_1_IV2) 10 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_0_IV1) 8 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_1_1_IV0) 7 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_11_0_IV1) 5 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_11_0_IV0) 4 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_10_1_IV1) 3 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_10_0_IV0) 2 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_9_0) 1 + if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_3_1_IV0)) 13 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_2_7_IV1)) 12 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_2_3_IV1)) 11 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_2_1_IV2)) 10 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_2_0_IV1)) 8 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_1_1_IV0)) 7 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_0_11_0_IV1)) 5 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_0_11_0_IV0)) 4 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_0_10_1_IV1)) 3 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_0_10_0_IV0)) 2 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_0_9_0)) 1 else 0 // Visible for testing private[server] val offsetForLeaderEpochRequestVersion: Short = - if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_8_IV0) 4 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_3_IV1) 3 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_1_IV1) 2 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_0_IV0) 1 + if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_2_8_IV0)) 4 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_2_3_IV1)) 3 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_2_1_IV1)) 2 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_2_0_IV0)) 1 else 0 // Visible for testing private[server] val listOffsetRequestVersion: Short = - if (brokerConfig.interBrokerProtocolVersion >= KAFKA_3_0_IV1) 7 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_8_IV0) 6 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_2_IV1) 5 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_1_IV1) 4 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_2_0_IV1) 3 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_11_0_IV0) 2 - else if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_10_1_IV2) 1 + if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_3_0_IV1)) 7 + else if 
(brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_2_8_IV0)) 6 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_2_2_IV1)) 5 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_2_1_IV1)) 4 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_2_0_IV1)) 3 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_0_11_0_IV0)) 2 + else if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_0_10_1_IV2)) 1 else 0 private val maxWait = brokerConfig.replicaFetchWaitMaxMs private val minBytes = brokerConfig.replicaFetchMinBytes private val maxBytes = brokerConfig.replicaFetchResponseMaxBytes private val fetchSize = brokerConfig.replicaFetchMaxBytes - override protected val isOffsetForLeaderEpochSupported: Boolean = brokerConfig.interBrokerProtocolVersion >= KAFKA_0_11_0_IV2 - override protected val isTruncationOnFetchSupported = ApiVersion.isTruncationOnFetchSupported(brokerConfig.interBrokerProtocolVersion) + override protected val isOffsetForLeaderEpochSupported: Boolean = brokerConfig.interBrokerProtocolVersion.isOffsetForLeaderEpochSupported + override protected val isTruncationOnFetchSupported = brokerConfig.interBrokerProtocolVersion.isTruncationOnFetchSupported val fetchSessionHandler = new FetchSessionHandler(logContext, sourceBroker.id) override protected def latestEpoch(topicPartition: TopicPartition): Option[Int] = { @@ -261,7 +261,7 @@ class ReplicaFetcherThread(name: String, Errors.forCode(responsePartition.errorCode) match { case Errors.NONE => - if (brokerConfig.interBrokerProtocolVersion >= KAFKA_0_10_1_IV2) + if (brokerConfig.interBrokerProtocolVersion.isAtLeast(IBP_0_10_1_IV2)) responsePartition.offset else responsePartition.oldStyleOffsets.get(0) diff --git a/core/src/main/scala/kafka/server/ReplicaManager.scala b/core/src/main/scala/kafka/server/ReplicaManager.scala index dac8313be..fff7fb273 100644 --- a/core/src/main/scala/kafka/server/ReplicaManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaManager.scala @@ -21,6 +21,7 @@ import java.util.Optional import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.locks.Lock + import com.yammer.metrics.core.Meter import kafka.api._ import kafka.cluster.{BrokerEndPoint, Partition} @@ -60,6 +61,7 @@ import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.requests._ import org.apache.kafka.common.utils.Time import org.apache.kafka.image.{LocalReplicaChanges, MetadataImage, TopicsDelta} +import org.apache.kafka.server.common.MetadataVersion._ import scala.jdk.CollectionConverters._ import scala.collection.{Map, Seq, Set, mutable} @@ -307,7 +309,7 @@ class ReplicaManager(val config: KafkaConfig, // If inter-broker protocol (IBP) < 1.0, the controller will send LeaderAndIsrRequest V0 which does not include isNew field. // In this case, the broker receiving the request cannot determine whether it is safe to create a partition if a log directory has failed. // Thus, we choose to halt the broker on any log directory failure if IBP < 1.0 - val haltBrokerOnFailure = config.interBrokerProtocolVersion < KAFKA_1_0_IV0 + val haltBrokerOnFailure = config.interBrokerProtocolVersion.isLessThan(IBP_1_0_IV0) logDirFailureHandler = new LogDirFailureHandler("LogDirFailureHandler", haltBrokerOnFailure) logDirFailureHandler.start() } @@ -1806,7 +1808,7 @@ class ReplicaManager(val config: KafkaConfig, * OffsetForLeaderEpoch request. 
*/ protected def initialFetchOffset(log: UnifiedLog): Long = { - if (ApiVersion.isTruncationOnFetchSupported(config.interBrokerProtocolVersion) && log.latestEpoch.nonEmpty) + if (config.interBrokerProtocolVersion.isTruncationOnFetchSupported() && log.latestEpoch.nonEmpty) log.logEndOffset else log.highWatermark diff --git a/core/src/main/scala/kafka/zk/ZkData.scala b/core/src/main/scala/kafka/zk/ZkData.scala index c8e1861bb..9733e9c83 100644 --- a/core/src/main/scala/kafka/zk/ZkData.scala +++ b/core/src/main/scala/kafka/zk/ZkData.scala @@ -19,9 +19,10 @@ package kafka.zk import java.nio.charset.StandardCharsets.UTF_8 import java.util import java.util.Properties + import com.fasterxml.jackson.annotation.JsonProperty import com.fasterxml.jackson.core.JsonProcessingException -import kafka.api.{ApiVersion, KAFKA_0_10_0_IV1, KAFKA_2_7_IV0, LeaderAndIsr} +import kafka.api.LeaderAndIsr import kafka.cluster.{Broker, EndPoint} import kafka.common.{NotificationHandler, ZkNodeChangeNotificationListener} import kafka.controller.{IsrChangeNotificationHandler, LeaderIsrAndControllerEpoch, ReplicaAssignment} @@ -40,7 +41,8 @@ import org.apache.kafka.common.security.token.delegation.{DelegationToken, Token import org.apache.kafka.common.utils.{SecurityUtils, Time} import org.apache.kafka.common.{KafkaException, TopicPartition, Uuid} import org.apache.kafka.metadata.LeaderRecoveryState -import org.apache.kafka.server.common.ProducerIdsBlock +import org.apache.kafka.server.common.{MetadataVersion, ProducerIdsBlock} +import org.apache.kafka.server.common.MetadataVersion.{IBP_0_10_0_IV1, IBP_2_7_IV0} import org.apache.zookeeper.ZooDefs import org.apache.zookeeper.data.{ACL, Stat} @@ -84,9 +86,9 @@ object BrokerIdsZNode { object BrokerInfo { /** - * - Create a broker info with v5 json format if the apiVersion is 2.7.x or above. + * - Create a broker info with v5 json format if the metadataVersion is 2.7.x or above. * - Create a broker info with v4 json format (which includes multiple endpoints and rack) if - * the apiVersion is 0.10.0.X or above but lesser than 2.7.x. + * the metadataVersion is 0.10.0.X or above but lesser than 2.7.x. * - Register the broker with v2 json format otherwise. * * Due to KAFKA-3100, 0.9.0.0 broker and old clients will break if JSON version is above 2. @@ -95,11 +97,11 @@ object BrokerInfo { * without having to upgrade to 0.9.0.1 first (clients have to be upgraded to 0.9.0.1 in * any case). */ - def apply(broker: Broker, apiVersion: ApiVersion, jmxPort: Int): BrokerInfo = { + def apply(broker: Broker, metadataVersion: MetadataVersion, jmxPort: Int): BrokerInfo = { val version = { - if (apiVersion >= KAFKA_2_7_IV0) + if (metadataVersion.isAtLeast(IBP_2_7_IV0)) 5 - else if (apiVersion >= KAFKA_0_10_0_IV1) + else if (metadataVersion.isAtLeast(IBP_0_10_0_IV1)) 4 else 2 @@ -846,12 +848,12 @@ object DelegationTokenInfoZNode { * Enabled -> This status means the feature versioning system (KIP-584) is enabled, and, the * finalized features stored in the FeatureZNode are active. This status is written by * the controller to the FeatureZNode only when the broker IBP config is greater than - * or equal to KAFKA_2_7_IV0. + * or equal to IBP_2_7_IV0. * * Disabled -> This status means the feature versioning system (KIP-584) is disabled, and, the * the finalized features stored in the FeatureZNode is not relevant. This status is * written by the controller to the FeatureZNode only when the broker IBP config - * is less than KAFKA_2_7_IV0. + * is less than IBP_2_7_IV0. 
*/ sealed trait FeatureZNodeStatus { def id: Int diff --git a/core/src/test/scala/integration/kafka/admin/ConfigCommandIntegrationTest.scala b/core/src/test/scala/integration/kafka/admin/ConfigCommandIntegrationTest.scala index b47a7194c..f2a6e71dd 100644 --- a/core/src/test/scala/integration/kafka/admin/ConfigCommandIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/admin/ConfigCommandIntegrationTest.scala @@ -17,7 +17,6 @@ package kafka.admin import kafka.admin.ConfigCommand.ConfigCommandOptions -import kafka.api.ApiVersion import kafka.cluster.{Broker, EndPoint} import kafka.server.{ConfigEntityName, KafkaConfig, QuorumTestHarness} import kafka.utils.{Exit, Logging, TestInfoUtils} @@ -25,6 +24,7 @@ import kafka.zk.{AdminZkClient, BrokerInfo} import org.apache.kafka.common.config.ConfigException import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth.SecurityProtocol +import org.apache.kafka.server.common.MetadataVersion import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource @@ -173,7 +173,7 @@ class ConfigCommandIntegrationTest extends QuorumTestHarness with Logging { zkClient.createTopLevelPaths() val securityProtocol = SecurityProtocol.PLAINTEXT val endpoint = new EndPoint("localhost", 9092, ListenerName.forSecurityProtocol(securityProtocol), securityProtocol) - val brokerInfo = BrokerInfo(Broker(id, Seq(endpoint), rack = None), ApiVersion.latestVersion, jmxPort = 9192) + val brokerInfo = BrokerInfo(Broker(id, Seq(endpoint), rack = None), MetadataVersion.latest, jmxPort = 9192) zkClient.registerBroker(brokerInfo) } } diff --git a/core/src/test/scala/integration/kafka/admin/ReassignPartitionsIntegrationTest.scala b/core/src/test/scala/integration/kafka/admin/ReassignPartitionsIntegrationTest.scala index 5385b2faa..769389a5e 100644 --- a/core/src/test/scala/integration/kafka/admin/ReassignPartitionsIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/admin/ReassignPartitionsIntegrationTest.scala @@ -19,8 +19,8 @@ package kafka.admin import java.io.Closeable import java.util.{Collections, HashMap, List} + import kafka.admin.ReassignPartitionsCommand._ -import kafka.api.KAFKA_2_7_IV1 import kafka.server.{IsrChangePropagationConfig, KafkaConfig, KafkaServer, ZkAlterPartitionManager} import kafka.utils.Implicits._ import kafka.utils.TestUtils @@ -30,6 +30,7 @@ import org.apache.kafka.clients.producer.ProducerRecord import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.utils.Utils import org.apache.kafka.common.{TopicPartition, TopicPartitionReplica} +import org.apache.kafka.server.common.MetadataVersion.IBP_2_7_IV1 import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertTrue} import org.junit.jupiter.api.{AfterEach, Test, Timeout} @@ -66,7 +67,7 @@ class ReassignPartitionsIntegrationTest extends QuorumTestHarness { // the `AlterIsr` API. In this case, the controller will register individual // watches for each reassigning partition so that the reassignment can be // completed as soon as the ISR is expanded. 
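For reference, the integration tests in this part of the patch all pin a broker's IBP the same way: the MetadataVersion's version string is written into inter.broker.protocol.version before the KafkaConfig is built, as the configOverrides map just below does. A minimal Scala sketch of that pattern follows; the configWithIbp helper name is illustrative only, and it uses just the properties that appear elsewhere in this diff (BrokerIdProp, ZkConnectProp, InterBrokerProtocolVersionProp).

import java.util.Properties

import kafka.server.KafkaConfig
import org.apache.kafka.server.common.MetadataVersion
import org.apache.kafka.server.common.MetadataVersion.IBP_2_7_IV1

object IbpTestConfigSketch {
  // Illustrative helper (not part of the patch): pins a broker's IBP by writing the
  // MetadataVersion's version string into inter.broker.protocol.version, the same
  // pattern used by the configOverrides map and the createConfig helpers in these tests.
  def configWithIbp(nodeId: Int, zkConnect: String, ibp: MetadataVersion): KafkaConfig = {
    val props = new Properties()
    props.put(KafkaConfig.BrokerIdProp, nodeId.toString)
    props.put(KafkaConfig.ZkConnectProp, zkConnect)
    props.put(KafkaConfig.InterBrokerProtocolVersionProp, ibp.version)
    KafkaConfig.fromProps(props)
  }

  def main(args: Array[String]): Unit = {
    // The configured string is parsed back via MetadataVersion.fromVersionString on the broker side.
    val config = configWithIbp(0, "localhost:2181", IBP_2_7_IV1)
    println(config.interBrokerProtocolVersion)
  }
}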
- val configOverrides = Map(KafkaConfig.InterBrokerProtocolVersionProp -> KAFKA_2_7_IV1.version) + val configOverrides = Map(KafkaConfig.InterBrokerProtocolVersionProp -> IBP_2_7_IV1.version) cluster = new ReassignPartitionsTestCluster(zkConnect, configOverrides = configOverrides) cluster.setup() executeAndVerifyReassignment() @@ -89,7 +90,7 @@ class ReassignPartitionsIntegrationTest extends QuorumTestHarness { maxDelayMs = 500 ) - val oldIbpConfig = Map(KafkaConfig.InterBrokerProtocolVersionProp -> KAFKA_2_7_IV1.version) + val oldIbpConfig = Map(KafkaConfig.InterBrokerProtocolVersionProp -> IBP_2_7_IV1.version) val brokerConfigOverrides = Map(1 -> oldIbpConfig, 2 -> oldIbpConfig, 3 -> oldIbpConfig) cluster = new ReassignPartitionsTestCluster(zkConnect, brokerConfigOverrides = brokerConfigOverrides) diff --git a/core/src/test/scala/integration/kafka/server/FetchRequestBetweenDifferentIbpTest.scala b/core/src/test/scala/integration/kafka/server/FetchRequestBetweenDifferentIbpTest.scala index 405b0099a..36d9c00bf 100644 --- a/core/src/test/scala/integration/kafka/server/FetchRequestBetweenDifferentIbpTest.scala +++ b/core/src/test/scala/integration/kafka/server/FetchRequestBetweenDifferentIbpTest.scala @@ -19,11 +19,13 @@ package integration.kafka.server import java.time.Duration import java.util.Arrays.asList -import kafka.api.{ApiVersion, DefaultApiVersion, KAFKA_2_7_IV0, KAFKA_2_8_IV1, KAFKA_3_1_IV0} + import kafka.server.{BaseRequestTest, KafkaConfig} import kafka.utils.TestUtils import org.apache.kafka.clients.producer.ProducerRecord import org.apache.kafka.common.TopicPartition +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.{IBP_2_7_IV0, IBP_2_8_IV1, IBP_3_1_IV0} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test @@ -35,26 +37,26 @@ class FetchRequestBetweenDifferentIbpTest extends BaseRequestTest { override def generateConfigs: Seq[KafkaConfig] = { // Brokers should be at most 2 different IBP versions, but for more test coverage, three are used here. Seq( - createConfig(0, KAFKA_2_7_IV0), - createConfig(1, KAFKA_2_8_IV1), - createConfig(2, KAFKA_3_1_IV0) + createConfig(0, IBP_2_7_IV0), + createConfig(1, IBP_2_8_IV1), + createConfig(2, IBP_3_1_IV0) ) } @Test def testControllerOldIBP(): Unit = { - // Ensure controller version < KAFKA_2_8_IV1, and then create a topic where leader of partition 0 is not the controller, + // Ensure controller version < IBP_2_8_IV1, and then create a topic where leader of partition 0 is not the controller, // leader of partition 1 is. - testControllerWithGivenIBP(KAFKA_2_7_IV0, 0) + testControllerWithGivenIBP(IBP_2_7_IV0, 0) } @Test def testControllerNewIBP(): Unit = { - // Ensure controller version = KAFKA_3_1_IV0, and then create a topic where leader of partition 1 is the old version. - testControllerWithGivenIBP(KAFKA_3_1_IV0, 2) + // Ensure controller version = IBP_3_1_IV0, and then create a topic where leader of partition 1 is the old version. 
+ testControllerWithGivenIBP(IBP_3_1_IV0, 2) } - def testControllerWithGivenIBP(version: DefaultApiVersion, controllerBroker: Int): Unit = { + def testControllerWithGivenIBP(version: MetadataVersion, controllerBroker: Int): Unit = { val topic = "topic" val producer = createProducer() val consumer = createConsumer() @@ -79,16 +81,16 @@ class FetchRequestBetweenDifferentIbpTest extends BaseRequestTest { @Test def testControllerNewToOldIBP(): Unit = { - testControllerSwitchingIBP(KAFKA_3_1_IV0, 2, KAFKA_2_7_IV0, 0) + testControllerSwitchingIBP(IBP_3_1_IV0, 2, IBP_2_7_IV0, 0) } @Test def testControllerOldToNewIBP(): Unit = { - testControllerSwitchingIBP(KAFKA_2_7_IV0, 0, KAFKA_3_1_IV0, 2) + testControllerSwitchingIBP(IBP_2_7_IV0, 0, IBP_3_1_IV0, 2) } - def testControllerSwitchingIBP(version1: DefaultApiVersion, broker1: Int, version2: DefaultApiVersion, broker2: Int): Unit = { + def testControllerSwitchingIBP(version1: MetadataVersion, broker1: Int, version2: MetadataVersion, broker2: Int): Unit = { val topic = "topic" val topic2 = "topic2" val producer = createProducer() @@ -132,7 +134,7 @@ class FetchRequestBetweenDifferentIbpTest extends BaseRequestTest { assertEquals(2, count2) } - private def ensureControllerWithIBP(version: DefaultApiVersion): Unit = { + private def ensureControllerWithIBP(version: MetadataVersion): Unit = { val nonControllerServers = servers.filter(_.config.interBrokerProtocolVersion != version) nonControllerServers.iterator.foreach(server => { server.shutdown() @@ -143,7 +145,7 @@ class FetchRequestBetweenDifferentIbpTest extends BaseRequestTest { }) } - private def createConfig(nodeId: Int, interBrokerVersion: ApiVersion): KafkaConfig = { + private def createConfig(nodeId: Int, interBrokerVersion: MetadataVersion): KafkaConfig = { val props = TestUtils.createBrokerConfig(nodeId, zkConnect) props.put(KafkaConfig.InterBrokerProtocolVersionProp, interBrokerVersion.version) KafkaConfig.fromProps(props) diff --git a/core/src/test/scala/integration/kafka/server/FetchRequestTestDowngrade.scala b/core/src/test/scala/integration/kafka/server/FetchRequestTestDowngrade.scala index 3c0bff81b..c714b8cc3 100644 --- a/core/src/test/scala/integration/kafka/server/FetchRequestTestDowngrade.scala +++ b/core/src/test/scala/integration/kafka/server/FetchRequestTestDowngrade.scala @@ -20,12 +20,13 @@ package integration.kafka.server import java.time.Duration import java.util.Arrays.asList -import kafka.api.{ApiVersion, KAFKA_2_7_IV0, KAFKA_3_1_IV0} import kafka.server.{BaseRequestTest, KafkaConfig} import kafka.utils.TestUtils import kafka.zk.ZkVersion import org.apache.kafka.clients.producer.ProducerRecord import org.apache.kafka.common.TopicPartition +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.{IBP_2_7_IV0, IBP_3_1_IV0} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test @@ -37,8 +38,8 @@ class FetchRequestTestDowngrade extends BaseRequestTest { override def generateConfigs: Seq[KafkaConfig] = { // Controller should start with newer IBP and downgrade to the older one. 
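The request-version ladders in ReplicaFetcherThread above, and in ControllerChannelManagerTest further down, all take the same shape once ApiVersion's Ordered comparisons are replaced by MetadataVersion.isAtLeast and isLessThan. The following condensed Scala sketch keeps only three rungs of the fetch-version ladder shown above; it is an illustration of the comparison API, not the full mapping.

import org.apache.kafka.server.common.MetadataVersion
import org.apache.kafka.server.common.MetadataVersion.{IBP_0_11_0_IV0, IBP_2_7_IV1, IBP_3_1_IV0}

object IbpLadderSketch {
  // Condensed ladder: pick a fetch request version from the configured IBP.
  // isAtLeast/isLessThan replace the old `>=` / `<` comparisons on kafka.api.ApiVersion.
  def fetchVersionFor(ibp: MetadataVersion): Short =
    if (ibp.isAtLeast(IBP_3_1_IV0)) 13
    else if (ibp.isAtLeast(IBP_2_7_IV1)) 12
    else if (ibp.isAtLeast(IBP_0_11_0_IV0)) 4
    else 0

  def main(args: Array[String]): Unit = {
    val ibp = MetadataVersion.fromVersionString("2.8")
    // "2.8" resolves to a concrete MetadataVersion: at least 0.11.0-IV0, still less than 3.1-IV0.
    println(s"${ibp.version} -> fetch v${fetchVersionFor(ibp)}, pre-3.1? ${ibp.isLessThan(IBP_3_1_IV0)}")
  }
}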
Seq( - createConfig(0, KAFKA_3_1_IV0), - createConfig(1, KAFKA_2_7_IV0) + createConfig(0, IBP_3_1_IV0), + createConfig(1, IBP_2_7_IV0) ) } @@ -72,7 +73,7 @@ class FetchRequestTestDowngrade extends BaseRequestTest { } } - private def createConfig(nodeId: Int, interBrokerVersion: ApiVersion): KafkaConfig = { + private def createConfig(nodeId: Int, interBrokerVersion: MetadataVersion): KafkaConfig = { val props = TestUtils.createBrokerConfig(nodeId, zkConnect) props.put(KafkaConfig.InterBrokerProtocolVersionProp, interBrokerVersion.version) KafkaConfig.fromProps(props) diff --git a/core/src/test/scala/integration/kafka/server/MetadataRequestBetweenDifferentIbpTest.scala b/core/src/test/scala/integration/kafka/server/MetadataRequestBetweenDifferentIbpTest.scala index aad5ae7f9..fac859eff 100644 --- a/core/src/test/scala/integration/kafka/server/MetadataRequestBetweenDifferentIbpTest.scala +++ b/core/src/test/scala/integration/kafka/server/MetadataRequestBetweenDifferentIbpTest.scala @@ -17,7 +17,6 @@ package kafka.server -import kafka.api.{ApiVersion, KAFKA_2_8_IV0} import kafka.network.SocketServer import kafka.utils.TestUtils import kafka.zk.ZkVersion @@ -25,6 +24,8 @@ import org.apache.kafka.common.Uuid import org.apache.kafka.common.message.MetadataRequestData import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{MetadataRequest, MetadataResponse} +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.IBP_2_8_IV0 import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test @@ -35,9 +36,9 @@ class MetadataRequestBetweenDifferentIbpTest extends BaseRequestTest { override def brokerCount: Int = 3 override def generateConfigs: Seq[KafkaConfig] = { Seq( - createConfig(0, KAFKA_2_8_IV0), - createConfig(1, ApiVersion.latestVersion), - createConfig(2, ApiVersion.latestVersion) + createConfig(0, IBP_2_8_IV0), + createConfig(1, MetadataVersion.latest), + createConfig(2, MetadataVersion.latest) ) } @@ -58,7 +59,7 @@ class MetadataRequestBetweenDifferentIbpTest extends BaseRequestTest { assertEquals(topicId, topicMetadata.topicId()) assertEquals(topic, topicMetadata.topic()) - // Make the broker whose version=KAFKA_2_8_IV0 controller + // Make the broker whose version=IBP_2_8_IV0 controller ensureControllerIn(Seq(0)) // Restart the broker whose ibp is higher, and the controller will send metadata request to it @@ -77,7 +78,7 @@ class MetadataRequestBetweenDifferentIbpTest extends BaseRequestTest { } } - private def createConfig(nodeId: Int,interBrokerVersion: ApiVersion): KafkaConfig = { + private def createConfig(nodeId: Int, interBrokerVersion: MetadataVersion): KafkaConfig = { val props = TestUtils.createBrokerConfig(nodeId, zkConnect) props.put(KafkaConfig.InterBrokerProtocolVersionProp, interBrokerVersion.version) KafkaConfig.fromProps(props) diff --git a/core/src/test/scala/unit/kafka/admin/FeatureCommandTest.scala b/core/src/test/scala/unit/kafka/admin/FeatureCommandTest.scala index d75714818..ac715d217 100644 --- a/core/src/test/scala/unit/kafka/admin/FeatureCommandTest.scala +++ b/core/src/test/scala/unit/kafka/admin/FeatureCommandTest.scala @@ -17,15 +17,14 @@ package kafka.admin -import kafka.api.KAFKA_2_7_IV0 import kafka.server.{BaseRequestTest, KafkaConfig, KafkaServer} import kafka.utils.TestUtils import kafka.utils.TestUtils.waitUntilTrue import org.apache.kafka.common.feature.{Features, SupportedVersionRange} import org.apache.kafka.common.utils.Utils - import java.util.Properties 
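As the KafkaConfig comment earlier in the patch notes, MetadataVersion.fromVersionString, like ApiVersion.apply before it, may resolve a short version string to a specific IV release, which is why the raw user-provided string is kept alongside the parsed value; the controller tests further below also iterate MetadataVersion.VALUES where they previously used ApiVersion.allVersions. A small Scala sketch of those lookups follows; the printed values are assumptions based on the comments in this diff, not verified output.

import org.apache.kafka.server.common.MetadataVersion
import org.apache.kafka.server.common.MetadataVersion.IBP_2_8_IV0

object MetadataVersionLookupSketch {
  def main(args: Array[String]): Unit = {
    // A short form such as "0.10.0" may be resolved to a specific IV
    // (per the KafkaConfig comment above, e.g. "0.10.0-IV0").
    val parsed = MetadataVersion.fromVersionString("0.10.0")
    println(s"parsed=${parsed.version}, short=${parsed.shortVersion}")

    // MetadataVersion.latest and MetadataVersion.VALUES replace ApiVersion.latestVersion
    // and ApiVersion.allVersions in the tests touched by this patch.
    val atLeast28 = MetadataVersion.VALUES.count(_.isAtLeast(IBP_2_8_IV0))
    println(s"latest=${MetadataVersion.latest.version}, versions >= 2.8-IV0: $atLeast28")
  }
}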
+import org.apache.kafka.server.common.MetadataVersion.IBP_2_7_IV0 import org.junit.jupiter.api.Assertions.assertTrue import org.junit.jupiter.api.Test @@ -33,7 +32,7 @@ class FeatureCommandTest extends BaseRequestTest { override def brokerCount: Int = 3 override def brokerPropertyOverrides(props: Properties): Unit = { - props.put(KafkaConfig.InterBrokerProtocolVersionProp, KAFKA_2_7_IV0.toString) + props.put(KafkaConfig.InterBrokerProtocolVersionProp, IBP_2_7_IV0.toString) } private val defaultSupportedFeatures: Features[SupportedVersionRange] = diff --git a/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala b/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala index 52addbc82..2117d87e4 100644 --- a/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala @@ -16,7 +16,6 @@ */ package kafka.cluster -import kafka.api.ApiVersion import kafka.log.{CleanerConfig, LogConfig, LogManager} import kafka.server.{Defaults, MetadataCache} import kafka.server.checkpoints.OffsetCheckpoints @@ -30,10 +29,11 @@ import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} import org.junit.jupiter.api.{AfterEach, BeforeEach} import org.mockito.ArgumentMatchers import org.mockito.Mockito.{mock, when} - import java.io.File import java.util.Properties +import org.apache.kafka.server.common.MetadataVersion + import scala.jdk.CollectionConverters._ object AbstractPartitionTest { @@ -90,7 +90,7 @@ class AbstractPartitionTest { .thenReturn(None) } - protected def interBrokerProtocolVersion: ApiVersion = ApiVersion.latestVersion + protected def interBrokerProtocolVersion: MetadataVersion = MetadataVersion.latest def createLogProperties(overrides: Map[String, String]): Properties = { val logProps = new Properties() diff --git a/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala b/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala index 50bbe18f4..39a2edb50 100644 --- a/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala @@ -21,7 +21,7 @@ import java.util.Properties import java.util.concurrent._ import java.util.concurrent.atomic.AtomicBoolean -import kafka.api.{ApiVersion, LeaderAndIsr} +import kafka.api.LeaderAndIsr import kafka.log._ import kafka.server._ import kafka.server.checkpoints.OffsetCheckpoints @@ -32,6 +32,7 @@ import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrParti import org.apache.kafka.common.record.{MemoryRecords, SimpleRecord} import org.apache.kafka.common.utils.Utils import org.apache.kafka.common.{TopicPartition, Uuid} +import org.apache.kafka.server.common.MetadataVersion import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertTrue} import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.mockito.ArgumentMatchers @@ -262,7 +263,7 @@ class PartitionLockTest extends Logging { logManager.startup(Set.empty) val partition = new Partition(topicPartition, replicaLagTimeMaxMs = kafka.server.Defaults.ReplicaLagTimeMaxMs, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, localBrokerId = brokerId, mockTime, isrChangeListener, @@ -396,7 +397,7 @@ class PartitionLockTest extends Logging { keepPartitionMetadataFile = true) { override def appendAsLeader(records: MemoryRecords, leaderEpoch: Int, origin: AppendOrigin, - interBrokerProtocolVersion: ApiVersion, requestLocal: 
RequestLocal): LogAppendInfo = { + interBrokerProtocolVersion: MetadataVersion, requestLocal: RequestLocal): LogAppendInfo = { val appendInfo = super.appendAsLeader(records, leaderEpoch, origin, interBrokerProtocolVersion, requestLocal) appendSemaphore.acquire() appendInfo diff --git a/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala b/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala index dcf91ad2d..63e06a5bc 100644 --- a/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala @@ -17,7 +17,6 @@ package kafka.cluster import com.yammer.metrics.core.Metric -import kafka.api.{ApiVersion, KAFKA_2_6_IV0} import kafka.common.UnexpectedAppendOffsetException import kafka.log.{Defaults => _, _} import kafka.server._ @@ -45,6 +44,8 @@ import java.util.Optional import java.util.concurrent.{CountDownLatch, Semaphore} import kafka.server.epoch.LeaderEpochFileCache +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.IBP_2_6_IV0 import org.apache.kafka.server.metrics.KafkaYammerMetrics import scala.jdk.CollectionConverters._ @@ -199,7 +200,7 @@ class PartitionTest extends AbstractPartitionTest { partition = new Partition( topicPartition, replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, localBrokerId = brokerId, time, isrChangeListener, @@ -1739,7 +1740,7 @@ class PartitionTest extends AbstractPartitionTest { val partition = new Partition(topicPartition, replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs, - interBrokerProtocolVersion = KAFKA_2_6_IV0, // shouldn't matter, but set this to a ZK isr version + interBrokerProtocolVersion = IBP_2_6_IV0, // shouldn't matter, but set this to a ZK isr version localBrokerId = brokerId, time, isrChangeListener, @@ -1836,7 +1837,7 @@ class PartitionTest extends AbstractPartitionTest { // Create new Partition object for same topicPartition val partition2 = new Partition(topicPartition, replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, localBrokerId = brokerId, time, isrChangeListener, @@ -1880,7 +1881,7 @@ class PartitionTest extends AbstractPartitionTest { // Create new Partition object for same topicPartition val partition2 = new Partition(topicPartition, replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, localBrokerId = brokerId, time, isrChangeListener, @@ -1962,7 +1963,7 @@ class PartitionTest extends AbstractPartitionTest { def testUpdateAssignmentAndIsr(): Unit = { val topicPartition = new TopicPartition("test", 1) val partition = new Partition( - topicPartition, 1000, ApiVersion.latestVersion, 0, + topicPartition, 1000, MetadataVersion.latest, 0, new SystemTime(), mock(classOf[AlterPartitionListener]), mock(classOf[DelayedOperations]), mock(classOf[MetadataCache]), mock(classOf[LogManager]), mock(classOf[AlterPartitionManager])) @@ -2005,7 +2006,7 @@ class PartitionTest extends AbstractPartitionTest { val spyLogManager = spy(logManager) val partition = new Partition(topicPartition, replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, localBrokerId = brokerId, time, 
isrChangeListener, @@ -2043,7 +2044,7 @@ class PartitionTest extends AbstractPartitionTest { val partition = new Partition(topicPartition, replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, localBrokerId = brokerId, time, isrChangeListener, @@ -2084,7 +2085,7 @@ class PartitionTest extends AbstractPartitionTest { val partition = new Partition(topicPartition, replicaLagTimeMaxMs = Defaults.ReplicaLagTimeMaxMs, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, localBrokerId = brokerId, time, isrChangeListener, diff --git a/core/src/test/scala/unit/kafka/cluster/PartitionWithLegacyMessageFormatTest.scala b/core/src/test/scala/unit/kafka/cluster/PartitionWithLegacyMessageFormatTest.scala index 50b10fa20..75fec767a 100644 --- a/core/src/test/scala/unit/kafka/cluster/PartitionWithLegacyMessageFormatTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/PartitionWithLegacyMessageFormatTest.scala @@ -16,28 +16,30 @@ */ package kafka.cluster -import kafka.api.{ApiVersion, KAFKA_2_8_IV1} import kafka.log.LogConfig import kafka.utils.TestUtils import org.apache.kafka.common.record.{RecordVersion, SimpleRecord} import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET} import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.Test - import java.util.Optional + +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.IBP_2_8_IV1 + import scala.annotation.nowarn class PartitionWithLegacyMessageFormatTest extends AbstractPartitionTest { // legacy message formats are only supported with IBP < 3.0 - override protected def interBrokerProtocolVersion: ApiVersion = KAFKA_2_8_IV1 + override protected def interBrokerProtocolVersion: MetadataVersion = IBP_2_8_IV1 @nowarn("cat=deprecation") @Test def testMakeLeaderDoesNotUpdateEpochCacheForOldFormats(): Unit = { val leaderEpoch = 8 configRepository.setTopicConfig(topicPartition.topic(), - LogConfig.MessageFormatVersionProp, kafka.api.KAFKA_0_10_2_IV0.shortVersion) + LogConfig.MessageFormatVersionProp, MetadataVersion.IBP_0_10_2_IV0.shortVersion) val log = logManager.getOrCreateLog(topicPartition, topicId = None) log.appendAsLeader(TestUtils.records(List( new SimpleRecord("k1".getBytes, "v1".getBytes), diff --git a/core/src/test/scala/unit/kafka/controller/ControllerChannelManagerTest.scala b/core/src/test/scala/unit/kafka/controller/ControllerChannelManagerTest.scala index f9a1642cd..f237335ba 100644 --- a/core/src/test/scala/unit/kafka/controller/ControllerChannelManagerTest.scala +++ b/core/src/test/scala/unit/kafka/controller/ControllerChannelManagerTest.scala @@ -17,7 +17,8 @@ package kafka.controller import java.util.Properties -import kafka.api.{ApiVersion, KAFKA_0_10_0_IV1, KAFKA_0_10_2_IV0, KAFKA_0_9_0, KAFKA_1_0_IV0, KAFKA_2_2_IV0, KAFKA_2_4_IV0, KAFKA_2_4_IV1, KAFKA_2_6_IV0, KAFKA_2_8_IV1, KAFKA_3_2_IV0, LeaderAndIsr} + +import kafka.api.LeaderAndIsr import kafka.cluster.{Broker, EndPoint} import kafka.server.KafkaConfig import kafka.utils.TestUtils @@ -32,8 +33,11 @@ import org.apache.kafka.common.requests.{AbstractControlRequest, AbstractRespons import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.metadata.LeaderRecoveryState +import 
org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.{IBP_0_10_0_IV1, IBP_0_10_2_IV0, IBP_0_9_0, IBP_1_0_IV0, IBP_2_2_IV0, IBP_2_4_IV0, IBP_2_4_IV1, IBP_2_6_IV0, IBP_2_8_IV1, IBP_3_2_IV0} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test + import scala.collection.mutable import scala.collection.mutable.ListBuffer import scala.jdk.CollectionConverters._ @@ -157,23 +161,23 @@ class ControllerChannelManagerTest { @Test def testLeaderAndIsrInterBrokerProtocolVersion(): Unit = { - testLeaderAndIsrRequestFollowsInterBrokerProtocolVersion(ApiVersion.latestVersion, ApiKeys.LEADER_AND_ISR.latestVersion) + testLeaderAndIsrRequestFollowsInterBrokerProtocolVersion(MetadataVersion.latest, ApiKeys.LEADER_AND_ISR.latestVersion) - for (apiVersion <- ApiVersion.allVersions) { + for (metadataVersion <- MetadataVersion.VALUES) { val leaderAndIsrRequestVersion: Short = - if (apiVersion >= KAFKA_3_2_IV0) 6 - else if (apiVersion >= KAFKA_2_8_IV1) 5 - else if (apiVersion >= KAFKA_2_4_IV1) 4 - else if (apiVersion >= KAFKA_2_4_IV0) 3 - else if (apiVersion >= KAFKA_2_2_IV0) 2 - else if (apiVersion >= KAFKA_1_0_IV0) 1 + if (metadataVersion.isAtLeast(IBP_3_2_IV0)) 6 + else if (metadataVersion.isAtLeast(IBP_2_8_IV1)) 5 + else if (metadataVersion.isAtLeast(IBP_2_4_IV1)) 4 + else if (metadataVersion.isAtLeast(IBP_2_4_IV0)) 3 + else if (metadataVersion.isAtLeast(IBP_2_2_IV0)) 2 + else if (metadataVersion.isAtLeast(IBP_1_0_IV0)) 1 else 0 - testLeaderAndIsrRequestFollowsInterBrokerProtocolVersion(apiVersion, leaderAndIsrRequestVersion) + testLeaderAndIsrRequestFollowsInterBrokerProtocolVersion(metadataVersion, leaderAndIsrRequestVersion) } } - private def testLeaderAndIsrRequestFollowsInterBrokerProtocolVersion(interBrokerProtocolVersion: ApiVersion, + private def testLeaderAndIsrRequestFollowsInterBrokerProtocolVersion(interBrokerProtocolVersion: MetadataVersion, expectedLeaderAndIsrVersion: Short): Unit = { val context = initContext(Seq(1, 2, 3), 2, 3, Set("foo", "bar")) val config = createConfig(interBrokerProtocolVersion) @@ -181,7 +185,7 @@ class ControllerChannelManagerTest { val partition = new TopicPartition("foo", 0) var leaderAndIsr = LeaderAndIsr(1, List(1, 2)) - if (interBrokerProtocolVersion >= KAFKA_3_2_IV0) { + if (interBrokerProtocolVersion.isAtLeast(IBP_3_2_IV0)) { leaderAndIsr = leaderAndIsr.copy(leaderRecoveryState = LeaderRecoveryState.RECOVERING) } @@ -201,7 +205,7 @@ class ControllerChannelManagerTest { val byteBuffer = request.serialize val deserializedRequest = LeaderAndIsrRequest.parse(byteBuffer, expectedLeaderAndIsrVersion) - val expectedRecovery = if (interBrokerProtocolVersion >= KAFKA_3_2_IV0) { + val expectedRecovery = if (interBrokerProtocolVersion.isAtLeast(IBP_3_2_IV0)) { LeaderRecoveryState.RECOVERING } else { LeaderRecoveryState.RECOVERED @@ -213,10 +217,10 @@ class ControllerChannelManagerTest { } } - if (interBrokerProtocolVersion >= KAFKA_2_8_IV1) { + if (interBrokerProtocolVersion.isAtLeast(IBP_2_8_IV1)) { assertFalse(request.topicIds().get("foo").equals(Uuid.ZERO_UUID)) assertFalse(deserializedRequest.topicIds().get("foo").equals(Uuid.ZERO_UUID)) - } else if (interBrokerProtocolVersion >= KAFKA_2_2_IV0) { + } else if (interBrokerProtocolVersion.isAtLeast(IBP_2_2_IV0)) { assertFalse(request.topicIds().get("foo").equals(Uuid.ZERO_UUID)) assertTrue(deserializedRequest.topicIds().get("foo").equals(Uuid.ZERO_UUID)) } else { @@ -374,24 +378,24 @@ class ControllerChannelManagerTest { @Test def 
testUpdateMetadataInterBrokerProtocolVersion(): Unit = { - testUpdateMetadataFollowsInterBrokerProtocolVersion(ApiVersion.latestVersion, ApiKeys.UPDATE_METADATA.latestVersion) + testUpdateMetadataFollowsInterBrokerProtocolVersion(MetadataVersion.latest, ApiKeys.UPDATE_METADATA.latestVersion) - for (apiVersion <- ApiVersion.allVersions) { + for (metadataVersion <- MetadataVersion.VALUES) { val updateMetadataRequestVersion: Short = - if (apiVersion >= KAFKA_2_8_IV1) 7 - else if (apiVersion >= KAFKA_2_4_IV1) 6 - else if (apiVersion >= KAFKA_2_2_IV0) 5 - else if (apiVersion >= KAFKA_1_0_IV0) 4 - else if (apiVersion >= KAFKA_0_10_2_IV0) 3 - else if (apiVersion >= KAFKA_0_10_0_IV1) 2 - else if (apiVersion >= KAFKA_0_9_0) 1 + if (metadataVersion.isAtLeast(IBP_2_8_IV1)) 7 + else if (metadataVersion.isAtLeast(IBP_2_4_IV1)) 6 + else if (metadataVersion.isAtLeast(IBP_2_2_IV0)) 5 + else if (metadataVersion.isAtLeast(IBP_1_0_IV0)) 4 + else if (metadataVersion.isAtLeast(IBP_0_10_2_IV0)) 3 + else if (metadataVersion.isAtLeast(IBP_0_10_0_IV1)) 2 + else if (metadataVersion.isAtLeast(IBP_0_9_0)) 1 else 0 - testUpdateMetadataFollowsInterBrokerProtocolVersion(apiVersion, updateMetadataRequestVersion) + testUpdateMetadataFollowsInterBrokerProtocolVersion(metadataVersion, updateMetadataRequestVersion) } } - private def testUpdateMetadataFollowsInterBrokerProtocolVersion(interBrokerProtocolVersion: ApiVersion, + private def testUpdateMetadataFollowsInterBrokerProtocolVersion(interBrokerProtocolVersion: MetadataVersion, expectedUpdateMetadataVersion: Short): Unit = { val context = initContext(Seq(1, 2, 3), 2, 3, Set("foo", "bar")) val config = createConfig(interBrokerProtocolVersion) @@ -470,12 +474,12 @@ class ControllerChannelManagerTest { @Test def testStopReplicaRequestsWhileTopicQueuedForDeletion(): Unit = { - for (apiVersion <- ApiVersion.allVersions) { - testStopReplicaRequestsWhileTopicQueuedForDeletion(apiVersion) + for (metadataVersion <- MetadataVersion.VALUES) { + testStopReplicaRequestsWhileTopicQueuedForDeletion(metadataVersion) } } - private def testStopReplicaRequestsWhileTopicQueuedForDeletion(interBrokerProtocolVersion: ApiVersion): Unit = { + private def testStopReplicaRequestsWhileTopicQueuedForDeletion(interBrokerProtocolVersion: MetadataVersion): Unit = { val context = initContext(Seq(1, 2, 3), 2, 3, Set("foo", "bar")) val config = createConfig(interBrokerProtocolVersion) val batch = new MockControllerBrokerRequestBatch(context, config) @@ -517,12 +521,12 @@ class ControllerChannelManagerTest { @Test def testStopReplicaRequestsWhileTopicDeletionStarted(): Unit = { - for (apiVersion <- ApiVersion.allVersions) { - testStopReplicaRequestsWhileTopicDeletionStarted(apiVersion) + for (metadataVersion <- MetadataVersion.VALUES) { + testStopReplicaRequestsWhileTopicDeletionStarted(metadataVersion) } } - private def testStopReplicaRequestsWhileTopicDeletionStarted(interBrokerProtocolVersion: ApiVersion): Unit = { + private def testStopReplicaRequestsWhileTopicDeletionStarted(interBrokerProtocolVersion: MetadataVersion): Unit = { val context = initContext(Seq(1, 2, 3), 2, 3, Set("foo", "bar")) val config = createConfig(interBrokerProtocolVersion) val batch = new MockControllerBrokerRequestBatch(context, config) @@ -572,12 +576,12 @@ class ControllerChannelManagerTest { @Test def testStopReplicaRequestWithoutDeletePartitionWhileTopicDeletionStarted(): Unit = { - for (apiVersion <- ApiVersion.allVersions) { - testStopReplicaRequestWithoutDeletePartitionWhileTopicDeletionStarted(apiVersion) + for 
(metadataVersion <- MetadataVersion.VALUES) { + testStopReplicaRequestWithoutDeletePartitionWhileTopicDeletionStarted(metadataVersion) } } - private def testStopReplicaRequestWithoutDeletePartitionWhileTopicDeletionStarted(interBrokerProtocolVersion: ApiVersion): Unit = { + private def testStopReplicaRequestWithoutDeletePartitionWhileTopicDeletionStarted(interBrokerProtocolVersion: MetadataVersion): Unit = { val context = initContext(Seq(1, 2, 3), 2, 3, Set("foo", "bar")) val config = createConfig(interBrokerProtocolVersion) val batch = new MockControllerBrokerRequestBatch(context, config) @@ -619,22 +623,22 @@ class ControllerChannelManagerTest { @Test def testMixedDeleteAndNotDeleteStopReplicaRequests(): Unit = { - testMixedDeleteAndNotDeleteStopReplicaRequests(ApiVersion.latestVersion, + testMixedDeleteAndNotDeleteStopReplicaRequests(MetadataVersion.latest, ApiKeys.STOP_REPLICA.latestVersion) - for (apiVersion <- ApiVersion.allVersions) { - if (apiVersion < KAFKA_2_2_IV0) - testMixedDeleteAndNotDeleteStopReplicaRequests(apiVersion, 0.toShort) - else if (apiVersion < KAFKA_2_4_IV1) - testMixedDeleteAndNotDeleteStopReplicaRequests(apiVersion, 1.toShort) - else if (apiVersion < KAFKA_2_6_IV0) - testMixedDeleteAndNotDeleteStopReplicaRequests(apiVersion, 2.toShort) + for (metadataVersion <- MetadataVersion.VALUES) { + if (metadataVersion.isLessThan(IBP_2_2_IV0)) + testMixedDeleteAndNotDeleteStopReplicaRequests(metadataVersion, 0.toShort) + else if (metadataVersion.isLessThan(IBP_2_4_IV1)) + testMixedDeleteAndNotDeleteStopReplicaRequests(metadataVersion, 1.toShort) + else if (metadataVersion.isLessThan(IBP_2_6_IV0)) + testMixedDeleteAndNotDeleteStopReplicaRequests(metadataVersion, 2.toShort) else - testMixedDeleteAndNotDeleteStopReplicaRequests(apiVersion, 3.toShort) + testMixedDeleteAndNotDeleteStopReplicaRequests(metadataVersion, 3.toShort) } } - private def testMixedDeleteAndNotDeleteStopReplicaRequests(interBrokerProtocolVersion: ApiVersion, + private def testMixedDeleteAndNotDeleteStopReplicaRequests(interBrokerProtocolVersion: MetadataVersion, expectedStopReplicaRequestVersion: Short): Unit = { val context = initContext(Seq(1, 2, 3), 2, 3, Set("foo", "bar")) val config = createConfig(interBrokerProtocolVersion) @@ -665,8 +669,8 @@ class ControllerChannelManagerTest { assertEquals(1, batch.sentRequests.size) assertTrue(batch.sentRequests.contains(2)) - // Since KAFKA_2_6_IV0, only one StopReplicaRequest is sent out - if (interBrokerProtocolVersion >= KAFKA_2_6_IV0) { + // Since IBP_2_6_IV0, only one StopReplicaRequest is sent out + if (interBrokerProtocolVersion.isAtLeast(IBP_2_6_IV0)) { val sentRequests = batch.sentRequests(2) assertEquals(1, sentRequests.size) @@ -769,21 +773,21 @@ class ControllerChannelManagerTest { @Test def testStopReplicaInterBrokerProtocolVersion(): Unit = { - testStopReplicaFollowsInterBrokerProtocolVersion(ApiVersion.latestVersion, ApiKeys.STOP_REPLICA.latestVersion) - - for (apiVersion <- ApiVersion.allVersions) { - if (apiVersion < KAFKA_2_2_IV0) - testStopReplicaFollowsInterBrokerProtocolVersion(apiVersion, 0.toShort) - else if (apiVersion < KAFKA_2_4_IV1) - testStopReplicaFollowsInterBrokerProtocolVersion(apiVersion, 1.toShort) - else if (apiVersion < KAFKA_2_6_IV0) - testStopReplicaFollowsInterBrokerProtocolVersion(apiVersion, 2.toShort) + testStopReplicaFollowsInterBrokerProtocolVersion(MetadataVersion.latest, ApiKeys.STOP_REPLICA.latestVersion) + + for (metadataVersion <- MetadataVersion.VALUES) { + if (metadataVersion.isLessThan(IBP_2_2_IV0)) + 
testStopReplicaFollowsInterBrokerProtocolVersion(metadataVersion, 0.toShort) + else if (metadataVersion.isLessThan(IBP_2_4_IV1)) + testStopReplicaFollowsInterBrokerProtocolVersion(metadataVersion, 1.toShort) + else if (metadataVersion.isLessThan(IBP_2_6_IV0)) + testStopReplicaFollowsInterBrokerProtocolVersion(metadataVersion, 2.toShort) else - testStopReplicaFollowsInterBrokerProtocolVersion(apiVersion, 3.toShort) + testStopReplicaFollowsInterBrokerProtocolVersion(metadataVersion, 3.toShort) } } - private def testStopReplicaFollowsInterBrokerProtocolVersion(interBrokerProtocolVersion: ApiVersion, + private def testStopReplicaFollowsInterBrokerProtocolVersion(interBrokerProtocolVersion: MetadataVersion, expectedStopReplicaRequestVersion: Short): Unit = { val context = initContext(Seq(1, 2, 3), 2, 3, Set("foo")) val config = createConfig(interBrokerProtocolVersion) @@ -884,7 +888,7 @@ class ControllerChannelManagerTest { } } - private def createConfig(interBrokerVersion: ApiVersion): KafkaConfig = { + private def createConfig(interBrokerVersion: MetadataVersion): KafkaConfig = { val props = new Properties() props.put(KafkaConfig.BrokerIdProp, controllerId.toString) props.put(KafkaConfig.ZkConnectProp, "zkConnect") diff --git a/core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala b/core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala index aa631c95e..d53fc763e 100644 --- a/core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/controller/ControllerIntegrationTest.scala @@ -21,7 +21,7 @@ import java.util.Properties import java.util.concurrent.{CompletableFuture, CountDownLatch, LinkedBlockingQueue, TimeUnit} import com.yammer.metrics.core.Timer -import kafka.api.{ApiVersion, KAFKA_2_6_IV0, KAFKA_2_7_IV0, LeaderAndIsr} +import kafka.api.LeaderAndIsr import kafka.controller.KafkaController.AlterPartitionCallback import kafka.server.{KafkaConfig, KafkaServer, QuorumTestHarness} import kafka.utils.{LogCaptureAppender, TestUtils} @@ -32,6 +32,8 @@ import org.apache.kafka.common.metrics.KafkaMetric import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.{ElectionType, TopicPartition, Uuid} import org.apache.kafka.metadata.LeaderRecoveryState +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.{IBP_2_6_IV0, IBP_2_7_IV0} import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.log4j.Level import org.junit.jupiter.api.Assertions.{assertEquals, assertNotEquals, assertTrue} @@ -630,32 +632,32 @@ class ControllerIntegrationTest extends QuorumTestHarness { @Test def testControllerFeatureZNodeSetupWhenFeatureVersioningIsEnabledWithNonExistingFeatureZNode(): Unit = { - testControllerFeatureZNodeSetup(Option.empty, KAFKA_2_7_IV0) + testControllerFeatureZNodeSetup(Option.empty, IBP_2_7_IV0) } @Test def testControllerFeatureZNodeSetupWhenFeatureVersioningIsEnabledWithDisabledExistingFeatureZNode(): Unit = { - testControllerFeatureZNodeSetup(Some(new FeatureZNode(FeatureZNodeStatus.Disabled, Features.emptyFinalizedFeatures())), KAFKA_2_7_IV0) + testControllerFeatureZNodeSetup(Some(new FeatureZNode(FeatureZNodeStatus.Disabled, Features.emptyFinalizedFeatures())), IBP_2_7_IV0) } @Test def testControllerFeatureZNodeSetupWhenFeatureVersioningIsEnabledWithEnabledExistingFeatureZNode(): Unit = { - testControllerFeatureZNodeSetup(Some(new FeatureZNode(FeatureZNodeStatus.Enabled, Features.emptyFinalizedFeatures())), 
KAFKA_2_7_IV0) + testControllerFeatureZNodeSetup(Some(new FeatureZNode(FeatureZNodeStatus.Enabled, Features.emptyFinalizedFeatures())), IBP_2_7_IV0) } @Test def testControllerFeatureZNodeSetupWhenFeatureVersioningIsDisabledWithNonExistingFeatureZNode(): Unit = { - testControllerFeatureZNodeSetup(Option.empty, KAFKA_2_6_IV0) + testControllerFeatureZNodeSetup(Option.empty, IBP_2_6_IV0) } @Test def testControllerFeatureZNodeSetupWhenFeatureVersioningIsDisabledWithDisabledExistingFeatureZNode(): Unit = { - testControllerFeatureZNodeSetup(Some(new FeatureZNode(FeatureZNodeStatus.Disabled, Features.emptyFinalizedFeatures())), KAFKA_2_6_IV0) + testControllerFeatureZNodeSetup(Some(new FeatureZNode(FeatureZNodeStatus.Disabled, Features.emptyFinalizedFeatures())), IBP_2_6_IV0) } @Test def testControllerFeatureZNodeSetupWhenFeatureVersioningIsDisabledWithEnabledExistingFeatureZNode(): Unit = { - testControllerFeatureZNodeSetup(Some(new FeatureZNode(FeatureZNodeStatus.Enabled, Features.emptyFinalizedFeatures())), KAFKA_2_6_IV0) + testControllerFeatureZNodeSetup(Some(new FeatureZNode(FeatureZNodeStatus.Enabled, Features.emptyFinalizedFeatures())), IBP_2_6_IV0) } @Test @@ -782,7 +784,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { } private def testControllerFeatureZNodeSetup(initialZNode: Option[FeatureZNode], - interBrokerProtocolVersion: ApiVersion): Unit = { + interBrokerProtocolVersion: MetadataVersion): Unit = { val versionBeforeOpt = initialZNode match { case Some(node) => zkClient.createFeatureZNode(node) @@ -809,7 +811,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { val (mayBeFeatureZNodeBytes, versionAfter) = zkClient.getDataAndVersion(FeatureZNode.path) val newZNode = FeatureZNode.decode(mayBeFeatureZNodeBytes.get) - if (interBrokerProtocolVersion >= KAFKA_2_7_IV0) { + if (interBrokerProtocolVersion.isAtLeast(IBP_2_7_IV0)) { val emptyZNode = new FeatureZNode(FeatureZNodeStatus.Enabled, Features.emptyFinalizedFeatures) initialZNode match { case Some(node) => { @@ -1213,7 +1215,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { @Test def testTopicIdsAreNotAdded(): Unit = { - servers = makeServers(1, interBrokerProtocolVersion = Some(KAFKA_2_7_IV0)) + servers = makeServers(1, interBrokerProtocolVersion = Some(IBP_2_7_IV0)) TestUtils.waitUntilControllerElected(zkClient) val controller = getController().kafkaController val tp1 = new TopicPartition("t1", 0) @@ -1275,7 +1277,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { val assignment = Map(tp.partition -> ReplicaAssignment(Seq(0), List(), List())) val adminZkClient = new AdminZkClient(zkClient) - servers = makeServers(1, interBrokerProtocolVersion = Some(KAFKA_2_7_IV0)) + servers = makeServers(1, interBrokerProtocolVersion = Some(IBP_2_7_IV0)) adminZkClient.createTopic(tp.topic, 1, 1) waitForPartitionState(tp, firstControllerEpoch, 0, LeaderAndIsr.InitialLeaderEpoch, "failed to get expected partition state upon topic creation") @@ -1316,7 +1318,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { @Test def testNoTopicIdPersistsThroughControllerReelection(): Unit = { - servers = makeServers(2, interBrokerProtocolVersion = Some(KAFKA_2_7_IV0)) + servers = makeServers(2, interBrokerProtocolVersion = Some(IBP_2_7_IV0)) val controllerId = TestUtils.waitUntilControllerElected(zkClient) val controller = getController().kafkaController val tp = new TopicPartition("t", 0) @@ -1356,7 +1358,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { @Test def 
testTopicIdCreatedOnUpgrade(): Unit = { - servers = makeServers(1, interBrokerProtocolVersion = Some(KAFKA_2_7_IV0)) + servers = makeServers(1, interBrokerProtocolVersion = Some(IBP_2_7_IV0)) val controllerId = TestUtils.waitUntilControllerElected(zkClient) val controller = getController().kafkaController val tp = new TopicPartition("t", 0) @@ -1393,12 +1395,12 @@ class ControllerIntegrationTest extends QuorumTestHarness { @Test def testTopicIdCreatedOnUpgradeMultiBrokerScenario(): Unit = { // Simulate an upgrade scenario where the controller is still on a pre-topic ID IBP, but the other two brokers are upgraded. - servers = makeServers(1, interBrokerProtocolVersion = Some(KAFKA_2_7_IV0)) + servers = makeServers(1, interBrokerProtocolVersion = Some(MetadataVersion.IBP_2_7_IV0)) servers = servers ++ makeServers(3, startingIdNumber = 1) val originalControllerId = TestUtils.waitUntilControllerElected(zkClient) assertEquals(0, originalControllerId) val controller = getController().kafkaController - assertEquals(KAFKA_2_7_IV0, servers(originalControllerId).config.interBrokerProtocolVersion) + assertEquals(IBP_2_7_IV0, servers(originalControllerId).config.interBrokerProtocolVersion) val remainingBrokers = servers.filter(_.config.brokerId != originalControllerId) val tp = new TopicPartition("t", 0) // Only the remaining brokers will have the replicas for the partition @@ -1452,7 +1454,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { val adminZkClient = new AdminZkClient(zkClient) // start server with old IBP - servers = makeServers(1, interBrokerProtocolVersion = Some(KAFKA_2_7_IV0)) + servers = makeServers(1, interBrokerProtocolVersion = Some(IBP_2_7_IV0)) // use create topic with ZK client directly, without topic ID adminZkClient.createTopic(tp.topic, 1, 1) waitForPartitionState(tp, firstControllerEpoch, 0, LeaderAndIsr.InitialLeaderEpoch, @@ -1478,7 +1480,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { // Downgrade back to 2.7 servers(0).shutdown() servers(0).awaitShutdown() - servers = makeServers(1, interBrokerProtocolVersion = Some(KAFKA_2_7_IV0)) + servers = makeServers(1, interBrokerProtocolVersion = Some(IBP_2_7_IV0)) waitForPartitionState(tp, firstControllerEpoch, 0, LeaderAndIsr.InitialLeaderEpoch, "failed to get expected partition state upon topic creation") val topicIdAfterDowngrade = zkClient.getTopicIdsForTopics(Set(tp.topic())).get(tp.topic()) @@ -1611,7 +1613,7 @@ class ControllerIntegrationTest extends QuorumTestHarness { listeners : Option[String] = None, listenerSecurityProtocolMap : Option[String] = None, controlPlaneListenerName : Option[String] = None, - interBrokerProtocolVersion: Option[ApiVersion] = None, + interBrokerProtocolVersion: Option[MetadataVersion] = None, logDirCount: Int = 1, startingIdNumber: Int = 0): Seq[KafkaServer] = { val configs = TestUtils.createBrokerConfigs(numConfigs, zkConnect, enableControlledShutdown = enableControlledShutdown, logDirCount = logDirCount, startingIdNumber = startingIdNumber) diff --git a/core/src/test/scala/unit/kafka/controller/PartitionStateMachineTest.scala b/core/src/test/scala/unit/kafka/controller/PartitionStateMachineTest.scala index 70934d964..9f11d42e6 100644 --- a/core/src/test/scala/unit/kafka/controller/PartitionStateMachineTest.scala +++ b/core/src/test/scala/unit/kafka/controller/PartitionStateMachineTest.scala @@ -16,8 +16,6 @@ */ package kafka.controller -import kafka.api.KAFKA_3_1_IV0 -import kafka.api.KAFKA_3_2_IV0 import kafka.api.LeaderAndIsr import kafka.log.LogConfig import 
kafka.server.KafkaConfig @@ -26,6 +24,7 @@ import kafka.zk.KafkaZkClient.UpdateLeaderAndIsrResult import kafka.zk.{KafkaZkClient, TopicPartitionStateZNode} import kafka.zookeeper._ import org.apache.kafka.common.TopicPartition +import org.apache.kafka.server.common.MetadataVersion.{IBP_3_1_IV0, IBP_3_2_IV0} import org.apache.zookeeper.KeeperException.Code import org.apache.zookeeper.data.Stat import org.junit.jupiter.api.Assertions._ @@ -294,7 +293,7 @@ class PartitionStateMachineTest { val partitionStateMachine = { - val apiVersion = if (isLeaderRecoverySupported) KAFKA_3_2_IV0 else KAFKA_3_1_IV0 + val apiVersion = if (isLeaderRecoverySupported) IBP_3_2_IV0 else IBP_3_1_IV0 val properties = TestUtils.createBrokerConfig(brokerId, "zkConnect") properties.setProperty(KafkaConfig.InterBrokerProtocolVersionProp, apiVersion.toString) diff --git a/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataManagerTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataManagerTest.scala index 2104fd3e5..250f22a24 100644 --- a/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataManagerTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataManagerTest.scala @@ -24,7 +24,6 @@ import java.util.{Collections, Optional} import com.yammer.metrics.core.Gauge import javax.management.ObjectName -import kafka.api._ import kafka.cluster.Partition import kafka.common.OffsetAndMetadata import kafka.log.{AppendOrigin, LogAppendInfo, UnifiedLog} @@ -41,6 +40,8 @@ import org.apache.kafka.common.record._ import org.apache.kafka.common.requests.OffsetFetchResponse import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.utils.Utils +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion._ import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} @@ -91,7 +92,7 @@ class GroupMetadataManagerTest { metrics = new kMetrics() time = new MockTime replicaManager = mock(classOf[ReplicaManager]) - groupMetadataManager = new GroupMetadataManager(0, ApiVersion.latestVersion, offsetConfig, replicaManager, + groupMetadataManager = new GroupMetadataManager(0, MetadataVersion.latest, offsetConfig, replicaManager, time, metrics) groupMetadataManager.startup(() => numOffsetsPartitions, false) partition = mock(classOf[Partition]) @@ -106,7 +107,7 @@ class GroupMetadataManagerTest { def testLogInfoFromCleanupGroupMetadata(): Unit = { var expiredOffsets: Int = 0 var infoCount = 0 - val gmm = new GroupMetadataManager(0, ApiVersion.latestVersion, offsetConfig, replicaManager, time, metrics) { + val gmm = new GroupMetadataManager(0, MetadataVersion.latest, offsetConfig, replicaManager, time, metrics) { override def cleanupGroupMetadata(groups: Iterable[GroupMetadata], requestLocal: RequestLocal, selector: GroupMetadata => Map[TopicPartition, OffsetAndMetadata]): Int = expiredOffsets @@ -1054,17 +1055,17 @@ class GroupMetadataManagerTest { val protocol = "range" val memberId = "memberId" - for (apiVersion <- ApiVersion.allVersions) { - val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, apiVersion = apiVersion) + for (metadataVersion <- MetadataVersion.VALUES) { + val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, metadataVersion = metadataVersion) val deserializedGroupMetadata = 
GroupMetadataManager.readGroupMessageValue(groupId, groupMetadataRecord.value(), time) // GROUP_METADATA_VALUE_SCHEMA_V2 or higher should correctly set the currentStateTimestamp - if (apiVersion >= KAFKA_2_1_IV0) + if (metadataVersion.isAtLeast(IBP_2_1_IV0)) assertEquals(Some(time.milliseconds()), deserializedGroupMetadata.currentStateTimestamp, - s"the apiVersion $apiVersion doesn't set the currentStateTimestamp correctly.") + s"the metadataVersion $metadataVersion doesn't set the currentStateTimestamp correctly.") else assertTrue(deserializedGroupMetadata.currentStateTimestamp.isEmpty, - s"the apiVersion $apiVersion should not set the currentStateTimestamp.") + s"the metadataVersion $metadataVersion should not set the currentStateTimestamp.") } } @@ -1073,10 +1074,10 @@ class GroupMetadataManagerTest { val generation = 1 val protocol = "range" val memberId = "memberId" - val oldApiVersions = Array(KAFKA_0_9_0, KAFKA_0_10_1_IV0, KAFKA_2_1_IV0) + val oldMetadataVersions = Array(IBP_0_9_0, IBP_0_10_1_IV0, IBP_2_1_IV0) - for (apiVersion <- oldApiVersions) { - val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, apiVersion = apiVersion) + for (metadataVersion <- oldMetadataVersions) { + val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, metadataVersion = metadataVersion) val deserializedGroupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, groupMetadataRecord.value(), time) assertEquals(groupId, deserializedGroupMetadata.groupId) @@ -2181,10 +2182,10 @@ class GroupMetadataManagerTest { new TopicPartition("bar", 0) -> 8992L ) - val apiVersion = KAFKA_1_1_IV0 - val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets, apiVersion = apiVersion, retentionTimeOpt = Some(100)) + val metadataVersion = IBP_1_1_IV0 + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets, metadataVersion = metadataVersion, retentionTimeOpt = Some(100)) val memberId = "98098230493" - val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, apiVersion = apiVersion) + val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, metadataVersion = metadataVersion) val records = MemoryRecords.withRecords(startOffset, CompressionType.NONE, (offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*) @@ -2255,8 +2256,8 @@ class GroupMetadataManagerTest { commitTimestamp = time.milliseconds(), expireTimestamp = None) - def verifySerde(apiVersion: ApiVersion, expectedOffsetCommitValueVersion: Int): Unit = { - val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, apiVersion) + def verifySerde(metadataVersion: MetadataVersion, expectedOffsetCommitValueVersion: Int): Unit = { + val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, metadataVersion) val buffer = ByteBuffer.wrap(bytes) assertEquals(expectedOffsetCommitValueVersion, buffer.getShort(0).toInt) @@ -2275,10 +2276,10 @@ class GroupMetadataManagerTest { assertEquals(expectedLeaderEpoch, deserializedOffsetAndMetadata.leaderEpoch) } - for (version <- ApiVersion.allVersions) { + for (version <- MetadataVersion.VALUES) { val expectedSchemaVersion = version match { - case v if v < KAFKA_2_1_IV0 => 1 - case v if v < KAFKA_2_1_IV1 => 2 + case v if v.isLessThan(IBP_2_1_IV0) => 1 + case v if v.isLessThan(IBP_2_1_IV1) => 2 case _ => 3 } verifySerde(version, expectedSchemaVersion) @@ -2297,8 +2298,8 @@ class 
GroupMetadataManagerTest { commitTimestamp = time.milliseconds(), expireTimestamp = Some(time.milliseconds() + 1000)) - def verifySerde(apiVersion: ApiVersion): Unit = { - val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, apiVersion) + def verifySerde(metadataVersion: MetadataVersion): Unit = { + val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, metadataVersion) val buffer = ByteBuffer.wrap(bytes) assertEquals(1, buffer.getShort(0).toInt) @@ -2306,7 +2307,7 @@ class GroupMetadataManagerTest { assertEquals(offsetAndMetadata, deserializedOffsetAndMetadata) } - for (version <- ApiVersion.allVersions) + for (version <- MetadataVersion.VALUES) verifySerde(version) } @@ -2319,13 +2320,13 @@ class GroupMetadataManagerTest { commitTimestamp = time.milliseconds(), expireTimestamp = None) - def verifySerde(apiVersion: ApiVersion): Unit = { - val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, apiVersion) + def verifySerde(metadataVersion: MetadataVersion): Unit = { + val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, metadataVersion) val buffer = ByteBuffer.wrap(bytes) val version = buffer.getShort(0).toInt - if (apiVersion < KAFKA_2_1_IV0) + if (metadataVersion.isLessThan(IBP_2_1_IV0)) assertEquals(1, version) - else if (apiVersion < KAFKA_2_1_IV1) + else if (metadataVersion.isLessThan(IBP_2_1_IV1)) assertEquals(2, version) else assertEquals(3, version) @@ -2334,7 +2335,7 @@ class GroupMetadataManagerTest { assertEquals(offsetAndMetadata, deserializedOffsetAndMetadata) } - for (version <- ApiVersion.allVersions) + for (version <- MetadataVersion.VALUES) verifySerde(version) } @@ -2397,7 +2398,7 @@ class GroupMetadataManagerTest { val offsetCommitRecord = TestUtils.records(Seq( new SimpleRecord( GroupMetadataManager.offsetCommitKey(groupId, topicPartition), - GroupMetadataManager.offsetCommitValue(OffsetAndMetadata(35L, "", time.milliseconds()), ApiVersion.latestVersion) + GroupMetadataManager.offsetCommitValue(OffsetAndMetadata(35L, "", time.milliseconds()), MetadataVersion.latest) ) )).records.asScala.head val (keyStringOpt, valueStringOpt) = GroupMetadataManager.formatRecordKeyAndValue(offsetCommitRecord) @@ -2487,20 +2488,20 @@ class GroupMetadataManagerTest { protocol: String, memberId: String, assignmentBytes: Array[Byte] = Array.emptyByteArray, - apiVersion: ApiVersion = ApiVersion.latestVersion): SimpleRecord = { + metadataVersion: MetadataVersion = MetadataVersion.latest): SimpleRecord = { val memberProtocols = List((protocol, Array.emptyByteArray)) val member = new MemberMetadata(memberId, Some(groupInstanceId), "clientId", "clientHost", 30000, 10000, protocolType, memberProtocols) val group = GroupMetadata.loadGroup(groupId, Stable, generation, protocolType, protocol, memberId, - if (apiVersion >= KAFKA_2_1_IV0) Some(time.milliseconds()) else None, Seq(member), time) + if (metadataVersion.isAtLeast(IBP_2_1_IV0)) Some(time.milliseconds()) else None, Seq(member), time) val groupMetadataKey = GroupMetadataManager.groupMetadataKey(groupId) - val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map(memberId -> assignmentBytes), apiVersion) + val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map(memberId -> assignmentBytes), metadataVersion) new SimpleRecord(groupMetadataKey, groupMetadataValue) } private def buildEmptyGroupRecord(generation: Int, protocolType: String): SimpleRecord = { val group = GroupMetadata.loadGroup(groupId, Empty, generation, protocolType, null, null, 
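
The GroupMetadataManagerTest serde hunks above pin down how the offset-commit value schema tracks the inter-broker MetadataVersion: schema 1 below IBP_2_1_IV0, schema 2 below IBP_2_1_IV1, schema 3 otherwise, and always schema 1 when an explicit expire timestamp is present. A small sketch of that mapping, assuming a hypothetical expectedOffsetCommitSchema helper:

import org.apache.kafka.server.common.MetadataVersion
import org.apache.kafka.server.common.MetadataVersion.{IBP_2_1_IV0, IBP_2_1_IV1}

// Hypothetical helper mirroring the schema version the serde tests above assert on
// the first two bytes of GroupMetadataManager.offsetCommitValue output.
def expectedOffsetCommitSchema(metadataVersion: MetadataVersion, hasExpireTimestamp: Boolean): Int =
  if (hasExpireTimestamp) 1                                  // explicit expiry always uses schema 1
  else if (metadataVersion.isLessThan(IBP_2_1_IV0)) 1
  else if (metadataVersion.isLessThan(IBP_2_1_IV1)) 2
  else 3

// Covers every known version, as the tests do with MetadataVersion.VALUES.
for (version <- MetadataVersion.VALUES)
  require(Set(1, 2, 3).contains(expectedOffsetCommitSchema(version, hasExpireTimestamp = false)))
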
None, Seq.empty, time) val groupMetadataKey = GroupMetadataManager.groupMetadataKey(groupId) - val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map.empty, ApiVersion.latestVersion) + val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map.empty, MetadataVersion.latest) new SimpleRecord(groupMetadataKey, groupMetadataValue) } @@ -2544,7 +2545,7 @@ class GroupMetadataManagerTest { private def createCommittedOffsetRecords(committedOffsets: Map[TopicPartition, Long], groupId: String = groupId, - apiVersion: ApiVersion = ApiVersion.latestVersion, + metadataVersion: MetadataVersion = MetadataVersion.latest, retentionTimeOpt: Option[Long] = None): Seq[SimpleRecord] = { committedOffsets.map { case (topicPartition, offset) => val commitTimestamp = time.milliseconds() @@ -2556,7 +2557,7 @@ class GroupMetadataManagerTest { OffsetAndMetadata(offset, "", commitTimestamp) } val offsetCommitKey = GroupMetadataManager.offsetCommitKey(groupId, topicPartition) - val offsetCommitValue = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, apiVersion) + val offsetCommitValue = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, metadataVersion) new SimpleRecord(offsetCommitKey, offsetCommitValue) }.toSeq } diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala index 937730705..4229962b5 100755 --- a/core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala @@ -19,13 +19,13 @@ package kafka.log import java.io.File import java.util.Properties -import kafka.api.KAFKA_0_11_0_IV0 -import kafka.api.{KAFKA_0_10_0_IV1, KAFKA_0_9_0} + import kafka.server.KafkaConfig import kafka.server.checkpoints.OffsetCheckpointFile import kafka.utils._ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.record._ +import org.apache.kafka.server.common.MetadataVersion.{IBP_0_9_0, IBP_0_10_0_IV1, IBP_0_11_0_IV0} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.extension.ExtensionContext import org.junit.jupiter.params.ParameterizedTest @@ -151,7 +151,7 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati val log = cleaner.logs.get(topicPartitions(0)) val props = logConfigProperties(maxMessageSize = maxMessageSize) - props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_9_0.version) + props.put(LogConfig.MessageFormatVersionProp, IBP_0_9_0.version) log.updateConfig(new LogConfig(props)) val appends = writeDups(numKeys = 100, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0) @@ -173,7 +173,7 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati val largeMessageOffset = appendInfo.firstOffset.map(_.messageOffset).get // also add some messages with version 1 and version 2 to check that we handle mixed format versions correctly - props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_11_0_IV0.version) + props.put(LogConfig.MessageFormatVersionProp, IBP_0_11_0_IV0.version) log.updateConfig(new LogConfig(props)) val dupsV1 = writeDups(startKey = 30, numKeys = 40, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1) val dupsV2 = writeDups(startKey = 15, numKeys = 5, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V2) @@ -194,7 +194,7 @@ class 
LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati val log = cleaner.logs.get(topicPartitions(0)) val props = logConfigProperties(maxMessageSize = maxMessageSize, segmentSize = 256) - props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_9_0.version) + props.put(LogConfig.MessageFormatVersionProp, IBP_0_9_0.version) log.updateConfig(new LogConfig(props)) // with compression enabled, these messages will be written as a single message containing @@ -202,7 +202,7 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati var appendsV0 = writeDupsSingleMessageSet(numKeys = 2, numDups = 3, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0) appendsV0 ++= writeDupsSingleMessageSet(numKeys = 2, startKey = 3, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V0) - props.put(LogConfig.MessageFormatVersionProp, KAFKA_0_10_0_IV1.version) + props.put(LogConfig.MessageFormatVersionProp, IBP_0_10_0_IV1.version) log.updateConfig(new LogConfig(props)) var appendsV1 = writeDupsSingleMessageSet(startKey = 4, numKeys = 2, numDups = 2, log = log, codec = codec, magicValue = RecordBatch.MAGIC_VALUE_V1) diff --git a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala index f72bb9282..838c043ff 100644 --- a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala @@ -17,7 +17,6 @@ package kafka.log -import kafka.api.KAFKA_3_0_IV1 import kafka.server.{KafkaConfig, ThrottledReplicaListValidator} import kafka.utils.TestUtils import org.apache.kafka.common.config.ConfigDef.Importance.MEDIUM @@ -25,8 +24,10 @@ import org.apache.kafka.common.config.ConfigDef.Type.INT import org.apache.kafka.common.config.{ConfigException, TopicConfig} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test - import java.util.{Collections, Properties} + +import org.apache.kafka.server.common.MetadataVersion.IBP_3_0_IV1 + import scala.annotation.nowarn class LogConfigTest { @@ -65,7 +66,7 @@ class LogConfigTest { assertEquals(2 * millisInHour, logProps.get(LogConfig.SegmentJitterMsProp)) assertEquals(2 * millisInHour, logProps.get(LogConfig.RetentionMsProp)) // The message format version should always be 3.0 if the inter-broker protocol version is 3.0 or higher - assertEquals(KAFKA_3_0_IV1.version, logProps.get(LogConfig.MessageFormatVersionProp)) + assertEquals(IBP_3_0_IV1.version, logProps.get(LogConfig.MessageFormatVersionProp)) } @Test diff --git a/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala b/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala index a6b114320..fba8e9bad 100644 --- a/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala @@ -21,7 +21,7 @@ import java.io.{BufferedWriter, File, FileWriter} import java.nio.ByteBuffer import java.nio.file.{Files, NoSuchFileException, Paths} import java.util.Properties -import kafka.api.{ApiVersion, KAFKA_0_11_0_IV0} + import kafka.server.epoch.{EpochEntry, LeaderEpochFileCache} import kafka.server.{BrokerTopicStats, FetchDataInfo, KafkaConfig, LogDirFailureChannel} import kafka.server.metadata.MockConfigRepository @@ -29,6 +29,8 @@ import kafka.utils.{CoreUtils, MockTime, Scheduler, TestUtils} import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.record.{CompressionType, ControlRecordType, DefaultRecordBatch, MemoryRecords, RecordBatch, RecordVersion, SimpleRecord, 
TimestampType} import org.apache.kafka.common.utils.{Time, Utils} +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.IBP_0_11_0_IV0 import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertNotEquals, assertThrows, assertTrue} import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.mockito.ArgumentMatchers @@ -177,12 +179,12 @@ class LogLoaderTest { @Test def testProducerSnapshotsRecoveryAfterUncleanShutdownV1(): Unit = { - testProducerSnapshotsRecoveryAfterUncleanShutdown(ApiVersion.minSupportedFor(RecordVersion.V1).version) + testProducerSnapshotsRecoveryAfterUncleanShutdown(MetadataVersion.minSupportedFor(RecordVersion.V1).version) } @Test def testProducerSnapshotsRecoveryAfterUncleanShutdownCurrentMessageFormat(): Unit = { - testProducerSnapshotsRecoveryAfterUncleanShutdown(ApiVersion.latestVersion.version) + testProducerSnapshotsRecoveryAfterUncleanShutdown(MetadataVersion.latest.version) } private def createLog(dir: File, @@ -267,7 +269,7 @@ class LogLoaderTest { val expectedSegmentsWithReads = mutable.Set[Long]() val expectedSnapshotOffsets = mutable.Set[Long]() - if (logConfig.messageFormatVersion < KAFKA_0_11_0_IV0) { + if (logConfig.messageFormatVersion.isLessThan(IBP_0_11_0_IV0)) { expectedSegmentsWithReads += activeSegmentOffset expectedSnapshotOffsets ++= log.logSegments.map(_.baseOffset).toVector.takeRight(2) :+ log.logEndOffset } else { diff --git a/core/src/test/scala/unit/kafka/log/LogValidatorTest.scala b/core/src/test/scala/unit/kafka/log/LogValidatorTest.scala index 57a7073ff..0ffa1d484 100644 --- a/core/src/test/scala/unit/kafka/log/LogValidatorTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogValidatorTest.scala @@ -19,7 +19,6 @@ package kafka.log import java.nio.ByteBuffer import java.util.concurrent.TimeUnit -import kafka.api.{ApiVersion, KAFKA_2_0_IV1, KAFKA_2_3_IV1} import kafka.common.{LongRef, RecordValidationException} import kafka.log.LogValidator.ValidationAndOffsetAssignResult import kafka.message._ @@ -29,6 +28,7 @@ import org.apache.kafka.common.errors.{InvalidTimestampException, UnsupportedCom import org.apache.kafka.common.record._ import org.apache.kafka.common.utils.Time import org.apache.kafka.common.{InvalidRecordException, TopicPartition} +import org.apache.kafka.server.common.MetadataVersion import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.test.TestUtils import org.junit.jupiter.api.Assertions._ @@ -127,7 +127,7 @@ class LogValidatorTest { 1000L, RecordBatch.NO_PRODUCER_EPOCH, origin = AppendOrigin.Client, - KAFKA_2_3_IV1, + MetadataVersion.IBP_2_3_IV1, brokerTopicStats, RequestLocal.withThreadConfinedCaching) } @@ -159,7 +159,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) val validatedRecords = validatedResults.validatedRecords @@ -199,7 +199,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) val validatedRecords = 
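
The log-layer hunks above rely on the string form of a MetadataVersion: LogConfig.MessageFormatVersionProp is populated from values such as IBP_0_9_0.version and IBP_3_0_IV1.version, and LogLoaderTest derives the oldest version supporting a record format via MetadataVersion.minSupportedFor(RecordVersion). A brief illustrative sketch; the literal property keys below are assumed stand-ins for the LogConfig and KafkaConfig constants used in the diff:

import java.util.Properties
import org.apache.kafka.common.record.RecordVersion
import org.apache.kafka.server.common.MetadataVersion
import org.apache.kafka.server.common.MetadataVersion.IBP_0_9_0

// Assumed literal keys standing in for LogConfig.MessageFormatVersionProp and
// KafkaConfig.InterBrokerProtocolVersionProp; the values come from MetadataVersion.version.
val props = new Properties()
props.setProperty("message.format.version", IBP_0_9_0.version)
props.setProperty("inter.broker.protocol.version", MetadataVersion.latest.version)

// The oldest MetadataVersion whose message format still covers record format V1.
val oldestForV1: MetadataVersion = MetadataVersion.minSupportedFor(RecordVersion.V1)
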
validatedResults.validatedRecords @@ -248,7 +248,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) val validatedRecords = validatedResults.validatedRecords @@ -311,7 +311,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) } @@ -356,7 +356,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = partitionLeaderEpoch, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) val validatedRecords = validatingResults.validatedRecords @@ -429,7 +429,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = partitionLeaderEpoch, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) val validatedRecords = validatingResults.validatedRecords @@ -486,7 +486,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) val validatedRecords = validatedResults.validatedRecords @@ -532,7 +532,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) val validatedRecords = validatedResults.validatedRecords @@ -590,7 +590,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = partitionLeaderEpoch, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) val validatedRecords = validatedResults.validatedRecords @@ -644,7 +644,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching)) } @@ -668,7 +668,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = 
MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching)) } @@ -692,7 +692,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching)) } @@ -716,7 +716,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching)) } @@ -739,7 +739,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords, offset) } @@ -762,7 +762,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords, offset) } @@ -786,7 +786,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords checkOffsets(messageWithOffset, offset) @@ -811,7 +811,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords checkOffsets(messageWithOffset, offset) @@ -837,7 +837,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords checkOffsets(compressedMessagesWithOffset, offset) @@ -863,7 +863,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords checkOffsets(compressedMessagesWithOffset, offset) @@ -887,7 +887,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - 
interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) checkOffsets(validatedResults.validatedRecords, offset) @@ -913,7 +913,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) checkOffsets(validatedResults.validatedRecords, offset) @@ -939,7 +939,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) checkOffsets(validatedResults.validatedRecords, offset) @@ -965,7 +965,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) checkOffsets(validatedResults.validatedRecords, offset) @@ -991,7 +991,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching)) } @@ -1014,7 +1014,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Coordinator, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) val batches = TestUtils.toList(result.validatedRecords.batches) @@ -1042,7 +1042,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords, offset) } @@ -1066,7 +1066,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords, offset) } @@ -1089,7 +1089,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords, offset) } @@ -1112,7 
+1112,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords, offset) } @@ -1136,7 +1136,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords, offset) } @@ -1160,7 +1160,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords, offset) } @@ -1186,7 +1186,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching)) } @@ -1212,7 +1212,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching)) } @@ -1236,7 +1236,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords, offset) } @@ -1260,7 +1260,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching).validatedRecords, offset) } @@ -1282,7 +1282,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) ) @@ -1313,7 +1313,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = KAFKA_2_0_IV1, + interBrokerProtocolVersion = MetadataVersion.IBP_2_0_IV1, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching)) } @@ -1348,7 +1348,7 @@ class LogValidatorTest { timestampDiffMaxMs = 1000L, 
partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching) ) @@ -1427,7 +1427,7 @@ class LogValidatorTest { timestampDiffMaxMs = 5000L, partitionLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH, origin = AppendOrigin.Client, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, brokerTopicStats = brokerTopicStats, requestLocal = RequestLocal.withThreadConfinedCaching)) } diff --git a/core/src/test/scala/unit/kafka/security/auth/ZkAuthorizationTest.scala b/core/src/test/scala/unit/kafka/security/auth/ZkAuthorizationTest.scala index 3bbce4d56..3c35fd7d4 100644 --- a/core/src/test/scala/unit/kafka/security/auth/ZkAuthorizationTest.scala +++ b/core/src/test/scala/unit/kafka/security/auth/ZkAuthorizationTest.scala @@ -18,6 +18,7 @@ package kafka.security.auth import java.nio.charset.StandardCharsets + import kafka.admin.ZkSecurityMigrator import kafka.server.QuorumTestHarness import kafka.utils.{Logging, TestUtils} @@ -30,12 +31,12 @@ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import scala.util.{Failure, Success, Try} import javax.security.auth.login.Configuration -import kafka.api.ApiVersion import kafka.cluster.{Broker, EndPoint} import kafka.controller.ReplicaAssignment import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.Time +import org.apache.kafka.server.common.MetadataVersion import org.apache.zookeeper.client.ZKClientConfig import scala.jdk.CollectionConverters._ @@ -136,7 +137,7 @@ class ZkAuthorizationTest extends QuorumTestHarness with Logging { private def createBrokerInfo(id: Int, host: String, port: Int, securityProtocol: SecurityProtocol, rack: Option[String] = None): BrokerInfo = BrokerInfo(Broker(id, Seq(new EndPoint(host, port, ListenerName.forSecurityProtocol - (securityProtocol), securityProtocol)), rack = rack), ApiVersion.latestVersion, jmxPort = port + 10) + (securityProtocol), securityProtocol)), rack = rack), MetadataVersion.latest, jmxPort = port + 10) private def newKafkaZkClient(connectionString: String, isSecure: Boolean) = KafkaZkClient(connectionString, isSecure, 6000, 6000, Int.MaxValue, Time.SYSTEM, "ZkAuthorizationTest", diff --git a/core/src/test/scala/unit/kafka/security/authorizer/AclAuthorizerTest.scala b/core/src/test/scala/unit/kafka/security/authorizer/AclAuthorizerTest.scala index 9011eb616..4b33d3e25 100644 --- a/core/src/test/scala/unit/kafka/security/authorizer/AclAuthorizerTest.scala +++ b/core/src/test/scala/unit/kafka/security/authorizer/AclAuthorizerTest.scala @@ -22,8 +22,8 @@ import java.nio.charset.StandardCharsets.UTF_8 import java.nio.file.Files import java.util.{Collections, UUID} import java.util.concurrent.{Executors, Semaphore, TimeUnit} + import kafka.Kafka -import kafka.api.{ApiVersion, KAFKA_2_0_IV0, KAFKA_2_0_IV1} import kafka.security.authorizer.AclEntry.{WildcardHost, WildcardPrincipalString} import kafka.server.{KafkaConfig, QuorumTestHarness} import kafka.utils.TestUtils @@ -42,6 +42,8 @@ import org.apache.kafka.common.resource.PatternType.{LITERAL, MATCH, PREFIXED} import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.server.authorizer._ import org.apache.kafka.common.utils.{Time, 
SecurityUtils => JSecurityUtils} +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.{IBP_2_0_IV0, IBP_2_0_IV1} import org.apache.zookeeper.client.ZKClientConfig import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} @@ -714,7 +716,7 @@ class AclAuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { @Test def testThrowsOnAddPrefixedAclIfInterBrokerProtocolVersionTooLow(): Unit = { - givenAuthorizerWithProtocolVersion(Option(KAFKA_2_0_IV0)) + givenAuthorizerWithProtocolVersion(Option(IBP_2_0_IV0)) val e = assertThrows(classOf[ApiException], () => addAcls(aclAuthorizer, Set(denyReadAcl), new ResourcePattern(TOPIC, "z_other", PREFIXED))) assertTrue(e.getCause.isInstanceOf[UnsupportedVersionException], s"Unexpected exception $e") @@ -736,7 +738,7 @@ class AclAuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { @Test def testWritesExtendedAclChangeEventWhenInterBrokerProtocolAtLeastKafkaV2(): Unit = { - givenAuthorizerWithProtocolVersion(Option(KAFKA_2_0_IV1)) + givenAuthorizerWithProtocolVersion(Option(IBP_2_0_IV1)) val resource = new ResourcePattern(TOPIC, "z_other", PREFIXED) val expected = new String(ZkAclStore(PREFIXED).changeStore .createChangeNode(resource).bytes, UTF_8) @@ -750,7 +752,7 @@ class AclAuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { @Test def testWritesLiteralWritesLiteralAclChangeEventWhenInterBrokerProtocolLessThanKafkaV2eralAclChangesForOlderProtocolVersions(): Unit = { - givenAuthorizerWithProtocolVersion(Option(KAFKA_2_0_IV0)) + givenAuthorizerWithProtocolVersion(Option(IBP_2_0_IV0)) val resource = new ResourcePattern(TOPIC, "z_other", LITERAL) val expected = new String(ZkAclStore(LITERAL).changeStore .createChangeNode(resource).bytes, UTF_8) @@ -764,7 +766,7 @@ class AclAuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { @Test def testWritesLiteralAclChangeEventWhenInterBrokerProtocolIsKafkaV2(): Unit = { - givenAuthorizerWithProtocolVersion(Option(KAFKA_2_0_IV1)) + givenAuthorizerWithProtocolVersion(Option(IBP_2_0_IV1)) val resource = new ResourcePattern(TOPIC, "z_other", LITERAL) val expected = new String(ZkAclStore(LITERAL).changeStore .createChangeNode(resource).bytes, UTF_8) @@ -994,7 +996,7 @@ class AclAuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { } } - private def givenAuthorizerWithProtocolVersion(protocolVersion: Option[ApiVersion]): Unit = { + private def givenAuthorizerWithProtocolVersion(protocolVersion: Option[MetadataVersion]): Unit = { aclAuthorizer.close() val props = TestUtils.createBrokerConfig(0, zkConnect) diff --git a/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala b/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala index d4a1b3566..e3c55c53f 100644 --- a/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala @@ -19,9 +19,7 @@ package kafka.server import java.util.Collections import java.util.stream.{Stream => JStream} -import kafka.api.ApiVersion -import kafka.api.KAFKA_2_7_IV2 -import kafka.api.KAFKA_3_2_IV0 + import kafka.api.LeaderAndIsr import kafka.utils.{MockScheduler, MockTime} import kafka.zk.KafkaZkClient @@ -33,6 +31,8 @@ import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.requests.{AbstractRequest, AlterPartitionRequest, 
AlterPartitionResponse} import org.apache.kafka.metadata.LeaderRecoveryState +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.{IBP_2_7_IV2, IBP_3_2_IV0} import org.apache.kafka.test.TestUtils.assertFutureThrows import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.BeforeEach @@ -43,6 +43,7 @@ import org.junit.jupiter.params.provider.MethodSource import org.mockito.ArgumentMatchers.{any, anyString} import org.mockito.Mockito.{mock, reset, times, verify} import org.mockito.{ArgumentCaptor, ArgumentMatchers, Mockito} + import scala.jdk.CollectionConverters._ class AlterPartitionManagerTest { @@ -64,10 +65,10 @@ class AlterPartitionManagerTest { } @ParameterizedTest - @MethodSource(Array("provideApiVersions")) - def testBasic(apiVersion: ApiVersion): Unit = { + @MethodSource(Array("provideMetadataVersions")) + def testBasic(metadataVersion: MetadataVersion): Unit = { val scheduler = new MockScheduler(time) - val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, apiVersion) + val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, metadataVersion) alterIsrManager.start() alterIsrManager.submit(tp0, new LeaderAndIsr(1, 1, List(1,2,3), LeaderRecoveryState.RECOVERED, 10), 0) verify(brokerToController).start() @@ -77,31 +78,31 @@ class AlterPartitionManagerTest { @ParameterizedTest @MethodSource(Array("provideLeaderRecoveryState")) def testBasicSentLeaderRecoveryState( - apiVersion: ApiVersion, + metadataVersion: MetadataVersion, leaderRecoveryState: LeaderRecoveryState ): Unit = { val requestCapture = ArgumentCaptor.forClass(classOf[AbstractRequest.Builder[AlterPartitionRequest]]) val scheduler = new MockScheduler(time) - val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, apiVersion) + val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, metadataVersion) alterIsrManager.start() alterIsrManager.submit(tp0, new LeaderAndIsr(1, 1, List(1), leaderRecoveryState, 10), 0) verify(brokerToController).start() verify(brokerToController).sendRequest(requestCapture.capture(), any()) val request = requestCapture.getValue.build() - val expectedLeaderRecoveryState = if (apiVersion >= KAFKA_3_2_IV0) leaderRecoveryState else LeaderRecoveryState.RECOVERED + val expectedLeaderRecoveryState = if (metadataVersion.isAtLeast(IBP_3_2_IV0)) leaderRecoveryState else LeaderRecoveryState.RECOVERED assertEquals(expectedLeaderRecoveryState.value, request.data.topics.get(0).partitions.get(0).leaderRecoveryState()) } @ParameterizedTest - @MethodSource(Array("provideApiVersions")) - def testOverwriteWithinBatch(apiVersion: ApiVersion): Unit = { + @MethodSource(Array("provideMetadataVersions")) + def testOverwriteWithinBatch(metadataVersion: MetadataVersion): Unit = { val capture: ArgumentCaptor[AbstractRequest.Builder[AlterPartitionRequest]] = ArgumentCaptor.forClass(classOf[AbstractRequest.Builder[AlterPartitionRequest]]) val callbackCapture: ArgumentCaptor[ControllerRequestCompletionHandler] = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) val scheduler = new MockScheduler(time) - val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, apiVersion) + val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, 
() => 2, metadataVersion) alterIsrManager.start() // Only send one ISR update for a given topic+partition @@ -133,13 +134,13 @@ class AlterPartitionManagerTest { } @ParameterizedTest - @MethodSource(Array("provideApiVersions")) - def testSingleBatch(apiVersion: ApiVersion): Unit = { + @MethodSource(Array("provideMetadataVersions")) + def testSingleBatch(metadataVersion: MetadataVersion): Unit = { val capture: ArgumentCaptor[AbstractRequest.Builder[AlterPartitionRequest]] = ArgumentCaptor.forClass(classOf[AbstractRequest.Builder[AlterPartitionRequest]]) val callbackCapture: ArgumentCaptor[ControllerRequestCompletionHandler] = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) val scheduler = new MockScheduler(time) - val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, apiVersion) + val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, metadataVersion) alterIsrManager.start() // First request will send batch of one @@ -209,7 +210,7 @@ class AlterPartitionManagerTest { val callbackCapture: ArgumentCaptor[ControllerRequestCompletionHandler] = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) val scheduler = new MockScheduler(time) - val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, KAFKA_3_2_IV0) + val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, IBP_3_2_IV0) alterIsrManager.start() alterIsrManager.submit(tp0, leaderAndIsr, 0) @@ -269,7 +270,7 @@ class AlterPartitionManagerTest { reset(brokerToController) val scheduler = new MockScheduler(time) - val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, KAFKA_3_2_IV0) + val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, IBP_3_2_IV0) alterIsrManager.start() val future = alterIsrManager.submit(tp, LeaderAndIsr(1, 1, List(1,2,3), LeaderRecoveryState.RECOVERED, 10), 0) @@ -288,12 +289,12 @@ class AlterPartitionManagerTest { } @ParameterizedTest - @MethodSource(Array("provideApiVersions")) - def testOneInFlight(apiVersion: ApiVersion): Unit = { + @MethodSource(Array("provideMetadataVersions")) + def testOneInFlight(metadataVersion: MetadataVersion): Unit = { val callbackCapture: ArgumentCaptor[ControllerRequestCompletionHandler] = ArgumentCaptor.forClass(classOf[ControllerRequestCompletionHandler]) val scheduler = new MockScheduler(time) - val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, apiVersion) + val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => 2, metadataVersion) alterIsrManager.start() // First submit will send the request @@ -316,13 +317,13 @@ class AlterPartitionManagerTest { } @ParameterizedTest - @MethodSource(Array("provideApiVersions")) - def testPartitionMissingInResponse(apiVersion: ApiVersion): Unit = { + @MethodSource(Array("provideMetadataVersions")) + def testPartitionMissingInResponse(metadataVersion: MetadataVersion): Unit = { brokerToController = Mockito.mock(classOf[BrokerToControllerChannelManager]) val brokerEpoch = 2 val scheduler = new MockScheduler(time) - val alterIsrManager = new DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => brokerEpoch, apiVersion) + val alterIsrManager = new 
DefaultAlterPartitionManager(brokerToController, scheduler, time, brokerId, () => brokerEpoch, metadataVersion) alterIsrManager.start() def matchesAlterIsr(topicPartitions: Set[TopicPartition]): AbstractRequest.Builder[_ <: AbstractRequest] = { @@ -422,21 +423,21 @@ class AlterPartitionManagerTest { } object AlterPartitionManagerTest { - def provideApiVersions(): JStream[ApiVersion] = { + def provideMetadataVersions(): JStream[MetadataVersion] = { JStream.of( // Supports KIP-704: unclean leader recovery - KAFKA_3_2_IV0, + IBP_3_2_IV0, // Supports KIP-497: alter partition - KAFKA_2_7_IV2 + IBP_2_7_IV2 ) } def provideLeaderRecoveryState(): JStream[Arguments] = { - // Multiply apiVersions by leaderRecoveryState - provideApiVersions().flatMap { apiVersion => + // Multiply metadataVersions by leaderRecoveryState + provideMetadataVersions().flatMap { metadataVersion => JStream.of( - Arguments.of(apiVersion, LeaderRecoveryState.RECOVERED), - Arguments.of(apiVersion, LeaderRecoveryState.RECOVERING) + Arguments.of(metadataVersion, LeaderRecoveryState.RECOVERED), + Arguments.of(metadataVersion, LeaderRecoveryState.RECOVERING) ) } } diff --git a/core/src/test/scala/unit/kafka/server/ApiVersionManagerTest.scala b/core/src/test/scala/unit/kafka/server/ApiVersionManagerTest.scala index 8f8a7a0e3..e1a010e32 100644 --- a/core/src/test/scala/unit/kafka/server/ApiVersionManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ApiVersionManagerTest.scala @@ -16,10 +16,10 @@ */ package kafka.server -import kafka.api.ApiVersion import org.apache.kafka.clients.NodeApiVersions import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.protocol.ApiKeys +import org.apache.kafka.server.common.MetadataVersion import org.junit.jupiter.api.{Disabled, Test} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest @@ -37,7 +37,7 @@ class ApiVersionManagerTest { def testApiScope(apiScope: ListenerType): Unit = { val versionManager = new DefaultApiVersionManager( listenerType = apiScope, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, forwardingManager = None, features = brokerFeatures, featureCache = featureCache @@ -61,7 +61,7 @@ class ApiVersionManagerTest { val versionManager = new DefaultApiVersionManager( listenerType = ListenerType.ZK_BROKER, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, forwardingManager = Some(forwardingManager), features = brokerFeatures, featureCache = featureCache @@ -82,7 +82,7 @@ class ApiVersionManagerTest { for (forwardingManagerOpt <- Seq(Some(forwardingManager), None)) { val versionManager = new DefaultApiVersionManager( listenerType = ListenerType.BROKER, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, forwardingManager = forwardingManagerOpt, features = brokerFeatures, featureCache = featureCache @@ -104,7 +104,7 @@ class ApiVersionManagerTest { val versionManager = new DefaultApiVersionManager( listenerType = ListenerType.ZK_BROKER, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, forwardingManager = Some(forwardingManager), features = brokerFeatures, featureCache = featureCache @@ -123,7 +123,7 @@ class ApiVersionManagerTest { def testEnvelopeDisabledWhenForwardingManagerEmpty(): Unit = { val versionManager = new DefaultApiVersionManager( listenerType = 
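
The AlterPartitionManagerTest changes above also show how JUnit 5 parameterized sources migrate: the companion object exposes provideMetadataVersions as a java.util.stream.Stream of MetadataVersion and crosses it with LeaderRecoveryState in provideLeaderRecoveryState. A pared-down sketch of the same shape, using a hypothetical MyTest companion and an extra derived flag purely for illustration:

import java.util.stream.{Stream => JStream}
import org.apache.kafka.server.common.MetadataVersion
import org.apache.kafka.server.common.MetadataVersion.{IBP_2_7_IV2, IBP_3_2_IV0}
import org.junit.jupiter.params.provider.Arguments

// Hypothetical companion object for a @ParameterizedTest using @MethodSource.
object MyTest {
  def provideMetadataVersions(): JStream[MetadataVersion] =
    JStream.of(IBP_3_2_IV0, IBP_2_7_IV2)

  // Crosses each version with a derived flag, mirroring how provideLeaderRecoveryState
  // multiplies versions by recovery states above.
  def provideVersionedArguments(): JStream[Arguments] =
    provideMetadataVersions().map(mv => Arguments.of(mv, Boolean.box(mv.isAtLeast(IBP_3_2_IV0))))
}
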
ListenerType.ZK_BROKER, - interBrokerProtocolVersion = ApiVersion.latestVersion, + interBrokerProtocolVersion = MetadataVersion.latest, forwardingManager = None, features = brokerFeatures, featureCache = featureCache diff --git a/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala b/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala index 496f4dbcd..875b7605b 100644 --- a/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala +++ b/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala @@ -16,7 +16,6 @@ */ package kafka.server -import kafka.api.KAFKA_3_0_IV1 import java.net.InetAddress import java.nio.charset.StandardCharsets import java.util @@ -42,6 +41,7 @@ import org.apache.kafka.common.quota.ClientQuotaEntity.{CLIENT_ID, IP, USER} import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity} import org.apache.kafka.common.record.{CompressionType, RecordVersion} import org.apache.kafka.common.security.auth.KafkaPrincipal +import org.apache.kafka.server.common.MetadataVersion.IBP_3_0_IV1 import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest @@ -154,7 +154,7 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { "Topic metadata propagation failed") val log = server.logManager.getLog(tp).get // message format version should always be 3.0 if inter-broker protocol is 3.0 or higher - assertEquals(KAFKA_3_0_IV1, log.config.messageFormatVersion) + assertEquals(IBP_3_0_IV1, log.config.messageFormatVersion) assertEquals(RecordVersion.V2, log.config.recordVersion) val compressionType = CompressionType.LZ4.name @@ -165,7 +165,7 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { TestUtils.waitUntilTrue(() => server.logManager.getLog(tp).get.config.compressionType == compressionType, "Topic config change propagation failed") - assertEquals(KAFKA_3_0_IV1, log.config.messageFormatVersion) + assertEquals(IBP_3_0_IV1, log.config.messageFormatVersion) assertEquals(RecordVersion.V2, log.config.recordVersion) } diff --git a/core/src/test/scala/unit/kafka/server/FetchRequestWithLegacyMessageFormatTest.scala b/core/src/test/scala/unit/kafka/server/FetchRequestWithLegacyMessageFormatTest.scala index 2f78b9d10..82e0449c8 100644 --- a/core/src/test/scala/unit/kafka/server/FetchRequestWithLegacyMessageFormatTest.scala +++ b/core/src/test/scala/unit/kafka/server/FetchRequestWithLegacyMessageFormatTest.scala @@ -16,16 +16,16 @@ */ package kafka.server -import kafka.api.KAFKA_0_10_2_IV0 import kafka.log.LogConfig import org.apache.kafka.clients.producer.ProducerRecord import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{FetchRequest, FetchResponse} import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} import org.junit.jupiter.api.Test - import java.util.Properties +import org.apache.kafka.server.common.MetadataVersion.IBP_0_10_2_IV0 + import scala.annotation.nowarn import scala.collection.Seq import scala.jdk.CollectionConverters._ @@ -49,7 +49,7 @@ class FetchRequestWithLegacyMessageFormatTest extends BaseFetchRequestTest { val maxPartitionBytes = 200 // Fetch v2 down-converts if the message format is >= 0.11 and we want to avoid // that as it affects the size of the returned buffer - val topicConfig = Map(LogConfig.MessageFormatVersionProp -> KAFKA_0_10_2_IV0.version) + val topicConfig = Map(LogConfig.MessageFormatVersionProp -> IBP_0_10_2_IV0.version) val (topicPartition, leaderId) = 
createTopics(numTopics = 1, numPartitions = 1, topicConfig).head val topicIds = getTopicIds().asJava val topicNames = topicIds.asScala.map(_.swap).asJava diff --git a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala index c421d9b8d..972fc2179 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala @@ -23,7 +23,8 @@ import java.util import java.util.Arrays.asList import java.util.concurrent.TimeUnit import java.util.{Collections, Optional, Properties, Random} -import kafka.api.{ApiVersion, KAFKA_0_10_2_IV0, KAFKA_2_2_IV1, LeaderAndIsr} + +import kafka.api.LeaderAndIsr import kafka.cluster.Broker import kafka.controller.{ControllerContext, KafkaController} import kafka.coordinator.group.GroupCoordinatorConcurrencyTest.{JoinGroupCallback, SyncGroupCallback} @@ -91,8 +92,9 @@ import org.mockito.{ArgumentCaptor, ArgumentMatchers, Mockito} import scala.collection.{Map, Seq, mutable} import scala.jdk.CollectionConverters._ - import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.{IBP_0_10_2_IV0, IBP_2_2_IV1} class KafkaApisTest { private val requestChannel: RequestChannel = mock(classOf[RequestChannel]) @@ -132,7 +134,7 @@ class KafkaApisTest { metrics.close() } - def createKafkaApis(interBrokerProtocolVersion: ApiVersion = ApiVersion.latestVersion, + def createKafkaApis(interBrokerProtocolVersion: MetadataVersion = MetadataVersion.latest, authorizer: Option[Authorizer] = None, enableForwarding: Boolean = false, configRepository: ConfigRepository = new MockConfigRepository(), @@ -1649,31 +1651,31 @@ class KafkaApisTest { @Test def shouldThrowUnsupportedVersionExceptionOnHandleAddOffsetToTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = { assertThrows(classOf[UnsupportedVersionException], - () => createKafkaApis(KAFKA_0_10_2_IV0).handleAddOffsetsToTxnRequest(null, RequestLocal.withThreadConfinedCaching)) + () => createKafkaApis(IBP_0_10_2_IV0).handleAddOffsetsToTxnRequest(null, RequestLocal.withThreadConfinedCaching)) } @Test def shouldThrowUnsupportedVersionExceptionOnHandleAddPartitionsToTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = { assertThrows(classOf[UnsupportedVersionException], - () => createKafkaApis(KAFKA_0_10_2_IV0).handleAddPartitionToTxnRequest(null, RequestLocal.withThreadConfinedCaching)) + () => createKafkaApis(IBP_0_10_2_IV0).handleAddPartitionToTxnRequest(null, RequestLocal.withThreadConfinedCaching)) } @Test def shouldThrowUnsupportedVersionExceptionOnHandleTxnOffsetCommitRequestWhenInterBrokerProtocolNotSupported(): Unit = { assertThrows(classOf[UnsupportedVersionException], - () => createKafkaApis(KAFKA_0_10_2_IV0).handleAddPartitionToTxnRequest(null, RequestLocal.withThreadConfinedCaching)) + () => createKafkaApis(IBP_0_10_2_IV0).handleAddPartitionToTxnRequest(null, RequestLocal.withThreadConfinedCaching)) } @Test def shouldThrowUnsupportedVersionExceptionOnHandleEndTxnRequestWhenInterBrokerProtocolNotSupported(): Unit = { assertThrows(classOf[UnsupportedVersionException], - () => createKafkaApis(KAFKA_0_10_2_IV0).handleEndTxnRequest(null, RequestLocal.withThreadConfinedCaching)) + () => createKafkaApis(IBP_0_10_2_IV0).handleEndTxnRequest(null, RequestLocal.withThreadConfinedCaching)) } @Test def 
shouldThrowUnsupportedVersionExceptionOnHandleWriteTxnMarkersRequestWhenInterBrokerProtocolNotSupported(): Unit = { assertThrows(classOf[UnsupportedVersionException], - () => createKafkaApis(KAFKA_0_10_2_IV0).handleWriteTxnMarkersRequest(null, RequestLocal.withThreadConfinedCaching)) + () => createKafkaApis(IBP_0_10_2_IV0).handleWriteTxnMarkersRequest(null, RequestLocal.withThreadConfinedCaching)) } @Test @@ -2782,7 +2784,7 @@ class KafkaApisTest { val requestChannelRequest = buildRequest(joinGroupRequest) - createKafkaApis(KAFKA_2_2_IV1).handleJoinGroupRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) + createKafkaApis(IBP_2_2_IV1).handleJoinGroupRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) val capturedResponse = verifyNoThrottling(requestChannelRequest) val response = capturedResponse.getValue.asInstanceOf[JoinGroupResponse] @@ -2801,7 +2803,7 @@ class KafkaApisTest { val requestChannelRequest = buildRequest(syncGroupRequest) - createKafkaApis(KAFKA_2_2_IV1).handleSyncGroupRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) + createKafkaApis(IBP_2_2_IV1).handleSyncGroupRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) val capturedResponse = verifyNoThrottling(requestChannelRequest) val response = capturedResponse.getValue.asInstanceOf[SyncGroupResponse] @@ -2819,7 +2821,7 @@ class KafkaApisTest { ).build() val requestChannelRequest = buildRequest(heartbeatRequest) - createKafkaApis(KAFKA_2_2_IV1).handleHeartbeatRequest(requestChannelRequest) + createKafkaApis(IBP_2_2_IV1).handleHeartbeatRequest(requestChannelRequest) val capturedResponse = verifyNoThrottling(requestChannelRequest) val response = capturedResponse.getValue.asInstanceOf[HeartbeatResponse] @@ -2849,7 +2851,7 @@ class KafkaApisTest { val requestChannelRequest = buildRequest(offsetCommitRequest) - createKafkaApis(KAFKA_2_2_IV1).handleOffsetCommitRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) + createKafkaApis(IBP_2_2_IV1).handleOffsetCommitRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) val expectedTopicErrors = Collections.singletonList( new OffsetCommitResponseData.OffsetCommitResponseTopic() @@ -2991,7 +2993,7 @@ class KafkaApisTest { val requestChannelRequest = buildRequest(initProducerIdRequest) - createKafkaApis(KAFKA_2_2_IV1).handleInitProducerIdRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) + createKafkaApis(IBP_2_2_IV1).handleInitProducerIdRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) val capturedResponse = verifyNoThrottling(requestChannelRequest) val response = capturedResponse.getValue.asInstanceOf[InitProducerIdResponse] @@ -3009,7 +3011,7 @@ class KafkaApisTest { ).build() val requestChannelRequest = buildRequest(initProducerIdRequest) - createKafkaApis(KAFKA_2_2_IV1).handleInitProducerIdRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) + createKafkaApis(IBP_2_2_IV1).handleInitProducerIdRequest(requestChannelRequest, RequestLocal.withThreadConfinedCaching) val capturedResponse = verifyNoThrottling(requestChannelRequest) val response = capturedResponse.getValue.asInstanceOf[InitProducerIdResponse] diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala index ed31dba41..3b433f6dd 100755 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala @@ -17,7 +17,6 @@ 
package kafka.server -import kafka.api.{ApiVersion, KAFKA_0_8_2, KAFKA_3_0_IV1} import kafka.cluster.EndPoint import kafka.log.LogConfig import kafka.message._ @@ -32,11 +31,13 @@ import org.apache.kafka.raft.RaftConfig import org.apache.kafka.raft.RaftConfig.{AddressSpec, InetAddressSpec, UNKNOWN_ADDRESS_SPEC_INSTANCE} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test - import java.net.InetSocketAddress import java.util import java.util.{Collections, Properties} + import org.apache.kafka.common.Node +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.{IBP_0_8_2, IBP_3_0_IV1} import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.junit.jupiter.api.function.Executable @@ -547,23 +548,23 @@ class KafkaConfigTest { props.put(KafkaConfig.BrokerIdProp, "1") props.put(KafkaConfig.ZkConnectProp, "localhost:2181") val conf = KafkaConfig.fromProps(props) - assertEquals(ApiVersion.latestVersion, conf.interBrokerProtocolVersion) + assertEquals(MetadataVersion.latest, conf.interBrokerProtocolVersion) props.put(KafkaConfig.InterBrokerProtocolVersionProp, "0.8.2.0") // We need to set the message format version to make the configuration valid. props.put(KafkaConfig.LogMessageFormatVersionProp, "0.8.2.0") val conf2 = KafkaConfig.fromProps(props) - assertEquals(KAFKA_0_8_2, conf2.interBrokerProtocolVersion) + assertEquals(IBP_0_8_2, conf2.interBrokerProtocolVersion) // check that 0.8.2.0 is the same as 0.8.2.1 props.put(KafkaConfig.InterBrokerProtocolVersionProp, "0.8.2.1") // We need to set the message format version to make the configuration valid props.put(KafkaConfig.LogMessageFormatVersionProp, "0.8.2.1") val conf3 = KafkaConfig.fromProps(props) - assertEquals(KAFKA_0_8_2, conf3.interBrokerProtocolVersion) + assertEquals(IBP_0_8_2, conf3.interBrokerProtocolVersion) //check that latest is newer than 0.8.2 - assertTrue(ApiVersion.latestVersion >= conf3.interBrokerProtocolVersion) + assertTrue(MetadataVersion.latest.isAtLeast(conf3.interBrokerProtocolVersion)) } private def isValidKafkaConfig(props: Properties): Boolean = { @@ -690,20 +691,20 @@ class KafkaConfigTest { @nowarn("cat=deprecation") @Test def testInterBrokerVersionMessageFormatCompatibility(): Unit = { - def buildConfig(interBrokerProtocol: ApiVersion, messageFormat: ApiVersion): KafkaConfig = { + def buildConfig(interBrokerProtocol: MetadataVersion, messageFormat: MetadataVersion): KafkaConfig = { val props = TestUtils.createBrokerConfig(0, TestUtils.MockZkConnect, port = 8181) props.put(KafkaConfig.InterBrokerProtocolVersionProp, interBrokerProtocol.version) props.put(KafkaConfig.LogMessageFormatVersionProp, messageFormat.version) KafkaConfig.fromProps(props) } - ApiVersion.allVersions.foreach { interBrokerVersion => - ApiVersion.allVersions.foreach { messageFormatVersion => - if (interBrokerVersion.recordVersion.value >= messageFormatVersion.recordVersion.value) { + MetadataVersion.VALUES.foreach { interBrokerVersion => + MetadataVersion.VALUES.foreach { messageFormatVersion => + if (interBrokerVersion.highestSupportedRecordVersion.value >= messageFormatVersion.highestSupportedRecordVersion.value) { val config = buildConfig(interBrokerVersion, messageFormatVersion) assertEquals(interBrokerVersion, config.interBrokerProtocolVersion) - if (interBrokerVersion >= KAFKA_3_0_IV1) - assertEquals(KAFKA_3_0_IV1, config.logMessageFormatVersion) + if (interBrokerVersion.isAtLeast(IBP_3_0_IV1)) + assertEquals(IBP_3_0_IV1, 
config.logMessageFormatVersion) else assertEquals(messageFormatVersion, config.logMessageFormatVersion) } else { diff --git a/core/src/test/scala/unit/kafka/server/KafkaServerTest.scala b/core/src/test/scala/unit/kafka/server/KafkaServerTest.scala index 89cbd04fe..5a84820bf 100755 --- a/core/src/test/scala/unit/kafka/server/KafkaServerTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaServerTest.scala @@ -17,14 +17,14 @@ package kafka.server -import kafka.api.ApiVersion import kafka.utils.TestUtils import org.apache.kafka.common.security.JaasUtils import org.junit.jupiter.api.Assertions.{assertEquals, assertNull, assertThrows, fail} import org.junit.jupiter.api.Test - import java.util.Properties +import org.apache.kafka.server.common.MetadataVersion + class KafkaServerTest extends QuorumTestHarness { @Test @@ -126,7 +126,7 @@ class KafkaServerTest extends QuorumTestHarness { @Test def testAlterIsrManager(): Unit = { val props = TestUtils.createBrokerConfigs(1, zkConnect).head - props.put(KafkaConfig.InterBrokerProtocolVersionProp, ApiVersion.latestVersion.toString) + props.put(KafkaConfig.InterBrokerProtocolVersionProp, MetadataVersion.latest.toString) val server = TestUtils.createServer(KafkaConfig.fromProps(props)) server.replicaManager.alterPartitionManager match { diff --git a/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala index efbb0157c..3d897b81d 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala @@ -16,7 +16,6 @@ */ package kafka.server -import kafka.api.{ApiVersion, KAFKA_2_6_IV0} import kafka.cluster.{BrokerEndPoint, Partition} import kafka.log.{LogAppendInfo, LogManager, UnifiedLog} import kafka.server.AbstractFetcherThread.ResultWithPartitions @@ -40,10 +39,13 @@ import org.junit.jupiter.api.{AfterEach, Test} import org.mockito.ArgumentCaptor import org.mockito.ArgumentMatchers.{any, anyBoolean, anyLong} import org.mockito.Mockito.{mock, never, times, verify, when} - import java.nio.charset.StandardCharsets import java.util import java.util.{Collections, Optional} + +import org.apache.kafka.server.common.MetadataVersion +import org.apache.kafka.server.common.MetadataVersion.IBP_2_6_IV0 + import scala.collection.{Map, mutable} import scala.jdk.CollectionConverters._ @@ -250,15 +252,15 @@ class ReplicaFetcherThreadTest { @Test def shouldFetchLeaderEpochOnFirstFetchOnlyIfLeaderEpochKnownToBothIbp26(): Unit = { - verifyFetchLeaderEpochOnFirstFetch(KAFKA_2_6_IV0) + verifyFetchLeaderEpochOnFirstFetch(IBP_2_6_IV0) } @Test def shouldNotFetchLeaderEpochOnFirstFetchWithTruncateOnFetch(): Unit = { - verifyFetchLeaderEpochOnFirstFetch(ApiVersion.latestVersion, epochFetchCount = 0) + verifyFetchLeaderEpochOnFirstFetch(MetadataVersion.latest, epochFetchCount = 0) } - private def verifyFetchLeaderEpochOnFirstFetch(ibp: ApiVersion, epochFetchCount: Int = 1): Unit = { + private def verifyFetchLeaderEpochOnFirstFetch(ibp: MetadataVersion, epochFetchCount: Int = 1): Unit = { val props = TestUtils.createBrokerConfig(1, "localhost:1234") props.setProperty(KafkaConfig.InterBrokerProtocolVersionProp, ibp.version) val config = KafkaConfig.fromProps(props) @@ -1074,7 +1076,7 @@ class ReplicaFetcherThreadTest { private def kafkaConfigNoTruncateOnFetch: KafkaConfig = { val props = TestUtils.createBrokerConfig(1, "localhost:1234") - props.setProperty(KafkaConfig.InterBrokerProtocolVersionProp, 
KAFKA_2_6_IV0.version) + props.setProperty(KafkaConfig.InterBrokerProtocolVersionProp, IBP_2_6_IV0.version) KafkaConfig.fromProps(props) } } diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index 6c17503a6..dd005b091 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -54,6 +54,7 @@ import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.common.{IsolationLevel, Node, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.image.{AclsImage, ClientQuotasImage, ClusterImageTest, ConfigurationsImage, FeaturesImage, MetadataImage, ProducerIdsImage, TopicsDelta, TopicsImage} import org.apache.kafka.raft.{OffsetAndEpoch => RaftOffsetAndEpoch} +import org.apache.kafka.server.common.MetadataVersion.IBP_2_6_IV0 import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.junit.jupiter.params.ParameterizedTest @@ -1084,7 +1085,7 @@ class ReplicaManagerTest { @Test def testBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdateIbp26(): Unit = { val extraProps = new Properties - extraProps.put(KafkaConfig.InterBrokerProtocolVersionProp, KAFKA_2_6_IV0.version) + extraProps.put(KafkaConfig.InterBrokerProtocolVersionProp, IBP_2_6_IV0.version) verifyBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(extraProps, expectTruncation = true) } diff --git a/core/src/test/scala/unit/kafka/server/TopicIdWithOldInterBrokerProtocolTest.scala b/core/src/test/scala/unit/kafka/server/TopicIdWithOldInterBrokerProtocolTest.scala index 7a844fa27..25c7c7bd0 100644 --- a/core/src/test/scala/unit/kafka/server/TopicIdWithOldInterBrokerProtocolTest.scala +++ b/core/src/test/scala/unit/kafka/server/TopicIdWithOldInterBrokerProtocolTest.scala @@ -19,7 +19,6 @@ package kafka.server import java.util.{Arrays, LinkedHashMap, Optional, Properties} -import kafka.api.KAFKA_2_7_IV0 import kafka.network.SocketServer import kafka.utils.TestUtils import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} @@ -27,6 +26,7 @@ import org.apache.kafka.common.message.DeleteTopicsRequestData import org.apache.kafka.common.message.DeleteTopicsRequestData.DeleteTopicState import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.requests.{DeleteTopicsRequest, DeleteTopicsResponse, FetchRequest, FetchResponse, MetadataRequest, MetadataResponse} +import org.apache.kafka.server.common.MetadataVersion.{IBP_2_7_IV0} import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} @@ -36,7 +36,7 @@ import scala.jdk.CollectionConverters._ class TopicIdWithOldInterBrokerProtocolTest extends BaseRequestTest { override def brokerPropertyOverrides(properties: Properties): Unit = { - properties.setProperty(KafkaConfig.InterBrokerProtocolVersionProp, KAFKA_2_7_IV0.toString) + properties.setProperty(KafkaConfig.InterBrokerProtocolVersionProp, IBP_2_7_IV0.toString) properties.setProperty(KafkaConfig.OffsetsTopicPartitionsProp, "1") properties.setProperty(KafkaConfig.DefaultReplicationFactorProp, "2") properties.setProperty(KafkaConfig.RackProp, s"rack/${properties.getProperty(KafkaConfig.BrokerIdProp)}") diff --git a/core/src/test/scala/unit/kafka/server/UpdateFeaturesTest.scala b/core/src/test/scala/unit/kafka/server/UpdateFeaturesTest.scala index a7085850b..a84082b55 100644 --- 
a/core/src/test/scala/unit/kafka/server/UpdateFeaturesTest.scala +++ b/core/src/test/scala/unit/kafka/server/UpdateFeaturesTest.scala @@ -20,7 +20,6 @@ package kafka.server import java.util.{Optional, Properties} import java.util.concurrent.ExecutionException -import kafka.api.KAFKA_2_7_IV0 import kafka.utils.TestUtils import kafka.zk.{FeatureZNode, FeatureZNodeStatus, ZkVersion} import kafka.utils.TestUtils.waitUntilTrue @@ -33,8 +32,9 @@ import org.apache.kafka.common.message.UpdateFeaturesRequestData.FeatureUpdateKe import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{UpdateFeaturesRequest, UpdateFeaturesResponse} import org.apache.kafka.common.utils.Utils +import org.apache.kafka.server.common.MetadataVersion.IBP_2_7_IV0 import org.junit.jupiter.api.Test -import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertNotEquals, assertNotNull, assertTrue, assertThrows} +import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertNotEquals, assertNotNull, assertThrows, assertTrue} import scala.jdk.CollectionConverters._ import scala.reflect.ClassTag @@ -45,7 +45,7 @@ class UpdateFeaturesTest extends BaseRequestTest { override def brokerCount = 3 override def brokerPropertyOverrides(props: Properties): Unit = { - props.put(KafkaConfig.InterBrokerProtocolVersionProp, KAFKA_2_7_IV0.toString) + props.put(KafkaConfig.InterBrokerProtocolVersionProp, IBP_2_7_IV0.toString) } private def defaultSupportedFeatures(): Features[SupportedVersionRange] = { diff --git a/core/src/test/scala/unit/kafka/server/epoch/EpochDrivenReplicationProtocolAcceptanceTest.scala b/core/src/test/scala/unit/kafka/server/epoch/EpochDrivenReplicationProtocolAcceptanceTest.scala index f02f87ba8..079c0d563 100644 --- a/core/src/test/scala/unit/kafka/server/epoch/EpochDrivenReplicationProtocolAcceptanceTest.scala +++ b/core/src/test/scala/unit/kafka/server/epoch/EpochDrivenReplicationProtocolAcceptanceTest.scala @@ -19,7 +19,6 @@ package kafka.server.epoch import java.io.{File, RandomAccessFile} import java.util.Properties -import kafka.api.ApiVersion import kafka.log.{UnifiedLog, LogLoader} import kafka.server.KafkaConfig._ import kafka.server.{KafkaConfig, KafkaServer} @@ -32,6 +31,7 @@ import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.serialization.ByteArrayDeserializer +import org.apache.kafka.server.common.MetadataVersion import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} @@ -49,8 +49,8 @@ import scala.collection.Seq */ class EpochDrivenReplicationProtocolAcceptanceTest extends QuorumTestHarness with Logging { - // Set this to KAFKA_0_11_0_IV1 to demonstrate the tests failing in the pre-KIP-101 case - val apiVersion = ApiVersion.latestVersion + // Set this to IBP_0_11_0_IV1 to demonstrate the tests failing in the pre-KIP-101 case + val metadataVersion = MetadataVersion.latest val topic = "topic1" val msg = new Array[Byte](1000) val msgBigger = new Array[Byte](10000) @@ -178,7 +178,7 @@ class EpochDrivenReplicationProtocolAcceptanceTest extends QuorumTestHarness wit assertEquals(getLogFile(brokers(0), 0).length, getLogFile(brokers(1), 0).length, "Log files should match Broker0 vs Broker 1") } - //We can reproduce the pre-KIP-101 failure of this test by setting KafkaConfig.InterBrokerProtocolVersionProp = KAFKA_0_11_0_IV1 + //We can 
reproduce the pre-KIP-101 failure of this test by setting KafkaConfig.InterBrokerProtocolVersionProp = IBP_0_11_0_IV1 @Test def offsetsShouldNotGoBackwards(): Unit = { @@ -465,7 +465,7 @@ class EpochDrivenReplicationProtocolAcceptanceTest extends QuorumTestHarness wit private def createBrokerForId(id: Int, enableUncleanLeaderElection: Boolean = false): KafkaServer = { val config = createBrokerConfig(id, zkConnect) - TestUtils.setIbpAndMessageFormatVersions(config, apiVersion) + TestUtils.setIbpAndMessageFormatVersions(config, metadataVersion) config.setProperty(KafkaConfig.UncleanLeaderElectionEnableProp, enableUncleanLeaderElection.toString) createServer(fromProps(config)) } diff --git a/core/src/test/scala/unit/kafka/server/epoch/EpochDrivenReplicationProtocolAcceptanceWithIbp26Test.scala b/core/src/test/scala/unit/kafka/server/epoch/EpochDrivenReplicationProtocolAcceptanceWithIbp26Test.scala index 2ad4776bb..9b0eb4c67 100644 --- a/core/src/test/scala/unit/kafka/server/epoch/EpochDrivenReplicationProtocolAcceptanceWithIbp26Test.scala +++ b/core/src/test/scala/unit/kafka/server/epoch/EpochDrivenReplicationProtocolAcceptanceWithIbp26Test.scala @@ -17,7 +17,7 @@ package kafka.server.epoch -import kafka.api.KAFKA_2_6_IV0 +import org.apache.kafka.server.common.MetadataVersion.IBP_2_6_IV0 /** * With IBP 2.7 onwards, we truncate based on diverging epochs returned in fetch responses. @@ -25,5 +25,5 @@ import kafka.api.KAFKA_2_6_IV0 * verifies that we handle older IBP versions with truncation on leader/follower change correctly. */ class EpochDrivenReplicationProtocolAcceptanceWithIbp26Test extends EpochDrivenReplicationProtocolAcceptanceTest { - override val apiVersion = KAFKA_2_6_IV0 + override val metadataVersion = IBP_2_6_IV0 } diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala index 877b3b2a2..bc51644cb 100755 --- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala +++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala @@ -69,6 +69,7 @@ import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.common.{KafkaFuture, TopicPartition} import org.apache.kafka.controller.QuorumController import org.apache.kafka.server.authorizer.{AuthorizableRequestContext, Authorizer => JAuthorizer} +import org.apache.kafka.server.common.MetadataVersion import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.test.{TestSslUtils, TestUtils => JTestUtils} import org.apache.zookeeper.KeeperException.SessionExpiredException @@ -361,7 +362,7 @@ object TestUtils extends Logging { } @nowarn("cat=deprecation") - def setIbpAndMessageFormatVersions(config: Properties, version: ApiVersion): Unit = { + def setIbpAndMessageFormatVersions(config: Properties, version: MetadataVersion): Unit = { config.setProperty(KafkaConfig.InterBrokerProtocolVersionProp, version.version) // for clarity, only set the log message format version if it's not ignored if (!LogConfig.shouldIgnoreMessageFormatVersion(version)) @@ -808,7 +809,7 @@ object TestUtils extends Logging { Broker(b.id, Seq(EndPoint("localhost", 6667, listenerName, protocol)), b.rack) } brokers.foreach(b => zkClient.registerBroker(BrokerInfo(Broker(b.id, b.endPoints, rack = b.rack), - ApiVersion.latestVersion, jmxPort = -1))) + MetadataVersion.latest, jmxPort = -1))) brokers } @@ -1233,7 +1234,7 @@ object TestUtils extends Logging { configRepository: ConfigRepository = new MockConfigRepository, cleanerConfig: CleanerConfig = CleanerConfig(enableCleaner = 
false), time: MockTime = new MockTime(), - interBrokerProtocolVersion: ApiVersion = ApiVersion.latestVersion): LogManager = { + interBrokerProtocolVersion: MetadataVersion = MetadataVersion.latest): LogManager = { new LogManager(logDirs = logDirs.map(_.getAbsoluteFile), initialOfflineDirs = Array.empty[File], configRepository = configRepository, diff --git a/core/src/test/scala/unit/kafka/zk/KafkaZkClientTest.scala b/core/src/test/scala/unit/kafka/zk/KafkaZkClientTest.scala index 6411c706f..7b7ddfbc5 100644 --- a/core/src/test/scala/unit/kafka/zk/KafkaZkClientTest.scala +++ b/core/src/test/scala/unit/kafka/zk/KafkaZkClientTest.scala @@ -19,7 +19,8 @@ package kafka.zk import java.nio.charset.StandardCharsets.UTF_8 import java.util.concurrent.{CountDownLatch, TimeUnit} import java.util.{Collections, Properties} -import kafka.api.{ApiVersion, LeaderAndIsr} + +import kafka.api.LeaderAndIsr import kafka.cluster.{Broker, EndPoint} import kafka.controller.{LeaderIsrAndControllerEpoch, ReplicaAssignment} import kafka.log.LogConfig @@ -42,6 +43,7 @@ import org.apache.kafka.common.security.token.delegation.TokenInformation import org.apache.kafka.common.utils.{SecurityUtils, Time} import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.metadata.LeaderRecoveryState +import org.apache.kafka.server.common.MetadataVersion import org.apache.zookeeper.KeeperException.{Code, NoAuthException, NoNodeException, NodeExistsException} import org.apache.zookeeper.ZooDefs import org.apache.zookeeper.client.ZKClientConfig @@ -51,6 +53,7 @@ import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource + import scala.collection.mutable.ArrayBuffer import scala.collection.{Seq, mutable} import scala.jdk.CollectionConverters._ @@ -808,7 +811,7 @@ class KafkaZkClientTest extends QuorumTestHarness { Seq(new EndPoint(host, port, ListenerName.forSecurityProtocol(securityProtocol), securityProtocol)), rack = rack, features = features), - ApiVersion.latestVersion, jmxPort = port + 10) + MetadataVersion.latest, jmxPort = port + 10) @Test def testRegisterBrokerInfo(): Unit = { diff --git a/docs/upgrade.html b/docs/upgrade.html index 033c8b9ff..226013df8 100644 --- a/docs/upgrade.html +++ b/docs/upgrade.html @@ -73,7 +73,13 @@
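As an illustration of the TestUtils change above: setIbpAndMessageFormatVersions now takes a MetadataVersion and writes its IBP string into the broker properties. The following is a minimal, hypothetical Java sketch of the same idea; the class and method names are illustrative only, the property keys are the standard inter.broker.protocol.version and log.message.format.version broker configs, and unlike the real helper it does not skip the message-format property when LogConfig says it should be ignored.

    import java.util.Properties;
    import org.apache.kafka.server.common.MetadataVersion;

    public class IbpPropsSketch {
        // Loosely mirrors TestUtils.setIbpAndMessageFormatVersions: MetadataVersion#version()
        // yields the IBP string (e.g. "2.6-IV0", "3.0-IV1") that the broker configs expect.
        static Properties ibpProps(MetadataVersion version) {
            Properties props = new Properties();
            props.setProperty("inter.broker.protocol.version", version.version());
            props.setProperty("log.message.format.version", version.version());
            return props;
        }

        public static void main(String[] args) {
            // Prints both properties set to "2.6-IV0".
            System.out.println(ibpProps(MetadataVersion.IBP_2_6_IV0));
        }
    }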
Notable changes in 3.2.0 via Connect worker and/or connector configuration. Connect may enable idempotent producers by default in a future major release.
  • Kafka has replaced log4j and slf4j-log4j12 with reload4j and slf4j-reload4j due to security concerns. - More information can be found at reload4j.
  • + This only affects modules that specify a logging backend (connect-runtime and kafka-tools are two such examples). + A number of modules, including kafka-clients, leave it to the application to specify the logging backend. + More information can be found at reload4j. + Projects that depend on the affected modules from the Kafka project should use + slf4j-log4j12 version 1.7.35 or above or + slf4j-reload4j to avoid + possible compatibility issues originating from the logging framework.
  • The example connectors, FileStreamSourceConnector and FileStreamSinkConnector, have been removed from the default classpath. To use them in Kafka Connect standalone or distributed mode they need to be explicitly added, for example CLASSPATH=./lib/connect-file-3.2.0.jar ./bin/connect-distributed.sh.
  • diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/fetcher/ReplicaFetcherThreadBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/fetcher/ReplicaFetcherThreadBenchmark.java index 1dec000f3..7571a069f 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/fetcher/ReplicaFetcherThreadBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/fetcher/ReplicaFetcherThreadBenchmark.java @@ -17,8 +17,6 @@ package org.apache.kafka.jmh.fetcher; -import kafka.api.ApiVersion; -import kafka.api.ApiVersion$; import kafka.cluster.BrokerEndPoint; import kafka.cluster.DelayedOperations; import kafka.cluster.AlterPartitionListener; @@ -69,6 +67,7 @@ import org.apache.kafka.common.requests.UpdateMetadataRequest; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.server.common.MetadataVersion; import org.mockito.Mockito; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; @@ -145,7 +144,7 @@ public void setup() throws IOException { setFlushStartOffsetCheckpointMs(10000L). setRetentionCheckMs(1000L). setMaxPidExpirationMs(60000). - setInterBrokerProtocolVersion(ApiVersion.latestVersion()). + setInterBrokerProtocolVersion(MetadataVersion.latest()). setScheduler(scheduler). setBrokerTopicStats(brokerTopicStats). setLogDirFailureChannel(logDirFailureChannel). @@ -174,7 +173,7 @@ public void setup() throws IOException { OffsetCheckpoints offsetCheckpoints = Mockito.mock(OffsetCheckpoints.class); Mockito.when(offsetCheckpoints.fetch(logDir.getAbsolutePath(), tp)).thenReturn(Option.apply(0L)); AlterPartitionManager isrChannelManager = Mockito.mock(AlterPartitionManager.class); - Partition partition = new Partition(tp, 100, ApiVersion$.MODULE$.latestVersion(), + Partition partition = new Partition(tp, 100, MetadataVersion.latest(), 0, Time.SYSTEM, alterPartitionListener, new DelayedOperationsMock(tp), Mockito.mock(MetadataCache.class), logManager, isrChannelManager); diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/PartitionMakeFollowerBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/PartitionMakeFollowerBenchmark.java index 1bc695ecb..4daddd29b 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/PartitionMakeFollowerBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/PartitionMakeFollowerBenchmark.java @@ -17,8 +17,6 @@ package org.apache.kafka.jmh.partition; -import kafka.api.ApiVersion; -import kafka.api.ApiVersion$; import kafka.cluster.DelayedOperations; import kafka.cluster.AlterPartitionListener; import kafka.cluster.Partition; @@ -42,6 +40,7 @@ import org.apache.kafka.common.record.SimpleRecord; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.server.common.MetadataVersion; import org.mockito.Mockito; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; @@ -111,7 +110,7 @@ public void setup() throws IOException { setFlushStartOffsetCheckpointMs(10000L). setRetentionCheckMs(1000L). setMaxPidExpirationMs(60000). - setInterBrokerProtocolVersion(ApiVersion.latestVersion()). + setInterBrokerProtocolVersion(MetadataVersion.latest()). setScheduler(scheduler). setBrokerTopicStats(brokerTopicStats). setLogDirFailureChannel(logDirFailureChannel). 
@@ -125,7 +124,7 @@ public void setup() throws IOException { AlterPartitionListener alterPartitionListener = Mockito.mock(AlterPartitionListener.class); AlterPartitionManager alterPartitionManager = Mockito.mock(AlterPartitionManager.class); partition = new Partition(tp, 100, - ApiVersion$.MODULE$.latestVersion(), 0, Time.SYSTEM, + MetadataVersion.latest(), 0, Time.SYSTEM, alterPartitionListener, delayedOperations, Mockito.mock(MetadataCache.class), logManager, alterPartitionManager); partition.createLogIfNotExists(true, false, offsetCheckpoints, topicId); diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/UpdateFollowerFetchStateBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/UpdateFollowerFetchStateBenchmark.java index cf7201d4c..f1f3d76ba 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/UpdateFollowerFetchStateBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/partition/UpdateFollowerFetchStateBenchmark.java @@ -17,8 +17,6 @@ package org.apache.kafka.jmh.partition; -import kafka.api.ApiVersion; -import kafka.api.ApiVersion$; import kafka.cluster.DelayedOperations; import kafka.cluster.AlterPartitionListener; import kafka.cluster.Partition; @@ -39,6 +37,7 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState; import org.apache.kafka.common.utils.Time; +import org.apache.kafka.server.common.MetadataVersion; import org.mockito.Mockito; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; @@ -97,7 +96,7 @@ public void setUp() { setFlushStartOffsetCheckpointMs(10000L). setRetentionCheckMs(1000L). setMaxPidExpirationMs(60000). - setInterBrokerProtocolVersion(ApiVersion.latestVersion()). + setInterBrokerProtocolVersion(MetadataVersion.latest()). setScheduler(scheduler). setBrokerTopicStats(brokerTopicStats). setLogDirFailureChannel(logDirFailureChannel). 
@@ -124,7 +123,7 @@ public void setUp() { AlterPartitionListener alterPartitionListener = Mockito.mock(AlterPartitionListener.class); AlterPartitionManager alterPartitionManager = Mockito.mock(AlterPartitionManager.class); partition = new Partition(topicPartition, 100, - ApiVersion$.MODULE$.latestVersion(), 0, Time.SYSTEM, + MetadataVersion.latest(), 0, Time.SYSTEM, alterPartitionListener, delayedOperations, Mockito.mock(MetadataCache.class), logManager, alterPartitionManager); partition.makeLeader(partitionState, offsetCheckpoints, topicId); diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/record/CompressedRecordBatchValidationBenchmark.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/record/CompressedRecordBatchValidationBenchmark.java index 24ac53e78..cfbc66b66 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/record/CompressedRecordBatchValidationBenchmark.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/record/CompressedRecordBatchValidationBenchmark.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.jmh.record; -import kafka.api.ApiVersion; import kafka.common.LongRef; import kafka.log.AppendOrigin; import kafka.log.LogValidator; @@ -26,6 +25,7 @@ import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.utils.Time; +import org.apache.kafka.server.common.MetadataVersion; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.Fork; import org.openjdk.jmh.annotations.Measurement; @@ -58,7 +58,7 @@ public void measureValidateMessagesAndAssignOffsetsCompressed(Blackhole bh) { CompressionCodec.getCompressionCodec(compressionType.id), false, messageVersion, TimestampType.CREATE_TIME, Long.MAX_VALUE, 0, new AppendOrigin.Client$(), - ApiVersion.latestVersion(), + MetadataVersion.latest(), brokerTopicStats, requestLocal); } diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/CheckpointBench.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/CheckpointBench.java index 919179ac3..dcbacac7e 100644 --- a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/CheckpointBench.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/CheckpointBench.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.jmh.server; -import kafka.api.ApiVersion; import kafka.cluster.Partition; import kafka.log.CleanerConfig; import kafka.log.LogConfig; @@ -39,6 +38,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.server.common.MetadataVersion; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.Fork; import org.openjdk.jmh.annotations.Level; @@ -107,7 +107,7 @@ public void setup() { this.logManager = TestUtils.createLogManager(JavaConverters.asScalaBuffer(files), LogConfig.apply(), new MockConfigRepository(), CleanerConfig.apply(1, 4 * 1024 * 1024L, 0.9d, 1024 * 1024, 32 * 1024 * 1024, - Double.MAX_VALUE, 15 * 1000, true, "MD5"), time, ApiVersion.latestVersion()); + Double.MAX_VALUE, 15 * 1000, true, "MD5"), time, MetadataVersion.latest()); scheduler.startup(); final BrokerTopicStats brokerTopicStats = new BrokerTopicStats(); final MetadataCache metadataCache = diff --git a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/PartitionCreationBench.java b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/PartitionCreationBench.java index ac9a7f4c5..e1649db22 100644 --- 
a/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/PartitionCreationBench.java +++ b/jmh-benchmarks/src/main/java/org/apache/kafka/jmh/server/PartitionCreationBench.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.jmh.server; -import kafka.api.ApiVersion; import kafka.cluster.Partition; import kafka.log.CleanerConfig; import kafka.log.Defaults; @@ -44,6 +43,7 @@ import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.server.common.MetadataVersion; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; import org.openjdk.jmh.annotations.Fork; @@ -134,7 +134,7 @@ public void setup() { setFlushStartOffsetCheckpointMs(10000L). setRetentionCheckMs(1000L). setMaxPidExpirationMs(60000). - setInterBrokerProtocolVersion(ApiVersion.latestVersion()). + setInterBrokerProtocolVersion(MetadataVersion.latest()). setScheduler(scheduler). setBrokerTopicStats(brokerTopicStats). setLogDirFailureChannel(failureChannel). diff --git a/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersion.java b/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersion.java new file mode 100644 index 000000000..0292dab1d --- /dev/null +++ b/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersion.java @@ -0,0 +1,288 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.server.common; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.regex.Pattern; +import org.apache.kafka.common.record.RecordVersion; + +/** + * This class contains the different Kafka versions. + * Right now, we use them for upgrades - users can configure the version of the API brokers will use to communicate between themselves. + * This is only for inter-broker communications - when communicating with clients, the client decides on the API version. + * + * Note that the ID we initialize for each version is important. + * We consider a version newer than another if it is lower in the enum list (to avoid depending on lexicographic order) + * + * Since the api protocol may change more than once within the same release and to facilitate people deploying code from + * trunk, we have the concept of internal versions (first introduced during the 0.10.0 development cycle). For example, + * the first time we introduce a version change in a release, say 0.10.0, we will add a config value "0.10.0-IV0" and a + * corresponding enum constant IBP_0_10_0-IV0. We will also add a config value "0.10.0" that will be mapped to the + * latest internal version object, which is IBP_0_10_0-IV0. 
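To make the version-string scheme this Javadoc describes concrete, here is a small illustrative sketch (not part of the patch; it only uses fromVersionString, isAtLeast and highestSupportedRecordVersion, all defined later in this class, and the mappings it prints are the ones asserted in MetadataVersionTest further down): a bare release string resolves to the newest internal version of that release, while an explicit "-IVn" suffix pins one specific internal version.

    import org.apache.kafka.server.common.MetadataVersion;

    public class VersionStringSketch {
        public static void main(String[] args) {
            // A bare release string resolves to the newest internal version of that release...
            MetadataVersion release = MetadataVersion.fromVersionString("0.10.0");    // IBP_0_10_0_IV1
            // ...while an "-IVn" suffix pins one specific internal version.
            MetadataVersion pinned = MetadataVersion.fromVersionString("0.10.0-IV0"); // IBP_0_10_0_IV0

            // Declaration order doubles as version order, so upgrade checks are simple comparisons.
            System.out.println(release.isAtLeast(pinned));                // true
            System.out.println(pinned.highestSupportedRecordVersion());  // V1

            // Strings that cannot be mapped are rejected.
            try {
                MetadataVersion.fromVersionString("not-a-version");
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage()); // Version not-a-version is not a valid version
            }
        }
    }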
When we change the protocol a second time while developing + * 0.10.0, we will add a new config value "0.10.0-IV1" and a corresponding enum constant IBP_0_10_0-IV1. We will change + * the config value "0.10.0" to map to the latest internal version IBP_0_10_0-IV1. The config value of + * "0.10.0-IV0" is still mapped to IBP_0_10_0-IV0. This way, if people are deploying from trunk, they can use + * "0.10.0-IV0" and "0.10.0-IV1" to upgrade one internal version at a time. For most people who just want to use + * a released version, they can use "0.10.0" when upgrading to the 0.10.0 release. + */ +public enum MetadataVersion { + IBP_0_8_0(-1, "0.8.0", ""), + IBP_0_8_1(-1, "0.8.1", ""), + IBP_0_8_2(-1, "0.8.2", ""), + IBP_0_9_0(-1, "0.9.0", ""), + + // 0.10.0-IV0 is introduced for KIP-31/32 which changes the message format. + IBP_0_10_0_IV0(-1, "0.10.0", "IV0"), + + // 0.10.0-IV1 is introduced for KIP-36(rack awareness) and KIP-43(SASL handshake). + IBP_0_10_0_IV1(-1, "0.10.0", "IV1"), + + // introduced for JoinGroup protocol change in KIP-62 + IBP_0_10_1_IV0(-1, "0.10.1", "IV0"), + + // 0.10.1-IV1 is introduced for KIP-74(fetch response size limit). + IBP_0_10_1_IV1(-1, "0.10.1", "IV1"), + + // introduced ListOffsetRequest v1 in KIP-79 + IBP_0_10_1_IV2(-1, "0.10.1", "IV2"), + + // introduced UpdateMetadataRequest v3 in KIP-103 + IBP_0_10_2_IV0(-1, "0.10.2", "IV0"), + + // KIP-98 (idempotent and transactional producer support) + IBP_0_11_0_IV0(-1, "0.11.0", "IV0"), + + // introduced DeleteRecordsRequest v0 and FetchRequest v4 in KIP-107 + IBP_0_11_0_IV1(-1, "0.11.0", "IV1"), + + // Introduced leader epoch fetches to the replica fetcher via KIP-101 + IBP_0_11_0_IV2(-1, "0.11.0", "IV2"), + + // Introduced LeaderAndIsrRequest V1, UpdateMetadataRequest V4 and FetchRequest V6 via KIP-112 + IBP_1_0_IV0(-1, "1.0", "IV0"), + + // Introduced DeleteGroupsRequest V0 via KIP-229, plus KIP-227 incremental fetch requests, + // and KafkaStorageException for fetch requests. + IBP_1_1_IV0(-1, "1.1", "IV0"), + + // Introduced OffsetsForLeaderEpochRequest V1 via KIP-279 (Fix log divergence between leader and follower after fast leader fail over) + IBP_2_0_IV0(-1, "2.0", "IV0"), + + // Several request versions were bumped due to KIP-219 (Improve quota communication) + IBP_2_0_IV1(-1, "2.0", "IV1"), + + // Introduced new schemas for group offset (v2) and group metadata (v2) (KIP-211) + IBP_2_1_IV0(-1, "2.1", "IV0"), + + // New Fetch, OffsetsForLeaderEpoch, and ListOffsets schemas (KIP-320) + IBP_2_1_IV1(-1, "2.1", "IV1"), + + // Support ZStandard Compression Codec (KIP-110) + IBP_2_1_IV2(-1, "2.1", "IV2"), + + // Introduced broker generation (KIP-380), and + // LeaderAndIsrRequest V2, UpdateMetadataRequest V5, StopReplicaRequest V1 + IBP_2_2_IV0(-1, "2.2", "IV0"), + + // New error code for ListOffsets when a new leader is lagging behind former HW (KIP-207) + IBP_2_2_IV1(-1, "2.2", "IV1"), + + // Introduced static membership.
+ IBP_2_3_IV0(-1, "2.3", "IV0"), + + // Add rack_id to FetchRequest, preferred_read_replica to FetchResponse, and replica_id to OffsetsForLeaderRequest + IBP_2_3_IV1(-1, "2.3", "IV1"), + + // Add adding_replicas and removing_replicas fields to LeaderAndIsrRequest + IBP_2_4_IV0(-1, "2.4", "IV0"), + + // Flexible version support in inter-broker APIs + IBP_2_4_IV1(-1, "2.4", "IV1"), + + // No new APIs, equivalent to 2.4-IV1 + IBP_2_5_IV0(-1, "2.5", "IV0"), + + // Introduced StopReplicaRequest V3 containing the leader epoch for each partition (KIP-570) + IBP_2_6_IV0(-1, "2.6", "IV0"), + + // Introduced feature versioning support (KIP-584) + IBP_2_7_IV0(-1, "2.7", "IV0"), + + // Bump Fetch protocol for Raft protocol (KIP-595) + IBP_2_7_IV1(-1, "2.7", "IV1"), + + // Introduced AlterPartition (KIP-497) + IBP_2_7_IV2(-1, "2.7", "IV2"), + + // Flexible versioning on ListOffsets, WriteTxnMarkers and OffsetsForLeaderEpoch. Also adds topic IDs (KIP-516) + IBP_2_8_IV0(-1, "2.8", "IV0"), + + // Introduced topic IDs to LeaderAndIsr and UpdateMetadata requests/responses (KIP-516) + IBP_2_8_IV1(-1, "2.8", "IV1"), + + // Introduce AllocateProducerIds (KIP-730) + IBP_3_0_IV0(1, "3.0", "IV0"), + + // Introduce ListOffsets V7 which supports listing offsets by max timestamp (KIP-734) + // Assume message format version is 3.0 (KIP-724) + IBP_3_0_IV1(2, "3.0", "IV1"), + + // Adds topic IDs to Fetch requests/responses (KIP-516) + IBP_3_1_IV0(3, "3.1", "IV0"), + + // Support for leader recovery for unclean leader election (KIP-704) + IBP_3_2_IV0(4, "3.2", "IV0"); + + public static final MetadataVersion[] VALUES = MetadataVersion.values(); + private final Optional<Short> featureLevel; + private final String release; + private final String ibpVersion; + + MetadataVersion(int featureLevel, String release, String subVersion) { + if (featureLevel > 0) { + this.featureLevel = Optional.of((short) featureLevel); + } else { + this.featureLevel = Optional.empty(); + } + this.release = release; + if (subVersion.isEmpty()) { + this.ibpVersion = release; + } else { + this.ibpVersion = String.format("%s-%s", release, subVersion); + } + } + + public Optional<Short> featureLevel() { + return featureLevel; + } + + public boolean isSaslInterBrokerHandshakeRequestEnabled() { + return this.isAtLeast(IBP_0_10_0_IV1); + } + + public boolean isOffsetForLeaderEpochSupported() { + return this.isAtLeast(IBP_0_11_0_IV2); + } + + public boolean isFeatureVersioningSupported() { + return this.isAtLeast(IBP_2_7_IV0); + } + + public boolean isTruncationOnFetchSupported() { + return this.isAtLeast(IBP_2_7_IV1); + } + + public boolean isAlterIsrSupported() { + return this.isAtLeast(IBP_2_7_IV2); + } + + public boolean isTopicIdsSupported() { + return this.isAtLeast(IBP_2_8_IV0); + } + + public boolean isAllocateProducerIdsSupported() { + return this.isAtLeast(IBP_3_0_IV0); + } + + + public RecordVersion highestSupportedRecordVersion() { + if (this.isLessThan(IBP_0_10_0_IV0)) { + return RecordVersion.V0; + } else if (this.isLessThan(IBP_0_11_0_IV0)) { + return RecordVersion.V1; + } else { + return RecordVersion.V2; + } + } + + private static final Map<String, MetadataVersion> IBP_VERSIONS; + static { + { + IBP_VERSIONS = new HashMap<>(); + Map<String, MetadataVersion> maxInterVersion = new HashMap<>(); + for (MetadataVersion metadataVersion : VALUES) { + maxInterVersion.put(metadataVersion.release, metadataVersion); + IBP_VERSIONS.put(metadataVersion.ibpVersion, metadataVersion); + } + IBP_VERSIONS.putAll(maxInterVersion); + } + } + + public String shortVersion() { + return release; + } + + public String
version() { + return ibpVersion; + } + + /** + * Return an `MetadataVersion` instance for `versionString`, which can be in a variety of formats (e.g. "0.8.0", "0.8.0.x", + * "0.10.0", "0.10.0-IV1"). `IllegalArgumentException` is thrown if `versionString` cannot be mapped to an `MetadataVersion`. + * Note that 'misconfigured' values such as "1.0.1" will be parsed to `IBP_1_0_IV0` as we ignore anything after the first + * two digits for versions that don't start with "0." + */ + public static MetadataVersion fromVersionString(String versionString) { + String[] versionSegments = versionString.split(Pattern.quote(".")); + int numSegments = (versionString.startsWith("0.")) ? 3 : 2; + String key; + if (numSegments >= versionSegments.length) { + key = versionString; + } else { + key = String.join(".", Arrays.copyOfRange(versionSegments, 0, numSegments)); + } + return Optional.ofNullable(IBP_VERSIONS.get(key)).orElseThrow(() -> + new IllegalArgumentException("Version " + versionString + " is not a valid version") + ); + } + + /** + * Return the minimum `MetadataVersion` that supports `RecordVersion`. + */ + public static MetadataVersion minSupportedFor(RecordVersion recordVersion) { + switch (recordVersion) { + case V0: + return IBP_0_8_0; + case V1: + return IBP_0_10_0_IV0; + case V2: + return IBP_0_11_0_IV0; + default: + throw new IllegalArgumentException("Invalid message format version " + recordVersion); + } + } + + public static MetadataVersion latest() { + return VALUES[VALUES.length - 1]; + } + + public boolean isAtLeast(MetadataVersion otherVersion) { + return this.compareTo(otherVersion) >= 0; + } + + public boolean isLessThan(MetadataVersion otherVersion) { + return this.compareTo(otherVersion) < 0; + } + + @Override + public String toString() { + return ibpVersion; + } +} diff --git a/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersionValidator.java b/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersionValidator.java new file mode 100644 index 000000000..d685dd018 --- /dev/null +++ b/server-common/src/main/java/org/apache/kafka/server/common/MetadataVersionValidator.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.server.common; + +import java.util.Arrays; +import java.util.stream.Collectors; +import org.apache.kafka.common.config.ConfigDef.Validator; +import org.apache.kafka.common.config.ConfigException; + +public class MetadataVersionValidator implements Validator { + + @Override + public void ensureValid(String name, Object value) { + try { + MetadataVersion.fromVersionString(value.toString()); + } catch (IllegalArgumentException e) { + throw new ConfigException(name, value.toString(), e.getMessage()); + } + } + + @Override + public String toString() { + return "[" + Arrays.stream(MetadataVersion.VALUES).map(MetadataVersion::version).collect( + Collectors.joining(", ")) + "]"; + } +} diff --git a/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionTest.java b/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionTest.java new file mode 100644 index 000000000..7ad8754b7 --- /dev/null +++ b/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionTest.java @@ -0,0 +1,343 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
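A brief usage sketch for the MetadataVersionValidator just added above (illustrative only; the config name and the rejected value are made-up examples). It delegates to MetadataVersion.fromVersionString and re-throws parse failures as a ConfigException, which is the contract a ConfigDef.Validator is expected to follow.

    import org.apache.kafka.common.config.ConfigException;
    import org.apache.kafka.server.common.MetadataVersionValidator;

    public class ValidatorSketch {
        public static void main(String[] args) {
            MetadataVersionValidator validator = new MetadataVersionValidator();

            // Any string accepted by MetadataVersion.fromVersionString passes validation.
            validator.ensureValid("inter.broker.protocol.version", "3.0-IV1");

            // Unknown versions surface as ConfigException rather than IllegalArgumentException.
            try {
                validator.ensureValid("inter.broker.protocol.version", "9.9-IV9");
            } catch (ConfigException e) {
                System.out.println(e.getMessage());
            }
        }
    }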
+ */ + +package org.apache.kafka.server.common; + +import java.util.Arrays; +import java.util.HashSet; +import org.apache.kafka.common.feature.Features; +import org.apache.kafka.common.feature.FinalizedVersionRange; +import org.apache.kafka.common.feature.SupportedVersionRange; +import org.apache.kafka.common.message.ApiMessageType.ListenerType; +import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersion; +import org.apache.kafka.common.message.ApiVersionsResponseData.FinalizedFeatureKey; +import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKey; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.record.RecordBatch; +import org.apache.kafka.common.record.RecordVersion; +import org.apache.kafka.common.requests.AbstractResponse; +import org.apache.kafka.common.requests.ApiVersionsResponse; +import org.apache.kafka.common.utils.Utils; + +import static org.apache.kafka.server.common.MetadataVersion.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import org.junit.jupiter.api.Test; + +class MetadataVersionTest { + + @Test + public void testFeatureLevel() { + int firstFeatureLevelIndex = Arrays.asList(MetadataVersion.VALUES).indexOf(IBP_3_0_IV0); + for (int i = 0; i < firstFeatureLevelIndex; i++) { + assertFalse(MetadataVersion.VALUES[i].featureLevel().isPresent()); + } + short expectedFeatureLevel = 1; + for (int i = firstFeatureLevelIndex; i < MetadataVersion.VALUES.length; i++) { + MetadataVersion metadataVersion = MetadataVersion.VALUES[i]; + short featureLevel = metadataVersion.featureLevel().orElseThrow(() -> + new IllegalArgumentException( + String.format("Metadata version %s must have a non-null feature level", metadataVersion.version()))); + assertEquals(expectedFeatureLevel, featureLevel, + String.format("Metadata version %s should have feature level %s", metadataVersion.version(), expectedFeatureLevel)); + expectedFeatureLevel += 1; + } + } + + @Test + public void testFromVersionString() { + assertEquals(IBP_0_8_0, MetadataVersion.fromVersionString("0.8.0")); + assertEquals(IBP_0_8_0, MetadataVersion.fromVersionString("0.8.0.0")); + assertEquals(IBP_0_8_0, MetadataVersion.fromVersionString("0.8.0.1")); + // should throw an exception as long as IBP_8_0_IV0 is not defined + assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("8.0")); + + assertEquals(IBP_0_8_1, MetadataVersion.fromVersionString("0.8.1")); + assertEquals(IBP_0_8_1, MetadataVersion.fromVersionString("0.8.1.0")); + assertEquals(IBP_0_8_1, MetadataVersion.fromVersionString("0.8.1.1")); + + assertEquals(IBP_0_8_2, MetadataVersion.fromVersionString("0.8.2")); + assertEquals(IBP_0_8_2, MetadataVersion.fromVersionString("0.8.2.0")); + assertEquals(IBP_0_8_2, MetadataVersion.fromVersionString("0.8.2.1")); + + assertEquals(IBP_0_9_0, MetadataVersion.fromVersionString("0.9.0")); + assertEquals(IBP_0_9_0, MetadataVersion.fromVersionString("0.9.0.0")); + assertEquals(IBP_0_9_0, MetadataVersion.fromVersionString("0.9.0.1")); + + assertEquals(IBP_0_10_0_IV0, MetadataVersion.fromVersionString("0.10.0-IV0")); + + assertEquals(IBP_0_10_0_IV1, MetadataVersion.fromVersionString("0.10.0")); + assertEquals(IBP_0_10_0_IV1, MetadataVersion.fromVersionString("0.10.0.0")); + 
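Relating to testFeatureLevel above: only versions from IBP_3_0_IV0 onward carry a feature level, which increases by one per version, while earlier IBP-only versions report an empty Optional. A minimal illustrative sketch (the class name is an assumption; the printed values follow from the enum definition in this patch):

    import org.apache.kafka.server.common.MetadataVersion;

    public class FeatureLevelSketch {
        public static void main(String[] args) {
            // Pre-3.0 versions expose no feature level.
            System.out.println(MetadataVersion.IBP_2_8_IV1.featureLevel().isPresent()); // false

            // From IBP_3_0_IV0 onward the level counts up: 1, 2, 3, 4, ...
            System.out.println(MetadataVersion.IBP_3_0_IV0.featureLevel().get()); // 1
            System.out.println(MetadataVersion.latest().featureLevel().get());    // 4 (IBP_3_2_IV0 in this patch)
        }
    }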
assertEquals(IBP_0_10_0_IV1, MetadataVersion.fromVersionString("0.10.0.0-IV0")); + assertEquals(IBP_0_10_0_IV1, MetadataVersion.fromVersionString("0.10.0.1")); + + assertEquals(IBP_0_10_1_IV0, MetadataVersion.fromVersionString("0.10.1-IV0")); + assertEquals(IBP_0_10_1_IV1, MetadataVersion.fromVersionString("0.10.1-IV1")); + + assertEquals(IBP_0_10_1_IV2, MetadataVersion.fromVersionString("0.10.1")); + assertEquals(IBP_0_10_1_IV2, MetadataVersion.fromVersionString("0.10.1.0")); + assertEquals(IBP_0_10_1_IV2, MetadataVersion.fromVersionString("0.10.1-IV2")); + assertEquals(IBP_0_10_1_IV2, MetadataVersion.fromVersionString("0.10.1.1")); + + assertEquals(IBP_0_10_2_IV0, MetadataVersion.fromVersionString("0.10.2")); + assertEquals(IBP_0_10_2_IV0, MetadataVersion.fromVersionString("0.10.2.0")); + assertEquals(IBP_0_10_2_IV0, MetadataVersion.fromVersionString("0.10.2-IV0")); + assertEquals(IBP_0_10_2_IV0, MetadataVersion.fromVersionString("0.10.2.1")); + + assertEquals(IBP_0_11_0_IV0, MetadataVersion.fromVersionString("0.11.0-IV0")); + assertEquals(IBP_0_11_0_IV1, MetadataVersion.fromVersionString("0.11.0-IV1")); + + assertEquals(IBP_0_11_0_IV2, MetadataVersion.fromVersionString("0.11.0")); + assertEquals(IBP_0_11_0_IV2, MetadataVersion.fromVersionString("0.11.0.0")); + assertEquals(IBP_0_11_0_IV2, MetadataVersion.fromVersionString("0.11.0-IV2")); + assertEquals(IBP_0_11_0_IV2, MetadataVersion.fromVersionString("0.11.0.1")); + + assertEquals(IBP_1_0_IV0, MetadataVersion.fromVersionString("1.0")); + assertEquals(IBP_1_0_IV0, MetadataVersion.fromVersionString("1.0.0")); + assertEquals(IBP_1_0_IV0, MetadataVersion.fromVersionString("1.0.0-IV0")); + assertEquals(IBP_1_0_IV0, MetadataVersion.fromVersionString("1.0.1")); + assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("0.1.0")); + assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("0.1.0.0")); + assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("0.1.0-IV0")); + assertThrows(IllegalArgumentException.class, () -> MetadataVersion.fromVersionString("0.1.0.0-IV0")); + + assertEquals(IBP_1_1_IV0, MetadataVersion.fromVersionString("1.1-IV0")); + + assertEquals(IBP_2_0_IV1, MetadataVersion.fromVersionString("2.0")); + assertEquals(IBP_2_0_IV0, MetadataVersion.fromVersionString("2.0-IV0")); + assertEquals(IBP_2_0_IV1, MetadataVersion.fromVersionString("2.0-IV1")); + + assertEquals(IBP_2_1_IV2, MetadataVersion.fromVersionString("2.1")); + assertEquals(IBP_2_1_IV0, MetadataVersion.fromVersionString("2.1-IV0")); + assertEquals(IBP_2_1_IV1, MetadataVersion.fromVersionString("2.1-IV1")); + assertEquals(IBP_2_1_IV2, MetadataVersion.fromVersionString("2.1-IV2")); + + assertEquals(IBP_2_2_IV1, MetadataVersion.fromVersionString("2.2")); + assertEquals(IBP_2_2_IV0, MetadataVersion.fromVersionString("2.2-IV0")); + assertEquals(IBP_2_2_IV1, MetadataVersion.fromVersionString("2.2-IV1")); + + assertEquals(IBP_2_3_IV1, MetadataVersion.fromVersionString("2.3")); + assertEquals(IBP_2_3_IV0, MetadataVersion.fromVersionString("2.3-IV0")); + assertEquals(IBP_2_3_IV1, MetadataVersion.fromVersionString("2.3-IV1")); + + assertEquals(IBP_2_4_IV1, MetadataVersion.fromVersionString("2.4")); + assertEquals(IBP_2_4_IV0, MetadataVersion.fromVersionString("2.4-IV0")); + assertEquals(IBP_2_4_IV1, MetadataVersion.fromVersionString("2.4-IV1")); + + assertEquals(IBP_2_5_IV0, MetadataVersion.fromVersionString("2.5")); + assertEquals(IBP_2_5_IV0, 
MetadataVersion.fromVersionString("2.5-IV0")); + + assertEquals(IBP_2_6_IV0, MetadataVersion.fromVersionString("2.6")); + assertEquals(IBP_2_6_IV0, MetadataVersion.fromVersionString("2.6-IV0")); + + assertEquals(IBP_2_7_IV0, MetadataVersion.fromVersionString("2.7-IV0")); + assertEquals(IBP_2_7_IV1, MetadataVersion.fromVersionString("2.7-IV1")); + assertEquals(IBP_2_7_IV2, MetadataVersion.fromVersionString("2.7-IV2")); + + assertEquals(IBP_2_8_IV1, MetadataVersion.fromVersionString("2.8")); + assertEquals(IBP_2_8_IV0, MetadataVersion.fromVersionString("2.8-IV0")); + assertEquals(IBP_2_8_IV1, MetadataVersion.fromVersionString("2.8-IV1")); + + assertEquals(IBP_3_0_IV1, MetadataVersion.fromVersionString("3.0")); + assertEquals(IBP_3_0_IV0, MetadataVersion.fromVersionString("3.0-IV0")); + assertEquals(IBP_3_0_IV1, MetadataVersion.fromVersionString("3.0-IV1")); + + assertEquals(IBP_3_1_IV0, MetadataVersion.fromVersionString("3.1")); + assertEquals(IBP_3_1_IV0, MetadataVersion.fromVersionString("3.1-IV0")); + + assertEquals(IBP_3_2_IV0, MetadataVersion.fromVersionString("3.2")); + assertEquals(IBP_3_2_IV0, MetadataVersion.fromVersionString("3.2-IV0")); + } + + @Test + public void testMinSupportedVersionFor() { + assertEquals(IBP_0_8_0, MetadataVersion.minSupportedFor(RecordVersion.V0)); + assertEquals(IBP_0_10_0_IV0, MetadataVersion.minSupportedFor(RecordVersion.V1)); + assertEquals(IBP_0_11_0_IV0, MetadataVersion.minSupportedFor(RecordVersion.V2)); + + // Ensure that all record versions have a defined min version so that we remember to update the method + for (RecordVersion recordVersion : RecordVersion.values()) { + assertNotNull(MetadataVersion.minSupportedFor(recordVersion)); + } + } + + @Test + public void testShortVersion() { + assertEquals("0.8.0", IBP_0_8_0.shortVersion()); + assertEquals("0.10.0", IBP_0_10_0_IV0.shortVersion()); + assertEquals("0.10.0", IBP_0_10_0_IV1.shortVersion()); + assertEquals("0.11.0", IBP_0_11_0_IV0.shortVersion()); + assertEquals("0.11.0", IBP_0_11_0_IV1.shortVersion()); + assertEquals("0.11.0", IBP_0_11_0_IV2.shortVersion()); + assertEquals("1.0", IBP_1_0_IV0.shortVersion()); + assertEquals("1.1", IBP_1_1_IV0.shortVersion()); + assertEquals("2.0", IBP_2_0_IV0.shortVersion()); + assertEquals("2.0", IBP_2_0_IV1.shortVersion()); + assertEquals("2.1", IBP_2_1_IV0.shortVersion()); + assertEquals("2.1", IBP_2_1_IV1.shortVersion()); + assertEquals("2.1", IBP_2_1_IV2.shortVersion()); + assertEquals("2.2", IBP_2_2_IV0.shortVersion()); + assertEquals("2.2", IBP_2_2_IV1.shortVersion()); + assertEquals("2.3", IBP_2_3_IV0.shortVersion()); + assertEquals("2.3", IBP_2_3_IV1.shortVersion()); + assertEquals("2.4", IBP_2_4_IV0.shortVersion()); + assertEquals("2.5", IBP_2_5_IV0.shortVersion()); + assertEquals("2.6", IBP_2_6_IV0.shortVersion()); + assertEquals("2.7", IBP_2_7_IV2.shortVersion()); + assertEquals("2.8", IBP_2_8_IV0.shortVersion()); + assertEquals("2.8", IBP_2_8_IV1.shortVersion()); + assertEquals("3.0", IBP_3_0_IV0.shortVersion()); + assertEquals("3.0", IBP_3_0_IV1.shortVersion()); + assertEquals("3.1", IBP_3_1_IV0.shortVersion()); + assertEquals("3.2", IBP_3_2_IV0.shortVersion()); + } + + @Test + public void testVersion() { + assertEquals("0.8.0", IBP_0_8_0.version()); + assertEquals("0.8.2", IBP_0_8_2.version()); + assertEquals("0.10.0-IV0", IBP_0_10_0_IV0.version()); + assertEquals("0.10.0-IV1", IBP_0_10_0_IV1.version()); + assertEquals("0.11.0-IV0", IBP_0_11_0_IV0.version()); + assertEquals("0.11.0-IV1", IBP_0_11_0_IV1.version()); + assertEquals("0.11.0-IV2", 
IBP_0_11_0_IV2.version()); + assertEquals("1.0-IV0", IBP_1_0_IV0.version()); + assertEquals("1.1-IV0", IBP_1_1_IV0.version()); + assertEquals("2.0-IV0", IBP_2_0_IV0.version()); + assertEquals("2.0-IV1", IBP_2_0_IV1.version()); + assertEquals("2.1-IV0", IBP_2_1_IV0.version()); + assertEquals("2.1-IV1", IBP_2_1_IV1.version()); + assertEquals("2.1-IV2", IBP_2_1_IV2.version()); + assertEquals("2.2-IV0", IBP_2_2_IV0.version()); + assertEquals("2.2-IV1", IBP_2_2_IV1.version()); + assertEquals("2.3-IV0", IBP_2_3_IV0.version()); + assertEquals("2.3-IV1", IBP_2_3_IV1.version()); + assertEquals("2.4-IV0", IBP_2_4_IV0.version()); + assertEquals("2.5-IV0", IBP_2_5_IV0.version()); + assertEquals("2.6-IV0", IBP_2_6_IV0.version()); + assertEquals("2.7-IV2", IBP_2_7_IV2.version()); + assertEquals("2.8-IV0", IBP_2_8_IV0.version()); + assertEquals("2.8-IV1", IBP_2_8_IV1.version()); + assertEquals("3.0-IV0", IBP_3_0_IV0.version()); + assertEquals("3.0-IV1", IBP_3_0_IV1.version()); + assertEquals("3.1-IV0", IBP_3_1_IV0.version()); + assertEquals("3.2-IV0", IBP_3_2_IV0.version()); + } + + @Test + public void shouldCreateApiResponseOnlyWithKeysSupportedByMagicValue() { + ApiVersionsResponse response = ApiVersionsResponse.createApiVersionsResponse( + 10, + RecordVersion.V1, + Features.emptySupportedFeatures(), + Features.emptyFinalizedFeatures(), + ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, + null, + ListenerType.ZK_BROKER + ); + verifyApiKeysForMagic(response, RecordBatch.MAGIC_VALUE_V1); + assertEquals(10, response.throttleTimeMs()); + assertTrue(response.data().supportedFeatures().isEmpty()); + assertTrue(response.data().finalizedFeatures().isEmpty()); + assertEquals(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, response.data().finalizedFeaturesEpoch()); + } + + @Test + public void shouldReturnFeatureKeysWhenMagicIsCurrentValueAndThrottleMsIsDefaultThrottle() { + ApiVersionsResponse response = ApiVersionsResponse.createApiVersionsResponse( + 10, + RecordVersion.V1, + Features.supportedFeatures( + Utils.mkMap(Utils.mkEntry("feature", new SupportedVersionRange((short) 1, (short) 4)))), + Features.finalizedFeatures( + Utils.mkMap(Utils.mkEntry("feature", new FinalizedVersionRange((short) 2, (short) 3)))), + 10L, + null, + ListenerType.ZK_BROKER + ); + + verifyApiKeysForMagic(response, RecordBatch.MAGIC_VALUE_V1); + assertEquals(10, response.throttleTimeMs()); + assertEquals(1, response.data().supportedFeatures().size()); + SupportedFeatureKey sKey = response.data().supportedFeatures().find("feature"); + assertNotNull(sKey); + assertEquals(1, sKey.minVersion()); + assertEquals(4, sKey.maxVersion()); + assertEquals(1, response.data().finalizedFeatures().size()); + FinalizedFeatureKey fKey = response.data().finalizedFeatures().find("feature"); + assertNotNull(fKey); + assertEquals(2, fKey.minVersionLevel()); + assertEquals(3, fKey.maxVersionLevel()); + assertEquals(10, response.data().finalizedFeaturesEpoch()); + } + + private void verifyApiKeysForMagic(ApiVersionsResponse response, Byte maxMagic) { + for (ApiVersion version : response.data().apiKeys()) { + assertTrue(ApiKeys.forId(version.apiKey()).minRequiredInterBrokerMagic <= maxMagic); + } + } + + @Test + public void shouldReturnAllKeysWhenMagicIsCurrentValueAndThrottleMsIsDefaultThrottle() { + ApiVersionsResponse response = ApiVersionsResponse.createApiVersionsResponse( + AbstractResponse.DEFAULT_THROTTLE_TIME, + RecordVersion.current(), + Features.emptySupportedFeatures(), + Features.emptyFinalizedFeatures(), + 
ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, + null, + ListenerType.ZK_BROKER + ); + assertEquals(new HashSet(ApiKeys.zkBrokerApis()), apiKeysInResponse(response)); + assertEquals(AbstractResponse.DEFAULT_THROTTLE_TIME, response.throttleTimeMs()); + assertTrue(response.data().supportedFeatures().isEmpty()); + assertTrue(response.data().finalizedFeatures().isEmpty()); + assertEquals(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, response.data().finalizedFeaturesEpoch()); + } + + @Test + public void testMetadataQuorumApisAreDisabled() { + ApiVersionsResponse response = ApiVersionsResponse.createApiVersionsResponse( + AbstractResponse.DEFAULT_THROTTLE_TIME, + RecordVersion.current(), + Features.emptySupportedFeatures(), + Features.emptyFinalizedFeatures(), + ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH, + null, + ListenerType.ZK_BROKER + ); + + // Ensure that APIs needed for the KRaft mode are not exposed through ApiVersions until we are ready for them + HashSet exposedApis = apiKeysInResponse(response); + assertFalse(exposedApis.contains(ApiKeys.ENVELOPE)); + assertFalse(exposedApis.contains(ApiKeys.VOTE)); + assertFalse(exposedApis.contains(ApiKeys.BEGIN_QUORUM_EPOCH)); + assertFalse(exposedApis.contains(ApiKeys.END_QUORUM_EPOCH)); + assertFalse(exposedApis.contains(ApiKeys.DESCRIBE_QUORUM)); + } + + private HashSet apiKeysInResponse(ApiVersionsResponse apiVersions) { + HashSet apiKeys = new HashSet<>(); + for (ApiVersion version : apiVersions.data().apiKeys()) { + apiKeys.add(ApiKeys.forId(version.apiKey())); + } + return apiKeys; + } +} diff --git a/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionValidatorTest.java b/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionValidatorTest.java new file mode 100644 index 000000000..c4255946b --- /dev/null +++ b/server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionValidatorTest.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
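Taken together, the assertions above pin down two behaviours of MetadataVersion: featureLevel() is empty for every entry older than IBP_3_0_IV0 and then counts up from 1, and fromVersionString() maps a bare release string to the newest IV of that release while an explicit -IVn suffix or trailing patch digits still resolve deterministically. A small sketch restating those rules; the Optional<Short> return type of featureLevel() is inferred here from the test's use of isPresent() and orElseThrow():

    import java.util.Optional;
    import org.apache.kafka.server.common.MetadataVersion;

    public class MetadataVersionResolutionSketch {
        public static void main(final String[] args) {
            // A bare release string resolves to the latest IV of that release line ...
            final MetadataVersion latest28 = MetadataVersion.fromVersionString("2.8");      // IBP_2_8_IV1
            // ... while an explicit IV suffix pins the exact entry.
            final MetadataVersion pinned28 = MetadataVersion.fromVersionString("2.8-IV0");  // IBP_2_8_IV0
            // Trailing patch digits are ignored.
            final MetadataVersion old = MetadataVersion.fromVersionString("0.8.2.1");       // IBP_0_8_2

            // Feature levels only exist from 3.0-IV0 onwards, starting at 1.
            final Optional<Short> none = latest28.featureLevel();                     // Optional.empty()
            final Optional<Short> first = MetadataVersion.IBP_3_0_IV0.featureLevel(); // Optional.of((short) 1)

            System.out.println(latest28 + " " + pinned28 + " " + old + " " + none + " " + first);
        }
    }
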
+ */ + +package org.apache.kafka.server.common; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +import org.junit.jupiter.api.Test; + +public class MetadataVersionValidatorTest { + + @Test + public void testMetadataVersionValidator() { + String str = new MetadataVersionValidator().toString(); + String[] apiVersions = str.substring(1).split(","); + assertEquals(MetadataVersion.VALUES.length, apiVersions.length); + } + +} diff --git a/streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java b/streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java index 2f1134b43..23c021c63 100644 --- a/streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java +++ b/streams/src/main/java/org/apache/kafka/streams/StreamsConfig.java @@ -1090,6 +1090,9 @@ public static class InternalConfig { // Private API used to control the emit latency for left/outer join results (https://issues.apache.org/jira/browse/KAFKA-10847) public static final String EMIT_INTERVAL_MS_KSTREAMS_OUTER_JOIN_SPURIOUS_RESULTS_FIX = "__emit.interval.ms.kstreams.outer.join.spurious.results.fix__"; + // Private API used to control the emit latency for windowed aggregation results for ON_WINDOW_CLOSE emit strategy + public static final String EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION = "__emit.interval.ms.kstreams.windowed.aggregation__"; + // Private API used to control the usage of consistency offset vectors public static final String IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED = "__iq.consistency.offset" + ".vector.enabled__"; diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/EmitStrategy.java b/streams/src/main/java/org/apache/kafka/streams/kstream/EmitStrategy.java new file mode 100644 index 000000000..a10b95061 --- /dev/null +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/EmitStrategy.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.streams.kstream; + +import org.apache.kafka.streams.kstream.internals.UnlimitedWindow; +import org.apache.kafka.streams.kstream.internals.emitstrategy.WindowCloseStrategy; +import org.apache.kafka.streams.kstream.internals.emitstrategy.WindowUpdateStrategy; + +/** + * This interface controls the strategy that can be used to control how we emit results in a processor. + */ +public interface EmitStrategy { + + enum StrategyType { + ON_WINDOW_CLOSE, + ON_WINDOW_UPDATE + } + + /** + * Returns the strategy type + * @return Emit strategy type + */ + StrategyType type(); + + /** + * This strategy indicates that the aggregated result for a window will only be emitted when the + * window closes instead of when there's an update to the window. Window close means that current + * event time is larger than (window end time + grace period). + * + *
    This strategy should only be used for windows which can close. An exception will be thrown + * if it's used with {@link UnlimitedWindow}. + * + * @see TimeWindows + * @see SlidingWindows + * @see SessionWindows + * @see UnlimitedWindows + * @see WindowUpdateStrategy + * + * @return WindowCloseStrategy instance + */ + static EmitStrategy onWindowClose() { + return new WindowCloseStrategy(); + } + + /** + * This strategy indicates that the aggregated result for a window will be emitted every time + * when there's an update to the window instead of when the window closes. + * + * @see TimeWindows + * @see SlidingWindows + * @see SessionWindows + * @see UnlimitedWindows + * @see WindowCloseStrategy + * + * @return WindowCloseStrategy instance + */ + static EmitStrategy onWindowUpdate() { + return new WindowUpdateStrategy(); + } +} \ No newline at end of file diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregate.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregate.java index 8e88b5de7..7750e3b65 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregate.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregate.java @@ -18,11 +18,18 @@ import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.StreamsConfig; import org.apache.kafka.streams.kstream.Aggregator; +import org.apache.kafka.streams.kstream.EmitStrategy; +import org.apache.kafka.streams.kstream.EmitStrategy.StrategyType; import org.apache.kafka.streams.kstream.Initializer; +import org.apache.kafka.streams.kstream.TimeWindows; import org.apache.kafka.streams.kstream.Window; import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.Windows; +import org.apache.kafka.streams.kstream.internals.KStreamImplJoin.TimeTracker; import org.apache.kafka.streams.processor.api.ContextualProcessor; import org.apache.kafka.streams.processor.api.Processor; import org.apache.kafka.streams.processor.api.ProcessorContext; @@ -30,6 +37,7 @@ import org.apache.kafka.streams.processor.api.RecordMetadata; import org.apache.kafka.streams.processor.internals.InternalProcessorContext; import org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl; +import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.TimestampedWindowStore; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.slf4j.Logger; @@ -37,6 +45,9 @@ import java.util.Map; +import static org.apache.kafka.streams.StreamsConfig.InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION; +import static org.apache.kafka.streams.processor.internals.metrics.ProcessorNodeMetrics.emitFinalLatencySensor; +import static org.apache.kafka.streams.processor.internals.metrics.ProcessorNodeMetrics.emittedRecordsSensor; import static org.apache.kafka.streams.processor.internals.metrics.TaskMetrics.droppedRecordsSensor; import static org.apache.kafka.streams.state.ValueAndTimestamp.getValueOrNull; @@ -48,6 +59,7 @@ public class KStreamWindowAggregate implements private final Windows windows; private final Initializer initializer; private final Aggregator aggregator; + private final EmitStrategy emitStrategy; private boolean sendOldValues = false; @@ -55,10 +67,26 @@ public 
KStreamWindowAggregate(final Windows windows, final String storeName, final Initializer initializer, final Aggregator aggregator) { + this(windows, storeName, EmitStrategy.onWindowUpdate(), initializer, aggregator); + } + + public KStreamWindowAggregate(final Windows windows, + final String storeName, + final EmitStrategy emitStrategy, + final Initializer initializer, + final Aggregator aggregator) { this.windows = windows; this.storeName = storeName; + this.emitStrategy = emitStrategy; this.initializer = initializer; this.aggregator = aggregator; + + if (emitStrategy.type() == StrategyType.ON_WINDOW_CLOSE) { + if (!(windows instanceof TimeWindows)) { + throw new IllegalArgumentException("ON_WINDOW_CLOSE strategy is only supported for " + + "TimeWindows and SlidingWindows for TimeWindowedKStream"); + } + } } @Override @@ -80,22 +108,54 @@ private class KStreamWindowAggregateProcessor extends ContextualProcessor windowStore; private TimestampedTupleForwarder, VAgg> tupleForwarder; private Sensor droppedRecordsSensor; + private Sensor emittedRecordsSensor; + private Sensor emitFinalLatencySensor; private long observedStreamTime = ConsumerRecord.NO_TIMESTAMP; + private long lastEmitWindowCloseTime = ConsumerRecord.NO_TIMESTAMP; + private InternalProcessorContext, Change> internalProcessorContext; + private final TimeTracker timeTracker = new TimeTracker(); + private final Time time = Time.SYSTEM; @Override public void init(final ProcessorContext, Change> context) { super.init(context); - final InternalProcessorContext, Change> internalProcessorContext = - (InternalProcessorContext, Change>) context; + internalProcessorContext = (InternalProcessorContext, Change>) context; final StreamsMetricsImpl metrics = internalProcessorContext.metrics(); final String threadId = Thread.currentThread().getName(); droppedRecordsSensor = droppedRecordsSensor(threadId, context.taskId().toString(), metrics); + emittedRecordsSensor = emittedRecordsSensor(threadId, context.taskId().toString(), + internalProcessorContext.currentNode().name(), metrics); + emitFinalLatencySensor = emitFinalLatencySensor(threadId, context.taskId().toString(), + internalProcessorContext.currentNode().name(), metrics); windowStore = context.getStateStore(storeName); - tupleForwarder = new TimestampedTupleForwarder<>( - windowStore, - context, - new TimestampedCacheFlushListener<>(context), - sendOldValues); + + if (emitStrategy.type() == StrategyType.ON_WINDOW_CLOSE) { + // Don't set flush lister which emit cache results + tupleForwarder = new TimestampedTupleForwarder<>( + windowStore, + context, + sendOldValues); + } else { + tupleForwarder = new TimestampedTupleForwarder<>( + windowStore, + context, + new TimestampedCacheFlushListener<>(context), + sendOldValues); + } + + // Restore last emit close time for ON_WINDOW_CLOSE strategy + if (emitStrategy.type() == StrategyType.ON_WINDOW_CLOSE) { + final Long lastEmitTime = internalProcessorContext.processorMetadataForKey(storeName); + if (lastEmitTime != null) { + lastEmitWindowCloseTime = lastEmitTime; + } + final long emitInterval = StreamsConfig.InternalConfig.getLong( + context.appConfigs(), + EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION, + 1000L + ); + timeTracker.setEmitInterval(emitInterval); + } } @Override @@ -120,15 +180,16 @@ public void process(final Record record) { // first get the matching windows final long timestamp = record.timestamp(); observedStreamTime = Math.max(observedStreamTime, timestamp); - final long closeTime = observedStreamTime - windows.gracePeriodMs(); 
+ final long windowCloseTime = observedStreamTime - windows.gracePeriodMs(); final Map matchedWindows = windows.windowsFor(timestamp); - // try update the window, and create the new window for the rest of unmatched window that do not exist yet + // try update the window whose end time is still larger than the window close time, + // and create the new window for the rest of unmatched window that do not exist yet; for (final Map.Entry entry : matchedWindows.entrySet()) { final Long windowStart = entry.getKey(); final long windowEnd = entry.getValue().end(); - if (windowEnd > closeTime) { + if (windowEnd > windowCloseTime) { final ValueAndTimestamp oldAggAndTimestamp = windowStore.fetch(record.key(), windowStart); VAgg oldAgg = getValueOrNull(oldAggAndTimestamp); @@ -146,10 +207,12 @@ public void process(final Record record) { // update the store with the new value windowStore.put(record.key(), ValueAndTimestamp.make(newAgg, newTimestamp), windowStart); - tupleForwarder.maybeForward( - record.withKey(new Windowed<>(record.key(), entry.getValue())) - .withValue(new Change<>(newAgg, sendOldValues ? oldAgg : null)) - .withTimestamp(newTimestamp)); + if (emitStrategy.type() == StrategyType.ON_WINDOW_UPDATE) { + tupleForwarder.maybeForward( + record.withKey(new Windowed<>(record.key(), entry.getValue())) + .withValue(new Change<>(newAgg, sendOldValues ? oldAgg : null)) + .withTimestamp(newTimestamp)); + } } else { if (context().recordMetadata().isPresent()) { final RecordMetadata recordMetadata = context().recordMetadata().get(); @@ -165,7 +228,7 @@ public void process(final Record record) { recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset(), record.timestamp(), windowStart, windowEnd, - closeTime, + windowCloseTime, observedStreamTime ); } else { @@ -177,13 +240,88 @@ public void process(final Record record) { "streamTime=[{}]", record.timestamp(), windowStart, windowEnd, - closeTime, + windowCloseTime, observedStreamTime ); } droppedRecordsSensor.record(); } } + + tryEmitFinalResult(record, windowCloseTime); + } + + private void tryEmitFinalResult(final Record record, final long windowCloseTime) { + if (emitStrategy.type() != StrategyType.ON_WINDOW_CLOSE) { + return; + } + + final long now = internalProcessorContext.currentSystemTimeMs(); + // Throttle emit frequency as an optimization, the tradeoff is that we need to remember the + // window close time when we emitted last time so that we can restart from there in the next emit + if (now < timeTracker.nextTimeToEmit) { + return; + } + + // Schedule next emit time based on now to avoid the case that if system time jumps a lot, + // this can be triggered every time + timeTracker.nextTimeToEmit = now; + timeTracker.advanceNextTimeToEmit(); + + // Window close time has not progressed, there will be no windows to close hence no records to emit + if (lastEmitWindowCloseTime != ConsumerRecord.NO_TIMESTAMP && lastEmitWindowCloseTime >= windowCloseTime) { + return; + } + + final long emitRangeUpperBoundInclusive = windowCloseTime - windows.size(); + // No window has ever closed and hence no need to emit any records + if (emitRangeUpperBoundInclusive < 0) { + return; + } + + + // Set emitRangeLowerBoundInclusive to -1L if lastEmitWindowCloseTime was not set so that + // we would fetch from 0L for the first time; otherwise set it to lastEmitWindowCloseTime - windows.size(). 
+ // + // Note if we get here, it means emitRangeUpperBoundInclusive > 0, which means windowCloseTime > windows.size(), + // Because we always set lastEmitWindowCloseTime to windowCloseTime before, it means + // lastEmitWindowCloseTime - windows.size() should always > 0 + // As a result, emitRangeLowerBoundInclusive is always >= 0 + final long emitRangeLowerBoundInclusive = lastEmitWindowCloseTime == ConsumerRecord.NO_TIMESTAMP ? + -1L : lastEmitWindowCloseTime - windows.size(); + + if (lastEmitWindowCloseTime != ConsumerRecord.NO_TIMESTAMP) { + final Map matchedCloseWindows = windows.windowsFor(emitRangeUpperBoundInclusive); + final Map matchedEmitWindows = windows.windowsFor(emitRangeLowerBoundInclusive); + + // Don't fetch store if there is no new stores that are closed since the last time we emitted + if (matchedCloseWindows.equals(matchedEmitWindows)) { + log.trace("no new windows to emit. LastEmitCloseTime={}, newCloseTime={}", + lastEmitWindowCloseTime, windowCloseTime); + return; + } + } + + final long startMs = time.milliseconds(); + + final KeyValueIterator, ValueAndTimestamp> windowToEmit = windowStore + .fetchAll(emitRangeLowerBoundInclusive + 1, emitRangeUpperBoundInclusive); + + int emittedCount = 0; + while (windowToEmit.hasNext()) { + emittedCount++; + final KeyValue, ValueAndTimestamp> kv = windowToEmit.next(); + tupleForwarder.maybeForward( + record.withKey(kv.key) + .withValue(new Change<>(kv.value.value(), null)) + .withTimestamp(kv.value.timestamp()) + .withHeaders(record.headers())); + } + emittedRecordsSensor.record(emittedCount); + emitFinalLatencySensor.record(time.milliseconds() - startMs); + + lastEmitWindowCloseTime = windowCloseTime; + internalProcessorContext.addProcessorMetadataKeyValue(storeName, windowCloseTime); } } diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImpl.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImpl.java index 7a82d0834..16d689099 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImpl.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImpl.java @@ -20,12 +20,15 @@ import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.kstream.Aggregator; +import org.apache.kafka.streams.kstream.EmitStrategy; +import org.apache.kafka.streams.kstream.EmitStrategy.StrategyType; import org.apache.kafka.streams.kstream.Initializer; import org.apache.kafka.streams.kstream.KTable; import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.kstream.Named; import org.apache.kafka.streams.kstream.Reducer; import org.apache.kafka.streams.kstream.TimeWindowedKStream; +import org.apache.kafka.streams.kstream.UnlimitedWindows; import org.apache.kafka.streams.kstream.Window; import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.kstream.Windows; @@ -39,6 +42,7 @@ import java.time.Duration; import java.util.Objects; import java.util.Set; +import org.apache.kafka.streams.state.internals.RocksDbIndexedTimeOrderedWindowBytesStoreSupplier; import static org.apache.kafka.streams.kstream.internals.KGroupedStreamImpl.AGGREGATE_NAME; import static org.apache.kafka.streams.kstream.internals.KGroupedStreamImpl.REDUCE_NAME; @@ -47,6 +51,7 @@ public class TimeWindowedKStreamImpl extends AbstractStr private final Windows windows; private final GroupedStreamAggregateBuilder 
aggregateBuilder; + private EmitStrategy emitStrategy = EmitStrategy.onWindowUpdate(); TimeWindowedKStreamImpl(final Windows windows, final InternalStreamsBuilder builder, @@ -107,7 +112,7 @@ private KTable, Long> doCount(final Named named, return aggregateBuilder.build( new NamedInternal(aggregateName), materialize(materializedInternal), - new KStreamWindowAggregate<>(windows, materializedInternal.storeName(), aggregateBuilder.countInitializer, aggregateBuilder.countAggregator), + new KStreamWindowAggregate<>(windows, materializedInternal.storeName(), emitStrategy, aggregateBuilder.countInitializer, aggregateBuilder.countAggregator), materializedInternal.queryableStoreName(), materializedInternal.keySerde() != null ? new FullTimeWindowedSerde<>(materializedInternal.keySerde(), windows.size()) : null, materializedInternal.valueSerde()); @@ -155,7 +160,7 @@ public KTable, VR> aggregate(final Initializer initializer, return aggregateBuilder.build( new NamedInternal(aggregateName), materialize(materializedInternal), - new KStreamWindowAggregate<>(windows, materializedInternal.storeName(), initializer, aggregator), + new KStreamWindowAggregate<>(windows, materializedInternal.storeName(), emitStrategy, initializer, aggregator), materializedInternal.queryableStoreName(), materializedInternal.keySerde() != null ? new FullTimeWindowedSerde<>(materializedInternal.keySerde(), windows.size()) : null, materializedInternal.valueSerde()); @@ -202,12 +207,22 @@ public KTable, V> reduce(final Reducer reducer, return aggregateBuilder.build( new NamedInternal(reduceName), materialize(materializedInternal), - new KStreamWindowAggregate<>(windows, materializedInternal.storeName(), aggregateBuilder.reduceInitializer, aggregatorForReducer(reducer)), + new KStreamWindowAggregate<>(windows, materializedInternal.storeName(), emitStrategy, aggregateBuilder.reduceInitializer, aggregatorForReducer(reducer)), materializedInternal.queryableStoreName(), materializedInternal.keySerde() != null ? new FullTimeWindowedSerde<>(materializedInternal.keySerde(), windows.size()) : null, materializedInternal.valueSerde()); } + //@Override + public TimeWindowedKStream emitStrategy(final EmitStrategy emitStrategy) { + if (this.windows instanceof UnlimitedWindows + && emitStrategy.type() == StrategyType.ON_WINDOW_CLOSE) { + throw new IllegalArgumentException("ON_WINDOW_CLOSE emit strategy cannot be used for UnlimitedWindows"); + } + this.emitStrategy = emitStrategy; + return this; + } + private StoreBuilder> materialize(final MaterializedInternal> materialized) { WindowBytesStoreSupplier supplier = (WindowBytesStoreSupplier) materialized.storeSupplier(); if (supplier == null) { @@ -232,11 +247,19 @@ private StoreBuilder> materialize(final Mater ); break; case ROCKS_DB: - supplier = Stores.persistentTimestampedWindowStore( - materialized.storeName(), - Duration.ofMillis(retentionPeriod), - Duration.ofMillis(windows.size()), - false + supplier = emitStrategy.type() == StrategyType.ON_WINDOW_CLOSE ? 
+ RocksDbIndexedTimeOrderedWindowBytesStoreSupplier.create( + materialized.storeName(), + Duration.ofMillis(retentionPeriod), + Duration.ofMillis(windows.size()), + false, + false + ) : + Stores.persistentTimestampedWindowStore( + materialized.storeName(), + Duration.ofMillis(retentionPeriod), + Duration.ofMillis(windows.size()), + false ); break; default: diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimestampedTupleForwarder.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimestampedTupleForwarder.java index 49f2ab157..ed6cfefdc 100644 --- a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimestampedTupleForwarder.java +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/TimestampedTupleForwarder.java @@ -45,6 +45,14 @@ class TimestampedTupleForwarder { cachingEnabled = ((WrappedStateStore) store).setFlushListener(flushListener, sendOldValues); } + TimestampedTupleForwarder(final StateStore store, + final ProcessorContext> context, + final boolean sendOldValues) { + this.context = (InternalProcessorContext>) context; + this.sendOldValues = sendOldValues; + cachingEnabled = false; + } + public void maybeForward(final Record> record) { if (!cachingEnabled) { if (sendOldValues) { diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/emitstrategy/WindowCloseStrategy.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/emitstrategy/WindowCloseStrategy.java new file mode 100644 index 000000000..ddbf1090a --- /dev/null +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/emitstrategy/WindowCloseStrategy.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.streams.kstream.internals.emitstrategy; + +import org.apache.kafka.streams.kstream.EmitStrategy; + +/** + * An emit strategy which indicates only output when a window closes. + */ +public class WindowCloseStrategy implements EmitStrategy { + + @Override + public StrategyType type() { + return StrategyType.ON_WINDOW_CLOSE; + } + +} \ No newline at end of file diff --git a/streams/src/main/java/org/apache/kafka/streams/kstream/internals/emitstrategy/WindowUpdateStrategy.java b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/emitstrategy/WindowUpdateStrategy.java new file mode 100644 index 000000000..0f87ab22f --- /dev/null +++ b/streams/src/main/java/org/apache/kafka/streams/kstream/internals/emitstrategy/WindowUpdateStrategy.java @@ -0,0 +1,30 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
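The emit-final path added to KStreamWindowAggregate above turns the window close time into a range scan over window start times: the inclusive upper bound is windowCloseTime - windows.size(), the largest start whose window has fully closed, and the lower bound resumes from the close time remembered at the previous emit. A worked sketch of that arithmetic with a 10 ms window, no grace period and the store scan elided; the concrete timestamps are only illustrative:

    public class EmitRangeSketch {
        public static void main(final String[] args) {
            final long size = 10L;
            final long grace = 0L;

            // First emit: stream time has reached 25 and no emit has happened yet.
            long observedStreamTime = 25L;
            long windowCloseTime = observedStreamTime - grace;   // 25
            long upper = windowCloseTime - size;                 // 15: every window starting at <= 15 has closed
            long lower = -1L;                                    // no previous emit, so scan from start time 0
            System.out.println("fetchAll(" + (lower + 1) + ", " + upper + ")");   // fetchAll(0, 15)

            // Second emit: close time 25 was stored in the processor metadata, stream time later hits 31.
            final long lastEmitWindowCloseTime = 25L;
            observedStreamTime = 31L;
            windowCloseTime = observedStreamTime - grace;        // 31
            upper = windowCloseTime - size;                      // 21
            lower = lastEmitWindowCloseTime - size;              // 15
            System.out.println("fetchAll(" + (lower + 1) + ", " + upper + ")");   // fetchAll(16, 21): only newly closed windows
        }
    }
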
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.streams.kstream.internals.emitstrategy; + +import org.apache.kafka.streams.kstream.EmitStrategy; + +/** + * An emit strategy which indicates output everytime when a window gets an update. + */ +public class WindowUpdateStrategy implements EmitStrategy { + + @Override + public StrategyType type() { + return StrategyType.ON_WINDOW_UPDATE; + } +} \ No newline at end of file diff --git a/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/ProcessorNodeMetrics.java b/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/ProcessorNodeMetrics.java index 231d9a627..8dcd265a2 100644 --- a/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/ProcessorNodeMetrics.java +++ b/streams/src/main/java/org/apache/kafka/streams/processor/internals/metrics/ProcessorNodeMetrics.java @@ -21,6 +21,9 @@ import java.util.Map; +import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.AVG_LATENCY_DESCRIPTION; +import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.LATENCY_SUFFIX; +import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.MAX_LATENCY_DESCRIPTION; import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.PROCESSOR_NODE_LEVEL_GROUP; import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.RECORD_E2E_LATENCY; import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.RECORD_E2E_LATENCY_AVG_DESCRIPTION; @@ -31,6 +34,8 @@ import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.TOTAL_DESCRIPTION; import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.addAvgAndMinAndMaxToSensor; import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.addInvocationRateAndCountToSensor; +import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.addRateOfSumAndSumMetricsToSensor; +import static org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl.addAvgAndMaxToSensor; public class ProcessorNodeMetrics { private ProcessorNodeMetrics() {} @@ -62,6 +67,17 @@ private ProcessorNodeMetrics() {} private static final String FORWARD_RATE_DESCRIPTION = RATE_DESCRIPTION_PREFIX + FORWARD_DESCRIPTION + RATE_DESCRIPTION_SUFFIX; + private static final String EMITTED_RECORDS = "window-aggregate-final-emit"; + private static final String EMITTED_RECORDS_DESCRIPTION = "emit final records"; + private static final String EMITTED_RECORDS_TOTAL_DESCRIPTION = TOTAL_DESCRIPTION + EMITTED_RECORDS_DESCRIPTION; + private static final String EMITTED_RECORDS_RATE_DESCRIPTION = + RATE_DESCRIPTION_PREFIX + EMITTED_RECORDS_DESCRIPTION + RATE_DESCRIPTION_SUFFIX; + + private static final String EMIT_FINAL_LATENCY = EMITTED_RECORDS + LATENCY_SUFFIX; + private static final String 
EMIT_FINAL_DESCRIPTION = "calls to emit final"; + private static final String EMIT_FINAL_AVG_LATENCY_DESCRIPTION = AVG_LATENCY_DESCRIPTION + EMIT_FINAL_DESCRIPTION; + private static final String EMIT_FINAL_MAX_LATENCY_DESCRIPTION = MAX_LATENCY_DESCRIPTION + EMIT_FINAL_DESCRIPTION; + public static Sensor suppressionEmitSensor(final String threadId, final String taskId, final String processorNodeId, @@ -165,6 +181,42 @@ public static Sensor e2ELatencySensor(final String threadId, return sensor; } + public static Sensor emitFinalLatencySensor(final String threadId, + final String taskId, + final String processorNodeId, + final StreamsMetricsImpl streamsMetrics) { + final String sensorName = processorNodeId + "-" + EMIT_FINAL_LATENCY; + final Sensor sensor = streamsMetrics.nodeLevelSensor(threadId, taskId, processorNodeId, sensorName, RecordingLevel.DEBUG); + final Map tagMap = streamsMetrics.nodeLevelTagMap(threadId, taskId, processorNodeId); + addAvgAndMaxToSensor( + sensor, + PROCESSOR_NODE_LEVEL_GROUP, + tagMap, + EMIT_FINAL_LATENCY, + EMIT_FINAL_AVG_LATENCY_DESCRIPTION, + EMIT_FINAL_MAX_LATENCY_DESCRIPTION + ); + return sensor; + } + + public static Sensor emittedRecordsSensor(final String threadId, + final String taskId, + final String processorNodeId, + final StreamsMetricsImpl streamsMetrics) { + final String sensorName = processorNodeId + "-" + EMITTED_RECORDS; + final Sensor sensor = streamsMetrics.nodeLevelSensor(threadId, taskId, processorNodeId, sensorName, RecordingLevel.DEBUG); + final Map tagMap = streamsMetrics.nodeLevelTagMap(threadId, taskId, processorNodeId); + addRateOfSumAndSumMetricsToSensor( + sensor, + PROCESSOR_NODE_LEVEL_GROUP, + tagMap, + EMITTED_RECORDS, + EMITTED_RECORDS_RATE_DESCRIPTION, + EMITTED_RECORDS_TOTAL_DESCRIPTION + ); + return sensor; + } + private static Sensor throughputParentSensor(final String threadId, final String taskId, final String metricNamePrefix, @@ -207,4 +259,6 @@ private static Sensor throughputSensor(final String threadId, ); return sensor; } + + } diff --git a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowStore.java b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowStore.java index ceabd15e5..4f2587d1e 100644 --- a/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowStore.java +++ b/streams/src/main/java/org/apache/kafka/streams/state/internals/RocksDBTimeOrderedWindowStore.java @@ -21,6 +21,10 @@ import org.apache.kafka.streams.kstream.Windowed; import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.processor.StateStoreContext; +import org.apache.kafka.streams.query.PositionBound; +import org.apache.kafka.streams.query.Query; +import org.apache.kafka.streams.query.QueryConfig; +import org.apache.kafka.streams.query.QueryResult; import org.apache.kafka.streams.state.KeyValueIterator; import org.apache.kafka.streams.state.TimestampedBytesStore; import org.apache.kafka.streams.state.WindowStore; @@ -34,6 +38,8 @@ public class RocksDBTimeOrderedWindowStore private final boolean retainDuplicates; private final long windowSize; + + private StateStoreContext stateStoreContext; private int seqnum = 0; RocksDBTimeOrderedWindowStore( @@ -49,6 +55,7 @@ public class RocksDBTimeOrderedWindowStore @Override public void init(final StateStoreContext context, final StateStore root) { + stateStoreContext = context; wrapped().init(context, root); } @@ -168,6 +175,21 @@ public boolean hasIndex() { return 
wrapped().hasIndex(); } + @Override + public QueryResult query(final Query query, + final PositionBound positionBound, + final QueryConfig config) { + + return StoreQueryUtils.handleBasicQueries( + query, + positionBound, + config, + this, + getPosition(), + stateStoreContext + ); + } + private void maybeUpdateSeqnumForDups() { if (retainDuplicates) { seqnum = (seqnum + 1) & 0x7FFFFFFF; diff --git a/streams/src/test/java/org/apache/kafka/streams/integration/TimeWindowedKStreamIntegrationTest.java b/streams/src/test/java/org/apache/kafka/streams/integration/TimeWindowedKStreamIntegrationTest.java new file mode 100644 index 000000000..9abefce6a --- /dev/null +++ b/streams/src/test/java/org/apache/kafka/streams/integration/TimeWindowedKStreamIntegrationTest.java @@ -0,0 +1,509 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.streams.integration; + +import java.util.Collection; +import java.util.Optional; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.serialization.Serde; +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.common.serialization.Serdes.StringSerde; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.kafka.streams.KafkaStreams; +import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.StreamsConfig; +import org.apache.kafka.streams.KeyValueTimestamp; +import org.apache.kafka.streams.StreamsConfig.InternalConfig; +import org.apache.kafka.streams.integration.utils.EmbeddedKafkaCluster; +import org.apache.kafka.streams.integration.utils.IntegrationTestUtils; +import org.apache.kafka.streams.kstream.Consumed; +import org.apache.kafka.streams.kstream.EmitStrategy; +import org.apache.kafka.streams.kstream.EmitStrategy.StrategyType; +import org.apache.kafka.streams.kstream.JoinWindows; +import org.apache.kafka.streams.kstream.KStream; +import org.apache.kafka.streams.kstream.Materialized; +import org.apache.kafka.streams.kstream.Produced; +import org.apache.kafka.streams.kstream.SessionWindowedDeserializer; +import org.apache.kafka.streams.kstream.TimeWindowedDeserializer; +import org.apache.kafka.streams.kstream.TimeWindows; +import org.apache.kafka.streams.kstream.UnlimitedWindows; +import org.apache.kafka.streams.kstream.Windowed; +import org.apache.kafka.streams.kstream.WindowedSerdes; +import org.apache.kafka.streams.kstream.internals.TimeWindow; +import org.apache.kafka.streams.kstream.internals.TimeWindowedKStreamImpl; +import org.apache.kafka.test.IntegrationTest; +import org.apache.kafka.test.MockAggregator; +import org.apache.kafka.test.MockInitializer; 
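The emittedRecordsSensor and emitFinalLatencySensor wired into the processor's init() above are node-level sensors registered at RecordingLevel.DEBUG, so they only record when metrics.recording.level is set to DEBUG. Once they do, they appear in the regular KafkaStreams#metrics() map under the processor-node group. A rough lookup sketch; the exact name suffixes (-rate/-total for the record count, -latency-avg/-latency-max for the latency) follow the usual StreamsMetricsImpl conventions and are an assumption here:

    import java.util.Map;
    import org.apache.kafka.common.Metric;
    import org.apache.kafka.common.MetricName;
    import org.apache.kafka.streams.KafkaStreams;

    public class EmitFinalMetricsSketch {
        // Prints every processor-node metric whose name starts with "window-aggregate-final-emit".
        static void printEmitFinalMetrics(final KafkaStreams streams) {
            for (final Map.Entry<MetricName, ? extends Metric> entry : streams.metrics().entrySet()) {
                final MetricName name = entry.getKey();
                if ("stream-processor-node-metrics".equals(name.group())
                        && name.name().startsWith("window-aggregate-final-emit")) {
                    System.out.println(name.name() + " = " + entry.getValue().metricValue());
                }
            }
        }
    }
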
+import org.apache.kafka.test.TestUtils; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.TestName; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Properties; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; + +import static java.time.Duration.ofMillis; +import static java.time.Instant.ofEpochMilli; +import static java.util.Arrays.asList; +import static org.apache.kafka.common.utils.Utils.mkEntry; +import static org.apache.kafka.common.utils.Utils.mkMap; +import static org.apache.kafka.common.utils.Utils.mkProperties; +import static org.apache.kafka.streams.integration.utils.IntegrationTestUtils.safeUniqueTestName; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertThrows; + +@SuppressWarnings({"unchecked"}) +@Category({IntegrationTest.class}) +@RunWith(Parameterized.class) +public class TimeWindowedKStreamIntegrationTest { + private static final int NUM_BROKERS = 1; + + public static final EmbeddedKafkaCluster CLUSTER = new EmbeddedKafkaCluster(NUM_BROKERS, + mkProperties( + mkMap(mkEntry("log.retention.hours", "-1"), mkEntry("log.retention.bytes", "-1")) // Don't expire records since we manipulate timestamp + ) + ); + + @BeforeClass + public static void startCluster() throws IOException { + CLUSTER.start(); + } + + @AfterClass + public static void closeCluster() { + CLUSTER.stop(); + } + + + private StreamsBuilder builder; + private Properties streamsConfiguration; + private KafkaStreams kafkaStreams; + private String streamOneInput; + private String streamTwoInput; + private String outputTopic; + + @Rule + public TestName testName = new TestName(); + + @Parameter + public StrategyType type; + + @Parameter(1) + public boolean withCache; + + @Parameter(2) + public EmitStrategy emitStrategy; + + private boolean emitFinal; + + @Parameterized.Parameters(name = "{0}_{1}") + public static Collection getEmitStrategy() { + return asList(new Object[][] { + {StrategyType.ON_WINDOW_UPDATE, true, EmitStrategy.onWindowUpdate()}, + {StrategyType.ON_WINDOW_UPDATE, false, EmitStrategy.onWindowUpdate()}, + {StrategyType.ON_WINDOW_CLOSE, true, EmitStrategy.onWindowClose()}, + {StrategyType.ON_WINDOW_CLOSE, false, EmitStrategy.onWindowClose()} + }); + } + + @Before + public void before() throws InterruptedException { + builder = new StreamsBuilder(); + createTopics(); + streamsConfiguration = new Properties(); + final String safeTestName = safeUniqueTestName(getClass(), testName); + streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "app-" + safeTestName); + streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); + streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()); + streamsConfiguration.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, 0); + streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L); + streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass()); + streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass()); + 
streamsConfiguration.put(InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION, 0); // Always process + streamsConfiguration.put(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, Long.MAX_VALUE); // Don't expire changelog + + emitFinal = emitStrategy.type() == StrategyType.ON_WINDOW_CLOSE; + } + + @After + public void whenShuttingDown() throws IOException { + if (kafkaStreams != null) { + kafkaStreams.close(); + kafkaStreams.cleanUp(); + } + IntegrationTestUtils.purgeLocalStreamsState(streamsConfiguration); + } + + @Test + public void shouldAggregateWindowedWithNoGrace() throws Exception { + produceMessages( + streamOneInput, + new KeyValueTimestamp<>("A", "1", 0), + new KeyValueTimestamp<>("A", "2", 5), + new KeyValueTimestamp<>("A", "3", 10), // close [0, 10) + new KeyValueTimestamp<>("B", "4", 6), // late and skip for [0, 10) + new KeyValueTimestamp<>("B", "5", 11), + new KeyValueTimestamp<>("B", "6", 15), // close [5, 15) + new KeyValueTimestamp<>("C", "7", 25) // close [10, 20), [15, 25) + ); + + final Serde> windowedSerde = WindowedSerdes.timeWindowedSerdeFrom(String.class, 10L); + // TODO: remove this cast https://issues.apache.org/jira/browse/KAFKA-13800 + final TimeWindowedKStreamImpl windowedStream = (TimeWindowedKStreamImpl) builder + .stream(streamOneInput, Consumed.with(Serdes.String(), Serdes.String())) + .groupByKey() + .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(10L)).advanceBy(ofMillis(5L))); + windowedStream.emitStrategy(emitStrategy) + .aggregate( + MockInitializer.STRING_INIT, + MockAggregator.TOSTRING_ADDER, + getMaterialized() + ) + .toStream() + .to(outputTopic, Produced.with(windowedSerde, new StringSerde())); + + startStreams(); + + final List, String>> windowedMessages = receiveMessagesWithTimestamp( + new TimeWindowedDeserializer<>(new StringDeserializer(), 10L), + new StringDeserializer(), + 10L, + String.class, + emitFinal ? 
6 : 12); + + final List, String>> expectResult; + if (emitFinal) { + expectResult = asList( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), "0+1+2", 5), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5L, 15L)), "0+2+3", 10), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5L, 15L)), "0+4+5", 11), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(10L, 20L)), "0+3", 10), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10L, 20L)), "0+5+6", 15), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(15L, 25L)), "0+6", 15) + ); + } else { + expectResult = asList( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), "0+1", 0), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), "0+1+2", 5), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5L, 15L)), "0+2", 5), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5L, 15L)), "0+2+3", 10), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(10L, 20L)), "0+3", 10), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5L, 15L)), "0+4", 6), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5L, 15L)), "0+4+5", 11), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10L, 20L)), "0+5", 11), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10L, 20L)), "0+5+6", 15), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(15L, 25L)), "0+6", 15), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(20L, 30L)), "0+7", 25), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(25L, 35L)), "0+7", 25) + ); + } + + assertThat(windowedMessages, is(expectResult)); + } + + @Test + public void shouldAggregateWindowedWithGrace() throws Exception { + produceMessages( + streamOneInput, + new KeyValueTimestamp<>("A", "1", 0), + new KeyValueTimestamp<>("A", "2", 5), + new KeyValueTimestamp<>("A", "3", 10), + new KeyValueTimestamp<>("B", "4", 6), + new KeyValueTimestamp<>("B", "5", 11), + new KeyValueTimestamp<>("B", "6", 15), // close [0, 10), output A, B [0, 10) + new KeyValueTimestamp<>("C", "7", 25) // close [5, 15), [10, 20) + ); + + final Serde> windowedSerde = WindowedSerdes.timeWindowedSerdeFrom(String.class, 10L); + final TimeWindowedKStreamImpl windowedStream = (TimeWindowedKStreamImpl) builder + .stream(streamOneInput, Consumed.with(Serdes.String(), Serdes.String())) + .groupByKey() + .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10L), ofMillis(5)).advanceBy(ofMillis(5L))); + windowedStream.emitStrategy(emitStrategy) + .aggregate( + MockInitializer.STRING_INIT, + MockAggregator.TOSTRING_ADDER, + getMaterialized() + ) + .toStream() + .to(outputTopic, Produced.with(windowedSerde, new StringSerde())); + + startStreams(); + + final List, String>> windowedMessages = receiveMessagesWithTimestamp( + new TimeWindowedDeserializer<>(new StringDeserializer(), 10L), + new StringDeserializer(), + 10L, + String.class, + emitFinal ? 
6 : 13); + + final List, String>> expectResult; + if (emitFinal) { + expectResult = asList( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), "0+1+2", 5), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0L, 10L)), "0+4", 6), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5L, 15L)), "0+2+3", 10), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5L, 15L)), "0+4+5", 11), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(10L, 20L)), "0+3", 10), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10L, 20L)), "0+5+6", 15) + ); + } else { + expectResult = asList( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), "0+1", 0), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), "0+1+2", 5), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5L, 15L)), "0+2", 5), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5L, 15L)), "0+2+3", 10), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(10L, 20L)), "0+3", 10), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0L, 10L)), "0+4", 6), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5L, 15L)), "0+4", 6), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5L, 15L)), "0+4+5", 11), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10L, 20L)), "0+5", 11), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10L, 20L)), "0+5+6", 15), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(15L, 25L)), "0+6", 15), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(20L, 30L)), "0+7", 25), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(25L, 35L)), "0+7", 25) + ); + } + + assertThat(windowedMessages, is(expectResult)); + } + + @Test + public void shouldRestoreAfterJoinRestart() throws Exception { + produceMessages( + streamOneInput, + new KeyValueTimestamp<>("A", "L1", 0), + new KeyValueTimestamp<>("A", "L1", 5), + new KeyValueTimestamp<>("B", "L2", 11), // close [0, 10) + new KeyValueTimestamp<>("B", "L2", 15), // close [5, 15) + new KeyValueTimestamp<>("C", "L3", 25) // close [15, 25), [10, 20) + ); + + produceMessages( + streamTwoInput, + new KeyValueTimestamp<>("A", "R1", 0), + new KeyValueTimestamp<>("A", "R1", 5), + new KeyValueTimestamp<>("B", "R2", 11), // close [0, 10) + new KeyValueTimestamp<>("B", "R2", 15), // close [5, 15) + new KeyValueTimestamp<>("C", "R3", 25) // close [15, 25), [10, 20) + ); + + final Serde> windowedSerde = WindowedSerdes.timeWindowedSerdeFrom( + String.class, 10L); + final KStream streamOne = builder.stream(streamOneInput, + Consumed.with(Serdes.String(), Serdes.String())); + final KStream streamTwo = builder.stream(streamTwoInput, + Consumed.with(Serdes.String(), Serdes.String())); + + final KStream joinedStream = streamOne + .join(streamTwo, (v1, v2) -> v1 + "," + v2, + JoinWindows.ofTimeDifferenceWithNoGrace(ofMillis(2))); + + final TimeWindowedKStreamImpl windowedStream = (TimeWindowedKStreamImpl) joinedStream + .groupByKey() + .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(10L)).advanceBy(ofMillis(5L))); + + windowedStream.emitStrategy(emitStrategy) + .aggregate( + MockInitializer.STRING_INIT, + MockAggregator.TOSTRING_ADDER, + getMaterialized() + ) + .toStream() + .to(outputTopic, Produced.with(windowedSerde, new StringSerde())); + + startStreams(); + + List, String>> windowedMessages = receiveMessagesWithTimestamp( + new TimeWindowedDeserializer<>(new StringDeserializer(), 10L), + new 
StringDeserializer(), + 10L, + String.class, + emitFinal ? 5 : 9); + + List, String>> expectResult; + if (emitFinal) { + expectResult = asList( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), + "0+L1,R1+L1,R1", 5), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5L, 15L)), "0+L1,R1", + 5), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5L, 15L)), "0+L2,R2", + 11), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10L, 20L)), + "0+L2,R2+L2,R2", 15), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(15L, 25L)), + "0+L2,R2", 15) + ); + } else { + expectResult = asList( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), "0+L1,R1", + 0), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0L, 10L)), + "0+L1,R1+L1,R1", 5), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5L, 15L)), "0+L1,R1", + 5), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5L, 15L)), "0+L2,R2", + 11), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10L, 20L)), + "0+L2,R2", 11), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10L, 20L)), + "0+L2,R2+L2,R2", 15), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(15L, 25L)), + "0+L2,R2", 15), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(20L, 30L)), + "0+L3,R3", 25), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(25L, 35L)), + "0+L3,R3", 25) + ); + } + + assertThat(windowedMessages, is(expectResult)); + + kafkaStreams.close(); + kafkaStreams.cleanUp(); // Purge store to force restoration + + produceMessages( + streamOneInput, + new KeyValueTimestamp<>("C", "L3", 35) // close [20, 30), [25, 35) + ); + produceMessages( + streamTwoInput, + new KeyValueTimestamp<>("C", "R3", 35) // close [20, 30), [25, 35) + ); + + // Restart + startStreams(); + + windowedMessages = receiveMessagesWithTimestamp( + new TimeWindowedDeserializer<>(new StringDeserializer(), 10L), + new StringDeserializer(), + 10L, + String.class, + 2); + + if (emitFinal) { + // Output just new closed window for C + expectResult = asList( + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(20L, 30L)), + "0+L3,R3", 25), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(25L, 35L)), + "0+L3,R3", 25) + ); + } else { + expectResult = asList( + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(30L, 40L)), + "0+L3,R3", 35), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(35L, 45L)), + "0+L3,R3", 35) + ); + } + + assertThat(windowedMessages, is(expectResult)); + } + + @Test + public void shouldThrowUnlimitedWindows() { + final Serde> windowedSerde = WindowedSerdes.timeWindowedSerdeFrom(String.class, 10L); + final TimeWindowedKStreamImpl windowedStream = (TimeWindowedKStreamImpl) builder + .stream(streamOneInput, Consumed.with(Serdes.String(), Serdes.String())) + .groupByKey() + .windowedBy( + UnlimitedWindows.of().startOn(ofEpochMilli(0)) + ); + + if (emitFinal) { + assertThrows(IllegalArgumentException.class, () -> windowedStream.emitStrategy(emitStrategy)); + } else { + windowedStream.emitStrategy(emitStrategy); + } + } + + + private void produceMessages(final String topic, final KeyValueTimestamp... 
records) { + IntegrationTestUtils.produceSynchronously( + TestUtils.producerConfig( + CLUSTER.bootstrapServers(), + StringSerializer.class, + StringSerializer.class), + false, + topic, + Optional.empty(), + Arrays.asList(records) + ); + } + + private Materialized getMaterialized() { + if (withCache) { + return Materialized.with(null, new StringSerde()).withCachingEnabled(); + } + return Materialized.with(null, new StringSerde()).withCachingDisabled(); + } + + private void createTopics() throws InterruptedException { + final String safeTestName = safeUniqueTestName(getClass(), testName); + streamOneInput = "stream-one-" + safeTestName; + streamTwoInput = "stream-two-" + safeTestName; + outputTopic = "output-" + safeTestName; + CLUSTER.createTopic(streamOneInput, 1, 1); + CLUSTER.createTopic(streamTwoInput, 1, 1); + CLUSTER.createTopic(outputTopic); + } + + private void startStreams() { + kafkaStreams = new KafkaStreams(builder.build(), streamsConfiguration); + kafkaStreams.start(); + } + + private List> receiveMessagesWithTimestamp(final Deserializer keyDeserializer, + final Deserializer valueDeserializer, + final long windowSize, + final Class innerClass, + final int numMessages) throws Exception { + final String safeTestName = safeUniqueTestName(getClass(), testName); + final Properties consumerProperties = new Properties(); + consumerProperties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); + consumerProperties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group-" + safeTestName); + consumerProperties.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + consumerProperties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass().getName()); + consumerProperties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass().getName()); + consumerProperties.put(StreamsConfig.WINDOW_SIZE_MS_CONFIG, windowSize); + if (keyDeserializer instanceof TimeWindowedDeserializer || keyDeserializer instanceof SessionWindowedDeserializer) { + consumerProperties.setProperty(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, + Serdes.serdeFrom(innerClass).getClass().getName()); + } + return IntegrationTestUtils.waitUntilMinKeyValueWithTimestampRecordsReceived( + consumerProperties, + outputTopic, + numMessages, + 60 * 1000); + } +} diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java index 750f7f508..d6fadd5dc 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/KStreamWindowAggregateTest.java @@ -16,38 +16,62 @@ */ package org.apache.kafka.streams.kstream.internals; +import java.io.File; +import java.io.IOException; +import java.util.Collection; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.serialization.StringDeserializer; import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.Bytes; +import org.apache.kafka.common.utils.Utils; import org.apache.kafka.streams.KeyValue; import org.apache.kafka.streams.KeyValueTimestamp; import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.StreamsConfig.InternalConfig; import org.apache.kafka.streams.TestOutputTopic; import 
org.apache.kafka.streams.TopologyTestDriver; import org.apache.kafka.streams.kstream.Consumed; +import org.apache.kafka.streams.kstream.EmitStrategy; +import org.apache.kafka.streams.kstream.EmitStrategy.StrategyType; import org.apache.kafka.streams.kstream.Grouped; -import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.KTable; import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.kstream.TimeWindows; +import org.apache.kafka.streams.kstream.UnlimitedWindows; import org.apache.kafka.streams.kstream.Windowed; +import org.apache.kafka.streams.kstream.Windows; +import org.apache.kafka.streams.processor.StateStore; +import org.apache.kafka.streams.processor.TaskId; +import org.apache.kafka.streams.processor.api.MockProcessorContext.CapturedForward; +import org.apache.kafka.streams.processor.api.Processor; +import org.apache.kafka.streams.processor.api.Record; +import org.apache.kafka.streams.processor.internals.ProcessorNode; import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; +import org.apache.kafka.streams.state.Stores; +import org.apache.kafka.streams.state.TimestampedWindowStore; +import org.apache.kafka.streams.state.WindowBytesStoreSupplier; import org.apache.kafka.streams.state.WindowStore; import org.apache.kafka.streams.TestInputTopic; +import org.apache.kafka.streams.state.internals.RocksDbIndexedTimeOrderedWindowBytesStoreSupplier; import org.apache.kafka.streams.test.TestRecord; import org.apache.kafka.test.MockAggregator; import org.apache.kafka.test.MockApiProcessor; import org.apache.kafka.test.MockApiProcessorSupplier; import org.apache.kafka.test.MockInitializer; +import org.apache.kafka.test.MockInternalNewProcessorContext; import org.apache.kafka.test.StreamsTestUtils; +import org.apache.kafka.test.TestUtils; import org.hamcrest.Matcher; +import org.junit.Before; import org.junit.Test; import java.time.Duration; import java.util.List; import java.util.Properties; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; import static java.time.Duration.ofMillis; import static java.util.Arrays.asList; @@ -60,22 +84,58 @@ import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.MatcherAssert.assertThat; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; +@RunWith(Parameterized.class) public class KStreamWindowAggregateTest { + private static final String WINDOW_STORE_NAME = "dummy-store-name"; private final Properties props = StreamsTestUtils.getStreamsConfig(Serdes.String(), Serdes.String()); private final String threadId = Thread.currentThread().getName(); + @Parameter + public StrategyType type; + + @Parameter(1) + public boolean withCache; + + @Parameter(2) + public EmitStrategy emitStrategy; + + private boolean emitFinal; + + @Parameterized.Parameters(name = "{0}_{1}") + public static Collection getEmitStrategy() { + return asList(new Object[][] { + {StrategyType.ON_WINDOW_UPDATE, true, EmitStrategy.onWindowUpdate()}, + {StrategyType.ON_WINDOW_UPDATE, false, EmitStrategy.onWindowUpdate()}, + {StrategyType.ON_WINDOW_CLOSE, true, EmitStrategy.onWindowClose()}, + {StrategyType.ON_WINDOW_CLOSE, false, EmitStrategy.onWindowClose()} + }); + } + + @Before + public void before() { + emitFinal = type.equals(StrategyType.ON_WINDOW_CLOSE); + // Set interval to 0 so that it always tries to emit + 
props.setProperty(InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION, "0"); + } + + @SuppressWarnings("unchecked") @Test public void testAggBasic() { final StreamsBuilder builder = new StreamsBuilder(); final String topic1 = "topic1"; - final KTable, String> table2 = builder + // TODO: remove this cast after we add emitStrategy to public api + final TimeWindowedKStreamImpl windowedStream = (TimeWindowedKStreamImpl) builder .stream(topic1, Consumed.with(Serdes.String(), Serdes.String())) .groupByKey(Grouped.with(Serdes.String(), Serdes.String())) - .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10), ofMillis(100)).advanceBy(ofMillis(5))) - .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, Materialized.>as("topic1-Canonized").withValueSerde(Serdes.String())); + .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10), ofMillis(100)).advanceBy(ofMillis(5))); + + final KTable, String> table2 = windowedStream + .emitStrategy(emitStrategy) + .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, setMaterializedCache(Materialized.>as("topic1-Canonized").withValueSerde(Serdes.String()))); final MockApiProcessorSupplier, String, Void, Void> supplier = new MockApiProcessorSupplier<>(); table2.toStream().process(supplier); @@ -106,63 +166,89 @@ public void testAggBasic() { inputTopic1.pipeInput("B", "3", 9L); } - assertEquals( - asList( - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1", 0), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2", 1), - new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3", 2), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4", 3), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1", 4), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1+1", 5), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1", 5), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2", 6), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2", 6), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4+4", 7), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+4", 7), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2+2", 8), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2", 8), - new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3+3", 9), - new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(5, 15)), "0+3", 9), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1+1", 10), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(10, 20)), "0+1", 10), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2+2", 11), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10, 20)), "0+2", 11), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+4+4", 12), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(10, 20)), "0+4", 12), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2+2+2", 13), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10, 20)), "0+2+2", 13), - new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(5, 15)), "0+3+3", 14), - new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(10, 20)), "0+3", 14), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2+2+1", 8), 
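For orientation, the wiring these parameterized tests repeat is: group, window, downcast to the internal TimeWindowedKStreamImpl (the TODO above notes that emitStrategy() is not yet on the public TimeWindowedKStream interface), set the strategy, then aggregate. Below is a condensed, stand-alone sketch of that wiring (not taken from the patch itself); the topic names, store name, and the lambda initializer/aggregator are placeholders, and the raw-typed cast simply mirrors what the tests do.

    import static java.time.Duration.ofMillis;

    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.common.utils.Bytes;
    import org.apache.kafka.streams.StreamsBuilder;
    import org.apache.kafka.streams.kstream.Consumed;
    import org.apache.kafka.streams.kstream.EmitStrategy;
    import org.apache.kafka.streams.kstream.Grouped;
    import org.apache.kafka.streams.kstream.Materialized;
    import org.apache.kafka.streams.kstream.Produced;
    import org.apache.kafka.streams.kstream.TimeWindows;
    import org.apache.kafka.streams.kstream.WindowedSerdes;
    import org.apache.kafka.streams.kstream.internals.TimeWindowedKStreamImpl;
    import org.apache.kafka.streams.state.WindowStore;

    public class EmitFinalWiringSketch {

        @SuppressWarnings({"unchecked", "rawtypes"})
        public static StreamsBuilder buildTopology() {
            final StreamsBuilder builder = new StreamsBuilder();

            // Same shape as the tests: group, window, then downcast to reach emitStrategy().
            final TimeWindowedKStreamImpl windowed = (TimeWindowedKStreamImpl) builder
                .stream("input-topic", Consumed.with(Serdes.String(), Serdes.String()))
                .groupByKey(Grouped.with(Serdes.String(), Serdes.String()))
                .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10), ofMillis(5)).advanceBy(ofMillis(5)));

            windowed
                .emitStrategy(EmitStrategy.onWindowClose())       // or EmitStrategy.onWindowUpdate()
                .aggregate(
                    () -> "",                                     // initializer
                    (key, value, aggregate) -> aggregate + "+" + value,
                    Materialized.<String, String, WindowStore<Bytes, byte[]>>as("agg-store")
                        .withValueSerde(Serdes.String()))
                .toStream()
                .to("output-topic", Produced.with(
                    WindowedSerdes.timeWindowedSerdeFrom(String.class, 10L),
                    Serdes.String()));

            return builder;
        }
    }

ON_WINDOW_UPDATE keeps the pre-existing behaviour (one downstream update per input record), while ON_WINDOW_CLOSE suppresses intermediate updates and forwards a single result per window once the window's close time has passed, which is what the emitFinal branches of these tests assert.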
- new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2+2+1+2", 8), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2+2+1+2+3", 9), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2+2+2+3", 13) + if (emitFinal) { + // Nothing processed since grace is 100L and no window closes + assertTrue(supplier.theCapturedProcessor().processed().isEmpty()); + } else { + assertEquals( + asList( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1", 0), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2", 1), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3", 2), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4", 3), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1", 4), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1+1", + 5), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1", 5), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2", 6), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2", 6), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4+4", 7), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+4", 7), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2+2", + 8), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2", 8), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3+3", 9), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(5, 15)), "0+3", 9), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1+1", + 10), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(10, 20)), "0+1", 10), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2+2", + 11), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10, 20)), "0+2", 11), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+4+4", + 12), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(10, 20)), "0+4", 12), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2+2+2", + 13), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(10, 20)), "0+2+2", + 13), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(5, 15)), "0+3+3", + 14), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(10, 20)), "0+3", 14), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2+2+1", + 8), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), + "0+2+2+2+1+2", 8), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), + "0+2+2+2+1+2+3", 9), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), + "0+2+2+2+2+3", 13) ), - supplier.theCapturedProcessor().processed() - ); + supplier.theCapturedProcessor().processed() + ); + } } + @SuppressWarnings("unchecked") @Test public void testJoin() { final StreamsBuilder builder = new StreamsBuilder(); final String topic1 = "topic1"; final String topic2 = "topic2"; + final long grace = emitFinal ? 
5L : 100L; - final KTable, String> table1 = builder + // TODO: remove this cast after we add emitStrategy to public api + final TimeWindowedKStreamImpl windowedStream1 = (TimeWindowedKStreamImpl) builder .stream(topic1, Consumed.with(Serdes.String(), Serdes.String())) .groupByKey(Grouped.with(Serdes.String(), Serdes.String())) - .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10), ofMillis(100)).advanceBy(ofMillis(5))) - .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, Materialized.>as("topic1-Canonized").withValueSerde(Serdes.String())); + .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10), ofMillis(grace)).advanceBy(ofMillis(5))); + + final KTable, String> table1 = windowedStream1 + .emitStrategy(emitStrategy) + .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, setMaterializedCache(Materialized.>as("topic1-Canonized").withValueSerde(Serdes.String()))); final MockApiProcessorSupplier, String, Void, Void> supplier = new MockApiProcessorSupplier<>(); table1.toStream().process(supplier); - final KTable, String> table2 = builder + final TimeWindowedKStreamImpl windowedStream2 = (TimeWindowedKStreamImpl) builder .stream(topic2, Consumed.with(Serdes.String(), Serdes.String())) .groupByKey(Grouped.with(Serdes.String(), Serdes.String())) - .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10), ofMillis(100)).advanceBy(ofMillis(5))) - .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, Materialized.>as("topic2-Canonized").withValueSerde(Serdes.String())); + .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10), ofMillis(grace)).advanceBy(ofMillis(5))); + + final KTable, String> table2 = windowedStream2 + .emitStrategy(emitStrategy) + .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, setMaterializedCache(Materialized.>as("topic2-Canonized").withValueSerde(Serdes.String()))); table2.toStream().process(supplier); table1.join(table2, (p1, p2) -> p1 + "%" + p2).toStream().process(supplier); @@ -172,110 +258,221 @@ public void testJoin() { driver.createInputTopic(topic1, new StringSerializer(), new StringSerializer()); final TestInputTopic inputTopic2 = driver.createInputTopic(topic2, new StringSerializer(), new StringSerializer()); - inputTopic1.pipeInput("A", "1", 0L); - inputTopic1.pipeInput("B", "2", 1L); - inputTopic1.pipeInput("C", "3", 2L); - inputTopic1.pipeInput("D", "4", 3L); - inputTopic1.pipeInput("A", "1", 9L); - final List, String, Void, Void>> processors = supplier.capturedProcessors(3); + if (emitFinal) { + processEmitFinalJoin(inputTopic1, inputTopic2, supplier); + } else { + processEmitUpdateJoin(inputTopic1, inputTopic2, supplier); + } + } + } - processors.get(0).checkAndClearProcessResult( - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1", 0), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2", 1), - new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3", 2), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4", 3), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1", 9), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1", 9) - ); - processors.get(1).checkAndClearProcessResult(); - processors.get(2).checkAndClearProcessResult(); + private void processEmitFinalJoin(final TestInputTopic inputTopic1, + final TestInputTopic inputTopic2, + final MockApiProcessorSupplier, String, Void, Void> supplier) { + inputTopic1.pipeInput("A", "1", 0L); + inputTopic1.pipeInput("B", 
"2", 1L); + inputTopic1.pipeInput("C", "3", 2L); + inputTopic1.pipeInput("D", "4", 3L); + inputTopic1.pipeInput("A", "1", 9L); + inputTopic1.pipeInput("A", "1", 15L); - inputTopic1.pipeInput("A", "1", 5L); - inputTopic1.pipeInput("B", "2", 6L); - inputTopic1.pipeInput("D", "4", 7L); - inputTopic1.pipeInput("B", "2", 8L); - inputTopic1.pipeInput("C", "3", 9L); + final List, String, Void, Void>> processors = supplier.capturedProcessors( + 3); - processors.get(0).checkAndClearProcessResult( - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1+1", 9), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1+1", 9), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2", 6), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2", 6), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4+4", 7), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+4", 7), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2+2", 8), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2", 8), - new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3+3", 9), - new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(5, 15)), "0+3", 9) - ); - processors.get(1).checkAndClearProcessResult(); - processors.get(2).checkAndClearProcessResult(); - - inputTopic2.pipeInput("A", "a", 0L); - inputTopic2.pipeInput("B", "b", 1L); - inputTopic2.pipeInput("C", "c", 2L); - inputTopic2.pipeInput("D", "d", 20L); - inputTopic2.pipeInput("A", "a", 20L); - - processors.get(0).checkAndClearProcessResult(); - processors.get(1).checkAndClearProcessResult( - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+a", 0), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+b", 1), - new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+c", 2), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(15, 25)), "0+d", 20), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(20, 30)), "0+d", 20), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(15, 25)), "0+a", 20), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(20, 30)), "0+a", 20) - ); - processors.get(2).checkAndClearProcessResult( - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1+1%0+a", 9), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2+2%0+b", 8), - new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3+3%0+c", 9)); - - inputTopic2.pipeInput("A", "a", 5L); - inputTopic2.pipeInput("B", "b", 6L); - inputTopic2.pipeInput("D", "d", 7L); - inputTopic2.pipeInput("D", "d", 18L); - inputTopic2.pipeInput("A", "a", 21L); - - processors.get(0).checkAndClearProcessResult(); - processors.get(1).checkAndClearProcessResult( - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+a+a", 5), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+a", 5), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+b+b", 6), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+b", 6), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+d", 7), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+d", 7), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(10, 20)), "0+d", 18), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(15, 
25)), "0+d+d", 20), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(15, 25)), "0+a+a", 21), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(20, 30)), "0+a+a", 21) - ); - processors.get(2).checkAndClearProcessResult( - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1+1%0+a+a", 9), - new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1+1%0+a", 9), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2+2%0+b+b", 8), - new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2%0+b", 8), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4+4%0+d", 7), - new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+4%0+d", 7) - ); - } + processors.get(0).checkAndClearProcessResult( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1", 9), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2", 1), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3", 2), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4", 3) + ); + processors.get(1).checkAndClearProcessResult(); + processors.get(2).checkAndClearProcessResult(); + + inputTopic1.pipeInput("A", "1", 10L); + inputTopic1.pipeInput("B", "2", 11L); + inputTopic1.pipeInput("D", "4", 12L); + inputTopic1.pipeInput("B", "2", 13L); + inputTopic1.pipeInput("C", "3", 14L); + inputTopic1.pipeInput("A", "1", 20L); + + processors.get(0).checkAndClearProcessResult( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1+1", 10), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2", 13), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(5, 15)), "0+3", 14), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+4", 12) + ); + processors.get(1).checkAndClearProcessResult(); + processors.get(2).checkAndClearProcessResult(); + + inputTopic2.pipeInput("A", "a", 0L); + inputTopic2.pipeInput("B", "b", 1L); + inputTopic2.pipeInput("C", "c", 2L); + inputTopic2.pipeInput("D", "d", 10L); + inputTopic2.pipeInput("A", "a", 15L); + + processors.get(0).checkAndClearProcessResult(); + processors.get(1).checkAndClearProcessResult( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+a", 0), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+b", 1), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+c", 2) + ); + processors.get(2).checkAndClearProcessResult( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), + "0+1+1%0+a", 9), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), + "0+2%0+b", 1), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3%0+c", + 2)); + + inputTopic2.pipeInput("A", "a", 5L); + inputTopic2.pipeInput("B", "b", 6L); + inputTopic2.pipeInput("D", "d", 7L); + inputTopic2.pipeInput("D", "d", 18L); + inputTopic2.pipeInput("A", "a", 21L); + + processors.get(0).checkAndClearProcessResult(); + processors.get(1).checkAndClearProcessResult( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+a", 5), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+b", 6), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+d+d", 10) + ); + processors.get(2).checkAndClearProcessResult( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1+1%0+a", + 10), + new 
KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2%0+b", + 13), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+4%0+d+d", + 12) + ); } + private void processEmitUpdateJoin(final TestInputTopic inputTopic1, + final TestInputTopic inputTopic2, + final MockApiProcessorSupplier, String, Void, Void> supplier) { + inputTopic1.pipeInput("A", "1", 0L); + inputTopic1.pipeInput("B", "2", 1L); + inputTopic1.pipeInput("C", "3", 2L); + inputTopic1.pipeInput("D", "4", 3L); + inputTopic1.pipeInput("A", "1", 9L); + + final List, String, Void, Void>> processors = supplier.capturedProcessors( + 3); + + processors.get(0).checkAndClearProcessResult( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1", 0), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2", 1), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3", 2), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4", 3), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1", 9), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1", 9) + ); + processors.get(1).checkAndClearProcessResult(); + processors.get(2).checkAndClearProcessResult(); + + inputTopic1.pipeInput("A", "1", 5L); + inputTopic1.pipeInput("B", "2", 6L); + inputTopic1.pipeInput("D", "4", 7L); + inputTopic1.pipeInput("B", "2", 8L); + inputTopic1.pipeInput("C", "3", 9L); + + processors.get(0).checkAndClearProcessResult( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+1+1+1", + 9), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1+1", 9), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2", 6), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2", 6), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4+4", 7), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+4", 7), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+2+2+2", + 8), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2", 8), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3+3", 9), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(5, 15)), "0+3", 9) + ); + processors.get(1).checkAndClearProcessResult(); + processors.get(2).checkAndClearProcessResult(); + + inputTopic2.pipeInput("A", "a", 0L); + inputTopic2.pipeInput("B", "b", 1L); + inputTopic2.pipeInput("C", "c", 2L); + inputTopic2.pipeInput("D", "d", 20L); + inputTopic2.pipeInput("A", "a", 20L); + + processors.get(0).checkAndClearProcessResult(); + processors.get(1).checkAndClearProcessResult( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+a", 0), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+b", 1), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+c", 2), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(15, 25)), "0+d", 20), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(20, 30)), "0+d", 20), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(15, 25)), "0+a", 20), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(20, 30)), "0+a", 20) + ); + processors.get(2).checkAndClearProcessResult( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), + "0+1+1+1%0+a", 9), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), + 
"0+2+2+2%0+b", 8), + new KeyValueTimestamp<>(new Windowed<>("C", new TimeWindow(0, 10)), "0+3+3%0+c", + 9)); + + inputTopic2.pipeInput("A", "a", 5L); + inputTopic2.pipeInput("B", "b", 6L); + inputTopic2.pipeInput("D", "d", 7L); + inputTopic2.pipeInput("D", "d", 18L); + inputTopic2.pipeInput("A", "a", 21L); + + processors.get(0).checkAndClearProcessResult(); + processors.get(1).checkAndClearProcessResult( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), "0+a+a", 5), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+a", 5), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), "0+b+b", 6), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+b", 6), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+d", 7), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+d", 7), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(10, 20)), "0+d", 18), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(15, 25)), "0+d+d", + 20), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(15, 25)), "0+a+a", + 21), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(20, 30)), "0+a+a", + 21) + ); + processors.get(2).checkAndClearProcessResult( + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(0, 10)), + "0+1+1+1%0+a+a", 9), + new KeyValueTimestamp<>(new Windowed<>("A", new TimeWindow(5, 15)), "0+1+1%0+a", + 9), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(0, 10)), + "0+2+2+2%0+b+b", 8), + new KeyValueTimestamp<>(new Windowed<>("B", new TimeWindow(5, 15)), "0+2+2%0+b", + 8), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(0, 10)), "0+4+4%0+d", + 7), + new KeyValueTimestamp<>(new Windowed<>("D", new TimeWindow(5, 15)), "0+4%0+d", + 7) + ); + } + + @SuppressWarnings("unchecked") @Test public void shouldLogAndMeterWhenSkippingNullKey() { final StreamsBuilder builder = new StreamsBuilder(); final String topic = "topic"; - builder + // TODO: remove this cast after we add emitStrategy to public api + final TimeWindowedKStreamImpl windowedStream = (TimeWindowedKStreamImpl) builder .stream(topic, Consumed.with(Serdes.String(), Serdes.String())) .groupByKey(Grouped.with(Serdes.String(), Serdes.String())) - .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(10)).advanceBy(ofMillis(5))) + .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(10)).advanceBy(ofMillis(5))); + + windowedStream + .emitStrategy(emitStrategy) .aggregate( MockInitializer.STRING_INIT, MockAggregator.toStringInstance("+"), - Materialized.>as("topic1-Canonicalized").withValueSerde(Serdes.String()) + setMaterializedCache(Materialized.>as("topic1-Canonicalized").withValueSerde(Serdes.String())) ); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamWindowAggregate.class); @@ -289,29 +486,34 @@ public void shouldLogAndMeterWhenSkippingNullKey() { } } + @SuppressWarnings("unchecked") @Test public void shouldLogAndMeterWhenSkippingExpiredWindow() { final StreamsBuilder builder = new StreamsBuilder(); final String topic = "topic"; - final KStream stream1 = builder.stream(topic, Consumed.with(Serdes.String(), Serdes.String())); - stream1.groupByKey(Grouped.with(Serdes.String(), Serdes.String())) - .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10), ofMillis(90)).advanceBy(ofMillis(5))) - .aggregate( - () -> "", - MockAggregator.toStringInstance("+"), - Materialized.>as("topic1-Canonicalized") - .withValueSerde(Serdes.String()) - 
.withCachingDisabled() - .withLoggingDisabled() - .withRetention(Duration.ofMillis(100)) - ) - .toStream() - .map((key, value) -> new KeyValue<>(key.toString(), value)) - .to("output"); + // TODO: remove this cast after we add emitStrategy to public api + final TimeWindowedKStreamImpl windowedStream = (TimeWindowedKStreamImpl) builder + .stream(topic, Consumed.with(Serdes.String(), Serdes.String())) + .groupByKey(Grouped.with(Serdes.String(), Serdes.String())) + .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10), ofMillis(90)).advanceBy(ofMillis(5))); + + windowedStream + .emitStrategy(emitStrategy) + .aggregate( + () -> "", + MockAggregator.toStringInstance("+"), + setMaterializedCache(Materialized.>as("topic1-Canonicalized") + .withValueSerde(Serdes.String()) + .withLoggingDisabled() + .withRetention(Duration.ofMillis(100))) + ) + .toStream() + .map((key, value) -> new KeyValue<>(key.toString(), value)) + .to("output"); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamWindowAggregate.class); - final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { + final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic inputTopic = driver.createInputTopic(topic, new StringSerializer(), new StringSerializer()); @@ -323,12 +525,14 @@ public void shouldLogAndMeterWhenSkippingExpiredWindow() { inputTopic.pipeInput("k", "4", 4L); inputTopic.pipeInput("k", "5", 5L); inputTopic.pipeInput("k", "6", 6L); + inputTopic.pipeInput("k", "105", 105L); + inputTopic.pipeInput("k", "106", 106L); assertLatenessMetrics( driver, is(7.0), // how many events get dropped is(100.0), // k:0 is 100ms late, since its time is 0, but it arrives at stream time 100. - is(84.875) // (0 + 100 + 99 + 98 + 97 + 96 + 95 + 94) / 8 + is(67.9) // (0 + 100 + 99 + 98 + 97 + 96 + 95 + 94 + 0) / 10 ); assertThat(appender.getMessages(), hasItems( @@ -344,30 +548,55 @@ public void shouldLogAndMeterWhenSkippingExpiredWindow() { final TestOutputTopic outputTopic = driver.createOutputTopic("output", new StringDeserializer(), new StringDeserializer()); - assertThat(outputTopic.readRecord(), equalTo(new TestRecord<>("[k@95/105]", "+100", null, 100L))); - assertThat(outputTopic.readRecord(), equalTo(new TestRecord<>("[k@100/110]", "+100", null, 100L))); - assertThat(outputTopic.readRecord(), equalTo(new TestRecord<>("[k@5/15]", "+5", null, 5L))); - assertThat(outputTopic.readRecord(), equalTo(new TestRecord<>("[k@5/15]", "+5+6", null, 6L))); + if (emitFinal) { + // Window close time is 15 when timestamp is 105 + assertThat(outputTopic.readRecord(), + equalTo(new TestRecord<>("[k@5/15]", "+5+6", null, 6L))); + assertEmittedMetrics(driver, is(1.0)); + } else { + assertThat(outputTopic.readRecord(), + equalTo(new TestRecord<>("[k@95/105]", "+100", null, 100L))); + assertThat(outputTopic.readRecord(), + equalTo(new TestRecord<>("[k@100/110]", "+100", null, 100L))); + assertThat(outputTopic.readRecord(), + equalTo(new TestRecord<>("[k@5/15]", "+5", null, 5L))); + assertThat(outputTopic.readRecord(), + equalTo(new TestRecord<>("[k@5/15]", "+5+6", null, 6L))); + assertThat(outputTopic.readRecord(), + equalTo(new TestRecord<>("[k@100/110]", "+100+105", null, 105L))); + assertThat(outputTopic.readRecord(), + equalTo(new TestRecord<>("[k@105/115]", "+105", null, 105L))); + assertThat(outputTopic.readRecord(), + equalTo(new TestRecord<>("[k@100/110]", "+100+105+106", null, 106L))); + assertThat(outputTopic.readRecord(), + equalTo(new 
TestRecord<>("[k@105/115]", "+105+106", null, 106L))); + } assertTrue(outputTopic.isEmpty()); } } + @SuppressWarnings("unchecked") @Test public void shouldLogAndMeterWhenSkippingExpiredWindowByGrace() { final StreamsBuilder builder = new StreamsBuilder(); final String topic = "topic"; - final KStream stream1 = builder.stream(topic, Consumed.with(Serdes.String(), Serdes.String())); - stream1.groupByKey(Grouped.with(Serdes.String(), Serdes.String())) - .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10), ofMillis(90L)).advanceBy(ofMillis(10))) - .aggregate( - () -> "", - MockAggregator.toStringInstance("+"), - Materialized.>as("topic1-Canonicalized").withValueSerde(Serdes.String()).withCachingDisabled().withLoggingDisabled() - ) - .toStream() - .map((key, value) -> new KeyValue<>(key.toString(), value)) - .to("output"); + // TODO: remove this cast after we add emitStrategy to public api + final TimeWindowedKStreamImpl windowedStream = (TimeWindowedKStreamImpl) builder + .stream(topic, Consumed.with(Serdes.String(), Serdes.String())) + .groupByKey(Grouped.with(Serdes.String(), Serdes.String())) + .windowedBy(TimeWindows.ofSizeAndGrace(ofMillis(10), ofMillis(90)).advanceBy(ofMillis(10))); + + windowedStream + .emitStrategy(emitStrategy) + .aggregate( + () -> "", + MockAggregator.toStringInstance("+"), + setMaterializedCache(Materialized.>as("topic1-Canonicalized").withValueSerde(Serdes.String()).withLoggingDisabled()) + ) + .toStream() + .map((key, value) -> new KeyValue<>(key.toString(), value)) + .to("output"); try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(KStreamWindowAggregate.class); final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { @@ -395,13 +624,417 @@ public void shouldLogAndMeterWhenSkippingExpiredWindowByGrace() { "Skipping record for expired window. topic=[topic] partition=[0] offset=[7] timestamp=[6] window=[0,10) expiration=[110] streamTime=[200]" )); - final TestOutputTopic outputTopic = - driver.createOutputTopic("output", new StringDeserializer(), new StringDeserializer()); - assertThat(outputTopic.readRecord(), equalTo(new TestRecord<>("[k@200/210]", "+100", null, 200L))); - assertTrue(outputTopic.isEmpty()); + if (!emitFinal) { + final TestOutputTopic outputTopic = + driver.createOutputTopic("output", new StringDeserializer(), + new StringDeserializer()); + assertThat(outputTopic.readRecord(), + equalTo(new TestRecord<>("[k@200/210]", "+100", null, 200L))); + assertTrue(outputTopic.isEmpty()); + } } } + @Test + public void shouldNotEmitFinalIfNotProgressEnough() throws IOException { + final File stateDir = TestUtils.tempDirectory(); + final long windowSize = 10L; + final Windows windows = TimeWindows.ofSizeAndGrace(ofMillis(windowSize), ofMillis(5)).advanceBy(ofMillis(5)); + + try { + // Always process + props.put(InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION, 0); + final MockInternalNewProcessorContext, Change> context = makeContext(stateDir, windowSize); + final KStreamWindowAggregate processorSupplier = new KStreamWindowAggregate<>( + windows, + WINDOW_STORE_NAME, + emitStrategy, + MockInitializer.STRING_INIT, + MockAggregator.TOSTRING_ADDER + ); + final Processor, Change> processor = processorSupplier.get(); + processor.init(context); + + context.setSystemTimeMs(0L); + processor.process(new Record<>("A", "1", 0)); + processor.process(new Record<>("B", "2", 5)); + processor.process(new Record<>("C", "3", 15)); + + List, ? extends Change>> forwarded = context.forwarded(); + List, ? 
extends Change>> expected; + if (emitFinal) { + expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(0, 10)), + new Change<>("0+1", null), 0)), + new CapturedForward<>(new Record<>(new Windowed<>("B", new TimeWindow(0, 10)), + new Change<>("0+2", null), 5)) + ); + } else { + expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(0, 10)), + new Change<>("0+1", null), 0)), + new CapturedForward<>(new Record<>(new Windowed<>("B", new TimeWindow(0, 10)), + new Change<>("0+2", null), 5)), + new CapturedForward<>(new Record<>(new Windowed<>("B", new TimeWindow(5, 15)), + new Change<>("0+2", null), 5)), + new CapturedForward<>(new Record<>(new Windowed<>("C", new TimeWindow(10, 20)), + new Change<>("0+3", null), 15)), + new CapturedForward<>(new Record<>(new Windowed<>("C", new TimeWindow(15, 25)), + new Change<>("0+3", null), 15)) + ); + } + assertThat(forwarded, is(expected)); + context.resetForwards(); + + processor.process(new Record<>("D", "4", 15)); + forwarded = context.forwarded(); + if (emitFinal) { + // None emitted because observedTime doesn't progress + assertTrue(forwarded.isEmpty()); + } else { + expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("D", new TimeWindow(10, 20)), + new Change<>("0+4", null), 15)), + new CapturedForward<>(new Record<>(new Windowed<>("D", new TimeWindow(15, 25)), + new Change<>("0+4", null), 15)) + ); + assertThat(forwarded, is(expected)); + } + context.resetForwards(); + + processor.process(new Record<>("E", "5", 19)); + forwarded = context.forwarded(); + if (emitFinal) { + // None emitted because observedTime doesn't progress enough to cover new windows + assertTrue(forwarded.isEmpty()); + } else { + expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("E", new TimeWindow(10, 20)), + new Change<>("0+5", null), 19)), + new CapturedForward<>(new Record<>(new Windowed<>("E", new TimeWindow(15, 25)), + new Change<>("0+5", null), 19)) + ); + assertThat(forwarded, is(expected)); + } + + context.getStateStore(WINDOW_STORE_NAME).close(); + } finally { + Utils.delete(stateDir); + } + } + + @Test + public void shouldEmitWithInterval0() throws IOException { + final File stateDir = TestUtils.tempDirectory(); + final long windowSize = 10L; + final Windows windows = TimeWindows.ofSizeAndGrace(ofMillis(windowSize), ofMillis(5)).advanceBy(ofMillis(5)); + + try { + // Always process + props.put(InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION, 0); + final MockInternalNewProcessorContext, Change> context = makeContext(stateDir, windowSize); + final KStreamWindowAggregate processorSupplier = new KStreamWindowAggregate<>( + windows, + WINDOW_STORE_NAME, + emitStrategy, + MockInitializer.STRING_INIT, + MockAggregator.TOSTRING_ADDER + ); + final Processor, Change> processor = processorSupplier.get(); + processor.init(context); + + context.setSystemTimeMs(0L); + processor.process(new Record<>("A", "1", 0)); + processor.process(new Record<>("A", "1", 5)); + processor.process(new Record<>("B", "2", 10)); + processor.process(new Record<>("C", "3", 15)); + processor.process(new Record<>("D", "4", 20)); + + final List, ? extends Change>> forwarded = context.forwarded(); + final List, ? 
extends Change>> expected; + if (emitFinal) { + expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(0, 10)), + new Change<>("0+1+1", null), 5)), + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(5, 15)), + new Change<>("0+1", null), 5)), + new CapturedForward<>(new Record<>(new Windowed<>("B", new TimeWindow(5, 15)), + new Change<>("0+2", null), 10)) + ); + } else { + expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(0, 10)), + new Change<>("0+1", null), 0)), + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(0, 10)), + new Change<>("0+1+1", null), 5)), + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(5, 15)), + new Change<>("0+1", null), 5)), + new CapturedForward<>(new Record<>(new Windowed<>("B", new TimeWindow(5, 15)), + new Change<>("0+2", null), 10)), + new CapturedForward<>(new Record<>(new Windowed<>("B", new TimeWindow(10, 20)), + new Change<>("0+2", null), 10)), + new CapturedForward<>(new Record<>(new Windowed<>("C", new TimeWindow(10, 20)), + new Change<>("0+3", null), 15)), + new CapturedForward<>(new Record<>(new Windowed<>("C", new TimeWindow(15, 25)), + new Change<>("0+3", null), 15)), + new CapturedForward<>(new Record<>(new Windowed<>("D", new TimeWindow(15, 25)), + new Change<>("0+4", null), 20)), + new CapturedForward<>(new Record<>(new Windowed<>("D", new TimeWindow(20, 30)), + new Change<>("0+4", null), 20)) + ); + } + assertThat(forwarded, is(expected)); + context.getStateStore(WINDOW_STORE_NAME).close(); + } finally { + Utils.delete(stateDir); + } + } + + @Test + public void shouldEmitWithLargeInterval() throws IOException { + final File stateDir = TestUtils.tempDirectory(); + final long windowSize = 10L; + final Windows windows = TimeWindows.ofSizeAndGrace(ofMillis(windowSize), ofMillis(5)).advanceBy(ofMillis(5)); + + try { + // Emit final every second + props.put(InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION, 1000L); + final MockInternalNewProcessorContext, Change> context = makeContext(stateDir, windowSize); + final KStreamWindowAggregate processorSupplier = new KStreamWindowAggregate<>( + windows, + WINDOW_STORE_NAME, + emitStrategy, + MockInitializer.STRING_INIT, + MockAggregator.TOSTRING_ADDER + ); + final Processor, Change> processor = processorSupplier.get(); + processor.init(context); + + context.setSystemTimeMs(0L); + processor.process(new Record<>("A", "1", 0)); + processor.process(new Record<>("A", "1", 5)); + processor.process(new Record<>("B", "2", 10)); + processor.process(new Record<>("C", "3", 15)); + + List, ? extends Change>> forwarded = context.forwarded(); + if (emitFinal) { + assertTrue(forwarded.isEmpty()); + } else { + final List, ? 
extends Change>> expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(0, 10)), + new Change<>("0+1", null), 0)), + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(0, 10)), + new Change<>("0+1+1", null), 5)), + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(5, 15)), + new Change<>("0+1", null), 5)), + new CapturedForward<>(new Record<>(new Windowed<>("B", new TimeWindow(5, 15)), + new Change<>("0+2", null), 10)), + new CapturedForward<>(new Record<>(new Windowed<>("B", new TimeWindow(10, 20)), + new Change<>("0+2", null), 10)), + new CapturedForward<>(new Record<>(new Windowed<>("C", new TimeWindow(10, 20)), + new Change<>("0+3", null), 15)), + new CapturedForward<>(new Record<>(new Windowed<>("C", new TimeWindow(15, 25)), + new Change<>("0+3", null), 15)) + ); + assertThat(forwarded, is(expected)); + } + context.resetForwards(); + + // Progress + context.setSystemTimeMs(10000L); + processor.process(new Record<>("D", "4", 20)); + + forwarded = context.forwarded(); + List, ? extends Change>> expected; + if (emitFinal) { + expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(0, 10)), + new Change<>("0+1+1", null), 5)), + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(5, 15)), + new Change<>("0+1", null), 5)), + new CapturedForward<>(new Record<>(new Windowed<>("B", new TimeWindow(5, 15)), + new Change<>("0+2", null), 10)) + ); + } else { + expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("D", new TimeWindow(15, 25)), + new Change<>("0+4", null), 20)), + new CapturedForward<>(new Record<>(new Windowed<>("D", new TimeWindow(20, 30)), + new Change<>("0+4", null), 20)) + ); + } + assertThat(forwarded, is(expected)); + context.resetForwards(); + + // Progress + context.setSystemTimeMs(10100L); + processor.process(new Record<>("E", "5", 40)); + + forwarded = context.forwarded(); + if (emitFinal) { + assertTrue(forwarded.isEmpty()); + } else { + expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("E", new TimeWindow(35, 45)), + new Change<>("0+5", null), 40)), + new CapturedForward<>(new Record<>(new Windowed<>("E", new TimeWindow(40, 50)), + new Change<>("0+5", null), 40)) + ); + assertThat(forwarded, is(expected)); + } + + context.getStateStore(WINDOW_STORE_NAME).close(); + } finally { + Utils.delete(stateDir); + } + } + + @Test + public void shouldEmitFromLastEmitTime() throws IOException { + final File stateDir = TestUtils.tempDirectory(); + final long windowSize = 10L; + final Windows windows = TimeWindows.ofSizeAndGrace(ofMillis(windowSize), ofMillis(5)).advanceBy(ofMillis(5)); + + try { + // Always process + props.put(InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION, 0); + final MockInternalNewProcessorContext, Change> context = makeContext(stateDir, windowSize); + final KStreamWindowAggregate processorSupplier = new KStreamWindowAggregate<>( + windows, + WINDOW_STORE_NAME, + emitStrategy, + MockInitializer.STRING_INIT, + MockAggregator.TOSTRING_ADDER + ); + final Processor, Change> processor = processorSupplier.get(); + processor.init(context); + + context.setSystemTimeMs(0L); + processor.process(new Record<>("A", "1", 0)); + processor.process(new Record<>("B", "2", 5)); + processor.process(new Record<>("C", "3", 15)); + + List, ? extends Change>> forwarded = context.forwarded(); + List, ? 
extends Change>> expected; + if (emitFinal) { + expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(0, 10)), + new Change<>("0+1", null), 0)), + new CapturedForward<>(new Record<>(new Windowed<>("B", new TimeWindow(0, 10)), + new Change<>("0+2", null), 5)) + ); + } else { + expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("A", new TimeWindow(0, 10)), + new Change<>("0+1", null), 0)), + new CapturedForward<>(new Record<>(new Windowed<>("B", new TimeWindow(0, 10)), + new Change<>("0+2", null), 5)), + new CapturedForward<>(new Record<>(new Windowed<>("B", new TimeWindow(5, 15)), + new Change<>("0+2", null), 5)), + new CapturedForward<>(new Record<>(new Windowed<>("C", new TimeWindow(10, 20)), + new Change<>("0+3", null), 15)), + new CapturedForward<>(new Record<>(new Windowed<>("C", new TimeWindow(15, 25)), + new Change<>("0+3", null), 15)) + ); + } + assertThat(forwarded, is(expected)); + context.resetForwards(); + + final Processor, Change> newProcessor = processorSupplier.get(); + newProcessor.init(context); + newProcessor.process(new Record<>("D", "4", 25)); + forwarded = context.forwarded(); + if (emitFinal) { + // Don't output old windows for new processor + expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("B", new TimeWindow(5, 15)), + new Change<>("0+2", null), 5)), + new CapturedForward<>(new Record<>(new Windowed<>("C", new TimeWindow(10, 20)), + new Change<>("0+3", null), 15)) + ); + } else { + expected = asList( + new CapturedForward<>(new Record<>(new Windowed<>("D", new TimeWindow(20, 30)), + new Change<>("0+4", null), 25)), + new CapturedForward<>(new Record<>(new Windowed<>("D", new TimeWindow(25, 35)), + new Change<>("0+4", null), 25)) + ); + } + assertThat(forwarded, is(expected)); + context.resetForwards(); + + context.getStateStore(WINDOW_STORE_NAME).close(); + } finally { + Utils.delete(stateDir); + } + } + + @Test + public void showThrowIfEmitFinalUsedWithUnlimitedWindow() { + if (emitFinal) { + final IllegalArgumentException e = assertThrows( + IllegalArgumentException.class, () -> new KStreamWindowAggregate<>( + UnlimitedWindows.of(), + WINDOW_STORE_NAME, + emitStrategy, + MockInitializer.STRING_INIT, + MockAggregator.TOSTRING_ADDER) + ); + assertThat(e.getMessage(), is("ON_WINDOW_CLOSE strategy is only supported for " + + "TimeWindows and SlidingWindows for TimeWindowedKStream")); + } else { + new KStreamWindowAggregate<>( + UnlimitedWindows.of(), + WINDOW_STORE_NAME, + emitStrategy, + MockInitializer.STRING_INIT, + MockAggregator.TOSTRING_ADDER + ); + } + } + + private TimestampedWindowStore getWindowStore(final long windowSize) { + final WindowBytesStoreSupplier supplier; + if (emitFinal) { + supplier = RocksDbIndexedTimeOrderedWindowBytesStoreSupplier.create( + WINDOW_STORE_NAME, + Duration.ofDays(1), + Duration.ofMillis(windowSize), + false, + false + ); + } else { + supplier = Stores.persistentTimestampedWindowStore( + WINDOW_STORE_NAME, + Duration.ofDays(1), + Duration.ofMillis(windowSize), + false + ); + } + + return Stores.timestampedWindowStoreBuilder(supplier, Serdes.String(), Serdes.String()) + .withLoggingDisabled() // Changelog is not supported by MockProcessorContext. + .withCachingDisabled() // Caching is not supported by MockProcessorContext. 
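The processor-level tests above (shouldNotEmitFinalIfNotProgressEnough through shouldEmitFromLastEmitTime) all hinge on two conditions: a window [start, end) becomes eligible for a final emit only once observed stream time reaches end + grace, and an emit pass is only attempted when at least EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION of wall-clock time has elapsed since the previous pass (0 ms in most tests here, 1000 ms in shouldEmitWithLargeInterval). The class below is an illustrative model of those two gates only, with invented names; it is not the production code path in KStreamWindowAggregate.

    /**
     * Illustrative model of the two emit-final gates exercised by the tests above;
     * the class and method names are made up for this sketch.
     */
    final class EmitFinalGates {

        private final long gracePeriodMs;
        private final long emitIntervalMs;
        private long lastEmitWallClockMs = -1L;

        EmitFinalGates(final long gracePeriodMs, final long emitIntervalMs) {
            this.gracePeriodMs = gracePeriodMs;
            this.emitIntervalMs = emitIntervalMs;
        }

        /**
         * A window [start, end) is closed once observed stream time reaches end + grace.
         * With size 10, advance 5 and grace 5, the record "C"@15 pushes stream time to 15,
         * closing [0, 10) (10 + 5 <= 15) but not [5, 15) (15 + 5 > 15), which is why only
         * the A and B results for [0, 10) are forwarded in shouldNotEmitFinalIfNotProgressEnough.
         */
        boolean windowClosed(final long windowEndMs, final long observedStreamTimeMs) {
            return observedStreamTimeMs >= windowEndMs + gracePeriodMs;
        }

        /**
         * An emit pass only runs if at least emitIntervalMs of wall-clock time has elapsed
         * since the previous pass, which is why shouldEmitWithLargeInterval forwards the
         * already-closed windows only after setSystemTimeMs(10000L).
         */
        boolean shouldAttemptEmit(final long wallClockMs) {
            if (lastEmitWallClockMs >= 0 && wallClockMs - lastEmitWallClockMs < emitIntervalMs) {
                return false;
            }
            lastEmitWallClockMs = wallClockMs;
            return true;
        }
    }

Unlimited windows never close under this model, which is also the reason showThrowIfEmitFinalUsedWithUnlimitedWindow expects an IllegalArgumentException when ON_WINDOW_CLOSE is combined with UnlimitedWindows.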
+ .build(); + } + + private MockInternalNewProcessorContext, Change> makeContext(final File stateDir, final long windowSize) { + final MockInternalNewProcessorContext, Change> context = new MockInternalNewProcessorContext<>( + props, + new TaskId(0, 0), + stateDir + ); + + context.setCurrentNode(new ProcessorNode("testNode")); + + // Create, initialize, and register the state store. + final TimestampedWindowStore store = getWindowStore(windowSize); + store.init(context.getStateStoreContext(), store); + context.getStateStoreContext().register(store, null); + + return context; + } + private void assertLatenessMetrics(final TopologyTestDriver driver, final Matcher dropTotal, final Matcher maxLateness, @@ -456,4 +1089,40 @@ private void assertLatenessMetrics(final TopologyTestDriver driver, assertThat(driver.metrics().get(latenessAvgMetric).metricValue(), avgLateness); } + private void assertEmittedMetrics(final TopologyTestDriver driver, + final Matcher emittedTotal) { + + final MetricName emittedTotalMetric; + final MetricName emittedRateMetric; + emittedTotalMetric = new MetricName( + "window-aggregate-final-emit-total", + "stream-processor-node-metrics", + "The total number of emit final records", + mkMap( + mkEntry("thread-id", threadId), + mkEntry("task-id", "0_0"), + mkEntry("processor-node-id", "KSTREAM-AGGREGATE-0000000001") + ) + ); + emittedRateMetric = new MetricName( + "window-aggregate-final-emit-rate", + "stream-processor-node-metrics", + "The average number of emit final records per second", + mkMap( + mkEntry("thread-id", threadId), + mkEntry("task-id", "0_0"), + mkEntry("processor-node-id", "KSTREAM-AGGREGATE-0000000001") + ) + ); + + assertThat(driver.metrics().get(emittedTotalMetric).metricValue(), emittedTotal); + assertThat(driver.metrics().get(emittedRateMetric).metricValue(), not(0.0)); + } + + private Materialized setMaterializedCache(final Materialized materialized) { + if (withCache) { + return materialized.withCachingEnabled(); + } + return materialized.withCachingDisabled(); + } } \ No newline at end of file diff --git a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImplTest.java b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImplTest.java index f5f3ff88e..74fe24806 100644 --- a/streams/src/test/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImplTest.java +++ b/streams/src/test/java/org/apache/kafka/streams/kstream/internals/TimeWindowedKStreamImplTest.java @@ -17,20 +17,26 @@ package org.apache.kafka.streams.kstream.internals; +import java.util.ArrayList; +import java.util.Collection; import org.apache.kafka.common.serialization.Serdes; import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.Bytes; import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.KeyValueTimestamp; import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.StreamsConfig.InternalConfig; import org.apache.kafka.streams.TopologyTestDriver; import org.apache.kafka.streams.kstream.Consumed; +import org.apache.kafka.streams.kstream.EmitStrategy; +import org.apache.kafka.streams.kstream.EmitStrategy.StrategyType; import org.apache.kafka.streams.kstream.Grouped; import org.apache.kafka.streams.kstream.KStream; import org.apache.kafka.streams.kstream.Materialized; import org.apache.kafka.streams.kstream.Named; -import org.apache.kafka.streams.kstream.TimeWindowedKStream; import 
org.apache.kafka.streams.kstream.TimeWindows; import org.apache.kafka.streams.kstream.Windowed; +import org.apache.kafka.streams.processor.StateStore; import org.apache.kafka.streams.state.ValueAndTimestamp; import org.apache.kafka.streams.state.WindowStore; import org.apache.kafka.streams.TestInputTopic; @@ -42,34 +48,71 @@ import org.junit.Before; import org.junit.Test; -import java.util.Arrays; import java.util.List; import java.util.Properties; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; import static java.time.Duration.ofMillis; import static java.time.Instant.ofEpochMilli; +import static java.util.Arrays.asList; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThrows; +@RunWith(Parameterized.class) public class TimeWindowedKStreamImplTest { private static final String TOPIC = "input"; + private static final Windowed KEY_1_WINDOW_0 = new Windowed<>("1", new TimeWindow(0L, 500L)); + private static final Windowed KEY_1_WINDOW_1 = new Windowed<>("1", new TimeWindow(500L, 1000L)); + private static final Windowed KEY_2_WINDOW_1 = new Windowed<>("2", new TimeWindow(500L, 1000L)); + private static final Windowed KEY_2_WINDOW_2 = new Windowed<>("2", new TimeWindow(1000L, 1500L)); + private final StreamsBuilder builder = new StreamsBuilder(); private final Properties props = StreamsTestUtils.getStreamsConfig(Serdes.String(), Serdes.String()); - private TimeWindowedKStream windowedStream; + private TimeWindowedKStreamImpl windowedStream; + + @Parameter + public StrategyType type; + + @Parameter(1) + public boolean withCache; + + @Parameter(2) + public EmitStrategy emitStrategy; + + private boolean emitFinal; + @Parameterized.Parameters(name = "{0}_{1}") + public static Collection getKeySchema() { + return asList(new Object[][] { + {StrategyType.ON_WINDOW_UPDATE, true, EmitStrategy.onWindowUpdate()}, + {StrategyType.ON_WINDOW_UPDATE, false, EmitStrategy.onWindowUpdate()}, + {StrategyType.ON_WINDOW_CLOSE, true, EmitStrategy.onWindowClose()}, + {StrategyType.ON_WINDOW_CLOSE, false, EmitStrategy.onWindowClose()} + }); + } + + @SuppressWarnings("unchecked") @Before public void before() { + emitFinal = type.equals(StrategyType.ON_WINDOW_CLOSE); + // Set interval to 0 so that it always tries to emit + props.setProperty(InternalConfig.EMIT_INTERVAL_MS_KSTREAMS_WINDOWED_AGGREGATION, "0"); final KStream stream = builder.stream(TOPIC, Consumed.with(Serdes.String(), Serdes.String())); - windowedStream = stream. + // TODO: remove this cast https://issues.apache.org/jira/browse/KAFKA-13800 + windowedStream = (TimeWindowedKStreamImpl) (stream. 
groupByKey(Grouped.with(Serdes.String(), Serdes.String())) - .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(500L))); + .windowedBy(TimeWindows.ofSizeWithNoGrace(ofMillis(500L)))); } @Test public void shouldCountWindowed() { final MockApiProcessorSupplier, Long, Void, Void> supplier = new MockApiProcessorSupplier<>(); windowedStream + .emitStrategy(emitStrategy) .count() .toStream() .process(supplier); @@ -77,24 +120,37 @@ public void shouldCountWindowed() { try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); } - assertThat( - supplier.theCapturedProcessor().lastValueAndTimestampPerKey() - .get(new Windowed<>("1", new TimeWindow(0L, 500L))), - equalTo(ValueAndTimestamp.make(2L, 15L))); - assertThat( - supplier.theCapturedProcessor().lastValueAndTimestampPerKey() - .get(new Windowed<>("2", new TimeWindow(500L, 1000L))), - equalTo(ValueAndTimestamp.make(2L, 550L))); - assertThat( - supplier.theCapturedProcessor().lastValueAndTimestampPerKey() - .get(new Windowed<>("1", new TimeWindow(500L, 1000L))), - equalTo(ValueAndTimestamp.make(1L, 500L))); + final ArrayList, Long>> processed = supplier.theCapturedProcessor().processed(); + + if (emitFinal) { + assertEquals( + asList( + new KeyValueTimestamp<>(KEY_1_WINDOW_0, 2L, 15L), + new KeyValueTimestamp<>(KEY_1_WINDOW_1, 1L, 500L), + new KeyValueTimestamp<>(KEY_2_WINDOW_1, 2L, 550L) + ), + processed + ); + } else { + assertEquals( + asList( + new KeyValueTimestamp<>(KEY_1_WINDOW_0, 1L, 10L), + new KeyValueTimestamp<>(KEY_1_WINDOW_0, 2L, 15L), + new KeyValueTimestamp<>(KEY_1_WINDOW_1, 1L, 500L), + new KeyValueTimestamp<>(KEY_2_WINDOW_1, 1L, 550L), + new KeyValueTimestamp<>(KEY_2_WINDOW_1, 2L, 550L), + new KeyValueTimestamp<>(KEY_2_WINDOW_2, 1L, 1000L) + ), + processed + ); + } } @Test public void shouldReduceWindowed() { final MockApiProcessorSupplier, String, Void, Void> supplier = new MockApiProcessorSupplier<>(); windowedStream + .emitStrategy(emitStrategy) .reduce(MockReducer.STRING_ADDER) .toStream() .process(supplier); @@ -102,54 +158,81 @@ public void shouldReduceWindowed() { try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); } - assertThat( - supplier.theCapturedProcessor().lastValueAndTimestampPerKey() - .get(new Windowed<>("1", new TimeWindow(0L, 500L))), - equalTo(ValueAndTimestamp.make("1+2", 15L))); - assertThat( - supplier.theCapturedProcessor().lastValueAndTimestampPerKey() - .get(new Windowed<>("2", new TimeWindow(500L, 1000L))), - equalTo(ValueAndTimestamp.make("10+20", 550L))); - assertThat( - supplier.theCapturedProcessor().lastValueAndTimestampPerKey() - .get(new Windowed<>("1", new TimeWindow(500L, 1000L))), - equalTo(ValueAndTimestamp.make("3", 500L))); + + final ArrayList, String>> processed = supplier.theCapturedProcessor().processed(); + if (emitFinal) { + assertEquals( + asList( + new KeyValueTimestamp<>(KEY_1_WINDOW_0, "1+2", 15L), + new KeyValueTimestamp<>(KEY_1_WINDOW_1, "3", 500L), + new KeyValueTimestamp<>(KEY_2_WINDOW_1, "10+20", 550L) + ), + processed + ); + } else { + assertEquals( + asList( + new KeyValueTimestamp<>(KEY_1_WINDOW_0, "1", 10L), + new KeyValueTimestamp<>(KEY_1_WINDOW_0, "1+2", 15L), + new KeyValueTimestamp<>(KEY_1_WINDOW_1, "3", 500L), + new KeyValueTimestamp<>(KEY_2_WINDOW_1, "10", 550L), + new KeyValueTimestamp<>(KEY_2_WINDOW_1, "10+20", 550L), + new KeyValueTimestamp<>(KEY_2_WINDOW_2, "30", 1000L) + ), + processed + ); + } } @Test public void shouldAggregateWindowed() { final 
MockApiProcessorSupplier, String, Void, Void> supplier = new MockApiProcessorSupplier<>(); windowedStream + .emitStrategy(emitStrategy) .aggregate( MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, - Materialized.with(Serdes.String(), Serdes.String())) + setMaterializedCache(Materialized.with(Serdes.String(), Serdes.String()))) .toStream() .process(supplier); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); } - assertThat( - supplier.theCapturedProcessor().lastValueAndTimestampPerKey() - .get(new Windowed<>("1", new TimeWindow(0L, 500L))), - equalTo(ValueAndTimestamp.make("0+1+2", 15L))); - assertThat( - supplier.theCapturedProcessor().lastValueAndTimestampPerKey() - .get(new Windowed<>("2", new TimeWindow(500L, 1000L))), - equalTo(ValueAndTimestamp.make("0+10+20", 550L))); - assertThat( - supplier.theCapturedProcessor().lastValueAndTimestampPerKey() - .get(new Windowed<>("1", new TimeWindow(500L, 1000L))), - equalTo(ValueAndTimestamp.make("0+3", 500L))); + + final ArrayList, String>> processed = supplier.theCapturedProcessor().processed(); + if (emitFinal) { + assertEquals( + asList( + new KeyValueTimestamp<>(KEY_1_WINDOW_0, "0+1+2", 15L), + new KeyValueTimestamp<>(KEY_1_WINDOW_1, "0+3", 500L), + new KeyValueTimestamp<>(KEY_2_WINDOW_1, "0+10+20", 550L) + ), + processed + ); + } else { + assertEquals( + asList( + new KeyValueTimestamp<>(KEY_1_WINDOW_0, "0+1", 10L), + new KeyValueTimestamp<>(KEY_1_WINDOW_0, "0+1+2", 15L), + new KeyValueTimestamp<>(KEY_1_WINDOW_1, "0+3", 500L), + new KeyValueTimestamp<>(KEY_2_WINDOW_1, "0+10", 550L), + new KeyValueTimestamp<>(KEY_2_WINDOW_1, "0+10+20", 550L), + new KeyValueTimestamp<>(KEY_2_WINDOW_2, "0+30", 1000L) + ), + processed + ); + } } @Test public void shouldMaterializeCount() { - windowedStream.count( - Materialized.>as("count-store") - .withKeySerde(Serdes.String()) - .withValueSerde(Serdes.Long())); + windowedStream + .emitStrategy(emitStrategy) + .count( + setMaterializedCache(Materialized.>as("count-store") + .withKeySerde(Serdes.String()) + .withValueSerde(Serdes.Long()))); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); @@ -158,10 +241,11 @@ public void shouldMaterializeCount() { final List, Long>> data = StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); - assertThat(data, equalTo(Arrays.asList( + assertThat(data, equalTo(asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 500)), 2L), KeyValue.pair(new Windowed<>("1", new TimeWindow(500, 1000)), 1L), - KeyValue.pair(new Windowed<>("2", new TimeWindow(500, 1000)), 2L)))); + KeyValue.pair(new Windowed<>("2", new TimeWindow(500, 1000)), 2L), + KeyValue.pair(new Windowed<>("2", new TimeWindow(1000, 1500)), 1L)))); } { final WindowStore> windowStore = @@ -169,10 +253,11 @@ public void shouldMaterializeCount() { final List, ValueAndTimestamp>> data = StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); - assertThat(data, equalTo(Arrays.asList( + assertThat(data, equalTo(asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 500)), ValueAndTimestamp.make(2L, 15L)), KeyValue.pair(new Windowed<>("1", new TimeWindow(500, 1000)), ValueAndTimestamp.make(1L, 500L)), - KeyValue.pair(new Windowed<>("2", new TimeWindow(500, 1000)), ValueAndTimestamp.make(2L, 550L))))); + KeyValue.pair(new Windowed<>("2", new TimeWindow(500, 1000)), ValueAndTimestamp.make(2L, 550L)), + KeyValue.pair(new 
Windowed<>("2", new TimeWindow(1000, 1500)), ValueAndTimestamp.make(1L, 1000L))))); } } } @@ -181,9 +266,9 @@ public void shouldMaterializeCount() { public void shouldMaterializeReduced() { windowedStream.reduce( MockReducer.STRING_ADDER, - Materialized.>as("reduced") + setMaterializedCache(Materialized.>as("reduced") .withKeySerde(Serdes.String()) - .withValueSerde(Serdes.String())); + .withValueSerde(Serdes.String()))); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); @@ -192,20 +277,22 @@ public void shouldMaterializeReduced() { final List, String>> data = StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); - assertThat(data, equalTo(Arrays.asList( + assertThat(data, equalTo(asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 500)), "1+2"), KeyValue.pair(new Windowed<>("1", new TimeWindow(500, 1000)), "3"), - KeyValue.pair(new Windowed<>("2", new TimeWindow(500, 1000)), "10+20")))); + KeyValue.pair(new Windowed<>("2", new TimeWindow(500, 1000)), "10+20"), + KeyValue.pair(new Windowed<>("2", new TimeWindow(1000, 1500)), "30")))); } { final WindowStore> windowStore = driver.getTimestampedWindowStore("reduced"); final List, ValueAndTimestamp>> data = StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); - assertThat(data, equalTo(Arrays.asList( + assertThat(data, equalTo(asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 500)), ValueAndTimestamp.make("1+2", 15L)), KeyValue.pair(new Windowed<>("1", new TimeWindow(500, 1000)), ValueAndTimestamp.make("3", 500L)), - KeyValue.pair(new Windowed<>("2", new TimeWindow(500, 1000)), ValueAndTimestamp.make("10+20", 550L))))); + KeyValue.pair(new Windowed<>("2", new TimeWindow(500, 1000)), ValueAndTimestamp.make("10+20", 550L)), + KeyValue.pair(new Windowed<>("2", new TimeWindow(1000, 1500)), ValueAndTimestamp.make("30", 1000L))))); } } } @@ -215,9 +302,9 @@ public void shouldMaterializeAggregated() { windowedStream.aggregate( MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER, - Materialized.>as("aggregated") + setMaterializedCache(Materialized.>as("aggregated") .withKeySerde(Serdes.String()) - .withValueSerde(Serdes.String())); + .withValueSerde(Serdes.String()))); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { processData(driver); @@ -226,20 +313,22 @@ public void shouldMaterializeAggregated() { final List, String>> data = StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); - assertThat(data, equalTo(Arrays.asList( + assertThat(data, equalTo(asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 500)), "0+1+2"), KeyValue.pair(new Windowed<>("1", new TimeWindow(500, 1000)), "0+3"), - KeyValue.pair(new Windowed<>("2", new TimeWindow(500, 1000)), "0+10+20")))); + KeyValue.pair(new Windowed<>("2", new TimeWindow(500, 1000)), "0+10+20"), + KeyValue.pair(new Windowed<>("2", new TimeWindow(1000, 1500)), "0+30")))); } { final WindowStore> windowStore = driver.getTimestampedWindowStore("aggregated"); final List, ValueAndTimestamp>> data = StreamsTestUtils.toList(windowStore.fetch("1", "2", ofEpochMilli(0), ofEpochMilli(1000L))); - assertThat(data, equalTo(Arrays.asList( + assertThat(data, equalTo(asList( KeyValue.pair(new Windowed<>("1", new TimeWindow(0, 500)), ValueAndTimestamp.make("0+1+2", 15L)), KeyValue.pair(new Windowed<>("1", new TimeWindow(500, 1000)), ValueAndTimestamp.make("0+3", 500L)), - 
KeyValue.pair(new Windowed<>("2", new TimeWindow(500, 1000)), ValueAndTimestamp.make("0+10+20", 550L))))); + KeyValue.pair(new Windowed<>("2", new TimeWindow(500, 1000)), ValueAndTimestamp.make("0+10+20", 550L)), + KeyValue.pair(new Windowed<>("2", new TimeWindow(1000, 1500)), ValueAndTimestamp.make("0+30", 1000L))))); } } } @@ -264,7 +353,7 @@ public void shouldThrowNullPointerOnMaterializedAggregateIfInitializerIsNull() { assertThrows(NullPointerException.class, () -> windowedStream.aggregate( null, MockAggregator.TOSTRING_ADDER, - Materialized.as("store"))); + setMaterializedCache(Materialized.as("store")))); } @Test @@ -272,7 +361,7 @@ public void shouldThrowNullPointerOnMaterializedAggregateIfAggregatorIsNull() { assertThrows(NullPointerException.class, () -> windowedStream.aggregate( MockInitializer.STRING_INIT, null, - Materialized.as("store"))); + setMaterializedCache(Materialized.as("store")))); } @SuppressWarnings("unchecked") @@ -288,7 +377,7 @@ public void shouldThrowNullPointerOnMaterializedAggregateIfMaterializedIsNull() public void shouldThrowNullPointerOnMaterializedReduceIfReducerIsNull() { assertThrows(NullPointerException.class, () -> windowedStream.reduce( null, - Materialized.as("store"))); + setMaterializedCache(Materialized.as("store")))); } @Test @@ -319,6 +408,13 @@ private void processData(final TopologyTestDriver driver) { inputTopic.pipeInput("1", "3", 500L); inputTopic.pipeInput("2", "10", 550L); inputTopic.pipeInput("2", "20", 500L); + inputTopic.pipeInput("2", "30", 1000L); } + private Materialized setMaterializedCache(final Materialized materialized) { + if (withCache) { + return materialized.withCachingEnabled(); + } + return materialized.withCachingDisabled(); + } } diff --git a/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedWindowStoreTest.java b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedWindowStoreTest.java new file mode 100644 index 000000000..b9ef24d3c --- /dev/null +++ b/streams/src/test/java/org/apache/kafka/streams/state/internals/TimeOrderedWindowStoreTest.java @@ -0,0 +1,1245 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.kafka.streams.state.internals; + +import java.util.Collection; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.common.header.internals.RecordHeaders; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.serialization.Serdes; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.utils.Bytes; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.streams.KeyValue; +import org.apache.kafka.streams.StreamsBuilder; +import org.apache.kafka.streams.StreamsConfig; +import org.apache.kafka.streams.TestInputTopic; +import org.apache.kafka.streams.TopologyTestDriver; +import org.apache.kafka.streams.errors.InvalidStateStoreException; +import org.apache.kafka.streams.kstream.Consumed; +import org.apache.kafka.streams.kstream.TimeWindowedDeserializer; +import org.apache.kafka.streams.kstream.Windowed; +import org.apache.kafka.streams.kstream.internals.TimeWindow; +import org.apache.kafka.streams.processor.ProcessorContext; +import org.apache.kafka.streams.processor.StateStoreContext; +import org.apache.kafka.streams.processor.api.Processor; +import org.apache.kafka.streams.processor.api.Record; +import org.apache.kafka.streams.processor.internals.MockStreamsMetrics; +import org.apache.kafka.streams.processor.internals.ProcessorRecordContext; +import org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender; +import org.apache.kafka.streams.query.Position; +import org.apache.kafka.streams.state.KeyValueIterator; +import org.apache.kafka.streams.state.StoreBuilder; +import org.apache.kafka.streams.state.Stores; +import org.apache.kafka.streams.state.TimestampedWindowStore; +import org.apache.kafka.streams.state.ValueAndTimestamp; +import org.apache.kafka.streams.state.WindowStore; +import org.apache.kafka.streams.state.WindowStoreIterator; +import org.apache.kafka.streams.state.internals.PrefixedWindowKeySchemas.KeyFirstWindowKeySchema; +import org.apache.kafka.streams.state.internals.PrefixedWindowKeySchemas.TimeFirstWindowKeySchema; +import org.apache.kafka.test.InternalMockProcessorContext; +import org.apache.kafka.test.TestUtils; +import org.easymock.EasyMock; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.nio.charset.StandardCharsets; +import java.time.Duration; +import java.time.Instant; +import java.util.Arrays; +import java.util.List; +import java.util.Properties; +import java.util.UUID; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; + +import static java.time.Duration.ofHours; +import static java.time.Duration.ofMinutes; +import static java.time.Instant.ofEpochMilli; +import static java.util.Arrays.asList; +import static org.apache.kafka.common.utils.Utils.mkEntry; +import static org.apache.kafka.common.utils.Utils.mkMap; +import static org.apache.kafka.streams.state.internals.ThreadCacheTest.memoryCacheEntrySize; +import static org.apache.kafka.test.StreamsTestUtils.toList; +import static org.apache.kafka.test.StreamsTestUtils.verifyAllWindowedKeyValues; +import static org.apache.kafka.test.StreamsTestUtils.verifyKeyValueList; +import static org.apache.kafka.test.StreamsTestUtils.verifyWindowedKeyValue; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.hasItem; +import static org.hamcrest.MatcherAssert.assertThat; 
+import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.assertTrue; + +@RunWith(Parameterized.class) +public class TimeOrderedWindowStoreTest { + + private static final int MAX_CACHE_SIZE_BYTES = 300; + private static final long DEFAULT_TIMESTAMP = 10L; + private static final Long WINDOW_SIZE = 10L; + private static final long SEGMENT_INTERVAL = 100L; + private final static String TOPIC = "topic"; + private static final String CACHE_NAMESPACE = "0_0-store-name"; + + private InternalMockProcessorContext context; + private RocksDBTimeOrderedSegmentedBytesStore bytesStore; + private WindowStore underlyingStore; + private TimeOrderedCachingWindowStore cachingStore; + private CacheFlushListenerStub, String> cacheListener; + private ThreadCache cache; + private TimeFirstWindowKeySchema baseKeySchema; + + @Parameter + public boolean hasIndex; + + @Parameterized.Parameters(name = "{0}") + public static Collection data() { + return asList(new Object[][] { + {true}, + {false} + }); + } + + @Before + public void setUp() { + baseKeySchema = new TimeFirstWindowKeySchema(); + bytesStore = new RocksDBTimeOrderedSegmentedBytesStore("test", "metrics-scope", 100, SEGMENT_INTERVAL, hasIndex); + underlyingStore = new RocksDBTimeOrderedWindowStore(bytesStore, false, WINDOW_SIZE); + final TimeWindowedDeserializer keyDeserializer = new TimeWindowedDeserializer<>(new StringDeserializer(), WINDOW_SIZE); + keyDeserializer.setIsChangelogTopic(true); + cacheListener = new CacheFlushListenerStub<>(keyDeserializer, new StringDeserializer()); + cachingStore = new TimeOrderedCachingWindowStore(underlyingStore, WINDOW_SIZE, SEGMENT_INTERVAL); + cachingStore.setFlushListener(cacheListener, false); + cache = new ThreadCache(new LogContext("testCache "), MAX_CACHE_SIZE_BYTES, new MockStreamsMetrics(new Metrics())); + context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache); + context.setRecordContext(new ProcessorRecordContext(DEFAULT_TIMESTAMP, 0, 0, TOPIC, new RecordHeaders())); + cachingStore.init((StateStoreContext) context, cachingStore); + } + + @After + public void closeStore() { + cachingStore.close(); + } + + @SuppressWarnings("deprecation") + @Test + public void shouldDelegateDeprecatedInit() { + final RocksDBTimeOrderedWindowStore inner = EasyMock.mock(RocksDBTimeOrderedWindowStore.class); + EasyMock.expect(inner.hasIndex()).andReturn(hasIndex); + EasyMock.replay(inner); + final TimeOrderedCachingWindowStore outer = new TimeOrderedCachingWindowStore(inner, WINDOW_SIZE, SEGMENT_INTERVAL); + + EasyMock.reset(inner); + EasyMock.expect(inner.name()).andStubReturn("store"); + inner.init((ProcessorContext) context, outer); + EasyMock.expectLastCall(); + EasyMock.replay(inner); + outer.init((ProcessorContext) context, outer); + EasyMock.verify(inner); + } + + @Test + public void shouldDelegateInit() { + final RocksDBTimeOrderedWindowStore inner = EasyMock.mock(RocksDBTimeOrderedWindowStore.class); + EasyMock.expect(inner.hasIndex()).andReturn(hasIndex); + EasyMock.replay(inner); + final TimeOrderedCachingWindowStore outer = new TimeOrderedCachingWindowStore(inner, WINDOW_SIZE, SEGMENT_INTERVAL); + + EasyMock.reset(inner); + EasyMock.expect(inner.name()).andStubReturn("store"); + inner.init((StateStoreContext) context, outer); + EasyMock.expectLastCall(); + 
EasyMock.replay(inner); + outer.init((StateStoreContext) context, outer); + EasyMock.verify(inner); + } + + @Test + public void shouldThrowIfWrongStore() { + final RocksDBTimestampedWindowStore innerWrong = EasyMock.mock(RocksDBTimestampedWindowStore.class); + final Exception e = assertThrows(IllegalArgumentException.class, + () -> new TimeOrderedCachingWindowStore(innerWrong, WINDOW_SIZE, SEGMENT_INTERVAL)); + assertThat(e.getMessage(), + containsString("TimeOrderedCachingWindowStore only supports RocksDBTimeOrderedWindowStore backed store")); + + final RocksDBTimeOrderedWindowStore inner = EasyMock.mock(RocksDBTimeOrderedWindowStore.class); + // Nothing happens + new TimeOrderedCachingWindowStore(inner, WINDOW_SIZE, SEGMENT_INTERVAL); + } + + @Test + public void shouldNotReturnDuplicatesInRanges() { + final StreamsBuilder builder = new StreamsBuilder(); + + final StoreBuilder> storeBuilder = Stores.timestampedWindowStoreBuilder( + RocksDbIndexedTimeOrderedWindowBytesStoreSupplier.create( + "store-name", + ofHours(1L), + ofMinutes(1), + false, + hasIndex + ), Serdes.String(), Serdes.String()) + .withCachingEnabled(); + + builder.addStateStore(storeBuilder); + + builder.stream(TOPIC, + Consumed.with(Serdes.String(), Serdes.String())) + .process(() -> new Processor() { + private WindowStore> store; + private int numRecordsProcessed; + private org.apache.kafka.streams.processor.api.ProcessorContext context; + + @Override + public void init(final org.apache.kafka.streams.processor.api.ProcessorContext processorContext) { + this.context = processorContext; + this.store = processorContext.getStateStore("store-name"); + int count = 0; + + try (final KeyValueIterator, ValueAndTimestamp> all = store.all()) { + while (all.hasNext()) { + count++; + all.next(); + } + } + + assertThat(count, equalTo(0)); + } + + @Override + public void process(final Record record) { + int count = 0; + + try (final KeyValueIterator, ValueAndTimestamp> all = store.all()) { + while (all.hasNext()) { + count++; + all.next(); + } + } + + assertThat(count, equalTo(numRecordsProcessed)); + + store.put(record.value(), ValueAndTimestamp.make(record.value(), record.timestamp()), record.timestamp()); + + numRecordsProcessed++; + + context.forward(record); + } + + @Override + public void close() { + } + }, "store-name"); + + final Properties streamsConfiguration = new Properties(); + streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); + streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName()); + streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()); + streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 10 * 1000L); + + final Instant initialWallClockTime = Instant.ofEpochMilli(0L); + final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), streamsConfiguration, initialWallClockTime); + + final TestInputTopic inputTopic = driver.createInputTopic(TOPIC, + Serdes.String().serializer(), + Serdes.String().serializer(), + initialWallClockTime, + Duration.ZERO); + + for (int i = 0; i < 5; i++) { + inputTopic.pipeInput(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + driver.advanceWallClockTime(Duration.ofSeconds(10)); + inputTopic.advanceTime(Duration.ofSeconds(10)); + for (int i = 0; i < 5; i++) { + inputTopic.pipeInput(UUID.randomUUID().toString(), 
UUID.randomUUID().toString()); + } + driver.advanceWallClockTime(Duration.ofSeconds(10)); + inputTopic.advanceTime(Duration.ofSeconds(10)); + for (int i = 0; i < 5; i++) { + inputTopic.pipeInput(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + driver.advanceWallClockTime(Duration.ofSeconds(10)); + inputTopic.advanceTime(Duration.ofSeconds(10)); + for (int i = 0; i < 5; i++) { + inputTopic.pipeInput(UUID.randomUUID().toString(), UUID.randomUUID().toString()); + } + + driver.close(); + } + + @Test + public void shouldPutFetchFromCache() { + cachingStore.put(bytesKey("a"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("b"), bytesValue("b"), DEFAULT_TIMESTAMP); + + assertThat(cachingStore.fetch(bytesKey("a"), 10), equalTo(bytesValue("a"))); + assertThat(cachingStore.fetch(bytesKey("b"), 10), equalTo(bytesValue("b"))); + assertThat(cachingStore.fetch(bytesKey("c"), 10), equalTo(null)); + assertThat(cachingStore.fetch(bytesKey("a"), 0), equalTo(null)); + + try (final WindowStoreIterator a = cachingStore.fetch(bytesKey("a"), ofEpochMilli(10), ofEpochMilli(10)); + final WindowStoreIterator b = cachingStore.fetch(bytesKey("b"), ofEpochMilli(10), ofEpochMilli(10))) { + verifyKeyValue(a.next(), DEFAULT_TIMESTAMP, "a"); + verifyKeyValue(b.next(), DEFAULT_TIMESTAMP, "b"); + assertFalse(a.hasNext()); + assertFalse(b.hasNext()); + final int expectedSize = hasIndex ? 4 : 2; + assertEquals(expectedSize, cache.size()); + } + } + + @Test + public void shouldMatchPositionAfterPutWithFlushListener() { + cachingStore.setFlushListener(record -> { }, false); + shouldMatchPositionAfterPut(); + } + + @Test + public void shouldMatchPositionAfterPutWithoutFlushListener() { + cachingStore.setFlushListener(null, false); + shouldMatchPositionAfterPut(); + } + + private void shouldMatchPositionAfterPut() { + context.setRecordContext(new ProcessorRecordContext(0, 1, 0, "", new RecordHeaders())); + cachingStore.put(bytesKey("key1"), bytesValue("value1"), DEFAULT_TIMESTAMP); + context.setRecordContext(new ProcessorRecordContext(0, 2, 0, "", new RecordHeaders())); + cachingStore.put(bytesKey("key2"), bytesValue("value2"), DEFAULT_TIMESTAMP); + + // Position should correspond to the last record's context, not the current context. 
+ context.setRecordContext( + new ProcessorRecordContext(0, 3, 0, "", new RecordHeaders()) + ); + + // the caching window store doesn't maintain a separate + // position because it never serves queries from the cache + assertEquals(Position.emptyPosition(), cachingStore.getPosition()); + assertEquals(Position.emptyPosition(), underlyingStore.getPosition()); + + cachingStore.flush(); + + assertEquals( + Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 2L))))), + cachingStore.getPosition() + ); + assertEquals( + Position.fromMap(mkMap(mkEntry("", mkMap(mkEntry(0, 2L))))), + underlyingStore.getPosition() + ); + } + + private void verifyKeyValue(final KeyValue next, + final long expectedKey, + final String expectedValue) { + assertThat(next.key, equalTo(expectedKey)); + assertThat(next.value, equalTo(bytesValue(expectedValue))); + } + + private static byte[] bytesValue(final String value) { + return value.getBytes(); + } + + private static Bytes bytesKey(final String key) { + return Bytes.wrap(key.getBytes()); + } + + private String stringFrom(final byte[] from) { + return Serdes.String().deserializer().deserialize("", from); + } + + @Test + public void shouldPutFetchRangeFromCache() { + cachingStore.put(bytesKey("a"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("b"), bytesValue("b"), DEFAULT_TIMESTAMP); + + try (final KeyValueIterator, byte[]> iterator = + cachingStore.fetch(bytesKey("a"), bytesKey("b"), ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP))) { + final List> expectedKeys = Arrays.asList( + new Windowed<>(bytesKey("a"), new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)), + new Windowed<>(bytesKey("b"), new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)) + ); + + final List expectedValues = Arrays.asList("a", "b"); + + verifyAllWindowedKeyValues(iterator, expectedKeys, expectedValues); + final int expectedSize = hasIndex ? 
4 : 2; + assertEquals(expectedSize, cache.size()); + } + } + + @Test + public void shouldPutFetchRangeFromCacheForNullKeyFrom() { + cachingStore.put(bytesKey("a"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("b"), bytesValue("b"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("c"), bytesValue("c"), DEFAULT_TIMESTAMP + 10L); + cachingStore.put(bytesKey("d"), bytesValue("d"), DEFAULT_TIMESTAMP + 20L); + cachingStore.put(bytesKey("e"), bytesValue("e"), DEFAULT_TIMESTAMP + 20L); + + try (final KeyValueIterator, byte[]> iterator = + cachingStore.fetch(null, bytesKey("d"), ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP + 20L))) { + final List> expectedKeys = Arrays.asList( + new Windowed<>(bytesKey("a"), new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)), + new Windowed<>(bytesKey("b"), new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)), + new Windowed<>(bytesKey("c"), new TimeWindow(DEFAULT_TIMESTAMP + 10L, DEFAULT_TIMESTAMP + 10L + WINDOW_SIZE)), + new Windowed<>(bytesKey("d"), new TimeWindow(DEFAULT_TIMESTAMP + 20L, DEFAULT_TIMESTAMP + 20L + WINDOW_SIZE)) + ); + + final List expectedValues = Arrays.asList("a", "b", "c", "d"); + + verifyAllWindowedKeyValues(iterator, expectedKeys, expectedValues); + } + } + + @Test + public void shouldPutFetchRangeFromCacheForNullKeyTo() { + cachingStore.put(bytesKey("a"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("b"), bytesValue("b"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("c"), bytesValue("c"), DEFAULT_TIMESTAMP + 10L); + cachingStore.put(bytesKey("d"), bytesValue("d"), DEFAULT_TIMESTAMP + 20L); + cachingStore.put(bytesKey("e"), bytesValue("e"), DEFAULT_TIMESTAMP + 20L); + + try (final KeyValueIterator, byte[]> iterator = + cachingStore.fetch(bytesKey("b"), null, ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP + 20L))) { + final List> expectedKeys = Arrays.asList( + new Windowed<>(bytesKey("b"), new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)), + new Windowed<>(bytesKey("c"), new TimeWindow(DEFAULT_TIMESTAMP + 10L, DEFAULT_TIMESTAMP + 10L + WINDOW_SIZE)), + new Windowed<>(bytesKey("d"), new TimeWindow(DEFAULT_TIMESTAMP + 20L, DEFAULT_TIMESTAMP + 20L + WINDOW_SIZE)), + new Windowed<>(bytesKey("e"), new TimeWindow(DEFAULT_TIMESTAMP + 20L, DEFAULT_TIMESTAMP + 20L + WINDOW_SIZE)) + ); + + final List expectedValues = Arrays.asList("b", "c", "d", "e"); + + verifyAllWindowedKeyValues(iterator, expectedKeys, expectedValues); + } + } + + @Test + public void shouldPutFetchRangeFromCacheForNullKeyFromKeyTo() { + cachingStore.put(bytesKey("a"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("b"), bytesValue("b"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("c"), bytesValue("c"), DEFAULT_TIMESTAMP + 10L); + cachingStore.put(bytesKey("d"), bytesValue("d"), DEFAULT_TIMESTAMP + 20L); + cachingStore.put(bytesKey("e"), bytesValue("e"), DEFAULT_TIMESTAMP + 20L); + + try (final KeyValueIterator, byte[]> iterator = + cachingStore.fetch(null, null, ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP + 20L))) { + final List> expectedKeys = Arrays.asList( + new Windowed<>(bytesKey("a"), new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)), + new Windowed<>(bytesKey("b"), new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)), + new Windowed<>(bytesKey("c"), new TimeWindow(DEFAULT_TIMESTAMP + 10L, DEFAULT_TIMESTAMP + 10L + WINDOW_SIZE)), + new Windowed<>(bytesKey("d"), new 
TimeWindow(DEFAULT_TIMESTAMP + 20L, DEFAULT_TIMESTAMP + 20L + WINDOW_SIZE)), + new Windowed<>(bytesKey("e"), new TimeWindow(DEFAULT_TIMESTAMP + 20L, DEFAULT_TIMESTAMP + 20L + WINDOW_SIZE)) + ); + + final List expectedValues = Arrays.asList("a", "b", "c", "d", "e"); + + verifyAllWindowedKeyValues(iterator, expectedKeys, expectedValues); + } + } + + @Test + public void shouldPutBackwardFetchRangeFromCacheForNullKeyFrom() { + cachingStore.put(bytesKey("a"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("b"), bytesValue("b"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("c"), bytesValue("c"), DEFAULT_TIMESTAMP + 10L); + cachingStore.put(bytesKey("d"), bytesValue("d"), DEFAULT_TIMESTAMP + 20L); + cachingStore.put(bytesKey("e"), bytesValue("e"), DEFAULT_TIMESTAMP + 20L); + + try (final KeyValueIterator, byte[]> iterator = + cachingStore.backwardFetch(null, bytesKey("c"), ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP + 20L))) { + final List> expectedKeys = Arrays.asList( + new Windowed<>(bytesKey("c"), new TimeWindow(DEFAULT_TIMESTAMP + 10L, DEFAULT_TIMESTAMP + 10L + WINDOW_SIZE)), + new Windowed<>(bytesKey("b"), new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)), + new Windowed<>(bytesKey("a"), new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)) + ); + + final List expectedValues = Arrays.asList("c", "b", "a"); + + verifyAllWindowedKeyValues(iterator, expectedKeys, expectedValues); + } + } + + @Test + public void shouldPutBackwardFetchRangeFromCacheForNullKeyTo() { + cachingStore.put(bytesKey("a"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("b"), bytesValue("b"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("c"), bytesValue("c"), DEFAULT_TIMESTAMP + 10L); + cachingStore.put(bytesKey("d"), bytesValue("d"), DEFAULT_TIMESTAMP + 20L); + cachingStore.put(bytesKey("e"), bytesValue("e"), DEFAULT_TIMESTAMP + 20L); + + try (final KeyValueIterator, byte[]> iterator = + cachingStore.backwardFetch(bytesKey("c"), null, ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP + 20L))) { + final List> expectedKeys = Arrays.asList( + new Windowed<>(bytesKey("e"), new TimeWindow(DEFAULT_TIMESTAMP + 20L, DEFAULT_TIMESTAMP + 20L + WINDOW_SIZE)), + new Windowed<>(bytesKey("d"), new TimeWindow(DEFAULT_TIMESTAMP + 20L, DEFAULT_TIMESTAMP + 20L + WINDOW_SIZE)), + new Windowed<>(bytesKey("c"), new TimeWindow(DEFAULT_TIMESTAMP + 10L, DEFAULT_TIMESTAMP + 10L + WINDOW_SIZE)) + ); + + final List expectedValues = Arrays.asList("e", "d", "c"); + + verifyAllWindowedKeyValues(iterator, expectedKeys, expectedValues); + } + } + + @Test + public void shouldPutBackwardFetchRangeFromCacheForNullKeyFromKeyTo() { + cachingStore.put(bytesKey("a"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("b"), bytesValue("b"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("c"), bytesValue("c"), DEFAULT_TIMESTAMP + 10L); + cachingStore.put(bytesKey("d"), bytesValue("d"), DEFAULT_TIMESTAMP + 20L); + cachingStore.put(bytesKey("e"), bytesValue("e"), DEFAULT_TIMESTAMP + 20L); + + try (final KeyValueIterator, byte[]> iterator = + cachingStore.backwardFetch(null, null, ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP + 20L))) { + final List> expectedKeys = Arrays.asList( + new Windowed<>(bytesKey("e"), new TimeWindow(DEFAULT_TIMESTAMP + 20L, DEFAULT_TIMESTAMP + 20L + WINDOW_SIZE)), + new Windowed<>(bytesKey("d"), new TimeWindow(DEFAULT_TIMESTAMP + 20L, DEFAULT_TIMESTAMP + 20L + WINDOW_SIZE)), + new 
Windowed<>(bytesKey("c"), new TimeWindow(DEFAULT_TIMESTAMP + 10L, DEFAULT_TIMESTAMP + 10L + WINDOW_SIZE)), + new Windowed<>(bytesKey("b"), new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)), + new Windowed<>(bytesKey("a"), new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)) + ); + + final List expectedValues = Arrays.asList("e", "d", "c", "b", "a"); + + verifyAllWindowedKeyValues(iterator, expectedKeys, expectedValues); + } + } + + @Test + public void shouldGetAllFromCache() { + cachingStore.put(bytesKey("a"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("b"), bytesValue("b"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("c"), bytesValue("c"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("d"), bytesValue("d"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("e"), bytesValue("e"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("f"), bytesValue("f"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("g"), bytesValue("g"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("h"), bytesValue("h"), DEFAULT_TIMESTAMP); + + try (final KeyValueIterator, byte[]> iterator = cachingStore.all()) { + final String[] array = {"a", "b", "c", "d", "e", "f", "g", "h"}; + for (final String s : array) { + verifyWindowedKeyValue( + iterator.next(), + new Windowed<>(bytesKey(s), new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)), + s); + } + assertFalse(iterator.hasNext()); + } + } + + @Test + public void shouldGetAllBackwardFromCache() { + cachingStore.put(bytesKey("a"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("b"), bytesValue("b"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("c"), bytesValue("c"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("d"), bytesValue("d"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("e"), bytesValue("e"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("f"), bytesValue("f"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("g"), bytesValue("g"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("h"), bytesValue("h"), DEFAULT_TIMESTAMP); + + try (final KeyValueIterator, byte[]> iterator = cachingStore.backwardAll()) { + final String[] array = {"h", "g", "f", "e", "d", "c", "b", "a"}; + for (final String s : array) { + verifyWindowedKeyValue( + iterator.next(), + new Windowed<>(bytesKey(s), new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)), + s); + } + assertFalse(iterator.hasNext()); + } + } + + @Test + public void shouldFetchAllWithinTimestampRange() { + final String[] array = {"a", "b", "c", "d", "e", "f", "g", "h"}; + for (int i = 0; i < array.length; i++) { + cachingStore.put(bytesKey(array[i]), bytesValue(array[i]), i); + } + + try (final KeyValueIterator, byte[]> iterator = + cachingStore.fetchAll(ofEpochMilli(0), ofEpochMilli(7))) { + for (int i = 0; i < array.length; i++) { + final String str = array[i]; + verifyWindowedKeyValue( + iterator.next(), + new Windowed<>(bytesKey(str), new TimeWindow(i, i + WINDOW_SIZE)), + str); + } + assertFalse(iterator.hasNext()); + } + + try (final KeyValueIterator, byte[]> iterator1 = + cachingStore.fetchAll(ofEpochMilli(2), ofEpochMilli(4))) { + for (int i = 2; i <= 4; i++) { + final String str = array[i]; + verifyWindowedKeyValue( + iterator1.next(), + new Windowed<>(bytesKey(str), new TimeWindow(i, i + WINDOW_SIZE)), + str); + } + assertFalse(iterator1.hasNext()); + } + + try (final KeyValueIterator, byte[]> iterator2 = + cachingStore.fetchAll(ofEpochMilli(5), ofEpochMilli(7))) { + for (int i = 5; i <= 7; i++) { + final 
String str = array[i]; + verifyWindowedKeyValue( + iterator2.next(), + new Windowed<>(bytesKey(str), new TimeWindow(i, i + WINDOW_SIZE)), + str); + } + assertFalse(iterator2.hasNext()); + } + } + + @Test + public void shouldFetchAllBackwardWithinTimestampRange() { + final String[] array = {"a", "b", "c", "d", "e", "f", "g", "h"}; + for (int i = 0; i < array.length; i++) { + cachingStore.put(bytesKey(array[i]), bytesValue(array[i]), i); + } + + try (final KeyValueIterator, byte[]> iterator = + cachingStore.backwardFetchAll(ofEpochMilli(0), ofEpochMilli(7))) { + for (int i = array.length - 1; i >= 0; i--) { + final String str = array[i]; + verifyWindowedKeyValue( + iterator.next(), + new Windowed<>(bytesKey(str), new TimeWindow(i, i + WINDOW_SIZE)), + str); + } + assertFalse(iterator.hasNext()); + } + + try (final KeyValueIterator, byte[]> iterator1 = + cachingStore.backwardFetchAll(ofEpochMilli(2), ofEpochMilli(4))) { + for (int i = 4; i >= 2; i--) { + final String str = array[i]; + verifyWindowedKeyValue( + iterator1.next(), + new Windowed<>(bytesKey(str), new TimeWindow(i, i + WINDOW_SIZE)), + str); + } + assertFalse(iterator1.hasNext()); + } + + try (final KeyValueIterator, byte[]> iterator2 = + cachingStore.backwardFetchAll(ofEpochMilli(5), ofEpochMilli(7))) { + for (int i = 7; i >= 5; i--) { + final String str = array[i]; + verifyWindowedKeyValue( + iterator2.next(), + new Windowed<>(bytesKey(str), new TimeWindow(i, i + WINDOW_SIZE)), + str); + } + assertFalse(iterator2.hasNext()); + } + } + + @Test + public void shouldFlushEvictedItemsIntoUnderlyingStore() { + final int added = addItemsToCache(); + // all dirty entries should have been flushed + try (final KeyValueIterator iter = bytesStore.fetch( + Bytes.wrap("0".getBytes(StandardCharsets.UTF_8)), + DEFAULT_TIMESTAMP, + DEFAULT_TIMESTAMP)) { + final KeyValue next = iter.next(); + assertEquals(DEFAULT_TIMESTAMP, baseKeySchema.segmentTimestamp(next.key)); + assertArrayEquals("0".getBytes(), next.value); + assertFalse(iter.hasNext()); + assertEquals(added - 1, cache.size()); + } + } + + @Test + public void shouldForwardDirtyItemsWhenFlushCalled() { + final Windowed windowedKey = + new Windowed<>("1", new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)); + cachingStore.put(bytesKey("1"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.flush(); + assertEquals("a", cacheListener.forwarded.get(windowedKey).newValue); + assertNull(cacheListener.forwarded.get(windowedKey).oldValue); + } + + @Test + public void shouldSetFlushListener() { + assertTrue(cachingStore.setFlushListener(null, true)); + assertTrue(cachingStore.setFlushListener(null, false)); + } + + @Test + public void shouldForwardOldValuesWhenEnabled() { + cachingStore.setFlushListener(cacheListener, true); + final Windowed windowedKey = + new Windowed<>("1", new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)); + cachingStore.put(bytesKey("1"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("1"), bytesValue("b"), DEFAULT_TIMESTAMP); + cachingStore.flush(); + assertEquals("b", cacheListener.forwarded.get(windowedKey).newValue); + assertNull(cacheListener.forwarded.get(windowedKey).oldValue); + cacheListener.forwarded.clear(); + cachingStore.put(bytesKey("1"), bytesValue("c"), DEFAULT_TIMESTAMP); + cachingStore.flush(); + assertEquals("c", cacheListener.forwarded.get(windowedKey).newValue); + assertEquals("b", cacheListener.forwarded.get(windowedKey).oldValue); + cachingStore.put(bytesKey("1"), null, DEFAULT_TIMESTAMP); + 
cachingStore.flush(); + assertNull(cacheListener.forwarded.get(windowedKey).newValue); + assertEquals("c", cacheListener.forwarded.get(windowedKey).oldValue); + cacheListener.forwarded.clear(); + cachingStore.put(bytesKey("1"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("1"), bytesValue("b"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("1"), null, DEFAULT_TIMESTAMP); + cachingStore.flush(); + assertNull(cacheListener.forwarded.get(windowedKey)); + cacheListener.forwarded.clear(); + } + + @Test + public void shouldForwardOldValuesWhenDisabled() { + final Windowed windowedKey = + new Windowed<>("1", new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)); + cachingStore.put(bytesKey("1"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("1"), bytesValue("b"), DEFAULT_TIMESTAMP); + cachingStore.flush(); + assertEquals("b", cacheListener.forwarded.get(windowedKey).newValue); + assertNull(cacheListener.forwarded.get(windowedKey).oldValue); + cachingStore.put(bytesKey("1"), bytesValue("c"), DEFAULT_TIMESTAMP); + cachingStore.flush(); + assertEquals("c", cacheListener.forwarded.get(windowedKey).newValue); + assertNull(cacheListener.forwarded.get(windowedKey).oldValue); + cachingStore.put(bytesKey("1"), null, DEFAULT_TIMESTAMP); + cachingStore.flush(); + assertNull(cacheListener.forwarded.get(windowedKey).newValue); + assertNull(cacheListener.forwarded.get(windowedKey).oldValue); + cacheListener.forwarded.clear(); + cachingStore.put(bytesKey("1"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("1"), bytesValue("b"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("1"), null, DEFAULT_TIMESTAMP); + cachingStore.flush(); + assertNull(cacheListener.forwarded.get(windowedKey)); + cacheListener.forwarded.clear(); + } + + @Test + public void shouldForwardDirtyItemToListenerWhenEvicted() { + final int numRecords = addItemsToCache(); + assertEquals(numRecords, cacheListener.forwarded.size()); + } + + @Test + public void shouldTakeValueFromCacheIfSameTimestampFlushedToRocks() { + cachingStore.put(bytesKey("1"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.flush(); + cachingStore.put(bytesKey("1"), bytesValue("b"), DEFAULT_TIMESTAMP); + + try (final WindowStoreIterator fetch = + cachingStore.fetch(bytesKey("1"), ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP))) { + verifyKeyValue(fetch.next(), DEFAULT_TIMESTAMP, "b"); + assertFalse(fetch.hasNext()); + } + } + + @Test + public void shouldIterateAcrossWindows() { + cachingStore.put(bytesKey("1"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("1"), bytesValue("b"), DEFAULT_TIMESTAMP + WINDOW_SIZE); + + try (final WindowStoreIterator fetch = + cachingStore.fetch(bytesKey("1"), ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP + WINDOW_SIZE))) { + verifyKeyValue(fetch.next(), DEFAULT_TIMESTAMP, "a"); + verifyKeyValue(fetch.next(), DEFAULT_TIMESTAMP + WINDOW_SIZE, "b"); + assertFalse(fetch.hasNext()); + } + } + + @Test + public void shouldIterateBackwardAcrossWindows() { + cachingStore.put(bytesKey("1"), bytesValue("a"), DEFAULT_TIMESTAMP); + cachingStore.put(bytesKey("1"), bytesValue("b"), DEFAULT_TIMESTAMP + WINDOW_SIZE); + + try (final WindowStoreIterator fetch = + cachingStore.backwardFetch(bytesKey("1"), ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP + WINDOW_SIZE))) { + verifyKeyValue(fetch.next(), DEFAULT_TIMESTAMP + WINDOW_SIZE, "b"); + verifyKeyValue(fetch.next(), DEFAULT_TIMESTAMP, "a"); + 
assertFalse(fetch.hasNext()); + } + } + + @Test + public void shouldIterateCacheAndStore() { + final Bytes key = Bytes.wrap("1".getBytes()); + bytesStore.put(TimeFirstWindowKeySchema.toStoreKeyBinary(key, DEFAULT_TIMESTAMP, 0), "a".getBytes()); + cachingStore.put(key, bytesValue("b"), DEFAULT_TIMESTAMP + WINDOW_SIZE); + try (final WindowStoreIterator fetch = + cachingStore.fetch(bytesKey("1"), ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP + WINDOW_SIZE))) { + verifyKeyValue(fetch.next(), DEFAULT_TIMESTAMP, "a"); + verifyKeyValue(fetch.next(), DEFAULT_TIMESTAMP + WINDOW_SIZE, "b"); + assertFalse(fetch.hasNext()); + } + } + + @Test + public void shouldIterateBackwardCacheAndStore() { + final Bytes key = Bytes.wrap("1".getBytes()); + bytesStore.put(TimeFirstWindowKeySchema.toStoreKeyBinary(key, DEFAULT_TIMESTAMP, 0), "a".getBytes()); + cachingStore.put(key, bytesValue("b"), DEFAULT_TIMESTAMP + WINDOW_SIZE); + try (final WindowStoreIterator fetch = + cachingStore.backwardFetch(bytesKey("1"), ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP + WINDOW_SIZE))) { + verifyKeyValue(fetch.next(), DEFAULT_TIMESTAMP + WINDOW_SIZE, "b"); + verifyKeyValue(fetch.next(), DEFAULT_TIMESTAMP, "a"); + assertFalse(fetch.hasNext()); + } + } + + @Test + public void shouldIterateCacheAndStoreKeyRange() { + final Bytes key = Bytes.wrap("1".getBytes()); + bytesStore.put(TimeFirstWindowKeySchema.toStoreKeyBinary(key, DEFAULT_TIMESTAMP, 0), "a".getBytes()); + cachingStore.put(key, bytesValue("b"), DEFAULT_TIMESTAMP + WINDOW_SIZE); + + try (final KeyValueIterator, byte[]> fetchRange = + cachingStore.fetch(key, bytesKey("2"), ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP + WINDOW_SIZE))) { + verifyWindowedKeyValue( + fetchRange.next(), + new Windowed<>(key, new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)), + "a"); + verifyWindowedKeyValue( + fetchRange.next(), + new Windowed<>(key, new TimeWindow(DEFAULT_TIMESTAMP + WINDOW_SIZE, DEFAULT_TIMESTAMP + WINDOW_SIZE + WINDOW_SIZE)), + "b"); + assertFalse(fetchRange.hasNext()); + } + } + + @Test + public void shouldIterateBackwardCacheAndStoreKeyRange() { + final Bytes key = Bytes.wrap("1".getBytes()); + bytesStore.put(TimeFirstWindowKeySchema.toStoreKeyBinary(key, DEFAULT_TIMESTAMP, 0), "a".getBytes()); + cachingStore.put(key, bytesValue("b"), DEFAULT_TIMESTAMP + WINDOW_SIZE); + + try (final KeyValueIterator, byte[]> fetchRange = + cachingStore.backwardFetch(key, bytesKey("2"), ofEpochMilli(DEFAULT_TIMESTAMP), ofEpochMilli(DEFAULT_TIMESTAMP + WINDOW_SIZE))) { + verifyWindowedKeyValue( + fetchRange.next(), + new Windowed<>(key, new TimeWindow(DEFAULT_TIMESTAMP + WINDOW_SIZE, DEFAULT_TIMESTAMP + WINDOW_SIZE + WINDOW_SIZE)), + "b"); + verifyWindowedKeyValue( + fetchRange.next(), + new Windowed<>(key, new TimeWindow(DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP + WINDOW_SIZE)), + "a"); + assertFalse(fetchRange.hasNext()); + } + } + + @Test + public void shouldClearNamespaceCacheOnClose() { + cachingStore.put(bytesKey("a"), bytesValue("a"), 0L); + final int size = hasIndex ? 
2 : 1;
+        assertEquals(size, cache.size());
+        cachingStore.close();
+        assertEquals(0, cache.size());
+    }
+
+    @Test
+    public void shouldThrowIfTryingToFetchFromClosedCachingStore() {
+        cachingStore.close();
+        assertThrows(InvalidStateStoreException.class, () -> cachingStore.fetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(10)));
+    }
+
+    @Test
+    public void shouldThrowIfTryingToFetchRangeFromClosedCachingStore() {
+        cachingStore.close();
+        assertThrows(InvalidStateStoreException.class, () -> cachingStore.fetch(bytesKey("a"), bytesKey("b"), ofEpochMilli(0), ofEpochMilli(10)));
+    }
+
+    @Test
+    public void shouldThrowIfTryingToWriteToClosedCachingStore() {
+        cachingStore.close();
+        assertThrows(InvalidStateStoreException.class, () -> cachingStore.put(bytesKey("a"), bytesValue("a"), 0L));
+    }
+
+    @Test
+    public void shouldSkipNonExistBaseKeyInCache() {
+        cachingStore.put(bytesKey("aa"), bytesValue("0002"), 0);
+
+        final SegmentedCacheFunction indexCacheFunction = new SegmentedCacheFunction(new KeyFirstWindowKeySchema(), SEGMENT_INTERVAL);
+
+        final Bytes key = bytesKey("a");
+        final byte[] value = bytesValue("0001");
+        final Bytes cacheIndexKey = indexCacheFunction.cacheKey(KeyFirstWindowKeySchema.toStoreKeyBinary(key, 1, 0));
+        final String cacheName = context.taskId() + "-test";
+
+        // Only put the index key into the cache; the base record goes to the underlying store below
+        cache.put(cacheName,
+            cacheIndexKey,
+            new LRUCacheEntry(
+                new byte[0],
+                new RecordHeaders(),
+                true,
+                context.offset(),
+                context.timestamp(),
+                context.partition(),
+                "")
+        );
+
+        underlyingStore.put(key, value, 1);
+
+        if (hasIndex) {
+            verifyKeyValueList(
+                asList(
+                    windowedPair("a", "0001", 1),
+                    windowedPair("aa", "0002", 0)
+                ),
+                toList(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0),
+                    ofEpochMilli(Long.MAX_VALUE)))
+            );
+        } else {
+            verifyKeyValueList(
+                asList(
+                    windowedPair("aa", "0002", 0),
+                    windowedPair("a", "0001", 1)
+                ),
+                toList(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0),
+                    ofEpochMilli(Long.MAX_VALUE)))
+            );
+        }
+    }
+
+    @Test
+    public void shouldFetchAndIterateOverExactKeys() {
+        cachingStore.put(bytesKey("a"), bytesValue("0001"), 0);
+        cachingStore.put(bytesKey("aa"), bytesValue("0002"), 0);
+        cachingStore.put(bytesKey("a"), bytesValue("0003"), 1);
+        cachingStore.put(bytesKey("aa"), bytesValue("0004"), 1);
+        cachingStore.put(bytesKey("a"), bytesValue("0005"), SEGMENT_INTERVAL);
+
+        final List<KeyValue<Long, byte[]>> expected = asList(
+            KeyValue.pair(0L, bytesValue("0001")),
+            KeyValue.pair(1L, bytesValue("0003")),
+            KeyValue.pair(SEGMENT_INTERVAL, bytesValue("0005"))
+        );
+        final List<KeyValue<Long, byte[]>> actual =
+            toList(cachingStore.fetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE)));
+        verifyKeyValueList(expected, actual);
+    }
+
+    @Test
+    public void shouldBackwardFetchAndIterateOverExactKeys() {
+        cachingStore.put(bytesKey("a"), bytesValue("0001"), 0);
+        cachingStore.put(bytesKey("aa"), bytesValue("0002"), 0);
+        cachingStore.put(bytesKey("a"), bytesValue("0003"), 1);
+        cachingStore.put(bytesKey("aa"), bytesValue("0004"), 1);
+        cachingStore.put(bytesKey("a"), bytesValue("0005"), SEGMENT_INTERVAL);
+
+        final List<KeyValue<Long, byte[]>> expected = asList(
+            KeyValue.pair(SEGMENT_INTERVAL, bytesValue("0005")),
+            KeyValue.pair(1L, bytesValue("0003")),
+            KeyValue.pair(0L, bytesValue("0001"))
+        );
+        final List<KeyValue<Long, byte[]>> actual =
+            toList(cachingStore.backwardFetch(bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE)));
+        verifyKeyValueList(expected, actual);
+    }
+
+    @Test
+    public void shouldFetchAndIterateOverKeyRange() {
+        cachingStore.put(bytesKey("a"), bytesValue("0001"), 0);
+        cachingStore.put(bytesKey("aa"), bytesValue("0002"), 0);
+        cachingStore.put(bytesKey("a"), bytesValue("0003"), 1);
+        cachingStore.put(bytesKey("aa"), bytesValue("0004"), 1);
+        cachingStore.put(bytesKey("a"), bytesValue("0005"), SEGMENT_INTERVAL);
+
+        verifyKeyValueList(
+            asList(
+                windowedPair("a", "0001", 0),
+                windowedPair("a", "0003", 1),
+                windowedPair("a", "0005", SEGMENT_INTERVAL)
+            ),
+            toList(cachingStore.fetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE)))
+        );
+
+        verifyKeyValueList(
+            asList(
+                windowedPair("aa", "0002", 0),
+                windowedPair("aa", "0004", 1)),
+            toList(cachingStore.fetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE)))
+        );
+
+        if (hasIndex) {
+            verifyKeyValueList(
+                asList(
+                    windowedPair("a", "0001", 0),
+                    windowedPair("a", "0003", 1),
+                    windowedPair("aa", "0002", 0),
+                    windowedPair("aa", "0004", 1),
+                    windowedPair("a", "0005", SEGMENT_INTERVAL)
+                ),
+                toList(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0),
+                    ofEpochMilli(Long.MAX_VALUE)))
+            );
+        } else {
+            verifyKeyValueList(
+                asList(
+                    windowedPair("a", "0001", 0),
+                    windowedPair("aa", "0002", 0),
+                    windowedPair("a", "0003", 1),
+                    windowedPair("aa", "0004", 1),
+                    windowedPair("a", "0005", SEGMENT_INTERVAL)
+                ),
+                toList(cachingStore.fetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0),
+                    ofEpochMilli(Long.MAX_VALUE)))
+            );
+        }
+    }
+
+    @Test
+    public void shouldFetchAndIterateOverKeyBackwardRange() {
+        cachingStore.put(bytesKey("a"), bytesValue("0001"), 0);
+        cachingStore.put(bytesKey("aa"), bytesValue("0002"), 0);
+        cachingStore.put(bytesKey("a"), bytesValue("0003"), 1);
+        cachingStore.put(bytesKey("aa"), bytesValue("0004"), 1);
+        cachingStore.put(bytesKey("a"), bytesValue("0005"), SEGMENT_INTERVAL);
+
+        verifyKeyValueList(
+            asList(
+                windowedPair("a", "0005", SEGMENT_INTERVAL),
+                windowedPair("a", "0003", 1),
+                windowedPair("a", "0001", 0)
+            ),
+            toList(cachingStore.backwardFetch(bytesKey("a"), bytesKey("a"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE)))
+        );
+
+        verifyKeyValueList(
+            asList(
+                windowedPair("aa", "0004", 1),
+                windowedPair("aa", "0002", 0)),
+            toList(cachingStore.backwardFetch(bytesKey("aa"), bytesKey("aa"), ofEpochMilli(0), ofEpochMilli(Long.MAX_VALUE)))
+        );
+
+        if (!hasIndex) {
+            verifyKeyValueList(
+                // Ordered by timestamp when there is no index
+                asList(
+                    windowedPair("a", "0005", SEGMENT_INTERVAL),
+                    windowedPair("aa", "0004", 1),
+                    windowedPair("a", "0003", 1),
+                    windowedPair("aa", "0002", 0),
+                    windowedPair("a", "0001", 0)
+                ),
+                toList(cachingStore.backwardFetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0),
+                    ofEpochMilli(Long.MAX_VALUE)))
+            );
+        } else {
+            verifyKeyValueList(
+                asList(
+                    // First because it is in a later segment
+                    windowedPair("a", "0005", SEGMENT_INTERVAL),
+                    windowedPair("aa", "0004", 1),
+                    windowedPair("aa", "0002", 0),
+                    windowedPair("a", "0003", 1),
+                    windowedPair("a", "0001", 0)
+                ),
+                toList(cachingStore.backwardFetch(bytesKey("a"), bytesKey("aa"), ofEpochMilli(0),
+                    ofEpochMilli(Long.MAX_VALUE)))
+            );
+        }
+    }
+
+    @Test
+    public void shouldReturnSameResultsForSingleKeyFetchAndEqualKeyRangeFetch() {
+        cachingStore.put(bytesKey("a"), bytesValue("0001"), 0);
+        cachingStore.put(bytesKey("aa"), bytesValue("0002"), 1);
+        cachingStore.put(bytesKey("aa"), bytesValue("0003"), 2);
+        cachingStore.put(bytesKey("aaa"), bytesValue("0004"), 3);
+
+        try (final WindowStoreIterator<byte[]> singleKeyIterator = cachingStore.fetch(bytesKey("aa"), 0L, 5L);
+             final KeyValueIterator<Windowed<Bytes>, byte[]> keyRangeIterator = cachingStore.fetch(bytesKey("aa"), bytesKey("aa"), 0L, 5L)) {
+
+            assertEquals(stringFrom(singleKeyIterator.next().value), stringFrom(keyRangeIterator.next().value));
+            assertEquals(stringFrom(singleKeyIterator.next().value), stringFrom(keyRangeIterator.next().value));
+            assertFalse(singleKeyIterator.hasNext());
+            assertFalse(keyRangeIterator.hasNext());
+        }
+    }
+
+    @Test
+    public void shouldReturnSameResultsForSingleKeyFetchAndEqualKeyRangeBackwardFetch() {
+        cachingStore.put(bytesKey("a"), bytesValue("0001"), 0);
+        cachingStore.put(bytesKey("aa"), bytesValue("0002"), 1);
+        cachingStore.put(bytesKey("aa"), bytesValue("0003"), 2);
+        cachingStore.put(bytesKey("aaa"), bytesValue("0004"), 3);
+
+        try (final WindowStoreIterator<byte[]> singleKeyIterator =
+                 cachingStore.backwardFetch(bytesKey("aa"), Instant.ofEpochMilli(0L), Instant.ofEpochMilli(5L));
+             final KeyValueIterator<Windowed<Bytes>, byte[]> keyRangeIterator =
+                 cachingStore.backwardFetch(bytesKey("aa"), bytesKey("aa"), Instant.ofEpochMilli(0L), Instant.ofEpochMilli(5L))) {
+
+            assertEquals(stringFrom(singleKeyIterator.next().value), stringFrom(keyRangeIterator.next().value));
+            assertEquals(stringFrom(singleKeyIterator.next().value), stringFrom(keyRangeIterator.next().value));
+            assertFalse(singleKeyIterator.hasNext());
+            assertFalse(keyRangeIterator.hasNext());
+        }
+    }
+
+    @Test
+    public void shouldThrowNullPointerExceptionOnPutNullKey() {
+        assertThrows(NullPointerException.class, () -> cachingStore.put(null, bytesValue("anyValue"), 0L));
+    }
+
+    @Test
+    public void shouldNotThrowNullPointerExceptionOnPutNullValue() {
+        cachingStore.put(bytesKey("a"), null, 0L);
+    }
+
+    @Test
+    public void shouldThrowNullPointerExceptionOnFetchNullKey() {
+        assertThrows(NullPointerException.class, () -> cachingStore.fetch(null, ofEpochMilli(1L), ofEpochMilli(2L)));
+    }
+
+    @Test
+    public void shouldNotThrowInvalidRangeExceptionWithNegativeFromKey() {
+        final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1));
+        final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1));
+
+        try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(TimeOrderedCachingWindowStore.class);
+             final KeyValueIterator<Windowed<Bytes>, byte[]> iterator = cachingStore.fetch(keyFrom, keyTo, 0L, 10L)) {
+            assertFalse(iterator.hasNext());
+
+            final List<String> messages = appender.getMessages();
+            assertThat(
+                messages,
+                hasItem("Returning empty iterator for fetch with invalid key range: from > to." +
+                    " This may be due to range arguments set in the wrong order, " +
+                    "or serdes that don't preserve ordering when lexicographically comparing the serialized bytes." +
+                    " Note that the built-in numerical serdes do not follow this for negative numbers")
+            );
+        }
+    }
+
+    @Test
+    public void shouldNotThrowInvalidBackwardRangeExceptionWithNegativeFromKey() {
+        final Bytes keyFrom = Bytes.wrap(Serdes.Integer().serializer().serialize("", -1));
+        final Bytes keyTo = Bytes.wrap(Serdes.Integer().serializer().serialize("", 1));
+
+        try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister(TimeOrderedCachingWindowStore.class);
+             final KeyValueIterator<Windowed<Bytes>, byte[]> iterator =
+                 cachingStore.backwardFetch(keyFrom, keyTo, Instant.ofEpochMilli(0L), Instant.ofEpochMilli(10L))) {
+            assertFalse(iterator.hasNext());
+
+            final List<String> messages = appender.getMessages();
+            assertThat(
+                messages,
+                hasItem("Returning empty iterator for fetch with invalid key range: from > to." +
+                    " This may be due to serdes that don't preserve ordering when lexicographically comparing the serialized bytes." +
+                    " Note that the built-in numerical serdes do not follow this for negative numbers")
+            );
+        }
+    }
+
+    @Test
+    public void shouldCloseCacheAndWrappedStoreAfterErrorDuringCacheFlush() {
+        setUpCloseTests();
+        EasyMock.reset(cache);
+        cache.flush(CACHE_NAMESPACE);
+        EasyMock.expectLastCall().andThrow(new RuntimeException("Simulating an error on flush"));
+        cache.close(CACHE_NAMESPACE);
+        EasyMock.replay(cache);
+        EasyMock.reset(underlyingStore);
+        underlyingStore.close();
+        EasyMock.replay(underlyingStore);
+
+        assertThrows(RuntimeException.class, cachingStore::close);
+        EasyMock.verify(cache, underlyingStore);
+    }
+
+    @Test
+    public void shouldCloseWrappedStoreAfterErrorDuringCacheClose() {
+        setUpCloseTests();
+        EasyMock.reset(cache);
+        cache.flush(CACHE_NAMESPACE);
+        cache.close(CACHE_NAMESPACE);
+        EasyMock.expectLastCall().andThrow(new RuntimeException("Simulating an error on close"));
+        EasyMock.replay(cache);
+        EasyMock.reset(underlyingStore);
+        underlyingStore.close();
+        EasyMock.replay(underlyingStore);
+
+        assertThrows(RuntimeException.class, cachingStore::close);
+        EasyMock.verify(cache, underlyingStore);
+    }
+
+    @Test
+    public void shouldCloseCacheAfterErrorDuringStateStoreClose() {
+        setUpCloseTests();
+        EasyMock.reset(cache);
+        cache.flush(CACHE_NAMESPACE);
+        cache.close(CACHE_NAMESPACE);
+        EasyMock.replay(cache);
+        EasyMock.reset(underlyingStore);
+        underlyingStore.close();
+        EasyMock.expectLastCall().andThrow(new RuntimeException("Simulating an error on close"));
+        EasyMock.replay(underlyingStore);
+
+        assertThrows(RuntimeException.class, cachingStore::close);
+        EasyMock.verify(cache, underlyingStore);
+    }
+
+    private void setUpCloseTests() {
+        underlyingStore = EasyMock.createNiceMock(RocksDBTimeOrderedWindowStore.class);
+        EasyMock.expect(underlyingStore.name()).andStubReturn("store-name");
+        EasyMock.expect(underlyingStore.isOpen()).andStubReturn(true);
+        EasyMock.replay(underlyingStore);
+        cachingStore = new TimeOrderedCachingWindowStore(underlyingStore, WINDOW_SIZE, SEGMENT_INTERVAL);
+        cache = EasyMock.createNiceMock(ThreadCache.class);
+        context = new InternalMockProcessorContext<>(TestUtils.tempDirectory(), null, null, null, cache);
+        context.setRecordContext(new ProcessorRecordContext(10, 0, 0, TOPIC, new RecordHeaders()));
+        cachingStore.init((StateStoreContext) context, cachingStore);
+    }
+
+    private static KeyValue<Windowed<Bytes>, byte[]> windowedPair(final String key, final String value, final long timestamp) {
+        return KeyValue.pair(
+            new Windowed<>(bytesKey(key), new TimeWindow(timestamp, timestamp + WINDOW_SIZE)),
+            bytesValue(value));
+    }
+
+    private int addItemsToCache() {
+        int cachedSize = 0;
+        int i = 0;
+        while (cachedSize < MAX_CACHE_SIZE_BYTES) {
+            final String kv = String.valueOf(i++);
+            cachingStore.put(bytesKey(kv), bytesValue(kv), DEFAULT_TIMESTAMP);
+            cachedSize += memoryCacheEntrySize(kv.getBytes(), kv.getBytes(), TOPIC) +
+                8 + // timestamp
+                4;  // sequenceNumber
+        }
+        return i;
+    }
+
+}