Use of org.apache.kafka.common.requests.FetchRequest.PartitionData in project kafka by apache.
In class FetcherTest, method testFetchTopicIdUpgradeDowngrade:
@Test
public void testFetchTopicIdUpgradeDowngrade() {
    buildFetcher();

    TopicIdPartition fooWithoutId = new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("foo", 0));
    TopicIdPartition fooWithId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));

    // Assign foo without a topic id.
    subscriptions.assignFromUser(singleton(fooWithoutId.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(fooWithoutId), tp -> validLeaderEpoch));
    subscriptions.seek(fooWithoutId.topicPartition(), 0);

    // Fetch should use version 12.
    assertEquals(1, fetcher.sendFetches());

    client.prepareResponse(
        fetchRequestMatcher((short) 12,
            singletonMap(fooWithoutId, new PartitionData(
                fooWithoutId.topicId(),
                0,
                FetchRequest.INVALID_LOG_START_OFFSET,
                fetchSize,
                Optional.of(validLeaderEpoch))),
            emptyList()),
        fullFetchResponse(1, fooWithoutId, this.records, Errors.NONE, 100L, 0));

    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchedRecords();

    // Upgrade.
    subscriptions.assignFromUser(singleton(fooWithId.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(fooWithId), tp -> validLeaderEpoch));
    subscriptions.seek(fooWithId.topicPartition(), 0);

    // Fetch should use latest version.
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    // After the upgrade, foo is fetched with its topic id and nothing is marked as forgotten.
    client.prepareResponse(
        fetchRequestMatcher(ApiKeys.FETCH.latestVersion(),
            singletonMap(fooWithId, new PartitionData(
                fooWithId.topicId(),
                0,
                FetchRequest.INVALID_LOG_START_OFFSET,
                fetchSize,
                Optional.of(validLeaderEpoch))),
            emptyList()),
        fullFetchResponse(1, fooWithId, this.records, Errors.NONE, 100L, 0));

    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchedRecords();

    // Downgrade.
    subscriptions.assignFromUser(singleton(fooWithoutId.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(fooWithoutId), tp -> validLeaderEpoch));
    subscriptions.seek(fooWithoutId.topicPartition(), 0);

    // Fetch should use version 12.
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    // After the downgrade, foo is fetched without a topic id and nothing is marked as forgotten.
    client.prepareResponse(
        fetchRequestMatcher((short) 12,
            singletonMap(fooWithoutId, new PartitionData(
                fooWithoutId.topicId(),
                0,
                FetchRequest.INVALID_LOG_START_OFFSET,
                fetchSize,
                Optional.of(validLeaderEpoch))),
            emptyList()),
        fullFetchResponse(1, fooWithoutId, this.records, Errors.NONE, 100L, 0));

    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchedRecords();
}
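The only thing that changes between the version-12 and latest-version expectations above is the topic id carried by FetchRequest.PartitionData: Uuid.ZERO_UUID stands for "no topic id" and keeps the fetch at version 12, while a real id allows the latest fetch version. Below is a minimal, self-contained sketch of those two shapes; the fetchSize and leaderEpoch values and the class name are stand-ins, not taken from FetcherTest.

import java.util.Optional;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.requests.FetchRequest;

public class PartitionDataSketch {
    public static void main(String[] args) {
        int fetchSize = 1024 * 1024;  // stand-in for the test's fetchSize field
        int leaderEpoch = 0;          // stand-in for the test's validLeaderEpoch field

        // Before the upgrade: the partition is known only by name, so the topic id is
        // ZERO_UUID and the fetch request cannot go beyond version 12.
        FetchRequest.PartitionData withoutId = new FetchRequest.PartitionData(
            Uuid.ZERO_UUID,
            0L,                                     // fetch offset
            FetchRequest.INVALID_LOG_START_OFFSET,  // consumers do not report a log start offset
            fetchSize,
            Optional.of(leaderEpoch));

        // After the upgrade: the same partition carries a real topic id, so the latest
        // fetch version can be used.
        FetchRequest.PartitionData withId = new FetchRequest.PartitionData(
            Uuid.randomUuid(),
            0L,
            FetchRequest.INVALID_LOG_START_OFFSET,
            fetchSize,
            Optional.of(leaderEpoch));

        System.out.println(withoutId);
        System.out.println(withId);
    }
}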
Use of org.apache.kafka.common.requests.FetchRequest.PartitionData in project kafka by apache.
In class FetcherTest, method testFetchForgetTopicIdWhenReplaced:
@Test
public void testFetchForgetTopicIdWhenReplaced() {
    buildFetcher();

    TopicIdPartition fooWithOldTopicId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
    TopicIdPartition fooWithNewTopicId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));

    // Assign foo with old topic id.
    subscriptions.assignFromUser(singleton(fooWithOldTopicId.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(fooWithOldTopicId), tp -> validLeaderEpoch));
    subscriptions.seek(fooWithOldTopicId.topicPartition(), 0);

    // Fetch should use latest version.
    assertEquals(1, fetcher.sendFetches());

    client.prepareResponse(
        fetchRequestMatcher(ApiKeys.FETCH.latestVersion(),
            singletonMap(fooWithOldTopicId, new PartitionData(
                fooWithOldTopicId.topicId(),
                0,
                FetchRequest.INVALID_LOG_START_OFFSET,
                fetchSize,
                Optional.of(validLeaderEpoch))),
            emptyList()),
        fullFetchResponse(1, fooWithOldTopicId, this.records, Errors.NONE, 100L, 0));

    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchedRecords();

    // Replace foo with old topic id with foo with new topic id.
    subscriptions.assignFromUser(singleton(fooWithNewTopicId.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(fooWithNewTopicId), tp -> validLeaderEpoch));
    subscriptions.seek(fooWithNewTopicId.topicPartition(), 0);

    // Fetch should use latest version.
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    // foo with old topic id should be removed from the session.
    client.prepareResponse(
        fetchRequestMatcher(ApiKeys.FETCH.latestVersion(),
            singletonMap(fooWithNewTopicId, new PartitionData(
                fooWithNewTopicId.topicId(),
                0,
                FetchRequest.INVALID_LOG_START_OFFSET,
                fetchSize,
                Optional.of(validLeaderEpoch))),
            singletonList(fooWithOldTopicId)),
        fullFetchResponse(1, fooWithNewTopicId, this.records, Errors.NONE, 100L, 0));

    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchedRecords();
}
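The replacement works because TopicIdPartition identity includes the topic id: two TopicIdPartitions for the same topic-partition but different ids are distinct objects, which is why fooWithOldTopicId appears in the forgotten list while fooWithNewTopicId is fetched. A small illustrative sketch of that distinction (the class and variable names below are placeholders, not part of the test):

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;

public class TopicIdPartitionSketch {
    public static void main(String[] args) {
        TopicPartition fooPartition = new TopicPartition("foo", 0);

        // Same topic-partition, two different topic ids (e.g. the topic was deleted and recreated).
        TopicIdPartition oldId = new TopicIdPartition(Uuid.randomUuid(), fooPartition);
        TopicIdPartition newId = new TopicIdPartition(Uuid.randomUuid(), fooPartition);

        // The underlying TopicPartition is the same...
        System.out.println(oldId.topicPartition().equals(newId.topicPartition()));  // true

        // ...but the TopicIdPartitions are not, so a fetch session tracking oldId has to
        // drop it (list it as forgotten) before it can track newId.
        System.out.println(oldId.equals(newId));  // false
    }
}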
Use of org.apache.kafka.common.requests.FetchRequest.PartitionData in project kafka by apache.
In class FetcherTest, method testFetchForgetTopicIdWhenUnassigned:
@Test
public void testFetchForgetTopicIdWhenUnassigned() {
    buildFetcher();

    TopicIdPartition foo = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
    TopicIdPartition bar = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0));

    // Assign foo.
    subscriptions.assignFromUser(singleton(foo.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(foo), tp -> validLeaderEpoch));
    subscriptions.seek(foo.topicPartition(), 0);

    // Fetch should use latest version.
    assertEquals(1, fetcher.sendFetches());

    client.prepareResponse(
        fetchRequestMatcher(ApiKeys.FETCH.latestVersion(),
            singletonMap(foo, new PartitionData(
                foo.topicId(),
                0,
                FetchRequest.INVALID_LOG_START_OFFSET,
                fetchSize,
                Optional.of(validLeaderEpoch))),
            emptyList()),
        fullFetchResponse(1, foo, this.records, Errors.NONE, 100L, 0));

    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchedRecords();

    // Assign bar and unassign foo.
    subscriptions.assignFromUser(singleton(bar.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(bar), tp -> validLeaderEpoch));
    subscriptions.seek(bar.topicPartition(), 0);

    // Fetch should use latest version.
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    client.prepareResponse(
        fetchRequestMatcher(ApiKeys.FETCH.latestVersion(),
            singletonMap(bar, new PartitionData(
                bar.topicId(),
                0,
                FetchRequest.INVALID_LOG_START_OFFSET,
                fetchSize,
                Optional.of(validLeaderEpoch))),
            singletonList(foo)),
        fullFetchResponse(1, bar, this.records, Errors.NONE, 100L, 0));

    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchedRecords();
}
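The fetchRequestMatcher(...) helper used throughout these tests is private to FetcherTest, so its implementation is not shown on this page. As a rough, hypothetical sketch of what such a matcher can look like with MockClient, the code below checks the request version, the fetched partitions, and the forgotten partitions. It assumes the FetchRequest accessors fetchData(Map<Uuid, String>) and forgottenTopics(Map<Uuid, String>) of recent Kafka versions; the class and method names are illustrative and this is not the test's actual helper.

import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.kafka.clients.MockClient;
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.requests.FetchRequest;

public class FetchRequestMatcherSketch {

    // Builds a matcher that accepts only a FetchRequest with the expected version, the
    // expected set of fetched partitions, and the expected forgotten partitions.
    public static MockClient.RequestMatcher matching(short expectedVersion,
                                                     Set<TopicIdPartition> expectedToFetch,
                                                     List<TopicIdPartition> expectedToForget,
                                                     Map<Uuid, String> topicNames) {
        return body -> {
            if (!(body instanceof FetchRequest)) {
                return false;
            }
            FetchRequest request = (FetchRequest) body;
            return request.version() == expectedVersion
                && request.fetchData(topicNames).keySet().equals(expectedToFetch)
                && request.forgottenTopics(topicNames).equals(expectedToForget);
        };
    }
}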
Use of org.apache.kafka.common.requests.FetchRequest.PartitionData in project apache-kafka-on-k8s by banzaicloud.
In class FetcherTest, method testGetOffsetsForTimesWithUnknownOffset:
private void testGetOffsetsForTimesWithUnknownOffset() {
    client.reset();

    // Ensure metadata has the partition.
    Cluster cluster = TestUtils.clusterWith(1, topicName, 1);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());

    Map<TopicPartition, ListOffsetResponse.PartitionData> partitionData = new HashMap<>();
    partitionData.put(tp0, new ListOffsetResponse.PartitionData(Errors.NONE,
        ListOffsetResponse.UNKNOWN_TIMESTAMP, ListOffsetResponse.UNKNOWN_OFFSET));
    client.prepareResponseFrom(new ListOffsetResponse(0, partitionData), cluster.leaderFor(tp0));

    Map<TopicPartition, Long> timestampToSearch = new HashMap<>();
    timestampToSearch.put(tp0, 0L);
    Map<TopicPartition, OffsetAndTimestamp> offsetAndTimestampMap =
        fetcher.offsetsByTimes(timestampToSearch, Long.MAX_VALUE);

    assertTrue(offsetAndTimestampMap.containsKey(tp0));
    assertNull(offsetAndTimestampMap.get(tp0));
}
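The helper above pins down the contract that offsetsByTimes returns an entry whose value is null when the broker knows no offset for the requested timestamp. The same contract is what callers see through the public consumer API offsetsForTimes; a minimal sketch of handling it is below (broker address, topic name, and class name are placeholders, not from the test).

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetsForTimesSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");  // placeholder broker
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("foo", 0);  // placeholder topic
            Map<TopicPartition, OffsetAndTimestamp> result =
                consumer.offsetsForTimes(Collections.singletonMap(tp, 0L));

            OffsetAndTimestamp found = result.get(tp);
            if (found == null) {
                // Same contract the test asserts: the key is present but the value is
                // null when no offset exists for the requested timestamp.
                System.out.println("No offset for timestamp 0 in " + tp);
            } else {
                System.out.println("Offset " + found.offset() + " at " + found.timestamp());
            }
        }
    }
}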
Use of org.apache.kafka.common.requests.FetchRequest.PartitionData in project apache-kafka-on-k8s by banzaicloud.
In class FetcherTest, method testBatchedListOffsetsMetadataErrors:
@Test(expected = TimeoutException.class)
public void testBatchedListOffsetsMetadataErrors() {
    Map<TopicPartition, ListOffsetResponse.PartitionData> partitionData = new HashMap<>();
    partitionData.put(tp0, new ListOffsetResponse.PartitionData(Errors.NOT_LEADER_FOR_PARTITION,
        ListOffsetResponse.UNKNOWN_TIMESTAMP, ListOffsetResponse.UNKNOWN_OFFSET));
    partitionData.put(tp1, new ListOffsetResponse.PartitionData(Errors.UNKNOWN_TOPIC_OR_PARTITION,
        ListOffsetResponse.UNKNOWN_TIMESTAMP, ListOffsetResponse.UNKNOWN_OFFSET));
    client.prepareResponse(new ListOffsetResponse(0, partitionData));

    Map<TopicPartition, Long> offsetsToSearch = new HashMap<>();
    offsetsToSearch.put(tp0, ListOffsetRequest.EARLIEST_TIMESTAMP);
    offsetsToSearch.put(tp1, ListOffsetRequest.EARLIEST_TIMESTAMP);

    fetcher.offsetsByTimes(offsetsToSearch, 0);
}
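Both errors prepared above map to retriable exceptions, which is consistent with the test expecting a TimeoutException from the zero-timeout lookup rather than an immediate failure. A small standalone check of that classification (the class name is illustrative):

import org.apache.kafka.common.errors.RetriableException;
import org.apache.kafka.common.protocol.Errors;

public class RetriableErrorsSketch {
    public static void main(String[] args) {
        Errors[] listOffsetErrors = {Errors.NOT_LEADER_FOR_PARTITION, Errors.UNKNOWN_TOPIC_OR_PARTITION};
        for (Errors error : listOffsetErrors) {
            // Both exceptions extend RetriableException, so both lines print "retriable=true".
            boolean retriable = error.exception() instanceof RetriableException;
            System.out.println(error + " retriable=" + retriable);
        }
    }
}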