Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
The class FetcherTest, method testConsumingViaIncrementalFetchRequests.
@Test
public void testConsumingViaIncrementalFetchRequests() {
    buildFetcher(2);
    List<ConsumerRecord<byte[], byte[]>> records;
    assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1)));
    subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0)));
    subscriptions.seekValidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1)));
    // Fetch some records and establish an incremental fetch session.
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions1 = new LinkedHashMap<>();
    partitions1.put(tidp0, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp0.partition())
        .setHighWatermark(2)
        .setLastStableOffset(2)
        .setLogStartOffset(0)
        .setRecords(this.records));
    partitions1.put(tidp1, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp1.partition())
        .setHighWatermark(100)
        .setLogStartOffset(0)
        .setRecords(emptyRecords));
    FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1);
    client.prepareResponse(resp1);
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    // buildFetcher(2) caps max poll records at 2, so only two of the three fetched records are returned.
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchedRecords();
    assertFalse(fetchedRecords.containsKey(tp1));
    records = fetchedRecords.get(tp0);
    assertEquals(2, records.size());
    assertEquals(3L, subscriptions.position(tp0).offset);
    assertEquals(1L, subscriptions.position(tp1).offset);
    assertEquals(1, records.get(0).offset());
    assertEquals(2, records.get(1).offset());
    // There is still a buffered record, so no new fetch is sent.
    assertEquals(0, fetcher.sendFetches());
    fetchedRecords = fetchedRecords();
    assertFalse(fetchedRecords.containsKey(tp1));
    records = fetchedRecords.get(tp0);
    assertEquals(1, records.size());
    assertEquals(3, records.get(0).offset());
    assertEquals(4L, subscriptions.position(tp0).offset);
    // The second response contains no new records: an empty partition map is a
    // valid incremental fetch response meaning nothing changed for this session.
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions2 = new LinkedHashMap<>();
    FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2);
    client.prepareResponse(resp2);
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(time.timer(0));
    fetchedRecords = fetchedRecords();
    assertTrue(fetchedRecords.isEmpty());
    assertEquals(4L, subscriptions.position(tp0).offset);
    assertEquals(1L, subscriptions.position(tp1).offset);
    // The third response contains some new records for tp0.
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions3 = new LinkedHashMap<>();
    partitions3.put(tidp0, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp0.partition())
        .setHighWatermark(100)
        .setLastStableOffset(4)
        .setLogStartOffset(0)
        .setRecords(this.nextRecords));
    FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3);
    client.prepareResponse(resp3);
    assertEquals(1, fetcher.sendFetches());
    consumerClient.poll(time.timer(0));
    fetchedRecords = fetchedRecords();
    assertFalse(fetchedRecords.containsKey(tp1));
    records = fetchedRecords.get(tp0);
    assertEquals(2, records.size());
    assertEquals(6L, subscriptions.position(tp0).offset);
    assertEquals(1L, subscriptions.position(tp1).offset);
    assertEquals(4, records.get(0).offset());
    assertEquals(5, records.get(1).offset());
}
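
The response maps above are keyed by TopicIdPartition, so the session bookkeeping relies on its value semantics: equality covers the topic id and the topic-partition together. A minimal standalone sketch of that behavior; the demo class name is ours, but the constructor and accessors are the public API of org.apache.kafka.common.TopicIdPartition:

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;

public class TopicIdPartitionKeyDemo {
    public static void main(String[] args) {
        Uuid topicId = Uuid.randomUuid();
        // Two instances built from the same id and partition are equal, so either
        // one can look up entries in a LinkedHashMap like partitions1 above.
        TopicIdPartition a = new TopicIdPartition(topicId, new TopicPartition("test", 0));
        TopicIdPartition b = new TopicIdPartition(topicId, new TopicPartition("test", 0));
        System.out.println(a.equals(b));         // true
        System.out.println(a.topicId());         // the random Uuid
        System.out.println(a.topicPartition());  // test-0
    }
}
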
Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
The class FetcherTest, method testFetchForgetTopicIdWhenUnassigned.
@Test
public void testFetchForgetTopicIdWhenUnassigned() {
    buildFetcher();
    TopicIdPartition foo = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0));
    TopicIdPartition bar = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0));
    // Assign foo.
    subscriptions.assignFromUser(singleton(foo.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(foo), tp -> validLeaderEpoch));
    subscriptions.seek(foo.topicPartition(), 0);
    // The fetch should use the latest version and carry foo's topic id.
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(
        fetchRequestMatcher(ApiKeys.FETCH.latestVersion(),
            singletonMap(foo, new PartitionData(foo.topicId(), 0, FetchRequest.INVALID_LOG_START_OFFSET, fetchSize, Optional.of(validLeaderEpoch))),
            emptyList()),
        fullFetchResponse(1, foo, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchedRecords();
    // Assign bar and unassign foo.
    subscriptions.assignFromUser(singleton(bar.topicPartition()));
    client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, singleton(bar), tp -> validLeaderEpoch));
    subscriptions.seek(bar.topicPartition(), 0);
    // The next fetch should add bar and list foo as a forgotten partition.
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(
        fetchRequestMatcher(ApiKeys.FETCH.latestVersion(),
            singletonMap(bar, new PartitionData(bar.topicId(), 0, FetchRequest.INVALID_LOG_START_OFFSET, fetchSize, Optional.of(validLeaderEpoch))),
            singletonList(foo)),
        fullFetchResponse(1, bar, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    fetchedRecords();
}
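
The "forgotten" list in the second request boils down to a set difference between the previous session partitions and the current assignment. A hedged sketch of that bookkeeping, not Kafka's actual FetchSessionHandler implementation, just the computation the matcher above verifies:

import java.util.HashSet;
import java.util.Set;
import org.apache.kafka.common.TopicIdPartition;

// Hypothetical helper, not part of the Kafka client.
final class SessionDelta {
    // Partitions an incremental fetch request must add: in the session now, not before.
    static Set<TopicIdPartition> added(Set<TopicIdPartition> previous, Set<TopicIdPartition> current) {
        Set<TopicIdPartition> result = new HashSet<>(current);
        result.removeAll(previous);
        return result;
    }

    // Partitions it must forget: in the session before, not now.
    static Set<TopicIdPartition> forgotten(Set<TopicIdPartition> previous, Set<TopicIdPartition> current) {
        Set<TopicIdPartition> result = new HashSet<>(previous);
        result.removeAll(current);
        return result;
    }
}

With previous = {foo} and current = {bar}, added() yields {bar} and forgotten() yields {foo}, which is exactly what the second fetchRequestMatcher expects.
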
Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
The class FetcherTest, method testFetchResponseMetricsWithOnePartitionAtTheWrongOffset.
@Test
public void testFetchResponseMetricsWithOnePartitionAtTheWrongOffset() {
    buildFetcher();
    assignFromUser(mkSet(tp0, tp1));
    subscriptions.seek(tp0, 0);
    subscriptions.seek(tp1, 0);
    Map<MetricName, KafkaMetric> allMetrics = metrics.metrics();
    KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg));
    KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg));
    // Send the fetch, then seek tp1 to a new offset so its response data arrives at the wrong position.
    assertEquals(1, fetcher.sendFetches());
    subscriptions.seek(tp1, 5);
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    for (int v = 0; v < 3; v++)
        builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
    MemoryRecords records = builder.build();
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new HashMap<>();
    partitions.put(tidp0, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp0.partition())
        .setHighWatermark(100)
        .setLogStartOffset(0)
        .setRecords(records));
    partitions.put(tidp1, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp1.partition())
        .setHighWatermark(100)
        .setLogStartOffset(0)
        .setRecords(MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("val".getBytes()))));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    fetcher.collectFetch();
    // The record for tp1 arrived at the wrong offset, so only tp0's bytes and
    // record count should flow into the metrics.
    int expectedBytes = 0;
    for (Record record : records.records())
        expectedBytes += record.sizeInBytes();
    assertEquals(expectedBytes, (Double) fetchSizeAverage.metricValue(), EPSILON);
    assertEquals(3, (Double) recordsCountAverage.metricValue(), EPSILON);
}
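
The wrong-offset scenario works because MemoryRecordsBuilder pins each record to an explicit offset, letting the test fabricate a batch that no longer matches the consumer's position. A minimal standalone sketch using only calls that appear in the test above; the demo class name is ours:

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class ExplicitOffsetRecordsDemo {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(
            ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        // Pin three records to offsets 0..2; a consumer positioned at offset 5
        // discards the whole batch, which is what the metrics test relies on.
        for (int v = 0; v < 3; v++)
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();
        int bytes = 0;
        for (Record record : records.records())
            bytes += record.sizeInBytes();
        System.out.println(bytes + " bytes across 3 records");
    }
}
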
Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
The class FetcherTest, method testFetchWithTopicId.
@Test
public void testFetchWithTopicId() {
    buildFetcher();
    TopicIdPartition tp = new TopicIdPartition(topicId, new TopicPartition(topicName, 0));
    assignFromUser(singleton(tp.topicPartition()));
    subscriptions.seek(tp.topicPartition(), 0);
    // The fetch should use the latest version and carry the topic id.
    assertEquals(1, fetcher.sendFetches());
    assertFalse(fetcher.hasCompletedFetches());
    client.prepareResponse(
        fetchRequestMatcher(ApiKeys.FETCH.latestVersion(), tp, 0, Optional.of(validLeaderEpoch)),
        fullFetchResponse(tp, this.records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchedRecords();
    assertTrue(partitionRecords.containsKey(tp.topicPartition()));
    List<ConsumerRecord<byte[], byte[]>> records = partitionRecords.get(tp.topicPartition());
    assertEquals(3, records.size());
    // The position advances past the last returned record: offset 4 is the next fetch position.
    assertEquals(4L, subscriptions.position(tp.topicPartition()).offset);
    // The returned records are contiguous, starting at offset 1.
    long offset = 1;
    for (ConsumerRecord<byte[], byte[]> record : records) {
        assertEquals(offset, record.offset());
        offset += 1;
    }
}
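
Note the pattern throughout this test: the TopicIdPartition is built once, but every call into the subscription and position APIs goes through tp.topicPartition(), since those are still keyed by plain TopicPartition while the Uuid travels only in the fetch request itself. A small sketch of that bridging; the map here is illustrative, not a Kafka API:

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;

public class TopicIdBridgeDemo {
    public static void main(String[] args) {
        TopicIdPartition tp = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("test", 0));
        // Structures keyed by TopicPartition use the embedded topicPartition();
        // the topic id rides along only for the wire protocol.
        Map<TopicPartition, Long> positions = Collections.singletonMap(tp.topicPartition(), 4L);
        System.out.println(positions.get(tp.topicPartition())); // 4
        System.out.println(tp.topicId()); // the Uuid sent in fetch requests
    }
}
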
Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
The class FetcherTest, method testFetchPositionAfterException.
@Test
public void testFetchPositionAfterException() {
    // Verify that the next fetch offset advances by exactly the number of records
    // returned when some fetched partitions raise an exception. This ensures the
    // consumer does not lose records when an exception is thrown.
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0, tp1));
    subscriptions.seek(tp0, 1);
    subscriptions.seek(tp1, 1);
    assertEquals(1, fetcher.sendFetches());
    // tp1 returns records; tp0 returns OFFSET_OUT_OF_RANGE.
    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp1.partition())
        .setHighWatermark(100)
        .setRecords(records));
    partitions.put(tidp0, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp0.partition())
        .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
        .setHighWatermark(100));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    consumerClient.poll(time.timer(0));
    List<ConsumerRecord<byte[], byte[]>> allFetchedRecords = new ArrayList<>();
    fetchRecordsInto(allFetchedRecords);
    // tp1's three records are returned and its position advances; tp0 stays put.
    assertEquals(1, subscriptions.position(tp0).offset);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, allFetchedRecords.size());
    // The next collect surfaces the out-of-range error, naming tp0 and offset 1 only.
    OffsetOutOfRangeException e = assertThrows(OffsetOutOfRangeException.class, () -> fetchRecordsInto(allFetchedRecords));
    assertEquals(singleton(tp0), e.offsetOutOfRangePartitions().keySet());
    assertEquals(1L, e.offsetOutOfRangePartitions().get(tp0).longValue());
    assertEquals(1, subscriptions.position(tp0).offset);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, allFetchedRecords.size());
}
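
On the public consumer API, the same exception the test asserts on surfaces from poll() and names only the offending partitions, so an application can reset just those. A hedged sketch of such a handler; the class name, method, and the seek-to-beginning policy are illustrative assumptions, while OffsetOutOfRangeException.offsetOutOfRangePartitions() is the real accessor used in the test:

import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetOutOfRangeException;
import org.apache.kafka.common.TopicPartition;

public class OutOfRangeHandlingDemo {
    // `consumer` is assumed to be an already-configured KafkaConsumer.
    static void pollWithReset(KafkaConsumer<byte[], byte[]> consumer) {
        try {
            consumer.poll(Duration.ofMillis(100));
        } catch (OffsetOutOfRangeException e) {
            // Only the partitions named in the exception need a reset; records
            // already returned for healthy partitions (tp1 above) are not lost.
            for (Map.Entry<TopicPartition, Long> entry : e.offsetOutOfRangePartitions().entrySet()) {
                System.err.printf("offset %d out of range for %s, seeking to beginning%n",
                    entry.getValue(), entry.getKey());
                consumer.seekToBeginning(Collections.singleton(entry.getKey()));
            }
        }
    }
}
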