Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
The class FetchRequestTest, method testFetchData.
@ParameterizedTest
@MethodSource("fetchVersions")
public void testFetchData(short version) {
    TopicPartition topicPartition0 = new TopicPartition("topic", 0);
    TopicPartition topicPartition1 = new TopicPartition("unknownIdTopic", 0);
    Uuid topicId0 = Uuid.randomUuid();
    Uuid topicId1 = Uuid.randomUuid();

    // Only include a topic ID mapping for the first topic partition.
    Map<Uuid, String> topicNames = Collections.singletonMap(topicId0, topicPartition0.topic());
    List<TopicIdPartition> topicIdPartitions = new LinkedList<>();
    topicIdPartitions.add(new TopicIdPartition(topicId0, topicPartition0));
    topicIdPartitions.add(new TopicIdPartition(topicId1, topicPartition1));

    // Include one topic with a topic ID in the topicNames map and one without.
    Map<TopicPartition, FetchRequest.PartitionData> partitionData = new LinkedHashMap<>();
    partitionData.put(topicPartition0, new FetchRequest.PartitionData(topicId0, 0, 0, 0, Optional.empty()));
    partitionData.put(topicPartition1, new FetchRequest.PartitionData(topicId1, 0, 0, 0, Optional.empty()));
    boolean fetchRequestUsesTopicIds = version >= 13;

    FetchRequest fetchRequest = FetchRequest.parse(
        FetchRequest.Builder.forReplica(version, 0, 1, 1, partitionData)
            .removed(Collections.emptyList())
            .replaced(Collections.emptyList())
            .metadata(FetchMetadata.newIncremental(123))
            .build(version)
            .serialize(),
        version);

    // For versions < 13, FetchRequestData carries a topic name and a zero UUID.
    // Versions 13+ carry a valid topic ID but an empty topic name.
    List<TopicIdPartition> expectedData = new LinkedList<>();
    topicIdPartitions.forEach(tidp -> {
        String expectedName = fetchRequestUsesTopicIds ? "" : tidp.topic();
        Uuid expectedTopicId = fetchRequestUsesTopicIds ? tidp.topicId() : Uuid.ZERO_UUID;
        expectedData.add(new TopicIdPartition(expectedTopicId, tidp.partition(), expectedName));
    });

    // Build the list of TopicIdPartitions from the FetchRequestData that was serialized and parsed.
    List<TopicIdPartition> convertedFetchData = new LinkedList<>();
    fetchRequest.data().topics().forEach(topic ->
        topic.partitions().forEach(partition ->
            convertedFetchData.add(new TopicIdPartition(topic.topicId(), partition.partition(), topic.topic()))));

    // The TopicIdPartitions built from the request data should match what we expect.
    assertEquals(expectedData, convertedFetchData);

    // For fetch request versions 13+, fetchData(topicNames) resolves names from the topicNames map,
    // so a topic whose ID is missing from the map gets a null name. For earlier versions the names
    // come from the request itself and the topic IDs are zero.
    Map<TopicIdPartition, FetchRequest.PartitionData> expectedFetchData = new LinkedHashMap<>();
    // Build the expected map based on fetchRequestUsesTopicIds.
    expectedData.forEach(tidp -> {
        String expectedName = fetchRequestUsesTopicIds ? topicNames.get(tidp.topicId()) : tidp.topic();
        TopicIdPartition tpKey = new TopicIdPartition(tidp.topicId(), new TopicPartition(expectedName, tidp.partition()));
        // logStartOffset was not a valid field in versions 4 and earlier.
        int logStartOffset = version > 4 ? 0 : -1;
        expectedFetchData.put(tpKey, new FetchRequest.PartitionData(tidp.topicId(), 0, logStartOffset, 0, Optional.empty()));
    });
    assertEquals(expectedFetchData, fetchRequest.fetchData(topicNames));
}
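The assertEquals calls above lean on TopicIdPartition being a value type whose two constructors describe the same partition. A minimal standalone sketch (not part of the test) illustrating that:

import org.apache.kafka.common.TopicIdPartition;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.Uuid;

public class TopicIdPartitionEqualitySketch {
    public static void main(String[] args) {
        Uuid id = Uuid.randomUuid();
        // The two-argument and three-argument constructors build the same value.
        TopicIdPartition a = new TopicIdPartition(id, new TopicPartition("topic", 0));
        TopicIdPartition b = new TopicIdPartition(id, 0, "topic");
        // Equality covers the topic ID, topic name, and partition index, which is why
        // the test can compare whole lists of TopicIdPartition values with assertEquals.
        System.out.println(a.equals(b)); // true
    }
}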
Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
The class RequestResponseTest, method createFetchResponse.
private FetchResponse createFetchResponse(boolean includeAborted) {
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
    Uuid topicId = Uuid.randomUuid();
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
    responseData.put(new TopicIdPartition(topicId, new TopicPartition("test", 0)),
        new FetchResponseData.PartitionData()
            .setPartitionIndex(0)
            .setHighWatermark(1000000)
            .setLogStartOffset(0)
            .setRecords(records));
    List<FetchResponseData.AbortedTransaction> abortedTransactions = emptyList();
    if (includeAborted) {
        abortedTransactions = singletonList(
            new FetchResponseData.AbortedTransaction().setProducerId(234L).setFirstOffset(999L));
    }
    responseData.put(new TopicIdPartition(topicId, new TopicPartition("test", 1)),
        new FetchResponseData.PartitionData()
            .setPartitionIndex(1)
            .setHighWatermark(1000000)
            .setLogStartOffset(0)
            .setAbortedTransactions(abortedTransactions));
    return FetchResponse.parse(
        FetchResponse.of(Errors.NONE, 25, INVALID_SESSION_ID, responseData).serialize(FETCH.latestVersion()),
        FETCH.latestVersion());
}
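For context, a caller could resolve the parsed response back to plain TopicPartition keys with FetchResponse.responseData, as the v4 test further down does. A hedged sketch, assuming the randomly generated topicId were exposed by the helper (in the code above it is local to the method):

FetchResponse response = createFetchResponse(true);
// Hypothetical: `topicId` is assumed to be the Uuid generated inside the helper.
Map<Uuid, String> topicNames = Collections.singletonMap(topicId, "test");
response.responseData(topicNames, FETCH.latestVersion())
    .forEach((tp, data) -> System.out.println(tp + " -> hw=" + data.highWatermark()));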
Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
The class RequestResponseTest, method createFetchResponse.
private FetchResponse createFetchResponse(int sessionId) {
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
    Map<String, Uuid> topicIds = new HashMap<>();
    topicIds.put("test", Uuid.randomUuid());
    MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("blah".getBytes()));
    responseData.put(new TopicIdPartition(topicIds.get("test"), new TopicPartition("test", 0)),
        new FetchResponseData.PartitionData()
            .setPartitionIndex(0)
            .setHighWatermark(1000000)
            .setLogStartOffset(0)
            .setRecords(records));
    List<FetchResponseData.AbortedTransaction> abortedTransactions = singletonList(
        new FetchResponseData.AbortedTransaction().setProducerId(234L).setFirstOffset(999L));
    responseData.put(new TopicIdPartition(topicIds.get("test"), new TopicPartition("test", 1)),
        new FetchResponseData.PartitionData()
            .setPartitionIndex(1)
            .setHighWatermark(1000000)
            .setLogStartOffset(0)
            .setAbortedTransactions(abortedTransactions));
    return FetchResponse.parse(
        FetchResponse.of(Errors.NONE, 25, sessionId, responseData).serialize(FETCH.latestVersion()),
        FETCH.latestVersion());
}
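This overload exists so tests can check that a non-trivial session ID survives the serialize/parse round trip. A minimal sketch of such a check (the assertion style is assumed, not taken from the source):

FetchResponse response = createFetchResponse(123);
// sessionId() and error() read back the top-level fields that FetchResponse.of() set above.
assertEquals(123, response.sessionId());
assertEquals(Errors.NONE, response.error());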
Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
The class RequestResponseTest, method testFetchResponseV4.
@Test
public void testFetchResponseV4() {
    LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> responseData = new LinkedHashMap<>();
    Map<Uuid, String> topicNames = new HashMap<>();
    topicNames.put(Uuid.randomUuid(), "bar");
    topicNames.put(Uuid.randomUuid(), "foo");
    MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10));
    List<FetchResponseData.AbortedTransaction> abortedTransactions = asList(
        new FetchResponseData.AbortedTransaction().setProducerId(10).setFirstOffset(100),
        new FetchResponseData.AbortedTransaction().setProducerId(15).setFirstOffset(50));

    // Use the zero UUID since this is an old request version.
    responseData.put(new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("bar", 0)),
        new FetchResponseData.PartitionData()
            .setPartitionIndex(0)
            .setHighWatermark(1000000)
            .setAbortedTransactions(abortedTransactions)
            .setRecords(records));
    responseData.put(new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("bar", 1)),
        new FetchResponseData.PartitionData()
            .setPartitionIndex(1)
            .setHighWatermark(900000)
            .setLastStableOffset(5)
            .setRecords(records));
    responseData.put(new TopicIdPartition(Uuid.ZERO_UUID, new TopicPartition("foo", 0)),
        new FetchResponseData.PartitionData()
            .setPartitionIndex(0)
            .setHighWatermark(70000)
            .setLastStableOffset(6)
            .setRecords(records));

    FetchResponse response = FetchResponse.of(Errors.NONE, 10, INVALID_SESSION_ID, responseData);
    FetchResponse deserialized = FetchResponse.parse(response.serialize((short) 4), (short) 4);
    assertEquals(
        responseData.entrySet().stream()
            .collect(Collectors.toMap(e -> e.getKey().topicPartition(), Map.Entry::getValue)),
        deserialized.responseData(topicNames, (short) 4));
}
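Note the version split this exercises: responses at versions below 13 serialize topic names directly, so the zero UUIDs written above are mere placeholders, while versions 13+ send only topic IDs and need the topicNames map to recover the names. A hedged sketch of that contrast, reusing the variables above and assuming (as the test's random IDs in topicNames suggest) that pre-13 parsing ignores the ID-to-name map:

// For version 4 the names travel in the response itself, so even an empty map
// resolves every partition.
deserialized.responseData(Collections.emptyMap(), (short) 4)
    .keySet()
    .forEach(System.out::println); // prints bar-0, bar-1, foo-0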
Use of org.apache.kafka.common.TopicIdPartition in project kafka by apache.
The class KafkaConsumerTest, method testShouldAttemptToRejoinGroupAfterSyncGroupFailed.
@Test
public void testShouldAttemptToRejoinGroupAfterSyncGroupFailed() throws Exception {
    ConsumerMetadata metadata = createMetadata(subscription);
    MockClient client = new MockClient(time, metadata);
    initMetadata(client, Collections.singletonMap(topic, 1));
    Node node = metadata.fetch().nodes().get(0);
    KafkaConsumer<String, String> consumer = newConsumer(time, client, subscription, metadata, assignor, false, groupInstanceId);
    consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer));
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port());
    client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, memberId, leaderId, Errors.NONE), coordinator);
    client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator);
    client.prepareResponseFrom(fetchResponse(tp0, 0, 1), node);
    client.prepareResponseFrom(fetchResponse(tp0, 1, 0), node);
    consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
    consumer.poll(Duration.ZERO);

    // The heartbeat fails because a rebalance is in progress.
    client.prepareResponseFrom(body -> true,
        new HeartbeatResponse(new HeartbeatResponseData().setErrorCode(Errors.REBALANCE_IN_PROGRESS.code())),
        coordinator);

    // Join the group again.
    final ByteBuffer byteBuffer = ConsumerProtocol.serializeSubscription(
        new ConsumerPartitionAssignor.Subscription(singletonList(topic)));

    // This member becomes the leader.
    final JoinGroupResponse leaderResponse = new JoinGroupResponse(
        new JoinGroupResponseData()
            .setErrorCode(Errors.NONE.code())
            .setGenerationId(1)
            .setProtocolName(assignor.name())
            .setLeader(memberId)
            .setMemberId(memberId)
            .setMembers(Collections.singletonList(
                new JoinGroupResponseData.JoinGroupResponseMember()
                    .setMemberId(memberId)
                    .setMetadata(byteBuffer.array()))));
    client.prepareResponseFrom(leaderResponse, coordinator);

    // The sync group fails due to a disconnect...
    client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator, true);
    // ...so the consumer should try to find the new coordinator...
    client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node);
    // ...and then rejoin the group.
    client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, memberId, leaderId, Errors.NONE), coordinator);
    client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator);
    client.prepareResponseFrom(
        body -> body instanceof FetchRequest
            && ((FetchRequest) body).fetchData(topicNames).containsKey(new TopicIdPartition(topicId, tp0)),
        fetchResponse(tp0, 1, 1),
        node);

    time.sleep(heartbeatIntervalMs);
    Thread.sleep(heartbeatIntervalMs);
    consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE));
    final ConsumerRecords<String, String> records = consumer.poll(Duration.ZERO);
    assertFalse(records.isEmpty());
    consumer.close(Duration.ofMillis(0));
}
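The fetch-request matcher on the final prepareResponseFrom call is the TopicIdPartition-specific piece here: FetchRequest.fetchData(topicNames) keys its result by TopicIdPartition, so a membership check must supply the topic ID, not just a TopicPartition. A sketch of that matcher pulled out on its own, assuming the test fixture's topicNames, topicId, and tp0 fields:

// Matches only fetch requests that ask for tp0 under the expected topic ID.
MockClient.RequestMatcher fetchesTp0 = body ->
    body instanceof FetchRequest
        && ((FetchRequest) body).fetchData(topicNames).containsKey(new TopicIdPartition(topicId, tp0));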