Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project: the method testIdUsageWithAllForgottenPartitions of the class FetchSessionHandlerTest.
@ParameterizedTest
@ValueSource(booleans = { true, false })
public void testIdUsageWithAllForgottenPartitions(boolean useTopicIds) {
    // Exercise the case where every partition is dropped from an existing fetch session.
    TopicPartition fooPart = new TopicPartition("foo", 0);
    Uuid fooId = useTopicIds ? Uuid.randomUuid() : Uuid.ZERO_UUID;
    short respVersion = useTopicIds ? ApiKeys.FETCH.latestVersion() : 12;
    FetchSessionHandler sessionHandler = new FetchSessionHandler(LOG_CONTEXT, 1);

    // Establish the session with topic foo as its only member.
    FetchSessionHandler.Builder fullBuilder = sessionHandler.newBuilder();
    fullBuilder.add(fooPart, new FetchRequest.PartitionData(fooId, 0, 100, 200, Optional.empty()));
    FetchSessionHandler.FetchRequestData fullRequest = fullBuilder.build();
    assertMapsEqual(reqMap(new ReqEntry("foo", fooId, 0, 0, 100, 200)),
        fullRequest.toSend(), fullRequest.sessionPartitions());
    assertTrue(fullRequest.metadata().isFull());
    assertEquals(useTopicIds, fullRequest.canUseTopicIds());

    FetchResponse response = FetchResponse.of(Errors.NONE, 0, 123,
        respMap(new RespEntry("foo", 0, fooId, 10, 20)));
    sessionHandler.handleResponse(response, respVersion);

    // Build an incremental request that drops the topic from the session entirely.
    FetchSessionHandler.Builder emptyBuilder = sessionHandler.newBuilder();
    FetchSessionHandler.FetchRequestData removeRequest = emptyBuilder.build();
    assertEquals(Collections.singletonList(new TopicIdPartition(fooId, fooPart)), removeRequest.toForget());

    // The session itself survives: same session ID, advanced epoch, unchanged topic-ID usage.
    assertEquals(123, removeRequest.metadata().sessionId(), "Did not use same session when useTopicIds was " + useTopicIds);
    assertEquals(1, removeRequest.metadata().epoch(), "Did not have correct epoch when useTopicIds was " + useTopicIds);
    assertEquals(useTopicIds, removeRequest.canUseTopicIds());
}
Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project: the method testTopicIdUsageGrantedOnIdUpgrade of the class FetchSessionHandlerTest.
@Test
public void testTopicIdUsageGrantedOnIdUpgrade() {
    // Add a topic ID to a partition the session already tracks (partition 0),
    // and separately to a brand-new partition (partition 1), in an incremental request.
    for (int partition = 0; partition < 2; partition++) {
        String testType = partition == 0 ? "updating a partition" : "adding a new partition";
        FetchSessionHandler sessionHandler = new FetchSessionHandler(LOG_CONTEXT, 1);

        // First request: full fetch for foo-0 without a topic ID, so IDs are unusable.
        FetchSessionHandler.Builder initialBuilder = sessionHandler.newBuilder();
        initialBuilder.add(new TopicPartition("foo", 0),
            new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 100, 200, Optional.empty()));
        FetchSessionHandler.FetchRequestData initialRequest = initialBuilder.build();
        assertMapsEqual(reqMap(new ReqEntry("foo", Uuid.ZERO_UUID, 0, 0, 100, 200)),
            initialRequest.toSend(), initialRequest.sessionPartitions());
        assertTrue(initialRequest.metadata().isFull());
        assertFalse(initialRequest.canUseTopicIds());

        FetchResponse response = FetchResponse.of(Errors.NONE, 0, 123,
            respMap(new RespEntry("foo", 0, Uuid.ZERO_UUID, 10, 20)));
        sessionHandler.handleResponse(response, (short) 12);

        // Second request: supply a topic ID for the chosen partition.
        Uuid upgradedId = Uuid.randomUuid();
        FetchSessionHandler.Builder upgradeBuilder = sessionHandler.newBuilder();
        upgradeBuilder.add(new TopicPartition("foo", partition),
            new FetchRequest.PartitionData(upgradedId, 10, 110, 210, Optional.empty()));
        FetchSessionHandler.FetchRequestData upgradeRequest = upgradeBuilder.build();

        // Session ID and epoch are retained; topic IDs become usable only when the
        // existing partition was the one updated.
        boolean updated = partition == 0;
        // The receiving broker will handle closing the session.
        assertEquals(123, upgradeRequest.metadata().sessionId(), "Did not use same session when " + testType);
        assertEquals(1, upgradeRequest.metadata().epoch(), "Did not have correct epoch when " + testType);
        assertEquals(updated, upgradeRequest.canUseTopicIds());
    }
}
Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project: the method testIdUsageRevokedOnIdDowngrade of the class FetchSessionHandlerTest.
@Test
public void testIdUsageRevokedOnIdDowngrade() {
    // Remove the topic ID from a partition the session already tracks (partition 0),
    // and separately add a brand-new partition without an ID (partition 1).
    for (int partition = 0; partition < 2; partition++) {
        String testType = partition == 0 ? "updating a partition" : "adding a new partition";
        Uuid fooId = Uuid.randomUuid();
        FetchSessionHandler sessionHandler = new FetchSessionHandler(LOG_CONTEXT, 1);

        // First request: full fetch for foo-0 with a topic ID, so topic IDs are usable.
        FetchSessionHandler.Builder initialBuilder = sessionHandler.newBuilder();
        initialBuilder.add(new TopicPartition("foo", 0),
            new FetchRequest.PartitionData(fooId, 0, 100, 200, Optional.empty()));
        FetchSessionHandler.FetchRequestData initialRequest = initialBuilder.build();
        assertMapsEqual(reqMap(new ReqEntry("foo", fooId, 0, 0, 100, 200)),
            initialRequest.toSend(), initialRequest.sessionPartitions());
        assertTrue(initialRequest.metadata().isFull());
        assertTrue(initialRequest.canUseTopicIds());

        FetchResponse response = FetchResponse.of(Errors.NONE, 0, 123,
            respMap(new RespEntry("foo", 0, fooId, 10, 20)));
        sessionHandler.handleResponse(response, ApiKeys.FETCH.latestVersion());

        // Second request: the chosen partition now carries no topic ID (ZERO_UUID).
        FetchSessionHandler.Builder downgradeBuilder = sessionHandler.newBuilder();
        downgradeBuilder.add(new TopicPartition("foo", partition),
            new FetchRequest.PartitionData(Uuid.ZERO_UUID, 10, 110, 210, Optional.empty()));
        FetchSessionHandler.FetchRequestData downgradeRequest = downgradeBuilder.build();

        // Session ID and epoch are retained, but topic IDs can no longer be used.
        // The receiving broker will handle closing the session.
        assertEquals(123, downgradeRequest.metadata().sessionId(), "Did not use same session when " + testType);
        assertEquals(1, downgradeRequest.metadata().epoch(), "Did not have correct epoch when " + testType);
        assertFalse(downgradeRequest.canUseTopicIds());
    }
}
Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project: the method testForgottenTopics of the class FetchRequestTest.
@ParameterizedTest
@MethodSource("fetchVersions")
public void testForgottenTopics(short version) {
    // Forgotten topics are not allowed prior to version 7, so there is nothing to
    // test for those versions; return early instead of nesting the whole body.
    if (version < 7) {
        return;
    }
    TopicPartition topicPartition0 = new TopicPartition("topic", 0);
    TopicPartition topicPartition1 = new TopicPartition("unknownIdTopic", 0);
    Uuid topicId0 = Uuid.randomUuid();
    Uuid topicId1 = Uuid.randomUuid();
    // Only include topic IDs for the first topic partition, so the second one
    // exercises the path where the ID is missing from the name map.
    Map<Uuid, String> topicNames = Collections.singletonMap(topicId0, topicPartition0.topic());
    // Include one topic with topic IDs in the topic names map and one without.
    List<TopicIdPartition> toForgetTopics = new LinkedList<>();
    toForgetTopics.add(new TopicIdPartition(topicId0, topicPartition0));
    toForgetTopics.add(new TopicIdPartition(topicId1, topicPartition1));
    boolean fetchRequestUsesTopicIds = version >= 13;
    // Round-trip the request through serialization to mimic what a broker receives.
    FetchRequest fetchRequest = FetchRequest.parse(
        FetchRequest.Builder.forReplica(version, 0, 1, 1, Collections.emptyMap())
            .removed(toForgetTopics)
            .replaced(Collections.emptyList())
            .metadata(FetchMetadata.newIncremental(123))
            .build(version)
            .serialize(),
        version);
    // For versions < 13, we will be provided a topic name and a zero Uuid in FetchRequestData.
    // Versions 13+ will contain a valid topic ID but an empty topic name.
    List<TopicIdPartition> expectedForgottenTopicData = new LinkedList<>();
    toForgetTopics.forEach(tidp -> {
        String expectedName = fetchRequestUsesTopicIds ? "" : tidp.topic();
        Uuid expectedTopicId = fetchRequestUsesTopicIds ? tidp.topicId() : Uuid.ZERO_UUID;
        expectedForgottenTopicData.add(new TopicIdPartition(expectedTopicId, tidp.partition(), expectedName));
    });
    // Build the list of TopicIdPartitions based on the FetchRequestData that was serialized and parsed.
    List<TopicIdPartition> convertedForgottenTopicData = new LinkedList<>();
    fetchRequest.data().forgottenTopicsData().forEach(forgottenTopic ->
        forgottenTopic.partitions().forEach(partition ->
            convertedForgottenTopicData.add(
                new TopicIdPartition(forgottenTopic.topicId(), partition, forgottenTopic.topic()))));
    // The TopicIdPartitions built from the request data should match what we expect.
    assertEquals(expectedForgottenTopicData, convertedForgottenTopicData);
    // Get the forgottenTopics from the request data.
    // For fetch request version 13+ we expect topic names to be filled in for all
    // topics in the topicNames map; otherwise, the topic name should be null.
    // For earlier request versions, we expect topic names and zero Uuids.
    List<TopicIdPartition> forgottenTopics = fetchRequest.forgottenTopics(topicNames);
    assertEquals(expectedForgottenTopicData.size(), forgottenTopics.size());
    // Build the list of expected TopicIdPartitions. These are different from the
    // earlier expected topicIdPartitions as empty strings are converted to nulls.
    List<TopicIdPartition> expectedForgottenTopics = new LinkedList<>();
    expectedForgottenTopicData.forEach(tidp -> {
        String expectedName = fetchRequestUsesTopicIds ? topicNames.get(tidp.topicId()) : tidp.topic();
        expectedForgottenTopics.add(new TopicIdPartition(tidp.topicId(), new TopicPartition(expectedName, tidp.partition())));
    });
    assertEquals(expectedForgottenTopics, forgottenTopics);
}
Usage example of org.apache.kafka.common.Uuid in the Apache Kafka project: the method testFetchData of the class FetchRequestTest.
@ParameterizedTest
@MethodSource("fetchVersions")
public void testFetchData(short version) {
    // One topic whose ID appears in the name map, and one whose ID does not.
    TopicPartition topicPartition0 = new TopicPartition("topic", 0);
    TopicPartition topicPartition1 = new TopicPartition("unknownIdTopic", 0);
    Uuid topicId0 = Uuid.randomUuid();
    Uuid topicId1 = Uuid.randomUuid();
    // Only include a topic ID mapping for the first topic partition.
    Map<Uuid, String> topicNames = Collections.singletonMap(topicId0, topicPartition0.topic());
    List<TopicIdPartition> topicIdPartitions = new LinkedList<>();
    topicIdPartitions.add(new TopicIdPartition(topicId0, topicPartition0));
    topicIdPartitions.add(new TopicIdPartition(topicId1, topicPartition1));
    Map<TopicPartition, FetchRequest.PartitionData> partitionData = new LinkedHashMap<>();
    partitionData.put(topicPartition0, new FetchRequest.PartitionData(topicId0, 0, 0, 0, Optional.empty()));
    partitionData.put(topicPartition1, new FetchRequest.PartitionData(topicId1, 0, 0, 0, Optional.empty()));
    boolean fetchRequestUsesTopicIds = version >= 13;
    // Serialize and re-parse the request to mirror what the broker would see.
    FetchRequest fetchRequest = FetchRequest.parse(
        FetchRequest.Builder.forReplica(version, 0, 1, 1, partitionData)
            .removed(Collections.emptyList())
            .replaced(Collections.emptyList())
            .metadata(FetchMetadata.newIncremental(123))
            .build(version)
            .serialize(),
        version);
    // Versions < 13 carry a topic name and a zero UUID in FetchRequestData;
    // versions 13+ carry a valid topic ID but an empty topic name.
    List<TopicIdPartition> expectedParsedData = new LinkedList<>();
    for (TopicIdPartition original : topicIdPartitions) {
        String expectedName = fetchRequestUsesTopicIds ? "" : original.topic();
        Uuid expectedTopicId = fetchRequestUsesTopicIds ? original.topicId() : Uuid.ZERO_UUID;
        expectedParsedData.add(new TopicIdPartition(expectedTopicId, original.partition(), expectedName));
    }
    // Rebuild TopicIdPartitions from the FetchRequestData that was serialized and parsed.
    List<TopicIdPartition> actualParsedData = new LinkedList<>();
    fetchRequest.data().topics().forEach(topic -> topic.partitions().forEach(partition ->
        actualParsedData.add(new TopicIdPartition(topic.topicId(), partition.partition(), topic.topic()))));
    assertEquals(expectedParsedData, actualParsedData);
    // For version 13+ we expect topic names to be resolved from the topicNames map
    // (null when the ID is unknown); for earlier versions, original names and zero UUIDs.
    Map<TopicIdPartition, FetchRequest.PartitionData> expectedFetchData = new LinkedHashMap<>();
    for (TopicIdPartition expected : expectedParsedData) {
        String expectedName = fetchRequestUsesTopicIds ? topicNames.get(expected.topicId()) : expected.topic();
        TopicIdPartition key = new TopicIdPartition(expected.topicId(), new TopicPartition(expectedName, expected.partition()));
        // logStartOffset was not a valid field in versions 4 and earlier.
        int logStartOffset = version > 4 ? 0 : -1;
        expectedFetchData.put(key, new FetchRequest.PartitionData(expected.topicId(), 0, logStartOffset, 0, Optional.empty()));
    }
    assertEquals(expectedFetchData, fetchRequest.fetchData(topicNames));
}
Aggregations