Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache: class MetadataTest, method testRequestUpdate. The test verifies that a metadata update is requested only when a strictly newer leader epoch is observed for a partition.
@Test
public void testRequestUpdate() {
    assertFalse(metadata.updateRequested());

    int[] epochs = { 42, 42, 41, 41, 42, 43, 43, 42, 41, 44 };
    boolean[] updateResult = { true, false, false, false, false, true, false, false, false, true };
    TopicPartition tp = new TopicPartition("topic", 0);

    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1,
            Collections.emptyMap(), Collections.singletonMap("topic", 1), _tp -> 0);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L);

    // An update is requested only when a strictly newer leader epoch than the
    // last seen one arrives; equal or older epochs are ignored.
    for (int i = 0; i < epochs.length; i++) {
        metadata.updateLastSeenEpochIfNewer(tp, epochs[i]);
        if (updateResult[i]) {
            assertTrue(metadata.updateRequested(), "Expected metadata update to be requested [" + i + "]");
        } else {
            assertFalse(metadata.updateRequested(), "Did not expect metadata update to be requested [" + i + "]");
        }
        metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);
        assertFalse(metadata.updateRequested());
    }
}
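The test relies on an emptyMetadataResponse() helper that is not shown on this page. A minimal sketch of what such a helper could look like, assuming it simply builds a response with no brokers and no topics through the same RequestTestUtils factory (the actual helper in MetadataTest may be constructed differently):

// Hypothetical sketch: an "empty" response carries no topic metadata, so applying
// it clears the pending-update flag without displacing any last-seen epochs.
private static MetadataResponse emptyMetadataResponse() {
    return RequestTestUtils.metadataUpdateWith("empty-cluster", 0,
            Collections.emptyMap(), Collections.emptyMap());
}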
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache: class MetadataTest, method testMetadataMerge. The test verifies how topic metadata, topic IDs, and cluster state are merged across successive updates when only a subset of topics is retained.
@Test
public void testMetadataMerge() {
    Time time = new MockTime();
    Map<String, Uuid> topicIds = new HashMap<>();

    final AtomicReference<Set<String>> retainTopics = new AtomicReference<>(new HashSet<>());
    metadata = new Metadata(refreshBackoffMs, metadataExpireMs, new LogContext(), new ClusterResourceListeners()) {
        @Override
        protected boolean retainTopic(String topic, boolean isInternal, long nowMs) {
            return retainTopics.get().contains(topic);
        }
    };

    // Initialize a metadata instance with two topic variants, "old" and "keep". Both will be retained.
    String oldClusterId = "oldClusterId";
    int oldNodes = 2;

    Map<String, Errors> oldTopicErrors = new HashMap<>();
    oldTopicErrors.put("oldInvalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
    oldTopicErrors.put("keepInvalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
    oldTopicErrors.put("oldUnauthorizedTopic", Errors.TOPIC_AUTHORIZATION_FAILED);
    oldTopicErrors.put("keepUnauthorizedTopic", Errors.TOPIC_AUTHORIZATION_FAILED);

    Map<String, Integer> oldTopicPartitionCounts = new HashMap<>();
    oldTopicPartitionCounts.put("oldValidTopic", 2);
    oldTopicPartitionCounts.put("keepValidTopic", 3);

    retainTopics.set(Utils.mkSet("oldInvalidTopic", "keepInvalidTopic", "oldUnauthorizedTopic",
            "keepUnauthorizedTopic", "oldValidTopic", "keepValidTopic"));

    topicIds.put("oldValidTopic", Uuid.randomUuid());
    topicIds.put("keepValidTopic", Uuid.randomUuid());
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWithIds(oldClusterId, oldNodes,
            oldTopicErrors, oldTopicPartitionCounts, _tp -> 100, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());

    Map<String, Uuid> metadataTopicIds1 = metadata.topicIds();
    retainTopics.get().forEach(topic -> assertEquals(metadataTopicIds1.get(topic), topicIds.get(topic)));

    // Verify the state resulting from the initial update before moving on.
    Cluster cluster = metadata.fetch();
    assertEquals(cluster.clusterResource().clusterId(), oldClusterId);
    assertEquals(cluster.nodes().size(), oldNodes);
    assertEquals(cluster.invalidTopics(), new HashSet<>(Arrays.asList("oldInvalidTopic", "keepInvalidTopic")));
    assertEquals(cluster.unauthorizedTopics(), new HashSet<>(Arrays.asList("oldUnauthorizedTopic", "keepUnauthorizedTopic")));
    assertEquals(cluster.topics(), new HashSet<>(Arrays.asList("oldValidTopic", "keepValidTopic")));
    assertEquals(cluster.partitionsForTopic("oldValidTopic").size(), 2);
    assertEquals(cluster.partitionsForTopic("keepValidTopic").size(), 3);
    assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values()));

    // Update the metadata to add a new topic variant, "new", which will be retained alongside "keep".
    // Note this means that all of the "old" topics should be dropped.
    String newClusterId = "newClusterId";
    int newNodes = oldNodes + 1;

    Map<String, Errors> newTopicErrors = new HashMap<>();
    newTopicErrors.put("newInvalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
    newTopicErrors.put("newUnauthorizedTopic", Errors.TOPIC_AUTHORIZATION_FAILED);

    Map<String, Integer> newTopicPartitionCounts = new HashMap<>();
    newTopicPartitionCounts.put("keepValidTopic", 2);
    newTopicPartitionCounts.put("newValidTopic", 4);

    retainTopics.set(Utils.mkSet("keepInvalidTopic", "newInvalidTopic", "keepUnauthorizedTopic",
            "newUnauthorizedTopic", "keepValidTopic", "newValidTopic"));

    topicIds.put("newValidTopic", Uuid.randomUuid());
    metadataResponse = RequestTestUtils.metadataUpdateWithIds(newClusterId, newNodes,
            newTopicErrors, newTopicPartitionCounts, _tp -> 200, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());
    topicIds.remove("oldValidTopic");

    Map<String, Uuid> metadataTopicIds2 = metadata.topicIds();
    retainTopics.get().forEach(topic -> assertEquals(metadataTopicIds2.get(topic), topicIds.get(topic)));
    assertNull(metadataTopicIds2.get("oldValidTopic"));

    cluster = metadata.fetch();
    assertEquals(cluster.clusterResource().clusterId(), newClusterId);
    assertEquals(cluster.nodes().size(), newNodes);
    assertEquals(cluster.invalidTopics(), new HashSet<>(Arrays.asList("keepInvalidTopic", "newInvalidTopic")));
    assertEquals(cluster.unauthorizedTopics(), new HashSet<>(Arrays.asList("keepUnauthorizedTopic", "newUnauthorizedTopic")));
    assertEquals(cluster.topics(), new HashSet<>(Arrays.asList("keepValidTopic", "newValidTopic")));
    assertEquals(cluster.partitionsForTopic("keepValidTopic").size(), 2);
    assertEquals(cluster.partitionsForTopic("newValidTopic").size(), 4);
    assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values()));

    // Perform another metadata update, but this time all topic metadata should be cleared.
    retainTopics.set(Collections.emptySet());

    metadataResponse = RequestTestUtils.metadataUpdateWithIds(newClusterId, newNodes,
            newTopicErrors, newTopicPartitionCounts, _tp -> 300, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, true, time.milliseconds());

    Map<String, Uuid> metadataTopicIds3 = metadata.topicIds();
    topicIds.forEach((topicName, topicId) -> assertNull(metadataTopicIds3.get(topicName)));

    cluster = metadata.fetch();
    assertEquals(cluster.clusterResource().clusterId(), newClusterId);
    assertEquals(cluster.nodes().size(), newNodes);
    assertEquals(cluster.invalidTopics(), Collections.emptySet());
    assertEquals(cluster.unauthorizedTopics(), Collections.emptySet());
    assertEquals(cluster.topics(), Collections.emptySet());
    assertTrue(cluster.topicIds().isEmpty());
}
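The anonymous subclass at the top of the test is the crux: retainTopic() decides, for each update, which topics survive the merge. A standalone sketch of the same pattern, assuming the four-argument Metadata constructor used in the test (the field names and backoff values here are illustrative):

// Illustrative only: a Metadata instance that retains an explicit allow-list
// of topics; anything not in the set is dropped on the next metadata update.
Set<String> allowed = new HashSet<>(Arrays.asList("keepValidTopic", "newValidTopic"));
Metadata filtering = new Metadata(100L, 60_000L, new LogContext(), new ClusterResourceListeners()) {
    @Override
    protected boolean retainTopic(String topic, boolean isInternal, long nowMs) {
        return allowed.contains(topic);
    }
};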
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache: class MetadataTest, method testClusterCopy. The test verifies the Cluster view built from a metadata update, along with the bootstrap and empty sentinel instances.
@Test
public void testClusterCopy() {
    Map<String, Integer> counts = new HashMap<>();
    Map<String, Errors> errors = new HashMap<>();
    counts.put("topic1", 2);
    counts.put("topic2", 3);
    counts.put(Topic.GROUP_METADATA_TOPIC_NAME, 3);
    errors.put("topic3", Errors.INVALID_TOPIC_EXCEPTION);
    errors.put("topic4", Errors.TOPIC_AUTHORIZATION_FAILED);

    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 4, errors, counts);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L);

    Cluster cluster = metadata.fetch();
    assertEquals(cluster.clusterResource().clusterId(), "dummy");
    assertEquals(cluster.nodes().size(), 4);

    // Topic counts
    assertEquals(cluster.invalidTopics(), Collections.singleton("topic3"));
    assertEquals(cluster.unauthorizedTopics(), Collections.singleton("topic4"));
    assertEquals(cluster.topics().size(), 3);
    assertEquals(cluster.internalTopics(), Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME));

    // Partition counts
    assertEquals(cluster.partitionsForTopic("topic1").size(), 2);
    assertEquals(cluster.partitionsForTopic("topic2").size(), 3);

    // Sentinel instances
    InetSocketAddress address = InetSocketAddress.createUnresolved("localhost", 0);
    Cluster fromMetadata = MetadataCache.bootstrap(Collections.singletonList(address)).cluster();
    Cluster fromCluster = Cluster.bootstrap(Collections.singletonList(address));
    assertEquals(fromMetadata, fromCluster);

    Cluster fromMetadataEmpty = MetadataCache.empty().cluster();
    Cluster fromClusterEmpty = Cluster.empty();
    assertEquals(fromMetadataEmpty, fromClusterEmpty);
}
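The sentinel checks at the end compare the two ways of building a bootstrap Cluster. A short usage sketch of the Cluster.bootstrap path on its own, assuming an unresolved address as in the test:

// A bootstrap Cluster is a placeholder built from addresses only; it reports
// isBootstrapConfigured() until a real metadata response replaces it.
List<InetSocketAddress> addresses = Collections.singletonList(
        InetSocketAddress.createUnresolved("localhost", 9092));
Cluster bootstrap = Cluster.bootstrap(addresses);
assertTrue(bootstrap.isBootstrapConfigured());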
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache: class MetadataTest, method testEpochUpdateOnChangedTopicIds. The test verifies that the last seen leader epoch is replaced when a newer epoch arrives for the same topic ID, or whenever the topic ID itself changes.
@Test
public void testEpochUpdateOnChangedTopicIds() {
    TopicPartition tp = new TopicPartition("topic-1", 0);
    Map<String, Uuid> topicIds = Collections.singletonMap("topic-1", Uuid.randomUuid());

    MetadataResponse metadataResponse = emptyMetadataResponse();
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L);

    // Start with a topic with no topic ID
    metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1,
            Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 100);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 1L);
    assertEquals(Optional.of(100), metadata.lastSeenLeaderEpoch(tp));

    // If the old topic ID was null, accept the new topic ID and its epoch, even though the epoch is lower
    metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1,
            Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 10, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 2L);
    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(tp));

    // Don't cause an update if the topic ID and epoch are unchanged
    metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1,
            Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 10, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 3L);
    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(tp));

    // Update if we see a newer epoch for the same topic ID
    metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1,
            Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 12, topicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 4L);
    assertEquals(Optional.of(12), metadata.lastSeenLeaderEpoch(tp));

    // We should also update if we see a new topic ID, even if the epoch is lower
    Map<String, Uuid> newTopicIds = Collections.singletonMap("topic-1", Uuid.randomUuid());
    metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1,
            Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 3, newTopicIds);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 5L);
    assertEquals(Optional.of(3), metadata.lastSeenLeaderEpoch(tp));

    // Finally, update when the topic ID is new and the epoch is higher
    Map<String, Uuid> newTopicIds2 = Collections.singletonMap("topic-1", Uuid.randomUuid());
    metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1,
            Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 20, newTopicIds2);
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, 6L);
    assertEquals(Optional.of(20), metadata.lastSeenLeaderEpoch(tp));
}
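The retention rule the test exercises can be seen more directly through updateLastSeenEpochIfNewer: for a fixed topic ID, only a strictly newer epoch is kept. A minimal sketch, assuming a metadata instance already populated for the partition:

// Older or equal epochs are ignored; a newer one replaces the last seen value.
TopicPartition tp = new TopicPartition("topic-1", 0);
metadata.updateLastSeenEpochIfNewer(tp, 12); // retained: newer than anything seen so far
metadata.updateLastSeenEpochIfNewer(tp, 10); // ignored: older than 12
assertEquals(Optional.of(12), metadata.lastSeenLeaderEpoch(tp));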
Use of org.apache.kafka.common.requests.MetadataResponse in project kafka by apache: class MetadataTest, method testInvalidTopicError. The test verifies that an INVALID_TOPIC_EXCEPTION error is buffered, raised once via maybeThrowAnyException(), and cleared by a subsequent good update.
@Test
public void testInvalidTopicError() {
    Time time = new MockTime();

    String invalidTopic = "topic dfsa";
    MetadataResponse invalidTopicResponse = RequestTestUtils.metadataUpdateWith("clusterId", 1,
            Collections.singletonMap(invalidTopic, Errors.INVALID_TOPIC_EXCEPTION), Collections.emptyMap());
    metadata.updateWithCurrentRequestVersion(invalidTopicResponse, false, time.milliseconds());

    InvalidTopicException e = assertThrows(InvalidTopicException.class, metadata::maybeThrowAnyException);
    assertEquals(Collections.singleton(invalidTopic), e.invalidTopics());

    // We clear the exception once it has been raised to the user
    metadata.maybeThrowAnyException();

    // Reset the invalid topic error
    metadata.updateWithCurrentRequestVersion(invalidTopicResponse, false, time.milliseconds());

    // If we get a good update, the error should clear even if we haven't had a chance to raise it to the user
    metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, time.milliseconds());
    metadata.maybeThrowAnyException();
}
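Client code that polls metadata typically surfaces these buffered errors the same way the test does. A hedged sketch of how a caller might handle the one-shot InvalidTopicException:

// maybeThrowAnyException() throws at most once per error; the buffered
// exception is cleared as soon as it is raised to the caller.
try {
    metadata.maybeThrowAnyException();
} catch (InvalidTopicException e) {
    // e.invalidTopics() lists the topic names the broker rejected.
    System.err.println("Invalid topics: " + e.invalidTopics());
}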