Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project,
from the class MetadataTest, method testEpochUpdateAfterTopicDeletion.
@Test
public void testEpochUpdateAfterTopicDeletion() {
    TopicPartition partition = new TopicPartition("topic-1", 0);
    metadata.updateWithCurrentRequestVersion(emptyMetadataResponse(), false, 0L);

    // Create "topic-1" with a random topic ID and a leader epoch of 10.
    Map<String, Uuid> initialIds = Collections.singletonMap("topic-1", Uuid.randomUuid());
    MetadataResponse response = RequestTestUtils.metadataUpdateWithIds(
            "dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 10, initialIds);
    metadata.updateWithCurrentRequestVersion(response, false, 1L);
    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(partition));

    // The topic is deleted: the response reports an error, and the previously
    // seen leader epoch must be retained.
    response = RequestTestUtils.metadataUpdateWith(
            "dummy", 1, Collections.singletonMap("topic-1", Errors.UNKNOWN_TOPIC_OR_PARTITION), Collections.emptyMap());
    metadata.updateWithCurrentRequestVersion(response, false, 1L);
    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(partition));

    // The topic is recreated under a different topic ID: the new epoch (5)
    // must be accepted even though it is lower than the old one.
    Map<String, Uuid> recreatedIds = Collections.singletonMap("topic-1", Uuid.randomUuid());
    response = RequestTestUtils.metadataUpdateWithIds(
            "dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 5, recreatedIds);
    metadata.updateWithCurrentRequestVersion(response, false, 1L);
    assertEquals(Optional.of(5), metadata.lastSeenLeaderEpoch(partition));
}
Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project,
from the class MetadataTest, method testRequestVersion.
@Test
public void testRequestVersion() {
    Time time = new MockTime();

    // An ordinary update request is satisfied by a full metadata response.
    metadata.requestUpdate();
    Metadata.MetadataRequestAndVersion requestAndVersion =
            metadata.newMetadataRequestAndVersion(time.milliseconds());
    metadata.update(requestAndVersion.requestVersion,
            RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)),
            false, time.milliseconds());
    assertFalse(metadata.updateRequested());

    // Bump the request version for new topics added to the metadata.
    metadata.requestUpdateForNewTopics();

    // Simulate another bump arriving while a metadata request is in flight:
    // the in-flight response must not satisfy the newer request version.
    requestAndVersion = metadata.newMetadataRequestAndVersion(time.milliseconds());
    metadata.requestUpdateForNewTopics();
    metadata.update(requestAndVersion.requestVersion,
            RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)),
            true, time.milliseconds());
    assertTrue(metadata.updateRequested());

    // The next update carries the bumped version and resolves the pending request.
    requestAndVersion = metadata.newMetadataRequestAndVersion(time.milliseconds());
    metadata.update(requestAndVersion.requestVersion,
            RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)),
            true, time.milliseconds());
    assertFalse(metadata.updateRequested());
}
Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project,
from the class MetadataTest, method testPartialMetadataUpdate.
@Test
public void testPartialMetadataUpdate() {
Time time = new MockTime();
// Override the partial-update request builder so a "new topics" request can be
// built without real topic state; the full and partial builders are interchangeable here.
metadata = new Metadata(refreshBackoffMs, metadataExpireMs, new LogContext(), new ClusterResourceListeners()) {
@Override
protected MetadataRequest.Builder newMetadataRequestBuilderForNewTopics() {
return newMetadataRequestBuilder();
}
};
assertFalse(metadata.updateRequested());
// Request a metadata update. This must force a full metadata update request.
metadata.requestUpdate();
Metadata.MetadataRequestAndVersion versionAndBuilder = metadata.newMetadataRequestAndVersion(time.milliseconds());
assertFalse(versionAndBuilder.isPartialUpdate);
metadata.update(versionAndBuilder.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), false, time.milliseconds());
assertFalse(metadata.updateRequested());
// Request a metadata update for a new topic. This should perform a partial metadata update.
metadata.requestUpdateForNewTopics();
versionAndBuilder = metadata.newMetadataRequestAndVersion(time.milliseconds());
assertTrue(versionAndBuilder.isPartialUpdate);
// The third argument (isPartialUpdate = true) marks the response as partial.
metadata.update(versionAndBuilder.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), true, time.milliseconds());
assertFalse(metadata.updateRequested());
// Request both types of metadata updates. This should always perform a full update.
metadata.requestUpdate();
metadata.requestUpdateForNewTopics();
versionAndBuilder = metadata.newMetadataRequestAndVersion(time.milliseconds());
assertFalse(versionAndBuilder.isPartialUpdate);
metadata.update(versionAndBuilder.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), false, time.milliseconds());
assertFalse(metadata.updateRequested());
// Request only a partial metadata update, but elapse enough time such that a full refresh is needed.
metadata.requestUpdateForNewTopics();
final long refreshTimeMs = time.milliseconds() + metadata.metadataExpireMs();
versionAndBuilder = metadata.newMetadataRequestAndVersion(refreshTimeMs);
assertFalse(versionAndBuilder.isPartialUpdate);
metadata.update(versionAndBuilder.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), true, refreshTimeMs);
assertFalse(metadata.updateRequested());
// Request two partial metadata updates that are overlapping.
metadata.requestUpdateForNewTopics();
versionAndBuilder = metadata.newMetadataRequestAndVersion(time.milliseconds());
assertTrue(versionAndBuilder.isPartialUpdate);
// Second partial request issued while the first is still outstanding.
metadata.requestUpdateForNewTopics();
Metadata.MetadataRequestAndVersion overlappingVersionAndBuilder = metadata.newMetadataRequestAndVersion(time.milliseconds());
assertTrue(overlappingVersionAndBuilder.isPartialUpdate);
assertTrue(metadata.updateRequested());
// Completing only the older of the two overlapping requests must leave the update pending.
metadata.update(versionAndBuilder.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic-1", 1)), true, time.milliseconds());
assertTrue(metadata.updateRequested());
metadata.update(overlappingVersionAndBuilder.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic-2", 1)), true, time.milliseconds());
assertFalse(metadata.updateRequested());
}
Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project,
from the class MetadataTest, method testMetadataTopicErrors.
@Test
public void testMetadataTopicErrors() {
    Time time = new MockTime();

    Map<String, Errors> errorsByTopic = new HashMap<>();
    errorsByTopic.put("invalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
    errorsByTopic.put("sensitiveTopic1", Errors.TOPIC_AUTHORIZATION_FAILED);
    errorsByTopic.put("sensitiveTopic2", Errors.TOPIC_AUTHORIZATION_FAILED);
    MetadataResponse response = RequestTestUtils.metadataUpdateWith("clusterId", 1, errorsByTopic, Collections.emptyMap());

    // An unauthorized topic surfaces as a TopicAuthorizationException naming only that topic.
    metadata.updateWithCurrentRequestVersion(response, false, time.milliseconds());
    TopicAuthorizationException authException1 = assertThrows(TopicAuthorizationException.class,
            () -> metadata.maybeThrowExceptionForTopic("sensitiveTopic1"));
    assertEquals(Collections.singleton("sensitiveTopic1"), authException1.unauthorizedTopics());
    // Once raised to the user, the stored exception is cleared.
    metadata.maybeThrowAnyException();

    metadata.updateWithCurrentRequestVersion(response, false, time.milliseconds());
    TopicAuthorizationException authException2 = assertThrows(TopicAuthorizationException.class,
            () -> metadata.maybeThrowExceptionForTopic("sensitiveTopic2"));
    assertEquals(Collections.singleton("sensitiveTopic2"), authException2.unauthorizedTopics());
    metadata.maybeThrowAnyException();

    // An invalid topic name surfaces as an InvalidTopicException.
    metadata.updateWithCurrentRequestVersion(response, false, time.milliseconds());
    InvalidTopicException invalidException = assertThrows(InvalidTopicException.class,
            () -> metadata.maybeThrowExceptionForTopic("invalidTopic"));
    assertEquals(Collections.singleton("invalidTopic"), invalidException.invalidTopics());
    metadata.maybeThrowAnyException();

    // Querying an unaffected topic throws nothing but still clears any stored exception.
    metadata.updateWithCurrentRequestVersion(response, false, time.milliseconds());
    metadata.maybeThrowExceptionForTopic("anotherTopic");
    metadata.maybeThrowAnyException();
}
Example usage of org.apache.kafka.common.utils.Time in the Apache Kafka project,
from the class SslSelectorTest, method testConnectionWithCustomKeyManager.
@Test
public void testConnectionWithCustomKeyManager() throws Exception {
    TestProviderCreator testProviderCreator = new TestProviderCreator();

    int requestSize = 100 * 1024;
    final String node = "0";
    String request = TestUtils.randomString(requestSize);

    // Server-side SSL config using the custom key/trust manager algorithms, with the
    // test security provider installed through the security.providers config.
    Map<String, Object> sslServerConfigs = TestSslUtils.createSslConfig(
            TestKeyManagerFactory.ALGORITHM, TestTrustManagerFactory.ALGORITHM, TestSslUtils.DEFAULT_TLS_PROTOCOL_FOR_TESTS);
    sslServerConfigs.put(SecurityConfig.SECURITY_PROVIDERS_CONFIG, testProviderCreator.getClass().getName());
    EchoServer server = new EchoServer(SecurityProtocol.SSL, sslServerConfigs);
    server.start();

    Metrics metrics = new Metrics();
    Selector selector = null;
    // Release resources (and uninstall the test provider) even when the handshake or
    // an assertion fails; previously a failure leaked them and polluted later tests.
    try {
        Time time = new MockTime();
        File trustStoreFile = new File(TestKeyManagerFactory.TestKeyManager.mockTrustStoreFile);
        Map<String, Object> sslClientConfigs = TestSslUtils.createSslConfig(true, true, Mode.CLIENT, trustStoreFile, "client");

        ChannelBuilder channelBuilder = new TestSslChannelBuilder(Mode.CLIENT);
        channelBuilder.configure(sslClientConfigs);
        selector = new Selector(5000, metrics, time, "MetricGroup", channelBuilder, new LogContext());
        selector.connect(node, new InetSocketAddress("localhost", server.port), BUFFER_SIZE, BUFFER_SIZE);
        while (!selector.connected().contains(node))
            selector.poll(10000L);
        while (!selector.isChannelReady(node))
            selector.poll(10000L);

        selector.send(createSend(node, request));
        waitForBytesBuffered(selector, node);

        // Exactly one cipher metric should exist, counting the single active channel.
        TestUtils.waitForCondition(() -> cipherMetrics(metrics).size() == 1,
                "Waiting for cipher metrics to be created.");
        assertEquals(Integer.valueOf(1), cipherMetrics(metrics).get(0).metricValue());
        assertNotNull(selector.channel(node).channelMetadataRegistry().cipherInformation());

        // Closing the channel keeps the metric but drops its count to zero.
        selector.close(node);
        super.verifySelectorEmpty(selector);
        assertEquals(1, cipherMetrics(metrics).size());
        assertEquals(Integer.valueOf(0), cipherMetrics(metrics).get(0).metricValue());
    } finally {
        Security.removeProvider(testProviderCreator.getProvider().getName());
        if (selector != null)
            selector.close();
        server.close();
        metrics.close();
    }
}
Aggregations