Search in sources :

Example 21 with Time

Usage of org.apache.kafka.common.utils.Time in the Apache Kafka project.

From class MetadataTest, method testEpochUpdateAfterTopicDeletion:

@Test
public void testEpochUpdateAfterTopicDeletion() {
    TopicPartition topicPartition = new TopicPartition("topic-1", 0);
    MetadataResponse response = emptyMetadataResponse();
    metadata.updateWithCurrentRequestVersion(response, false, 0L);
    // Seed the metadata with topic-1 bound to a freshly generated topic ID and leader epoch 10.
    Map<String, Uuid> initialTopicIds = Collections.singletonMap("topic-1", Uuid.randomUuid());
    response = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 10, initialTopicIds);
    metadata.updateWithCurrentRequestVersion(response, false, 1L);
    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(topicPartition));
    // topic-1 is deleted, so the response carries an error; the last-seen leader epoch must be retained.
    response = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.singletonMap("topic-1", Errors.UNKNOWN_TOPIC_OR_PARTITION), Collections.emptyMap());
    metadata.updateWithCurrentRequestVersion(response, false, 1L);
    assertEquals(Optional.of(10), metadata.lastSeenLeaderEpoch(topicPartition));
    // Recreate topic-1 under a different topic ID: the new (even lower) epoch must be accepted.
    Map<String, Uuid> recreatedTopicIds = Collections.singletonMap("topic-1", Uuid.randomUuid());
    response = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 5, recreatedTopicIds);
    metadata.updateWithCurrentRequestVersion(response, false, 1L);
    assertEquals(Optional.of(5), metadata.lastSeenLeaderEpoch(topicPartition));
}
Also used : Uuid(org.apache.kafka.common.Uuid) Assertions.assertThrows(org.junit.jupiter.api.Assertions.assertThrows) MessageUtil(org.apache.kafka.common.protocol.MessageUtil) Assertions.assertNotNull(org.junit.jupiter.api.Assertions.assertNotNull) MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) MockClusterResourceListener(org.apache.kafka.test.MockClusterResourceListener) Assertions.assertNull(org.junit.jupiter.api.Assertions.assertNull) RequestTestUtils(org.apache.kafka.common.requests.RequestTestUtils) HashMap(java.util.HashMap) AtomicReference(java.util.concurrent.atomic.AtomicReference) ByteBuffer(java.nio.ByteBuffer) HashSet(java.util.HashSet) Cluster(org.apache.kafka.common.Cluster) MetadataResponseBrokerCollection(org.apache.kafka.common.message.MetadataResponseData.MetadataResponseBrokerCollection) MetadataResponseTopic(org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic) Assertions.assertFalse(org.junit.jupiter.api.Assertions.assertFalse) MetadataRequest(org.apache.kafka.common.requests.MetadataRequest) LogContext(org.apache.kafka.common.utils.LogContext) Map(java.util.Map) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) TestUtils.assertOptional(org.apache.kafka.test.TestUtils.assertOptional) Topic(org.apache.kafka.common.internals.Topic) Utils(org.apache.kafka.common.utils.Utils) TopicPartition(org.apache.kafka.common.TopicPartition) MetadataResponseTopicCollection(org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopicCollection) Time(org.apache.kafka.common.utils.Time) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) Set(java.util.Set) ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) ApiKeys(org.apache.kafka.common.protocol.ApiKeys) InetSocketAddress(java.net.InetSocketAddress) Test(org.junit.jupiter.api.Test) Objects(java.util.Objects) List(java.util.List) 
MetadataResponsePartition(org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) Errors(org.apache.kafka.common.protocol.Errors) Optional(java.util.Optional) Node(org.apache.kafka.common.Node) MetadataResponseData(org.apache.kafka.common.message.MetadataResponseData) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) Collections(java.util.Collections) Uuid(org.apache.kafka.common.Uuid) TopicPartition(org.apache.kafka.common.TopicPartition) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) Test(org.junit.jupiter.api.Test)

Example 22 with Time

Usage of org.apache.kafka.common.utils.Time in the Apache Kafka project.

From class MetadataTest, method testRequestVersion:

@Test
public void testRequestVersion() {
    Time clock = new MockTime();
    metadata.requestUpdate();
    Metadata.MetadataRequestAndVersion requestAndVersion = metadata.newMetadataRequestAndVersion(clock.milliseconds());
    metadata.update(requestAndVersion.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), false, clock.milliseconds());
    assertFalse(metadata.updateRequested());
    // Bump the request version by requesting an update for newly added topics.
    metadata.requestUpdateForNewTopics();
    // Simulate another bump arriving while a metadata request is already in flight.
    requestAndVersion = metadata.newMetadataRequestAndVersion(clock.milliseconds());
    metadata.requestUpdateForNewTopics();
    metadata.update(requestAndVersion.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), true, clock.milliseconds());
    // The in-flight response does not cover the later bump, so an update is still pending.
    assertTrue(metadata.updateRequested());
    // The next update resolves the pending request.
    requestAndVersion = metadata.newMetadataRequestAndVersion(clock.milliseconds());
    metadata.update(requestAndVersion.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), true, clock.milliseconds());
    assertFalse(metadata.updateRequested());
}
Also used : MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) MockTime(org.apache.kafka.common.utils.MockTime) Test(org.junit.jupiter.api.Test)

Example 23 with Time

Usage of org.apache.kafka.common.utils.Time in the Apache Kafka project.

From class MetadataTest, method testPartialMetadataUpdate:

@Test
public void testPartialMetadataUpdate() {
    Time clock = new MockTime();
    // Override the partial-update builder so a "new topics" request is still constructible in the test.
    metadata = new Metadata(refreshBackoffMs, metadataExpireMs, new LogContext(), new ClusterResourceListeners()) {

        @Override
        protected MetadataRequest.Builder newMetadataRequestBuilderForNewTopics() {
            return newMetadataRequestBuilder();
        }
    };
    assertFalse(metadata.updateRequested());
    // A plain update request must translate into a full metadata request.
    metadata.requestUpdate();
    Metadata.MetadataRequestAndVersion request = metadata.newMetadataRequestAndVersion(clock.milliseconds());
    assertFalse(request.isPartialUpdate);
    metadata.update(request.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), false, clock.milliseconds());
    assertFalse(metadata.updateRequested());
    // An update for a new topic alone should produce a partial metadata update.
    metadata.requestUpdateForNewTopics();
    request = metadata.newMetadataRequestAndVersion(clock.milliseconds());
    assertTrue(request.isPartialUpdate);
    metadata.update(request.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), true, clock.milliseconds());
    assertFalse(metadata.updateRequested());
    // Requesting both kinds at once must always escalate to a full update.
    metadata.requestUpdate();
    metadata.requestUpdateForNewTopics();
    request = metadata.newMetadataRequestAndVersion(clock.milliseconds());
    assertFalse(request.isPartialUpdate);
    metadata.update(request.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), false, clock.milliseconds());
    assertFalse(metadata.updateRequested());
    // A partial request made after the expiry window has elapsed must become a full refresh.
    metadata.requestUpdateForNewTopics();
    final long refreshTimeMs = clock.milliseconds() + metadata.metadataExpireMs();
    request = metadata.newMetadataRequestAndVersion(refreshTimeMs);
    assertFalse(request.isPartialUpdate);
    metadata.update(request.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1)), true, refreshTimeMs);
    assertFalse(metadata.updateRequested());
    // Two overlapping partial updates: only the second response clears the pending flag.
    metadata.requestUpdateForNewTopics();
    request = metadata.newMetadataRequestAndVersion(clock.milliseconds());
    assertTrue(request.isPartialUpdate);
    metadata.requestUpdateForNewTopics();
    Metadata.MetadataRequestAndVersion overlappingRequest = metadata.newMetadataRequestAndVersion(clock.milliseconds());
    assertTrue(overlappingRequest.isPartialUpdate);
    assertTrue(metadata.updateRequested());
    metadata.update(request.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic-1", 1)), true, clock.milliseconds());
    assertTrue(metadata.updateRequested());
    metadata.update(overlappingRequest.requestVersion, RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic-2", 1)), true, clock.milliseconds());
    assertFalse(metadata.updateRequested());
}
Also used : ClusterResourceListeners(org.apache.kafka.common.internals.ClusterResourceListeners) LogContext(org.apache.kafka.common.utils.LogContext) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) MockTime(org.apache.kafka.common.utils.MockTime) Test(org.junit.jupiter.api.Test)

Example 24 with Time

Usage of org.apache.kafka.common.utils.Time in the Apache Kafka project.

From class MetadataTest, method testMetadataTopicErrors:

@Test
public void testMetadataTopicErrors() {
    Time clock = new MockTime();
    Map<String, Errors> errorsByTopic = new HashMap<>(3);
    errorsByTopic.put("invalidTopic", Errors.INVALID_TOPIC_EXCEPTION);
    errorsByTopic.put("sensitiveTopic1", Errors.TOPIC_AUTHORIZATION_FAILED);
    errorsByTopic.put("sensitiveTopic2", Errors.TOPIC_AUTHORIZATION_FAILED);
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("clusterId", 1, errorsByTopic, Collections.emptyMap());
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, clock.milliseconds());
    TopicAuthorizationException authException1 = assertThrows(TopicAuthorizationException.class, () -> metadata.maybeThrowExceptionForTopic("sensitiveTopic1"));
    assertEquals(Collections.singleton("sensitiveTopic1"), authException1.unauthorizedTopics());
    // Once an exception has been surfaced to the user it is cleared and must not throw again.
    metadata.maybeThrowAnyException();
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, clock.milliseconds());
    TopicAuthorizationException authException2 = assertThrows(TopicAuthorizationException.class, () -> metadata.maybeThrowExceptionForTopic("sensitiveTopic2"));
    assertEquals(Collections.singleton("sensitiveTopic2"), authException2.unauthorizedTopics());
    metadata.maybeThrowAnyException();
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, clock.milliseconds());
    InvalidTopicException invalidException = assertThrows(InvalidTopicException.class, () -> metadata.maybeThrowExceptionForTopic("invalidTopic"));
    assertEquals(Collections.singleton("invalidTopic"), invalidException.invalidTopics());
    metadata.maybeThrowAnyException();
    // Querying an unrelated topic must not throw, but it still clears any stored exception.
    metadata.updateWithCurrentRequestVersion(metadataResponse, false, clock.milliseconds());
    metadata.maybeThrowExceptionForTopic("anotherTopic");
    metadata.maybeThrowAnyException();
}
Also used : Errors(org.apache.kafka.common.protocol.Errors) HashMap(java.util.HashMap) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) MockTime(org.apache.kafka.common.utils.MockTime) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) Test(org.junit.jupiter.api.Test)

Example 25 with Time

Usage of org.apache.kafka.common.utils.Time in the Apache Kafka project.

From class SslSelectorTest, method testConnectionWithCustomKeyManager:

@Test
public void testConnectionWithCustomKeyManager() throws Exception {
    TestProviderCreator providerCreator = new TestProviderCreator();
    int payloadSize = 100 * 1024;
    final String node = "0";
    String payload = TestUtils.randomString(payloadSize);
    // Server side: SSL config backed by the custom key/trust manager test provider.
    Map<String, Object> serverSslConfigs = TestSslUtils.createSslConfig(TestKeyManagerFactory.ALGORITHM, TestTrustManagerFactory.ALGORITHM, TestSslUtils.DEFAULT_TLS_PROTOCOL_FOR_TESTS);
    serverSslConfigs.put(SecurityConfig.SECURITY_PROVIDERS_CONFIG, providerCreator.getClass().getName());
    EchoServer server = new EchoServer(SecurityProtocol.SSL, serverSslConfigs);
    server.start();
    Time clock = new MockTime();
    File trustStoreFile = new File(TestKeyManagerFactory.TestKeyManager.mockTrustStoreFile);
    // Client side: trust store produced by the mock key manager.
    Map<String, Object> clientSslConfigs = TestSslUtils.createSslConfig(true, true, Mode.CLIENT, trustStoreFile, "client");
    ChannelBuilder channelBuilder = new TestSslChannelBuilder(Mode.CLIENT);
    channelBuilder.configure(clientSslConfigs);
    Metrics metrics = new Metrics();
    Selector selector = new Selector(5000, metrics, clock, "MetricGroup", channelBuilder, new LogContext());
    selector.connect(node, new InetSocketAddress("localhost", server.port), BUFFER_SIZE, BUFFER_SIZE);
    while (!selector.connected().contains(node)) selector.poll(10000L);
    while (!selector.isChannelReady(node)) selector.poll(10000L);
    selector.send(createSend(node, payload));
    waitForBytesBuffered(selector, node);
    // The handshake should register exactly one cipher metric with value 1 while connected.
    TestUtils.waitForCondition(() -> cipherMetrics(metrics).size() == 1, "Waiting for cipher metrics to be created.");
    assertEquals(Integer.valueOf(1), cipherMetrics(metrics).get(0).metricValue());
    assertNotNull(selector.channel(node).channelMetadataRegistry().cipherInformation());
    selector.close(node);
    super.verifySelectorEmpty(selector);
    // After closing the channel the metric remains but drops to 0.
    assertEquals(1, cipherMetrics(metrics).size());
    assertEquals(Integer.valueOf(0), cipherMetrics(metrics).get(0).metricValue());
    Security.removeProvider(providerCreator.getProvider().getName());
    selector.close();
    server.close();
    metrics.close();
}
Also used : InetSocketAddress(java.net.InetSocketAddress) TestProviderCreator(org.apache.kafka.common.security.ssl.mock.TestProviderCreator) LogContext(org.apache.kafka.common.utils.LogContext) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) Metrics(org.apache.kafka.common.metrics.Metrics) File(java.io.File) MockTime(org.apache.kafka.common.utils.MockTime) Test(org.junit.jupiter.api.Test)

Aggregations

Time (org.apache.kafka.common.utils.Time)125 MockTime (org.apache.kafka.common.utils.MockTime)107 Test (org.junit.jupiter.api.Test)63 MockClient (org.apache.kafka.clients.MockClient)55 HashMap (java.util.HashMap)53 Cluster (org.apache.kafka.common.Cluster)41 Test (org.junit.Test)40 Node (org.apache.kafka.common.Node)39 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)32 MetadataResponse (org.apache.kafka.common.requests.MetadataResponse)31 StringSerializer (org.apache.kafka.common.serialization.StringSerializer)30 Metadata (org.apache.kafka.clients.Metadata)28 ProducerMetadata (org.apache.kafka.clients.producer.internals.ProducerMetadata)25 TopicPartition (org.apache.kafka.common.TopicPartition)22 PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor)21 LogContext (org.apache.kafka.common.utils.LogContext)17 Map (java.util.Map)14 Properties (java.util.Properties)14 MetricName (org.apache.kafka.common.MetricName)14 ExecutionException (java.util.concurrent.ExecutionException)13