
Example 1 with MetadataRequest

Use of org.apache.kafka.common.requests.MetadataRequest in project kafka by apache.

From class KafkaAdminClientTest, method testConnectionFailureOnMetadataUpdate.

@Test
public void testConnectionFailureOnMetadataUpdate() throws Exception {
    // This tests the scenario in which we successfully connect to the bootstrap server, but
    // the server disconnects before sending the full response
    Cluster cluster = mockBootstrapCluster();
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster)) {
        Cluster discoveredCluster = mockCluster(3, 0);
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(request -> request instanceof MetadataRequest, null, true);
        env.kafkaClient().prepareResponse(request -> request instanceof MetadataRequest,
                RequestTestUtils.metadataResponse(discoveredCluster.nodes(),
                        discoveredCluster.clusterResource().clusterId(), 1, Collections.emptyList()));
        env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest,
                prepareCreateTopicsResponse("myTopic", Errors.NONE));
        KafkaFuture<Void> future = env.adminClient().createTopics(
                singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
                new CreateTopicsOptions().timeoutMs(10000)).all();
        future.get();
    }
}
Also used: MetadataRequest (org.apache.kafka.common.requests.MetadataRequest), CreateTopicsRequest (org.apache.kafka.common.requests.CreateTopicsRequest), Cluster (org.apache.kafka.common.Cluster), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), Test (org.junit.jupiter.api.Test)
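
The matcher arguments passed to prepareResponse in this test are plain lambdas that type-check the outgoing request. A minimal sketch of factoring that check into a reusable matcher, assuming env.kafkaClient() is Kafka's MockClient test utility and its nested RequestMatcher interface (the helper name isMetadataRequest is an illustrative assumption, not part of the original test):

import org.apache.kafka.clients.MockClient;
import org.apache.kafka.common.requests.AbstractRequest;
import org.apache.kafka.common.requests.MetadataRequest;

// Hypothetical helper: a matcher that accepts any outgoing MetadataRequest, equivalent to
// the request -> request instanceof MetadataRequest lambdas passed to prepareResponse above.
static MockClient.RequestMatcher isMetadataRequest() {
    return (AbstractRequest body) -> body instanceof MetadataRequest;
}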

Example 2 with MetadataRequest

Use of org.apache.kafka.common.requests.MetadataRequest in project kafka by apache.

From class KafkaAdminClientTest, method expectMetadataRequest.

private void expectMetadataRequest(AdminClientUnitTestEnv env, TopicPartition topicPartition, Node leader) {
    MetadataResponseData.MetadataResponseTopicCollection responseTopics = new MetadataResponseData.MetadataResponseTopicCollection();
    MetadataResponseTopic responseTopic = new MetadataResponseTopic().setName(topicPartition.topic()).setErrorCode(Errors.NONE.code());
    responseTopics.add(responseTopic);
    MetadataResponsePartition responsePartition = new MetadataResponsePartition()
            .setErrorCode(Errors.NONE.code())
            .setPartitionIndex(topicPartition.partition())
            .setLeaderId(leader.id())
            .setReplicaNodes(singletonList(leader.id()))
            .setIsrNodes(singletonList(leader.id()));
    responseTopic.partitions().add(responsePartition);
    env.kafkaClient().prepareResponse(request -> {
        if (!(request instanceof MetadataRequest)) {
            return false;
        }
        MetadataRequest metadataRequest = (MetadataRequest) request;
        return metadataRequest.topics().equals(singletonList(topicPartition.topic()));
    }, new MetadataResponse(new MetadataResponseData().setTopics(responseTopics), MetadataResponseData.HIGHEST_SUPPORTED_VERSION));
}
Also used: MetadataResponseTopic (org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic), MetadataResponsePartition (org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition), MetadataRequest (org.apache.kafka.common.requests.MetadataRequest), MetadataResponseData (org.apache.kafka.common.message.MetadataResponseData), MetadataResponse (org.apache.kafka.common.requests.MetadataResponse)
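
A call site for this private helper might look as follows. The topic name, partition and broker values are invented for illustration, and an AdminClientUnitTestEnv named env is assumed to be in scope; TopicPartition and Node are the Kafka common classes already referenced above:

// Hypothetical usage inside a KafkaAdminClientTest case: expect one metadata request for
// partition foo-0 and answer that broker 0 is its leader and only in-sync replica.
TopicPartition topicPartition = new TopicPartition("foo", 0);
Node leader = new Node(0, "localhost", 8121);
expectMetadataRequest(env, topicPartition, leader);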

Example 3 with MetadataRequest

Use of org.apache.kafka.common.requests.MetadataRequest in project kafka by apache.

From class MetadataRequestBenchmark, method buildAllTopicMetadataRequest.

private RequestChannel.Request buildAllTopicMetadataRequest() {
    MetadataRequest metadataRequest = MetadataRequest.Builder.allTopics().build();
    RequestHeader header = new RequestHeader(metadataRequest.apiKey(), metadataRequest.version(), "", 0);
    ByteBuffer bodyBuffer = metadataRequest.serialize();
    RequestContext context = new RequestContext(header, "1", null, principal,
            ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT),
            SecurityProtocol.PLAINTEXT, ClientInformation.EMPTY, false);
    return new RequestChannel.Request(1, context, 0, MemoryPool.NONE, bodyBuffer, requestChannelMetrics, Option.empty());
}
Also used: UpdateMetadataRequest (org.apache.kafka.common.requests.UpdateMetadataRequest), MetadataRequest (org.apache.kafka.common.requests.MetadataRequest), RequestHeader (org.apache.kafka.common.requests.RequestHeader), RequestContext (org.apache.kafka.common.requests.RequestContext), ByteBuffer (java.nio.ByteBuffer)
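
For comparison with the all-topics builder used in this benchmark, MetadataRequest.Builder also accepts an explicit topic list (as in Example 4 below). A minimal sketch with placeholder topic names; the boolean constructor argument is assumed to be the allow-auto-topic-creation flag of current Kafka versions:

import java.util.Arrays;
import org.apache.kafka.common.requests.MetadataRequest;

// Illustrative only: contrast the all-topics form with a topic-specific request.
private static void buildExampleMetadataRequests() {
    // All-topics request, as built by the benchmark above.
    MetadataRequest allTopics = MetadataRequest.Builder.allTopics().build();
    // Topic-specific request for two placeholder topics; the boolean is assumed to be
    // the allow-auto-topic-creation flag.
    MetadataRequest someTopics =
            new MetadataRequest.Builder(Arrays.asList("topicA", "topicB"), false).build();
}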

Example 4 with MetadataRequest

Use of org.apache.kafka.common.requests.MetadataRequest in project apache-kafka-on-k8s by banzaicloud.

From class SaslAuthenticatorTest, method testDisallowedKafkaRequestsBeforeAuthentication.

/**
 * Tests that Kafka requests that are forbidden until successful authentication result
 * in authentication failure and do not cause any failures in the server.
 */
@Test
public void testDisallowedKafkaRequestsBeforeAuthentication() throws Exception {
    SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT;
    configureMechanisms("PLAIN", Arrays.asList("PLAIN"));
    server = createEchoServer(securityProtocol);
    // Send metadata request before Kafka SASL handshake request
    String node1 = "invalid1";
    createClientConnection(SecurityProtocol.PLAINTEXT, node1);
    MetadataRequest metadataRequest1 = new MetadataRequest.Builder(Collections.singletonList("sometopic"), true).build();
    RequestHeader metadataRequestHeader1 = new RequestHeader(ApiKeys.METADATA, metadataRequest1.version(), "someclient", 1);
    selector.send(metadataRequest1.toSend(node1, metadataRequestHeader1));
    NetworkTestUtils.waitForChannelClose(selector, node1, ChannelState.READY.state());
    selector.close();
    // Test good connection still works
    createAndCheckClientConnection(securityProtocol, "good1");
    // Send metadata request after Kafka SASL handshake request
    String node2 = "invalid2";
    createClientConnection(SecurityProtocol.PLAINTEXT, node2);
    sendHandshakeRequestReceiveResponse(node2, (short) 1);
    MetadataRequest metadataRequest2 = new MetadataRequest.Builder(Collections.singletonList("sometopic"), true).build();
    RequestHeader metadataRequestHeader2 = new RequestHeader(ApiKeys.METADATA, metadataRequest2.version(), "someclient", 2);
    selector.send(metadataRequest2.toSend(node2, metadataRequestHeader2));
    NetworkTestUtils.waitForChannelClose(selector, node2, ChannelState.READY.state());
    selector.close();
    // Test good connection still works
    createAndCheckClientConnection(securityProtocol, "good2");
}
Also used: MetadataRequest (org.apache.kafka.common.requests.MetadataRequest), SecurityProtocol (org.apache.kafka.common.security.auth.SecurityProtocol), RequestHeader (org.apache.kafka.common.requests.RequestHeader), Test (org.junit.Test)

Example 5 with MetadataRequest

Use of org.apache.kafka.common.requests.MetadataRequest in project kafka by apache.

From class KafkaAdminClientTest, method testUnreachableBootstrapServer.

@Test
public void testUnreachableBootstrapServer() throws Exception {
    // This tests the scenario in which the bootstrap server is unreachable for a short while,
    // which prevents AdminClient from being able to send the initial metadata request
    Cluster cluster = Cluster.bootstrap(singletonList(new InetSocketAddress("localhost", 8121)));
    Map<Node, Long> unreachableNodes = Collections.singletonMap(cluster.nodes().get(0), 200L);
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, cluster, AdminClientUnitTestEnv.clientConfigs(), unreachableNodes)) {
        Cluster discoveredCluster = mockCluster(3, 0);
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(body -> body instanceof MetadataRequest,
                RequestTestUtils.metadataResponse(discoveredCluster.nodes(),
                        discoveredCluster.clusterResource().clusterId(), 1, Collections.emptyList()));
        env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest,
                prepareCreateTopicsResponse("myTopic", Errors.NONE));
        KafkaFuture<Void> future = env.adminClient().createTopics(
                singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))),
                new CreateTopicsOptions().timeoutMs(10000)).all();
        future.get();
    }
}
Also used: MetadataRequest (org.apache.kafka.common.requests.MetadataRequest), CreateTopicsRequest (org.apache.kafka.common.requests.CreateTopicsRequest), InetSocketAddress (java.net.InetSocketAddress), Node (org.apache.kafka.common.Node), OptionalLong (java.util.OptionalLong), AtomicLong (java.util.concurrent.atomic.AtomicLong), Cluster (org.apache.kafka.common.Cluster), ParameterizedTest (org.junit.jupiter.params.ParameterizedTest), Test (org.junit.jupiter.api.Test)
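
The NewTopic constructor used in both AdminClient examples takes an explicit replica assignment map. A small sketch spelling it out; the values mirror the tests above, while the helper method name and variable names are illustrative:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.admin.NewTopic;

// Illustrative helper: partition 0 of "myTopic" assigned to brokers 0, 1 and 2,
// i.e. a single partition with replication factor 3.
private static NewTopic exampleTopicWithAssignment() {
    Map<Integer, List<Integer>> replicaAssignment =
            Collections.singletonMap(0, Arrays.asList(0, 1, 2));
    return new NewTopic("myTopic", replicaAssignment);
}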

Aggregations

MetadataRequest (org.apache.kafka.common.requests.MetadataRequest): 8
Test (org.junit.jupiter.api.Test): 5
MetadataResponseData (org.apache.kafka.common.message.MetadataResponseData): 3
MetadataResponse (org.apache.kafka.common.requests.MetadataResponse): 3
RequestHeader (org.apache.kafka.common.requests.RequestHeader): 3
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 3
Cluster (org.apache.kafka.common.Cluster): 2
Node (org.apache.kafka.common.Node): 2
MetadataResponsePartition (org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition): 2
MetadataResponseTopic (org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic): 2
CreateTopicsRequest (org.apache.kafka.common.requests.CreateTopicsRequest): 2
SecurityProtocol (org.apache.kafka.common.security.auth.SecurityProtocol): 2
InetSocketAddress (java.net.InetSocketAddress): 1
ByteBuffer (java.nio.ByteBuffer): 1
Arrays (java.util.Arrays): 1
Collections.emptyMap (java.util.Collections.emptyMap): 1
Collections.singletonMap (java.util.Collections.singletonMap): 1
HashMap (java.util.HashMap): 1
HashSet (java.util.HashSet): 1
List (java.util.List): 1