Example usage of org.apache.kafka.common.requests.MetadataRequest in project kafka by apache: class KafkaAdminClientTest, method testConnectionFailureOnMetadataUpdate.
@Test
public void testConnectionFailureOnMetadataUpdate() throws Exception {
    // Scenario: the bootstrap connection succeeds, but the broker drops the
    // connection before the full metadata response arrives. The admin client
    // must retry the metadata fetch and then complete the createTopics call.
    Cluster bootstrapCluster = mockBootstrapCluster();
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, bootstrapCluster)) {
        Cluster discoveredCluster = mockCluster(3, 0);
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        // First metadata attempt: no response body, disconnected mid-flight.
        env.kafkaClient().prepareResponse(body -> body instanceof MetadataRequest, null, true);
        // Retry succeeds and discovers the real cluster.
        env.kafkaClient().prepareResponse(body -> body instanceof MetadataRequest,
            RequestTestUtils.metadataResponse(discoveredCluster.nodes(),
                discoveredCluster.clusterResource().clusterId(), 1, Collections.emptyList()));
        env.kafkaClient().prepareResponse(body -> body instanceof CreateTopicsRequest,
            prepareCreateTopicsResponse("myTopic", Errors.NONE));
        NewTopic newTopic = new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)));
        KafkaFuture<Void> future = env.adminClient()
            .createTopics(singleton(newTopic), new CreateTopicsOptions().timeoutMs(10000))
            .all();
        future.get();
    }
}
Example usage of org.apache.kafka.common.requests.MetadataRequest in project kafka by apache: class KafkaAdminClientTest, method expectMetadataRequest.
/**
 * Queues a metadata response in the mock client so that the next metadata
 * request for {@code topicPartition}'s topic is answered with {@code leader}
 * as the partition leader, sole replica, and sole ISR member.
 */
private void expectMetadataRequest(AdminClientUnitTestEnv env, TopicPartition topicPartition, Node leader) {
    MetadataResponsePartition partitionMetadata = new MetadataResponsePartition()
        .setErrorCode(Errors.NONE.code())
        .setPartitionIndex(topicPartition.partition())
        .setLeaderId(leader.id())
        .setReplicaNodes(singletonList(leader.id()))
        .setIsrNodes(singletonList(leader.id()));
    MetadataResponseTopic topicMetadata = new MetadataResponseTopic()
        .setName(topicPartition.topic())
        .setErrorCode(Errors.NONE.code());
    topicMetadata.partitions().add(partitionMetadata);
    MetadataResponseData.MetadataResponseTopicCollection topicCollection =
        new MetadataResponseData.MetadataResponseTopicCollection();
    topicCollection.add(topicMetadata);
    // Only match a MetadataRequest asking for exactly this topic.
    env.kafkaClient().prepareResponse(
        request -> request instanceof MetadataRequest
            && ((MetadataRequest) request).topics().equals(singletonList(topicPartition.topic())),
        new MetadataResponse(new MetadataResponseData().setTopics(topicCollection),
            MetadataResponseData.HIGHEST_SUPPORTED_VERSION));
}
Example usage of org.apache.kafka.common.requests.MetadataRequest in project kafka by apache: class MetadataRequestBenchmark, method buildAllTopicMetadataRequest.
/**
 * Builds a {@link RequestChannel.Request} wrapping a serialized "all topics"
 * MetadataRequest, suitable for feeding directly into the request handler
 * under benchmark.
 */
private RequestChannel.Request buildAllTopicMetadataRequest() {
    MetadataRequest allTopicsRequest = MetadataRequest.Builder.allTopics().build();
    RequestHeader requestHeader =
        new RequestHeader(allTopicsRequest.apiKey(), allTopicsRequest.version(), "", 0);
    ByteBuffer serializedBody = allTopicsRequest.serialize();
    ListenerName listener = ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT);
    RequestContext context = new RequestContext(requestHeader, "1", null, principal, listener,
        SecurityProtocol.PLAINTEXT, ClientInformation.EMPTY, false);
    return new RequestChannel.Request(1, context, 0, MemoryPool.NONE, serializedBody,
        requestChannelMetrics, Option.empty());
}
Example usage of org.apache.kafka.common.requests.MetadataRequest in project apache-kafka-on-k8s by banzaicloud: class SaslAuthenticatorTest, method testDisallowedKafkaRequestsBeforeAuthentication.
/**
 * Tests that Kafka requests that are forbidden until successful authentication result
 * in authentication failure and do not cause any failures in the server.
 */
@Test
public void testDisallowedKafkaRequestsBeforeAuthentication() throws Exception {
    SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT;
    configureMechanisms("PLAIN", Arrays.asList("PLAIN"));
    server = createEchoServer(securityProtocol);
    // Metadata request sent before any Kafka SASL handshake request:
    // the server must close the connection without failing internally.
    sendDisallowedMetadataRequest("invalid1", 1, false);
    // Server must remain healthy: a properly authenticating client still works.
    createAndCheckClientConnection(securityProtocol, "good1");
    // Metadata request sent after the SASL handshake (but before SASL
    // authentication completes): the connection must also be closed.
    sendDisallowedMetadataRequest("invalid2", 2, true);
    // Server must still accept a good connection afterwards.
    createAndCheckClientConnection(securityProtocol, "good2");
}

/**
 * Opens a plaintext connection named {@code node}, optionally performs the
 * SASL handshake first, then sends a metadata request — which is disallowed
 * at that point of the authentication sequence — and verifies that the
 * server closes the channel. Closes the selector afterwards.
 *
 * @param node          connection id to use for the client connection
 * @param correlationId correlation id for the metadata request header
 * @param handshakeFirst whether to perform the SASL handshake before sending
 */
private void sendDisallowedMetadataRequest(String node, int correlationId, boolean handshakeFirst) throws Exception {
    createClientConnection(SecurityProtocol.PLAINTEXT, node);
    if (handshakeFirst)
        sendHandshakeRequestReceiveResponse(node, (short) 1);
    MetadataRequest metadataRequest = new MetadataRequest.Builder(Collections.singletonList("sometopic"), true).build();
    RequestHeader metadataRequestHeader = new RequestHeader(ApiKeys.METADATA, metadataRequest.version(), "someclient", correlationId);
    selector.send(metadataRequest.toSend(node, metadataRequestHeader));
    NetworkTestUtils.waitForChannelClose(selector, node, ChannelState.READY.state());
    selector.close();
}
Example usage of org.apache.kafka.common.requests.MetadataRequest in project kafka by apache: class KafkaAdminClientTest, method testUnreachableBootstrapServer.
@Test
public void testUnreachableBootstrapServer() throws Exception {
    // Scenario: the single bootstrap server is unreachable for a short while
    // (200 ms), which delays the initial metadata request. The admin client
    // should retry once the node is reachable and finish within the timeout.
    Cluster bootstrapCluster = Cluster.bootstrap(singletonList(new InetSocketAddress("localhost", 8121)));
    Node bootstrapNode = bootstrapCluster.nodes().get(0);
    Map<Node, Long> unreachableNodes = Collections.singletonMap(bootstrapNode, 200L);
    try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(Time.SYSTEM, bootstrapCluster,
            AdminClientUnitTestEnv.clientConfigs(), unreachableNodes)) {
        Cluster discoveredCluster = mockCluster(3, 0);
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(request -> request instanceof MetadataRequest,
            RequestTestUtils.metadataResponse(discoveredCluster.nodes(),
                discoveredCluster.clusterResource().clusterId(), 1, Collections.emptyList()));
        env.kafkaClient().prepareResponse(request -> request instanceof CreateTopicsRequest,
            prepareCreateTopicsResponse("myTopic", Errors.NONE));
        NewTopic newTopic = new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)));
        KafkaFuture<Void> future = env.adminClient()
            .createTopics(singleton(newTopic), new CreateTopicsOptions().timeoutMs(10000))
            .all();
        future.get();
    }
}
Aggregations