Example 46 with KafkaFuture

Use of org.apache.kafka.common.KafkaFuture in project ranger by apache.

Class KafkaRangerTopicCreationTest, method testCreateTopic:

@Test
public void testCreateTopic() throws Exception {
    final String topic = "test";
    Properties properties = new Properties();
    properties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + port);
    properties.put("client.id", "test-consumer-id");
    properties.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
    // try-with-resources ensures the admin client is closed even if an assertion fails.
    try (AdminClient client = KafkaAdminClient.create(properties)) {
        // Request a single-partition topic with replication factor 1.
        CreateTopicsResult result = client.createTopics(Arrays.asList(new NewTopic(topic, 1, (short) 1)));
        // Block until the broker acknowledges the creation (throws on failure).
        result.values().get(topic).get();
        for (Map.Entry<String, KafkaFuture<Void>> entry : result.values().entrySet()) {
            System.out.println("Create Topic : " + entry.getKey()
                    + " isCancelled : " + entry.getValue().isCancelled()
                    + " isCompletedExceptionally : " + entry.getValue().isCompletedExceptionally()
                    + " isDone : " + entry.getValue().isDone());
        }
    }
}
Also used : CreateTopicsResult(org.apache.kafka.clients.admin.CreateTopicsResult) KafkaFuture(org.apache.kafka.common.KafkaFuture) NewTopic(org.apache.kafka.clients.admin.NewTopic) Properties(java.util.Properties) HashMap(java.util.HashMap) Map(java.util.Map) AdminClient(org.apache.kafka.clients.admin.AdminClient) KafkaAdminClient(org.apache.kafka.clients.admin.KafkaAdminClient) Test(org.junit.Test)
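
A note on usage: rather than polling each future's state as the loop above does, a caller can block on the result's all() future, which completes only when every topic has been created. The following is a minimal sketch, not part of the original test; the bootstrap address and class name are placeholders.

import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.errors.TopicExistsException;

public class CreateTopicSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder address; point this at a real broker.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        try (AdminClient client = AdminClient.create(props)) {
            try {
                // all() completes only when every per-topic future has completed.
                client.createTopics(Arrays.asList(new NewTopic("test", 1, (short) 1))).all().get();
            } catch (ExecutionException e) {
                // An already-existing topic is usually benign for idempotent setup code.
                if (!(e.getCause() instanceof TopicExistsException)) {
                    throw e;
                }
            }
        }
    }
}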

Example 47 with KafkaFuture

Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.

Class KafkaAdminClientTest, method testDeleteRecords:

@Test
public void testDeleteRecords() throws Exception {
    HashMap<Integer, Node> nodes = new HashMap<>();
    nodes.put(0, new Node(0, "localhost", 8121));
    List<PartitionInfo> partitionInfos = new ArrayList<>();
    partitionInfos.add(new PartitionInfo("my_topic", 0, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 1, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 2, null, new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 3, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    partitionInfos.add(new PartitionInfo("my_topic", 4, nodes.get(0), new Node[] { nodes.get(0) }, new Node[] { nodes.get(0) }));
    Cluster cluster = new Cluster("mockClusterId", nodes.values(), partitionInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), nodes.get(0));
    TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0);
    TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1);
    TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2);
    TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3);
    TopicPartition myTopicPartition4 = new TopicPartition("my_topic", 4);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(env.cluster().nodes().get(0));
        Map<TopicPartition, DeleteRecordsResponse.PartitionResponse> m = new HashMap<>();
        m.put(myTopicPartition0, new DeleteRecordsResponse.PartitionResponse(3, Errors.NONE));
        m.put(myTopicPartition1, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.OFFSET_OUT_OF_RANGE));
        m.put(myTopicPartition3, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.NOT_LEADER_FOR_PARTITION));
        m.put(myTopicPartition4, new DeleteRecordsResponse.PartitionResponse(DeleteRecordsResponse.INVALID_LOW_WATERMARK, Errors.UNKNOWN_TOPIC_OR_PARTITION));
        List<MetadataResponse.TopicMetadata> t = new ArrayList<>();
        List<MetadataResponse.PartitionMetadata> p = new ArrayList<>();
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 0, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 1, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.LEADER_NOT_AVAILABLE, 2, null, Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 3, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        p.add(new MetadataResponse.PartitionMetadata(Errors.NONE, 4, nodes.get(0), Collections.singletonList(nodes.get(0)), Collections.singletonList(nodes.get(0)), Collections.<Node>emptyList()));
        t.add(new MetadataResponse.TopicMetadata(Errors.NONE, "my_topic", false, p));
        env.kafkaClient().prepareResponse(new MetadataResponse(cluster.nodes(), cluster.clusterResource().clusterId(), cluster.controller().id(), t));
        env.kafkaClient().prepareResponse(new DeleteRecordsResponse(0, m));
        Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
        recordsToDelete.put(myTopicPartition0, RecordsToDelete.beforeOffset(3L));
        recordsToDelete.put(myTopicPartition1, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition2, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition3, RecordsToDelete.beforeOffset(10L));
        recordsToDelete.put(myTopicPartition4, RecordsToDelete.beforeOffset(10L));
        DeleteRecordsResult results = env.adminClient().deleteRecords(recordsToDelete);
        // success on records deletion for partition 0
        Map<TopicPartition, KafkaFuture<DeletedRecords>> values = results.lowWatermarks();
        KafkaFuture<DeletedRecords> myTopicPartition0Result = values.get(myTopicPartition0);
        long lowWatermark = myTopicPartition0Result.get().lowWatermark();
        assertEquals(3, lowWatermark);
        // "offset out of range" failure on records deletion for partition 1
        KafkaFuture<DeletedRecords> myTopicPartition1Result = values.get(myTopicPartition1);
        try {
            myTopicPartition1Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e0) {
            assertTrue(e0.getCause() instanceof OffsetOutOfRangeException);
        }
        // "leader not available" failure on metadata request for partition 2
        KafkaFuture<DeletedRecords> myTopicPartition2Result = values.get(myTopicPartition2);
        try {
            myTopicPartition2Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof LeaderNotAvailableException);
        }
        // "not leader for partition" failure on records deletion for partition 3
        KafkaFuture<DeletedRecords> myTopicPartition3Result = values.get(myTopicPartition3);
        try {
            myTopicPartition3Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof NotLeaderForPartitionException);
        }
        // "unknown topic or partition" failure on records deletion for partition 4
        KafkaFuture<DeletedRecords> myTopicPartition4Result = values.get(myTopicPartition4);
        try {
            myTopicPartition4Result.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e1) {
            assertTrue(e1.getCause() instanceof UnknownTopicOrPartitionException);
        }
    }
}
Also used : HashMap(java.util.HashMap) Node(org.apache.kafka.common.Node) UnknownTopicOrPartitionException(org.apache.kafka.common.errors.UnknownTopicOrPartitionException) ArrayList(java.util.ArrayList) LeaderNotAvailableException(org.apache.kafka.common.errors.LeaderNotAvailableException) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) DeleteRecordsResponse(org.apache.kafka.common.requests.DeleteRecordsResponse) PartitionInfo(org.apache.kafka.common.PartitionInfo) ExecutionException(java.util.concurrent.ExecutionException) NotLeaderForPartitionException(org.apache.kafka.common.errors.NotLeaderForPartitionException) KafkaFuture(org.apache.kafka.common.KafkaFuture) Cluster(org.apache.kafka.common.Cluster) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetOutOfRangeException(org.apache.kafka.common.errors.OffsetOutOfRangeException) Test(org.junit.Test)
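
Outside a test, the per-partition futures from lowWatermarks() let application code react to each failure independently instead of failing the whole call. A minimal sketch, assuming a DeleteRecordsResult obtained as above (the helper name is illustrative):

import java.util.Map;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.DeleteRecordsResult;
import org.apache.kafka.clients.admin.DeletedRecords;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;

public class DeleteRecordsSketch {
    static void reportLowWatermarks(DeleteRecordsResult result) throws InterruptedException {
        for (Map.Entry<TopicPartition, KafkaFuture<DeletedRecords>> entry : result.lowWatermarks().entrySet()) {
            try {
                // On success, the broker reports the partition's new low watermark.
                System.out.println(entry.getKey() + " -> low watermark " + entry.getValue().get().lowWatermark());
            } catch (ExecutionException e) {
                // Failure is scoped to this partition; the other futures are unaffected.
                System.err.println(entry.getKey() + " failed: " + e.getCause());
            }
        }
    }
}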

Example 48 with KafkaFuture

Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.

Class KafkaAdminClientTest, method testDeleteAcls:

@Test
public void testDeleteAcls() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(env.cluster().controller());
        // Test a call where one filter has an error.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(0, asList(
                new AclFilterResponse(asList(new AclDeletionResult(ACL1), new AclDeletionResult(ACL2))),
                new AclFilterResponse(new ApiError(Errors.SECURITY_DISABLED, "No security"), Collections.<AclDeletionResult>emptySet()))));
        DeleteAclsResult results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        Map<AclBindingFilter, KafkaFuture<FilterResults>> filterResults = results.values();
        FilterResults filter1Results = filterResults.get(FILTER1).get();
        assertEquals(null, filter1Results.values().get(0).exception());
        assertEquals(ACL1, filter1Results.values().get(0).binding());
        assertEquals(null, filter1Results.values().get(1).exception());
        assertEquals(ACL2, filter1Results.values().get(1).binding());
        assertFutureError(filterResults.get(FILTER2), SecurityDisabledException.class);
        assertFutureError(results.all(), SecurityDisabledException.class);
        // Test a call where one deletion result has an error.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(0, asList(
                new AclFilterResponse(asList(new AclDeletionResult(ACL1), new AclDeletionResult(new ApiError(Errors.SECURITY_DISABLED, "No security"), ACL2))),
                new AclFilterResponse(Collections.<AclDeletionResult>emptySet()))));
        results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        assertTrue(results.values().get(FILTER2).get().values().isEmpty());
        assertFutureError(results.all(), SecurityDisabledException.class);
        // Test a call where there are no errors.
        env.kafkaClient().prepareResponse(new DeleteAclsResponse(0, asList(
                new AclFilterResponse(asList(new AclDeletionResult(ACL1))),
                new AclFilterResponse(asList(new AclDeletionResult(ACL2))))));
        results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2));
        Collection<AclBinding> deleted = results.all().get();
        assertCollectionIs(deleted, ACL1, ACL2);
    }
}
Also used : DeleteAclsResponse(org.apache.kafka.common.requests.DeleteAclsResponse) FilterResults(org.apache.kafka.clients.admin.DeleteAclsResult.FilterResults) AclBindingFilter(org.apache.kafka.common.acl.AclBindingFilter) KafkaFuture(org.apache.kafka.common.KafkaFuture) AclFilterResponse(org.apache.kafka.common.requests.DeleteAclsResponse.AclFilterResponse) AclDeletionResult(org.apache.kafka.common.requests.DeleteAclsResponse.AclDeletionResult) ApiError(org.apache.kafka.common.requests.ApiError) AclBinding(org.apache.kafka.common.acl.AclBinding) Test(org.junit.Test)
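
Since results.all() fails as soon as any filter fails, code that wants the partial successes walks the per-filter futures instead. A minimal sketch, assuming a DeleteAclsResult as above (the helper name is illustrative):

import java.util.Map;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.DeleteAclsResult;
import org.apache.kafka.clients.admin.DeleteAclsResult.FilterResults;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.acl.AclBindingFilter;

public class DeleteAclsSketch {
    static void summarize(DeleteAclsResult results) throws InterruptedException {
        for (Map.Entry<AclBindingFilter, KafkaFuture<FilterResults>> entry : results.values().entrySet()) {
            try {
                // Each filter resolves to the list of ACL bindings it matched and deleted.
                System.out.println(entry.getKey() + ": deleted " + entry.getValue().get().values().size() + " binding(s)");
            } catch (ExecutionException e) {
                // A failed filter does not affect the futures of the other filters.
                System.err.println(entry.getKey() + ": " + e.getCause());
            }
        }
    }
}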

Example 49 with KafkaFuture

Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.

Class KafkaAdminClientTest, method testCreatePartitions:

@Test
public void testCreatePartitions() throws Exception {
    try (AdminClientUnitTestEnv env = mockClientEnv()) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet());
        env.kafkaClient().setNode(env.cluster().controller());
        Map<String, ApiError> m = new HashMap<>();
        m.put("my_topic", ApiError.NONE);
        m.put("other_topic", ApiError.fromThrowable(new InvalidTopicException("some detailed reason")));
        // Test a call where one topic has an error.
        env.kafkaClient().prepareResponse(new CreatePartitionsResponse(0, m));
        Map<String, NewPartitions> counts = new HashMap<>();
        counts.put("my_topic", NewPartitions.increaseTo(3));
        counts.put("other_topic", NewPartitions.increaseTo(3, asList(asList(2), asList(3))));
        CreatePartitionsResult results = env.adminClient().createPartitions(counts);
        Map<String, KafkaFuture<Void>> values = results.values();
        KafkaFuture<Void> myTopicResult = values.get("my_topic");
        myTopicResult.get();
        KafkaFuture<Void> otherTopicResult = values.get("other_topic");
        try {
            otherTopicResult.get();
            fail("get() should throw ExecutionException");
        } catch (ExecutionException e0) {
            assertTrue(e0.getCause() instanceof InvalidTopicException);
            InvalidTopicException e = (InvalidTopicException) e0.getCause();
            assertEquals("some detailed reason", e.getMessage());
        }
    }
}
Also used : KafkaFuture(org.apache.kafka.common.KafkaFuture) HashMap(java.util.HashMap) CreatePartitionsResponse(org.apache.kafka.common.requests.CreatePartitionsResponse) InvalidTopicException(org.apache.kafka.common.errors.InvalidTopicException) ApiError(org.apache.kafka.common.requests.ApiError) ExecutionException(java.util.concurrent.ExecutionException) Test(org.junit.Test)
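
The two-argument increaseTo used for other_topic also pins broker assignments: one inner list per newly added partition, each listing replica broker ids in preference order. A minimal caller-side sketch, with placeholder topic name and broker ids:

import static java.util.Arrays.asList;

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewPartitions;

public class CreatePartitionsSketch {
    static void grow(AdminClient admin) throws Exception {
        Map<String, NewPartitions> counts = new HashMap<>();
        // Grow my_topic to 3 partitions; the two new partitions land on brokers 2 and 3 (placeholder ids).
        counts.put("my_topic", NewPartitions.increaseTo(3, asList(asList(2), asList(3))));
        // Block until the broker has applied (or rejected) every requested change.
        admin.createPartitions(counts).all().get();
    }
}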

Example 50 with KafkaFuture

Use of org.apache.kafka.common.KafkaFuture in project apache-kafka-on-k8s by banzaicloud.

Class MockAdminClient, method createTopics:

@Override
public CreateTopicsResult createTopics(Collection<NewTopic> newTopics, CreateTopicsOptions options) {
    Map<String, KafkaFuture<Void>> createTopicResult = new HashMap<>();
    // If the test has queued request timeouts, fail every topic in this request with a TimeoutException.
    if (timeoutNextRequests > 0) {
        for (final NewTopic newTopic : newTopics) {
            String topicName = newTopic.name();
            KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new TimeoutException());
            createTopicResult.put(topicName, future);
        }
        --timeoutNextRequests;
        return new CreateTopicsResult(createTopicResult);
    }
    for (final NewTopic newTopic : newTopics) {
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        String topicName = newTopic.name();
        if (allTopics.containsKey(topicName)) {
            future.completeExceptionally(new TopicExistsException(String.format("Topic %s exists already.", topicName)));
            createTopicResult.put(topicName, future);
            continue;
        }
        int replicationFactor = newTopic.replicationFactor();
        // The first replicationFactor brokers become the replica set for every partition of the topic.
        List<Node> replicas = new ArrayList<>(replicationFactor);
        for (int i = 0; i < replicationFactor; ++i) {
            replicas.add(brokers.get(i));
        }
        int numberOfPartitions = newTopic.numPartitions();
        List<TopicPartitionInfo> partitions = new ArrayList<>(numberOfPartitions);
        for (int p = 0; p < numberOfPartitions; ++p) {
            partitions.add(new TopicPartitionInfo(p, brokers.get(0), replicas, Collections.<Node>emptyList()));
        }
        allTopics.put(topicName, new TopicMetadata(false, partitions, newTopic.configs()));
        future.complete(null);
        createTopicResult.put(topicName, future);
    }
    return new CreateTopicsResult(createTopicResult);
}
Also used : KafkaFuture(org.apache.kafka.common.KafkaFuture) HashMap(java.util.HashMap) Node(org.apache.kafka.common.Node) ArrayList(java.util.ArrayList) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) TopicPartitionInfo(org.apache.kafka.common.TopicPartitionInfo) TimeoutException(org.apache.kafka.common.errors.TimeoutException)
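
The pre-completed KafkaFutureImpl pattern above generalizes to stubbing any admin result in tests: build the future, complete it normally or exceptionally before returning it, and the caller's get() resolves immediately. A minimal sketch (the helper name is illustrative):

import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.internals.KafkaFutureImpl;

public class StubFutureSketch {
    static KafkaFuture<Void> stubResult(boolean simulateTimeout) {
        KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
        if (simulateTimeout) {
            // get() will throw ExecutionException wrapping this TimeoutException.
            future.completeExceptionally(new TimeoutException());
        } else {
            // get() returns null immediately.
            future.complete(null);
        }
        return future;
    }
}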

Aggregations

KafkaFuture (org.apache.kafka.common.KafkaFuture): 84 usages
HashMap (java.util.HashMap): 59 usages
Map (java.util.Map): 43 usages
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl): 31 usages
ExecutionException (java.util.concurrent.ExecutionException): 30 usages
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 21 usages
ArrayList (java.util.ArrayList): 16 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 16 usages
ConfigResource (org.apache.kafka.common.config.ConfigResource): 16 usages
UnknownTopicOrPartitionException (org.apache.kafka.common.errors.UnknownTopicOrPartitionException): 15 usages
Test (org.junit.jupiter.api.Test): 15 usages
HashSet (java.util.HashSet): 14 usages
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 14 usages
Test (org.junit.Test): 12 usages
TopicPartitionReplica (org.apache.kafka.common.TopicPartitionReplica): 10 usages
TopicExistsException (org.apache.kafka.common.errors.TopicExistsException): 10 usages
NewTopic (org.apache.kafka.clients.admin.NewTopic): 8 usages
AbstractResponse (org.apache.kafka.common.requests.AbstractResponse): 8 usages
AdminClient (org.apache.kafka.clients.admin.AdminClient): 7 usages
ReplicaLogDirInfo (org.apache.kafka.clients.admin.DescribeReplicaLogDirsResult.ReplicaLogDirInfo): 7 usages