
Example 11 with TopicPartitionInfo

Use of org.apache.kafka.common.TopicPartitionInfo in the apache/kafka project: class MockAdminClient, method createTopics.

@Override
public synchronized CreateTopicsResult createTopics(Collection<NewTopic> newTopics, CreateTopicsOptions options) {
    Map<String, KafkaFuture<CreateTopicsResult.TopicMetadataAndConfig>> createTopicResult = new HashMap<>();
    if (timeoutNextRequests > 0) {
        for (final NewTopic newTopic : newTopics) {
            String topicName = newTopic.name();
            KafkaFutureImpl<CreateTopicsResult.TopicMetadataAndConfig> future = new KafkaFutureImpl<>();
            future.completeExceptionally(new TimeoutException());
            createTopicResult.put(topicName, future);
        }
        --timeoutNextRequests;
        return new CreateTopicsResult(createTopicResult);
    }
    for (final NewTopic newTopic : newTopics) {
        KafkaFutureImpl<CreateTopicsResult.TopicMetadataAndConfig> future = new KafkaFutureImpl<>();
        String topicName = newTopic.name();
        if (allTopics.containsKey(topicName)) {
            future.completeExceptionally(new TopicExistsException(String.format("Topic %s exists already.", topicName)));
            createTopicResult.put(topicName, future);
            continue;
        }
        int replicationFactor = newTopic.replicationFactor();
        if (replicationFactor == -1) {
            replicationFactor = defaultReplicationFactor;
        }
        if (replicationFactor > brokers.size()) {
            future.completeExceptionally(new InvalidReplicationFactorException(String.format("Replication factor: %d is larger than brokers: %d", newTopic.replicationFactor(), brokers.size())));
            createTopicResult.put(topicName, future);
            continue;
        }
        List<Node> replicas = new ArrayList<>(replicationFactor);
        for (int i = 0; i < replicationFactor; ++i) {
            replicas.add(brokers.get(i));
        }
        int numberOfPartitions = newTopic.numPartitions();
        if (numberOfPartitions == -1) {
            numberOfPartitions = defaultPartitions;
        }
        List<TopicPartitionInfo> partitions = new ArrayList<>(numberOfPartitions);
        // Partitions start off on the first log directory of each broker, for now.
        List<String> logDirs = new ArrayList<>(numberOfPartitions);
        for (int i = 0; i < numberOfPartitions; i++) {
            partitions.add(new TopicPartitionInfo(i, brokers.get(0), replicas, Collections.emptyList()));
            logDirs.add(brokerLogDirs.get(partitions.get(i).leader().id()).get(0));
        }
        Uuid topicId = Uuid.randomUuid();
        topicIds.put(topicName, topicId);
        topicNames.put(topicId, topicName);
        allTopics.put(topicName, new TopicMetadata(topicId, false, partitions, logDirs, newTopic.configs()));
        future.complete(null);
        createTopicResult.put(topicName, future);
    }
    return new CreateTopicsResult(createTopicResult);
}
Also used : KafkaFuture(org.apache.kafka.common.KafkaFuture) HashMap(java.util.HashMap) Node(org.apache.kafka.common.Node) ArrayList(java.util.ArrayList) KafkaFutureImpl(org.apache.kafka.common.internals.KafkaFutureImpl) TopicExistsException(org.apache.kafka.common.errors.TopicExistsException) Uuid(org.apache.kafka.common.Uuid) TopicPartitionInfo(org.apache.kafka.common.TopicPartitionInfo) InvalidReplicationFactorException(org.apache.kafka.common.errors.InvalidReplicationFactorException) TimeoutException(org.apache.kafka.common.errors.TimeoutException)
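
Because the mock keeps all topic state in memory, a test can drive this createTopics override through the ordinary Admin API and read the resulting TopicPartitionInfo back out. Below is a minimal sketch, assuming the two-argument MockAdminClient(brokers, controller) constructor and a made-up topic name and broker addresses; it is an illustration, not part of the example above.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import org.apache.kafka.clients.admin.MockAdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionInfo;

public class MockAdminClientSketch {
    public static void main(String[] args) throws Exception {
        Node broker0 = new Node(0, "localhost", 9092);
        Node broker1 = new Node(1, "localhost", 9093);
        List<Node> brokers = Arrays.asList(broker0, broker1);

        // All state lives in memory; no real cluster is needed.
        try (MockAdminClient admin = new MockAdminClient(brokers, broker0)) {
            // Goes through the createTopics() override shown above.
            admin.createTopics(Collections.singletonList(new NewTopic("foo", 2, (short) 2))).all().get();

            // Read the partitions back; each entry is a TopicPartitionInfo.
            TopicDescription description =
                admin.describeTopics(Collections.singletonList("foo")).all().get().get("foo");
            for (TopicPartitionInfo partition : description.partitions()) {
                System.out.printf("partition %d, leader %s, replicas %s%n",
                    partition.partition(), partition.leader(), partition.replicas());
            }
        }
    }
}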

Example 12 with TopicPartitionInfo

Use of org.apache.kafka.common.TopicPartitionInfo in the apache/kafka project: class MockAdminClient, method findPartitionReassignment.

private synchronized PartitionReassignment findPartitionReassignment(TopicPartition partition) {
    NewPartitionReassignment reassignment = reassignments.get(partition);
    if (reassignment == null) {
        return null;
    }
    TopicMetadata metadata = allTopics.get(partition.topic());
    if (metadata == null) {
        throw new RuntimeException("Internal MockAdminClient logic error: found " + "reassignment for " + partition + ", but no TopicMetadata");
    }
    TopicPartitionInfo info = metadata.partitions.get(partition.partition());
    if (info == null) {
        throw new RuntimeException("Internal MockAdminClient logic error: found " + "reassignment for " + partition + ", but no TopicPartitionInfo");
    }
    List<Integer> replicas = new ArrayList<>();
    List<Integer> removingReplicas = new ArrayList<>();
    List<Integer> addingReplicas = new ArrayList<>(reassignment.targetReplicas());
    for (Node node : info.replicas()) {
        replicas.add(node.id());
        if (!reassignment.targetReplicas().contains(node.id())) {
            removingReplicas.add(node.id());
        }
        addingReplicas.remove(Integer.valueOf(node.id()));
    }
    return new PartitionReassignment(replicas, addingReplicas, removingReplicas);
}
Also used : TopicPartitionInfo(org.apache.kafka.common.TopicPartitionInfo) Node(org.apache.kafka.common.Node) ArrayList(java.util.ArrayList)
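
The method derives the adding and removing replica sets by diffing the reassignment's target replicas against the partition's current replicas from TopicPartitionInfo. Here is a standalone sketch of that same diff with made-up broker ids, assuming the replica lists contain no duplicates (as in the method above):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ReassignmentDiffSketch {
    public static void main(String[] args) {
        // Current assignment of the partition and the desired target assignment.
        List<Integer> currentReplicas = Arrays.asList(0, 1, 2);
        List<Integer> targetReplicas = Arrays.asList(1, 2, 3);

        // Replicas in the target but not yet assigned are still being added ...
        List<Integer> addingReplicas = new ArrayList<>(targetReplicas);
        addingReplicas.removeAll(currentReplicas);

        // ... and replicas currently assigned but absent from the target are being removed.
        List<Integer> removingReplicas = new ArrayList<>(currentReplicas);
        removingReplicas.removeAll(targetReplicas);

        System.out.println("adding: " + addingReplicas);     // [3]
        System.out.println("removing: " + removingReplicas); // [0]
    }
}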

Example 13 with TopicPartitionInfo

Use of org.apache.kafka.common.TopicPartitionInfo in the apache/kafka project: class WorkerSourceTaskTest, method testTopicCreateWhenTopicExists.

@Test
public void testTopicCreateWhenTopicExists() throws Exception {
    if (!enableTopicCreation)
        // should only test with topic creation enabled
        return;
    createWorkerTask();
    SourceRecord record1 = new SourceRecord(PARTITION, OFFSET, TOPIC, 1, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
    SourceRecord record2 = new SourceRecord(PARTITION, OFFSET, TOPIC, 2, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
    expectPreliminaryCalls();
    TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList());
    TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo));
    EasyMock.expect(admin.describeTopics(TOPIC)).andReturn(Collections.singletonMap(TOPIC, topicDesc));
    expectSendRecordTaskCommitRecordSucceed(false);
    expectSendRecordTaskCommitRecordSucceed(false);
    PowerMock.replayAll();
    Whitebox.setInternalState(workerTask, "toSend", Arrays.asList(record1, record2));
    Whitebox.invokeMethod(workerTask, "sendRecords");
}
Also used : TopicPartitionInfo(org.apache.kafka.common.TopicPartitionInfo) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) SourceRecord(org.apache.kafka.connect.source.SourceRecord) ThreadedTest(org.apache.kafka.connect.util.ThreadedTest) RetryWithToleranceOperatorTest(org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest) ParameterizedTest(org.apache.kafka.connect.util.ParameterizedTest) Test(org.junit.Test)
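
The fixture in this test is a TopicPartitionInfo with a null leader and empty replica and ISR lists, wrapped in a TopicDescription so the mocked admin reports the topic as already existing. A small sketch of building and inspecting the same shape of fixture (the topic name here is made up for illustration):

import java.util.Collections;

import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicPartitionInfo;

public class TopicDescriptionFixtureSketch {
    public static void main(String[] args) {
        // A single-partition description with no elected leader and no replicas,
        // the same shape the test above feeds to the mocked admin client.
        TopicPartitionInfo partition =
            new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList());
        TopicDescription description =
            new TopicDescription("connect-test-topic", false, Collections.singletonList(partition));

        // leader() is null until a leader is elected, so callers must null-check it.
        boolean leaderless = description.partitions().get(0).leader() == null;
        System.out.println("topic=" + description.name() + ", leaderless=" + leaderless);
    }
}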

Example 14 with TopicPartitionInfo

Use of org.apache.kafka.common.TopicPartitionInfo in the apache/kafka project: class TransactionsCommandTest, method testFindHangingLookupTopicAndBrokerId.

@Test
public void testFindHangingLookupTopicAndBrokerId() throws Exception {
    int brokerId = 5;
    String topic = "foo";
    String[] args = new String[] { "--bootstrap-server", "localhost:9092", "find-hanging", "--broker-id", String.valueOf(brokerId), "--topic", topic };
    Node node0 = new Node(0, "localhost", 9092);
    Node node1 = new Node(1, "localhost", 9093);
    Node node5 = new Node(5, "localhost", 9097);
    TopicPartitionInfo partition0 = new TopicPartitionInfo(0, node0, Arrays.asList(node0, node1), Arrays.asList(node0, node1));
    TopicPartitionInfo partition1 = new TopicPartitionInfo(1, node1, Arrays.asList(node1, node5), Arrays.asList(node1, node5));
    TopicDescription description = new TopicDescription(topic, false, Arrays.asList(partition0, partition1));
    expectDescribeTopics(singletonMap(topic, description));
    DescribeProducersResult result = Mockito.mock(DescribeProducersResult.class);
    Mockito.when(result.all()).thenReturn(completedFuture(emptyMap()));
    Mockito.when(admin.describeProducers(Collections.singletonList(new TopicPartition(topic, 1)), new DescribeProducersOptions().brokerId(brokerId))).thenReturn(result);
    execute(args);
    assertNormalExit();
    assertNoHangingTransactions();
}
Also used : TopicPartitionInfo(org.apache.kafka.common.TopicPartitionInfo) TopicPartition(org.apache.kafka.common.TopicPartition) Node(org.apache.kafka.common.Node) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) DescribeProducersResult(org.apache.kafka.clients.admin.DescribeProducersResult) DescribeProducersOptions(org.apache.kafka.clients.admin.DescribeProducersOptions) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
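
In this scenario only partition 1 carries a replica on broker 5, which is why the test expects a single describeProducers call for that partition. The sketch below approximates the filtering step using the same node and partition layout; it is an illustration of the idea, not the command's actual implementation.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.TopicPartitionInfo;

public class BrokerPartitionFilterSketch {
    public static void main(String[] args) {
        Node node0 = new Node(0, "localhost", 9092);
        Node node1 = new Node(1, "localhost", 9093);
        Node node5 = new Node(5, "localhost", 9097);

        TopicDescription description = new TopicDescription("foo", false, Arrays.asList(
            new TopicPartitionInfo(0, node0, Arrays.asList(node0, node1), Arrays.asList(node0, node1)),
            new TopicPartitionInfo(1, node1, Arrays.asList(node1, node5), Arrays.asList(node1, node5))));

        int brokerId = 5;
        // Keep only the partitions that have a replica on the requested broker.
        List<TopicPartition> partitionsOnBroker = description.partitions().stream()
            .filter(p -> p.replicas().stream().anyMatch(replica -> replica.id() == brokerId))
            .map(p -> new TopicPartition(description.name(), p.partition()))
            .collect(Collectors.toList());

        System.out.println(partitionsOnBroker); // [foo-1]
    }
}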

Example 15 with TopicPartitionInfo

Use of org.apache.kafka.common.TopicPartitionInfo in the apache/kafka project: class TransactionsCommandTest, method testFindHangingLookupTopicPartitionsForBroker.

@Test
public void testFindHangingLookupTopicPartitionsForBroker() throws Exception {
    int brokerId = 5;
    String[] args = new String[] { "--bootstrap-server", "localhost:9092", "find-hanging", "--broker-id", String.valueOf(brokerId) };
    String topic = "foo";
    expectListTopics(singleton(topic));
    Node node0 = new Node(0, "localhost", 9092);
    Node node1 = new Node(1, "localhost", 9093);
    Node node5 = new Node(5, "localhost", 9097);
    TopicPartitionInfo partition0 = new TopicPartitionInfo(0, node0, Arrays.asList(node0, node1), Arrays.asList(node0, node1));
    TopicPartitionInfo partition1 = new TopicPartitionInfo(1, node1, Arrays.asList(node1, node5), Arrays.asList(node1, node5));
    TopicDescription description = new TopicDescription(topic, false, Arrays.asList(partition0, partition1));
    expectDescribeTopics(singletonMap(topic, description));
    DescribeProducersResult result = Mockito.mock(DescribeProducersResult.class);
    Mockito.when(result.all()).thenReturn(completedFuture(emptyMap()));
    Mockito.when(admin.describeProducers(Collections.singletonList(new TopicPartition(topic, 1)), new DescribeProducersOptions().brokerId(brokerId))).thenReturn(result);
    execute(args);
    assertNormalExit();
    assertNoHangingTransactions();
}
Also used : TopicPartitionInfo(org.apache.kafka.common.TopicPartitionInfo) TopicPartition(org.apache.kafka.common.TopicPartition) Node(org.apache.kafka.common.Node) TopicDescription(org.apache.kafka.clients.admin.TopicDescription) DescribeProducersResult(org.apache.kafka.clients.admin.DescribeProducersResult) DescribeProducersOptions(org.apache.kafka.clients.admin.DescribeProducersOptions) Test(org.junit.jupiter.api.Test) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest)
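
Unlike the previous test, this variant resolves the topic set with a list-topics call before describing partitions. A minimal sketch of that list-then-describe lookup against the Admin interface; the bootstrap address is only a placeholder and error handling is omitted.

import java.util.Map;
import java.util.Properties;
import java.util.Set;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;

public class ListThenDescribeSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        try (Admin admin = Admin.create(props)) {
            // First enumerate the topic names ...
            Set<String> topics = admin.listTopics().names().get();
            // ... then fetch per-partition detail (leader, replicas, ISR) for all of them.
            Map<String, TopicDescription> descriptions = admin.describeTopics(topics).all().get();
            descriptions.forEach((name, description) ->
                System.out.println(name + " -> " + description.partitions()));
        }
    }
}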

Aggregations

TopicPartitionInfo (org.apache.kafka.common.TopicPartitionInfo): 62
Test (org.junit.Test): 33
TopicDescription (org.apache.kafka.clients.admin.TopicDescription): 31
Node (org.apache.kafka.common.Node): 28
ArrayList (java.util.ArrayList): 20
MockAdminClient (org.apache.kafka.clients.admin.MockAdminClient): 18
NewTopic (org.apache.kafka.clients.admin.NewTopic): 16
HashMap (java.util.HashMap): 14
Cluster (org.apache.kafka.common.Cluster): 11
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl): 11
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 11
Test (org.junit.jupiter.api.Test): 10
TopicPartition (org.apache.kafka.common.TopicPartition): 8
ConfigResource (org.apache.kafka.common.config.ConfigResource): 8
Map (java.util.Map): 7
AdminClient (org.apache.kafka.clients.admin.AdminClient): 7
Config (org.apache.kafka.clients.admin.Config): 7
TopicMetadataAndConfig (org.apache.kafka.clients.admin.CreateTopicsResult.TopicMetadataAndConfig): 7
TopicConfig (org.apache.kafka.common.config.TopicConfig): 7
ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig): 6