Example 71 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

From the class KafkaBasedLog, the method start:

public void start() {
    log.info("Starting KafkaBasedLog with topic " + topic);
    // Create the topic admin client and initialize the topic ...
    admin = topicAdminSupplier.get(); // may be null
    initializer.accept(admin);
    // Then create the producer and consumer
    producer = createProducer();
    consumer = createConsumer();
    List<TopicPartition> partitions = new ArrayList<>();
    // We expect that the topics will have been created either manually by the user or automatically by the herder
    List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
    long started = time.nanoseconds();
    long sleepMs = 100;
    while (partitionInfos.isEmpty() && time.nanoseconds() - started < CREATE_TOPIC_TIMEOUT_NS) {
        time.sleep(sleepMs);
        sleepMs = Math.min(2 * sleepMs, MAX_SLEEP_MS);
        partitionInfos = consumer.partitionsFor(topic);
    }
    if (partitionInfos.isEmpty())
        throw new ConnectException("Could not look up partition metadata for offset backing store topic in"
                + " allotted period. This could indicate a connectivity issue, unavailable topic partitions, or if"
                + " this is your first use of the topic it may have taken too long to create.");
    for (PartitionInfo partition : partitionInfos)
        partitions.add(new TopicPartition(partition.topic(), partition.partition()));
    partitionCount = partitions.size();
    consumer.assign(partitions);
    // Always consume from the beginning of all partitions. Necessary to ensure that we don't use committed offsets
    // when a 'group.id' is specified (if offsets happen to have been committed unexpectedly).
    consumer.seekToBeginning(partitions);
    readToLogEnd();
    thread = new WorkThread();
    thread.start();
    log.info("Finished reading KafkaBasedLog for topic " + topic);
    log.info("Started KafkaBasedLog for topic " + topic);
}
Also used: TopicPartition(org.apache.kafka.common.TopicPartition), ArrayList(java.util.ArrayList), PartitionInfo(org.apache.kafka.common.PartitionInfo), ConnectException(org.apache.kafka.connect.errors.ConnectException)
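
To make the discovery-and-assignment steps above easy to try without a broker, here is a minimal, self-contained sketch that swaps the real consumer for MockConsumer. The class name PartitionLookupSketch and the topic name connect-offsets are illustrative assumptions, not taken from the source.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class PartitionLookupSketch {
    public static void main(String[] args) {
        MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        Node node = new Node(0, "localhost", 9092);
        // Pretend the (hypothetical) topic already has two partitions led by the same node.
        consumer.updatePartitions("connect-offsets", Arrays.asList(
                new PartitionInfo("connect-offsets", 0, node, new Node[] { node }, new Node[] { node }),
                new PartitionInfo("connect-offsets", 1, node, new Node[] { node }, new Node[] { node })));
        // The same mapping step as KafkaBasedLog.start(): PartitionInfo -> TopicPartition.
        List<TopicPartition> partitions = new ArrayList<>();
        for (PartitionInfo info : consumer.partitionsFor("connect-offsets"))
            partitions.add(new TopicPartition(info.topic(), info.partition()));
        consumer.assign(partitions);
        consumer.seekToBeginning(partitions);
        System.out.println("Assigned " + partitions.size() + " partitions: " + partitions);
    }
}

Note that the retry loop in start() is only needed against a live cluster, where topic creation is asynchronous; with MockConsumer the metadata is available immediately.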

Example 72 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

From the class TransactionManagerTest, the method testRaiseErrorWhenNoPartitionsPendingOnDrain:

@Test
public void testRaiseErrorWhenNoPartitionsPendingOnDrain() throws InterruptedException {
    doInitTransactions();
    transactionManager.beginTransaction();
    // Don't execute transactionManager.maybeAddPartitionToTransaction(tp0). This should result in an error on drain.
    appendToAccumulator(tp0);
    Node node1 = new Node(0, "localhost", 1111);
    PartitionInfo part1 = new PartitionInfo(topic, 0, node1, null, null);
    Cluster cluster = new Cluster(null, Collections.singletonList(node1), Collections.singletonList(part1), Collections.emptySet(), Collections.emptySet());
    Set<Node> nodes = new HashSet<>();
    nodes.add(node1);
    Map<Integer, List<ProducerBatch>> drainedBatches = accumulator.drain(cluster, nodes, Integer.MAX_VALUE, time.milliseconds());
    // We shouldn't drain batches which haven't been added to the transaction yet.
    assertTrue(drainedBatches.containsKey(node1.id()));
    assertTrue(drainedBatches.get(node1.id()).isEmpty());
}
Also used: AtomicInteger(java.util.concurrent.atomic.AtomicInteger), Node(org.apache.kafka.common.Node), Cluster(org.apache.kafka.common.Cluster), Collections.singletonList(java.util.Collections.singletonList), List(java.util.List), PartitionInfo(org.apache.kafka.common.PartitionInfo), HashSet(java.util.HashSet), Test(org.junit.jupiter.api.Test)
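
The fixture pattern used above, one Node and one PartitionInfo wrapped in a hand-assembled Cluster, recurs throughout these tests. Below is a minimal standalone sketch of just that pattern; the class name ClusterFixtureSketch and the topic my-topic are hypothetical.

import java.util.Collections;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class ClusterFixtureSketch {
    public static void main(String[] args) {
        Node node = new Node(0, "localhost", 1111);
        // Constructor arguments: topic, partition, leader, replicas, in-sync replicas.
        // The replica arrays are left null, exactly as in the test above.
        PartitionInfo part = new PartitionInfo("my-topic", 0, node, null, null);
        Cluster cluster = new Cluster(null, Collections.singletonList(node),
                Collections.singletonList(part), Collections.emptySet(), Collections.emptySet());
        System.out.println(cluster.leaderFor(new TopicPartition("my-topic", 0))); // node 0
        System.out.println(cluster.partitionCountForTopic("my-topic"));           // 1
    }
}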

Example 73 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

From the class StickyPartitionCacheTest, the method unavailablePartitionsTest:

@Test
public void unavailablePartitionsTest() {
    // Partition 1 in topic A and partition 0 in topic B are unavailable partitions.
    List<PartitionInfo> allPartitions = asList(
            new PartitionInfo(TOPIC_A, 0, NODES[0], NODES, NODES),
            new PartitionInfo(TOPIC_A, 1, null, NODES, NODES),
            new PartitionInfo(TOPIC_A, 2, NODES[2], NODES, NODES),
            new PartitionInfo(TOPIC_B, 0, null, NODES, NODES),
            new PartitionInfo(TOPIC_B, 1, NODES[0], NODES, NODES),
            new PartitionInfo(TOPIC_C, 0, null, NODES, NODES));
    Cluster testCluster = new Cluster("clusterId", asList(NODES[0], NODES[1], NODES[2]), allPartitions, Collections.emptySet(), Collections.emptySet());
    StickyPartitionCache stickyPartitionCache = new StickyPartitionCache();
    // Assure we never choose partition 1 because it is unavailable.
    int partA = stickyPartitionCache.partition(TOPIC_A, testCluster);
    assertNotEquals(1, partA);
    for (int aPartitions = 0; aPartitions < 100; aPartitions++) {
        partA = stickyPartitionCache.nextPartition(TOPIC_A, testCluster, partA);
        assertNotEquals(1, stickyPartitionCache.partition(TOPIC_A, testCluster));
    }
    // Assure we always choose partition 1 for topic B.
    int partB = stickyPartitionCache.partition(TOPIC_B, testCluster);
    assertEquals(1, partB);
    for (int bPartitions = 0; bPartitions < 100; bPartitions++) {
        partB = stickyPartitionCache.nextPartition(TOPIC_B, testCluster, partB);
        assertEquals(1, stickyPartitionCache.partition(TOPIC_B, testCluster));
    }
    // Assure that we still choose the partition when there are no partitions available.
    int partC = stickyPartitionCache.partition(TOPIC_C, testCluster);
    assertEquals(0, partC);
    partC = stickyPartitionCache.nextPartition(TOPIC_C, testCluster, partC);
    assertEquals(0, partC);
}
Also used: Cluster(org.apache.kafka.common.Cluster), PartitionInfo(org.apache.kafka.common.PartitionInfo), Test(org.junit.jupiter.api.Test)
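
The assertions above rest on the distinction Cluster draws between all partitions of a topic and the available ones, i.e. those with a non-null leader. A minimal sketch of just that distinction, with hypothetical class and topic names:

import java.util.Arrays;
import java.util.Collections;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class AvailabilitySketch {
    public static void main(String[] args) {
        Node n0 = new Node(0, "localhost", 9092);
        // Partition 1 has a null leader, so Cluster treats it as unavailable.
        Cluster cluster = new Cluster("clusterId", Collections.singletonList(n0),
                Arrays.asList(
                        new PartitionInfo("topicA", 0, n0, new Node[] { n0 }, new Node[] { n0 }),
                        new PartitionInfo("topicA", 1, null, new Node[] { n0 }, new Node[] { n0 })),
                Collections.emptySet(), Collections.emptySet());
        System.out.println(cluster.partitionsForTopic("topicA").size());          // 2
        System.out.println(cluster.availablePartitionsForTopic("topicA").size()); // 1
    }
}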

Example 74 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

From the class StickyPartitionCacheTest, the method testStickyPartitionCache:

@Test
public void testStickyPartitionCache() {
    List<PartitionInfo> allPartitions = asList(
            new PartitionInfo(TOPIC_A, 0, NODES[0], NODES, NODES),
            new PartitionInfo(TOPIC_A, 1, NODES[1], NODES, NODES),
            new PartitionInfo(TOPIC_A, 2, NODES[2], NODES, NODES),
            new PartitionInfo(TOPIC_B, 0, NODES[0], NODES, NODES));
    Cluster testCluster = new Cluster("clusterId", asList(NODES), allPartitions, Collections.emptySet(), Collections.emptySet());
    StickyPartitionCache stickyPartitionCache = new StickyPartitionCache();
    int partA = stickyPartitionCache.partition(TOPIC_A, testCluster);
    assertEquals(partA, stickyPartitionCache.partition(TOPIC_A, testCluster));
    int partB = stickyPartitionCache.partition(TOPIC_B, testCluster);
    assertEquals(partB, stickyPartitionCache.partition(TOPIC_B, testCluster));
    int changedPartA = stickyPartitionCache.nextPartition(TOPIC_A, testCluster, partA);
    assertEquals(changedPartA, stickyPartitionCache.partition(TOPIC_A, testCluster));
    assertNotEquals(partA, changedPartA);
    int changedPartA2 = stickyPartitionCache.partition(TOPIC_A, testCluster);
    assertEquals(changedPartA2, changedPartA);
    // We do not want to change partitions because the previous partition does not match the current sticky one.
    int changedPartA3 = stickyPartitionCache.nextPartition(TOPIC_A, testCluster, partA);
    assertEquals(changedPartA3, changedPartA2);
    // Check that we can still use the partitioner when there is only one partition.
    int changedPartB = stickyPartitionCache.nextPartition(TOPIC_B, testCluster, partB);
    assertEquals(changedPartB, stickyPartitionCache.partition(TOPIC_B, testCluster));
}
Also used: Cluster(org.apache.kafka.common.Cluster), PartitionInfo(org.apache.kafka.common.PartitionInfo), Test(org.junit.jupiter.api.Test)
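
A note on the design this test pins down: StickyPartitionCache backs the producer's sticky partitioning of records without keys (KIP-480). nextPartition only moves the sticky partition when the partition passed in matches the one currently cached, so a caller holding a stale value, as in the changedPartA3 assertion above, cannot move it a second time.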

Example 75 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

From the class KafkaAdminClientTest, the method testListOffsetsUnsupportedNonMaxTimestamp:

@Test
public void testListOffsetsUnsupportedNonMaxTimestamp() {
    Node node = new Node(0, "localhost", 8120);
    List<Node> nodes = Collections.singletonList(node);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node, new Node[] { node }, new Node[] { node }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos, Collections.emptySet(), Collections.emptySet(), node);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster, AdminClientConfig.RETRIES_CONFIG, "2")) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(ApiKeys.LIST_OFFSETS.id, (short) 0, (short) 0));
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // broker 0 rejects the ListOffsets request version as unsupported
        env.kafkaClient().prepareUnsupportedVersionResponse(request -> request instanceof ListOffsetsRequest);
        ListOffsetsResult result = env.adminClient().listOffsets(Collections.singletonMap(tp0, OffsetSpec.latest()));
        TestUtils.assertFutureThrows(result.partitionResult(tp0), UnsupportedVersionException.class);
    }
}
Also used: TopicPartition(org.apache.kafka.common.TopicPartition), Node(org.apache.kafka.common.Node), ArrayList(java.util.ArrayList), Cluster(org.apache.kafka.common.Cluster), PartitionInfo(org.apache.kafka.common.PartitionInfo), ListOffsetsRequest(org.apache.kafka.common.requests.ListOffsetsRequest), ParameterizedTest(org.junit.jupiter.params.ParameterizedTest), Test(org.junit.jupiter.api.Test)
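
Outside of a mocked environment the same call is short. A minimal sketch, assuming a broker is reachable at localhost:9092 and a topic foo exists; error handling is elided.

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class ListOffsetsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
        try (Admin admin = Admin.create(props)) {
            TopicPartition tp0 = new TopicPartition("foo", 0);
            ListOffsetsResult result = admin.listOffsets(Collections.singletonMap(tp0, OffsetSpec.latest()));
            // partitionResult() returns a KafkaFuture; get() blocks until the broker responds.
            System.out.println("Latest offset of foo-0: " + result.partitionResult(tp0).get().offset());
        }
    }
}

Against a broker that only supports ListOffsets version 0, as simulated in the test above, this call would instead complete exceptionally with UnsupportedVersionException.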

Aggregations

PartitionInfo (org.apache.kafka.common.PartitionInfo): 227 uses
TopicPartition (org.apache.kafka.common.TopicPartition): 142 uses
HashMap (java.util.HashMap): 87 uses
Node (org.apache.kafka.common.Node): 85 uses
Test (org.junit.Test): 82 uses
Cluster (org.apache.kafka.common.Cluster): 80 uses
ArrayList (java.util.ArrayList): 73 uses
HashSet (java.util.HashSet): 67 uses
Set (java.util.Set): 38 uses
Map (java.util.Map): 34 uses
Test (org.junit.jupiter.api.Test): 31 uses
List (java.util.List): 30 uses
TaskId (org.apache.kafka.streams.processor.TaskId): 25 uses
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 16 uses
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest): 16 uses
MockConsumer (org.apache.kafka.clients.consumer.MockConsumer): 15 uses
Properties (java.util.Properties): 13 uses
MockTime (org.apache.kafka.common.utils.MockTime): 13 uses
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 11 uses
HostInfo (org.apache.kafka.streams.state.HostInfo): 11 uses