Example 81 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

From the class KafkaAdminClientTest, method testListOffsetsPartialResponse.

@Test
public void testListOffsetsPartialResponse() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    List<Node> nodes = Arrays.asList(node0, node1);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[] { node0, node1 }, new Node[] { node0, node1 }));
    pInfos.add(new PartitionInfo("foo", 1, node0, new Node[] { node0, node1 }, new Node[] { node0, node1 }));
    final Cluster cluster = new Cluster("mockClusterId", nodes, pInfos, Collections.<String>emptySet(), Collections.<String>emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
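        // Respond for tp0 only; the missing tp1 entry makes its future fail with ApiException below.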
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -2L, 123L, 456);
        ListOffsetsResponseData data = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(data), node0);
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        assertNotNull(result.partitionResult(tp0).get());
        TestUtils.assertFutureThrows(result.partitionResult(tp1), ApiException.class);
        TestUtils.assertFutureThrows(result.all(), ApiException.class);
    }
}
Also used: HashMap(java.util.HashMap) ListOffsetsTopicResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse) Node(org.apache.kafka.common.Node) ArrayList(java.util.ArrayList) Cluster(org.apache.kafka.common.Cluster) ListOffsetsResponseData(org.apache.kafka.common.message.ListOffsetsResponseData) TopicPartition(org.apache.kafka.common.TopicPartition) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) PartitionInfo(org.apache.kafka.common.PartitionInfo) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)
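
Common to all of these examples is the five-argument PartitionInfo constructor: topic, partition number, leader node, the replica array, and the in-sync-replica array. A minimal standalone sketch of constructing and inspecting one (the broker id, host, and port are placeholders, not values from the test above):

import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

public class PartitionInfoSketch {
    public static void main(String[] args) {
        // Placeholder broker; any id/host/port works for an offline sketch.
        Node leader = new Node(0, "localhost", 9092);
        Node[] replicas = new Node[] { leader };
        PartitionInfo info = new PartitionInfo("foo", 0, leader, replicas, replicas);
        // The accessors mirror the constructor arguments.
        System.out.println(info.topic() + "-" + info.partition() + " led by " + info.leader());
    }
}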

Example 82 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

From the class KafkaAdminClientTest, method testListOffsetsWithMultiplePartitionsLeaderChange.

@Test
public void testListOffsetsWithMultiplePartitionsLeaderChange() throws Exception {
    Node node0 = new Node(0, "localhost", 8120);
    Node node1 = new Node(1, "localhost", 8121);
    Node node2 = new Node(2, "localhost", 8122);
    List<Node> nodes = Arrays.asList(node0, node1, node2);
    final PartitionInfo oldPInfo1 = new PartitionInfo("foo", 0, node0, new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
    final PartitionInfo oldPInfo2 = new PartitionInfo("foo", 1, node0, new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
    List<PartitionInfo> oldPInfos = Arrays.asList(oldPInfo1, oldPInfo2);
    final Cluster oldCluster = new Cluster("mockClusterId", nodes, oldPInfos, Collections.emptySet(), Collections.emptySet(), node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("foo", 1);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(oldCluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(oldCluster, Errors.NONE));
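        // First attempt: node0 answers both partitions with retriable leader errors, forcing a metadata refresh.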
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NOT_LEADER_OR_FOLLOWER, -1L, 345L, 543);
        ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.LEADER_NOT_AVAILABLE, -2L, 123L, 456);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0, t1));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node0);
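        // Refreshed metadata moves tp0's leader to node1 and tp1's leader to node2, so the retries go there.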
        final PartitionInfo newPInfo1 = new PartitionInfo("foo", 0, node1, new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
        final PartitionInfo newPInfo2 = new PartitionInfo("foo", 1, node2, new Node[] { node0, node1, node2 }, new Node[] { node0, node1, node2 });
        List<PartitionInfo> newPInfos = Arrays.asList(newPInfo1, newPInfo2);
        final Cluster newCluster = new Cluster("mockClusterId", nodes, newPInfos, Collections.emptySet(), Collections.emptySet(), node0);
        env.kafkaClient().prepareResponse(prepareMetadataResponse(newCluster, Errors.NONE));
        t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 345L, 543);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t0));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node1);
        t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -2L, 123L, 456);
        responseData = new ListOffsetsResponseData().setThrottleTimeMs(0).setTopics(Arrays.asList(t1));
        env.kafkaClient().prepareResponseFrom(new ListOffsetsResponse(responseData), node2);
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.latest());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(345L, offsets.get(tp0).offset());
        assertEquals(543, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
        assertEquals(123L, offsets.get(tp1).offset());
        assertEquals(456, offsets.get(tp1).leaderEpoch().get().intValue());
        assertEquals(-2L, offsets.get(tp1).timestamp());
    }
}
Also used: ListOffsetsResultInfo(org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo) HashMap(java.util.HashMap) ListOffsetsTopicResponse(org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse) Node(org.apache.kafka.common.Node) Cluster(org.apache.kafka.common.Cluster) ListOffsetsResponseData(org.apache.kafka.common.message.ListOffsetsResponseData) TopicPartition(org.apache.kafka.common.TopicPartition) ListOffsetsResponse(org.apache.kafka.common.requests.ListOffsetsResponse) PartitionInfo(org.apache.kafka.common.PartitionInfo) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) Test(org.junit.jupiter.api.Test)
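
In the test the responses are scripted, but the retry on NOT_LEADER_OR_FOLLOWER and LEADER_NOT_AVAILABLE is performed by the real admin client as well; callers only see the final offsets. A hedged sketch of the production-side call, assuming a broker at localhost:9092 and a topic "foo" (both placeholders):

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.common.TopicPartition;

public class ListOffsetsSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Assumed local broker; point this at your own cluster.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        TopicPartition tp = new TopicPartition("foo", 0);
        try (Admin admin = Admin.create(props)) {
            ListOffsetsResultInfo info = admin
                .listOffsets(Collections.singletonMap(tp, OffsetSpec.latest()))
                .partitionResult(tp)
                .get();
            System.out.println("offset=" + info.offset() + ", leaderEpoch=" + info.leaderEpoch());
        }
    }
}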

Example 83 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

From the class SmokeTestDriver, method generate.

public static Map<String, Set<Integer>> generate(final String kafka, final int numKeys, final int maxRecordsPerKey, final Duration timeToSpend) {
    final Properties producerProps = generatorProperties(kafka);
    int numRecordsProduced = 0;
    final Map<String, Set<Integer>> allData = new HashMap<>();
    final ValueList[] data = new ValueList[numKeys];
    for (int i = 0; i < numKeys; i++) {
        data[i] = new ValueList(i, i + maxRecordsPerKey - 1);
        allData.put(data[i].key, new HashSet<>());
    }
    final Random rand = new Random();
    int remaining = data.length;
    final long recordPauseTime = timeToSpend.toMillis() / numKeys / maxRecordsPerKey;
    List<ProducerRecord<byte[], byte[]>> needRetry = new ArrayList<>();
    try (final KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(producerProps)) {
        while (remaining > 0) {
            final int index = rand.nextInt(remaining);
            final String key = data[index].key;
            final int value = data[index].next();
            if (value < 0) {
                remaining--;
                data[index] = data[remaining];
            } else {
                final ProducerRecord<byte[], byte[]> record = new ProducerRecord<>("data", stringSerde.serializer().serialize("", key), intSerde.serializer().serialize("", value));
                producer.send(record, new TestCallback(record, needRetry));
                numRecordsProduced++;
                allData.get(key).add(value);
                if (numRecordsProduced % 100 == 0) {
                    System.out.println(Instant.now() + " " + numRecordsProduced + " records produced");
                }
                Utils.sleep(Math.max(recordPauseTime, 2));
            }
        }
        producer.flush();
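        // Retry any sends whose callbacks reported a failure, giving up after a bounded number of passes.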
        int remainingRetries = 5;
        while (!needRetry.isEmpty()) {
            final List<ProducerRecord<byte[], byte[]>> needRetry2 = new ArrayList<>();
            for (final ProducerRecord<byte[], byte[]> record : needRetry) {
                System.out.println("retry producing " + stringSerde.deserializer().deserialize("", record.key()));
                producer.send(record, new TestCallback(record, needRetry2));
            }
            producer.flush();
            needRetry = needRetry2;
            if (--remainingRetries == 0 && !needRetry.isEmpty()) {
                System.err.println("Failed to produce all records after multiple retries");
                Exit.exit(1);
            }
        }
        // now that we've sent everything, we'll send some final records with a timestamp high enough to flush out
        // all suppressed records.
        final List<PartitionInfo> partitions = producer.partitionsFor("data");
        for (final PartitionInfo partition : partitions) {
            producer.send(new ProducerRecord<>(partition.topic(), partition.partition(), System.currentTimeMillis() + Duration.ofDays(2).toMillis(), stringSerde.serializer().serialize("", "flush"), intSerde.serializer().serialize("", 0)));
        }
    }
    return Collections.unmodifiableMap(allData);
}
Also used: KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) HashSet(java.util.HashSet) Set(java.util.Set) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Properties(java.util.Properties) Random(java.util.Random) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) PartitionInfo(org.apache.kafka.common.PartitionInfo)
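
The final loop above relies on producer.partitionsFor(topic), which returns one PartitionInfo per partition of the topic, to address a record to every partition explicitly. A trimmed-down sketch of that broadcast pattern, assuming a local broker and string key/value serializers (the test itself uses byte arrays):

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.StringSerializer;

public class BroadcastSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // One record per partition, targeted by explicit partition number.
            for (PartitionInfo partition : producer.partitionsFor("data")) {
                producer.send(new ProducerRecord<>(partition.topic(), partition.partition(), "flush", "marker"));
            }
        }
    }
}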

Example 84 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

From the class StreamThreadTest, method shouldReturnStandbyTaskMetadataWhileRunningState.

@Test
public void shouldReturnStandbyTaskMetadataWhileRunningState() {
    internalStreamsBuilder.stream(Collections.singleton(topic1), consumed).groupByKey().count(Materialized.as("count-one"));
    internalStreamsBuilder.buildAndOptimizeTopology();
    final StreamThread thread = createStreamThread(CLIENT_ID, config, false);
    final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer;
    restoreConsumer.updatePartitions("stream-thread-test-count-one-changelog", Collections.singletonList(new PartitionInfo("stream-thread-test-count-one-changelog", 0, null, new Node[0], new Node[0])));
    final HashMap<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(new TopicPartition("stream-thread-test-count-one-changelog", 1), 0L);
    restoreConsumer.updateEndOffsets(offsets);
    restoreConsumer.updateBeginningOffsets(offsets);
    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());
    final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    // assign single partition
    standbyTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().handleAssignment(emptyMap(), standbyTasks);
    thread.rebalanceListener().onPartitionsAssigned(Collections.emptyList());
    thread.runOnce();
    final ThreadMetadata threadMetadata = thread.threadMetadata();
    assertEquals(StreamThread.State.RUNNING.name(), threadMetadata.threadState());
    assertTrue(threadMetadata.standbyTasks().contains(new TaskMetadataImpl(task1, Utils.mkSet(t1p1), new HashMap<>(), new HashMap<>(), Optional.empty())));
    assertTrue(threadMetadata.activeTasks().isEmpty());
    thread.taskManager().shutdown(true);
}
Also used: TaskId(org.apache.kafka.streams.processor.TaskId) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) Set(java.util.Set) HashSet(java.util.HashSet) Collections.emptySet(java.util.Collections.emptySet) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) ThreadMetadata(org.apache.kafka.streams.ThreadMetadata) AtomicLong(java.util.concurrent.atomic.AtomicLong) PartitionInfo(org.apache.kafka.common.PartitionInfo) Test(org.junit.Test)
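
Note that the PartitionInfo handed to MockConsumer.updatePartitions carries a null leader and empty replica arrays: the mock never talks to a broker, so only the topic-to-partition mapping matters. A self-contained sketch of seeding a MockConsumer this way (topic name and offsets are illustrative):

import java.util.Collections;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerSketch {
    public static void main(String[] args) {
        MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        TopicPartition tp = new TopicPartition("topic", 0);
        // Leader and replicas can be empty; the mock only needs the partition listing.
        consumer.updatePartitions("topic", Collections.singletonList(new PartitionInfo("topic", 0, null, new Node[0], new Node[0])));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
        consumer.assign(Collections.singleton(tp));
        System.out.println("position=" + consumer.position(tp));
    }
}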

Example 85 with PartitionInfo

Use of org.apache.kafka.common.PartitionInfo in project kafka by apache.

From the class StreamThreadTest, method shouldRecoverFromInvalidOffsetExceptionOnRestoreAndFinishRestore.

@Test
public void shouldRecoverFromInvalidOffsetExceptionOnRestoreAndFinishRestore() throws Exception {
    internalStreamsBuilder.stream(Collections.singleton("topic"), consumed).groupByKey().count(Materialized.as("count"));
    internalStreamsBuilder.buildAndOptimizeTopology();
    final StreamThread thread = createStreamThread("clientId", config, false);
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    final MockConsumer<byte[], byte[]> mockRestoreConsumer = (MockConsumer<byte[], byte[]>) thread.restoreConsumer();
    final MockAdminClient mockAdminClient = (MockAdminClient) thread.adminClient();
    final TopicPartition topicPartition = new TopicPartition("topic", 0);
    final Set<TopicPartition> topicPartitionSet = Collections.singleton(topicPartition);
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final TaskId task0 = new TaskId(0, 0);
    activeTasks.put(task0, topicPartitionSet);
    thread.taskManager().handleAssignment(activeTasks, emptyMap());
    mockConsumer.updatePartitions("topic", Collections.singletonList(new PartitionInfo("topic", 0, null, new Node[0], new Node[0])));
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(topicPartition, 0L));
    mockRestoreConsumer.updatePartitions("stream-thread-test-count-changelog", Collections.singletonList(new PartitionInfo("stream-thread-test-count-changelog", 0, null, new Node[0], new Node[0])));
    final TopicPartition changelogPartition = new TopicPartition("stream-thread-test-count-changelog", 0);
    final Set<TopicPartition> changelogPartitionSet = Collections.singleton(changelogPartition);
    mockRestoreConsumer.updateBeginningOffsets(Collections.singletonMap(changelogPartition, 0L));
    mockAdminClient.updateEndOffsets(Collections.singletonMap(changelogPartition, 2L));
    mockConsumer.schedulePollTask(() -> {
        thread.setState(StreamThread.State.PARTITIONS_REVOKED);
        thread.rebalanceListener().onPartitionsAssigned(topicPartitionSet);
    });
    try {
        thread.start();
        TestUtils.waitForCondition(() -> mockRestoreConsumer.assignment().size() == 1, "Never get the assignment");
        mockRestoreConsumer.addRecord(new ConsumerRecord<>("stream-thread-test-count-changelog", 0, 0L, "K1".getBytes(), "V1".getBytes()));
        TestUtils.waitForCondition(() -> mockRestoreConsumer.position(changelogPartition) == 1L, "Never restore first record");
        mockRestoreConsumer.setPollException(new InvalidOffsetException("Try Again!") {

            @Override
            public Set<TopicPartition> partitions() {
                return changelogPartitionSet;
            }
        });
        // after handling the exception and reviving the task, the position
        // should be reset to the beginning.
        TestUtils.waitForCondition(() -> mockRestoreConsumer.position(changelogPartition) == 0L, "Never restore first record");
        mockRestoreConsumer.addRecord(new ConsumerRecord<>("stream-thread-test-count-changelog", 0, 0L, "K1".getBytes(), "V1".getBytes()));
        mockRestoreConsumer.addRecord(new ConsumerRecord<>("stream-thread-test-count-changelog", 0, 1L, "K2".getBytes(), "V2".getBytes()));
        TestUtils.waitForCondition(() -> {
            mockRestoreConsumer.assign(changelogPartitionSet);
            return mockRestoreConsumer.position(changelogPartition) == 2L;
        }, "Never finished restore");
    } finally {
        thread.shutdown();
        thread.join(10000);
    }
}
Also used: TaskId(org.apache.kafka.streams.processor.TaskId) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) Set(java.util.Set) HashSet(java.util.HashSet) Collections.emptySet(java.util.Collections.emptySet) HashMap(java.util.HashMap) MockAdminClient(org.apache.kafka.clients.admin.MockAdminClient) InvalidOffsetException(org.apache.kafka.clients.consumer.InvalidOffsetException) TopicPartition(org.apache.kafka.common.TopicPartition) PartitionInfo(org.apache.kafka.common.PartitionInfo) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) Test(org.junit.Test)
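
The scheduled poll exception above models what a live consumer can throw when its position falls outside the broker's retained range. One common recovery, sketched here under the assumption that reprocessing from the earliest available offset is acceptable, is to rewind exactly the partitions the exception reports:

import java.time.Duration;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.InvalidOffsetException;

public class RecoverySketch {
    static ConsumerRecords<byte[], byte[]> pollWithRecovery(final Consumer<byte[], byte[]> consumer) {
        try {
            return consumer.poll(Duration.ofMillis(100));
        } catch (final InvalidOffsetException e) {
            // Rewind only the affected partitions, then retry the poll once.
            consumer.seekToBeginning(e.partitions());
            return consumer.poll(Duration.ofMillis(100));
        }
    }
}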

Aggregations

PartitionInfo (org.apache.kafka.common.PartitionInfo) 227
TopicPartition (org.apache.kafka.common.TopicPartition) 142
HashMap (java.util.HashMap) 87
Node (org.apache.kafka.common.Node) 85
Test (org.junit.Test) 82
Cluster (org.apache.kafka.common.Cluster) 80
ArrayList (java.util.ArrayList) 73
HashSet (java.util.HashSet) 67
Set (java.util.Set) 38
Map (java.util.Map) 34
Test (org.junit.jupiter.api.Test) 31
List (java.util.List) 30
TaskId (org.apache.kafka.streams.processor.TaskId) 25
StreamsConfig (org.apache.kafka.streams.StreamsConfig) 16
ParameterizedTest (org.junit.jupiter.params.ParameterizedTest) 16
MockConsumer (org.apache.kafka.clients.consumer.MockConsumer) 15
Properties (java.util.Properties) 13
MockTime (org.apache.kafka.common.utils.MockTime) 13
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata) 11
HostInfo (org.apache.kafka.streams.state.HostInfo) 11