Search in sources :

Example 61 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project kafka by apache.

This example shows the `toStruct` method of the class `LeaderAndIsrResponse`.

@Override
protected Struct toStruct(short version) {
    // Serialize the per-partition error map plus the top-level error code
    // into the LeaderAndIsr response schema for the requested version.
    Struct struct = new Struct(ApiKeys.LEADER_AND_ISR.responseSchema(version));
    List<Struct> partitionStructs = new ArrayList<>(responses.size());
    for (Map.Entry<TopicPartition, Errors> entry : responses.entrySet()) {
        TopicPartition tp = entry.getKey();
        Struct partitionStruct = struct.instance(PARTITIONS_KEY_NAME);
        partitionStruct.set(PARTITIONS_TOPIC_KEY_NAME, tp.topic());
        partitionStruct.set(PARTITIONS_PARTITION_KEY_NAME, tp.partition());
        partitionStruct.set(PARTITIONS_ERROR_CODE_KEY_NAME, entry.getValue().code());
        partitionStructs.add(partitionStruct);
    }
    struct.set(PARTITIONS_KEY_NAME, partitionStructs.toArray());
    struct.set(ERROR_CODE_KEY_NAME, error.code());
    return struct;
}
Also used : Errors(org.apache.kafka.common.protocol.Errors) TopicPartition(org.apache.kafka.common.TopicPartition) ArrayList(java.util.ArrayList) Map(java.util.Map) HashMap(java.util.HashMap) Struct(org.apache.kafka.common.protocol.types.Struct)

Example 62 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project kafka by apache.

This example shows the `toStruct` method of the class `ListOffsetRequest`.

@Override
protected Struct toStruct() {
    // Serialize this request into the ListOffsets request schema. For v0 the
    // per-partition payload is a PartitionData (timestamp + maxNumOffsets);
    // for later versions it is just a Long timestamp.
    short version = version();
    Struct struct = new Struct(ApiKeys.LIST_OFFSETS.requestSchema(version));
    Map<TopicPartition, ?> targetTimes = partitionTimestamps == null ? offsetData : partitionTimestamps;
    Map<String, Map<Integer, Object>> byTopic = CollectionUtils.groupDataByTopic(targetTimes);
    struct.set(REPLICA_ID_KEY_NAME, replicaId);
    List<Struct> topicStructs = new ArrayList<>();
    for (Map.Entry<String, Map<Integer, Object>> topicEntry : byTopic.entrySet()) {
        Struct topicStruct = struct.instance(TOPICS_KEY_NAME);
        topicStruct.set(TOPIC_KEY_NAME, topicEntry.getKey());
        List<Struct> partitionStructs = new ArrayList<>();
        for (Map.Entry<Integer, Object> partitionEntry : topicEntry.getValue().entrySet()) {
            // Common fields first; the version decides the remaining payload.
            Struct partitionStruct = topicStruct.instance(PARTITIONS_KEY_NAME);
            partitionStruct.set(PARTITION_KEY_NAME, partitionEntry.getKey());
            if (version == 0) {
                PartitionData v0Data = (PartitionData) partitionEntry.getValue();
                partitionStruct.set(TIMESTAMP_KEY_NAME, v0Data.timestamp);
                partitionStruct.set(MAX_NUM_OFFSETS_KEY_NAME, v0Data.maxNumOffsets);
            } else {
                partitionStruct.set(TIMESTAMP_KEY_NAME, (Long) partitionEntry.getValue());
            }
            partitionStructs.add(partitionStruct);
        }
        topicStruct.set(PARTITIONS_KEY_NAME, partitionStructs.toArray());
        topicStructs.add(topicStruct);
    }
    struct.set(TOPICS_KEY_NAME, topicStructs.toArray());
    return struct;
}
Also used : ArrayList(java.util.ArrayList) Struct(org.apache.kafka.common.protocol.types.Struct) TopicPartition(org.apache.kafka.common.TopicPartition) HashMap(java.util.HashMap) Map(java.util.Map)

Example 63 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project kafka by apache.

This example shows the `groupDataByTopic` method of the class `CollectionUtils`.

/**
     * Groups a flat list of topic-partitions by their topic name.
     *
     * @param partitions the topic-partitions to group
     * @return a map from topic name to the partition ids seen for that topic
     */
public static Map<String, List<Integer>> groupDataByTopic(List<TopicPartition> partitions) {
    Map<String, List<Integer>> result = new HashMap<>();
    for (TopicPartition tp : partitions) {
        List<Integer> partitionIds = result.get(tp.topic());
        // Lazily create the per-topic list on first encounter.
        if (partitionIds == null) {
            partitionIds = new ArrayList<>();
            result.put(tp.topic(), partitionIds);
        }
        partitionIds.add(tp.partition());
    }
    return result;
}
Also used : HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) List(java.util.List) ArrayList(java.util.ArrayList)

Example 64 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project kafka by apache.

This example shows the `testSeekNegative` method of the class `KafkaConsumerTest`.

// Verifies that seeking to a negative offset is rejected: the @Test "expected"
// attribute requires an IllegalArgumentException to be thrown.
@Test(expected = IllegalArgumentException.class)
public void testSeekNegative() {
    // NOTE(review): `props` is populated but never used -- newConsumer() is
    // called with no arguments below. Either this should be newConsumer(props)
    // or the Properties setup is dead code; confirm against the newConsumer()
    // helper definition before changing.
    Properties props = new Properties();
    props.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testSeekNegative");
    props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999");
    props.setProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName());
    KafkaConsumer<byte[], byte[]> consumer = newConsumer();
    try {
        consumer.assign(Arrays.asList(new TopicPartition("nonExistTopic", 0)));
        // seek(partition, -1): the negative offset should trigger the exception.
        consumer.seek(new TopicPartition("nonExistTopic", 0), -1);
    } finally {
        // Close even when the expected exception is thrown, so the consumer
        // does not leak across tests.
        consumer.close();
    }
}
Also used : MockMetricsReporter(org.apache.kafka.test.MockMetricsReporter) TopicPartition(org.apache.kafka.common.TopicPartition) Properties(java.util.Properties) Test(org.junit.Test)

Example 65 with TopicPartition

use of org.apache.kafka.common.TopicPartition in project kafka by apache.

This example shows the `commitOffsets` method of the class `WorkerSinkTask`.

/**
 * Commits consumer offsets for this sink task, giving the task a chance to
 * override them via {@code preCommit}. On a {@code preCommit} failure the
 * consumer is rewound to the last committed offsets (unless we are closing).
 *
 * @param now     current time, recorded as the commit start timestamp
 * @param closing true when committing as part of task shutdown; the task is
 *                closed before the commit and no rewind is attempted on error
 */
private void commitOffsets(long now, boolean closing) {
    if (currentOffsets.isEmpty())
        return;
    committing = true;
    commitSeqno += 1;
    commitStarted = now;
    final Map<TopicPartition, OffsetAndMetadata> taskProvidedOffsets;
    try {
        // Pass a copy so the task cannot mutate our tracking map.
        taskProvidedOffsets = task.preCommit(new HashMap<>(currentOffsets));
    } catch (Throwable t) {
        if (closing) {
            // BUG FIX: the "{}" placeholder previously had no argument; supply
            // `this` and pass the throwable so the failure cause is logged.
            log.warn("{} Offset commit failed during close", this, t);
            onCommitCompleted(t, commitSeqno);
        } else {
            log.error("{} Offset commit failed, rewinding to last committed offsets", this, t);
            // Rewind the consumer so the failed batch is re-delivered.
            for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : lastCommittedOffsets.entrySet()) {
                log.debug("{} Rewinding topic partition {} to offset {}", id, entry.getKey(), entry.getValue().offset());
                consumer.seek(entry.getKey(), entry.getValue().offset());
            }
            currentOffsets = new HashMap<>(lastCommittedOffsets);
            onCommitCompleted(t, commitSeqno);
        }
        return;
    } finally {
        // Close the task if needed before committing the offsets.
        if (closing)
            task.close(currentOffsets.keySet());
    }
    if (taskProvidedOffsets.isEmpty()) {
        log.debug("{} Skipping offset commit, task opted-out", this);
        onCommitCompleted(null, commitSeqno);
        return;
    }
    // Start from the last committed offsets and overlay only task-provided
    // offsets that are valid: the partition must be assigned and the offset
    // must not be beyond what has actually been consumed.
    final Map<TopicPartition, OffsetAndMetadata> committableOffsets = new HashMap<>(lastCommittedOffsets);
    for (Map.Entry<TopicPartition, OffsetAndMetadata> taskProvidedOffsetEntry : taskProvidedOffsets.entrySet()) {
        final TopicPartition partition = taskProvidedOffsetEntry.getKey();
        final OffsetAndMetadata taskProvidedOffset = taskProvidedOffsetEntry.getValue();
        if (committableOffsets.containsKey(partition)) {
            if (taskProvidedOffset.offset() <= currentOffsets.get(partition).offset()) {
                committableOffsets.put(partition, taskProvidedOffset);
            } else {
                log.warn("Ignoring invalid task provided offset {}/{} -- not yet consumed", partition, taskProvidedOffset);
            }
        } else {
            log.warn("Ignoring invalid task provided offset {}/{} -- partition not assigned", partition, taskProvidedOffset);
        }
    }
    if (committableOffsets.equals(lastCommittedOffsets)) {
        log.debug("{} Skipping offset commit, no change since last commit", this);
        onCommitCompleted(null, commitSeqno);
        return;
    }
    log.trace("{} Offsets to commit: {}", this, committableOffsets);
    doCommit(committableOffsets, closing, commitSeqno);
}
Also used : HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) HashMap(java.util.HashMap) Map(java.util.Map)

Aggregations

TopicPartition (org.apache.kafka.common.TopicPartition)1729 HashMap (java.util.HashMap)744 Test (org.junit.Test)519 ArrayList (java.util.ArrayList)416 Map (java.util.Map)361 Test (org.junit.jupiter.api.Test)347 HashSet (java.util.HashSet)281 List (java.util.List)260 OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata)246 Set (java.util.Set)189 LinkedHashMap (java.util.LinkedHashMap)180 PartitionInfo (org.apache.kafka.common.PartitionInfo)170 ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)155 TaskId (org.apache.kafka.streams.processor.TaskId)145 Node (org.apache.kafka.common.Node)140 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)109 KafkaException (org.apache.kafka.common.KafkaException)105 Errors (org.apache.kafka.common.protocol.Errors)105 ByteBuffer (java.nio.ByteBuffer)99 Properties (java.util.Properties)93