Example 21 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From class KafkaBasedLog, method start():

public void start() {
    log.info("Starting KafkaBasedLog with topic " + topic);
    producer = createProducer();
    consumer = createConsumer();
    List<TopicPartition> partitions = new ArrayList<>();
    // Until we have admin utilities we can use to check for the existence of this topic and create it if it is missing,
    // we rely on topic auto-creation
    List<PartitionInfo> partitionInfos = null;
    long started = time.milliseconds();
    while (partitionInfos == null && time.milliseconds() - started < CREATE_TOPIC_TIMEOUT_MS) {
        partitionInfos = consumer.partitionsFor(topic);
        Utils.sleep(Math.min(time.milliseconds() - started, 1000));
    }
    if (partitionInfos == null)
        throw new ConnectException("Could not look up partition metadata for offset backing store topic in" + " allotted period. This could indicate a connectivity issue, unavailable topic partitions, or if" + " this is your first use of the topic it may have taken too long to create.");
    for (PartitionInfo partition : partitionInfos)
        partitions.add(new TopicPartition(partition.topic(), partition.partition()));
    consumer.assign(partitions);
    readToLogEnd();
    thread = new WorkThread();
    thread.start();
    log.info("Finished reading KafkaBasedLog for topic " + topic);
    log.info("Started KafkaBasedLog for topic " + topic);
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), ArrayList (java.util.ArrayList), PartitionInfo (org.apache.kafka.common.PartitionInfo), ConnectException (org.apache.kafka.connect.errors.ConnectException)
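
The waiting loop in start() is the interesting part: with no admin utilities available, it simply polls partitionsFor() until topic auto-creation makes the metadata visible or CREATE_TOPIC_TIMEOUT_MS expires. A minimal standalone sketch of the same pattern, assuming our own topic name, timeout constant, and consumer properties (none of which come from KafkaBasedLog itself); note that the behavior of partitionsFor() for a missing topic can vary by client version:

import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class TopicMetadataWait {
    // Illustrative timeout; KafkaBasedLog defines its own CREATE_TOPIC_TIMEOUT_MS.
    private static final long TIMEOUT_MS = 60_000L;

    public static List<PartitionInfo> waitForTopic(String topic, Properties props) throws InterruptedException {
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            long started = System.currentTimeMillis();
            List<PartitionInfo> partitions = null;
            // Poll metadata until the topic appears (e.g. via auto-creation) or we time out.
            while (partitions == null && System.currentTimeMillis() - started < TIMEOUT_MS) {
                partitions = consumer.partitionsFor(topic);
                // Sleep a little longer each attempt, capped at one second,
                // mirroring the loop in KafkaBasedLog.start().
                Thread.sleep(Math.min(System.currentTimeMillis() - started, 1000));
            }
            return partitions; // null if the topic never became visible
        }
    }
}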

Example 22 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From class WorkerSinkTaskTest, method testWakeupInCommitSyncCausesRetry():

@Test
public void testWakeupInCommitSyncCausesRetry() throws Exception {
    expectInitializeTask();
    expectPollInitialAssignment();
    expectConsumerPoll(1);
    expectConversionAndTransformation(1);
    sinkTask.put(EasyMock.<Collection<SinkRecord>>anyObject());
    EasyMock.expectLastCall();
    final List<TopicPartition> partitions = asList(TOPIC_PARTITION, TOPIC_PARTITION2);
    final Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(TOPIC_PARTITION, new OffsetAndMetadata(FIRST_OFFSET + 1));
    offsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET));
    sinkTask.preCommit(offsets);
    EasyMock.expectLastCall().andReturn(offsets);
    // first one raises wakeup
    consumer.commitSync(EasyMock.<Map<TopicPartition, OffsetAndMetadata>>anyObject());
    EasyMock.expectLastCall().andThrow(new WakeupException());
    // we should retry and complete the commit
    consumer.commitSync(EasyMock.<Map<TopicPartition, OffsetAndMetadata>>anyObject());
    EasyMock.expectLastCall();
    sinkTask.close(new HashSet<>(partitions));
    EasyMock.expectLastCall();
    EasyMock.expect(consumer.position(TOPIC_PARTITION)).andReturn(FIRST_OFFSET);
    EasyMock.expect(consumer.position(TOPIC_PARTITION2)).andReturn(FIRST_OFFSET);
    sinkTask.open(partitions);
    EasyMock.expectLastCall();
    EasyMock.expect(consumer.poll(EasyMock.anyLong())).andAnswer(new IAnswer<ConsumerRecords<byte[], byte[]>>() {

        @Override
        public ConsumerRecords<byte[], byte[]> answer() throws Throwable {
            rebalanceListener.getValue().onPartitionsRevoked(partitions);
            rebalanceListener.getValue().onPartitionsAssigned(partitions);
            return ConsumerRecords.empty();
        }
    });
    EasyMock.expect(consumer.assignment()).andReturn(new HashSet<>(partitions));
    consumer.resume(Collections.singleton(TOPIC_PARTITION));
    EasyMock.expectLastCall();
    consumer.resume(Collections.singleton(TOPIC_PARTITION2));
    EasyMock.expectLastCall();
    statusListener.onResume(taskId);
    EasyMock.expectLastCall();
    PowerMock.replayAll();
    workerTask.initialize(TASK_CONFIG);
    workerTask.initializeAndStart();
    // poll for initial assignment
    workerTask.iteration();
    // first record delivered
    workerTask.iteration();
    // now rebalance with the wakeup triggered
    workerTask.iteration();
    PowerMock.verifyAll();
}
Also used: HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), SinkRecord (org.apache.kafka.connect.sink.SinkRecord), WakeupException (org.apache.kafka.common.errors.WakeupException), ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
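
What this test pins down is the retry semantics: a consumer.wakeup() delivered while commitSync() is in flight surfaces as a WakeupException, and the worker is expected to retry so the commit still completes before the rebalance proceeds. A minimal sketch of that retry pattern outside the mocking framework (the helper and its re-raise behavior are illustrative, not WorkerSinkTask's actual code):

import java.util.Map;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;

public final class CommitRetry {
    private CommitRetry() {
    }

    // Commit synchronously; if wakeup() interrupts the first attempt,
    // complete the commit on a second attempt and re-raise the wakeup
    // so the caller's poll loop can still react to it.
    public static void commitWithRetry(Consumer<?, ?> consumer,
                                       Map<TopicPartition, OffsetAndMetadata> offsets) {
        try {
            consumer.commitSync(offsets);
        } catch (WakeupException e) {
            consumer.commitSync(offsets);
            throw e;
        }
    }
}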

Example 23 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From class WorkerSinkTask, method rewind():

private void rewind() {
    Map<TopicPartition, Long> offsets = context.offsets();
    if (offsets.isEmpty()) {
        return;
    }
    for (Map.Entry<TopicPartition, Long> entry : offsets.entrySet()) {
        TopicPartition tp = entry.getKey();
        Long offset = entry.getValue();
        if (offset != null) {
            log.trace("Rewind {} to offset {}.", tp, offset);
            consumer.seek(tp, offset);
            lastCommittedOffsets.put(tp, new OffsetAndMetadata(offset));
            currentOffsets.put(tp, new OffsetAndMetadata(offset));
        } else {
            log.warn("Cannot rewind {} to null offset.", tp);
        }
    }
    context.clearOffsets();
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), HashMap (java.util.HashMap), Map (java.util.Map)
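
The offsets that rewind() applies come from the task itself: a SinkTask asks for re-delivery through SinkTaskContext.offset(), and context.offsets() hands that request map back to the worker, which seeks the consumer and resets its committed/current bookkeeping. A minimal sketch of the task side (the topic, partition, and offset are illustrative):

import java.util.Collection;
import java.util.Map;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;

public class RewindingSinkTask extends SinkTask {
    @Override
    public void put(Collection<SinkRecord> records) {
        // On a downstream failure, ask the framework to re-deliver from a
        // known-good offset; WorkerSinkTask.rewind() performs the actual seek.
        context.offset(new TopicPartition("my-topic", 0), 42L);
    }

    @Override
    public String version() {
        return "0.0.1";
    }

    @Override
    public void start(Map<String, String> props) {
    }

    @Override
    public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) {
    }

    @Override
    public void stop() {
    }
}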

Example 24 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From class WorkerSinkTask, method commitOffsets():

private void commitOffsets(long now, boolean closing) {
    if (currentOffsets.isEmpty())
        return;
    committing = true;
    commitSeqno += 1;
    commitStarted = now;
    final Map<TopicPartition, OffsetAndMetadata> taskProvidedOffsets;
    try {
        taskProvidedOffsets = task.preCommit(new HashMap<>(currentOffsets));
    } catch (Throwable t) {
        if (closing) {
            log.warn("{} Offset commit failed during close");
            onCommitCompleted(t, commitSeqno);
        } else {
            log.error("{} Offset commit failed, rewinding to last committed offsets", this, t);
            for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : lastCommittedOffsets.entrySet()) {
                log.debug("{} Rewinding topic partition {} to offset {}", id, entry.getKey(), entry.getValue().offset());
                consumer.seek(entry.getKey(), entry.getValue().offset());
            }
            currentOffsets = new HashMap<>(lastCommittedOffsets);
            onCommitCompleted(t, commitSeqno);
        }
        return;
    } finally {
        // Close the task if needed before committing the offsets.
        if (closing)
            task.close(currentOffsets.keySet());
    }
    if (taskProvidedOffsets.isEmpty()) {
        log.debug("{} Skipping offset commit, task opted-out", this);
        onCommitCompleted(null, commitSeqno);
        return;
    }
    final Map<TopicPartition, OffsetAndMetadata> commitableOffsets = new HashMap<>(lastCommittedOffsets);
    for (Map.Entry<TopicPartition, OffsetAndMetadata> taskProvidedOffsetEntry : taskProvidedOffsets.entrySet()) {
        final TopicPartition partition = taskProvidedOffsetEntry.getKey();
        final OffsetAndMetadata taskProvidedOffset = taskProvidedOffsetEntry.getValue();
        if (commitableOffsets.containsKey(partition)) {
            if (taskProvidedOffset.offset() <= currentOffsets.get(partition).offset()) {
                commitableOffsets.put(partition, taskProvidedOffset);
            } else {
                log.warn("Ignoring invalid task provided offset {}/{} -- not yet consumed", partition, taskProvidedOffset);
            }
        } else {
            log.warn("Ignoring invalid task provided offset {}/{} -- partition not assigned", partition, taskProvidedOffset);
        }
    }
    if (commitableOffsets.equals(lastCommittedOffsets)) {
        log.debug("{} Skipping offset commit, no change since last commit", this);
        onCommitCompleted(null, commitSeqno);
        return;
    }
    log.trace("{} Offsets to commit: {}", this, commitableOffsets);
    doCommit(commitableOffsets, closing, commitSeqno);
}
Also used: HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), Map (java.util.Map)
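
Note how commitOffsets() treats the preCommit() return value: an empty map means the task opted out of this commit entirely, and any returned offset that is ahead of what has actually been consumed, or for a partition the task does not own, is discarded with a warning. A minimal sketch of a task that opts out because it tracks delivery state in the downstream system (the class itself is illustrative):

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.connect.sink.SinkTask;

public abstract class SelfCommittingSinkTask extends SinkTask {
    @Override
    public Map<TopicPartition, OffsetAndMetadata> preCommit(Map<TopicPartition, OffsetAndMetadata> currentOffsets) {
        // Returning an empty map triggers the "task opted-out" branch in
        // commitOffsets(): no offsets are committed on the task's behalf.
        return Collections.emptyMap();
    }
}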

Example 25 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From class StreamsMetadataState, method getStreamsMetadataForKey():

private <K> StreamsMetadata getStreamsMetadataForKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner, final SourceTopicsInfo sourceTopicsInfo) {
    final Integer partition = partitioner.partition(key, null, sourceTopicsInfo.maxPartitions);
    final Set<TopicPartition> matchingPartitions = new HashSet<>();
    for (String sourceTopic : sourceTopicsInfo.sourceTopics) {
        matchingPartitions.add(new TopicPartition(sourceTopic, partition));
    }
    for (StreamsMetadata streamsMetadata : allMetadata) {
        final Set<String> stateStoreNames = streamsMetadata.stateStoreNames();
        final Set<TopicPartition> topicPartitions = new HashSet<>(streamsMetadata.topicPartitions());
        topicPartitions.retainAll(matchingPartitions);
        if (stateStoreNames.contains(storeName) && !topicPartitions.isEmpty()) {
            return streamsMetadata;
        }
    }
    return null;
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), StreamsMetadata (org.apache.kafka.streams.state.StreamsMetadata), HashSet (java.util.HashSet)
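
This is the internal lookup behind the public KafkaStreams.metadataForKey() API: the partitioner maps the key to a partition of each source topic, and the first instance that both hosts the store and owns a matching partition wins. A minimal caller-side sketch, assuming a running KafkaStreams instance and a store named "word-counts" (both illustrative):

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.state.StreamsMetadata;

public class MetadataLookup {
    // Ask the streams instance which host owns the store partition for a key.
    public static StreamsMetadata hostFor(KafkaStreams streams, String key) {
        return streams.metadataForKey("word-counts", key, Serdes.String().serializer());
    }
}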

Aggregations

TopicPartition (org.apache.kafka.common.TopicPartition): 243
HashMap (java.util.HashMap): 128
Test (org.junit.Test): 96
Map (java.util.Map): 59
ArrayList (java.util.ArrayList): 45
HashSet (java.util.HashSet): 43
LinkedHashMap (java.util.LinkedHashMap): 39
TaskId (org.apache.kafka.streams.processor.TaskId): 33
PartitionInfo (org.apache.kafka.common.PartitionInfo): 30
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 28
Set (java.util.Set): 27
List (java.util.List): 23
Metrics (org.apache.kafka.common.metrics.Metrics): 20
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 19
Node (org.apache.kafka.common.Node): 18
Properties (java.util.Properties): 17
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 17
MockTime (org.apache.kafka.common.utils.MockTime): 17
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 14
Collection (java.util.Collection): 13