
Example 6 with CommitFailedException

Use of org.apache.kafka.clients.consumer.CommitFailedException in project apache-kafka-on-k8s by banzaicloud.

From class StreamTask, method commitOffsets.

/**
 * @throws TaskMigratedException if committing offsets failed (non-EOS)
 *                               or if the task producer got fenced (EOS)
 */
private void commitOffsets(final boolean startNewTransaction) {
    try {
        if (commitOffsetNeeded) {
            log.trace("Committing offsets");
            final Map<TopicPartition, OffsetAndMetadata> consumedOffsetsAndMetadata = new HashMap<>(consumedOffsets.size());
            for (final Map.Entry<TopicPartition, Long> entry : consumedOffsets.entrySet()) {
                final TopicPartition partition = entry.getKey();
                final long offset = entry.getValue() + 1;
                consumedOffsetsAndMetadata.put(partition, new OffsetAndMetadata(offset));
                stateMgr.putOffsetLimit(partition, offset);
            }
            if (eosEnabled) {
                producer.sendOffsetsToTransaction(consumedOffsetsAndMetadata, applicationId);
                producer.commitTransaction();
                transactionInFlight = false;
                if (startNewTransaction) {
                    transactionInFlight = true;
                    producer.beginTransaction();
                }
            } else {
                consumer.commitSync(consumedOffsetsAndMetadata);
            }
            commitOffsetNeeded = false;
        } else if (eosEnabled && !startNewTransaction && transactionInFlight) {
            // need to make sure to commit txn for suspend case
            producer.commitTransaction();
            transactionInFlight = false;
        }
    } catch (final CommitFailedException | ProducerFencedException fatal) {
        throw new TaskMigratedException(this, fatal);
    }
}
Also used: HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), Map (java.util.Map), CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException), ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException)
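
The non-EOS branch above is the general manual-commit pattern: commit the last processed offset + 1 and treat CommitFailedException as a sign that the group has rebalanced. Below is a minimal sketch of the same pattern outside Kafka Streams; the Consumer handle and the lastProcessed map are illustrative assumptions, not part of the project above.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.CommitFailedException;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

final class OffsetCommitSketch {

    /** Commits lastProcessed + 1 for each partition; returns false if the commit was fenced. */
    static boolean commitProcessed(Consumer<?, ?> consumer, Map<TopicPartition, Long> lastProcessed) {
        final Map<TopicPartition, OffsetAndMetadata> toCommit = new HashMap<>(lastProcessed.size());
        for (final Map.Entry<TopicPartition, Long> entry : lastProcessed.entrySet()) {
            // the committed offset is the position of the next record to read, hence + 1
            toCommit.put(entry.getKey(), new OffsetAndMetadata(entry.getValue() + 1));
        }
        try {
            consumer.commitSync(toCommit);
            return true;
        } catch (final CommitFailedException fenced) {
            // the group rebalanced and this member no longer owns the partitions;
            // the caller should drop in-flight work and keep polling to rejoin
            return false;
        }
    }
}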

Example 7 with CommitFailedException

Use of org.apache.kafka.clients.consumer.CommitFailedException in project kafka by apache.

From class TransactionManagerTest, method testUnknownMemberIdInTxnOffsetCommitByGroupMetadata.

@Test
public void testUnknownMemberIdInTxnOffsetCommitByGroupMetadata() {
    final TopicPartition tp = new TopicPartition("foo", 0);
    final String unknownMemberId = "unknownMember";
    doInitTransactions();
    transactionManager.beginTransaction();
    TransactionalRequestResult sendOffsetsResult = transactionManager.sendOffsetsToTransaction(singletonMap(tp, new OffsetAndMetadata(39L)), new ConsumerGroupMetadata(consumerGroupId, 5, unknownMemberId, Optional.empty()));
    prepareAddOffsetsToTxnResponse(Errors.NONE, consumerGroupId, producerId, epoch);
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.GROUP, consumerGroupId);
    runUntil(() -> transactionManager.coordinator(CoordinatorType.GROUP) != null);
    client.prepareResponse(request -> {
        TxnOffsetCommitRequest txnOffsetCommitRequest = (TxnOffsetCommitRequest) request;
        assertEquals(consumerGroupId, txnOffsetCommitRequest.data().groupId());
        assertEquals(producerId, txnOffsetCommitRequest.data().producerId());
        assertEquals(epoch, txnOffsetCommitRequest.data().producerEpoch());
        return !txnOffsetCommitRequest.data().memberId().equals(memberId);
    }, new TxnOffsetCommitResponse(0, singletonMap(tp, Errors.UNKNOWN_MEMBER_ID)));
    runUntil(transactionManager::hasError);
    assertTrue(transactionManager.lastError() instanceof CommitFailedException);
    assertTrue(sendOffsetsResult.isCompleted());
    assertFalse(sendOffsetsResult.isSuccessful());
    assertTrue(sendOffsetsResult.error() instanceof CommitFailedException);
    assertAbortableError(CommitFailedException.class);
}
Also used: ConsumerGroupMetadata (org.apache.kafka.clients.consumer.ConsumerGroupMetadata), TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), TxnOffsetCommitResponse (org.apache.kafka.common.requests.TxnOffsetCommitResponse), TxnOffsetCommitRequest (org.apache.kafka.common.requests.TxnOffsetCommitRequest), CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException), Test (org.junit.jupiter.api.Test)
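
This test exercises the internal TransactionManager directly; at the application level the same failure surfaces from the transactional producer as a KafkaException (a CommitFailedException in this case) that is abortable rather than fatal. A rough sketch of the usual handling, with the producer, consumer and offsets map assumed to exist elsewhere in the application:

import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.ProducerFencedException;

final class TxnCommitSketch {

    /** Commits the transaction, aborting on a CommitFailedException or other abortable error. */
    static void commitOrAbort(Producer<?, ?> producer, Consumer<?, ?> consumer,
            Map<TopicPartition, OffsetAndMetadata> offsets) {
        try {
            producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
            producer.commitTransaction();
        } catch (ProducerFencedException fatal) {
            // another instance with the same transactional.id took over: give up
            producer.close();
            throw fatal;
        } catch (KafkaException abortable) {
            // includes CommitFailedException for a stale member id or generation:
            // abort, rewind the consumer to its last committed positions, then retry
            producer.abortTransaction();
        }
    }
}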

Example 8 with CommitFailedException

Use of org.apache.kafka.clients.consumer.CommitFailedException in project kafka by apache.

From class TransactionManagerTest, method testIllegalGenerationInTxnOffsetCommitByGroupMetadata.

@Test
public void testIllegalGenerationInTxnOffsetCommitByGroupMetadata() {
    final TopicPartition tp = new TopicPartition("foo", 0);
    final int illegalGenerationId = 1;
    doInitTransactions();
    transactionManager.beginTransaction();
    TransactionalRequestResult sendOffsetsResult = transactionManager.sendOffsetsToTransaction(singletonMap(tp, new OffsetAndMetadata(39L)), new ConsumerGroupMetadata(consumerGroupId, illegalGenerationId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Optional.empty()));
    prepareAddOffsetsToTxnResponse(Errors.NONE, consumerGroupId, producerId, epoch);
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.GROUP, consumerGroupId);
    runUntil(() -> transactionManager.coordinator(CoordinatorType.GROUP) != null);
    prepareTxnOffsetCommitResponse(consumerGroupId, producerId, epoch, singletonMap(tp, Errors.ILLEGAL_GENERATION));
    client.prepareResponse(request -> {
        TxnOffsetCommitRequest txnOffsetCommitRequest = (TxnOffsetCommitRequest) request;
        assertEquals(consumerGroupId, txnOffsetCommitRequest.data().groupId());
        assertEquals(producerId, txnOffsetCommitRequest.data().producerId());
        assertEquals(epoch, txnOffsetCommitRequest.data().producerEpoch());
        return txnOffsetCommitRequest.data().generationId() != generationId;
    }, new TxnOffsetCommitResponse(0, singletonMap(tp, Errors.ILLEGAL_GENERATION)));
    runUntil(transactionManager::hasError);
    assertTrue(transactionManager.lastError() instanceof CommitFailedException);
    assertTrue(sendOffsetsResult.isCompleted());
    assertFalse(sendOffsetsResult.isSuccessful());
    assertTrue(sendOffsetsResult.error() instanceof CommitFailedException);
    assertAbortableError(CommitFailedException.class);
}
Also used: ConsumerGroupMetadata (org.apache.kafka.clients.consumer.ConsumerGroupMetadata), TopicPartition (org.apache.kafka.common.TopicPartition), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), TxnOffsetCommitResponse (org.apache.kafka.common.requests.TxnOffsetCommitResponse), TxnOffsetCommitRequest (org.apache.kafka.common.requests.TxnOffsetCommitRequest), CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException), Test (org.junit.jupiter.api.Test)
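
Together with the previous example, the point is that the broker rejects a transactional offset commit whose member id or generation id no longer matches the group's current state, typically because a rebalance happened in between, so a zombie instance cannot commit offsets. A common application-side safeguard, shown here as a small illustrative sketch, is to read the group metadata from the consumer at commit time rather than caching it:

import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.common.TopicPartition;

final class GroupMetadataAtCommitTime {

    static void sendOffsets(Producer<?, ?> producer, Consumer<?, ?> consumer,
            Map<TopicPartition, OffsetAndMetadata> offsets) {
        // fetch member id and generation at commit time so they reflect the
        // membership after any rebalance, instead of a cached, possibly stale copy
        producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
    }
}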

Example 9 with CommitFailedException

Use of org.apache.kafka.clients.consumer.CommitFailedException in project kafka by apache.

From class StreamsProducer, method commitTransaction.

/**
 * @throws IllegalStateException if EOS is disabled
 * @throws TaskMigratedException if the producer got fenced or the transactional offset commit failed
 */
protected void commitTransaction(final Map<TopicPartition, OffsetAndMetadata> offsets, final ConsumerGroupMetadata consumerGroupMetadata) {
    if (!eosEnabled()) {
        throw new IllegalStateException(formatException("Exactly-once is not enabled"));
    }
    maybeBeginTransaction();
    try {
        // EOS-v2 assumes brokers are on version 2.5+ and thus can understand the full set of consumer group metadata
        // Thus if we are using EOS-v1 and can't make this assumption, we must downgrade the request to include only the group id metadata
        final ConsumerGroupMetadata maybeDowngradedGroupMetadata = processingMode == EXACTLY_ONCE_V2 ? consumerGroupMetadata : new ConsumerGroupMetadata(consumerGroupMetadata.groupId());
        producer.sendOffsetsToTransaction(offsets, maybeDowngradedGroupMetadata);
        producer.commitTransaction();
        transactionInFlight = false;
    } catch (final ProducerFencedException | InvalidProducerEpochException | CommitFailedException error) {
        throw new TaskMigratedException(formatException("Producer got fenced trying to commit a transaction"), error);
    } catch (final TimeoutException timeoutException) {
        // re-throw to trigger `task.timeout.ms`
        throw timeoutException;
    } catch (final KafkaException error) {
        throw new StreamsException(formatException("Error encountered trying to commit a transaction"), error);
    }
}
Also used: InvalidProducerEpochException (org.apache.kafka.common.errors.InvalidProducerEpochException), ConsumerGroupMetadata (org.apache.kafka.clients.consumer.ConsumerGroupMetadata), StreamsException (org.apache.kafka.streams.errors.StreamsException), KafkaException (org.apache.kafka.common.KafkaException), CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException), ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException), TimeoutException (org.apache.kafka.common.errors.TimeoutException)
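
The in-line comment is the key detail in this example: under EOS-v2 the full consumer group metadata (member id, generation id, group instance id) is attached to the offset commit so that brokers on 2.5+ can fence stale members, while under EOS-v1 only the group id is sent. A rough sketch of the two shapes, assuming a running Consumer instance:

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;

final class GroupMetadataShapes {

    static void show(Consumer<?, ?> consumer) {
        // EOS-v2: full metadata, so 2.5+ brokers can validate member id and generation
        ConsumerGroupMetadata full = consumer.groupMetadata();
        // EOS-v1 style downgrade: only the group id, the remaining fields take defaults
        ConsumerGroupMetadata groupIdOnly = new ConsumerGroupMetadata(full.groupId());
    }
}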

Example 10 with CommitFailedException

Use of org.apache.kafka.clients.consumer.CommitFailedException in project streamsx.kafka by IBMStreams.

From class AbstractKafkaConsumerClient, method commitOffsets.

/**
 * Commits the offsets given in the map of the CommitInfo instance with the given controls set within the object.
 * This method must only be invoked by the thread that runs the poll loop.
 *
 * @param offsets the offsets per topic partition and control information. The offsets must be the last processed offsets +1.
 * @throws InterruptedException The thread has been interrupted while committing synchronously
 * @throws RuntimeException  All other kinds of unrecoverable exceptions
 */
protected void commitOffsets(CommitInfo offsets) throws RuntimeException {
    final Map<TopicPartition, OffsetAndMetadata> offsetMap = offsets.getMap();
    if (logger.isEnabledFor(DEBUG_LEVEL)) {
        logger.log(DEBUG_LEVEL, "Going to commit offsets: " + offsets);
        if (offsetMap.isEmpty()) {
            logger.debug("no offsets to commit ...");
        }
    }
    if (offsetMap.isEmpty()) {
        return;
    }
    // we can only commit assigned partitions
    Set<TopicPartition> currentAssignment = getConsumer().assignment();
    if (offsets.isCommitPartitionWise()) {
        Map<TopicPartition, OffsetAndMetadata> map = new HashMap<>(1);
        for (TopicPartition tp : offsetMap.keySet()) {
            // do not commit for partitions we are not assigned
            if (!currentAssignment.contains(tp)) {
                continue;
            }
            map.clear();
            map.put(tp, offsetMap.get(tp));
            if (offsets.isCommitSynchronous()) {
                try {
                    consumer.commitSync(map);
                    postOffsetCommit(map);
                } catch (CommitFailedException e) {
                    // the commit failed and cannot be retried. This can only occur if you are using
                    // automatic group management with subscribe(Collection), or if there is an active
                    // group with the same groupId which is using group management.
                    logger.error(Messages.getString("OFFSET_COMMIT_FAILED_FOR_PARTITION", tp, e.getLocalizedMessage()));
                    if (offsets.isThrowOnSynchronousCommitFailure()) {
                        // we usually want the offsets to be really committed, or to restart the operator, for example when running in a consistent region (CR)
                        throw new ConsumerCommitFailedException(e.getMessage(), e);
                    }
                }
            } else {
                consumer.commitAsync(map, this);
            }
        }
    } else {
        Map<TopicPartition, OffsetAndMetadata> map = new HashMap<>();
        offsetMap.forEach((tp, offsMeta) -> {
            if (currentAssignment.contains(tp)) {
                map.put(tp, offsMeta);
            }
        });
        if (map.isEmpty()) {
            logger.log(DEBUG_LEVEL, "no offsets to commit ... (partitions not assigned)");
            return;
        }
        if (offsets.isCommitSynchronous()) {
            try {
                consumer.commitSync(map);
                postOffsetCommit(map);
            } catch (CommitFailedException e) {
                // the commit failed and cannot be retried. This can only occur if you are using
                // automatic group management with subscribe(Collection), or if there is an active
                // group with the same groupId which is using group management.
                logger.error(Messages.getString("OFFSET_COMMIT_FAILED", e.getLocalizedMessage()));
                if (offsets.isThrowOnSynchronousCommitFailure()) {
                    // we usually want the offsets to be really committed, or to restart the operator, for example when running in a consistent region (CR)
                    throw new ConsumerCommitFailedException(e.getMessage(), e);
                }
            }
        } else {
            consumer.commitAsync(map, this);
        }
    }
}
Also used: HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), ConsumerCommitFailedException (com.ibm.streamsx.kafka.ConsumerCommitFailedException), OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata), CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException)
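
As the comments in this example note, a synchronous commit can only fail this way when consumer group management is involved. The textbook way to run into CommitFailedException with a plain subscribe()-based consumer is to spend longer than max.poll.interval.ms between poll() calls, so the group evicts the member before it commits. A self-contained, hypothetical sketch (broker address, topic and group id are made up):

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.CommitFailedException;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class SlowProcessingSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "sketch-group");            // assumed group id
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        // a low value makes the failure easy to provoke; the default is 5 minutes
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "10000");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("sketch-topic"));    // assumed topic
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    process(record); // if this takes longer than max.poll.interval.ms ...
                }
                try {
                    consumer.commitSync();
                } catch (CommitFailedException e) {
                    // ... the group has already rebalanced and the commit is rejected;
                    // keep polling so the consumer rejoins and the records are redelivered
                }
            }
        }
    }

    private static void process(ConsumerRecord<String, String> record) {
        // placeholder for application work
    }
}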

Aggregations

CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException): 10
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 9
TopicPartition (org.apache.kafka.common.TopicPartition): 9
HashMap (java.util.HashMap): 6
Map (java.util.Map): 5
TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException): 4
ConsumerGroupMetadata (org.apache.kafka.clients.consumer.ConsumerGroupMetadata): 3
RetriableCommitFailedException (org.apache.kafka.clients.consumer.RetriableCommitFailedException): 2
KafkaException (org.apache.kafka.common.KafkaException): 2
Node (org.apache.kafka.common.Node): 2
ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException): 2
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 2
OffsetCommitRequest (org.apache.kafka.common.requests.OffsetCommitRequest): 2
TxnOffsetCommitRequest (org.apache.kafka.common.requests.TxnOffsetCommitRequest): 2
TxnOffsetCommitResponse (org.apache.kafka.common.requests.TxnOffsetCommitResponse): 2
StreamsException (org.apache.kafka.streams.errors.StreamsException): 2
Test (org.junit.jupiter.api.Test): 2
ConsumerCommitFailedException (com.ibm.streamsx.kafka.ConsumerCommitFailedException): 1
ArrayList (java.util.ArrayList): 1
Collection (java.util.Collection): 1