Example 76 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

From the class ChangelogRecordDeserializationHelper, method applyChecksAndUpdatePosition.

public static void applyChecksAndUpdatePosition(final ConsumerRecord<byte[], byte[]> record,
                                                final boolean consistencyEnabled,
                                                final Position position) {
    if (!consistencyEnabled) {
        return;
    }
    final Header versionHeader = record.headers().lastHeader(CHANGELOG_VERSION_HEADER_KEY);
    if (versionHeader == null) {
        // Records written without consistency information are simply skipped.
        return;
    }
    switch (versionHeader.value()[0]) {
        case 0:
            final Header vectorHeader = record.headers().lastHeader(CHANGELOG_POSITION_HEADER_KEY);
            if (vectorHeader == null) {
                throw new StreamsException("This should not happen. Consistency is enabled but the "
                    + "changelog contains records without consistency information.");
            }
            position.merge(PositionSerde.deserialize(ByteBuffer.wrap(vectorHeader.value())));
            break;
        default:
            log.warn("Changelog records have been encoded using a larger version than this server "
                + "understands. Please upgrade your server.");
    }
}
Also used : Header(org.apache.kafka.common.header.Header) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) StreamsException(org.apache.kafka.streams.errors.StreamsException)
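
The version-0 branch merges the position vector deserialized from the record header into the running Position. As a minimal sketch of the merge semantics the helper relies on (the topic name and offsets below are made up), Position#merge keeps the component-wise maximum offset, so replaying changelog records in any order converges to the same vector:

import org.apache.kafka.streams.query.Position;

public class PositionMergeSketch {
    public static void main(final String[] args) {
        // Local position already recorded by the state store.
        final Position local = Position.emptyPosition()
            .withComponent("input-topic", 0, 5L);
        // Position vector deserialized from a changelog record header.
        final Position fromHeader = Position.emptyPosition()
            .withComponent("input-topic", 0, 3L)
            .withComponent("input-topic", 1, 7L);
        // merge keeps the maximum offset per topic-partition:
        // partition 0 stays at 5, partition 1 becomes 7.
        final Position merged = local.merge(fromHeader);
        System.out.println(merged);
    }
}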

Example 77 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

From the class StreamsProducer, method maybeBeginTransaction.

private void maybeBeginTransaction() {
    if (eosEnabled() && !transactionInFlight) {
        try {
            producer.beginTransaction();
            transactionInFlight = true;
        } catch (final ProducerFencedException | InvalidProducerEpochException error) {
            // Another producer with the same transactional id took over: the task has migrated.
            throw new TaskMigratedException(formatException("Producer got fenced trying to begin a new transaction"), error);
        } catch (final KafkaException error) {
            // Anything else is unexpected and surfaces as a fatal StreamsException.
            throw new StreamsException(formatException("Error encountered trying to begin a new transaction"), error);
        }
    }
}
Also used : InvalidProducerEpochException(org.apache.kafka.common.errors.InvalidProducerEpochException) StreamsException(org.apache.kafka.streams.errors.StreamsException) KafkaException(org.apache.kafka.common.KafkaException) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) TaskMigratedException(org.apache.kafka.streams.errors.TaskMigratedException)
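
The two catch blocks translate producer-level errors into Streams-level ones: fencing means another instance with the same transactional id took over, so the task has migrated, while any other KafkaException is fatal. A standalone sketch of the same translation idiom against a plain transactional KafkaProducer (the broker address and transactional id are placeholders, and a running broker is assumed):

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.InvalidProducerEpochException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.ByteArraySerializer;

public class BeginTransactionSketch {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
        props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "sketch-txn-id");   // hypothetical id
        try (final KafkaProducer<byte[], byte[]> producer =
                 new KafkaProducer<>(props, new ByteArraySerializer(), new ByteArraySerializer())) {
            producer.initTransactions();
            try {
                producer.beginTransaction();
                // ... send records and commitTransaction() ...
            } catch (final ProducerFencedException | InvalidProducerEpochException error) {
                // Streams maps these to TaskMigratedException: rejoin the group and retry.
                System.err.println("Fenced while beginning a transaction: " + error);
            } catch (final KafkaException error) {
                // Streams wraps everything else in a fatal StreamsException.
                System.err.println("Unexpected error beginning a transaction: " + error);
            }
        }
    }
}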

Example 78 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

From the class StreamThread, method runLoop.

/**
 * Main event loop for polling, and processing records through topologies.
 *
 * @throws IllegalStateException If store gets registered after initialized is already finished
 * @throws StreamsException      if the store's change log does not contain the partition
 */
// Needed to include StreamsConfig.EXACTLY_ONCE_BETA in error log for UnsupportedVersionException
@SuppressWarnings("deprecation")
boolean runLoop() {
    subscribeConsumer();
    // if the thread is in the middle of a rebalance, keep polling until the rebalance is completed before we close and commit the tasks
    while (isRunning() || taskManager.isRebalanceInProgress()) {
        try {
            checkForTopologyUpdates();
            // stop polling regardless of the rebalance status since we know there are no tasks left
            if (!isRunning() && topologyMetadata.isEmpty()) {
                log.info("Shutting down thread with empty topology.");
                break;
            }
            maybeSendShutdown();
            final long size = cacheResizeSize.getAndSet(-1L);
            if (size != -1L) {
                cacheResizer.accept(size);
            }
            runOnce();
            if (nextProbingRebalanceMs.get() < time.milliseconds()) {
                log.info("Triggering the followup rebalance scheduled for {} ms.", nextProbingRebalanceMs.get());
                mainConsumer.enforceRebalance();
                nextProbingRebalanceMs.set(Long.MAX_VALUE);
            }
        } catch (final TaskCorruptedException e) {
            log.warn("Detected the states of tasks " + e.corruptedTasks() + " are corrupted. " + "Will close the task as dirty and re-create and bootstrap from scratch.", e);
            try {
                // check if any active task got corrupted; if so, trigger a rebalance
                // once the task corruptions have been handled
                final boolean enforceRebalance = taskManager.handleCorruption(e.corruptedTasks());
                if (enforceRebalance && eosEnabled) {
                    log.info("Active task(s) got corrupted. Triggering a rebalance.");
                    mainConsumer.enforceRebalance();
                }
            } catch (final TaskMigratedException taskMigrated) {
                handleTaskMigrated(taskMigrated);
            }
        } catch (final TaskMigratedException e) {
            handleTaskMigrated(e);
        } catch (final UnsupportedVersionException e) {
            final String errorMessage = e.getMessage();
            if (errorMessage != null && errorMessage.startsWith("Broker unexpectedly doesn't support requireStable flag on version ")) {
                log.error("Shutting down because the Kafka cluster seems to be on a too old version. " + "Setting {}=\"{}\"/\"{}\" requires broker version 2.5 or higher.", StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2, StreamsConfig.EXACTLY_ONCE_BETA);
            }
            failedStreamThreadSensor.record();
            this.streamsUncaughtExceptionHandler.accept(new StreamsException(e), false);
            return false;
        } catch (final StreamsException e) {
            throw e;
        } catch (final Exception e) {
            throw new StreamsException(e);
        }
    }
    return true;
}
Also used : TaskCorruptedException(org.apache.kafka.streams.errors.TaskCorruptedException) StreamsException(org.apache.kafka.streams.errors.StreamsException) KafkaException(org.apache.kafka.common.KafkaException) TaskMigratedException(org.apache.kafka.streams.errors.TaskMigratedException) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException) InvalidOffsetException(org.apache.kafka.clients.consumer.InvalidOffsetException)
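
The last two catch clauses implement a wrap-or-rethrow idiom: an exception that is already a StreamsException is rethrown unchanged, while anything unexpected is wrapped, so the uncaught-exception handler always receives a single exception type with the original failure as its cause. A minimal, self-contained sketch of that idiom (runOnceSafely is an invented helper name):

import org.apache.kafka.streams.errors.StreamsException;

public class WrapOrRethrowSketch {
    static void runOnceSafely(final Runnable step) {
        try {
            step.run();
        } catch (final StreamsException fatal) {
            throw fatal; // already classified; preserve the type
        } catch (final RuntimeException unexpected) {
            throw new StreamsException(unexpected); // wrap unknown failures
        }
    }

    public static void main(final String[] args) {
        try {
            runOnceSafely(() -> {
                throw new IllegalStateException("boom");
            });
        } catch (final StreamsException e) {
            System.out.println("Wrapped cause: " + e.getCause());
        }
    }
}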

Example 79 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

From the class StreamThread, method resetOffsets.

private void resetOffsets(final Set<TopicPartition> partitions, final Exception cause) {
    final Set<String> loggedTopics = new HashSet<>();
    final Set<TopicPartition> seekToBeginning = new HashSet<>();
    final Set<TopicPartition> seekToEnd = new HashSet<>();
    final Set<TopicPartition> notReset = new HashSet<>();
    for (final TopicPartition partition : partitions) {
        switch (topologyMetadata.offsetResetStrategy(partition.topic())) {
            case EARLIEST:
                addToResetList(partition, seekToBeginning, "Setting topic '{}' to consume from {} offset", "earliest", loggedTopics);
                break;
            case LATEST:
                addToResetList(partition, seekToEnd, "Setting topic '{}' to consume from {} offset", "latest", loggedTopics);
                break;
            case NONE:
                if ("earliest".equals(originalReset)) {
                    addToResetList(partition, seekToBeginning, "No custom setting defined for topic '{}' using original config '{}' for offset reset", "earliest", loggedTopics);
                } else if ("latest".equals(originalReset)) {
                    addToResetList(partition, seekToEnd, "No custom setting defined for topic '{}' using original config '{}' for offset reset", "latest", loggedTopics);
                } else {
                    notReset.add(partition);
                }
                break;
            default:
                throw new IllegalStateException("Unable to locate topic " + partition.topic() + " in the topology");
        }
    }
    if (notReset.isEmpty()) {
        if (!seekToBeginning.isEmpty()) {
            mainConsumer.seekToBeginning(seekToBeginning);
        }
        if (!seekToEnd.isEmpty()) {
            mainConsumer.seekToEnd(seekToEnd);
        }
    } else {
        final String notResetString = notReset.stream().map(TopicPartition::topic).distinct().collect(Collectors.joining(","));
        final String format = String.format("No valid committed offset found for input [%s] and no valid reset policy configured." + " You need to set configuration parameter \"auto.offset.reset\" or specify a topic specific reset " + "policy via StreamsBuilder#stream(..., Consumed.with(Topology.AutoOffsetReset)) or " + "StreamsBuilder#table(..., Consumed.with(Topology.AutoOffsetReset))", notResetString);
        if (cause == null) {
            throw new StreamsException(format);
        } else {
            throw new StreamsException(format, cause);
        }
    }
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) StreamsException(org.apache.kafka.streams.errors.StreamsException) HashSet(java.util.HashSet)
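
The exception message itself names the two ways to avoid this error: set the consumer's auto.offset.reset, or attach a per-source reset policy, which topologyMetadata.offsetResetStrategy() consults first. A hedged sketch of both options (the application id, broker address, and topic name are placeholders):

import java.util.Properties;

import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;

public class ResetPolicySketch {
    public static void main(final String[] args) {
        // Option 1: a global fallback, the "originalReset" consulted in the NONE branch above.
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "reset-policy-sketch"); // hypothetical
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // assumed broker
        props.put(StreamsConfig.CONSUMER_PREFIX + "auto.offset.reset", "earliest");

        // Option 2: a per-source policy, checked before the global fallback.
        final StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic", Consumed.with(Topology.AutoOffsetReset.EARLIEST)) // hypothetical topic
            .foreach((key, value) -> System.out.println(key + " -> " + value));

        final Topology topology = builder.build();
        System.out.println(topology.describe()); // props would be passed to new KafkaStreams(topology, props)
    }
}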

Example 80 with StreamsException

Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.

From the class GlobalStreamThread, method initialize.

private StateConsumer initialize() {
    StateConsumer stateConsumer = null;
    try {
        final GlobalStateManager stateMgr = new GlobalStateManagerImpl(logContext, time, topology, globalConsumer, stateDirectory, stateRestoreListener, config);
        final GlobalProcessorContextImpl globalProcessorContext = new GlobalProcessorContextImpl(config, stateMgr, streamsMetrics, cache, time);
        stateMgr.setGlobalProcessorContext(globalProcessorContext);
        stateConsumer = new StateConsumer(
            logContext,
            globalConsumer,
            new GlobalStateUpdateTask(logContext, topology, globalProcessorContext, stateMgr, config.defaultDeserializationExceptionHandler()),
            time,
            Duration.ofMillis(config.getLong(StreamsConfig.POLL_MS_CONFIG)),
            config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG));
        try {
            stateConsumer.initialize();
        } catch (final InvalidOffsetException recoverableException) {
            log.error("Bootstrapping global state failed due to inconsistent local state. Will attempt to clean up the local state. You can restart KafkaStreams to recover from this error.", recoverableException);
            closeStateConsumer(stateConsumer, true);
            throw new StreamsException("Bootstrapping global state failed. You can restart KafkaStreams to recover from this error.", recoverableException);
        }
        return stateConsumer;
    } catch (final StreamsException fatalException) {
        closeStateConsumer(stateConsumer, false);
        startupException = fatalException;
    } catch (final Exception fatalException) {
        closeStateConsumer(stateConsumer, false);
        startupException = new StreamsException("Exception caught during initialization of GlobalStreamThread", fatalException);
    }
    return null;
}
Also used : StreamsException(org.apache.kafka.streams.errors.StreamsException) InvalidOffsetException(org.apache.kafka.clients.consumer.InvalidOffsetException) IOException(java.io.IOException)
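
The wrapped InvalidOffsetException is deliberately recoverable: the corrupted local state has already been cleaned up, so restarting KafkaStreams re-bootstraps the global store from the changelog. One way for an application to act on that, sketched assuming Kafka Streams 2.8+ (the application id, broker address, and topic are placeholders), is an uncaught-exception handler that shuts the client down so the process can be restarted cleanly:

import java.util.Properties;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;
import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse;

public class GlobalStateRecoverySketch {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "global-state-sketch"); // hypothetical
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");   // assumed broker
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        final StreamsBuilder builder = new StreamsBuilder();
        builder.globalTable("reference-topic"); // hypothetical global state source

        final KafkaStreams streams = new KafkaStreams(builder.build(), props);
        streams.setUncaughtExceptionHandler(exception -> {
            // A fatal StreamsException like the one above means local state was wiped;
            // shutting the client down lets a supervisor restart and re-bootstrap it.
            System.err.println("Stream thread died: " + exception);
            return StreamThreadExceptionResponse.SHUTDOWN_CLIENT;
        });
        streams.start();
        Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
    }
}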

Aggregations

StreamsException (org.apache.kafka.streams.errors.StreamsException) 186
Test (org.junit.Test) 90
KafkaException (org.apache.kafka.common.KafkaException) 41
TopicPartition (org.apache.kafka.common.TopicPartition) 38
TimeoutException (org.apache.kafka.common.errors.TimeoutException) 36
HashMap (java.util.HashMap) 27
Map (java.util.Map) 25
HashSet (java.util.HashSet) 18
Properties (java.util.Properties) 17
TaskId (org.apache.kafka.streams.processor.TaskId) 14
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord) 13
StreamsConfig (org.apache.kafka.streams.StreamsConfig) 12
ArrayList (java.util.ArrayList) 11
ExecutionException (java.util.concurrent.ExecutionException) 11
TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException) 11
IOException (java.io.IOException) 10
Set (java.util.Set) 10
LogContext (org.apache.kafka.common.utils.LogContext) 10
MockTime (org.apache.kafka.common.utils.MockTime) 10
StateStore (org.apache.kafka.streams.processor.StateStore) 10