Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
The class ChangelogRecordDeserializationHelper, method applyChecksAndUpdatePosition: throws a StreamsException when consistency is enabled but a changelog record carries no position header.
public static void applyChecksAndUpdatePosition(final ConsumerRecord<byte[], byte[]> record,
                                                final boolean consistencyEnabled,
                                                final Position position) {
    if (!consistencyEnabled) {
        return;
    }
    final Header versionHeader = record.headers().lastHeader(ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_KEY);
    if (versionHeader == null) {
        return;
    } else {
        switch (versionHeader.value()[0]) {
            case 0:
                final Header vectorHeader = record.headers().lastHeader(CHANGELOG_POSITION_HEADER_KEY);
                if (vectorHeader == null) {
                    throw new StreamsException("This should not happen. Consistency is enabled but the changelog " +
                        "contains records without consistency information.");
                }
                position.merge(PositionSerde.deserialize(ByteBuffer.wrap(vectorHeader.value())));
                break;
            default:
                log.warn("Changelog records have been encoded using a larger version than this server understands. " +
                    "Please upgrade your server.");
        }
    }
}
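For context, a minimal sketch of a round trip through this helper. The topic names, offsets, and record construction are illustrative assumptions, and ChangelogRecordDeserializationHelper and PositionSerde are internal classes rather than public API, so treat this as a sketch, not supported usage:

import java.nio.ByteBuffer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.streams.query.Position;
import org.apache.kafka.streams.state.internals.ChangelogRecordDeserializationHelper;
import org.apache.kafka.streams.state.internals.PositionSerde;

public class ConsistencyHeaderSketch {
    public static void main(final String[] args) {
        // Hypothetical changelog record; topic, partition, and offset are made up.
        final ConsumerRecord<byte[], byte[]> record =
            new ConsumerRecord<>("app-store-changelog", 0, 42L, new byte[0], new byte[0]);

        // Version header: byte 0 matches the only version the helper understands.
        record.headers().add(new RecordHeader(
            ChangelogRecordDeserializationHelper.CHANGELOG_VERSION_HEADER_KEY, new byte[] {0}));

        // Position header: a serialized position vector for a hypothetical input topic.
        final ByteBuffer serialized =
            PositionSerde.serialize(Position.emptyPosition().withComponent("input-topic", 0, 100L));
        final byte[] positionBytes = new byte[serialized.remaining()];
        serialized.get(positionBytes);
        record.headers().add(new RecordHeader(
            ChangelogRecordDeserializationHelper.CHANGELOG_POSITION_HEADER_KEY, positionBytes));

        // Restoration side: the helper merges the record's position into the running one.
        final Position restored = Position.emptyPosition();
        ChangelogRecordDeserializationHelper.applyChecksAndUpdatePosition(record, true, restored);
        System.out.println(restored.getPartitionPositions("input-topic")); // {0=100}
    }
}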
Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
The class StreamsProducer, method maybeBeginTransaction: translates fencing errors into TaskMigratedException and wraps any other KafkaException from beginTransaction in a StreamsException.
private void maybeBeginTransaction() {
    if (eosEnabled() && !transactionInFlight) {
        try {
            producer.beginTransaction();
            transactionInFlight = true;
        } catch (final ProducerFencedException | InvalidProducerEpochException error) {
            // another producer with the same transactional id took over: this thread's tasks have migrated
            throw new TaskMigratedException(formatException("Producer got fenced trying to begin a new transaction"), error);
        } catch (final KafkaException error) {
            throw new StreamsException(formatException("Error encountered trying to begin a new transaction"), error);
        }
    }
}
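This transactional path only runs when eosEnabled() is true. A minimal sketch of the public configuration that enables it; the application id and bootstrap servers are placeholder values:

import java.util.Properties;

import org.apache.kafka.streams.StreamsConfig;

public class EosConfigSketch {
    public static Properties eosProps() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-app");       // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        // Turns on transactions, so StreamsProducer#maybeBeginTransaction becomes active;
        // requires brokers on version 2.5 or higher.
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);
        return props;
    }
}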
Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
The class StreamThread, method runLoop: re-throws StreamsExceptions and wraps any other unexpected exception in one.
/**
 * Main event loop for polling, and processing records through topologies.
 *
 * @throws IllegalStateException If store gets registered after initialized is already finished
 * @throws StreamsException      if the store's change log does not contain the partition
 */
// Needed to include StreamsConfig.EXACTLY_ONCE_BETA in error log for UnsupportedVersionException
@SuppressWarnings("deprecation")
boolean runLoop() {
    subscribeConsumer();
    // if the thread is in the middle of a rebalance, keep polling
    // until the rebalance is completed before we close and commit the tasks
    while (isRunning() || taskManager.isRebalanceInProgress()) {
        try {
            checkForTopologyUpdates();
            // stop polling regardless of the rebalance status since we know there are no tasks left
            if (!isRunning() && topologyMetadata.isEmpty()) {
                log.info("Shutting down thread with empty topology.");
                break;
            }
            maybeSendShutdown();
            final long size = cacheResizeSize.getAndSet(-1L);
            if (size != -1L) {
                cacheResizer.accept(size);
            }
            runOnce();
            if (nextProbingRebalanceMs.get() < time.milliseconds()) {
                log.info("Triggering the followup rebalance scheduled for {} ms.", nextProbingRebalanceMs.get());
                mainConsumer.enforceRebalance();
                nextProbingRebalanceMs.set(Long.MAX_VALUE);
            }
        } catch (final TaskCorruptedException e) {
            log.warn("Detected the states of tasks " + e.corruptedTasks() + " are corrupted. " +
                "Will close the task as dirty and re-create and bootstrap from scratch.", e);
            try {
                // check if any active task got corrupted; if so, trigger a rebalance
                // once the task corruptions have been handled
                final boolean enforceRebalance = taskManager.handleCorruption(e.corruptedTasks());
                if (enforceRebalance && eosEnabled) {
                    log.info("Active task(s) got corrupted. Triggering a rebalance.");
                    mainConsumer.enforceRebalance();
                }
            } catch (final TaskMigratedException taskMigrated) {
                handleTaskMigrated(taskMigrated);
            }
        } catch (final TaskMigratedException e) {
            handleTaskMigrated(e);
        } catch (final UnsupportedVersionException e) {
            final String errorMessage = e.getMessage();
            if (errorMessage != null &&
                errorMessage.startsWith("Broker unexpectedly doesn't support requireStable flag on version ")) {
                log.error("Shutting down because the Kafka cluster seems to be on a too old version. " +
                    "Setting {}=\"{}\"/\"{}\" requires broker version 2.5 or higher.",
                    StreamsConfig.PROCESSING_GUARANTEE_CONFIG,
                    StreamsConfig.EXACTLY_ONCE_V2,
                    StreamsConfig.EXACTLY_ONCE_BETA);
            }
            failedStreamThreadSensor.record();
            this.streamsUncaughtExceptionHandler.accept(new StreamsException(e), false);
            return false;
        } catch (final StreamsException e) {
            throw e;
        } catch (final Exception e) {
            throw new StreamsException(e);
        }
    }
    return true;
}
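A StreamsException that escapes runLoop (or is handed to streamsUncaughtExceptionHandler, as in the UnsupportedVersionException branch) ultimately reaches the handler the application registered. A minimal sketch of installing one via the public API; `topology` and `props` are assumed to be defined elsewhere:

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.errors.StreamsUncaughtExceptionHandler.StreamThreadExceptionResponse;

final KafkaStreams streams = new KafkaStreams(topology, props); // topology and props assumed
streams.setUncaughtExceptionHandler(exception -> {
    // Replace the failed stream thread and keep the application running;
    // SHUTDOWN_CLIENT and SHUTDOWN_APPLICATION are the other documented responses.
    return StreamThreadExceptionResponse.REPLACE_THREAD;
});
streams.start();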
Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
The class StreamThread, method resetOffsets: throws a StreamsException when a partition has no committed offset and no reset policy is configured.
private void resetOffsets(final Set<TopicPartition> partitions, final Exception cause) {
    final Set<String> loggedTopics = new HashSet<>();
    final Set<TopicPartition> seekToBeginning = new HashSet<>();
    final Set<TopicPartition> seekToEnd = new HashSet<>();
    final Set<TopicPartition> notReset = new HashSet<>();
    for (final TopicPartition partition : partitions) {
        switch (topologyMetadata.offsetResetStrategy(partition.topic())) {
            case EARLIEST:
                addToResetList(partition, seekToBeginning, "Setting topic '{}' to consume from {} offset", "earliest", loggedTopics);
                break;
            case LATEST:
                addToResetList(partition, seekToEnd, "Setting topic '{}' to consume from {} offset", "latest", loggedTopics);
                break;
            case NONE:
                if ("earliest".equals(originalReset)) {
                    addToResetList(partition, seekToBeginning, "No custom setting defined for topic '{}' using original config '{}' for offset reset", "earliest", loggedTopics);
                } else if ("latest".equals(originalReset)) {
                    addToResetList(partition, seekToEnd, "No custom setting defined for topic '{}' using original config '{}' for offset reset", "latest", loggedTopics);
                } else {
                    notReset.add(partition);
                }
                break;
            default:
                throw new IllegalStateException("Unable to locate topic " + partition.topic() + " in the topology");
        }
    }
    if (notReset.isEmpty()) {
        if (!seekToBeginning.isEmpty()) {
            mainConsumer.seekToBeginning(seekToBeginning);
        }
        if (!seekToEnd.isEmpty()) {
            mainConsumer.seekToEnd(seekToEnd);
        }
    } else {
        final String notResetString = notReset.stream()
            .map(TopicPartition::topic)
            .distinct()
            .collect(Collectors.joining(","));
        final String format = String.format(
            "No valid committed offset found for input [%s] and no valid reset policy configured." +
                " You need to set configuration parameter \"auto.offset.reset\" or specify a topic specific reset " +
                "policy via StreamsBuilder#stream(..., Consumed.with(Topology.AutoOffsetReset)) or " +
                "StreamsBuilder#table(..., Consumed.with(Topology.AutoOffsetReset))",
            notResetString);
        if (cause == null) {
            throw new StreamsException(format);
        } else {
            throw new StreamsException(format, cause);
        }
    }
}
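The exception message points at the per-topic escape hatch. A minimal sketch of setting a topic-specific reset policy through the API the message names; the topic name and serdes are placeholders:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.KStream;

final StreamsBuilder builder = new StreamsBuilder();
final KStream<String, String> input = builder.stream(
    "input-topic", // placeholder
    Consumed.with(Serdes.String(), Serdes.String())
            .withOffsetResetPolicy(Topology.AutoOffsetReset.EARLIEST));
// With a policy set, resetOffsets falls into the EARLIEST branch instead of throwing.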
Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
The class GlobalStreamThread, method initialize: records a StreamsException as the startup exception when bootstrapping global state fails.
private StateConsumer initialize() {
    StateConsumer stateConsumer = null;
    try {
        final GlobalStateManager stateMgr = new GlobalStateManagerImpl(
            logContext, time, topology, globalConsumer, stateDirectory, stateRestoreListener, config);
        final GlobalProcessorContextImpl globalProcessorContext =
            new GlobalProcessorContextImpl(config, stateMgr, streamsMetrics, cache, time);
        stateMgr.setGlobalProcessorContext(globalProcessorContext);
        stateConsumer = new StateConsumer(
            logContext,
            globalConsumer,
            new GlobalStateUpdateTask(logContext, topology, globalProcessorContext, stateMgr, config.defaultDeserializationExceptionHandler()),
            time,
            Duration.ofMillis(config.getLong(StreamsConfig.POLL_MS_CONFIG)),
            config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG));
        try {
            stateConsumer.initialize();
        } catch (final InvalidOffsetException recoverableException) {
            log.error("Bootstrapping global state failed due to inconsistent local state. Will attempt to clean up the local state. You can restart KafkaStreams to recover from this error.", recoverableException);
            closeStateConsumer(stateConsumer, true);
            throw new StreamsException("Bootstrapping global state failed. You can restart KafkaStreams to recover from this error.", recoverableException);
        }
        return stateConsumer;
    } catch (final StreamsException fatalException) {
        closeStateConsumer(stateConsumer, false);
        startupException = fatalException;
    } catch (final Exception fatalException) {
        closeStateConsumer(stateConsumer, false);
        startupException = new StreamsException("Exception caught during initialization of GlobalStreamThread", fatalException);
    }
    return null;
}
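The startupException stored here is re-thrown while the global thread starts, so in practice it can surface from KafkaStreams#start. A hedged sketch of handling it at the application level; `topology` and `props` are assumed to be defined elsewhere, and whether a restart actually recovers depends on the failure, as the log messages above note:

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.errors.StreamsException;

final KafkaStreams streams = new KafkaStreams(topology, props); // topology and props assumed
try {
    streams.start();
} catch (final StreamsException fatal) {
    // Per the log messages above, a restart can recover from inconsistent
    // local global-store state; close first to release resources.
    streams.close();
}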