Use of org.apache.kafka.streams.errors.ProcessorStateException in project apache-kafka-on-k8s by banzaicloud.
Class AbstractTask, method updateOffsetLimits().
protected void updateOffsetLimits() {
    for (final TopicPartition partition : partitions) {
        try {
            // TODO: batch API?
            final OffsetAndMetadata metadata = consumer.committed(partition);
            final long offset = metadata != null ? metadata.offset() : 0L;
            stateMgr.putOffsetLimit(partition, offset);
            if (log.isTraceEnabled()) {
                log.trace("Updating store offset limits {} for changelog {}", offset, partition);
            }
        } catch (final AuthorizationException e) {
            throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e);
        } catch (final WakeupException e) {
            throw e;
        } catch (final KafkaException e) {
            throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e);
        }
    }
}
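The method above wraps the client exception in a ProcessorStateException so the task id and changelog partition are carried in the message while the original error is kept as the cause. A minimal sketch of that wrap-and-unwrap pattern in isolation; the task id "0_1" and partition name "store-changelog-0" are made-up values for illustration:

import org.apache.kafka.common.errors.AuthorizationException;
import org.apache.kafka.streams.errors.ProcessorStateException;

public class OffsetLimitWrappingDemo {
    public static void main(final String[] args) {
        final AuthorizationException cause = new AuthorizationException("not authorized to read the changelog");
        // Same wrapping style as updateOffsetLimits(): task context goes into the message,
        // the original client exception is kept as the cause.
        final ProcessorStateException wrapped = new ProcessorStateException(
            String.format("task [%s] AuthorizationException when initializing offsets for %s", "0_1", "store-changelog-0"),
            cause);
        // Callers can still reach the underlying Kafka error through the cause chain.
        System.out.println(wrapped.getMessage());
        System.out.println(wrapped.getCause() == cause); // prints true
    }
}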
Use of org.apache.kafka.streams.errors.ProcessorStateException in project apache-kafka-on-k8s by banzaicloud.
Class Segments, method renameSegmentFile().
private void renameSegmentFile(final File parent, final String segmentName, final long segmentId) {
    final File newName = new File(parent, segmentName(segmentId));
    final File oldName = new File(parent, segmentName);
    if (!oldName.renameTo(newName)) {
        throw new ProcessorStateException("Unable to rename old style segment from: " + oldName + " to new name: " + newName);
    }
}
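File.renameTo only signals failure with a boolean, so the exception above cannot report why the rename failed. A hedged sketch of the same guard written with java.nio.file.Files.move, which surfaces the underlying IOException as the cause; the helper class and method here are illustrative, not part of the Segments class:

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;
import org.apache.kafka.streams.errors.ProcessorStateException;

final class SegmentRenameSketch {
    // Illustrative helper: same wrap-on-failure idea, but the IOException explains the cause.
    static void renameSegmentFile(final File parent, final String oldSegmentName, final String newSegmentName) {
        final File oldName = new File(parent, oldSegmentName);
        final File newName = new File(parent, newSegmentName);
        try {
            Files.move(oldName.toPath(), newName.toPath(), StandardCopyOption.ATOMIC_MOVE);
        } catch (final IOException e) {
            throw new ProcessorStateException(
                "Unable to rename old style segment from: " + oldName + " to new name: " + newName, e);
        }
    }
}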
Use of org.apache.kafka.streams.errors.ProcessorStateException in project apache-kafka-on-k8s by banzaicloud.
Class StandbyTaskTest, method shouldCloseStateMangerOnTaskCloseWhenCommitFailed().
@Test
public void shouldCloseStateMangerOnTaskCloseWhenCommitFailed() throws Exception {
    consumer.assign(Utils.mkList(globalTopicPartition));
    final Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
    committedOffsets.put(new TopicPartition(globalTopicPartition.topic(), globalTopicPartition.partition()), new OffsetAndMetadata(100L));
    consumer.commitSync(committedOffsets);
    restoreStateConsumer.updatePartitions(globalStoreName, Utils.mkList(new PartitionInfo(globalStoreName, 0, Node.noNode(), new Node[0], new Node[0])));
    final StreamsConfig config = createConfig(baseDir);
    final AtomicBoolean closedStateManager = new AtomicBoolean(false);
    final StandbyTask task = new StandbyTask(taskId, ktablePartitions, ktableTopology, consumer, changelogReader, config, null, stateDirectory) {
        @Override
        public void commit() {
            throw new RuntimeException("KABOOM!");
        }

        @Override
        void closeStateManager(final boolean writeCheckpoint) throws ProcessorStateException {
            closedStateManager.set(true);
        }
    };
    task.initializeStateStores();
    try {
        task.close(true, false);
        fail("should have thrown exception");
    } catch (Exception e) {
        // expected
    }
    assertTrue(closedStateManager.get());
}
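The test depends on close() still reaching closeStateManager() even though commit() throws. A standalone sketch of the try/finally shape that provides that guarantee; the class and fields below are invented for illustration and are not the real StandbyTask implementation:

import org.apache.kafka.streams.errors.ProcessorStateException;

final class CloseOnCommitFailureSketch {
    private boolean stateManagerClosed = false;

    void commit() {
        throw new RuntimeException("KABOOM!");
    }

    void closeStateManager(final boolean writeCheckpoint) throws ProcessorStateException {
        stateManagerClosed = true;
    }

    void close(final boolean clean) {
        try {
            if (clean) {
                commit();
            }
        } finally {
            // The state manager is closed even if commit() above throws.
            closeStateManager(clean);
        }
    }

    public static void main(final String[] args) {
        final CloseOnCommitFailureSketch task = new CloseOnCommitFailureSketch();
        try {
            task.close(true);
        } catch (final RuntimeException expected) {
            // expected: commit failed
        }
        System.out.println("state manager closed: " + task.stateManagerClosed); // prints true
    }
}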
Use of org.apache.kafka.streams.errors.ProcessorStateException in project kafka by apache.
Class GlobalStateManagerImpl, method flush().
@Override
public void flush() {
    log.debug("Flushing all global globalStores registered in the state manager");
    for (final Map.Entry<String, Optional<StateStore>> entry : globalStores.entrySet()) {
        if (entry.getValue().isPresent()) {
            final StateStore store = entry.getValue().get();
            try {
                log.trace("Flushing global store={}", store.name());
                store.flush();
            } catch (final RuntimeException e) {
                throw new ProcessorStateException(String.format("Failed to flush global state store %s", store.name()), e);
            }
        } else {
            throw new IllegalStateException("Expected " + entry.getKey() + " to have been initialized");
        }
    }
}
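Since store.flush() may throw any RuntimeException, the wrapper above makes the failing store identifiable from the exception message. A small sketch of the same catch-and-wrap shape with a Runnable standing in for a state store's flush; the store name and helper method are assumptions for illustration:

import org.apache.kafka.streams.errors.ProcessorStateException;

final class FlushWrappingSketch {
    // Stand-in for StateStore#flush(); a real store would be used inside Streams itself.
    static void flushStore(final String storeName, final Runnable flushAction) {
        try {
            flushAction.run();
        } catch (final RuntimeException e) {
            throw new ProcessorStateException(
                String.format("Failed to flush global state store %s", storeName), e);
        }
    }

    public static void main(final String[] args) {
        try {
            flushStore("my-global-store", () -> { throw new IllegalStateException("disk full"); });
        } catch (final ProcessorStateException e) {
            System.out.println(e.getMessage());            // names the failing store
            System.out.println(e.getCause().getMessage()); // original reason
        }
    }
}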
Use of org.apache.kafka.streams.errors.ProcessorStateException in project kafka by apache.
Class ProcessorStateManager, method close().
/**
 * {@link StateStore#close() Close} all stores (even in case of failure).
 * Log all exceptions and re-throw the first exception that occurred at the end.
 *
 * @throws ProcessorStateException if any error happens when closing the state stores
 */
@Override
public void close() throws ProcessorStateException {
    log.debug("Closing its state manager and all the registered state stores: {}", stores);
    changelogReader.unregister(getAllChangelogTopicPartitions());
    RuntimeException firstException = null;
    // attempting to close the stores, just in case they are not closed by a ProcessorNode yet
    if (!stores.isEmpty()) {
        for (final Map.Entry<String, StateStoreMetadata> entry : stores.entrySet()) {
            final StateStore store = entry.getValue().stateStore;
            log.trace("Closing store {}", store.name());
            try {
                store.close();
            } catch (final RuntimeException exception) {
                if (firstException == null) {
                    // do NOT wrap the error if it is actually caused by Streams itself
                    if (exception instanceof StreamsException)
                        firstException = exception;
                    else
                        firstException = new ProcessorStateException(format("%sFailed to close state store %s", logPrefix, store.name()), exception);
                }
                log.error("Failed to close state store {}: ", store.name(), exception);
            }
        }
        stores.clear();
    }
    if (firstException != null) {
        throw firstException;
    }
}
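close() keeps iterating after a failure, remembers only the first exception, and avoids re-wrapping anything that is already a StreamsException. A compact sketch of that accumulate-then-rethrow pattern over a list of close actions; the List<Runnable> stand-in and class name are assumptions for illustration:

import java.util.Arrays;
import java.util.List;
import org.apache.kafka.streams.errors.ProcessorStateException;
import org.apache.kafka.streams.errors.StreamsException;

final class CloseAllStoresSketch {
    static void closeAll(final List<Runnable> closeActions) {
        RuntimeException firstException = null;
        for (final Runnable action : closeActions) {
            try {
                action.run();
            } catch (final RuntimeException exception) {
                if (firstException == null) {
                    // keep the first failure; do not wrap it if it is already a Streams exception
                    firstException = exception instanceof StreamsException
                        ? exception
                        : new ProcessorStateException("Failed to close state store", exception);
                }
                // continue closing the remaining stores regardless
            }
        }
        if (firstException != null) {
            throw firstException;
        }
    }

    public static void main(final String[] args) {
        final List<Runnable> closeActions = Arrays.asList(
            () -> { throw new IllegalStateException("first store failed to close"); },
            () -> System.out.println("second store is still closed"));
        try {
            closeAll(closeActions);
        } catch (final ProcessorStateException e) {
            System.out.println(e.getCause().getMessage()); // prints the first failure
        }
    }
}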