Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
From the class StreamTask, method resetOffsetsIfNeededAndInitializeMetadata.
private void resetOffsetsIfNeededAndInitializeMetadata(final java.util.function.Consumer<Set<TopicPartition>> offsetResetter) {
    try {
        final Map<TopicPartition, OffsetAndMetadata> offsetsAndMetadata = mainConsumer.committed(inputPartitions());

        for (final Map.Entry<TopicPartition, OffsetAndMetadata> committedEntry : offsetsAndMetadata.entrySet()) {
            if (resetOffsetsForPartitions.contains(committedEntry.getKey())) {
                final OffsetAndMetadata offsetAndMetadata = committedEntry.getValue();
                if (offsetAndMetadata != null) {
                    mainConsumer.seek(committedEntry.getKey(), offsetAndMetadata);
                    resetOffsetsForPartitions.remove(committedEntry.getKey());
                }
            }
        }

        offsetResetter.accept(resetOffsetsForPartitions);
        resetOffsetsForPartitions.clear();

        initializeTaskTime(offsetsAndMetadata.entrySet().stream()
            .filter(e -> e.getValue() != null)
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)));
    } catch (final TimeoutException timeoutException) {
        log.warn("Encountered {} while trying to fetch committed offsets, will retry initializing the metadata in the next loop." +
                "\nConsider overwriting consumer config {} to a larger value to avoid timeout errors",
            timeoutException.toString(),
            ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG);
        // re-throw to trigger `task.timeout.ms`
        throw timeoutException;
    } catch (final KafkaException e) {
        throw new StreamsException(String.format("task [%s] Failed to initialize offsets for %s", id, inputPartitions()), e);
    }
}
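The two catch blocks mark the two error paths: a TimeoutException is logged and rethrown so the task.timeout.ms machinery can retry on the next loop, while any other KafkaException is wrapped in a StreamsException carrying the task id and partitions. A minimal, self-contained sketch of that wrap-and-rethrow shape (the task id and the failing call are hypothetical stand-ins, not Kafka internals):

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.streams.errors.StreamsException;

public class WrapAndRethrowSketch {
    public static void main(final String[] args) {
        final String taskId = "0_0"; // hypothetical task id, for illustration only
        try {
            // stand-in for a consumer call such as committed() failing
            throw new KafkaException("broker unavailable");
        } catch (final KafkaException e) {
            // same shape as the second catch block above: wrap with task context
            final StreamsException wrapped = new StreamsException(
                String.format("task [%s] Failed to initialize offsets", taskId), e);
            System.out.println(wrapped.getMessage());
            System.out.println("cause: " + wrapped.getCause());
        }
    }
}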
Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
From the class ProcessorNodeTest, method testTopologyLevelClassCastException.
@Test
public void testTopologyLevelClassCastException() {
    // No serdes are given in the DSL below, so the ByteArraySerde defaults configured
    // here are used; they do not match the declared String types, which triggers an exception.
    final StreamsBuilder builder = new StreamsBuilder();
    builder.<String, String>stream("streams-plaintext-input")
        .flatMapValues(value -> Collections.singletonList(""));
    final Topology topology = builder.build();

    final Properties config = new Properties();
    config.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class);
    config.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.ByteArraySerde.class);

    try (final TopologyTestDriver testDriver = new TopologyTestDriver(topology, config)) {
        final TestInputTopic<String, String> topic =
            testDriver.createInputTopic("streams-plaintext-input", new StringSerializer(), new StringSerializer());
        final StreamsException se = assertThrows(StreamsException.class, () -> topic.pipeInput("a-key", "a value"));
        final String msg = se.getMessage();
        assertTrue("Error about class cast with serdes", msg.contains("ClassCastException"));
        assertTrue("Error about class cast with serdes", msg.contains("Serdes"));
    }
}
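For contrast, the same topology passes once the default serdes match the actual String key and value types. A hedged sketch of the fix, reusing the topology and imports from the test above (not part of the Kafka test itself):

final Properties fixed = new Properties();
fixed.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.StringSerde.class);
fixed.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.StringSerde.class);
try (final TopologyTestDriver driver = new TopologyTestDriver(topology, fixed)) {
    final TestInputTopic<String, String> topic =
        driver.createInputTopic("streams-plaintext-input", new StringSerializer(), new StringSerializer());
    topic.pipeInput("a-key", "a value"); // no StreamsException: the serdes now match the types
}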
Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
From the class GlobalStateManagerImplTest, method shouldNotRetryWhenPositionThrowsTimeoutExceptionAndTaskTimeoutIsZero.
@Test
public void shouldNotRetryWhenPositionThrowsTimeoutExceptionAndTaskTimeoutIsZero() {
    final AtomicInteger numberOfCalls = new AtomicInteger(0);
    consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public synchronized long position(final TopicPartition partition) {
            numberOfCalls.incrementAndGet();
            throw new TimeoutException("KABOOM!");
        }
    };
    initializeConsumer(0, 0, t1, t2, t3, t4);
    streamsConfig = new StreamsConfig(mkMap(
        mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, "appId"),
        mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"),
        mkEntry(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()),
        mkEntry(StreamsConfig.TASK_TIMEOUT_MS_CONFIG, 0L)
    ));
    stateManager = new GlobalStateManagerImpl(new LogContext("mock"), time, topology, consumer, stateDirectory, stateRestoreListener, streamsConfig);
    processorContext.setStateManger(stateManager);
    stateManager.setGlobalProcessorContext(processorContext);

    final StreamsException expected = assertThrows(StreamsException.class, () -> stateManager.initialize());
    final Throwable cause = expected.getCause();
    assertThat(cause, instanceOf(TimeoutException.class));
    assertThat(cause.getMessage(), equalTo("KABOOM!"));

    assertEquals(numberOfCalls.get(), 1);
}
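The behavior under test follows from task.timeout.ms: with a zero deadline, the first TimeoutException from position() is wrapped in a StreamsException instead of being retried, so position() is invoked exactly once. A rough sketch of that retry-versus-fail decision (names and structure are illustrative, not the actual GlobalStateManagerImpl logic):

import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.streams.errors.StreamsException;

public class DeadlineSketch {
    // hypothetical stand-in for consumer.position(); always times out here
    static long fetchPosition() {
        throw new TimeoutException("KABOOM!");
    }

    public static void main(final String[] args) {
        final long taskTimeoutMs = 0L; // mirrors TASK_TIMEOUT_MS_CONFIG = 0 in the test
        final long deadline = System.currentTimeMillis() + taskTimeoutMs;
        while (true) {
            try {
                fetchPosition();
                break;
            } catch (final TimeoutException t) {
                if (System.currentTimeMillis() >= deadline) {
                    // zero timeout: fail on the very first attempt, as the test asserts
                    throw new StreamsException("could not fetch position before timeout", t);
                }
                // a positive timeout would allow another attempt here
            }
        }
    }
}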
Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
From the class GlobalStateManagerImplTest, method shouldNotRetryWhenEndOffsetsThrowsTimeoutExceptionAndTaskTimeoutIsZero.
@Test
public void shouldNotRetryWhenEndOffsetsThrowsTimeoutExceptionAndTaskTimeoutIsZero() {
    final AtomicInteger numberOfCalls = new AtomicInteger(0);
    consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public synchronized Map<TopicPartition, Long> endOffsets(final Collection<TopicPartition> partitions) {
            numberOfCalls.incrementAndGet();
            throw new TimeoutException("KABOOM!");
        }
    };
    initializeConsumer(0, 0, t1, t2, t3, t4);
    streamsConfig = new StreamsConfig(mkMap(
        mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, "appId"),
        mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"),
        mkEntry(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()),
        mkEntry(StreamsConfig.TASK_TIMEOUT_MS_CONFIG, 0L)
    ));
    stateManager = new GlobalStateManagerImpl(new LogContext("mock"), time, topology, consumer, stateDirectory, stateRestoreListener, streamsConfig);
    processorContext.setStateManger(stateManager);
    stateManager.setGlobalProcessorContext(processorContext);

    final StreamsException expected = assertThrows(StreamsException.class, () -> stateManager.initialize());
    final Throwable cause = expected.getCause();
    assertThat(cause, instanceOf(TimeoutException.class));
    assertThat(cause.getMessage(), equalTo("KABOOM!"));

    assertEquals(numberOfCalls.get(), 1);
}
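This endOffsets variant exercises the same deadline logic on a different consumer call. By contrast, a positive task.timeout.ms would give the state manager a window in which to retry. A hedged sketch of such a config, using the same helpers as the test (the timeout value is illustrative):

final StreamsConfig retryingConfig = new StreamsConfig(mkMap(
    mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, "appId"),
    mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"),
    mkEntry(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()),
    mkEntry(StreamsConfig.TASK_TIMEOUT_MS_CONFIG, 5_000L) // allow roughly 5 seconds of retries
));

With a config like this, the overridden MockConsumer above would be called more than once before initialization finally fails.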
Use of org.apache.kafka.streams.errors.StreamsException in project kafka by apache.
From the class GlobalStateTaskTest, method maybeDeserialize.
private void maybeDeserialize(final GlobalStateUpdateTask globalStateTask, final byte[] key, final byte[] recordValue, final boolean failExpected) {
    final ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>(
        topic2, 1, 1, 0L, TimestampType.CREATE_TIME,
        0, 0, key, recordValue, new RecordHeaders(), Optional.empty());
    globalStateTask.initialize();
    try {
        globalStateTask.update(record);
        if (failExpected) {
            fail("Should have failed to deserialize.");
        }
    } catch (final StreamsException e) {
        if (!failExpected) {
            fail("Shouldn't have failed to deserialize.");
        }
    }
}
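A hedged usage sketch for this helper (the serializer and byte contents are assumptions for illustration; the real test fixtures may differ). Bytes the configured deserializer rejects should surface as a StreamsException, while well-formed bytes should pass through:

final IntegerSerializer intSerializer = new IntegerSerializer();
// bytes an Integer deserializer cannot parse -> expect a StreamsException
maybeDeserialize(globalStateTask, "not-an-int".getBytes(), intSerializer.serialize(topic2, 10), true);
// well-formed key and value -> no exception expected
maybeDeserialize(globalStateTask, intSerializer.serialize(topic2, 1), intSerializer.serialize(topic2, 10), false);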