Use of org.apache.kafka.streams.errors.StreamsException in project apache-kafka-on-k8s by banzaicloud.
Class GlobalStateManagerImplTest, method shouldReleaseLockIfExceptionWhenLoadingCheckpoints.
@Test
public void shouldReleaseLockIfExceptionWhenLoadingCheckpoints() throws IOException {
    writeCorruptCheckpoint();
    try {
        stateManager.initialize();
    } catch (StreamsException e) {
        // expected
    }
    final StateDirectory stateDir = new StateDirectory(streamsConfig, new MockTime());
    try {
        // should be able to get the lock now as it should've been released
        assertTrue(stateDir.lockGlobalState());
    } finally {
        stateDir.unlockGlobalState();
    }
}
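The writeCorruptCheckpoint() helper is not included in this snippet. Below is a minimal sketch of what such a helper might look like, assuming the global checkpoint file sits directly under the state manager's base directory and uses the standard ".checkpoint" file name; the helper body and the file contents are illustrative assumptions, not taken from the test class.

// Assumed helper: write malformed content into the global checkpoint file so that
// stateManager.initialize() fails while parsing it and wraps the error in a
// StreamsException. The third line cannot be split into topic/partition/offset.
// Requires java.io.File, java.io.OutputStream, java.nio.file.Files and
// java.nio.charset.StandardCharsets.
private void writeCorruptCheckpoint() throws IOException {
    final File checkpointFile = new File(stateManager.baseDir(), ".checkpoint");
    try (final OutputStream stream = Files.newOutputStream(checkpointFile.toPath())) {
        stream.write("0\n1\nfoo".getBytes(StandardCharsets.UTF_8));
    }
}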
Use of org.apache.kafka.streams.errors.StreamsException in project apache-kafka-on-k8s by banzaicloud.
Class GlobalStateManagerImplTest, method shouldRetryWhenEndOffsetsThrowsTimeoutException.
@Test
public void shouldRetryWhenEndOffsetsThrowsTimeoutException() {
    final int retries = 2;
    final AtomicInteger numberOfCalls = new AtomicInteger(0);
    consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public synchronized Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions) {
            numberOfCalls.incrementAndGet();
            throw new TimeoutException();
        }
    };
    streamsConfig = new StreamsConfig(new Properties() {
        {
            put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
            put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
            put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
            put(StreamsConfig.RETRIES_CONFIG, retries);
        }
    });
    try {
        new GlobalStateManagerImpl(new LogContext("mock"), topology, consumer, stateDirectory, stateRestoreListener, streamsConfig);
    } catch (final StreamsException expected) {
        assertEquals(numberOfCalls.get(), retries);
    }
}
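A hypothetical variation on the same override (not part of the test class above): fail endOffsets() only once and then delegate to MockConsumer's stock behaviour. Assuming the state manager retries the call on TimeoutException, as the test above asserts, a single failure inside the configured retry budget should not surface as a StreamsException.

// Hypothetical variation: the first endOffsets() call times out, later calls fall back
// to MockConsumer's normal implementation, so initialization can proceed after a retry.
consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
    private int attempts = 0;

    @Override
    public synchronized Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions) {
        if (attempts++ == 0) {
            throw new TimeoutException();
        }
        return super.endOffsets(partitions);
    }
};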
Use of org.apache.kafka.streams.errors.StreamsException in project apache-kafka-on-k8s by banzaicloud.
Class GlobalStateManagerImplTest, method shouldRetryWhenPartitionsForThrowsTimeoutException.
@Test
public void shouldRetryWhenPartitionsForThrowsTimeoutException() {
    final int retries = 2;
    final AtomicInteger numberOfCalls = new AtomicInteger(0);
    consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public synchronized List<PartitionInfo> partitionsFor(String topic) {
            numberOfCalls.incrementAndGet();
            throw new TimeoutException();
        }
    };
    streamsConfig = new StreamsConfig(new Properties() {
        {
            put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
            put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
            put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
            put(StreamsConfig.RETRIES_CONFIG, retries);
        }
    });
    try {
        new GlobalStateManagerImpl(new LogContext("mock"), topology, consumer, stateDirectory, stateRestoreListener, streamsConfig);
    } catch (final StreamsException expected) {
        assertEquals(numberOfCalls.get(), retries);
    }
}
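Both retry tests above build the same StreamsConfig inline. As a hedged refactoring sketch, the shared setup could be pulled into a helper; the method name streamsConfigWithRetries is hypothetical and not part of the original test class.

// Hypothetical helper that centralizes the StreamsConfig used by the two retry tests,
// so the application id, bootstrap servers, state directory and retry count are
// configured in a single place.
private StreamsConfig streamsConfigWithRetries(final int retries) {
    final Properties props = new Properties();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "appId");
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234");
    props.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath());
    props.put(StreamsConfig.RETRIES_CONFIG, retries);
    return new StreamsConfig(props);
}

Each test would then call streamsConfig = streamsConfigWithRetries(retries); before constructing the GlobalStateManagerImpl.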
Use of org.apache.kafka.streams.errors.StreamsException in project apache-kafka-on-k8s by banzaicloud.
Class RecordCollectorTest, method shouldThrowStreamsExceptionOnSubsequentCallIfASendFailsWithDefaultExceptionHandler.
@SuppressWarnings("unchecked")
@Test
public void shouldThrowStreamsExceptionOnSubsequentCallIfASendFailsWithDefaultExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            callback.onCompletion(null, new Exception());
            return null;
        }
    }, "test", logContext, new DefaultProductionExceptionHandler());
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    try {
        collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
        fail("Should have thrown StreamsException");
    } catch (final StreamsException expected) {
        /* ok */
    }
}
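For contrast, Kafka Streams also ships org.apache.kafka.streams.errors.AlwaysContinueProductionExceptionHandler. A hedged sketch mirroring the test above (not taken from it): with that handler the recorded send error is skipped, so the second send() is not expected to throw.

// Hedged sketch: with AlwaysContinueProductionExceptionHandler the failed callback is
// logged and skipped instead of being re-thrown on the next send().
final RecordCollector tolerantCollector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
    @Override
    public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
        callback.onCompletion(null, new Exception());
        return null;
    }
}, "test", logContext, new AlwaysContinueProductionExceptionHandler());
tolerantCollector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
// With the continue handler, no StreamsException is expected on this second send.
tolerantCollector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);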
Use of org.apache.kafka.streams.errors.StreamsException in project apache-kafka-on-k8s by banzaicloud.
Class RecordCollectorTest, method shouldThrowStreamsExceptionOnFlushIfASendFailedWithDefaultExceptionHandler.
@SuppressWarnings("unchecked")
@Test
public void shouldThrowStreamsExceptionOnFlushIfASendFailedWithDefaultExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            callback.onCompletion(null, new Exception());
            return null;
        }
    }, "test", logContext, new DefaultProductionExceptionHandler());
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    try {
        collector.flush();
        fail("Should have thrown StreamsException");
    } catch (final StreamsException expected) {
        /* ok */
    }
}
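The DefaultProductionExceptionHandler used above always answers FAIL, which is why the buffered send error resurfaces on flush(). Below is a minimal sketch of a custom handler that answers CONTINUE instead; the class name ContinueOnErrorHandler is hypothetical and not part of the test class.

// Hypothetical handler: instruct the record collector to drop failed sends rather than
// re-throwing them on the next send() or flush(). Requires java.util.Map plus
// ProducerRecord and the ProductionExceptionHandler interface from
// org.apache.kafka.streams.errors.
public static class ContinueOnErrorHandler implements ProductionExceptionHandler {
    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        return ProductionExceptionHandlerResponse.CONTINUE;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // no configuration needed for this sketch
    }
}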