Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: class RestoreIntegrationTest, method shouldRestoreStateFromSourceTopic.
@Test
public void shouldRestoreStateFromSourceTopic() throws Exception {
    final AtomicInteger numReceived = new AtomicInteger(0);
    final StreamsBuilder builder = new StreamsBuilder();
    final Properties props = props();
    props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);

    // Restore from offset 1000 to 4000 (the committed offset), then process from
    // 4000 to 5000, on each of the two partitions.
    final int offsetLimitDelta = 1000;
    final int offsetCheckpointed = 1000;
    createStateForRestoration(inputStream, 0);
    setCommittedOffset(inputStream, offsetLimitDelta);

    final StateDirectory stateDirectory = new StateDirectory(new StreamsConfig(props), new MockTime(), true, false);
    // The checkpointed offset is the last processed record's offset, so without
    // control messages we should write this offset - 1.
    new OffsetCheckpoint(new File(stateDirectory.getOrCreateDirectoryForTask(new TaskId(0, 0)), ".checkpoint"))
        .write(Collections.singletonMap(new TopicPartition(inputStream, 0), (long) offsetCheckpointed - 1));
    new OffsetCheckpoint(new File(stateDirectory.getOrCreateDirectoryForTask(new TaskId(0, 1)), ".checkpoint"))
        .write(Collections.singletonMap(new TopicPartition(inputStream, 1), (long) offsetCheckpointed - 1));

    final CountDownLatch startupLatch = new CountDownLatch(1);
    final CountDownLatch shutdownLatch = new CountDownLatch(1);

    builder.table(
            inputStream,
            Materialized.<Integer, Integer, KeyValueStore<Bytes, byte[]>>as("store")
                .withKeySerde(Serdes.Integer())
                .withValueSerde(Serdes.Integer()))
        .toStream()
        .foreach((key, value) -> {
            if (numReceived.incrementAndGet() == offsetLimitDelta * 2) {
                shutdownLatch.countDown();
            }
        });

    kafkaStreams = new KafkaStreams(builder.build(props), props);
    kafkaStreams.setStateListener((newState, oldState) -> {
        if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
            startupLatch.countDown();
        }
    });

    final AtomicLong restored = new AtomicLong(0);
    kafkaStreams.setGlobalStateRestoreListener(new StateRestoreListener() {
        @Override
        public void onRestoreStart(final TopicPartition topicPartition, final String storeName, final long startingOffset, final long endingOffset) {
        }

        @Override
        public void onBatchRestored(final TopicPartition topicPartition, final String storeName, final long batchEndOffset, final long numRestored) {
        }

        @Override
        public void onRestoreEnd(final TopicPartition topicPartition, final String storeName, final long totalRestored) {
            restored.addAndGet(totalRestored);
        }
    });
    kafkaStreams.start();

    assertTrue(startupLatch.await(30, TimeUnit.SECONDS));
    assertThat(restored.get(), equalTo((long) numberOfKeys - offsetLimitDelta * 2 - offsetCheckpointed * 2));

    assertTrue(shutdownLatch.await(30, TimeUnit.SECONDS));
    assertThat(numReceived.get(), equalTo(offsetLimitDelta * 2));
}
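The optimization flag is what makes this test meaningful: with TOPOLOGY_OPTIMIZATION_CONFIG set to OPTIMIZE, a KTable built directly from a source topic reuses that topic as its changelog, so state is restored from the input topic itself rather than from an internal changelog topic. A minimal sketch of just that configuration; the application id and broker address below are illustrative assumptions, not values from the test's props() helper:

    import java.util.Properties;
    import org.apache.kafka.streams.StreamsConfig;

    public final class SourceTopicRestoreConfig {
        public static Properties sketch() {
            final Properties props = new Properties();
            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "restore-demo");      // assumed app id
            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
            // Reuse source topics as changelogs where possible; without this flag the
            // table would be restored from an internal "-changelog" topic instead.
            props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);
            return props;
        }
    }

Note that the test also passes the properties to builder.build(props): optimizations are applied at topology build time, so building without the properties would silently skip them.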
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: class StateDirectoryIntegrationTest, method testCleanUpStateDirIfEmpty.
@Test
public void testCleanUpStateDirIfEmpty() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);

    // Create topic.
    final String input = uniqueTestName + "-input";
    CLUSTER.createTopic(input);

    final Properties producerConfig = mkProperties(mkMap(
        mkEntry(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()),
        mkEntry(ProducerConfig.ACKS_CONFIG, "all"),
        mkEntry(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()),
        mkEntry(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName())));

    try (final KafkaProducer<String, String> producer =
             new KafkaProducer<>(producerConfig, Serdes.String().serializer(), Serdes.String().serializer())) {
        // Create test records.
        producer.send(new ProducerRecord<>(input, "a"));
        producer.send(new ProducerRecord<>(input, "b"));
        producer.send(new ProducerRecord<>(input, "c"));

        // Create topology.
        final String storeName = uniqueTestName + "-input-table";
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table(
            input,
            Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName)
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.String()));
        final Topology topology = builder.build();

        // State store directory.
        final String stateDir = TestUtils.tempDirectory(uniqueTestName).getPath();

        // Create KafkaStreams instance.
        final String applicationId = uniqueTestName + "-app";
        final Properties streamsConfig = mkProperties(mkMap(
            mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, applicationId),
            mkEntry(StreamsConfig.STATE_DIR_CONFIG, stateDir),
            mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())));
        final KafkaStreams streams = new KafkaStreams(topology, streamsConfig);

        // Create StateListener.
        final CountDownLatch runningLatch = new CountDownLatch(1);
        final CountDownLatch notRunningLatch = new CountDownLatch(1);
        final KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunningLatch.countDown();
            }
        };
        streams.setStateListener(stateListener);

        // Application state directory.
        final File appDir = new File(stateDir, applicationId);

        // Validate that the application state directory is created.
        streams.start();
        try {
            runningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't start in time.", e);
        }

        // State directory exists.
        assertTrue((new File(stateDir)).exists());
        // Application state directory exists.
        assertTrue(appDir.exists());

        // Validate that the state store directory is deleted.
        streams.close();
        try {
            notRunningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't shut down in time.", e);
        }
        streams.cleanUp();

        // Root state directory still exists.
        assertTrue((new File(stateDir)).exists());

        // One of the following must hold:
        // case 1: the state directory is cleaned up without any problems;
        // case 2: the state directory is not cleaned up because it does not contain any checkpoint file;
        // case 3: the state directory is not cleaned up because it contains a checkpoint file, but the file is empty.
        assertTrue(appDir.exists()
            || Arrays.stream(appDir.listFiles())
                .filter((File f) -> f.isDirectory() && f.listFiles().length > 0 && !(new File(f, ".checkpoint")).exists())
                .findFirst()
                .isPresent()
            || Arrays.stream(appDir.listFiles())
                .filter((File f) -> f.isDirectory() && (new File(f, ".checkpoint")).length() == 0L)
                .findFirst()
                .isPresent());
    } finally {
        CLUSTER.deleteAllTopicsAndWait(0L);
    }
}
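The close-then-cleanUp ordering in this test is dictated by the KafkaStreams lifecycle: cleanUp() wipes the application's local state directory and throws IllegalStateException if called while the instance is running. A short sketch of that contract, assuming a topology and streamsConfig shaped like the ones above:

    import java.util.Properties;
    import org.apache.kafka.streams.KafkaStreams;
    import org.apache.kafka.streams.Topology;

    static void lifecycleSketch(final Topology topology, final Properties streamsConfig) {
        try (final KafkaStreams streams = new KafkaStreams(topology, streamsConfig)) {
            streams.cleanUp();  // legal before start(): wipes this app's local state
            streams.start();
            // calling cleanUp() here would throw IllegalStateException (instance is running)
        }                       // close() runs here via try-with-resources
        // once the instance is stopped, cleanUp() would be legal again
    }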
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: class ProcessorContextImplTest, method streamsConfigMock.
private StreamsConfig streamsConfigMock() {
    final StreamsConfig streamsConfig = mock(StreamsConfig.class);
    expect(streamsConfig.originals()).andStubReturn(Collections.emptyMap());
    expect(streamsConfig.values()).andStubReturn(Collections.emptyMap());
    expect(streamsConfig.getString(StreamsConfig.APPLICATION_ID_CONFIG)).andStubReturn("add-id");
    expect(streamsConfig.defaultValueSerde()).andStubReturn(Serdes.ByteArray());
    expect(streamsConfig.defaultKeySerde()).andStubReturn(Serdes.ByteArray());
    replay(streamsConfig);
    return streamsConfig;
}
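This helper stubs a StreamsConfig with EasyMock (expect/andStubReturn/replay). For readers more used to Mockito, a roughly equivalent sketch might look as follows; this is an illustrative translation, not code from the Kafka repository:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;
    import java.util.Collections;
    import org.apache.kafka.common.serialization.Serdes;
    import org.apache.kafka.streams.StreamsConfig;

    private StreamsConfig streamsConfigMockito() {
        final StreamsConfig streamsConfig = mock(StreamsConfig.class);
        when(streamsConfig.originals()).thenReturn(Collections.emptyMap());
        when(streamsConfig.values()).thenReturn(Collections.emptyMap());
        when(streamsConfig.getString(StreamsConfig.APPLICATION_ID_CONFIG)).thenReturn("add-id");
        when(streamsConfig.defaultValueSerde()).thenReturn(Serdes.ByteArray());
        when(streamsConfig.defaultKeySerde()).thenReturn(Serdes.ByteArray());
        return streamsConfig;  // Mockito stubs take effect immediately; no replay() step
    }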
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: class GlobalStateManagerImplTest, method shouldNotRetryWhenPositionThrowsTimeoutExceptionAndTaskTimeoutIsZero.
@Test
public void shouldNotRetryWhenPositionThrowsTimeoutExceptionAndTaskTimeoutIsZero() {
    final AtomicInteger numberOfCalls = new AtomicInteger(0);
    consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public synchronized long position(final TopicPartition partition) {
            numberOfCalls.incrementAndGet();
            throw new TimeoutException("KABOOM!");
        }
    };
    initializeConsumer(0, 0, t1, t2, t3, t4);

    streamsConfig = new StreamsConfig(mkMap(
        mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, "appId"),
        mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"),
        mkEntry(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()),
        mkEntry(StreamsConfig.TASK_TIMEOUT_MS_CONFIG, 0L)));

    stateManager = new GlobalStateManagerImpl(
        new LogContext("mock"),
        time,
        topology,
        consumer,
        stateDirectory,
        stateRestoreListener,
        streamsConfig);
    processorContext.setStateManger(stateManager);
    stateManager.setGlobalProcessorContext(processorContext);

    final StreamsException expected = assertThrows(StreamsException.class, () -> stateManager.initialize());
    final Throwable cause = expected.getCause();
    assertThat(cause, instanceOf(TimeoutException.class));
    assertThat(cause.getMessage(), equalTo("KABOOM!"));

    assertEquals(1, numberOfCalls.get());
}
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache: class GlobalStateManagerImplTest, method shouldNotRetryWhenEndOffsetsThrowsTimeoutExceptionAndTaskTimeoutIsZero.
@Test
public void shouldNotRetryWhenEndOffsetsThrowsTimeoutExceptionAndTaskTimeoutIsZero() {
    final AtomicInteger numberOfCalls = new AtomicInteger(0);
    consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public synchronized Map<TopicPartition, Long> endOffsets(final Collection<TopicPartition> partitions) {
            numberOfCalls.incrementAndGet();
            throw new TimeoutException("KABOOM!");
        }
    };
    initializeConsumer(0, 0, t1, t2, t3, t4);

    streamsConfig = new StreamsConfig(mkMap(
        mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, "appId"),
        mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"),
        mkEntry(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()),
        mkEntry(StreamsConfig.TASK_TIMEOUT_MS_CONFIG, 0L)));

    stateManager = new GlobalStateManagerImpl(
        new LogContext("mock"),
        time,
        topology,
        consumer,
        stateDirectory,
        stateRestoreListener,
        streamsConfig);
    processorContext.setStateManger(stateManager);
    stateManager.setGlobalProcessorContext(processorContext);

    final StreamsException expected = assertThrows(StreamsException.class, () -> stateManager.initialize());
    final Throwable cause = expected.getCause();
    assertThat(cause, instanceOf(TimeoutException.class));
    assertThat(cause.getMessage(), equalTo("KABOOM!"));

    assertEquals(1, numberOfCalls.get());
}
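Both tests pin StreamsConfig.TASK_TIMEOUT_MS_CONFIG to 0L, which turns the first TimeoutException from the consumer into a fatal StreamsException after exactly one call. With a positive task timeout (per KIP-572) the global state manager would instead retry the failing position()/endOffsets() call until the deadline expires. A sketch of such a retrying configuration, with an illustrative 30-second value:

    import static org.apache.kafka.common.utils.Utils.mkEntry;
    import static org.apache.kafka.common.utils.Utils.mkMap;
    import org.apache.kafka.streams.StreamsConfig;
    import org.apache.kafka.test.TestUtils;

    final StreamsConfig retryingConfig = new StreamsConfig(mkMap(
        mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, "appId"),
        mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "dummy:1234"),
        mkEntry(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getPath()),
        // With a non-zero task timeout, a TimeoutException is swallowed and the call
        // is retried until the timeout elapses, instead of failing on the first attempt.
        mkEntry(StreamsConfig.TASK_TIMEOUT_MS_CONFIG, 30_000L)));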