Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.
The class InternalTopicManagerTest, method shouldCompleteValidateWhenTopicLeaderNotAvailableAndThenDescribeSuccess:
@Test
public void shouldCompleteValidateWhenTopicLeaderNotAvailableAndThenDescribeSuccess() {
    final AdminClient admin = EasyMock.createNiceMock(AdminClient.class);
    final InternalTopicManager topicManager = new InternalTopicManager(time, admin, new StreamsConfig(config));
    final TopicPartitionInfo partitionInfo = new TopicPartitionInfo(0, broker1, Collections.singletonList(broker1), Collections.singletonList(broker1));
    final KafkaFutureImpl<TopicDescription> topicDescriptionFailFuture = new KafkaFutureImpl<>();
    topicDescriptionFailFuture.completeExceptionally(new LeaderNotAvailableException("Leader Not Available!"));
    final KafkaFutureImpl<TopicDescription> topicDescriptionSuccessFuture = new KafkaFutureImpl<>();
    topicDescriptionSuccessFuture.complete(new TopicDescription(topic1, false, Collections.singletonList(partitionInfo), Collections.emptySet()));
    EasyMock.expect(admin.describeTopics(Collections.singleton(topic1)))
        .andReturn(new MockDescribeTopicsResult(Collections.singletonMap(topic1, topicDescriptionFailFuture)))
        .once();
    EasyMock.expect(admin.describeTopics(Collections.singleton(topic1)))
        .andReturn(new MockDescribeTopicsResult(Collections.singletonMap(topic1, topicDescriptionSuccessFuture)))
        .once();
    EasyMock.replay(admin);
    final InternalTopicConfig internalTopicConfig = new RepartitionTopicConfig(topic1, Collections.emptyMap());
    internalTopicConfig.setNumberOfPartitions(1);
    topicManager.makeReady(Collections.singletonMap(topic1, internalTopicConfig));
    EasyMock.verify(admin);
}
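The config map passed to new StreamsConfig(config), as well as time, broker1, and topic1, are fixtures defined elsewhere in InternalTopicManagerTest. A minimal sketch of such a config, assuming only the two settings StreamsConfig always requires (the values are illustrative, not the real fixture):

    final Properties config = new Properties();
    config.put(StreamsConfig.APPLICATION_ID_CONFIG, "internal-topic-manager-test"); // illustrative application id
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");           // illustrative broker list
    final StreamsConfig streamsConfig = new StreamsConfig(config);                  // same construction as in the test above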
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.
The class InternalTopicManagerTest, method setupCleanUpScenario:
private void setupCleanUpScenario(final AdminClient admin,
                                  final StreamsConfig streamsConfig,
                                  final InternalTopicConfig internalTopicConfig1,
                                  final InternalTopicConfig internalTopicConfig2) {
    final KafkaFutureImpl<TopicMetadataAndConfig> createTopicFailFuture1 = new KafkaFutureImpl<>();
    createTopicFailFuture1.completeExceptionally(new TopicExistsException("exists"));
    final KafkaFutureImpl<TopicMetadataAndConfig> createTopicFailFuture2 = new KafkaFutureImpl<>();
    createTopicFailFuture2.completeExceptionally(new IllegalStateException("Nobody expects the Spanish inquisition"));
    final KafkaFutureImpl<TopicMetadataAndConfig> createTopicSuccessfulFuture = new KafkaFutureImpl<>();
    createTopicSuccessfulFuture.complete(new TopicMetadataAndConfig(Uuid.randomUuid(), 1, 1, new Config(Collections.emptyList())));
    final NewTopic newTopic1 = newTopic(topic1, internalTopicConfig1, streamsConfig);
    final NewTopic newTopic2 = newTopic(topic2, internalTopicConfig2, streamsConfig);
    EasyMock.expect(admin.createTopics(mkSet(newTopic1, newTopic2)))
        .andAnswer(() -> new MockCreateTopicsResult(mkMap(
            mkEntry(topic1, createTopicSuccessfulFuture),
            mkEntry(topic2, createTopicFailFuture1))));
    EasyMock.expect(admin.createTopics(mkSet(newTopic2)))
        .andAnswer(() -> new MockCreateTopicsResult(mkMap(mkEntry(topic2, createTopicFailFuture2))));
}
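MockCreateTopicsResult (like MockDescribeTopicsResult and MockDeleteTopicsResult in the other methods) is a small helper defined elsewhere in InternalTopicManagerTest. A plausible sketch, assuming the admin result class exposes a protected constructor that takes the per-topic future map:

    // Sketch only; the exact superclass constructor is an assumption about the Kafka version in use.
    private static class MockCreateTopicsResult extends CreateTopicsResult {
        MockCreateTopicsResult(final Map<String, KafkaFuture<TopicMetadataAndConfig>> futures) {
            super(futures);
        }
    }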
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.
The class InternalTopicManagerTest, method shouldCleanUpWhenCreateTopicsTimesOut:
@Test
public void shouldCleanUpWhenCreateTopicsTimesOut() {
    final AdminClient admin = EasyMock.createNiceMock(AdminClient.class);
    final StreamsConfig streamsConfig = new StreamsConfig(config);
    final MockTime time = new MockTime((Integer) config.get(StreamsConfig.consumerPrefix(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG)) / 3);
    final InternalTopicManager topicManager = new InternalTopicManager(time, admin, streamsConfig);
    final InternalTopicConfig internalTopicConfig1 = setupRepartitionTopicConfig(topic1, 1);
    final InternalTopicConfig internalTopicConfig2 = setupRepartitionTopicConfig(topic2, 1);
    final KafkaFutureImpl<TopicMetadataAndConfig> createTopicFailFuture1 = new KafkaFutureImpl<>();
    createTopicFailFuture1.completeExceptionally(new TopicExistsException("exists"));
    final KafkaFutureImpl<TopicMetadataAndConfig> createTopicSuccessfulFuture = new KafkaFutureImpl<>();
    createTopicSuccessfulFuture.complete(new TopicMetadataAndConfig(Uuid.randomUuid(), 1, 1, new Config(Collections.emptyList())));
    final NewTopic newTopic1 = newTopic(topic1, internalTopicConfig1, streamsConfig);
    final NewTopic newTopic2 = newTopic(topic2, internalTopicConfig2, streamsConfig);
    EasyMock.expect(admin.createTopics(mkSet(newTopic1, newTopic2)))
        .andAnswer(() -> new MockCreateTopicsResult(mkMap(
            mkEntry(topic1, createTopicSuccessfulFuture),
            mkEntry(topic2, createTopicFailFuture1))));
    final KafkaFutureImpl<TopicMetadataAndConfig> createTopicFutureThatNeverCompletes = new KafkaFutureImpl<>();
    EasyMock.expect(admin.createTopics(mkSet(newTopic2)))
        .andStubAnswer(() -> new MockCreateTopicsResult(mkMap(mkEntry(topic2, createTopicFutureThatNeverCompletes))));
    final KafkaFutureImpl<Void> deleteTopicSuccessfulFuture = new KafkaFutureImpl<>();
    deleteTopicSuccessfulFuture.complete(null);
    EasyMock.expect(admin.deleteTopics(mkSet(topic1)))
        .andAnswer(() -> new MockDeleteTopicsResult(mkMap(mkEntry(topic1, deleteTopicSuccessfulFuture))));
    EasyMock.replay(admin);
    assertThrows(
        TimeoutException.class,
        () -> topicManager.setup(mkMap(
            mkEntry(topic1, internalTopicConfig1),
            mkEntry(topic2, internalTopicConfig2))));
    EasyMock.verify(admin);
}
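setupRepartitionTopicConfig is another helper of the test class. Based on the inline RepartitionTopicConfig setup shown in the first example above, a plausible sketch is:

    // Sketch mirroring the RepartitionTopicConfig construction used earlier on this page.
    private InternalTopicConfig setupRepartitionTopicConfig(final String topic, final int partitions) {
        final InternalTopicConfig config = new RepartitionTopicConfig(topic, Collections.emptyMap());
        config.setNumberOfPartitions(partitions);
        return config;
    }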
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.
The class StateDirectoryIntegrationTest, method testNotCleanUpStateDirIfNotEmpty:
@Test
public void testNotCleanUpStateDirIfNotEmpty() throws InterruptedException {
    final String uniqueTestName = safeUniqueTestName(getClass(), testName);
    // Create Topic
    final String input = uniqueTestName + "-input";
    CLUSTER.createTopic(input);
    final Properties producerConfig = mkProperties(mkMap(
        mkEntry(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()),
        mkEntry(ProducerConfig.ACKS_CONFIG, "all"),
        mkEntry(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName()),
        mkEntry(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getCanonicalName())));
    try (final KafkaProducer<String, String> producer =
             new KafkaProducer<>(producerConfig, Serdes.String().serializer(), Serdes.String().serializer())) {
        // Create Test Records
        producer.send(new ProducerRecord<>(input, "a"));
        producer.send(new ProducerRecord<>(input, "b"));
        producer.send(new ProducerRecord<>(input, "c"));
        // Create Topology
        final String storeName = uniqueTestName + "-input-table";
        final StreamsBuilder builder = new StreamsBuilder();
        builder.table(
            input,
            Materialized.<String, String, KeyValueStore<Bytes, byte[]>>as(storeName)
                .withKeySerde(Serdes.String())
                .withValueSerde(Serdes.String()));
        final Topology topology = builder.build();
        // State Store Directory
        final String stateDir = TestUtils.tempDirectory(uniqueTestName).getPath();
        // Create KafkaStreams instance
        final String applicationId = uniqueTestName + "-app";
        final Properties streamsConfig = mkProperties(mkMap(
            mkEntry(StreamsConfig.APPLICATION_ID_CONFIG, applicationId),
            mkEntry(StreamsConfig.STATE_DIR_CONFIG, stateDir),
            mkEntry(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers())));
        final KafkaStreams streams = new KafkaStreams(topology, streamsConfig);
        // Create StateListener
        final CountDownLatch runningLatch = new CountDownLatch(1);
        final CountDownLatch notRunningLatch = new CountDownLatch(1);
        final KafkaStreams.StateListener stateListener = (newState, oldState) -> {
            if (newState == KafkaStreams.State.RUNNING) {
                runningLatch.countDown();
            }
            if (newState == KafkaStreams.State.NOT_RUNNING) {
                notRunningLatch.countDown();
            }
        };
        streams.setStateListener(stateListener);
        // Application state directory
        final File appDir = new File(stateDir, applicationId);
        // Validate application state directory is created.
        streams.start();
        try {
            runningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't start in time.", e);
        }
        // State directory exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory exists
        assertTrue(appDir.exists());
        try {
            assertTrue((new File(appDir, "dummy")).createNewFile());
        } catch (final IOException e) {
            throw new RuntimeException("Failed to create dummy file.", e);
        }
        // Close the application; cleanUp() below must not delete the state directory because it is not empty.
        streams.close();
        try {
            notRunningLatch.await(IntegrationTestUtils.DEFAULT_TIMEOUT, TimeUnit.MILLISECONDS);
        } catch (final InterruptedException e) {
            throw new RuntimeException("Streams didn't clean up in time.", e);
        }
        streams.cleanUp();
        // Root state directory still exists
        assertTrue((new File(stateDir)).exists());
        // Application state directory still exists
        assertTrue(appDir.exists());
    } finally {
        CLUSTER.deleteAllTopicsAndWait(0L);
    }
}
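The latch-based StateListener above blocks the test until Kafka Streams reaches a given state. An alternative sketch, assuming org.apache.kafka.test.TestUtils is on the test classpath, polls KafkaStreams.state() instead of registering a listener:

    // Alternative wait (sketch): poll the client state rather than counting down a latch.
    streams.start();
    TestUtils.waitForCondition(
        () -> streams.state() == KafkaStreams.State.RUNNING,
        IntegrationTestUtils.DEFAULT_TIMEOUT,
        "Streams didn't start in time.");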
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.
The class RestoreIntegrationTest, method shouldRestoreStateFromChangelogTopic:
@Test
public void shouldRestoreStateFromChangelogTopic() throws Exception {
    final String changelog = appId + "-store-changelog";
    CLUSTER.createTopic(changelog, 2, 1);
    final AtomicInteger numReceived = new AtomicInteger(0);
    final StreamsBuilder builder = new StreamsBuilder();
    final Properties props = props();
    // restoring from 1000 to 5000, and then process from 5000 to 10000 on each of the two partitions
    final int offsetCheckpointed = 1000;
    createStateForRestoration(changelog, 0);
    createStateForRestoration(inputStream, 10000);
    final StateDirectory stateDirectory = new StateDirectory(new StreamsConfig(props), new MockTime(), true, false);
    // note here the checkpointed offset is the last processed record's offset, so without control messages we should write this offset - 1
    new OffsetCheckpoint(new File(stateDirectory.getOrCreateDirectoryForTask(new TaskId(0, 0)), ".checkpoint"))
        .write(Collections.singletonMap(new TopicPartition(changelog, 0), (long) offsetCheckpointed - 1));
    new OffsetCheckpoint(new File(stateDirectory.getOrCreateDirectoryForTask(new TaskId(0, 1)), ".checkpoint"))
        .write(Collections.singletonMap(new TopicPartition(changelog, 1), (long) offsetCheckpointed - 1));
    final CountDownLatch startupLatch = new CountDownLatch(1);
    final CountDownLatch shutdownLatch = new CountDownLatch(1);
    builder.table(inputStream, Consumed.with(Serdes.Integer(), Serdes.Integer()), Materialized.as("store"))
        .toStream()
        .foreach((key, value) -> {
            if (numReceived.incrementAndGet() == numberOfKeys) {
                shutdownLatch.countDown();
            }
        });
    kafkaStreams = new KafkaStreams(builder.build(), props);
    kafkaStreams.setStateListener((newState, oldState) -> {
        if (newState == KafkaStreams.State.RUNNING && oldState == KafkaStreams.State.REBALANCING) {
            startupLatch.countDown();
        }
    });
    final AtomicLong restored = new AtomicLong(0);
    kafkaStreams.setGlobalStateRestoreListener(new StateRestoreListener() {
        @Override
        public void onRestoreStart(final TopicPartition topicPartition, final String storeName, final long startingOffset, final long endingOffset) {
        }

        @Override
        public void onBatchRestored(final TopicPartition topicPartition, final String storeName, final long batchEndOffset, final long numRestored) {
        }

        @Override
        public void onRestoreEnd(final TopicPartition topicPartition, final String storeName, final long totalRestored) {
            restored.addAndGet(totalRestored);
        }
    });
    kafkaStreams.start();
    assertTrue(startupLatch.await(30, TimeUnit.SECONDS));
    assertThat(restored.get(), equalTo((long) numberOfKeys - 2 * offsetCheckpointed));
    assertTrue(shutdownLatch.await(30, TimeUnit.SECONDS));
    assertThat(numReceived.get(), equalTo(numberOfKeys));
}
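The props() and createStateForRestoration(...) calls refer to fixtures defined elsewhere in RestoreIntegrationTest. A minimal sketch of the kind of configuration props() has to supply for the snippet above (values are illustrative; the real fixture sets more options):

    // Illustrative sketch only, not the actual fixture.
    private Properties props() {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, appId);
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers());
        props.put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory(appId).getPath());
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.Integer().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.Integer().getClass());
        return props;
    }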