Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.
From the class RocksDBStoreTest, method shouldNotThrowWhenRestoringOnMissingHeaders.
@Test
public void shouldNotThrowWhenRestoringOnMissingHeaders() {
    final List<KeyValue<byte[], byte[]>> entries = getChangelogRecordsWithoutHeaders();
    final Properties props = StreamsTestUtils.getStreamsConfig();
    props.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, MockRocksDbConfigSetter.class);
    props.put(InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, true);
    dir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(dir, Serdes.String(), Serdes.String(), new StreamsConfig(props));
    rocksDBStore.init((StateStoreContext) context, rocksDBStore);

    context.restore(rocksDBStore.name(), entries);

    // records without headers carry no position metadata, so the position stays empty
    assertThat(rocksDBStore.getPosition(), is(Position.emptyPosition()));
}
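The test wires a MockRocksDbConfigSetter into the store via ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, but the class body is not shown in this snippet. A minimal implementation of the public RocksDBConfigSetter interface looks roughly like the sketch below; the option tweak is illustrative only, and the real MockRocksDbConfigSetter in RocksDBStoreTest may record invocations or adjust different options.

import java.util.Map;
import org.apache.kafka.streams.state.RocksDBConfigSetter;
import org.rocksdb.Options;

// Minimal sketch of a RocksDB config setter; invoked once per store instance.
public class MockRocksDbConfigSetter implements RocksDBConfigSetter {

    @Override
    public void setConfig(final String storeName, final Options options, final Map<String, Object> configs) {
        options.setMaxWriteBufferNumber(2);  // illustrative tweak only
    }

    @Override
    public void close(final String storeName, final Options options) {
        // free any native objects allocated in setConfig()
    }
}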
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.
From the class RocksDBStoreTest, method shouldRestoreRecordsAndConsistencyVectorMultipleTopics.
@Test
public void shouldRestoreRecordsAndConsistencyVectorMultipleTopics() {
    final List<ConsumerRecord<byte[], byte[]>> entries = getChangelogRecordsMultipleTopics();
    final Properties props = StreamsTestUtils.getStreamsConfig();
    props.put(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG, MockRocksDbConfigSetter.class);
    props.put(InternalConfig.IQ_CONSISTENCY_OFFSET_VECTOR_ENABLED, true);
    dir = TestUtils.tempDirectory();
    context = new InternalMockProcessorContext<>(dir, Serdes.String(), Serdes.String(), new StreamsConfig(props));
    rocksDBStore.init((StateStoreContext) context, rocksDBStore);

    context.restoreWithHeaders(rocksDBStore.name(), entries);

    assertEquals("a", stringDeserializer.deserialize(null, rocksDBStore.get(new Bytes(stringSerializer.serialize(null, "1")))));
    assertEquals("b", stringDeserializer.deserialize(null, rocksDBStore.get(new Bytes(stringSerializer.serialize(null, "2")))));
    assertEquals("c", stringDeserializer.deserialize(null, rocksDBStore.get(new Bytes(stringSerializer.serialize(null, "3")))));

    assertThat(rocksDBStore.getPosition(), Matchers.notNullValue());
    assertThat(rocksDBStore.getPosition().getPartitionPositions("A"), Matchers.notNullValue());
    assertThat(rocksDBStore.getPosition().getPartitionPositions("A"), hasEntry(0, 3L));
    assertThat(rocksDBStore.getPosition().getPartitionPositions("B"), Matchers.notNullValue());
    assertThat(rocksDBStore.getPosition().getPartitionPositions("B"), hasEntry(0, 2L));
}
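The consistency vector asserted here is a Position, which maps each input topic and partition to the highest offset carried by the restored records. A hedged sketch of that bookkeeping using the public Position API follows; the literal offsets mirror the assertions above, assuming getChangelogRecordsMultipleTopics() produces records up to offset 3 on topic A and offset 2 on topic B, both on partition 0.

import org.apache.kafka.streams.query.Position;

public class PositionSketch {
    public static void main(final String[] args) {
        // Building the position the test expects after restoration.
        final Position position = Position.emptyPosition()
            .withComponent("A", 0, 3L)   // topic A, partition 0, last restored offset 3
            .withComponent("B", 0, 2L);  // topic B, partition 0, last restored offset 2

        // getPartitionPositions() returns the partition-to-offset map for one topic.
        System.out.println(position.getPartitionPositions("A")); // {0=3}
        System.out.println(position.getPartitionPositions("B")); // {0=2}
    }
}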
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.
From the class StreamThreadTest, method shouldCommitAfterCommitInterval.
@Test
public void shouldCommitAfterCommitInterval() {
    final long commitInterval = 100L;
    final long commitLatency = 10L;

    final Properties props = configProps(false);
    props.setProperty(StreamsConfig.STATE_DIR_CONFIG, stateDir);
    props.setProperty(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, Long.toString(commitInterval));
    final StreamsConfig config = new StreamsConfig(props);

    final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
    final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
    expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
    expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
    EasyMock.replay(consumer, consumerGroupMetadata);

    final AtomicBoolean committed = new AtomicBoolean(false);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    final TaskManager taskManager = new TaskManager(null, null, null, null, null, null, null, topologyMetadata, null, null) {
        @Override
        int commit(final Collection<Task> tasksToCommit) {
            committed.set(true);
            // we advance time to make sure the commit delay is considered when computing the next commit timestamp
            mockTime.sleep(commitLatency);
            return 1;
        }
    };
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = buildStreamThread(consumer, taskManager, config, topologyMetadata);

    thread.setNow(mockTime.milliseconds());
    thread.maybeCommit();
    assertTrue(committed.get());

    mockTime.sleep(commitInterval);
    committed.set(false);
    thread.setNow(mockTime.milliseconds());
    thread.maybeCommit();
    assertFalse(committed.get());

    mockTime.sleep(1);
    committed.set(false);
    thread.setNow(mockTime.milliseconds());
    thread.maybeCommit();
    assertTrue(committed.get());
}
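Two details make this test pass: the next commit deadline is measured from when the previous commit finished (hence the mocked commit() advancing mockTime by commitLatency), and the elapsed time must strictly exceed the commit interval (hence assertFalse after sleeping exactly commitInterval, and assertTrue only after one more millisecond). Below is a simplified, illustrative sketch of that scheduling logic; CommitScheduler and its fields are inventions for this sketch, not the actual StreamThread code.

import java.util.function.LongSupplier;

// Illustrative sketch only, not the actual StreamThread implementation.
class CommitScheduler {
    private final long commitIntervalMs;
    private final LongSupplier clock;   // e.g. System::currentTimeMillis
    private long lastCommitMs = 0L;     // with a wall clock, the first maybeCommit() commits right away

    CommitScheduler(final long commitIntervalMs, final LongSupplier clock) {
        this.commitIntervalMs = commitIntervalMs;
        this.clock = clock;
    }

    boolean maybeCommit(final Runnable commit) {
        final long now = clock.getAsLong();
        if (now - lastCommitMs <= commitIntervalMs) {
            return false;                  // not due: elapsed time must strictly exceed the interval
        }
        commit.run();                      // committing can itself take time,
        lastCommitMs = clock.getAsLong();  // so the next deadline is measured from completion
        return true;
    }
}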
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.
From the class StreamThreadTest, method shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerGotFencedInCommitTransactionWhenSuspendingTasks.
@Test
public void shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerGotFencedInCommitTransactionWhenSuspendingTasks() {
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(configProps(true)), true);
    internalTopologyBuilder.addSource(null, "name", null, null, null, topic1);
    internalTopologyBuilder.addSink("out", "output", null, null, null, "name");

    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());

    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();

    // assign single partition
    assignedPartitions.add(t1p1);
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());

    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);

    thread.runOnce();
    assertThat(thread.activeTasks().size(), equalTo(1));

    // need to process a record to enable committing
    addRecord(mockConsumer, 0L);
    thread.runOnce();

    clientSupplier.producers.get(0).commitTransactionException = new ProducerFencedException("Producer is fenced");
    assertThrows(TaskMigratedException.class, () -> thread.rebalanceListener().onPartitionsRevoked(assignedPartitions));
    assertFalse(clientSupplier.producers.get(0).transactionCommitted());
    assertFalse(clientSupplier.producers.get(0).closed());
    assertEquals(1, thread.activeTasks().size());
}
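The boolean passed to configProps here is assumed to enable exactly-once processing, which is what gives each task a transactional producer whose commitTransaction() can throw ProducerFencedException; under that guarantee, the fencing surfaces as a TaskMigratedException rather than closing the task. A hedged sketch of the kind of configuration involved follows; the property values are placeholders, and the exact contents of configProps(true) are an assumption.

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class EosConfigSketch {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "fencing-test-app");   // placeholder
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        // the processing guarantee that puts transactional producers in play
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);
        final StreamsConfig config = new StreamsConfig(props);
        System.out.println(config.getString(StreamsConfig.PROCESSING_GUARANTEE_CONFIG));
    }
}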
Use of org.apache.kafka.streams.StreamsConfig in project kafka by apache.
From the class StreamThreadTest, method shouldNotCommitBeforeTheCommitInterval.
@Test
public void shouldNotCommitBeforeTheCommitInterval() {
    final long commitInterval = 1000L;
    final Properties props = configProps(false);
    props.setProperty(StreamsConfig.STATE_DIR_CONFIG, stateDir);
    props.setProperty(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, Long.toString(commitInterval));
    final StreamsConfig config = new StreamsConfig(props);

    final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
    final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
    expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
    expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
    final TaskManager taskManager = mockTaskManagerCommit(consumer, 1, 1);
    EasyMock.replay(consumer, consumerGroupMetadata);

    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = buildStreamThread(consumer, taskManager, config, topologyMetadata);

    thread.setNow(mockTime.milliseconds());
    thread.maybeCommit();
    mockTime.sleep(commitInterval - 10L);
    thread.setNow(mockTime.milliseconds());
    thread.maybeCommit();

    verify(taskManager);
}
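Reusing the CommitScheduler sketch from shouldCommitAfterCommitInterval above, the expectation this test verifies (exactly one commit across both calls) can be reproduced in isolation; the array-based clock is just a manual stand-in for MockTime.

public class CommitSchedulerDemo {
    public static void main(final String[] args) {
        final long[] now = {10_000L};  // manual clock
        final CommitScheduler scheduler = new CommitScheduler(1000L, () -> now[0]);

        System.out.println(scheduler.maybeCommit(() -> { }));  // true: first call commits
        now[0] += 990L;                                        // commitInterval - 10
        System.out.println(scheduler.maybeCommit(() -> { }));  // false: still within the interval
    }
}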