Use of org.apache.kafka.test.NoOpRecordCollector in project kafka by apache.
The class StreamTaskTest, method shouldCheckpointOffsetsOnCommit.
@SuppressWarnings("unchecked")
@Test
public void shouldCheckpointOffsetsOnCommit() throws Exception {
final String storeName = "test";
final String changelogTopic = ProcessorStateManager.storeChangelogTopic("appId", storeName);
final InMemoryKeyValueStore inMemoryStore = new InMemoryKeyValueStore(storeName, null, null) {
@Override
public void init(final ProcessorContext context, final StateStore root) {
context.register(root, true, null);
}
@Override
public boolean persistent() {
return true;
}
};
final ProcessorTopology topology = new ProcessorTopology(Collections.<ProcessorNode>emptyList(), Collections.<String, SourceNode>emptyMap(), Collections.<String, SinkNode>emptyMap(), Collections.<StateStore>singletonList(inMemoryStore), Collections.singletonMap(storeName, changelogTopic), Collections.<StateStore>emptyList());
final TopicPartition partition = new TopicPartition(changelogTopic, 0);
final NoOpRecordCollector recordCollector = new NoOpRecordCollector() {
@Override
public Map<TopicPartition, Long> offsets() {
return Collections.singletonMap(partition, 543L);
}
};
restoreStateConsumer.updatePartitions(changelogTopic, Collections.singletonList(new PartitionInfo(changelogTopic, 0, null, new Node[0], new Node[0])));
restoreStateConsumer.updateEndOffsets(Collections.singletonMap(partition, 0L));
restoreStateConsumer.updateBeginningOffsets(Collections.singletonMap(partition, 0L));
final StreamsMetrics streamsMetrics = new MockStreamsMetrics(new Metrics());
final TaskId taskId = new TaskId(0, 0);
final MockTime time = new MockTime();
final StreamsConfig config = createConfig(baseDir);
final StreamTask streamTask = new StreamTask(taskId, "appId", partitions, topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, new ThreadCache("testCache", 0, streamsMetrics), time, recordCollector);
time.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG));
streamTask.commit();
final OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(stateDirectory.directoryForTask(taskId), ProcessorStateManager.CHECKPOINT_FILE_NAME));
assertThat(checkpoint.read(), equalTo(Collections.singletonMap(partition, 544L)));
}
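The overrides above work because NoOpRecordCollector is a stub whose methods all do nothing, so a test can subclass it and replace only the behaviour it cares about. A minimal sketch of such a stub, reconstructed from the overrides seen in these snippets rather than copied from the Kafka source:

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.Serializer;

// Hypothetical stand-in for org.apache.kafka.test.NoOpRecordCollector.
// The real class implements Kafka Streams' internal RecordCollector
// interface; that is omitted here to keep the sketch self-contained.
public class NoOpRecordCollectorSketch {

    // swallow the record instead of producing it to a topic
    public <K, V> void send(final String topic, final K key, final V value,
                            final Integer partition, final Long timestamp,
                            final Serializer<K> keySerializer,
                            final Serializer<V> valueSerializer) {
        // no-op
    }

    public void flush() {
        // no-op
    }

    public void close() {
        // no-op
    }

    // nothing was ever written, so there are no offsets to report
    public Map<TopicPartition, Long> offsets() {
        return Collections.emptyMap();
    }
}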
Use of org.apache.kafka.test.NoOpRecordCollector in project kafka by apache.
The class StreamTaskTest, method shouldFlushRecordCollectorOnFlushState.
@Test
public void shouldFlushRecordCollectorOnFlushState() throws Exception {
    final AtomicBoolean flushed = new AtomicBoolean(false);
    // probe collector: records whether flush() was invoked
    final NoOpRecordCollector recordCollector = new NoOpRecordCollector() {
        @Override
        public void flush() {
            flushed.set(true);
        }
    };
    final StreamsMetrics streamsMetrics = new MockStreamsMetrics(new Metrics());
    final StreamTask streamTask = new StreamTask(taskId00, "appId", partitions, topology, consumer, changelogReader, createConfig(baseDir), streamsMetrics, stateDirectory, testCache, time, recordCollector);
    streamTask.flushState();
    assertTrue(flushed.get());
}
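This test relies on a simple probe pattern: subclass the no-op collaborator, flip a flag when the method of interest runs, then assert on the flag. The same idea in a self-contained form (all class names below are illustrative, not Kafka's):

import java.util.concurrent.atomic.AtomicBoolean;

public class ProbePatternExample {

    // hypothetical no-op collaborator, standing in for NoOpRecordCollector
    static class NoOpCollaborator {
        public void flush() {
            // no-op by default
        }
    }

    public static void main(final String[] args) {
        final AtomicBoolean flushed = new AtomicBoolean(false);
        // the probe records that flush() was reached
        final NoOpCollaborator probe = new NoOpCollaborator() {
            @Override
            public void flush() {
                flushed.set(true);
            }
        };
        probe.flush(); // in the test above, StreamTask.flushState() triggers this call
        System.out.println("flushed = " + flushed.get()); // prints: flushed = true
    }
}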
Use of org.apache.kafka.test.NoOpRecordCollector in project kafka by apache.
The class ProcessorStateManagerTest, method testFlushAndClose.
@Test
public void testFlushAndClose() throws IOException {
    checkpoint.write(Collections.<TopicPartition, Long>emptyMap());
    // set up ack'ed offsets
    final HashMap<TopicPartition, Long> ackedOffsets = new HashMap<>();
    ackedOffsets.put(new TopicPartition(persistentStoreTopicName, 1), 123L);
    ackedOffsets.put(new TopicPartition(nonPersistentStoreTopicName, 1), 456L);
    ackedOffsets.put(new TopicPartition(ProcessorStateManager.storeChangelogTopic(applicationId, "otherTopic"), 1), 789L);
    final ProcessorStateManager stateMgr = new ProcessorStateManager(taskId, noPartitions, false, stateDirectory, new HashMap<String, String>() {
        {
            put(persistentStoreName, persistentStoreTopicName);
            put(nonPersistentStoreName, nonPersistentStoreTopicName);
        }
    }, changelogReader);
    try {
        // make sure the checkpoint file isn't deleted
        assertTrue(checkpointFile.exists());
        stateMgr.register(persistentStore, true, persistentStore.stateRestoreCallback);
        stateMgr.register(nonPersistentStore, true, nonPersistentStore.stateRestoreCallback);
    } finally {
        // close the state manager with the ack'ed offsets
        stateMgr.flush(new MockProcessorContext(StateSerdes.withBuiltinTypes("foo", String.class, String.class), new NoOpRecordCollector()));
        stateMgr.close(ackedOffsets);
    }
    // make sure all stores are flushed and closed, and the checkpoint file is written
    assertTrue(persistentStore.flushed);
    assertTrue(persistentStore.closed);
    assertTrue(nonPersistentStore.flushed);
    assertTrue(nonPersistentStore.closed);
    assertTrue(checkpointFile.exists());
    // the checkpoint file should contain an offset from the persistent store only
    final Map<TopicPartition, Long> checkpointedOffsets = checkpoint.read();
    assertEquals(1, checkpointedOffsets.size());
    // 124 = the ack'ed offset 123 plus one, i.e. the next offset to read on restore
    assertEquals(new Long(124), checkpointedOffsets.get(new TopicPartition(persistentStoreTopicName, 1)));
}
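Both checkpoint assertions in this listing (544 after the collector reported 543, and 124 above after acking 123) follow the same rule: the checkpoint stores the next offset to read on restore, i.e. the last written offset plus one. A minimal round trip with the OffsetCheckpoint helper these tests already use; the file path and topic name are illustrative, and the import assumes the class lives in the streams state internals package of the same Kafka version:

import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.state.internals.OffsetCheckpoint;

public class OffsetCheckpointRoundTrip {

    public static void main(final String[] args) throws IOException {
        final File file = new File(System.getProperty("java.io.tmpdir"), "example-checkpoint");
        final OffsetCheckpoint checkpoint = new OffsetCheckpoint(file);
        // 123 was the last ack'ed offset, so checkpoint 124, the next offset to read
        checkpoint.write(Collections.singletonMap(new TopicPartition("my-changelog", 1), 124L));
        final Map<TopicPartition, Long> read = checkpoint.read();
        System.out.println(read); // {my-changelog-1=124}
    }
}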
Use of org.apache.kafka.test.NoOpRecordCollector in project kafka by apache.
The class ChangeLoggingKeyValueBytesStoreTest, method before.
@Before
public void before() {
    // capture every record the store change-logs into the `sent` map
    final NoOpRecordCollector collector = new NoOpRecordCollector() {
        @Override
        public <K, V> void send(final String topic, K key, V value, Integer partition, Long timestamp, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
            sent.put(key, value);
        }
    };
    final MockProcessorContext context = new MockProcessorContext(TestUtils.tempDirectory(), Serdes.String(), Serdes.Long(), collector, new ThreadCache("testCache", 0, new MockStreamsMetrics(new Metrics())));
    context.setTime(0);
    store.init(context, store);
}
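With the capturing collector installed, tests in this class can exercise the store and then assert against the sent map. An illustrative test method (not from the listed source) that would slot into this class, assuming the store is the Bytes-keyed variant and sent is declared as a Map<Object, Object>:

@Test
public void shouldCaptureChangeLoggedRecordOnPut() {
    final Bytes key = Bytes.wrap("key".getBytes());
    final byte[] value = "value".getBytes();
    store.put(key, value);
    // the overridden send(...) above stored the change-logged record in `sent`
    assertArrayEquals(value, (byte[]) sent.get(key));
}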
Use of org.apache.kafka.test.NoOpRecordCollector in project kafka by apache.
The class ChangeLoggingKeyValueStoreTest, method before.
@Before
public void before() {
    final NoOpRecordCollector collector = new NoOpRecordCollector() {
        @Override
        public <K, V> void send(final String topic, K key, V value, Integer partition, Long timestamp, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
            sent.put(key, value);
        }
    };
    final MockProcessorContext context = new MockProcessorContext(TestUtils.tempDirectory(), Serdes.String(), Serdes.Long(), collector, new ThreadCache("testCache", 0, new MockStreamsMetrics(new Metrics())));
    context.setTime(0);
    store.init(context, store);
}