Use of org.apache.kafka.streams.processor.StateStore in project kafka by apache.
The class StreamTaskTest, method shouldCheckpointOffsetsOnCommit:
@SuppressWarnings("unchecked")
@Test
public void shouldCheckpointOffsetsOnCommit() throws Exception {
    final String storeName = "test";
    final String changelogTopic = ProcessorStateManager.storeChangelogTopic("appId", storeName);
    // An in-memory store that claims to be persistent, so the task checkpoints its changelog offset.
    final InMemoryKeyValueStore inMemoryStore = new InMemoryKeyValueStore(storeName, null, null) {
        @Override
        public void init(final ProcessorContext context, final StateStore root) {
            context.register(root, true, null);
        }

        @Override
        public boolean persistent() {
            return true;
        }
    };
    final ProcessorTopology topology = new ProcessorTopology(
        Collections.<ProcessorNode>emptyList(),
        Collections.<String, SourceNode>emptyMap(),
        Collections.<String, SinkNode>emptyMap(),
        Collections.<StateStore>singletonList(inMemoryStore),
        Collections.singletonMap(storeName, changelogTopic),
        Collections.<StateStore>emptyList());
    final TopicPartition partition = new TopicPartition(changelogTopic, 0);
    // A record collector that reports a fixed last-written offset for the changelog partition.
    final NoOpRecordCollector recordCollector = new NoOpRecordCollector() {
        @Override
        public Map<TopicPartition, Long> offsets() {
            return Collections.singletonMap(partition, 543L);
        }
    };
    restoreStateConsumer.updatePartitions(
        changelogTopic,
        Collections.singletonList(new PartitionInfo(changelogTopic, 0, null, new Node[0], new Node[0])));
    restoreStateConsumer.updateEndOffsets(Collections.singletonMap(partition, 0L));
    restoreStateConsumer.updateBeginningOffsets(Collections.singletonMap(partition, 0L));
    final StreamsMetrics streamsMetrics = new MockStreamsMetrics(new Metrics());
    final TaskId taskId = new TaskId(0, 0);
    final MockTime time = new MockTime();
    final StreamsConfig config = createConfig(baseDir);
    final StreamTask streamTask = new StreamTask(taskId, "appId", partitions, topology, consumer,
        changelogReader, config, streamsMetrics, stateDirectory,
        new ThreadCache("testCache", 0, streamsMetrics), time, recordCollector);
    time.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG));
    streamTask.commit();
    final OffsetCheckpoint checkpoint = new OffsetCheckpoint(
        new File(stateDirectory.directoryForTask(taskId), ProcessorStateManager.CHECKPOINT_FILE_NAME));
    // The checkpoint holds the last written offset plus one, i.e. the next offset to restore from.
    assertThat(checkpoint.read(), equalTo(Collections.singletonMap(partition, 544L)));
}
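Note the off-by-one in the final assertion: the record collector reports 543 as the last offset written to the changelog, and commit() checkpoints 543 + 1 = 544, the offset from which restoration should resume. A minimal sketch of the checkpoint-file round trip, assuming the same OffsetCheckpoint and TestUtils classes used above (changelogPartition and sketchCheckpoint are hypothetical names):

// Sketch: write and re-read a checkpoint file; OffsetCheckpoint persists a Map<TopicPartition, Long>.
final TopicPartition changelogPartition = new TopicPartition("appId-test-changelog", 0);
final OffsetCheckpoint sketchCheckpoint = new OffsetCheckpoint(
    new File(TestUtils.tempDirectory(), ProcessorStateManager.CHECKPOINT_FILE_NAME));
// 544 = last written changelog offset (543) + 1: the next offset to restore from.
sketchCheckpoint.write(Collections.singletonMap(changelogPartition, 544L));
assertThat(sketchCheckpoint.read(), equalTo(Collections.singletonMap(changelogPartition, 544L)));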
Use of org.apache.kafka.streams.processor.StateStore in project kafka by apache.
The class StateStoreTestUtils, method newKeyValueStore:
@SuppressWarnings("unchecked")
public static <K, V> KeyValueStore<K, V> newKeyValueStore(String name, Class<K> keyType, Class<V> valueType) {
    final InMemoryKeyValueStoreSupplier<K, V> supplier = new InMemoryKeyValueStoreSupplier<>(
        name, null, null, new MockTime(), false, Collections.<String, String>emptyMap());
    final StateStore stateStore = supplier.get();
    // Initialize the store with a mock context wired up with built-in serdes for the given types.
    stateStore.init(
        new MockProcessorContext(StateSerdes.withBuiltinTypes(name, keyType, valueType), new NoOpRecordCollector()),
        stateStore);
    return (KeyValueStore<K, V>) stateStore;
}
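A typical call site for this helper, using the built-in serdes that StateSerdes.withBuiltinTypes resolves for String and Long (the store name "counts" is illustrative):

// Create a typed in-memory store, then write and read back a value through it.
final KeyValueStore<String, Long> store = StateStoreTestUtils.newKeyValueStore("counts", String.class, Long.class);
store.put("word", 1L);
assertThat(store.get("word"), equalTo(1L));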
Use of org.apache.kafka.streams.processor.StateStore in project kafka by apache.
The class GlobalStateManagerImplTest, method before:
@Before
public void before() throws IOException {
    final Map<String, String> storeToTopic = new HashMap<>();
    storeToTopic.put("t1-store", "t1");
    storeToTopic.put("t2-store", "t2");

    final Map<StateStore, ProcessorNode> storeToProcessorNode = new HashMap<>();
    store1 = new NoOpReadOnlyStore<>("t1-store");
    storeToProcessorNode.put(store1, new MockProcessorNode(-1));
    store2 = new NoOpReadOnlyStore<>("t2-store");
    storeToProcessorNode.put(store2, new MockProcessorNode(-1));

    // A topology with no processors: only the two global state stores and their source topics.
    topology = new ProcessorTopology(
        Collections.<ProcessorNode>emptyList(),
        Collections.<String, SourceNode>emptyMap(),
        Collections.<String, SinkNode>emptyMap(),
        Collections.<StateStore>emptyList(),
        storeToTopic,
        Arrays.<StateStore>asList(store1, store2));

    context = new NoOpProcessorContext();
    stateDirPath = TestUtils.tempDirectory().getPath();
    stateDirectory = new StateDirectory("appId", stateDirPath, time);
    consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    stateManager = new GlobalStateManagerImpl(topology, consumer, stateDirectory);
    checkpointFile = new File(stateManager.baseDir(), ProcessorStateManager.CHECKPOINT_FILE_NAME);
}
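With this fixture in place, a test can drive the state manager directly. A hedged sketch of such a test (the test name is hypothetical; Utils.mkSet is Kafka's helper for building sets inline):

@Test
public void shouldInitializeAndReturnStoreNames() {
    // initialize() registers both global stores and returns their names.
    final Set<String> storeNames = stateManager.initialize(context);
    assertThat(storeNames, equalTo(Utils.mkSet("t1-store", "t2-store")));
}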
Use of org.apache.kafka.streams.processor.StateStore in project kafka by apache.
The class GlobalStreamThreadTest, method shouldCloseStateStoresOnClose:
@Test
public void shouldCloseStateStoresOnClose() throws Exception {
    initializeConsumer();
    globalStreamThread.start();
    final StateStore globalStore = builder.globalStateStores().get("bar");
    assertTrue(globalStore.isOpen());
    globalStreamThread.close();
    globalStreamThread.join();
    assertFalse(globalStore.isOpen());
}
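These assertions rely on the StateStore lifecycle contract: init() must leave the store open and close() must leave it closed. A minimal skeleton of that contract (SketchStore is a hypothetical class, not from the Kafka sources):

// Hypothetical store illustrating the isOpen()/close() behavior the test above asserts.
class SketchStore implements StateStore {
    private final String name;
    private volatile boolean open = false;

    SketchStore(final String name) {
        this.name = name;
    }

    @Override
    public String name() {
        return name;
    }

    @Override
    public void init(final ProcessorContext context, final StateStore root) {
        context.register(root, false, null); // no changelogging, no restore callback
        open = true;
    }

    @Override
    public void flush() { }

    @Override
    public void close() {
        open = false;
    }

    @Override
    public boolean persistent() {
        return false;
    }

    @Override
    public boolean isOpen() {
        return open;
    }
}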
Use of org.apache.kafka.streams.processor.StateStore in project kafka by apache.
The class GlobalStateManagerImpl, method initialize:
@Override
public Set<String> initialize(final InternalProcessorContext processorContext) {
    // Acquire the global state directory lock before reading or writing any on-disk state.
    try {
        if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) {
            throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir));
        }
    } catch (IOException e) {
        throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir));
    }

    try {
        this.checkpointableOffsets.putAll(checkpoint.read());
    } catch (IOException e) {
        // Release the lock before propagating; log the unlock failure itself (e1, not e).
        try {
            stateDirectory.unlockGlobalState();
        } catch (IOException e1) {
            log.error("failed to unlock the global state directory", e1);
        }
        throw new StreamsException("Failed to read checkpoints for global state stores", e);
    }

    final List<StateStore> stateStores = topology.globalStateStores();
    for (final StateStore stateStore : stateStores) {
        globalStoreNames.add(stateStore.name());
        stateStore.init(processorContext, stateStore);
    }
    return Collections.unmodifiableSet(globalStoreNames);
}
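Callers of initialize() must handle the LockException thrown above, for instance when another instance still holds the global state directory lock. A hedged usage sketch:

// Sketch: surface lock contention to the caller instead of letting the thread die silently.
try {
    final Set<String> globalStores = stateManager.initialize(processorContext);
    log.info("Initialized global state stores: {}", globalStores);
} catch (final LockException e) {
    // Another thread or process holds the global state directory lock; retry or fail fast.
    throw new StreamsException("Could not initialize global state", e);
}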