Use of org.apache.kafka.streams.state.internals.OffsetCheckpoint in project kafka by apache.
The class GlobalStateManagerImplTest, method shouldRestoreRecordsFromCheckpointToHighwatermark.
@Test
public void shouldRestoreRecordsFromCheckpointToHighwatermark() throws Exception {
    initializeConsumer(5, 6, t1);
    // Write a checkpoint at offset 6; restoration should replay the records
    // from the checkpointed offset up to the highwatermark.
    final OffsetCheckpoint offsetCheckpoint = new OffsetCheckpoint(
        new File(stateManager.baseDir(), ProcessorStateManager.CHECKPOINT_FILE_NAME));
    offsetCheckpoint.write(Collections.singletonMap(t1, 6L));
    stateManager.initialize(context);
    final TheStateRestoreCallback stateRestoreCallback = new TheStateRestoreCallback();
    stateManager.register(store1, false, stateRestoreCallback);
    assertEquals(5, stateRestoreCallback.restored.size());
}
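For context, here is a minimal standalone sketch of the OffsetCheckpoint write/read round trip used throughout these tests. The file path and topic name are illustrative; in the tests the file lives in the task or global state directory under ProcessorStateManager.CHECKPOINT_FILE_NAME. Only the constructor, write() and read() calls that appear in the snippets above are used:

import java.io.File;
import java.io.IOException;
import java.util.Collections;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.state.internals.OffsetCheckpoint;

public class OffsetCheckpointRoundTrip {
    public static void main(final String[] args) throws IOException {
        // Illustrative location; real code resolves the state directory itself.
        final File file = new File("/tmp/example-state", ".checkpoint");
        file.getParentFile().mkdirs();

        final OffsetCheckpoint checkpoint = new OffsetCheckpoint(file);
        final TopicPartition partition = new TopicPartition("topic1", 0);

        // write() persists the offsets to disk; read() loads them back as a map.
        checkpoint.write(Collections.singletonMap(partition, 6L));
        final Map<TopicPartition, Long> restored = checkpoint.read();
        System.out.println(restored); // {topic1-0=6}
    }
}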
Use of org.apache.kafka.streams.state.internals.OffsetCheckpoint in project kafka by apache.
The class StreamTaskTest, method shouldCheckpointOffsetsOnCommit.
@SuppressWarnings("unchecked")
@Test
public void shouldCheckpointOffsetsOnCommit() throws Exception {
    final String storeName = "test";
    final String changelogTopic = ProcessorStateManager.storeChangelogTopic("appId", storeName);
    // A persistent store, so that the task will write a checkpoint for it on commit.
    final InMemoryKeyValueStore inMemoryStore = new InMemoryKeyValueStore(storeName, null, null) {
        @Override
        public void init(final ProcessorContext context, final StateStore root) {
            context.register(root, true, null);
        }

        @Override
        public boolean persistent() {
            return true;
        }
    };
    final ProcessorTopology topology = new ProcessorTopology(
        Collections.<ProcessorNode>emptyList(),
        Collections.<String, SourceNode>emptyMap(),
        Collections.<String, SinkNode>emptyMap(),
        Collections.<StateStore>singletonList(inMemoryStore),
        Collections.singletonMap(storeName, changelogTopic),
        Collections.<StateStore>emptyList());
    final TopicPartition partition = new TopicPartition(changelogTopic, 0);
    // Report 543 as the last offset written to the changelog partition.
    final NoOpRecordCollector recordCollector = new NoOpRecordCollector() {
        @Override
        public Map<TopicPartition, Long> offsets() {
            return Collections.singletonMap(partition, 543L);
        }
    };
    restoreStateConsumer.updatePartitions(
        changelogTopic,
        Collections.singletonList(new PartitionInfo(changelogTopic, 0, null, new Node[0], new Node[0])));
    restoreStateConsumer.updateEndOffsets(Collections.singletonMap(partition, 0L));
    restoreStateConsumer.updateBeginningOffsets(Collections.singletonMap(partition, 0L));
    final StreamsMetrics streamsMetrics = new MockStreamsMetrics(new Metrics());
    final TaskId taskId = new TaskId(0, 0);
    final MockTime time = new MockTime();
    final StreamsConfig config = createConfig(baseDir);
    final StreamTask streamTask = new StreamTask(
        taskId, "appId", partitions, topology, consumer, changelogReader, config,
        streamsMetrics, stateDirectory, new ThreadCache("testCache", 0, streamsMetrics),
        time, recordCollector);
    time.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG));
    streamTask.commit();
    // The checkpoint records the next offset to read: last written offset + 1.
    final OffsetCheckpoint checkpoint = new OffsetCheckpoint(
        new File(stateDirectory.directoryForTask(taskId), ProcessorStateManager.CHECKPOINT_FILE_NAME));
    assertThat(checkpoint.read(), equalTo(Collections.singletonMap(partition, 544L)));
}
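A minimal sketch (not Kafka's internal code) of the convention the final assertion relies on: the checkpoint stores the next offset to read, so the last written changelog offset 543 is persisted as 544. The class, method and topic names here are illustrative:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;

public class CheckpointableOffsets {
    // Convert "last written offset" to "next offset to read" (offset + 1).
    static Map<TopicPartition, Long> toCheckpointable(final Map<TopicPartition, Long> written) {
        final Map<TopicPartition, Long> checkpointable = new HashMap<>();
        for (final Map.Entry<TopicPartition, Long> entry : written.entrySet()) {
            checkpointable.put(entry.getKey(), entry.getValue() + 1L);
        }
        return checkpointable;
    }

    public static void main(final String[] args) {
        final TopicPartition partition = new TopicPartition("appId-test-changelog", 0);
        // Prints {appId-test-changelog-0=544}
        System.out.println(toCheckpointable(Collections.singletonMap(partition, 543L)));
    }
}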
Use of org.apache.kafka.streams.state.internals.OffsetCheckpoint in project kafka by apache.
The class StandbyTaskTest, method testUpdate.
@SuppressWarnings("unchecked")
@Test
public void testUpdate() throws Exception {
    StreamsConfig config = createConfig(baseDir);
    StandbyTask task = new StandbyTask(taskId, applicationId, topicPartitions, topology,
        consumer, changelogReader, config, null, stateDirectory);
    restoreStateConsumer.assign(new ArrayList<>(task.changeLogPartitions()));
    // Buffer three records at offsets 10, 20 and 30 on partition2.
    for (ConsumerRecord<Integer, Integer> record : Arrays.asList(
        new ConsumerRecord<>(partition2.topic(), partition2.partition(), 10, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 1, 100),
        new ConsumerRecord<>(partition2.topic(), partition2.partition(), 20, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 2, 100),
        new ConsumerRecord<>(partition2.topic(), partition2.partition(), 30, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 3, 100))) {
        restoreStateConsumer.bufferRecord(record);
    }
    // Resume from the checkpointed offset when one exists, else from the beginning.
    for (Map.Entry<TopicPartition, Long> entry : task.checkpointedOffsets().entrySet()) {
        TopicPartition partition = entry.getKey();
        long offset = entry.getValue();
        if (offset >= 0) {
            restoreStateConsumer.seek(partition, offset);
        } else {
            restoreStateConsumer.seekToBeginning(singleton(partition));
        }
    }
    task.update(partition2, restoreStateConsumer.poll(100).records(partition2));
    StandbyContextImpl context = (StandbyContextImpl) task.context();
    MockStateStoreSupplier.MockStateStore store1 =
        (MockStateStoreSupplier.MockStateStore) context.getStateMgr().getStore(storeName1);
    MockStateStoreSupplier.MockStateStore store2 =
        (MockStateStoreSupplier.MockStateStore) context.getStateMgr().getStore(storeName2);
    // Only partition2's store receives the buffered records.
    assertEquals(Collections.emptyList(), store1.keys);
    assertEquals(Utils.mkList(1, 2, 3), store2.keys);
    task.closeStateManager(true);
    // Closing the state manager writes a checkpoint: last applied offset (30) + 1.
    File taskDir = stateDirectory.directoryForTask(taskId);
    OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(taskDir, ProcessorStateManager.CHECKPOINT_FILE_NAME));
    Map<TopicPartition, Long> offsets = checkpoint.read();
    assertEquals(1, offsets.size());
    assertEquals(new Long(30L + 1L), offsets.get(partition2));
}
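The seek loop above is a common restore pattern. Here is a minimal standalone sketch of it against the plain Consumer interface; the class and method names are illustrative, not part of the test:

import java.util.Collections;
import java.util.Map;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

public final class RestoreSeeks {
    // Resume each partition from its checkpointed offset when one exists
    // (offset >= 0); otherwise restore from the beginning of the changelog.
    static void seekToCheckpoints(final Consumer<?, ?> consumer,
                                  final Map<TopicPartition, Long> checkpointed) {
        for (final Map.Entry<TopicPartition, Long> entry : checkpointed.entrySet()) {
            final TopicPartition partition = entry.getKey();
            final long offset = entry.getValue();
            if (offset >= 0) {
                consumer.seek(partition, offset);
            } else {
                consumer.seekToBeginning(Collections.singleton(partition));
            }
        }
    }
}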
Use of org.apache.kafka.streams.state.internals.OffsetCheckpoint in project kafka by apache.
The class StandbyTaskTest, method testUpdateKTable.
@SuppressWarnings("unchecked")
@Test
public void testUpdateKTable() throws Exception {
    consumer.assign(Utils.mkList(ktable));
    Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(0L));
    consumer.commitSync(committedOffsets);
    restoreStateConsumer.updatePartitions("ktable1", Utils.mkList(
        new PartitionInfo("ktable1", 0, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("ktable1", 1, Node.noNode(), new Node[0], new Node[0]),
        new PartitionInfo("ktable1", 2, Node.noNode(), new Node[0], new Node[0])));
    StreamsConfig config = createConfig(baseDir);
    StandbyTask task = new StandbyTask(taskId, applicationId, ktablePartitions, ktableTopology,
        consumer, changelogReader, config, null, stateDirectory);
    restoreStateConsumer.assign(new ArrayList<>(task.changeLogPartitions()));
    // Buffer five records at offsets 10, 20, 30, 40 and 50.
    for (ConsumerRecord<Integer, Integer> record : Arrays.asList(
        new ConsumerRecord<>(ktable.topic(), ktable.partition(), 10, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 1, 100),
        new ConsumerRecord<>(ktable.topic(), ktable.partition(), 20, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 2, 100),
        new ConsumerRecord<>(ktable.topic(), ktable.partition(), 30, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 3, 100),
        new ConsumerRecord<>(ktable.topic(), ktable.partition(), 40, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 4, 100),
        new ConsumerRecord<>(ktable.topic(), ktable.partition(), 50, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 5, 100))) {
        restoreStateConsumer.bufferRecord(record);
    }
    for (Map.Entry<TopicPartition, Long> entry : task.checkpointedOffsets().entrySet()) {
        TopicPartition partition = entry.getKey();
        long offset = entry.getValue();
        if (offset >= 0) {
            restoreStateConsumer.seek(partition, offset);
        } else {
            restoreStateConsumer.seekToBeginning(singleton(partition));
        }
    }
    // The committed offset is 0, so no records may be applied yet.
    List<ConsumerRecord<byte[], byte[]>> remaining = task.update(ktable, restoreStateConsumer.poll(100).records(ktable));
    assertEquals(5, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(10L));
    consumer.commitSync(committedOffsets);
    // Update the offset limits.
    task.commit();
    // The offset limit (10) has not been passed yet, so nothing is applied.
    remaining = task.update(ktable, remaining);
    assertEquals(5, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(11L));
    consumer.commitSync(committedOffsets);
    // Update the offset limits.
    task.commit();
    // One record (offset 10) should be applied.
    remaining = task.update(ktable, remaining);
    assertEquals(4, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(45L));
    consumer.commitSync(committedOffsets);
    // Update the offset limits.
    task.commit();
    // The committed offset is now 45; all records except the last one should be applied.
    remaining = task.update(ktable, remaining);
    assertEquals(1, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(50L));
    consumer.commitSync(committedOffsets);
    // Update the offset limits.
    task.commit();
    // The committed offset is now 50; the last record (offset 50) still remains.
    remaining = task.update(ktable, remaining);
    assertEquals(1, remaining.size());
    committedOffsets.put(new TopicPartition(ktable.topic(), ktable.partition()), new OffsetAndMetadata(60L));
    consumer.commitSync(committedOffsets);
    // Update the offset limits.
    task.commit();
    // The committed offset is now 60; no records should be left.
    remaining = task.update(ktable, remaining);
    assertNull(remaining);
    task.closeStateManager(true);
    File taskDir = stateDirectory.directoryForTask(taskId);
    OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(taskDir, ProcessorStateManager.CHECKPOINT_FILE_NAME));
    Map<TopicPartition, Long> offsets = checkpoint.read();
    assertEquals(1, offsets.size());
    // Last applied offset (50) + 1.
    assertEquals(new Long(51L), offsets.get(ktable));
}
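The update/commit loop above exercises the standby task's offset-limit gating. Below is a minimal sketch of that gating under the assumption, borne out by the assertions above, that records at or beyond the committed-offset limit are handed back as "remaining" and retried after the limit advances. The class and method names are illustrative, not StandbyTask's actual internals:

import java.util.ArrayList;
import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;

public final class OffsetLimitGate {
    // Apply records strictly below the limit; return the rest so the caller
    // can retry them after the limit advances (e.g. after task.commit()).
    static List<ConsumerRecord<byte[], byte[]>> applyUpTo(
            final List<ConsumerRecord<byte[], byte[]>> records, final long limit) {
        final List<ConsumerRecord<byte[], byte[]>> remaining = new ArrayList<>();
        for (final ConsumerRecord<byte[], byte[]> record : records) {
            if (record.offset() < limit) {
                // apply to the state store (elided in this sketch)
            } else {
                remaining.add(record);
            }
        }
        return remaining;
    }
}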
Use of org.apache.kafka.streams.state.internals.OffsetCheckpoint in project apache-kafka-on-k8s by banzaicloud.
The class GlobalStateManagerImplTest, method writeCheckpoint.
private Map<TopicPartition, Long> writeCheckpoint() throws IOException {
    final OffsetCheckpoint checkpoint = new OffsetCheckpoint(checkpointFile);
    final Map<TopicPartition, Long> expected = Collections.singletonMap(t1, 1L);
    checkpoint.write(expected);
    return expected;
}
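A hypothetical use of this helper in a test body, relying only on the checkpointFile fixture and the OffsetCheckpoint API shown above: the written offsets can be read straight back from the checkpoint file.

final Map<TopicPartition, Long> expected = writeCheckpoint();
assertEquals(expected, new OffsetCheckpoint(checkpointFile).read());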