Use of org.apache.kafka.streams.state.internals.OffsetCheckpoint in project kafka by apache.
From the class TaskManagerTest, method writeCheckpointFile:
private void writeCheckpointFile(final TaskId task, final Map<TopicPartition, Long> offsets) throws Exception {
    final File checkpointFile = getCheckpointFile(task);
    assertThat(checkpointFile.createNewFile(), is(true));

    // persist the given offsets into the freshly created checkpoint file
    new OffsetCheckpoint(checkpointFile).write(offsets);

    // EasyMock expectation: the state directory hands back this checkpoint file for the task
    expect(stateDirectory.checkpointFileFor(task)).andReturn(checkpointFile);
}
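For context, OffsetCheckpoint is a small utility that persists a Map<TopicPartition, Long> to a plain-text file and reads it back. Here is a minimal round-trip sketch; it is not part of the Kafka test suite, the file location is an assumption, and since the class lives in a state.internals package it is internal API used here for illustration only:

import java.io.File;
import java.nio.file.Files;
import java.util.Collections;
import java.util.Map;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.state.internals.OffsetCheckpoint;

public class OffsetCheckpointRoundTrip {
    public static void main(final String[] args) throws Exception {
        // hypothetical location; a real Streams task keeps ".checkpoint" in its state directory
        final File file = new File(Files.createTempDirectory("state-dir").toFile(), ".checkpoint");
        final OffsetCheckpoint checkpoint = new OffsetCheckpoint(file);

        // write(...) persists one offset per changelog partition
        checkpoint.write(Collections.singletonMap(new TopicPartition("app-store-changelog", 0), 42L));

        // read() parses the file back into a Map<TopicPartition, Long>
        final Map<TopicPartition, Long> offsets = checkpoint.read();
        System.out.println(offsets); // {app-store-changelog-0=42}
    }
}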
Use of org.apache.kafka.streams.state.internals.OffsetCheckpoint in project kafka by apache.
From the class StreamThreadTest, method shouldUpdateStandbyTask:
@SuppressWarnings("unchecked")
@Test
public void shouldUpdateStandbyTask() throws Exception {
    final String storeName1 = "count-one";
    final String storeName2 = "table-two";
    final String changelogName1 = APPLICATION_ID + "-" + storeName1 + "-changelog";
    final String changelogName2 = APPLICATION_ID + "-" + storeName2 + "-changelog";
    final TopicPartition partition1 = new TopicPartition(changelogName1, 1);
    final TopicPartition partition2 = new TopicPartition(changelogName2, 1);

    // build a topology with two state stores: a count aggregation store and a table store
    internalStreamsBuilder
        .stream(Collections.singleton(topic1), consumed)
        .groupByKey()
        .count(Materialized.as(storeName1));
    final MaterializedInternal<Object, Object, KeyValueStore<Bytes, byte[]>> materialized =
        new MaterializedInternal<>(Materialized.as(storeName2), internalStreamsBuilder, "");
    internalStreamsBuilder.table(topic2, new ConsumedInternal<>(), materialized);
    internalStreamsBuilder.buildAndOptimizeTopology();
    final StreamThread thread = createStreamThread(CLIENT_ID, config, false);
    final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer;
    restoreConsumer.updatePartitions(
        changelogName1,
        Collections.singletonList(new PartitionInfo(changelogName1, 1, null, new Node[0], new Node[0])));
    restoreConsumer.updateEndOffsets(Collections.singletonMap(partition1, 10L));
    restoreConsumer.updateBeginningOffsets(Collections.singletonMap(partition1, 0L));
    restoreConsumer.updateEndOffsets(Collections.singletonMap(partition2, 10L));
    restoreConsumer.updateBeginningOffsets(Collections.singletonMap(partition2, 0L));

    // pre-populate a checkpoint file for task3 so that store2's restoration can skip ahead
    final OffsetCheckpoint checkpoint =
        new OffsetCheckpoint(new File(stateDirectory.getOrCreateDirectoryForTask(task3), CHECKPOINT_FILE_NAME));
    checkpoint.write(Collections.singletonMap(partition2, 5L));
    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());

    // assign one standby partition per task, no active tasks
    final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>();
    standbyTasks.put(task1, Collections.singleton(t1p1));
    standbyTasks.put(task3, Collections.singleton(t2p1));
    thread.taskManager().handleAssignment(emptyMap(), standbyTasks);
    thread.taskManager().tryToCompleteRestoration(mockTime.milliseconds(), null);
    thread.rebalanceListener().onPartitionsAssigned(Collections.emptyList());

    thread.runOnce();
    final StandbyTask standbyTask1 = standbyTask(thread.taskManager(), t1p1);
    final StandbyTask standbyTask2 = standbyTask(thread.taskManager(), t2p1);
    assertEquals(task1, standbyTask1.id());
    assertEquals(task3, standbyTask2.id());
    final KeyValueStore<Object, Long> store1 = (KeyValueStore<Object, Long>) standbyTask1.getStore(storeName1);
    final KeyValueStore<Object, Long> store2 = (KeyValueStore<Object, Long>) standbyTask2.getStore(storeName2);
    assertEquals(0L, store1.approximateNumEntries());
    assertEquals(0L, store2.approximateNumEntries());
    // let store1 be restored from offset 0; store2 resumes after the checkpointed offset 5,
    // so only the records at offsets 6..9 are applied to it (hence 4 entries asserted below)
    for (long i = 0L; i < 10L; i++) {
        restoreConsumer.addRecord(new ConsumerRecord<>(changelogName1, 1, i, ("K" + i).getBytes(), ("V" + i).getBytes()));
        restoreConsumer.addRecord(new ConsumerRecord<>(changelogName2, 1, i, ("K" + i).getBytes(), ("V" + i).getBytes()));
    }

    thread.runOnce();

    assertEquals(10L, store1.approximateNumEntries());
    assertEquals(4L, store2.approximateNumEntries());

    thread.taskManager().shutdown(true);
}
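The checkpoint file the test pre-populates is plain text. The sketch below, which is not from the Kafka code base, writes the same single entry as the test and dumps the raw file contents; the layout described in the comments (a format-version line, an entry count, then one "topic partition offset" triple per line) matches the OffsetCheckpoint implementation as I understand it, but as internal API it may change between versions:

import java.io.File;
import java.nio.file.Files;
import java.util.Collections;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.state.internals.OffsetCheckpoint;

public class DumpCheckpointFile {
    public static void main(final String[] args) throws Exception {
        // hypothetical standalone location instead of a task's state directory
        final File file = new File(Files.createTempDirectory("state-dir").toFile(), ".checkpoint");
        new OffsetCheckpoint(file).write(Collections.singletonMap(new TopicPartition("table-two-changelog", 1), 5L));

        // expected layout:
        //   0                          <- format version
        //   1                          <- number of entries
        //   table-two-changelog 1 5    <- topic, partition, offset
        Files.readAllLines(file.toPath()).forEach(System.out::println);
    }
}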