Use of org.apache.kafka.test.MockStateRestoreListener.RESTORE_BATCH in project kafka by apache.
From the class StoreChangelogReaderTest, method shouldRestoreToLimitInStandbyState.
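For context, the callback wired into the StoreChangelogReader below is a MockStateRestoreListener: it records the last restored topic partition and which restore phases (RESTORE_START, RESTORE_BATCH, RESTORE_END) have been reported, so the test can assert that none of them fire while restoring in standby mode. The following is a minimal sketch of such a listener, not the actual implementation; the field names and RESTORE_* constants follow the test's usage, while the concrete string values and the class name RecordingRestoreListener are assumptions.

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.processor.StateRestoreListener;

// Hypothetical stand-in for MockStateRestoreListener: it remembers the last restored
// partition and records each restore phase that was reported, keyed by the RESTORE_* constants.
public class RecordingRestoreListener implements StateRestoreListener {

    // the concrete string values are assumptions; the test only needs distinct keys
    public static final String RESTORE_START = "restore_start";
    public static final String RESTORE_BATCH = "restore_batch";
    public static final String RESTORE_END = "restore_end";

    public TopicPartition restoreTopicPartition;
    public final Map<String, String> storeNameCalledStates = new HashMap<>();

    @Override
    public void onRestoreStart(final TopicPartition topicPartition, final String storeName, final long startingOffset, final long endingOffset) {
        restoreTopicPartition = topicPartition;
        storeNameCalledStates.put(RESTORE_START, storeName);
    }

    @Override
    public void onBatchRestored(final TopicPartition topicPartition, final String storeName, final long batchEndOffset, final long numRestored) {
        restoreTopicPartition = topicPartition;
        storeNameCalledStates.put(RESTORE_BATCH, storeName);
    }

    @Override
    public void onRestoreEnd(final TopicPartition topicPartition, final String storeName, final long totalRestored) {
        restoreTopicPartition = topicPartition;
        storeNameCalledStates.put(RESTORE_END, storeName);
    }
}

The test itself follows.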
@Test
public void shouldRestoreToLimitInStandbyState() {
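    // the changelog is also a source topic (changelogAsSource returns true), so in standby mode
    // the restore limit is the committed offset rather than the log-end offset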
    final Map<TaskId, Task> mockTasks = mock(Map.class);
    EasyMock.expect(mockTasks.get(null)).andReturn(mock(Task.class)).anyTimes();
    EasyMock.expect(standbyStateManager.changelogAsSource(tp)).andReturn(true).anyTimes();
    EasyMock.replay(mockTasks, standbyStateManager, storeMetadata, store);

    final AtomicLong offset = new AtomicLong(7L);
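    // the consumer below reports `offset` as the committed position for every partition;
    // the reader polls it as the restore limit for this standby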
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            return partitions.stream().collect(Collectors.toMap(Function.identity(), partition -> new OffsetAndMetadata(offset.get())));
        }
    };

    final long now = time.milliseconds();
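    // a small commit interval: the reader re-reads the committed offset to advance the limit at most once per interval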
    final Properties properties = new Properties();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig("test-reader", properties));
    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.setMainConsumer(consumer);
    changelogReader.transitToUpdateStandby();

    consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L));
    changelogReader.register(tp, standbyStateManager);
    assertEquals(0L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());

    changelogReader.restore(mockTasks);

    assertNull(callback.restoreTopicPartition);
    assertNull(callback.storeNameCalledStates.get(RESTORE_START));
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());

    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 5L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 6L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 7L, "key".getBytes(), "value".getBytes()));
    // null key should be ignored
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 8L, null, "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 9L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 10L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 11L, "key".getBytes(), "value".getBytes()));

    changelogReader.restore(mockTasks);

    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    assertNull(callback.storeNameCalledStates.get(RESTORE_END));
    assertNull(callback.storeNameCalledStates.get(RESTORE_BATCH));

    offset.set(10L);
    time.setCurrentTimeMs(now + 100L);
    // should not try to read committed offsets if interval has not reached
    changelogReader.restore(mockTasks);

    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    time.setCurrentTimeMs(now + 101L);
    // the first restore would only update the limit, same below
    changelogReader.restore(mockTasks);

    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    changelogReader.restore(mockTasks);

    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    offset.set(15L);
    // after we've updated once, the timer should be reset and we should not try again until the next interval has elapsed
    time.setCurrentTimeMs(now + 201L);
    changelogReader.restore(mockTasks);

    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    // once we are in update active mode, we should not try to update the limit offset
    time.setCurrentTimeMs(now + 202L);
    changelogReader.enforceRestoreActive();
    changelogReader.restore(mockTasks);

    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    changelogReader.transitToUpdateStandby();
    changelogReader.restore(mockTasks);

    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    changelogReader.restore(mockTasks);

    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(6L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 12L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 13L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 14L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 15L, "key".getBytes(), "value".getBytes()));

    changelogReader.restore(mockTasks);

    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(9L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(1, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
}
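As a usage note, application code does not hand a restore listener to StoreChangelogReader directly; a listener is normally attached to the KafkaStreams instance before the application is started. A minimal sketch, assuming the hypothetical RecordingRestoreListener from above and a pre-built topology and Properties named builder and props:

final KafkaStreams streams = new KafkaStreams(builder.build(), props);
// the global restore listener must be set before start()
streams.setGlobalStateRestoreListener(new RecordingRestoreListener());
streams.start();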