Search in sources:

Example 1 with RESTORE_END

Use of org.apache.kafka.test.MockStateRestoreListener.RESTORE_END in the Apache Kafka project.

From the class StoreChangelogReaderTest, method shouldRestoreToLimitInStandbyState.

// Verifies standby-task restoration against a "limit offset": when the changelog is
// treated as a source topic (changelogAsSource == true), restoration must not apply
// records at or beyond the main consumer's committed offset, and that committed
// offset is re-read at most once per commit interval (100ms here).
@Test
public void shouldRestoreToLimitInStandbyState() {
    // Loose mock of the task map: any lookup returns a generic Task mock.
    final Map<TaskId, Task> mockTasks = mock(Map.class);
    EasyMock.expect(mockTasks.get(null)).andReturn(mock(Task.class)).anyTimes();
    // Mark the changelog as a source topic so the committed offset acts as the restore limit.
    EasyMock.expect(standbyStateManager.changelogAsSource(tp)).andReturn(true).anyTimes();
    EasyMock.replay(mockTasks, standbyStateManager, storeMetadata, store);
    // Mutable "committed offset" reported by the consumer below; the test bumps it
    // to simulate the upstream producer/task committing further along the changelog.
    final AtomicLong offset = new AtomicLong(7L);
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            // Every queried partition reports the current value of `offset` as its committed offset.
            return partitions.stream().collect(Collectors.toMap(Function.identity(), partition -> new OffsetAndMetadata(offset.get())));
        }
    };
    final long now = time.milliseconds();
    final Properties properties = new Properties();
    // The limit offset may only be refreshed once per commit interval.
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig("test-reader", properties));
    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.setMainConsumer(consumer);
    changelogReader.transitToUpdateStandby();
    consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L));
    changelogReader.register(tp, standbyStateManager);
    // Freshly registered changelog: no end offset yet and nothing restored.
    assertEquals(0L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
    // First restore pass initializes the changelog: the end offset is taken from the
    // committed offset (7). No records exist yet, so nothing is restored, and the
    // restore listener is not invoked for standby updates.
    changelogReader.restore(mockTasks);
    assertNull(callback.restoreTopicPartition);
    assertNull(callback.storeNameCalledStates.get(RESTORE_START));
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
    // Seed seven records at offsets 5..11; only those below the limit (7) may be applied.
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 5L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 6L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 7L, "key".getBytes(), "value".getBytes()));
    // null key should be ignored
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 8L, null, "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 9L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 10L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 11L, "key".getBytes(), "value".getBytes()));
    // Offsets 5 and 6 are restored (limit is exclusive at 7); the null-key record at
    // offset 8 is dropped; the remaining four records (7, 9, 10, 11) stay buffered.
    // No batch/end callbacks fire for standby updates.
    changelogReader.restore(mockTasks);
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    assertNull(callback.storeNameCalledStates.get(RESTORE_END));
    assertNull(callback.storeNameCalledStates.get(RESTORE_BATCH));
    // Advance the committed offset, but only 100ms have elapsed — not strictly past
    // the commit interval, so the limit must stay at 7.
    offset.set(10L);
    time.setCurrentTimeMs(now + 100L);
    // should not try to read committed offsets if interval has not reached
    changelogReader.restore(mockTasks);
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    // One ms past the interval: the next restore refreshes the limit to 10, which
    // makes two buffered records (offsets 7 and 9) eligible — but does not apply them yet.
    time.setCurrentTimeMs(now + 101L);
    // the first restore would only update the limit, same below
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    // The follow-up restore applies the two eligible records (total 4); offsets 10 and 11 remain buffered.
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    offset.set(15L);
    // after we've updated once, the timer should be reset and we should not try again until next interval elapsed
    time.setCurrentTimeMs(now + 201L);
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    // once we are in update active mode, we should not try to update limit offset
    time.setCurrentTimeMs(now + 202L);
    changelogReader.enforceRestoreActive();
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    // Back in standby mode: the limit refreshes to 15, making the two remaining
    // buffered records (10, 11) eligible.
    changelogReader.transitToUpdateStandby();
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    // Apply them: total restored reaches 6 and the buffer drains.
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(6L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    // Fresh records 12..15: offsets 12, 13, 14 fall below the limit (15) and are
    // applied (total 9); the record at offset 15 stays buffered.
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 12L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 13L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 14L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 15L, "key".getBytes(), "value".getBytes()));
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(9L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(1, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
}
Also used : MockTime(org.apache.kafka.common.utils.MockTime) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) Mock(org.easymock.Mock) KafkaException(org.apache.kafka.common.KafkaException) StreamsException(org.apache.kafka.streams.errors.StreamsException) OffsetResetStrategy(org.apache.kafka.clients.consumer.OffsetResetStrategy) ACTIVE(org.apache.kafka.streams.processor.internals.Task.TaskType.ACTIVE) ListOffsetsResult(org.apache.kafka.clients.admin.ListOffsetsResult) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) LogContext(org.apache.kafka.common.utils.LogContext) After(org.junit.After) Duration(java.time.Duration) Map(java.util.Map) Parameterized(org.junit.runners.Parameterized) EasyMockSupport(org.easymock.EasyMockSupport) TopicPartition(org.apache.kafka.common.TopicPartition) RESTORE_END(org.apache.kafka.test.MockStateRestoreListener.RESTORE_END) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) Set(java.util.Set) PartitionInfo(org.apache.kafka.common.PartitionInfo) Collectors(java.util.stream.Collectors) MockAdminClient(org.apache.kafka.clients.admin.MockAdminClient) EasyMock.resetToDefault(org.easymock.EasyMock.resetToDefault) Utils.mkEntry(org.apache.kafka.common.utils.Utils.mkEntry) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) STANDBY_UPDATING(org.apache.kafka.streams.processor.internals.StoreChangelogReader.ChangelogReaderState.STANDBY_UPDATING) Assert.assertFalse(org.junit.Assert.assertFalse) Matchers.equalTo(org.hamcrest.Matchers.equalTo) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) MockType(org.easymock.MockType) RESTORE_START(org.apache.kafka.test.MockStateRestoreListener.RESTORE_START) StreamsConfig(org.apache.kafka.streams.StreamsConfig) TaskId(org.apache.kafka.streams.processor.TaskId) Assert.assertThrows(org.junit.Assert.assertThrows) RunWith(org.junit.runner.RunWith) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Function(java.util.function.Function) 
Collections.singletonMap(java.util.Collections.singletonMap) EasyMock.replay(org.easymock.EasyMock.replay) EasyMockRule(org.easymock.EasyMockRule) MatcherAssert.assertThat(org.hamcrest.MatcherAssert.assertThat) STANDBY(org.apache.kafka.streams.processor.internals.Task.TaskType.STANDBY) RESTORE_BATCH(org.apache.kafka.test.MockStateRestoreListener.RESTORE_BATCH) ACTIVE_RESTORING(org.apache.kafka.streams.processor.internals.StoreChangelogReader.ChangelogReaderState.ACTIVE_RESTORING) Before(org.junit.Before) EasyMock.anyObject(org.easymock.EasyMock.anyObject) StateStoreMetadata(org.apache.kafka.streams.processor.internals.ProcessorStateManager.StateStoreMetadata) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Properties(java.util.Properties) MockStateRestoreListener(org.apache.kafka.test.MockStateRestoreListener) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) EasyMock(org.easymock.EasyMock) OffsetSpec(org.apache.kafka.clients.admin.OffsetSpec) EasyMock.expectLastCall(org.easymock.EasyMock.expectLastCall) AtomicLong(java.util.concurrent.atomic.AtomicLong) Rule(org.junit.Rule) Matchers.hasItem(org.hamcrest.Matchers.hasItem) Assert.assertNull(org.junit.Assert.assertNull) StateStore(org.apache.kafka.streams.processor.StateStore) EasyMock.anyLong(org.easymock.EasyMock.anyLong) LogCaptureAppender(org.apache.kafka.streams.processor.internals.testutil.LogCaptureAppender) ListOffsetsOptions(org.apache.kafka.clients.admin.ListOffsetsOptions) StreamsTestUtils(org.apache.kafka.test.StreamsTestUtils) EasyMock.verify(org.easymock.EasyMock.verify) Collections(java.util.Collections) Assert.assertEquals(org.junit.Assert.assertEquals) TaskId(org.apache.kafka.streams.processor.TaskId) Utils.mkSet(org.apache.kafka.common.utils.Utils.mkSet) Set(java.util.Set) Properties(java.util.Properties) AtomicLong(java.util.concurrent.atomic.AtomicLong) TopicPartition(org.apache.kafka.common.TopicPartition) 
OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)

Aggregations

Duration (java.time.Duration)1 Collections (java.util.Collections)1 Collections.singletonMap (java.util.Collections.singletonMap)1 Map (java.util.Map)1 Properties (java.util.Properties)1 Set (java.util.Set)1 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)1 AtomicLong (java.util.concurrent.atomic.AtomicLong)1 Function (java.util.function.Function)1 Collectors (java.util.stream.Collectors)1 ListOffsetsOptions (org.apache.kafka.clients.admin.ListOffsetsOptions)1 ListOffsetsResult (org.apache.kafka.clients.admin.ListOffsetsResult)1 MockAdminClient (org.apache.kafka.clients.admin.MockAdminClient)1 OffsetSpec (org.apache.kafka.clients.admin.OffsetSpec)1 ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)1 MockConsumer (org.apache.kafka.clients.consumer.MockConsumer)1 OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata)1 OffsetResetStrategy (org.apache.kafka.clients.consumer.OffsetResetStrategy)1 KafkaException (org.apache.kafka.common.KafkaException)1 PartitionInfo (org.apache.kafka.common.PartitionInfo)1