Example 1 with Mock

Use of org.easymock.Mock in the Apache Kafka project.

The class StoreChangelogReaderTest, method shouldRestoreToLimitInStandbyState:

@Test
public void shouldRestoreToLimitInStandbyState() {
    final Map<TaskId, Task> mockTasks = mock(Map.class);
    EasyMock.expect(mockTasks.get(null)).andReturn(mock(Task.class)).anyTimes();
    EasyMock.expect(standbyStateManager.changelogAsSource(tp)).andReturn(true).anyTimes();
    EasyMock.replay(mockTasks, standbyStateManager, storeMetadata, store);
    // Committed offset reported by the main consumer; bumped later to advance the standby restore limit.
    final AtomicLong offset = new AtomicLong(7L);
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            return partitions.stream().collect(Collectors.toMap(Function.identity(), partition -> new OffsetAndMetadata(offset.get())));
        }
    };
    final long now = time.milliseconds();
    final Properties properties = new Properties();
    // The commit interval bounds how often the reader refreshes committed offsets for the standby limit.
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig("test-reader", properties));
    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.setMainConsumer(consumer);
    changelogReader.transitToUpdateStandby();
    consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L));
    changelogReader.register(tp, standbyStateManager);
    assertEquals(0L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
    changelogReader.restore(mockTasks);
    assertNull(callback.restoreTopicPartition);
    assertNull(callback.storeNameCalledStates.get(RESTORE_START));
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 5L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 6L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 7L, "key".getBytes(), "value".getBytes()));
    // null key should be ignored
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 8L, null, "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 9L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 10L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 11L, "key".getBytes(), "value".getBytes()));
    changelogReader.restore(mockTasks);
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    assertNull(callback.storeNameCalledStates.get(RESTORE_END));
    assertNull(callback.storeNameCalledStates.get(RESTORE_BATCH));
    offset.set(10L);
    time.setCurrentTimeMs(now + 100L);
    // should not try to read committed offsets if interval has not reached
    changelogReader.restore(mockTasks);
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    time.setCurrentTimeMs(now + 101L);
    // the first restore would only update the limit, same below
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    offset.set(15L);
    // after we've updated once, the timer should be reset and we should not try again until next interval elapsed
    time.setCurrentTimeMs(now + 201L);
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    // once we are in update active mode, we should not try to update limit offset
    time.setCurrentTimeMs(now + 202L);
    changelogReader.enforceRestoreActive();
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    changelogReader.transitToUpdateStandby();
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(6L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 12L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 13L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 14L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 15L, "key".getBytes(), "value".getBytes()));
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(9L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(1, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
}
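
The restore calls above depend on the standard EasyMock record/replay/verify lifecycle (mock, expect ... andReturn ... anyTimes, replay, verify). Below is a minimal, self-contained sketch of that lifecycle; the ChangelogTracker interface and its return values are illustrative and not part of the Kafka codebase.

import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.mock;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

public class EasyMockLifecycleSketch {

    // Illustrative interface; it stands in for the Kafka state manager and is not part of the project.
    interface ChangelogTracker {
        boolean changelogAsSource(String partition);
        long offset();
    }

    public static void main(final String[] args) {
        // 1. Create the mock in record mode.
        final ChangelogTracker tracker = mock(ChangelogTracker.class);

        // 2. Record expectations; anyTimes() allows the call zero or more times.
        expect(tracker.changelogAsSource("changelog-0")).andReturn(true).anyTimes();
        expect(tracker.offset()).andReturn(5L).anyTimes();

        // 3. Switch to replay mode so the mock serves the recorded answers.
        replay(tracker);

        // Code under test would interact with the mock here.
        System.out.println(tracker.changelogAsSource("changelog-0")); // true
        System.out.println(tracker.offset());                         // 5

        // 4. Verify that every recorded expectation was satisfied.
        verify(tracker);
    }
}

In the test this same lifecycle is driven across several collaborators at once with EasyMock.replay(mockTasks, standbyStateManager, storeMetadata, store).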

Example 2 with Mock

Use of org.easymock.Mock in the Apache Kafka project.

The class StoreChangelogReaderTest, method shouldRequestCommittedOffsetsAndHandleTimeoutException:

@Test
public void shouldRequestCommittedOffsetsAndHandleTimeoutException() {
    final TaskId taskId = new TaskId(0, 0);
    final Task mockTask = mock(Task.class);
    if (type == ACTIVE) {
        mockTask.clearTaskTimeout();
    }
    mockTask.maybeInitTaskTimeoutOrThrow(anyLong(), anyObject());
    EasyMock.expectLastCall();
    EasyMock.expect(stateManager.changelogAsSource(tp)).andReturn(true).anyTimes();
    EasyMock.expect(storeMetadata.offset()).andReturn(5L).anyTimes();
    EasyMock.expect(stateManager.changelogOffsets()).andReturn(singletonMap(tp, 5L));
    EasyMock.expect(stateManager.taskId()).andReturn(taskId).anyTimes();
    EasyMock.replay(mockTask, stateManager, storeMetadata, store);
    // Tracks whether committed() has been invoked; the first call times out, subsequent calls succeed.
    final AtomicBoolean functionCalled = new AtomicBoolean(false);
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            if (functionCalled.get()) {
                return partitions.stream().collect(Collectors.toMap(Function.identity(), partition -> new OffsetAndMetadata(10L)));
            } else {
                functionCalled.set(true);
                throw new TimeoutException("KABOOM!");
            }
        }
    };
    // End offset of the changelog partition as reported by the mock admin client.
    adminClient.updateEndOffsets(Collections.singletonMap(tp, 20L));
    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.setMainConsumer(consumer);
    changelogReader.register(tp, stateManager);
    changelogReader.restore(Collections.singletonMap(taskId, mockTask));
    assertEquals(type == ACTIVE ? StoreChangelogReader.ChangelogState.REGISTERED : StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    if (type == ACTIVE) {
        assertNull(changelogReader.changelogMetadata(tp).endOffset());
    } else {
        assertEquals(0L, (long) changelogReader.changelogMetadata(tp).endOffset());
    }
    assertTrue(functionCalled.get());
    verify(mockTask);
    resetToDefault(mockTask);
    if (type == ACTIVE) {
        mockTask.clearTaskTimeout();
        mockTask.clearTaskTimeout();
        expectLastCall();
    }
    replay(mockTask);
    changelogReader.restore(Collections.singletonMap(taskId, mockTask));
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(type == ACTIVE ? 10L : 0L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(6L, consumer.position(tp));
    verify(mockTask);
}
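
The second restore phase above re-records the mock with resetToDefault(mockTask) followed by new expectations and replay(mockTask). A minimal sketch of that reset/re-record cycle follows, using an illustrative Worker interface in place of Kafka's Task.

import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.mock;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.resetToDefault;
import static org.easymock.EasyMock.verify;

public class EasyMockResetSketch {

    // Illustrative stand-in for the Task interface used by the test; not part of Kafka.
    interface Worker {
        void clearTimeout();
        int attempts();
    }

    public static void main(final String[] args) {
        final Worker worker = mock(Worker.class);

        // First round: record, replay, exercise, verify.
        worker.clearTimeout();                    // a void call in record mode becomes an expectation
        expect(worker.attempts()).andReturn(1);
        replay(worker);

        worker.clearTimeout();
        System.out.println(worker.attempts());    // 1
        verify(worker);

        // Wipe the recorded behaviour and record a fresh round for the next phase.
        resetToDefault(worker);
        expect(worker.attempts()).andReturn(2);
        replay(worker);

        System.out.println(worker.attempts());    // 2
        verify(worker);
    }
}

In the test this lets the same mockTask carry one set of expectations for the timeout path and another for the successful retry, each checked with verify(mockTask).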
