use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.
the class StoreChangelogReaderTest method shouldRestoreToLimitInStandbyState.
@Test
public void shouldRestoreToLimitInStandbyState() {
    final Map<TaskId, Task> mockTasks = mock(Map.class);
    EasyMock.expect(mockTasks.get(null)).andReturn(mock(Task.class)).anyTimes();
    EasyMock.expect(standbyStateManager.changelogAsSource(tp)).andReturn(true).anyTimes();
    EasyMock.replay(mockTasks, standbyStateManager, storeMetadata, store);

    final AtomicLong offset = new AtomicLong(7L);
    // override committed() so the test controls the committed offset, which serves as
    // the restore limit for a source-topic changelog in standby state
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            return partitions.stream().collect(Collectors.toMap(Function.identity(), partition -> new OffsetAndMetadata(offset.get())));
        }
    };

    final long now = time.milliseconds();
    final Properties properties = new Properties();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig("test-reader", properties));
    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.setMainConsumer(consumer);
    changelogReader.transitToUpdateStandby();

    consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L));
    changelogReader.register(tp, standbyStateManager);
    assertEquals(0L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());

    changelogReader.restore(mockTasks);
    assertNull(callback.restoreTopicPartition);
    assertNull(callback.storeNameCalledStates.get(RESTORE_START));
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());

    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 5L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 6L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 7L, "key".getBytes(), "value".getBytes()));
    // a record with a null key should be ignored
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 8L, null, "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 9L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 10L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 11L, "key".getBytes(), "value".getBytes()));

    changelogReader.restore(mockTasks);
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    assertNull(callback.storeNameCalledStates.get(RESTORE_END));
    assertNull(callback.storeNameCalledStates.get(RESTORE_BATCH));

    offset.set(10L);
    time.setCurrentTimeMs(now + 100L);
    // should not try to read committed offsets if the commit interval has not yet elapsed
    changelogReader.restore(mockTasks);
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    time.setCurrentTimeMs(now + 101L);
    // the first restore call after the interval only updates the limit; the buffered
    // records are restored on the next call (same pattern below)
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    offset.set(15L);
    // after we've updated once, the timer should be reset and we should not try again
    // until the next interval has elapsed
    time.setCurrentTimeMs(now + 201L);
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    // once we are switched back to active-restoring mode, we should not try to update the limit offset
    time.setCurrentTimeMs(now + 202L);
    changelogReader.enforceRestoreActive();
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    changelogReader.transitToUpdateStandby();
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(6L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());

    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 12L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 13L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 14L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 15L, "key".getBytes(), "value".getBytes()));

    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(9L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(1, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
}
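The test above leans on a handful of MockConsumer primitives that recur throughout this page: updateBeginningOffsets seeds the partition's starting position, addRecord enqueues data to be returned by the next poll, and overriding committed lets the test dictate what the reader believes the committed (limit) offset is. Below is a minimal, self-contained sketch of that core setup; the topic name and values are illustrative, not taken from the test.

import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerSketch {
    public static void main(final String[] args) {
        final TopicPartition tp = new TopicPartition("demo-topic", 0);
        final MockConsumer<byte[], byte[]> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);

        // assign the partition and seed its beginning offset so the first poll can position itself
        consumer.assign(Collections.singleton(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));

        // enqueue a record; MockConsumer hands it back on the next poll
        consumer.addRecord(new ConsumerRecord<>("demo-topic", 0, 0L, "key".getBytes(), "value".getBytes()));

        final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(100));
        System.out.println(records.count()); // 1
    }
}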
use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.
the class StoreChangelogReaderTest method shouldThrowIfUnsubscribeFail.
@Test
public void shouldThrowIfUnsubscribeFail() {
    EasyMock.replay(stateManager, storeMetadata, store);
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public void unsubscribe() {
            throw kaboom;
        }
    };
    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    final StreamsException thrown = assertThrows(StreamsException.class, changelogReader::clear);
    assertEquals(kaboom, thrown.getCause());
}
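The failure-injection pattern here is worth noting: rather than reaching for a mocking framework, the test subclasses MockConsumer inline and overrides exactly the one method it wants to fail. clear() delegates to unsubscribe(), so the injected kaboom surfaces there, and the assertions verify that StoreChangelogReader rethrows it wrapped in a StreamsException with the original exception as the cause. MockConsumer does expose setPollException for injecting poll() failures, but there is no comparable built-in hook for unsubscribe(), hence the anonymous subclass.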
use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.
the class StoreChangelogReaderTest method shouldNotUpdateLimitForNonSourceStandbyChangelog.
@Test
public void shouldNotUpdateLimitForNonSourceStandbyChangelog() {
    final Map<TaskId, Task> mockTasks = mock(Map.class);
    EasyMock.expect(mockTasks.get(null)).andReturn(mock(Task.class)).anyTimes();
    EasyMock.expect(standbyStateManager.changelogAsSource(tp)).andReturn(false).anyTimes();
    EasyMock.replay(mockTasks, standbyStateManager, storeMetadata, store);

    // committed() must never be consulted for a non-source changelog; fail loudly if it is
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            throw new AssertionError("Should not try to fetch committed offsets");
        }
    };

    final Properties properties = new Properties();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig("test-reader", properties));
    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.setMainConsumer(consumer);
    changelogReader.transitToUpdateStandby();

    consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L));
    changelogReader.register(tp, standbyStateManager);
    assertNull(changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());

    // if there are no records fetchable, nothing gets restored
    changelogReader.restore(mockTasks);
    assertNull(callback.restoreTopicPartition);
    assertNull(callback.storeNameCalledStates.get(RESTORE_START));
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertNull(changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());

    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 5L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 6L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 7L, "key".getBytes(), "value".getBytes()));
    // a record with a null key should be ignored
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 8L, null, "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 9L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 10L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 11L, "key".getBytes(), "value".getBytes()));

    // we should be able to restore up to the log-end offset since there is no limit
    changelogReader.restore(mockTasks);
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertNull(changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(6L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    assertNull(callback.storeNameCalledStates.get(RESTORE_END));
    assertNull(callback.storeNameCalledStates.get(RESTORE_BATCH));
}
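A detail worth copying from this test: the overridden committed() throws an AssertionError rather than returning a value, which turns the requirement "never fetch committed offsets for a non-source changelog" from a silent assumption into a hard test failure the moment the call ever happens.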
use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.
the class StoreChangelogReaderTest method shouldThrowIfCommittedOffsetsFail.
@Test
public void shouldThrowIfCommittedOffsetsFail() {
    final TaskId taskId = new TaskId(0, 0);
    EasyMock.expect(stateManager.taskId()).andReturn(taskId);
    EasyMock.expect(stateManager.changelogAsSource(tp)).andReturn(true).anyTimes();
    EasyMock.expect(storeMetadata.offset()).andReturn(10L).anyTimes();
    EasyMock.replay(stateManager, storeMetadata, store);

    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            throw kaboom;
        }
    };
    adminClient.updateEndOffsets(Collections.singletonMap(tp, 10L));

    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.setMainConsumer(consumer);
    changelogReader.register(tp, stateManager);

    final StreamsException thrown = assertThrows(StreamsException.class, () -> changelogReader.restore(Collections.singletonMap(taskId, mock(Task.class))));
    assertEquals(kaboom, thrown.getCause());
}
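Note the division of responsibilities in the setup: end offsets are fed through the (mock) admin client via updateEndOffsets, while committed offsets come from the consumer. Because changelogAsSource(tp) returns true here, restore() does consult committed(), the overridden method throws kaboom, and the test asserts the reader wraps it in a StreamsException rather than letting the raw exception escape.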
use of org.apache.kafka.clients.consumer.MockConsumer in project kafka by apache.
the class StreamThreadTest method shouldNotCloseTaskProducerWhenSuspending.
@Test
public void shouldNotCloseTaskProducerWhenSuspending() {
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(configProps(true)), true);
    internalTopologyBuilder.addSource(null, "name", null, null, null, topic1);
    internalTopologyBuilder.addSink("out", "output", null, null, null, "name");

    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());

    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign single partition
    assignedPartitions.add(t1p1);
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());

    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();
    assertThat(thread.activeTasks().size(), equalTo(1));

    // need to process a record to enable committing
    addRecord(mockConsumer, 0L);
    thread.runOnce();

    thread.rebalanceListener().onPartitionsRevoked(assignedPartitions);
    assertTrue(clientSupplier.producers.get(0).transactionCommitted());
    assertFalse(clientSupplier.producers.get(0).closed());
    assertEquals(1, thread.activeTasks().size());
}
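Unlike the StoreChangelogReaderTest cases above, this test never constructs a MockConsumer directly: createStreamThread wires the thread with a mock client supplier, so the test recovers the consumer by casting thread.mainConsumer() back to MockConsumer, and reaches the corresponding mock producers through clientSupplier.producers to assert that suspending the task committed the transaction without closing the producer.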