Example 51 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in the Apache Kafka project.

From class StreamThreadTest: method shouldInjectProducerPerThreadUsingClientSupplierOnCreateIfEosV2Enabled.

@Test
public void shouldInjectProducerPerThreadUsingClientSupplierOnCreateIfEosV2Enabled() {
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
    final Properties props = configProps(true);
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(props), true);
    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptyList());
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign two partitions, one per task
    assignedPartitions.add(t1p1);
    assignedPartitions.add(t1p2);
    activeTasks.put(task1, Collections.singleton(t1p1));
    activeTasks.put(task2, Collections.singleton(t1p2));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    final Map<TopicPartition, Long> beginOffsets = new HashMap<>();
    beginOffsets.put(t1p1, 0L);
    beginOffsets.put(t1p2, 0L);
    mockConsumer.updateBeginningOffsets(beginOffsets);
    thread.rebalanceListener().onPartitionsAssigned(new HashSet<>(assignedPartitions));
    thread.runOnce();
    assertThat(clientSupplier.producers.size(), is(1));
    assertSame(clientSupplier.consumer, thread.mainConsumer());
    assertSame(clientSupplier.restoreConsumer, thread.restoreConsumer());
}
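
For context, exactly-once v2 is enabled through a single StreamsConfig property, presumably what configProps(true) sets up in the test above. A minimal sketch, assuming placeholder application id and bootstrap servers (none of these values come from the test itself):

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class EosV2ConfigSketch {

    public static void main(final String[] args) {
        final Properties props = new Properties();
        // placeholder values, not taken from the test above
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "eos-v2-demo");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // EXACTLY_ONCE_V2 shares a single producer across all tasks of a
        // stream thread, which is what the assertion producers.size() == 1 checks
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_V2);

        final StreamsConfig config = new StreamsConfig(props);
        System.out.println(config.getString(StreamsConfig.PROCESSING_GUARANTEE_CONFIG));
    }
}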

Example 52 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in the Apache Kafka project.

From class StreamThreadTest: method shouldOnlyCompleteShutdownAfterRebalanceNotInProgress.

@Test
public void shouldOnlyCompleteShutdownAfterRebalanceNotInProgress() throws InterruptedException {
    internalTopologyBuilder.addSource(null, "source1", null, null, null, topic1);
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(configProps(true)), true);
    thread.start();
    TestUtils.waitForCondition(() -> thread.state() == StreamThread.State.STARTING, 10 * 1000, "Thread never started.");
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptyList());
    thread.taskManager().handleRebalanceStart(Collections.singleton(topic1));
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign two partitions, one per task
    assignedPartitions.add(t1p1);
    assignedPartitions.add(t1p2);
    activeTasks.put(task1, Collections.singleton(t1p1));
    activeTasks.put(task2, Collections.singleton(t1p2));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());
    thread.shutdown();
    // even if the thread is no longer running, it should keep polling
    // as long as the rebalance is still in progress
    assertFalse(thread.isRunning());
    Thread.sleep(1000);
    assertEquals(Utils.mkSet(task1, task2), thread.taskManager().activeTaskIds());
    assertEquals(StreamThread.State.PENDING_SHUTDOWN, thread.state());
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    TestUtils.waitForCondition(() -> thread.state() == StreamThread.State.DEAD, 10 * 1000, "Thread never shut down.");
    assertEquals(Collections.emptySet(), thread.taskManager().activeTaskIds());
}
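
The test drives the thread by hand through PENDING_SHUTDOWN into DEAD; in application code the public equivalent is closing the KafkaStreams instance with a timeout. A minimal sketch, assuming placeholder topic names and a trivial pass-through topology (nothing here is taken from the test):

import java.time.Duration;
import java.util.Properties;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.StreamsConfig;

public class GracefulShutdownSketch {

    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "shutdown-demo");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());

        final StreamsBuilder builder = new StreamsBuilder();
        builder.stream("input-topic").to("output-topic");

        final KafkaStreams streams = new KafkaStreams(builder.build(), props);
        // close(timeout) blocks until every stream thread has reached DEAD,
        // mirroring the PENDING_SHUTDOWN -> DEAD transition asserted above
        Runtime.getRuntime().addShutdownHook(
            new Thread(() -> streams.close(Duration.ofSeconds(30))));
        streams.start();
    }
}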

Example 53 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in the Apache Kafka project.

From class StreamsPartitionAssignorTest: method shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount.

@Test
public void shouldThrowTaskAssignmentExceptionWhenUnableToResolvePartitionCount() {
    builder = new CorruptedInternalTopologyBuilder();
    topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(configProps()));
    final InternalStreamsBuilder streamsBuilder = new InternalStreamsBuilder(builder);
    final KStream<String, String> inputTopic = streamsBuilder.stream(singleton("topic1"), new ConsumedInternal<>());
    final KTable<String, String> inputTable = streamsBuilder.table("topic2", new ConsumedInternal<>(), new MaterializedInternal<>(Materialized.as("store")));
    inputTopic.groupBy((k, v) -> k, Grouped.with("GroupName", Serdes.String(), Serdes.String()))
        .windowedBy(TimeWindows.of(Duration.ofMinutes(10)))
        .aggregate(() -> "", (k, v, a) -> a + k)
        .leftJoin(inputTable, v -> v, (x, y) -> x + y);
    streamsBuilder.buildAndOptimizeTopology();
    configureDefault();
    subscriptions.put("consumer", new Subscription(singletonList("topic"), defaultSubscriptionInfo.encode()));
    final Map<String, Assignment> assignments = partitionAssignor.assign(metadata, new GroupSubscription(subscriptions)).groupAssignment();
    assertThat(AssignmentInfo.decode(assignments.get("consumer").userData()).errCode(), equalTo(AssignorError.ASSIGNMENT_ERROR.code()));
}
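
The corrupted-builder setup aside, the topology shape in this test (a windowed aggregation foreign-key-joined to a table) can be written against the public API. A minimal sketch, assuming String serdes and placeholder topic and store names; it only mirrors the shape of the test topology, not the corruption that triggers the assignment error:

import java.time.Duration;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.Topology;
import org.apache.kafka.streams.kstream.Consumed;
import org.apache.kafka.streams.kstream.Grouped;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Materialized;
import org.apache.kafka.streams.kstream.TimeWindows;
import org.apache.kafka.streams.kstream.Windowed;

public class WindowedAggregateJoinSketch {

    public static Topology build() {
        final StreamsBuilder builder = new StreamsBuilder();
        final KStream<String, String> input =
            builder.stream("topic1", Consumed.with(Serdes.String(), Serdes.String()));
        final KTable<String, String> table =
            builder.table("topic2", Consumed.with(Serdes.String(), Serdes.String()));

        // ten-minute windowed aggregation, as in the test
        final KTable<Windowed<String>, String> aggregated = input
            .groupBy((k, v) -> k, Grouped.with("GroupName", Serdes.String(), Serdes.String()))
            .windowedBy(TimeWindows.of(Duration.ofMinutes(10)))
            .aggregate(() -> "", (k, v, a) -> a + k,
                Materialized.with(Serdes.String(), Serdes.String()));

        // foreign-key left join against the table, keyed by the aggregate value
        aggregated.leftJoin(table, v -> v, (x, y) -> x + y);

        return builder.build();
    }

    public static void main(final String[] args) {
        System.out.println(build().describe());
    }
}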

Example 54 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in the Apache Kafka project.

From class StoreChangelogReaderTest: method shouldRestoreToLimitInStandbyState.

@Test
public void shouldRestoreToLimitInStandbyState() {
    final Map<TaskId, Task> mockTasks = mock(Map.class);
    EasyMock.expect(mockTasks.get(null)).andReturn(mock(Task.class)).anyTimes();
    EasyMock.expect(standbyStateManager.changelogAsSource(tp)).andReturn(true).anyTimes();
    EasyMock.replay(mockTasks, standbyStateManager, storeMetadata, store);
    final AtomicLong offset = new AtomicLong(7L);
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            return partitions.stream().collect(Collectors.toMap(Function.identity(), partition -> new OffsetAndMetadata(offset.get())));
        }
    };
    final long now = time.milliseconds();
    final Properties properties = new Properties();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig("test-reader", properties));
    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.setMainConsumer(consumer);
    changelogReader.transitToUpdateStandby();
    consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L));
    changelogReader.register(tp, standbyStateManager);
    assertEquals(0L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
    changelogReader.restore(mockTasks);
    assertNull(callback.restoreTopicPartition);
    assertNull(callback.storeNameCalledStates.get(RESTORE_START));
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 5L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 6L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 7L, "key".getBytes(), "value".getBytes()));
    // null key should be ignored
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 8L, null, "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 9L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 10L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 11L, "key".getBytes(), "value".getBytes()));
    changelogReader.restore(mockTasks);
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    assertNull(callback.storeNameCalledStates.get(RESTORE_END));
    assertNull(callback.storeNameCalledStates.get(RESTORE_BATCH));
    offset.set(10L);
    time.setCurrentTimeMs(now + 100L);
    // should not try to read committed offsets before the commit interval has elapsed
    changelogReader.restore(mockTasks);
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    time.setCurrentTimeMs(now + 101L);
    // the first restore call only updates the limit offset; the same applies below
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    offset.set(15L);
    // after one update the timer is reset, so no new attempt is made until the next interval has elapsed
    time.setCurrentTimeMs(now + 201L);
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    // once we are in update-active mode, we should not try to update the limit offset
    time.setCurrentTimeMs(now + 202L);
    changelogReader.enforceRestoreActive();
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    changelogReader.transitToUpdateStandby();
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(6L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 12L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 13L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 14L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 15L, "key".getBytes(), "value".getBytes()));
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(9L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(1, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
}
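
The anonymous MockConsumer subclass above overrides committed() so the reader sees a moving committed offset; MockConsumer itself is the ordinary test double from kafka-clients. A minimal sketch of the seeding pattern the test relies on, with a placeholder topic name:

import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerSketch {

    public static void main(final String[] args) {
        final TopicPartition tp = new TopicPartition("changelog-topic", 0);
        final MockConsumer<byte[], byte[]> consumer =
            new MockConsumer<>(OffsetResetStrategy.EARLIEST);

        // records can only be added for assigned partitions
        consumer.assign(Collections.singleton(tp));
        // MockConsumer needs a beginning offset before poll() can position itself
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
        consumer.addRecord(new ConsumerRecord<>(
            "changelog-topic", 0, 0L, "key".getBytes(), "value".getBytes()));

        final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(0));
        System.out.println(records.count()); // 1
    }
}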

Example 55 with StreamsConfig

Use of org.apache.kafka.streams.StreamsConfig in the Apache Kafka project.

From class StoreChangelogReaderTest: method shouldNotUpdateLimitForNonSourceStandbyChangelog.

@Test
public void shouldNotUpdateLimitForNonSourceStandbyChangelog() {
    final Map<TaskId, Task> mockTasks = mock(Map.class);
    EasyMock.expect(mockTasks.get(null)).andReturn(mock(Task.class)).anyTimes();
    EasyMock.expect(standbyStateManager.changelogAsSource(tp)).andReturn(false).anyTimes();
    EasyMock.replay(mockTasks, standbyStateManager, storeMetadata, store);
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            throw new AssertionError("Should not try to fetch committed offsets");
        }
    };
    final Properties properties = new Properties();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig("test-reader", properties));
    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.setMainConsumer(consumer);
    changelogReader.transitToUpdateStandby();
    consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L));
    changelogReader.register(tp, standbyStateManager);
    assertNull(changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
    // if there are no fetchable records, nothing gets restored
    changelogReader.restore(mockTasks);
    assertNull(callback.restoreTopicPartition);
    assertNull(callback.storeNameCalledStates.get(RESTORE_START));
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertNull(changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 5L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 6L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 7L, "key".getBytes(), "value".getBytes()));
    // null key should be ignored
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 8L, null, "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 9L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 10L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 11L, "key".getBytes(), "value".getBytes()));
    // we should be able to restore to the log end offsets since there's no limit
    changelogReader.restore(mockTasks);
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertNull(changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(6L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    assertNull(callback.storeNameCalledStates.get(RESTORE_END));
    assertNull(callback.storeNameCalledStates.get(RESTORE_BATCH));
}
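
Both standby tests hinge on behavior that only shows up when standby replicas are configured; in an application this is a single StreamsConfig setting. A minimal sketch with placeholder values (the 100 ms commit interval matches the one used in the tests, which also bounds how often the limit offset for source-topic changelogs is refreshed):

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

public class StandbyConfigSketch {

    public static void main(final String[] args) {
        final Properties props = new Properties();
        // placeholder values, not taken from the tests above
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "standby-demo");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // keep one hot-standby copy of each state store; standbys are kept
        // up to date by the changelog-reading path exercised in the tests
        props.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
        // the commit interval also bounds how often a standby refreshes its
        // limit offset for changelogs that double as source topics
        props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);

        final StreamsConfig config = new StreamsConfig(props);
        System.out.println(config.getInt(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG));
    }
}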

Aggregations

StreamsConfig (org.apache.kafka.streams.StreamsConfig): 219 usages
Test (org.junit.Test): 173 usages
Properties (java.util.Properties): 84 usages
HashMap (java.util.HashMap): 69 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 66 usages
TaskId (org.apache.kafka.streams.processor.TaskId): 54 usages
MockTime (org.apache.kafka.common.utils.MockTime): 53 usages
Set (java.util.Set): 36 usages
ArrayList (java.util.ArrayList): 33 usages
HashSet (java.util.HashSet): 33 usages
Metrics (org.apache.kafka.common.metrics.Metrics): 33 usages
File (java.io.File): 32 usages
AdminClient (org.apache.kafka.clients.admin.AdminClient): 31 usages
MockAdminClient (org.apache.kafka.clients.admin.MockAdminClient): 31 usages
LogContext (org.apache.kafka.common.utils.LogContext): 31 usages
Map (java.util.Map): 30 usages
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 30 usages
KafkaFutureImpl (org.apache.kafka.common.internals.KafkaFutureImpl): 30 usages
Before (org.junit.Before): 27 usages
List (java.util.List): 22 usages