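Examples of java.util.Collections.singletonMap in open-source projects. As a quick reference before the examples: singletonMap returns an immutable, fixed-size map holding exactly one key-value pair. A minimal sketch (class and names are illustrative):

import java.util.Collections;
import java.util.Map;

public class SingletonMapDemo {
    public static void main(String[] args) {
        // An immutable map holding exactly one entry.
        Map<String, Long> offsets = Collections.singletonMap("tp-0", 5L);
        System.out.println(offsets.get("tp-0")); // 5

        // Mutation attempts throw: the map is unmodifiable.
        try {
            offsets.put("tp-1", 6L);
        } catch (UnsupportedOperationException expected) {
            System.out.println("singletonMap cannot be modified");
        }
    }
}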

Example 6 with Collections.singletonMap

Use of java.util.Collections.singletonMap in project kafka by apache.

From the class StoreChangelogReaderTest, the method shouldRestoreToLimitInStandbyState.

@Test
public void shouldRestoreToLimitInStandbyState() {
    final Map<TaskId, Task> mockTasks = mock(Map.class);
    EasyMock.expect(mockTasks.get(null)).andReturn(mock(Task.class)).anyTimes();
    EasyMock.expect(standbyStateManager.changelogAsSource(tp)).andReturn(true).anyTimes();
    EasyMock.replay(mockTasks, standbyStateManager, storeMetadata, store);
    final AtomicLong offset = new AtomicLong(7L);
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {

        @Override
        public Map<TopicPartition, OffsetAndMetadata> committed(final Set<TopicPartition> partitions) {
            return partitions.stream().collect(Collectors.toMap(Function.identity(), partition -> new OffsetAndMetadata(offset.get())));
        }
    };
    final long now = time.milliseconds();
    final Properties properties = new Properties();
    properties.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 100L);
    final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig("test-reader", properties));
    final StoreChangelogReader changelogReader = new StoreChangelogReader(time, config, logContext, adminClient, consumer, callback);
    changelogReader.setMainConsumer(consumer);
    changelogReader.transitToUpdateStandby();
    consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L));
    changelogReader.register(tp, standbyStateManager);
    assertEquals(0L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
    changelogReader.restore(mockTasks);
    assertNull(callback.restoreTopicPartition);
    assertNull(callback.storeNameCalledStates.get(RESTORE_START));
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(0L, changelogReader.changelogMetadata(tp).totalRestored());
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 5L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 6L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 7L, "key".getBytes(), "value".getBytes()));
    // null key should be ignored
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 8L, null, "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 9L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 10L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 11L, "key".getBytes(), "value".getBytes()));
    changelogReader.restore(mockTasks);
    assertEquals(StoreChangelogReader.ChangelogState.RESTORING, changelogReader.changelogMetadata(tp).state());
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    assertNull(callback.storeNameCalledStates.get(RESTORE_END));
    assertNull(callback.storeNameCalledStates.get(RESTORE_BATCH));
    offset.set(10L);
    time.setCurrentTimeMs(now + 100L);
    // should not try to read committed offsets if the commit interval has not yet elapsed
    changelogReader.restore(mockTasks);
    assertEquals(7L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    time.setCurrentTimeMs(now + 101L);
    // the first restore only updates the limit offset; the same applies below
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(2L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(4, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    offset.set(15L);
    // after we've updated once, the timer is reset, so we should not try again until the next interval has elapsed
    time.setCurrentTimeMs(now + 201L);
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    // once we are in active restoring mode, we should not try to update the limit offset
    time.setCurrentTimeMs(now + 202L);
    changelogReader.enforceRestoreActive();
    changelogReader.restore(mockTasks);
    assertEquals(10L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    changelogReader.transitToUpdateStandby();
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(4L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(2, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(6L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 12L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 13L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 14L, "key".getBytes(), "value".getBytes()));
    consumer.addRecord(new ConsumerRecord<>(topicName, 0, 15L, "key".getBytes(), "value".getBytes()));
    changelogReader.restore(mockTasks);
    assertEquals(15L, (long) changelogReader.changelogMetadata(tp).endOffset());
    assertEquals(9L, changelogReader.changelogMetadata(tp).totalRestored());
    assertEquals(1, changelogReader.changelogMetadata(tp).bufferedRecords().size());
    assertEquals(0, changelogReader.changelogMetadata(tp).bufferedLimitIndex());
}
Also used : MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) OffsetResetStrategy(org.apache.kafka.clients.consumer.OffsetResetStrategy) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) TopicPartition(org.apache.kafka.common.TopicPartition) TaskId(org.apache.kafka.streams.processor.TaskId) StreamsConfig(org.apache.kafka.streams.StreamsConfig) StreamsTestUtils(org.apache.kafka.test.StreamsTestUtils) RESTORE_START(org.apache.kafka.test.MockStateRestoreListener.RESTORE_START) RESTORE_END(org.apache.kafka.test.MockStateRestoreListener.RESTORE_END) RESTORE_BATCH(org.apache.kafka.test.MockStateRestoreListener.RESTORE_BATCH) EasyMock(org.easymock.EasyMock) Map(java.util.Map) Set(java.util.Set) Properties(java.util.Properties) Collections.singletonMap(java.util.Collections.singletonMap) AtomicLong(java.util.concurrent.atomic.AtomicLong) Function(java.util.function.Function) Collectors(java.util.stream.Collectors) Assert.assertEquals(org.junit.Assert.assertEquals) Assert.assertNull(org.junit.Assert.assertNull) Test(org.junit.Test)
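
In this test, Collections.singletonMap(tp, 5L) seeds the MockConsumer's beginning offset for the single changelog partition under restore. A standalone sketch of the same seeding pattern, assuming an illustrative topic name and offsets:

import java.util.Collections;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;

public class MockConsumerSeedDemo {
    public static void main(String[] args) {
        MockConsumer<byte[], byte[]> consumer =
                new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        TopicPartition tp = new TopicPartition("store-changelog", 0);

        consumer.assign(Collections.singleton(tp));
        // One partition, one offset: singletonMap avoids building a full HashMap.
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 5L));
        consumer.updateEndOffsets(Collections.singletonMap(tp, 12L));
    }
}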

Example 7 with Collections.singletonMap

Use of java.util.Collections.singletonMap in project lucene-solr by apache.

From the class RequestParams, the method createParamSet.

public static ParamSet createParamSet(Map map, Long version) {
    Map copy = getDeepCopy(map, 3);
    Map meta = (Map) copy.remove("");
    if (meta == null && version != null) {
        meta = Collections.singletonMap("v", version);
    }
    Map invariants = (Map) copy.remove(INVARIANTS);
    Map appends = (Map) copy.remove(APPENDS);
    return new ParamSet(copy, invariants, appends, meta);
}
Also used : ImmutableMap(com.google.common.collect.ImmutableMap) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) Collections.singletonMap(java.util.Collections.singletonMap)
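
createParamSet uses singletonMap to build the {"v": version} metadata only when the stored configuration carries none. A minimal sketch of that conditional pattern (the helper below is illustrative, not Solr's API):

import java.util.Collections;
import java.util.Map;

public class MetaDemo {

    // Illustrative helper: attach a {"v": version} map only when a version exists.
    static Map<String, Object> versionMeta(Long version) {
        return version == null
                ? Collections.emptyMap()
                : Collections.singletonMap("v", version);
    }

    public static void main(String[] args) {
        System.out.println(versionMeta(3L));   // {v=3}
        System.out.println(versionMeta(null)); // {}
    }
}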

Example 8 with Collections.singletonMap

Use of java.util.Collections.singletonMap in project apache-kafka-on-k8s by banzaicloud.

From the class KafkaConsumerTest, the method testFetchProgressWithMissingPartitionPosition.

@Test
public void testFetchProgressWithMissingPartitionPosition() {
    // Verifies that we can make progress on one partition while we are awaiting
    // a reset on another partition.
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 2);
    Node node = cluster.nodes().get(0);
    Metadata metadata = createMetadata();
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    KafkaConsumer<String, String> consumer = newConsumerNoAutoCommit(time, client, metadata);
    consumer.assign(Arrays.asList(tp0, tp1));
    consumer.seekToEnd(singleton(tp0));
    consumer.seekToBeginning(singleton(tp1));
    client.prepareResponse(new MockClient.RequestMatcher() {

        @Override
        public boolean matches(AbstractRequest body) {
            ListOffsetRequest request = (ListOffsetRequest) body;
            Map<TopicPartition, Long> expectedTimestamps = new HashMap<>();
            expectedTimestamps.put(tp0, ListOffsetRequest.LATEST_TIMESTAMP);
            expectedTimestamps.put(tp1, ListOffsetRequest.EARLIEST_TIMESTAMP);
            return expectedTimestamps.equals(request.partitionTimestamps());
        }
    }, listOffsetsResponse(Collections.singletonMap(tp0, 50L), Collections.singletonMap(tp1, Errors.NOT_LEADER_FOR_PARTITION)));
    client.prepareResponse(new MockClient.RequestMatcher() {

        @Override
        public boolean matches(AbstractRequest body) {
            FetchRequest request = (FetchRequest) body;
            return request.fetchData().keySet().equals(singleton(tp0)) && request.fetchData().get(tp0).fetchOffset == 50L;
        }
    }, fetchResponse(tp0, 50L, 5));
    ConsumerRecords<String, String> records = consumer.poll(5);
    assertEquals(5, records.count());
    assertEquals(singleton(tp0), records.partitions());
}
Also used : Node(org.apache.kafka.common.Node) AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) Metadata(org.apache.kafka.clients.Metadata) Cluster(org.apache.kafka.common.Cluster) MockTime(org.apache.kafka.common.utils.MockTime) Time(org.apache.kafka.common.utils.Time) ListOffsetRequest(org.apache.kafka.common.requests.ListOffsetRequest) FetchRequest(org.apache.kafka.common.requests.FetchRequest) Map(java.util.Map) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) Collections.singletonMap(java.util.Collections.singletonMap) MockClient(org.apache.kafka.clients.MockClient) Test(org.junit.Test)
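
listOffsetsResponse above receives two separate singletonMaps, one per partition, because tp0 carries an offset while tp1 carries an error. A singletonMap is fixed-size, so when entries need to be combined, copy them into a mutable map first; a minimal sketch with illustrative keys:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class MergeDemo {
    public static void main(String[] args) {
        Map<String, Long> base = Collections.singletonMap("tp0", 50L);

        // singletonMap cannot grow; copy into a HashMap before adding entries.
        Map<String, Long> merged = new HashMap<>(base);
        merged.put("tp1", 0L);
        System.out.println(merged); // contains tp0=50 and tp1=0 (order unspecified)
    }
}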

Example 9 with Collections.singletonMap

Use of java.util.Collections.singletonMap in project kafka by apache.

From the class ConsumerCoordinatorTest, the method testPerformAssignmentShouldUpdateGroupSubscriptionAfterAssignmentIfNeeded.

@SuppressWarnings("unchecked")
@Test
public void testPerformAssignmentShouldUpdateGroupSubscriptionAfterAssignmentIfNeeded() {
    SubscriptionState mockSubscriptionState = Mockito.mock(SubscriptionState.class);
    // the consumer only subscribed to "topic1"
    Map<String, List<String>> memberSubscriptions = singletonMap(consumerId, singletonList(topic1));
    List<JoinGroupResponseData.JoinGroupResponseMember> metadata = new ArrayList<>();
    for (Map.Entry<String, List<String>> subscriptionEntry : memberSubscriptions.entrySet()) {
        ConsumerPartitionAssignor.Subscription subscription = new ConsumerPartitionAssignor.Subscription(subscriptionEntry.getValue());
        ByteBuffer buf = ConsumerProtocol.serializeSubscription(subscription);
        metadata.add(new JoinGroupResponseData.JoinGroupResponseMember().setMemberId(subscriptionEntry.getKey()).setMetadata(buf.array()));
    }
    // normal case: the assignment result will have partitions for only the subscribed topic: "topic1"
    partitionAssignor.prepare(Collections.singletonMap(consumerId, singletonList(t1p)));
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, false, mockSubscriptionState)) {
        coordinator.onLeaderElected("1", partitionAssignor.name(), metadata, false);
        ArgumentCaptor<Collection<String>> topicsCaptor = ArgumentCaptor.forClass(Collection.class);
        // groupSubscribe should be called only once, before assignment,
        // because the assigned topics are the same as the subscribed topics
        Mockito.verify(mockSubscriptionState, Mockito.times(1)).groupSubscribe(topicsCaptor.capture());
        List<Collection<String>> capturedTopics = topicsCaptor.getAllValues();
        // expect the final group subscription to be updated to "topic1"
        Set<String> expectedTopicsGotCalled = new HashSet<>(Arrays.asList(topic1));
        assertEquals(expectedTopicsGotCalled, capturedTopics.get(0));
    }
    Mockito.clearInvocations(mockSubscriptionState);
    // unsubscribed topic partition assigned case: the assignment result will have partitions for (1) subscribed topic: "topic1"
    // and (2) the additional unsubscribed topic: "topic2". We should add "topic2" into group subscription list
    partitionAssignor.prepare(Collections.singletonMap(consumerId, Arrays.asList(t1p, t2p)));
    try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, false, mockSubscriptionState)) {
        coordinator.onLeaderElected("1", partitionAssignor.name(), metadata, false);
        ArgumentCaptor<Collection<String>> topicsCaptor = ArgumentCaptor.forClass(Collection.class);
        // groupSubscribe should be called 2 times, once before assignment, once after assignment
        // (because the assigned topics are not the same as the subscribed topics)
        Mockito.verify(mockSubscriptionState, Mockito.times(2)).groupSubscribe(topicsCaptor.capture());
        List<Collection<String>> capturedTopics = topicsCaptor.getAllValues();
        // expect the final group subscription to be updated to "topic1" and "topic2"
        Set<String> expectedTopicsGotCalled = new HashSet<>(Arrays.asList(topic1, topic2));
        assertEquals(expectedTopicsGotCalled, capturedTopics.get(1));
    }
}
Also used : JoinGroupResponseData(org.apache.kafka.common.message.JoinGroupResponseData) ArrayList(java.util.ArrayList) ByteBuffer(java.nio.ByteBuffer) Metrics(org.apache.kafka.common.metrics.Metrics) Collection(java.util.Collection) Collections.singletonList(java.util.Collections.singletonList) Collections.emptyList(java.util.Collections.emptyList) List(java.util.List) ConsumerPartitionAssignor(org.apache.kafka.clients.consumer.ConsumerPartitionAssignor) Utils.mkMap(org.apache.kafka.common.utils.Utils.mkMap) Map(java.util.Map) HashMap(java.util.HashMap) Collections.singletonMap(java.util.Collections.singletonMap) HashSet(java.util.HashSet) Test(org.junit.jupiter.api.Test)
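
The loop over memberSubscriptions.entrySet() works on a singletonMap exactly as on any other Map; it simply yields a single entry. A standalone sketch of the same iteration, with illustrative names:

import java.util.Collections;
import java.util.List;
import java.util.Map;

public class EntrySetDemo {
    public static void main(String[] args) {
        Map<String, List<String>> subscriptions =
                Collections.singletonMap("consumer-1", Collections.singletonList("topic1"));

        // entrySet() on a singletonMap behaves like any other Map's: one entry.
        for (Map.Entry<String, List<String>> entry : subscriptions.entrySet()) {
            System.out.println(entry.getKey() + " -> " + entry.getValue());
        }
    }
}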

Example 10 with Collections.singletonMap

Use of java.util.Collections.singletonMap in project kafka by apache.

From the class FetcherTest, the method testEpochSetInFetchRequest.

@Test
public void testEpochSetInFetchRequest() {
    buildFetcher();
    subscriptions.assignFromUser(singleton(tp0));
    MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWithIds("dummy", 1, Collections.emptyMap(), Collections.singletonMap(topicName, 4), tp -> 99, topicIds);
    client.updateMetadata(metadataResponse);
    subscriptions.seek(tp0, 10);
    assertEquals(1, fetcher.sendFetches());
    // Check for epoch in outgoing request
    MockClient.RequestMatcher matcher = body -> {
        if (body instanceof FetchRequest) {
            FetchRequest fetchRequest = (FetchRequest) body;
            fetchRequest.fetchData(topicNames).values().forEach(partitionData -> {
                assertTrue(partitionData.currentLeaderEpoch.isPresent(), "Expected Fetcher to set leader epoch in request");
                assertEquals(99, partitionData.currentLeaderEpoch.get().longValue(), "Expected leader epoch to match epoch from metadata update");
            });
            return true;
        } else {
            fail("Should have seen FetchRequest");
            return false;
        }
    };
    client.prepareResponse(matcher, fullFetchResponse(tidp0, this.records, Errors.NONE, 100L, 0));
    consumerClient.pollNoWakeup();
}
Also used : Collections.singleton(java.util.Collections.singleton) Collections.singletonMap(java.util.Collections.singletonMap) Collections.emptyMap(java.util.Collections.emptyMap) MetadataResponse(org.apache.kafka.common.requests.MetadataResponse) RequestTestUtils(org.apache.kafka.common.requests.RequestTestUtils) FetchRequest(org.apache.kafka.common.requests.FetchRequest) MockClient(org.apache.kafka.clients.MockClient) Errors(org.apache.kafka.common.protocol.Errors) Assertions.assertEquals(org.junit.jupiter.api.Assertions.assertEquals) Assertions.assertTrue(org.junit.jupiter.api.Assertions.assertTrue) Assertions.fail(org.junit.jupiter.api.Assertions.fail) Test(org.junit.jupiter.api.Test)
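
Here Collections.singletonMap(topicName, 4) supplies a one-topic partition-count spec to the metadata builder. Since Java 9, Map.of offers an equivalent one-entry immutable literal, with one difference worth knowing: Map.of rejects null keys and values, while singletonMap accepts them. A minimal sketch:

import java.util.Collections;
import java.util.Map;

public class NullDemo {
    public static void main(String[] args) {
        // Collections.singletonMap tolerates a null value (and a null key)...
        Map<String, Integer> counts = Collections.singletonMap("my-topic", null);
        System.out.println(counts.containsKey("my-topic")); // true

        // ...while Map.of (Java 9+) rejects nulls outright.
        try {
            Map.of("my-topic", (Integer) null);
        } catch (NullPointerException expected) {
            System.out.println("Map.of rejects null keys and values");
        }
    }
}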

Aggregations

Collections.singletonMap (java.util.Collections.singletonMap): 12
Map (java.util.Map): 12
Test (org.junit.Test): 7
Utils.mkMap (org.apache.kafka.common.utils.Utils.mkMap): 6
HashMap (java.util.HashMap): 5
Set (java.util.Set): 5
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 5
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 5
TopicPartition (org.apache.kafka.common.TopicPartition): 5
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 5
LogContext (org.apache.kafka.common.utils.LogContext): 5
MockTime (org.apache.kafka.common.utils.MockTime): 5
Utils.mkSet (org.apache.kafka.common.utils.Utils.mkSet): 5
Duration (java.time.Duration): 4
Collections (java.util.Collections): 4
Function (java.util.function.Function): 4
Collectors (java.util.stream.Collectors): 4
ListOffsetsOptions (org.apache.kafka.clients.admin.ListOffsetsOptions): 4
MockAdminClient (org.apache.kafka.clients.admin.MockAdminClient): 4
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 4