Example 31 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project samza by apache.

From the class TestKafkaSystemConsumer, method testStartConsumer.

@Test
public void testStartConsumer() {
    final Consumer consumer = Mockito.mock(Consumer.class);
    final KafkaConsumerProxyFactory kafkaConsumerProxyFactory = Mockito.mock(KafkaConsumerProxyFactory.class);
    final KafkaSystemConsumerMetrics kafkaSystemConsumerMetrics = new KafkaSystemConsumerMetrics(TEST_SYSTEM, new NoOpMetricsRegistry());
    final SystemStreamPartition testSystemStreamPartition1 = new SystemStreamPartition(TEST_SYSTEM, TEST_STREAM, new Partition(0));
    final SystemStreamPartition testSystemStreamPartition2 = new SystemStreamPartition(TEST_SYSTEM, TEST_STREAM, new Partition(1));
    final String testOffset = "1";
    final KafkaConsumerProxy kafkaConsumerProxy = Mockito.mock(KafkaConsumerProxy.class);
    Mockito.when(kafkaConsumerProxyFactory.create(Mockito.anyObject())).thenReturn(kafkaConsumerProxy);
    Mockito.doNothing().when(consumer).seek(new TopicPartition(TEST_STREAM, 0), 1);
    Mockito.doNothing().when(consumer).seek(new TopicPartition(TEST_STREAM, 1), 1);
    KafkaSystemConsumer kafkaSystemConsumer = new KafkaSystemConsumer(consumer, TEST_SYSTEM, new MapConfig(), TEST_CLIENT_ID, kafkaConsumerProxyFactory, kafkaSystemConsumerMetrics, new SystemClock());
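    // Registering SSPs records their start offsets; startConsumer() should then seek each partition to that offset and start the proxy.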
    kafkaSystemConsumer.register(testSystemStreamPartition1, testOffset);
    kafkaSystemConsumer.register(testSystemStreamPartition2, testOffset);
    kafkaSystemConsumer.startConsumer();
    Mockito.verify(consumer).seek(new TopicPartition(TEST_STREAM, 0), 1);
    Mockito.verify(consumer).seek(new TopicPartition(TEST_STREAM, 1), 1);
    Mockito.verify(kafkaConsumerProxy).start();
    Mockito.verify(kafkaConsumerProxy).addTopicPartition(testSystemStreamPartition1, Long.valueOf(testOffset));
    Mockito.verify(kafkaConsumerProxy).addTopicPartition(testSystemStreamPartition2, Long.valueOf(testOffset));
}
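The behavior under test above is Consumer#seek, which repositions the fetch offset of an assigned partition so the next poll() starts there. A minimal standalone sketch of that call, assuming a local broker and a hypothetical topic (neither is part of the Samza test):

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class SeekSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        try (Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("test-stream", 0); // hypothetical topic
            consumer.assign(Collections.singleton(tp)); // seek() requires an assigned partition
            consumer.seek(tp, 1L);                      // next poll() fetches from offset 1
            consumer.poll(Duration.ofMillis(100));
        }
    }
}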

Example 32 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.

From the class MirrorConnectorsIntegrationBaseTest, method waitForConsumerGroupOffsetSync.

/*
     * Given a consumer group, topics, and an expected number of records, make sure
     * the consumer group offsets are eventually synced to the expected offset numbers.
     */
protected static <T> void waitForConsumerGroupOffsetSync(EmbeddedConnectCluster connect, Consumer<T, T> consumer, List<String> topics, String consumerGroupId, int numRecords) throws InterruptedException {
    try (Admin adminClient = connect.kafka().createAdminClient()) {
        List<TopicPartition> tps = new ArrayList<>(NUM_PARTITIONS * topics.size());
        for (int partitionIndex = 0; partitionIndex < NUM_PARTITIONS; partitionIndex++) {
            for (String topic : topics) {
                tps.add(new TopicPartition(topic, partitionIndex));
            }
        }
        long expectedTotalOffsets = numRecords * topics.size();
        waitForCondition(() -> {
            Map<TopicPartition, OffsetAndMetadata> consumerGroupOffsets = adminClient.listConsumerGroupOffsets(consumerGroupId).partitionsToOffsetAndMetadata().get();
            long consumerGroupOffsetTotal = consumerGroupOffsets.values().stream().mapToLong(OffsetAndMetadata::offset).sum();
            Map<TopicPartition, Long> offsets = consumer.endOffsets(tps, CONSUMER_POLL_TIMEOUT_MS);
            long totalOffsets = offsets.values().stream().mapToLong(l -> l).sum();
            // the end offsets must reach the expected total, and the group must have committed at least one offset
            return totalOffsets == expectedTotalOffsets && consumerGroupOffsetTotal > 0;
        }, OFFSET_SYNC_DURATION_MS, "Consumer group offset sync is not complete in time");
    }
}
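The helper compares two views of progress: the group's committed offsets, read through the Admin API, and the log-end offsets, read through any consumer. A standalone sketch of those two reads, with a hypothetical group id and admin configuration (the EmbeddedConnectCluster wiring is omitted):

import java.time.Duration;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class OffsetSyncSketch {
    // Sum of the group's committed offsets, via the Admin API.
    static long committedTotal(Properties adminProps, String groupId) throws Exception {
        try (Admin admin = Admin.create(adminProps)) {
            return admin.listConsumerGroupOffsets(groupId)
                .partitionsToOffsetAndMetadata().get()
                .values().stream().mapToLong(OffsetAndMetadata::offset).sum();
        }
    }

    // Sum of the log-end offsets for the same partitions, via any live consumer.
    static long endOffsetTotal(Consumer<?, ?> consumer, List<TopicPartition> tps) {
        Map<TopicPartition, Long> ends = consumer.endOffsets(tps, Duration.ofSeconds(5));
        return ends.values().stream().mapToLong(Long::longValue).sum();
    }
}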

Example 33 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.

From the class StreamsPartitionAssignorTest, method shouldRequestCommittedOffsetsForPreexistingSourceChangelogs.

@Test
public void shouldRequestCommittedOffsetsForPreexistingSourceChangelogs() {
    final Set<TopicPartition> changelogs = mkSet(new TopicPartition("topic1", 0), new TopicPartition("topic1", 1), new TopicPartition("topic1", 2));
    final StreamsBuilder streamsBuilder = new StreamsBuilder();
    streamsBuilder.table("topic1", Materialized.as("store"));
    final Properties props = new Properties();
    props.putAll(configProps());
    props.put(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE);
    builder = TopologyWrapper.getInternalTopologyBuilder(streamsBuilder.build(props));
    topologyMetadata = new TopologyMetadata(builder, new StreamsConfig(props));
    subscriptions.put("consumer10", new Subscription(singletonList("topic1"), defaultSubscriptionInfo.encode()));
    createDefaultMockTaskManager();
    configurePartitionAssignorWith(singletonMap(StreamsConfig.TOPOLOGY_OPTIMIZATION_CONFIG, StreamsConfig.OPTIMIZE));
    overwriteInternalTopicManagerWithMock(false);
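    // referenceContainer.mainConsumer is an EasyMock mock, so committed() can be stubbed below.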
    final Consumer<byte[], byte[]> consumerClient = referenceContainer.mainConsumer;
    EasyMock.expect(consumerClient.committed(EasyMock.eq(changelogs))).andReturn(changelogs.stream().collect(Collectors.toMap(tp -> tp, tp -> new OffsetAndMetadata(Long.MAX_VALUE)))).once();
    EasyMock.replay(consumerClient);
    partitionAssignor.assign(metadata, new GroupSubscription(subscriptions));
    EasyMock.verify(consumerClient);
}
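The stubbed call in the middle, consumerClient.committed(changelogs), is the real Consumer#committed(Set) API, which returns the last committed offset per partition (with a null map value where nothing was committed). A minimal sketch of the real call, with hypothetical connection settings and group id:

import java.util.Map;
import java.util.Properties;
import java.util.Set;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class CommittedSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker
        props.put("group.id", "sketch-group");            // hypothetical group
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        try (Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            Set<TopicPartition> changelogs = Set.of(
                new TopicPartition("topic1", 0), new TopicPartition("topic1", 1));
            Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(changelogs);
            // A null value means the group has no committed offset for that partition.
            committed.forEach((tp, oam) ->
                System.out.println(tp + " -> " + (oam == null ? "none" : oam.offset())));
        }
    }
}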

Example 34 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.

From the class HighAvailabilityTaskAssignorIntegrationTest, method shouldScaleOutWithWarmupTasks.

private void shouldScaleOutWithWarmupTasks(final Function<String, Materialized<Object, Object, KeyValueStore<Bytes, byte[]>>> materializedFunction) throws InterruptedException {
    final String testId = safeUniqueTestName(getClass(), testName);
    final String appId = "appId_" + System.currentTimeMillis() + "_" + testId;
    final String inputTopic = "input" + testId;
    final Set<TopicPartition> inputTopicPartitions = mkSet(new TopicPartition(inputTopic, 0), new TopicPartition(inputTopic, 1));
    final String storeName = "store" + testId;
    final String storeChangelog = appId + "-store" + testId + "-changelog";
    final Set<TopicPartition> changelogTopicPartitions = mkSet(new TopicPartition(storeChangelog, 0), new TopicPartition(storeChangelog, 1));
    IntegrationTestUtils.cleanStateBeforeTest(CLUSTER, 2, inputTopic, storeChangelog);
    final ReentrantLock assignmentLock = new ReentrantLock();
    final AtomicInteger assignmentsCompleted = new AtomicInteger(0);
    final Map<Integer, Boolean> assignmentsStable = new ConcurrentHashMap<>();
    final AtomicBoolean assignmentStable = new AtomicBoolean(false);
    final AssignmentListener assignmentListener = stable -> {
        assignmentLock.lock();
        try {
            final int thisAssignmentIndex = assignmentsCompleted.incrementAndGet();
            assignmentsStable.put(thisAssignmentIndex, stable);
            assignmentStable.set(stable);
        } finally {
            assignmentLock.unlock();
        }
    };
    final StreamsBuilder builder = new StreamsBuilder();
    builder.table(inputTopic, materializedFunction.apply(storeName));
    final Topology topology = builder.build();
    final int numberOfRecords = 500;
    produceTestData(inputTopic, numberOfRecords);
    try (final KafkaStreams kafkaStreams0 = new KafkaStreams(topology, streamsProperties(appId, assignmentListener));
        final KafkaStreams kafkaStreams1 = new KafkaStreams(topology, streamsProperties(appId, assignmentListener));
        final Consumer<String, String> consumer = new KafkaConsumer<>(getConsumerProperties())) {
        kafkaStreams0.start();
        // sanity check: just make sure we actually wrote all the input records
        TestUtils.waitForCondition(() -> getEndOffsetSum(inputTopicPartitions, consumer) == numberOfRecords, 120_000L, () -> "Input records haven't all been written to the input topic: " + getEndOffsetSum(inputTopicPartitions, consumer));
        // wait until all the input records are in the changelog
        TestUtils.waitForCondition(() -> getEndOffsetSum(changelogTopicPartitions, consumer) == numberOfRecords, 120_000L, () -> "Input records haven't all been written to the changelog: " + getEndOffsetSum(changelogTopicPartitions, consumer));
        final AtomicLong instance1TotalRestored = new AtomicLong(-1);
        final AtomicLong instance1NumRestored = new AtomicLong(-1);
        final CountDownLatch restoreCompleteLatch = new CountDownLatch(1);
        kafkaStreams1.setGlobalStateRestoreListener(new StateRestoreListener() {

            @Override
            public void onRestoreStart(final TopicPartition topicPartition, final String storeName, final long startingOffset, final long endingOffset) {
            }

            @Override
            public void onBatchRestored(final TopicPartition topicPartition, final String storeName, final long batchEndOffset, final long numRestored) {
                instance1NumRestored.accumulateAndGet(numRestored, (prev, restored) -> prev == -1 ? restored : prev + restored);
            }

            @Override
            public void onRestoreEnd(final TopicPartition topicPartition, final String storeName, final long totalRestored) {
                instance1TotalRestored.accumulateAndGet(totalRestored, (prev, restored) -> prev == -1 ? restored : prev + restored);
                restoreCompleteLatch.countDown();
            }
        });
        final int assignmentsBeforeScaleOut = assignmentsCompleted.get();
        kafkaStreams1.start();
        TestUtils.waitForCondition(() -> {
            assignmentLock.lock();
            try {
                if (assignmentsCompleted.get() > assignmentsBeforeScaleOut) {
                    assertFalseNoRetry(assignmentsStable.get(assignmentsBeforeScaleOut + 1), "the first assignment after adding a node should be unstable while we warm up the state.");
                    return true;
                } else {
                    return false;
                }
            } finally {
                assignmentLock.unlock();
            }
        }, 120_000L, "Never saw a first assignment after scale out: " + assignmentsCompleted.get());
        TestUtils.waitForCondition(assignmentStable::get, 120_000L, "Assignment hasn't become stable: " + assignmentsCompleted.get() + " Note, if this does fail, check and see if the new instance just failed to catch up within" + " the probing rebalance interval. A full minute should be long enough to read ~500 records" + " in any test environment, but you never know...");
        restoreCompleteLatch.await();
        // We should finalize the restoration without having restored any records (because they're already in
        // the store). Otherwise, we failed to properly re-use the state from the standby.
        assertThat(instance1TotalRestored.get(), is(0L));
        // Belt-and-suspenders check that we never even attempt to restore any records.
        assertThat(instance1NumRestored.get(), is(-1L));
    }
}
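getEndOffsetSum is a private helper of this test class and is not shown on this page; a plausible reconstruction, assuming it simply sums Consumer#endOffsets over the given partitions, looks like this:

private static long getEndOffsetSum(final Set<TopicPartition> partitions,
                                    final Consumer<String, String> consumer) {
    long sum = 0;
    // endOffsets() reads each partition's current log-end offset without moving the consumer position.
    final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
    for (final long endOffset : endOffsets.values()) {
        sum += endOffset;
    }
    return sum;
}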

Example 35 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.

From the class StreamThreadTest, method shouldConstructAdminMetrics.

@Test
public void shouldConstructAdminMetrics() {
    final Node broker1 = new Node(0, "dummyHost-1", 1234);
    final Node broker2 = new Node(1, "dummyHost-2", 1234);
    final List<Node> cluster = Arrays.asList(broker1, broker2);
    final MockAdminClient adminClient = new MockAdminClient.Builder().brokers(cluster).clusterId(null).build();
    final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
    final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
    expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
    expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
    EasyMock.replay(consumer, consumerGroupMetadata);
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = new StreamThread(mockTime, config, adminClient, consumer, consumer, null, null, taskManager, streamsMetrics, topologyMetadata, CLIENT_ID, new LogContext(""), new AtomicInteger(), new AtomicLong(Long.MAX_VALUE), new LinkedList<>(), null, HANDLER, null);
    final MetricName testMetricName = new MetricName("test_metric", "", "", new HashMap<>());
    final Metric testMetric = new KafkaMetric(new Object(), testMetricName, (Measurable) (config, now) -> 0, null, new MockTime());
    EasyMock.replay(taskManager);
    adminClient.setMockMetrics(testMetricName, testMetric);
    final Map<MetricName, Metric> adminClientMetrics = thread.adminClientMetrics();
    assertEquals(testMetricName, adminClientMetrics.get(testMetricName).metricName());
}
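The stubbed groupMetadata() mirrors the real Consumer#groupMetadata() API, which exposes the group id, generation, member id, and (for static members only) the group.instance.id. A minimal sketch of reading it from a live consumer, with hypothetical connection settings:

import java.util.Properties;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerGroupMetadata;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class GroupMetadataSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker
        props.put("group.id", "sketch-group");            // hypothetical group
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        try (Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            ConsumerGroupMetadata md = consumer.groupMetadata();
            // groupInstanceId() is Optional.empty() for dynamic (non-static) members, as the test stubs it.
            System.out.println(md.groupId() + " generation=" + md.generationId()
                + " instanceId=" + md.groupInstanceId());
        }
    }
}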

Aggregations

Consumer (org.apache.kafka.clients.consumer.Consumer): 35
Test (org.junit.Test): 22
Map (java.util.Map): 20
TopicPartition (org.apache.kafka.common.TopicPartition): 20
ArrayList (java.util.ArrayList): 17
HashMap (java.util.HashMap): 17
List (java.util.List): 17
Collections (java.util.Collections): 16
Set (java.util.Set): 16
Properties (java.util.Properties): 15
Collection (java.util.Collection): 14
MockConsumer (org.apache.kafka.clients.consumer.MockConsumer): 14
Collectors (java.util.stream.Collectors): 13
KafkaException (org.apache.kafka.common.KafkaException): 11
HashSet (java.util.HashSet): 10
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 10
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 10
Logger (org.slf4j.Logger): 10
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 9
PartitionInfo (org.apache.kafka.common.PartitionInfo): 9