
Example 21 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.

From the class StreamThreadTest, the method shouldTransmitTaskManagerMetrics:

@Test
public void shouldTransmitTaskManagerMetrics() {
    final Consumer<byte[], byte[]> consumer = EasyMock.createNiceMock(Consumer.class);
    // Stub the group metadata that the StreamThread queries during construction.
    final ConsumerGroupMetadata consumerGroupMetadata = mock(ConsumerGroupMetadata.class);
    expect(consumer.groupMetadata()).andStubReturn(consumerGroupMetadata);
    expect(consumerGroupMetadata.groupInstanceId()).andReturn(Optional.empty());
    EasyMock.replay(consumer, consumerGroupMetadata);
    // Expose a single dummy metric through the task manager's producer metrics.
    final TaskManager taskManager = EasyMock.createNiceMock(TaskManager.class);
    final MetricName testMetricName = new MetricName("test_metric", "", "", new HashMap<>());
    final Metric testMetric = new KafkaMetric(new Object(), testMetricName, (Measurable) (config, now) -> 0, null, new MockTime());
    final Map<MetricName, Metric> dummyProducerMetrics = singletonMap(testMetricName, testMetric);
    expect(taskManager.producerMetrics()).andReturn(dummyProducerMetrics);
    EasyMock.replay(taskManager);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, StreamsConfig.METRICS_LATEST, mockTime);
    final TopologyMetadata topologyMetadata = new TopologyMetadata(internalTopologyBuilder, config);
    topologyMetadata.buildAndRewriteTopology();
    final StreamThread thread = buildStreamThread(consumer, taskManager, config, topologyMetadata);
    assertThat(dummyProducerMetrics, is(thread.producerMetrics()));
}
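The assertion holds because StreamThread forwards the producer metrics collected by its TaskManager. Conceptually (a simplified sketch, not necessarily the exact production code):

public Map<MetricName, Metric> producerMetrics() {
    // Delegate to the TaskManager, which aggregates metrics from all task producers.
    return taskManager.producerMetrics();
}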
Also used : MockTime(org.apache.kafka.common.utils.MockTime) ConsumerGroupMetadata(org.apache.kafka.clients.consumer.ConsumerGroupMetadata) MetricName(org.apache.kafka.common.MetricName) Metric(org.apache.kafka.common.Metric) KafkaMetric(org.apache.kafka.common.metrics.KafkaMetric) StreamsMetricsImpl(org.apache.kafka.streams.processor.internals.metrics.StreamsMetricsImpl) Test(org.junit.Test)

Example 22 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.

From the class TaskManagerTest, the method shouldReviveCorruptTasks:

@Test
public void shouldReviveCorruptTasks() {
    final ProcessorStateManager stateManager = EasyMock.createStrictMock(ProcessorStateManager.class);
    stateManager.markChangelogAsCorrupted(taskId00Partitions);
    EasyMock.expectLastCall().once();
    replay(stateManager);
    final AtomicBoolean enforcedCheckpoint = new AtomicBoolean(false);
    final StateMachineTask task00 = new StateMachineTask(taskId00, taskId00Partitions, true, stateManager) {

        @Override
        public void postCommit(final boolean enforceCheckpoint) {
            if (enforceCheckpoint) {
                enforcedCheckpoint.set(true);
            }
            super.postCommit(enforceCheckpoint);
        }
    };
    // `handleAssignment`
    expectRestoreToBeCompleted(consumer, changeLogReader);
    expect(activeTaskCreator.createTasks(anyObject(), eq(taskId00Assignment))).andStubReturn(singletonList(task00));
    topologyBuilder.addSubscribedTopicsFromAssignment(anyObject(), anyString());
    expectLastCall().anyTimes();
    expect(consumer.assignment()).andReturn(taskId00Partitions);
    replay(activeTaskCreator, topologyBuilder, consumer, changeLogReader);
    taskManager.handleAssignment(taskId00Assignment, emptyMap());
    assertThat(taskManager.tryToCompleteRestoration(time.milliseconds(), tp -> assertThat(tp, is(empty()))), is(true));
    assertThat(task00.state(), is(Task.State.RUNNING));
    task00.setChangelogOffsets(singletonMap(t1p0, 0L));
    // Corrupting the task should prepare a commit, enforce a checkpoint, and revive it.
    taskManager.handleCorruption(singleton(taskId00));
    assertThat(task00.commitPrepared, is(true));
    assertThat(task00.state(), is(Task.State.CREATED));
    assertThat(task00.partitionsForOffsetReset, equalTo(taskId00Partitions));
    assertThat(enforcedCheckpoint.get(), is(true));
    assertThat(taskManager.activeTaskMap(), is(singletonMap(taskId00, task00)));
    assertThat(taskManager.standbyTaskMap(), Matchers.anEmptyMap());
    verify(stateManager);
    verify(consumer);
}
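The helper expectRestoreToBeCompleted is defined elsewhere in TaskManagerTest. A minimal sketch of the stubbing it plausibly performs, inferred from the verifications above (the real helper may stub additional calls):

private static void expectRestoreToBeCompleted(final Consumer<byte[], byte[]> consumer,
                                               final ChangelogReader changeLogReader) {
    // Pretend the consumer owns one partition and expect it to be resumed after restoration.
    final Set<TopicPartition> assignment = singleton(new TopicPartition("assignment", 0));
    expect(consumer.assignment()).andReturn(assignment);
    consumer.resume(assignment);
    expectLastCall();
    // Report every changelog as fully restored.
    expect(changeLogReader.completedChangelogs()).andReturn(emptySet());
}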
Also used : AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Test(org.junit.Test)

Example 23 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project beam by apache.

From the class KafkaIOTest, the method mkMockConsumer:

// Builds a mock consumer with records distributed among the given topics, each with the given
// number of partitions. Records are assigned in round-robin order among the partitions.
private static MockConsumer<byte[], byte[]> mkMockConsumer(List<String> topics, int partitionsPerTopic, int numElements, OffsetResetStrategy offsetResetStrategy, Map<String, Object> config, SerializableFunction<Integer, byte[]> keyFunction, SerializableFunction<Integer, byte[]> valueFunction) {
    final List<TopicPartition> partitions = new ArrayList<>();
    final Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> records = new HashMap<>();
    Map<String, List<PartitionInfo>> partitionMap = new HashMap<>();
    for (String topic : topics) {
        List<PartitionInfo> partIds = new ArrayList<>(partitionsPerTopic);
        for (int i = 0; i < partitionsPerTopic; i++) {
            TopicPartition tp = new TopicPartition(topic, i);
            partitions.add(tp);
            partIds.add(new PartitionInfo(topic, i, null, null, null));
            records.put(tp, new ArrayList<>());
        }
        partitionMap.put(topic, partIds);
    }
    int numPartitions = partitions.size();
    final long[] offsets = new long[numPartitions];
    long timestampStartMillis = (Long) config.getOrDefault(TIMESTAMP_START_MILLIS_CONFIG, LOG_APPEND_START_TIME.getMillis());
    TimestampType timestampType = TimestampType.forName((String) config.getOrDefault(TIMESTAMP_TYPE_CONFIG, TimestampType.LOG_APPEND_TIME.toString()));
    for (int i = 0; i < numElements; i++) {
        int pIdx = i % numPartitions;
        TopicPartition tp = partitions.get(pIdx);
        byte[] key = keyFunction.apply(i);
        byte[] value = valueFunction.apply(i);
        records.get(tp).add(new ConsumerRecord<>(tp.topic(), tp.partition(), offsets[pIdx]++, timestampStartMillis + Duration.standardSeconds(i).getMillis(), timestampType, 0, key.length, value.length, key, value));
    }
    // This is updated when the reader assigns partitions.
    final AtomicReference<List<TopicPartition>> assignedPartitions = new AtomicReference<>(Collections.<TopicPartition>emptyList());
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer<byte[], byte[]>(offsetResetStrategy) {

        @Override
        public synchronized void assign(final Collection<TopicPartition> assigned) {
            super.assign(assigned);
            assignedPartitions.set(ImmutableList.copyOf(assigned));
            for (TopicPartition tp : assigned) {
                updateBeginningOffsets(ImmutableMap.of(tp, 0L));
                updateEndOffsets(ImmutableMap.of(tp, (long) records.get(tp).size()));
            }
        }

        // Override offsetsForTimes() in order to look up the offsets by timestamp.
        @Override
        public synchronized Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
            return timestampsToSearch.entrySet().stream().map(e -> {
                // In test scope, timestamp == offset.
                long maxOffset = offsets[partitions.indexOf(e.getKey())];
                long offset = e.getValue();
                OffsetAndTimestamp value = (offset >= maxOffset) ? null : new OffsetAndTimestamp(offset, offset);
                return new SimpleEntry<>(e.getKey(), value);
            }).collect(Collectors.toMap(SimpleEntry::getKey, SimpleEntry::getValue));
        }
    };
    for (String topic : topics) {
        consumer.updatePartitions(topic, partitionMap.get(topic));
    }
    // MockConsumer does not maintain any relationship between the partition seek position and
    // the records added. E.g. if we add 10 records to a partition and then seek to the end of
    // the partition, MockConsumer will still return the 10 records on the next poll. It is our
    // responsibility to keep the currently enqueued records in sync with the partition offsets.
    // The following task is invoked inside each call to MockConsumer.poll(); we enqueue only
    // the records with an offset >= the partition's current position.
    Runnable recordEnqueueTask = new Runnable() {

        @Override
        public void run() {
            // add all the records with offset >= current partition position.
            int recordsAdded = 0;
            for (TopicPartition tp : assignedPartitions.get()) {
                long curPos = consumer.position(tp);
                for (ConsumerRecord<byte[], byte[]> r : records.get(tp)) {
                    if (r.offset() >= curPos) {
                        consumer.addRecord(r);
                        recordsAdded++;
                    }
                }
            }
            if (recordsAdded == 0) {
                if (config.get("inject.error.at.eof") != null) {
                    consumer.setException(new KafkaException("Injected error in consumer.poll()"));
                }
                // MockConsumer.poll(timeout) does not actually wait even when there aren't any
                // records.
                // Add a small wait here in order to avoid busy looping in the reader.
                Uninterruptibles.sleepUninterruptibly(10, TimeUnit.MILLISECONDS);
            // TODO: BEAM-4086: testUnboundedSourceWithoutBoundedWrapper() occasionally hangs
            // without this wait. Need to look into it.
            }
            consumer.schedulePollTask(this);
        }
    };
    consumer.schedulePollTask(recordEnqueueTask);
    return consumer;
}
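A hypothetical invocation of the factory above; all parameter values are illustrative:

// 1000 records spread round-robin over 2 topics x 10 partitions each.
MockConsumer<byte[], byte[]> consumer =
    mkMockConsumer(
        ImmutableList.of("topic_a", "topic_b"),
        10,
        1000,
        OffsetResetStrategy.EARLIEST,
        new HashMap<>(), // no timestamp-type or start-time overrides
        i -> String.valueOf(i).getBytes(StandardCharsets.UTF_8),
        i -> String.valueOf(i).getBytes(StandardCharsets.UTF_8));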
Also used : ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Matchers.containsString(org.hamcrest.Matchers.containsString) TimestampType(org.apache.kafka.common.record.TimestampType) PCollectionList(org.apache.beam.sdk.values.PCollectionList) List(java.util.List) ImmutableList(org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableList) PartitionInfo(org.apache.kafka.common.PartitionInfo) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) OffsetAndTimestamp(org.apache.kafka.clients.consumer.OffsetAndTimestamp) AtomicReference(java.util.concurrent.atomic.AtomicReference) TopicPartition(org.apache.kafka.common.TopicPartition) Collection(java.util.Collection) PCollection(org.apache.beam.sdk.values.PCollection) KafkaException(org.apache.kafka.common.KafkaException) ImmutableMap(org.apache.beam.vendor.guava.v26_0_jre.com.google.common.collect.ImmutableMap) Map(java.util.Map) ConcurrentMap(java.util.concurrent.ConcurrentMap)

Example 24 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project beam by apache.

From the class KafkaCommitOffsetTest, the method testCommitOffsetDoFn:

@Test
public void testCommitOffsetDoFn() {
    Map<String, Object> configMap = new HashMap<>();
    configMap.put(ConsumerConfig.GROUP_ID_CONFIG, "group1");
    ReadSourceDescriptors<Object, Object> descriptors = ReadSourceDescriptors.read().withBootstrapServers("bootstrap_server").withConsumerConfigUpdates(configMap).withConsumerFactoryFn(new SerializableFunction<Map<String, Object>, Consumer<byte[], byte[]>>() {

        @Override
        public Consumer<byte[], byte[]> apply(Map<String, Object> input) {
            Assert.assertEquals("group1", input.get(ConsumerConfig.GROUP_ID_CONFIG));
            return consumer;
        }
    });
    CommitOffsetDoFn doFn = new CommitOffsetDoFn(descriptors);
    // Committing element offset 1 stores offset 2, the next offset to be consumed.
    doFn.processElement(KV.of(KafkaSourceDescriptor.of(partition, null, null, null, null, null), 1L));
    Assert.assertEquals(2L, consumer.commit.get(partition).offset());
}
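The consumer and partition fields are fixtures defined elsewhere in KafkaCommitOffsetTest. A plausible sketch, with the commit map name taken from the assertion above and everything else assumed:

private final TopicPartition partition = new TopicPartition("topic", 0);

// Hypothetical stand-in for the test's consumer: captures the offsets passed to
// commitSync() so the assertion can read them back.
private static class CapturingConsumer extends MockConsumer<byte[], byte[]> {
    final Map<TopicPartition, OffsetAndMetadata> commit = new HashMap<>();

    CapturingConsumer() {
        super(OffsetResetStrategy.EARLIEST);
    }

    @Override
    public synchronized void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets) {
        commit.putAll(offsets);
    }
}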
Also used : Consumer(org.apache.kafka.clients.consumer.Consumer) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) HashMap(java.util.HashMap) CommitOffsetDoFn(org.apache.beam.sdk.io.kafka.KafkaCommitOffset.CommitOffsetDoFn) HashMap(java.util.HashMap) Map(java.util.Map) Test(org.junit.Test)

Example 25 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project kafka by apache.

From the class AbstractTaskTest, the method shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenAuthorizationException:

@Test(expected = ProcessorStateException.class)
public void shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenAuthorizationException() throws Exception {
    final Consumer consumer = mockConsumer(new AuthorizationException("blah"));
    final AbstractTask task = createTask(consumer);
    task.initializeOffsetLimits();
}
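The mockConsumer helper is defined elsewhere in AbstractTaskTest. A plausible sketch, assuming the task reads committed offsets during initializeOffsetLimits() (this snippet predates the removal of Consumer.committed(TopicPartition) in newer client versions):

private Consumer<byte[], byte[]> mockConsumer(final RuntimeException toThrow) {
    return new MockConsumer<byte[], byte[]>(OffsetResetStrategy.EARLIEST) {
        @Override
        public synchronized OffsetAndMetadata committed(final TopicPartition partition) {
            // Simulate the broker rejecting the offset lookup, e.g. with an AuthorizationException.
            throw toThrow;
        }
    };
}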
Also used : Consumer(org.apache.kafka.clients.consumer.Consumer) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) AuthorizationException(org.apache.kafka.common.errors.AuthorizationException) Test(org.junit.Test)

Aggregations

Types most frequently used together with Consumer across the collected examples:

Consumer (org.apache.kafka.clients.consumer.Consumer): 35
Test (org.junit.Test): 22
Map (java.util.Map): 20
TopicPartition (org.apache.kafka.common.TopicPartition): 20
ArrayList (java.util.ArrayList): 17
HashMap (java.util.HashMap): 17
List (java.util.List): 17
Collections (java.util.Collections): 16
Set (java.util.Set): 16
Properties (java.util.Properties): 15
Collection (java.util.Collection): 14
MockConsumer (org.apache.kafka.clients.consumer.MockConsumer): 14
Collectors (java.util.stream.Collectors): 13
KafkaException (org.apache.kafka.common.KafkaException): 11
HashSet (java.util.HashSet): 10
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 10
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 10
Logger (org.slf4j.Logger): 10
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 9
PartitionInfo (org.apache.kafka.common.PartitionInfo): 9