Example 26 with MockProducer

Use of org.apache.kafka.clients.producer.MockProducer in project hono by Eclipse.

The class CachingKafkaProducerFactoryTest, method setUp.

@BeforeEach
void setUp() {
    final VertxInternal vertxMock = mock(VertxInternal.class);
    final ContextInternal context = VertxMockSupport.mockContextInternal(vertxMock);
    final PromiseInternal<Void> promiseInternal = VertxMockSupport.promiseInternal();
    when(promiseInternal.future()).thenReturn(Future.succeededFuture());
    // hand out the pre-configured promise whenever the mocked context creates one
    doAnswer(invocation -> promiseInternal).when(context).promise();
    when(vertxMock.getOrCreateContext()).thenReturn(context);
    // run blocking code synchronously and pass the outcome to the result handler
    doAnswer(invocation -> {
        final Promise<Object> result = Promise.promise();
        final Handler<Future<Object>> blockingCode = invocation.getArgument(0);
        final Handler<AsyncResult<Object>> resultHandler = invocation.getArgument(1);
        result.future().onComplete(resultHandler);
        blockingCode.handle(result.future());
        return null;
    }).when(context).executeBlocking(VertxMockSupport.anyHandler(), VertxMockSupport.anyHandler());
    // let the factory create producers that wrap a MockProducer instead of a real Kafka client
    final BiFunction<String, Map<String, String>, KafkaProducer<String, Buffer>> instanceSupplier = (n, c) -> {
        final MockProducer<String, Buffer> mockProducer = new MockProducer<>(true, new StringSerializer(), new BufferSerializer());
        return KafkaProducer.create(vertxMock, mockProducer);
    };
    factory = CachingKafkaProducerFactory.testFactory(vertxMock, instanceSupplier);
    configProperties.setProducerConfig(Map.of("bootstrap.servers", "localhost:9092"));
}
Also used : KafkaProducer(io.vertx.kafka.client.producer.KafkaProducer) BeforeEach(org.junit.jupiter.api.BeforeEach) BufferSerializer(io.vertx.kafka.client.serialization.BufferSerializer) BiFunction(java.util.function.BiFunction) KafkaException(org.apache.kafka.common.KafkaException) ContextInternal(io.vertx.core.impl.ContextInternal) Mockito.doAnswer(org.mockito.Mockito.doAnswer) Map(java.util.Map) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) AsyncResult(io.vertx.core.AsyncResult) VertxInternal(io.vertx.core.impl.VertxInternal) PromiseInternal(io.vertx.core.impl.future.PromiseInternal) AuthorizationException(org.apache.kafka.common.errors.AuthorizationException) Promise(io.vertx.core.Promise) OutOfOrderSequenceException(org.apache.kafka.common.errors.OutOfOrderSequenceException) Mockito.when(org.mockito.Mockito.when) Truth.assertThat(com.google.common.truth.Truth.assertThat) Future(io.vertx.core.Future) Test(org.junit.jupiter.api.Test) Buffer(io.vertx.core.buffer.Buffer) VertxMockSupport(org.eclipse.hono.test.VertxMockSupport) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) UnsupportedForMessageFormatException(org.apache.kafka.common.errors.UnsupportedForMessageFormatException) Optional(java.util.Optional) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException) Handler(io.vertx.core.Handler) MockProducer(org.apache.kafka.clients.producer.MockProducer) Mockito.mock(org.mockito.Mockito.mock)
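For context, a test that builds on this setUp might verify the caching behaviour along the following lines. This is only a sketch: the getOrCreateProducer method name and signature are assumed rather than taken from the snippet above, and Truth's isSameInstanceAs is used for the identity check.

@Test
void testGetOrCreateProducerReturnsCachedInstance() {
    // assumed factory API: requesting a producer twice under the same name
    // should yield the cached instance, not a second call to the instance supplier
    final KafkaProducer<String, Buffer> first = factory.getOrCreateProducer("test-producer", configProperties);
    final KafkaProducer<String, Buffer> second = factory.getOrCreateProducer("test-producer", configProperties);
    assertThat(second).isSameInstanceAs(first);
}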

Example 27 with MockProducer

Use of org.apache.kafka.clients.producer.MockProducer in project apache-kafka-on-k8s by banzaicloud.

The class StreamThreadTest, method shouldCloseTaskAsZombieAndRemoveFromActiveTasksIfProducerWasFencedWhileProcessing.

@Test
public void shouldCloseTaskAsZombieAndRemoveFromActiveTasksIfProducerWasFencedWhileProcessing() throws Exception {
    internalTopologyBuilder.addSource(null, "source", null, null, null, topic1);
    internalTopologyBuilder.addSink("sink", "dummyTopic", null, null, null, "source");
    final StreamThread thread = createStreamThread(clientId, new StreamsConfig(configProps(true)), true);
    final MockConsumer<byte[], byte[]> consumer = clientSupplier.consumer;
    consumer.updatePartitions(topic1, Collections.singletonList(new PartitionInfo(topic1, 1, null, null, null)));
    thread.setState(StreamThread.State.RUNNING);
    thread.rebalanceListener.onPartitionsRevoked(null);
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign single partition
    assignedPartitions.add(t1p1);
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().setAssignmentMetadata(activeTasks, Collections.<TaskId, Set<TopicPartition>>emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.consumer;
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener.onPartitionsAssigned(assignedPartitions);
    thread.runOnce(-1);
    assertThat(thread.tasks().size(), equalTo(1));
    final MockProducer producer = clientSupplier.producers.get(0);
    // change consumer subscription from "pattern" to "manual" to be able to call .addRecords()
    consumer.updateBeginningOffsets(Collections.singletonMap(assignedPartitions.iterator().next(), 0L));
    consumer.unsubscribe();
    consumer.assign(new HashSet<>(assignedPartitions));
    consumer.addRecord(new ConsumerRecord<>(topic1, 1, 0, new byte[0], new byte[0]));
    mockTime.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG) + 1);
    thread.runOnce(-1);
    assertThat(producer.history().size(), equalTo(1));
    assertFalse(producer.transactionCommitted());
    mockTime.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG) + 1L);
    TestUtils.waitForCondition(new TestCondition() {

        @Override
        public boolean conditionMet() {
            return producer.commitCount() == 1;
        }
    }, "StreamsThread did not commit transaction.");
    // fence the transactional producer so that the next commit attempt fails
    producer.fenceProducer();
    mockTime.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG) + 1L);
    consumer.addRecord(new ConsumerRecord<>(topic1, 1, 0, new byte[0], new byte[0]));
    try {
        thread.runOnce(-1);
        fail("Should have thrown TaskMigratedException");
    } catch (final TaskMigratedException expected) {
    /* ignore */
    }
    TestUtils.waitForCondition(new TestCondition() {

        @Override
        public boolean conditionMet() {
            return thread.tasks().isEmpty();
        }
    }, "StreamsThread did not remove fenced zombie task.");
    assertThat(producer.commitCount(), equalTo(1L));
}
Also used : TaskId(org.apache.kafka.streams.processor.TaskId) Set(java.util.Set) HashSet(java.util.HashSet) MockProducer(org.apache.kafka.clients.producer.MockProducer) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) TopicPartition(org.apache.kafka.common.TopicPartition) TestCondition(org.apache.kafka.test.TestCondition) PartitionInfo(org.apache.kafka.common.PartitionInfo) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) StreamsConfig(org.apache.kafka.streams.StreamsConfig) TaskMigratedException(org.apache.kafka.streams.errors.TaskMigratedException) InternalStreamsBuilderTest(org.apache.kafka.streams.kstream.internals.InternalStreamsBuilderTest) Test(org.junit.Test)
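The fencing mechanics this test relies on can be shown with MockProducer alone. The snippet below is a standalone sketch, not part of StreamThreadTest, and the exact exception thrown after fenceProducer() may vary slightly between Kafka client versions.

final MockProducer<byte[], byte[]> producer = new MockProducer<>(true, new ByteArraySerializer(), new ByteArraySerializer());
producer.initTransactions();
producer.beginTransaction();
producer.send(new ProducerRecord<>("dummyTopic", new byte[0]));
producer.commitTransaction();
assertThat(producer.commitCount(), equalTo(1L));
// once fenced, further transactional calls are rejected
producer.fenceProducer();
try {
    producer.beginTransaction();
    fail("expected the fenced producer to reject the transaction");
} catch (final ProducerFencedException expected) {
    /* ignore */
}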

Example 28 with MockProducer

Use of org.apache.kafka.clients.producer.MockProducer in project apache-kafka-on-k8s by banzaicloud.

The class RecordCollectorTest, method shouldNotThrowStreamsExceptionOnCloseIfASendFailedWithContinueExceptionHandler.

@SuppressWarnings("unchecked")
@Test
public void shouldNotThrowStreamsExceptionOnCloseIfASendFailedWithContinueExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            callback.onCompletion(null, new Exception());
            return null;
        }
    }, "test", logContext, new AlwaysContinueProductionExceptionHandler());
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    collector.close();
}
Also used : AlwaysContinueProductionExceptionHandler(org.apache.kafka.streams.errors.AlwaysContinueProductionExceptionHandler) MockProducer(org.apache.kafka.clients.producer.MockProducer) Callback(org.apache.kafka.clients.producer.Callback) DefaultPartitioner(org.apache.kafka.clients.producer.internals.DefaultPartitioner) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Future(java.util.concurrent.Future) KafkaException(org.apache.kafka.common.KafkaException) StreamsException(org.apache.kafka.streams.errors.StreamsException) Test(org.junit.Test)
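The AlwaysContinueProductionExceptionHandler used here lives in Kafka's test sources; a handler with the same effect roughly looks like the class below. This is a sketch of the ProductionExceptionHandler contract, not a copy of Kafka's test class.

public class ContinueOnErrorHandler implements ProductionExceptionHandler {

    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record, final Exception exception) {
        // swallow the send failure so the RecordCollector keeps processing
        return ProductionExceptionHandlerResponse.CONTINUE;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // no configuration needed
    }
}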

Example 29 with MockProducer

Use of org.apache.kafka.clients.producer.MockProducer in project apache-kafka-on-k8s by banzaicloud.

The class RecordCollectorTest, method shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler.

@SuppressWarnings("unchecked")
@Test
public void shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            callback.onCompletion(null, new Exception());
            return null;
        }
    }, "test", logContext, new AlwaysContinueProductionExceptionHandler());
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
}
Also used : AlwaysContinueProductionExceptionHandler(org.apache.kafka.streams.errors.AlwaysContinueProductionExceptionHandler) MockProducer(org.apache.kafka.clients.producer.MockProducer) Callback(org.apache.kafka.clients.producer.Callback) DefaultPartitioner(org.apache.kafka.clients.producer.internals.DefaultPartitioner) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Future(java.util.concurrent.Future) KafkaException(org.apache.kafka.common.KafkaException) StreamsException(org.apache.kafka.streams.errors.StreamsException) Test(org.junit.Test)
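Subclassing MockProducer is one way to force a failing send; when auto-completion is disabled, MockProducer can also inject a failure via errorNext(). The snippet below is a general MockProducer illustration and is not taken from RecordCollectorTest; the topic name and exception are illustrative.

final MockProducer<byte[], byte[]> producer = new MockProducer<>(false, new ByteArraySerializer(), new ByteArraySerializer());
final AtomicReference<Exception> sendError = new AtomicReference<>();
producer.send(new ProducerRecord<>("topic1", new byte[0]), new Callback() {

    @Override
    public void onCompletion(final RecordMetadata metadata, final Exception exception) {
        sendError.set(exception);
    }
});
// complete the pending send exceptionally; the callback above receives the exception
producer.errorNext(new KafkaException("simulated send failure"));
assertTrue(sendError.get() instanceof KafkaException);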

Example 30 with MockProducer

Use of org.apache.kafka.clients.producer.MockProducer in project apache-kafka-on-k8s by banzaicloud.

The class RecordCollectorTest, method shouldThrowIfTopicIsUnknownWithDefaultExceptionHandler.

@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowIfTopicIsUnknownWithDefaultExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

        @Override
        public List<PartitionInfo> partitionsFor(final String topic) {
            return Collections.EMPTY_LIST;
        }
    }, "test", logContext, new DefaultProductionExceptionHandler());
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
}
Also used : MockProducer(org.apache.kafka.clients.producer.MockProducer) DefaultPartitioner(org.apache.kafka.clients.producer.internals.DefaultPartitioner) DefaultProductionExceptionHandler(org.apache.kafka.streams.errors.DefaultProductionExceptionHandler) List(java.util.List) Test(org.junit.Test)
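MockProducer.partitionsFor() answers from the Cluster handed to its constructor (at least in the Kafka version these tests target), which is why overriding it to return an empty list simulates an unknown topic. The cluster fixture used above can be built roughly as follows; the broker node and topic name are illustrative, not taken from RecordCollectorTest.

final Node broker = new Node(0, "localhost", 9092);
final Cluster cluster = new Cluster("test-cluster",
        Collections.singletonList(broker),
        Collections.singletonList(new PartitionInfo("topic1", 0, broker, new Node[] { broker }, new Node[] { broker })),
        Collections.<String>emptySet(),
        Collections.<String>emptySet());
final MockProducer<byte[], byte[]> producer = new MockProducer<>(cluster, true, new DefaultPartitioner(), new ByteArraySerializer(), new ByteArraySerializer());
// a topic present in the cluster metadata reports its partitions
assertFalse(producer.partitionsFor("topic1").isEmpty());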

Aggregations

MockProducer (org.apache.kafka.clients.producer.MockProducer): 32
Test (org.junit.Test): 25
DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner): 17
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 14
StreamsException (org.apache.kafka.streams.errors.StreamsException): 14
Callback (org.apache.kafka.clients.producer.Callback): 13
Future (java.util.concurrent.Future): 12
KafkaException (org.apache.kafka.common.KafkaException): 10
Map (java.util.Map): 8
HashMap (java.util.HashMap): 7
Truth.assertThat (com.google.common.truth.Truth.assertThat): 6
Vertx (io.vertx.core.Vertx): 5
Buffer (io.vertx.core.buffer.Buffer): 5
VertxExtension (io.vertx.junit5.VertxExtension): 5
VertxTestContext (io.vertx.junit5.VertxTestContext): 5
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 5
KafkaClientUnitTestHelper (org.eclipse.hono.kafka.test.KafkaClientUnitTestHelper): 5
BeforeEach (org.junit.jupiter.api.BeforeEach): 5
Test (org.junit.jupiter.api.Test): 5
ExtendWith (org.junit.jupiter.api.extension.ExtendWith): 5