Example 21 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

The class KafkaBasedLogTest, method testSendAndReadToEnd.

@Test
public void testSendAndReadToEnd() throws Exception {
    expectStart();
    TestFuture<RecordMetadata> tp0Future = new TestFuture<>();
    ProducerRecord<String, String> tp0Record = new ProducerRecord<>(TOPIC, TP0_KEY, TP0_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback0 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp0Record), EasyMock.capture(callback0))).andReturn(tp0Future);
    TestFuture<RecordMetadata> tp1Future = new TestFuture<>();
    ProducerRecord<String, String> tp1Record = new ProducerRecord<>(TOPIC, TP1_KEY, TP1_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback1 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp1Record), EasyMock.capture(callback1))).andReturn(tp1Future);
    // Producer flushes when read to log end is called
    producer.flush();
    PowerMock.expectLastCall();
    expectStop();
    PowerMock.replayAll();
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 0L);
    endOffsets.put(TP1, 0L);
    consumer.updateEndOffsets(endOffsets);
    store.start();
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(0L, consumer.position(TP0));
    assertEquals(0L, consumer.position(TP1));
    // Set some keys
    final AtomicInteger invoked = new AtomicInteger(0);
    org.apache.kafka.clients.producer.Callback producerCallback = (metadata, exception) -> invoked.incrementAndGet();
    store.send(TP0_KEY, TP0_VALUE, producerCallback);
    store.send(TP1_KEY, TP1_VALUE, producerCallback);
    assertEquals(0, invoked.get());
    // Output not used, so safe to not return a real value for testing
    tp1Future.resolve((RecordMetadata) null);
    callback1.getValue().onCompletion(null, null);
    assertEquals(1, invoked.get());
    tp0Future.resolve((RecordMetadata) null);
    callback0.getValue().onCompletion(null, null);
    assertEquals(2, invoked.get());
    // Now we should have to wait for the records to be read back when we call readToEnd()
    final AtomicBoolean getInvoked = new AtomicBoolean(false);
    final FutureCallback<Void> readEndFutureCallback = new FutureCallback<>((error, result) -> getInvoked.set(true));
    consumer.schedulePollTask(() -> {
        // Once we're synchronized in a poll, start the read to end and schedule the exact set of poll events
        // that should follow. This readToEnd call will immediately wakeup this consumer.poll() call without
        // returning any data.
        Map<TopicPartition, Long> newEndOffsets = new HashMap<>();
        newEndOffsets.put(TP0, 2L);
        newEndOffsets.put(TP1, 2L);
        consumer.updateEndOffsets(newEndOffsets);
        store.readToEnd(readEndFutureCallback);
        // Should keep polling until it reaches current log end offset for all partitions
        consumer.scheduleNopPollTask();
        consumer.scheduleNopPollTask();
        consumer.scheduleNopPollTask();
        consumer.schedulePollTask(() -> {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE, new RecordHeaders(), Optional.empty()));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE_NEW, new RecordHeaders(), Optional.empty()));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP1_KEY, TP1_VALUE, new RecordHeaders(), Optional.empty()));
        });
        consumer.schedulePollTask(() -> consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TP1_KEY, TP1_VALUE_NEW, new RecordHeaders(), Optional.empty())));
    // Already have FutureCallback that should be invoked/awaited, so no need for follow up finishedLatch
    });
    readEndFutureCallback.get(10000, TimeUnit.MILLISECONDS);
    assertTrue(getInvoked.get());
    assertEquals(2, consumedRecords.size());
    assertEquals(2, consumedRecords.get(TP0).size());
    assertEquals(TP0_VALUE, consumedRecords.get(TP0).get(0).value());
    assertEquals(TP0_VALUE_NEW, consumedRecords.get(TP0).get(1).value());
    assertEquals(2, consumedRecords.get(TP1).size());
    assertEquals(TP1_VALUE, consumedRecords.get(TP1).get(0).value());
    assertEquals(TP1_VALUE_NEW, consumedRecords.get(TP1).get(1).value());
    // Cleanup
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
Also used: MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) KafkaException(org.apache.kafka.common.KafkaException) OffsetResetStrategy(org.apache.kafka.clients.consumer.OffsetResetStrategy) ByteBuffer(java.nio.ByteBuffer) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) TimestampType(org.apache.kafka.common.record.TimestampType) CommonClientConfigs(org.apache.kafka.clients.CommonClientConfigs) TopicPartition(org.apache.kafka.common.TopicPartition) Time(org.apache.kafka.common.utils.Time) WakeupException(org.apache.kafka.common.errors.WakeupException) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) PartitionInfo(org.apache.kafka.common.PartitionInfo) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) PowerMock(org.powermock.api.easymock.PowerMock) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Assert.assertFalse(org.junit.Assert.assertFalse) Errors(org.apache.kafka.common.protocol.Errors) Optional(java.util.Optional) Node(org.apache.kafka.common.Node) Whitebox(org.powermock.reflect.Whitebox) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Assert.assertThrows(org.junit.Assert.assertThrows) RunWith(org.junit.runner.RunWith) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) LeaderNotAvailableException(org.apache.kafka.common.errors.LeaderNotAvailableException) AtomicReference(java.util.concurrent.atomic.AtomicReference) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) PowerMockRunner(org.powermock.modules.junit4.PowerMockRunner) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) PowerMockIgnore(org.powermock.core.classloader.annotations.PowerMockIgnore) Before(org.junit.Before) Capture(org.easymock.Capture) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Assert.assertNotNull(org.junit.Assert.assertNotNull) Mock(org.powermock.api.easymock.annotation.Mock) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) EasyMock(org.easymock.EasyMock) TimeUnit(java.util.concurrent.TimeUnit) Assert.assertNull(org.junit.Assert.assertNull) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException) Assert.assertEquals(org.junit.Assert.assertEquals)
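
In the KafkaBasedLogTest examples, RecordHeaders appears mainly as an empty headers object passed to the ConsumerRecord constructor that MockConsumer.addRecord expects. Below is a minimal standalone sketch of that construction, not part of the test itself; the class name, topic, key and value are placeholders rather than the test's constants.

import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;

public class ConsumerRecordWithHeadersSketch {
    public static void main(String[] args) {
        // new RecordHeaders() creates an empty, mutable Headers implementation, exactly as used in the test.
        RecordHeaders headers = new RecordHeaders();
        // Same constructor shape the test uses: (topic, partition, offset, timestamp, timestampType,
        // serializedKeySize, serializedValueSize, key, value, headers, leaderEpoch).
        ConsumerRecord<String, String> record = new ConsumerRecord<>(
            "some-topic",              // placeholder topic
            0,                         // partition
            0L,                        // offset
            0L,                        // timestamp
            TimestampType.CREATE_TIME,
            0, 0,                      // serialized key/value sizes (unused by these tests)
            "some-key", "some-value",  // placeholder key and value
            headers,
            Optional.empty());         // no leader epoch
        System.out.println(record.headers()); // prints the (empty) RecordHeaders
    }
}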

Example 22 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

The class KafkaBasedLogTest, method testReloadOnStart.

@Test
public void testReloadOnStart() throws Exception {
    expectStart();
    expectStop();
    PowerMock.replayAll();
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 1L);
    endOffsets.put(TP1, 1L);
    consumer.updateEndOffsets(endOffsets);
    final CountDownLatch finishedLatch = new CountDownLatch(1);
    consumer.schedulePollTask(() -> {
        // Use first poll task to setup sequence of remaining responses to polls
        // Should keep polling until it reaches current log end offset for all partitions. Should handle
        // as many empty polls as needed
        consumer.scheduleNopPollTask();
        consumer.scheduleNopPollTask();
        consumer.schedulePollTask(() -> consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE, new RecordHeaders(), Optional.empty())));
        consumer.scheduleNopPollTask();
        consumer.scheduleNopPollTask();
        consumer.schedulePollTask(() -> consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP1_KEY, TP1_VALUE, new RecordHeaders(), Optional.empty())));
        consumer.schedulePollTask(finishedLatch::countDown);
    });
    store.start();
    assertTrue(finishedLatch.await(10000, TimeUnit.MILLISECONDS));
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(2, consumedRecords.size());
    assertEquals(TP0_VALUE, consumedRecords.get(TP0).get(0).value());
    assertEquals(TP1_VALUE, consumedRecords.get(TP1).get(0).value());
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) CountDownLatch(java.util.concurrent.CountDownLatch) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
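
The reload-on-start flow above relies on MockConsumer's poll-task scheduling: updateEndOffsets sets the target offsets, and schedulePollTask/scheduleNopPollTask/addRecord decide what each subsequent poll() returns. Below is a self-contained sketch of just that mechanism, outside of KafkaBasedLog; the class name, topic, key and value are placeholders.

import java.time.Duration;
import java.util.Collections;
import java.util.Optional;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.apache.kafka.common.record.TimestampType;

public class MockConsumerPollSketch {
    public static void main(String[] args) {
        TopicPartition tp = new TopicPartition("some-topic", 0);  // placeholder topic
        MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        consumer.assign(Collections.singletonList(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
        consumer.updateEndOffsets(Collections.singletonMap(tp, 1L));

        // Queue behavior for future poll() calls: first an empty poll, then one that delivers a record.
        consumer.scheduleNopPollTask();
        consumer.schedulePollTask(() -> consumer.addRecord(new ConsumerRecord<>(
            "some-topic", 0, 0L, 0L, TimestampType.CREATE_TIME, 0, 0,
            "some-key", "some-value", new RecordHeaders(), Optional.empty())));

        System.out.println(consumer.poll(Duration.ZERO).count()); // 0 (the nop poll)
        System.out.println(consumer.poll(Duration.ZERO).count()); // 1 (the scheduled record)
    }
}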

Example 23 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

The class KafkaBasedLogTest, method testPollConsumerError.

@Test
public void testPollConsumerError() throws Exception {
    expectStart();
    expectStop();
    PowerMock.replayAll();
    final CountDownLatch finishedLatch = new CountDownLatch(1);
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 1L);
    endOffsets.put(TP1, 1L);
    consumer.updateEndOffsets(endOffsets);
    consumer.schedulePollTask(() -> {
        // Trigger exception
        consumer.schedulePollTask(() -> consumer.setPollException(Errors.COORDINATOR_NOT_AVAILABLE.exception()));
        // Should keep polling until it reaches current log end offset for all partitions
        consumer.scheduleNopPollTask();
        consumer.scheduleNopPollTask();
        consumer.schedulePollTask(() -> {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE_NEW, new RecordHeaders(), Optional.empty()));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE_NEW, new RecordHeaders(), Optional.empty()));
        });
        consumer.schedulePollTask(finishedLatch::countDown);
    });
    store.start();
    assertTrue(finishedLatch.await(10000, TimeUnit.MILLISECONDS));
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(1L, consumer.position(TP0));
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) HashMap(java.util.HashMap) TopicPartition(org.apache.kafka.common.TopicPartition) CountDownLatch(java.util.concurrent.CountDownLatch) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
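
The error path above uses MockConsumer.setPollException, which arms the next poll() to throw and then clears itself; the test shows the log's poll loop surviving that and continuing to read to the end offsets. A minimal sketch of just the MockConsumer side, with placeholder names:

import java.time.Duration;
import java.util.Collections;
import org.apache.kafka.clients.consumer.MockConsumer;
import org.apache.kafka.clients.consumer.OffsetResetStrategy;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.Errors;

public class MockConsumerPollErrorSketch {
    public static void main(String[] args) {
        TopicPartition tp = new TopicPartition("some-topic", 0);  // placeholder topic
        MockConsumer<String, String> consumer = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
        consumer.assign(Collections.singletonList(tp));
        consumer.updateBeginningOffsets(Collections.singletonMap(tp, 0L));

        // Arm the next poll() with the same retriable error the test injects.
        consumer.setPollException(Errors.COORDINATOR_NOT_AVAILABLE.exception());
        try {
            consumer.poll(Duration.ZERO);
        } catch (KafkaException e) {
            // In the test above, KafkaBasedLog's work thread logs this and keeps polling.
            System.out.println("poll failed once: " + e);
        }

        // The injected exception has been consumed, so the next poll succeeds (and is simply empty here).
        System.out.println(consumer.poll(Duration.ZERO).count()); // 0
    }
}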

Example 24 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

The class RecordCollectorTest, method shouldThrowInformativeStreamsExceptionOnValueClassCastException.

@SuppressWarnings({ "unchecked", "rawtypes" })
@Test
public void shouldThrowInformativeStreamsExceptionOnValueClassCastException() {
    final StreamsException expected = assertThrows(StreamsException.class, () -> this.collector.send(
        "topic", "key", "value", new RecordHeaders(), 0, 0L,
        new StringSerializer(),
        // need to add cast to trigger `ClassCastException`
        (Serializer) new LongSerializer()));
    assertThat(expected.getCause(), instanceOf(ClassCastException.class));
    assertThat(expected.getMessage(), equalTo("ClassCastException while producing data to topic topic. " + "A serializer (key: org.apache.kafka.common.serialization.StringSerializer / value: org.apache.kafka.common.serialization.LongSerializer) " + "is not compatible to the actual key or value type (key type: java.lang.String / value type: java.lang.String). " + "Change the default Serdes in StreamConfig or provide correct Serdes via method parameters " + "(for example if using the DSL, `#to(String topic, Produced<K, V> produced)` with `Produced.keySerde(WindowedSerdes.timeWindowedSerdeFrom(String.class))`)."));
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) LongSerializer(org.apache.kafka.common.serialization.LongSerializer) StreamsException(org.apache.kafka.streams.errors.StreamsException) StringSerializer(org.apache.kafka.common.serialization.StringSerializer) ByteArraySerializer(org.apache.kafka.common.serialization.ByteArraySerializer) Serializer(org.apache.kafka.common.serialization.Serializer) Test(org.junit.Test)
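
The raw (Serializer) cast is what makes this test compile: it erases LongSerializer's Long type parameter, so handing it a String value only fails at runtime inside the serializer's bridge method, and the collector wraps that ClassCastException in the StreamsException asserted above. A minimal sketch of just that failure, outside of RecordCollector; the class name and topic are placeholders.

import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.Serializer;

public class RawSerializerCastSketch {
    @SuppressWarnings({ "unchecked", "rawtypes" })
    public static void main(String[] args) {
        // The raw reference hides the Long type parameter, so this call compiles...
        Serializer rawSerializer = new LongSerializer();
        try {
            // ...but at runtime the bridge method's cast to Long fails before any bytes are produced.
            rawSerializer.serialize("some-topic", "value");
        } catch (ClassCastException e) {
            System.out.println("Caught: " + e); // java.lang.String cannot be cast to java.lang.Long
        }
    }
}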

Example 25 with RecordHeaders

Use of org.apache.kafka.common.header.internals.RecordHeaders in project kafka by apache.

The class RecordCollectorTest, method shouldSendWithNoPartition.

@Test
public void shouldSendWithNoPartition() {
    final Headers headers = new RecordHeaders(new Header[] { new RecordHeader("key", "value".getBytes()) });
    collector.send(topic, "3", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "9", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "27", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "81", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "243", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "28", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "82", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "244", "0", headers, null, null, stringSerializer, stringSerializer);
    collector.send(topic, "245", "0", headers, null, null, stringSerializer, stringSerializer);
    final Map<TopicPartition, Long> offsets = collector.offsets();
    // with mock producer without specific partition, we would use default producer partitioner with murmur hash
    assertEquals(3L, (long) offsets.get(new TopicPartition(topic, 0)));
    assertEquals(2L, (long) offsets.get(new TopicPartition(topic, 1)));
    assertEquals(1L, (long) offsets.get(new TopicPartition(topic, 2)));
    assertEquals(9, mockProducer.history().size());
}
Also used: RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) Headers(org.apache.kafka.common.header.Headers) TopicPartition(org.apache.kafka.common.TopicPartition) RecordHeader(org.apache.kafka.common.header.internals.RecordHeader) Test(org.junit.Test)
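
For comparison with the array-based constructor above, a RecordHeaders instance can also be built up incrementally through the Headers API (add, lastHeader). A small standalone sketch follows; the class name and the header keys and values are placeholders.

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class RecordHeadersSketch {
    public static void main(String[] args) {
        // Same constructor as the test: seed the headers with an initial Header array.
        Headers headers = new RecordHeaders(
            new Header[] { new RecordHeader("key", "value".getBytes(StandardCharsets.UTF_8)) });

        // add() appends; duplicate keys are allowed and insertion order is preserved.
        headers.add("trace-id", "abc123".getBytes(StandardCharsets.UTF_8));

        // lastHeader() returns the most recently added header for a key, or null if the key is absent.
        Header last = headers.lastHeader("trace-id");
        System.out.println(last.key() + " = " + new String(last.value(), StandardCharsets.UTF_8));
    }
}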

Aggregations

RecordHeaders (org.apache.kafka.common.header.internals.RecordHeaders)156 Test (org.junit.Test)111 ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)52 ProcessorRecordContext (org.apache.kafka.streams.processor.internals.ProcessorRecordContext)41 Headers (org.apache.kafka.common.header.Headers)34 RecordHeader (org.apache.kafka.common.header.internals.RecordHeader)27 PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest)24 TopicPartition (org.apache.kafka.common.TopicPartition)22 Position (org.apache.kafka.streams.query.Position)17 ArrayList (java.util.ArrayList)13 Header (org.apache.kafka.common.header.Header)13 HashMap (java.util.HashMap)12 ByteBuffer (java.nio.ByteBuffer)11 Struct (org.apache.kafka.connect.data.Struct)11 Test (org.junit.jupiter.api.Test)11 LinkedHashMap (java.util.LinkedHashMap)9 Bytes (org.apache.kafka.common.utils.Bytes)9 StreamsException (org.apache.kafka.streams.errors.StreamsException)9 ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord)8 Metrics (org.apache.kafka.common.metrics.Metrics)8