Search in sources :

Example 1 with Whitebox

Use of org.powermock.reflect.Whitebox in the Apache Kafka project.

The method testSendAndReadToEnd of the class KafkaBasedLogTest.

// Verifies that the log store can send records through the (mocked) producer and then
// read everything back up to the current log-end offsets via readToEnd().
// NOTE: ordering is load-bearing here — all EasyMock/PowerMock expectations must be
// recorded before PowerMock.replayAll(), and the scheduled poll tasks encode the exact
// sequence of consumer events the read-to-end loop is expected to observe.
@Test
public void testSendAndReadToEnd() throws Exception {
    expectStart();
    // Expect one producer.send() per record. Each callback is captured so the test can
    // complete the sends manually and observe exactly when the user callback fires.
    TestFuture<RecordMetadata> tp0Future = new TestFuture<>();
    ProducerRecord<String, String> tp0Record = new ProducerRecord<>(TOPIC, TP0_KEY, TP0_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback0 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp0Record), EasyMock.capture(callback0))).andReturn(tp0Future);
    TestFuture<RecordMetadata> tp1Future = new TestFuture<>();
    ProducerRecord<String, String> tp1Record = new ProducerRecord<>(TOPIC, TP1_KEY, TP1_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback1 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp1Record), EasyMock.capture(callback1))).andReturn(tp1Future);
    // Producer flushes when read to log end is called
    producer.flush();
    PowerMock.expectLastCall();
    expectStop();
    PowerMock.replayAll();
    // Both partitions start empty (end offset 0), so start() completes without consuming.
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 0L);
    endOffsets.put(TP1, 0L);
    consumer.updateEndOffsets(endOffsets);
    store.start();
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(0L, consumer.position(TP0));
    assertEquals(0L, consumer.position(TP1));
    // Set some keys
    final AtomicInteger invoked = new AtomicInteger(0);
    org.apache.kafka.clients.producer.Callback producerCallback = (metadata, exception) -> invoked.incrementAndGet();
    store.send(TP0_KEY, TP0_VALUE, producerCallback);
    store.send(TP1_KEY, TP1_VALUE, producerCallback);
    // Neither produce has completed yet, so the shared callback must not have fired.
    assertEquals(0, invoked.get());
    // Output not used, so safe to not return a real value for testing
    // Complete the sends out of order (tp1 before tp0) — each resolution should
    // independently trigger exactly one callback invocation.
    tp1Future.resolve((RecordMetadata) null);
    callback1.getValue().onCompletion(null, null);
    assertEquals(1, invoked.get());
    tp0Future.resolve((RecordMetadata) null);
    callback0.getValue().onCompletion(null, null);
    assertEquals(2, invoked.get());
    // Now we should have to wait for the records to be read back when we call readToEnd()
    final AtomicBoolean getInvoked = new AtomicBoolean(false);
    final FutureCallback<Void> readEndFutureCallback = new FutureCallback<>((error, result) -> getInvoked.set(true));
    consumer.schedulePollTask(() -> {
        // Once we're synchronized in a poll, start the read to end and schedule the exact set of poll events
        // that should follow. This readToEnd call will immediately wakeup this consumer.poll() call without
        // returning any data.
        Map<TopicPartition, Long> newEndOffsets = new HashMap<>();
        newEndOffsets.put(TP0, 2L);
        newEndOffsets.put(TP1, 2L);
        consumer.updateEndOffsets(newEndOffsets);
        store.readToEnd(readEndFutureCallback);
        // Should keep polling until it reaches current log end offset for all partitions
        // Empty polls first: the reader must tolerate polls that return no data while
        // it has not yet reached the end offsets.
        consumer.scheduleNopPollTask();
        consumer.scheduleNopPollTask();
        consumer.scheduleNopPollTask();
        // Then deliver the four records (offsets 0 and 1 on each partition) across two
        // polls; the last record arrives in its own poll so completion is only possible
        // after every partition reaches offset 2.
        consumer.schedulePollTask(() -> {
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE, new RecordHeaders(), Optional.empty()));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TP0_KEY, TP0_VALUE_NEW, new RecordHeaders(), Optional.empty()));
            consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0, 0, TP1_KEY, TP1_VALUE, new RecordHeaders(), Optional.empty()));
        });
        consumer.schedulePollTask(() -> consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TP1_KEY, TP1_VALUE_NEW, new RecordHeaders(), Optional.empty())));
    // Already have FutureCallback that should be invoked/awaited, so no need for follow up finishedLatch
    });
    // Bounded wait so a broken read-to-end loop fails the test instead of hanging it.
    readEndFutureCallback.get(10000, TimeUnit.MILLISECONDS);
    assertTrue(getInvoked.get());
    // All four records should have been consumed, in offset order per partition.
    // NOTE(review): consumedRecords is presumably populated by the store's consume
    // callback wired up in setup — not visible in this chunk; verify against the fixture.
    assertEquals(2, consumedRecords.size());
    assertEquals(2, consumedRecords.get(TP0).size());
    assertEquals(TP0_VALUE, consumedRecords.get(TP0).get(0).value());
    assertEquals(TP0_VALUE_NEW, consumedRecords.get(TP0).get(1).value());
    assertEquals(2, consumedRecords.get(TP1).size());
    assertEquals(TP1_VALUE, consumedRecords.get(TP1).get(0).value());
    assertEquals(TP1_VALUE_NEW, consumedRecords.get(TP1).get(1).value());
    // Cleanup
    store.stop();
    // Reflectively inspect the store's internal work thread to confirm stop() joined it.
    // NOTE(review): Whitebox reaches into private state — brittle if the field is renamed.
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
Also used : MockTime(org.apache.kafka.common.utils.MockTime) Arrays(java.util.Arrays) MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) KafkaException(org.apache.kafka.common.KafkaException) OffsetResetStrategy(org.apache.kafka.clients.consumer.OffsetResetStrategy) ByteBuffer(java.nio.ByteBuffer) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Map(java.util.Map) TimestampType(org.apache.kafka.common.record.TimestampType) CommonClientConfigs(org.apache.kafka.clients.CommonClientConfigs) TopicPartition(org.apache.kafka.common.TopicPartition) Time(org.apache.kafka.common.utils.Time) WakeupException(org.apache.kafka.common.errors.WakeupException) Set(java.util.Set) ConsumerConfig(org.apache.kafka.clients.consumer.ConsumerConfig) PartitionInfo(org.apache.kafka.common.PartitionInfo) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) PowerMock(org.powermock.api.easymock.PowerMock) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) Assert.assertFalse(org.junit.Assert.assertFalse) Errors(org.apache.kafka.common.protocol.Errors) Optional(java.util.Optional) Node(org.apache.kafka.common.Node) Whitebox(org.powermock.reflect.Whitebox) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Assert.assertThrows(org.junit.Assert.assertThrows) RunWith(org.junit.runner.RunWith) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) HashMap(java.util.HashMap) LeaderNotAvailableException(org.apache.kafka.common.errors.LeaderNotAvailableException) AtomicReference(java.util.concurrent.atomic.AtomicReference) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) 
PowerMockRunner(org.powermock.modules.junit4.PowerMockRunner) ProducerConfig(org.apache.kafka.clients.producer.ProducerConfig) PowerMockIgnore(org.powermock.core.classloader.annotations.PowerMockIgnore) Before(org.junit.Before) Capture(org.easymock.Capture) TimeoutException(org.apache.kafka.common.errors.TimeoutException) Assert.assertNotNull(org.junit.Assert.assertNotNull) Mock(org.powermock.api.easymock.annotation.Mock) Assert.assertTrue(org.junit.Assert.assertTrue) Test(org.junit.Test) EasyMock(org.easymock.EasyMock) TimeUnit(java.util.concurrent.TimeUnit) Assert.assertNull(org.junit.Assert.assertNull) UnsupportedVersionException(org.apache.kafka.common.errors.UnsupportedVersionException) Assert.assertEquals(org.junit.Assert.assertEquals) HashMap(java.util.HashMap) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) ConsumerRecord(org.apache.kafka.clients.consumer.ConsumerRecord) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) RecordHeaders(org.apache.kafka.common.header.internals.RecordHeaders) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) TopicPartition(org.apache.kafka.common.TopicPartition) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)

Aggregations

ByteBuffer (java.nio.ByteBuffer)1 ArrayList (java.util.ArrayList)1 Arrays (java.util.Arrays)1 HashMap (java.util.HashMap)1 HashSet (java.util.HashSet)1 List (java.util.List)1 Map (java.util.Map)1 Optional (java.util.Optional)1 Set (java.util.Set)1 CountDownLatch (java.util.concurrent.CountDownLatch)1 TimeUnit (java.util.concurrent.TimeUnit)1 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)1 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)1 AtomicReference (java.util.concurrent.atomic.AtomicReference)1 Supplier (java.util.function.Supplier)1 CommonClientConfigs (org.apache.kafka.clients.CommonClientConfigs)1 ConsumerConfig (org.apache.kafka.clients.consumer.ConsumerConfig)1 ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord)1 MockConsumer (org.apache.kafka.clients.consumer.MockConsumer)1 OffsetResetStrategy (org.apache.kafka.clients.consumer.OffsetResetStrategy)1