Example 41 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From the class RequestResponseTest, method createOffsetFetchResponse:

private OffsetFetchResponse createOffsetFetchResponse() {
    Map<TopicPartition, OffsetFetchResponse.PartitionData> responseData = new HashMap<>();
    responseData.put(new TopicPartition("test", 0), new OffsetFetchResponse.PartitionData(100L, "", Errors.NONE));
    responseData.put(new TopicPartition("test", 1), new OffsetFetchResponse.PartitionData(100L, null, Errors.NONE));
    return new OffsetFetchResponse(Errors.NONE, responseData);
}
Also used: HashMap (java.util.HashMap), LinkedHashMap (java.util.LinkedHashMap), TopicPartition (org.apache.kafka.common.TopicPartition)
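
As a companion, here is a minimal sketch of walking such a response map. It assumes this Kafka version's accessors (responseData() on OffsetFetchResponse, and the public offset, metadata, and error fields on PartitionData) and reuses the imports listed above; printCommittedOffsets is a hypothetical helper, not part of the test class.

private void printCommittedOffsets(OffsetFetchResponse response) {
    // Walk the partition-to-data map built in createOffsetFetchResponse(),
    // skipping any partition that reports a protocol-level error
    for (Map.Entry<TopicPartition, OffsetFetchResponse.PartitionData> entry : response.responseData().entrySet()) {
        OffsetFetchResponse.PartitionData data = entry.getValue();
        if (data.error != Errors.NONE)
            continue;
        System.out.printf("%s -> offset %d (metadata=%s)%n", entry.getKey(), data.offset, data.metadata);
    }
}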

Example 42 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From the class KafkaBasedLogTest, method testSendAndReadToEnd:

@Test
public void testSendAndReadToEnd() throws Exception {
    expectStart();
    TestFuture<RecordMetadata> tp0Future = new TestFuture<>();
    ProducerRecord<String, String> tp0Record = new ProducerRecord<>(TOPIC, TP0_KEY, TP0_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback0 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp0Record), EasyMock.capture(callback0))).andReturn(tp0Future);
    TestFuture<RecordMetadata> tp1Future = new TestFuture<>();
    ProducerRecord<String, String> tp1Record = new ProducerRecord<>(TOPIC, TP1_KEY, TP1_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback1 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp1Record), EasyMock.capture(callback1))).andReturn(tp1Future);
    // Producer flushes when read to log end is called
    producer.flush();
    PowerMock.expectLastCall();
    expectStop();
    PowerMock.replayAll();
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 0L);
    endOffsets.put(TP1, 0L);
    consumer.updateEndOffsets(endOffsets);
    store.start();
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(0L, consumer.position(TP0));
    assertEquals(0L, consumer.position(TP1));
    // Set some keys
    final AtomicInteger invoked = new AtomicInteger(0);
    org.apache.kafka.clients.producer.Callback producerCallback = new org.apache.kafka.clients.producer.Callback() {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            invoked.incrementAndGet();
        }
    };
    store.send(TP0_KEY, TP0_VALUE, producerCallback);
    store.send(TP1_KEY, TP1_VALUE, producerCallback);
    assertEquals(0, invoked.get());
    // Output not used, so safe to not return a real value for testing
    tp1Future.resolve((RecordMetadata) null);
    callback1.getValue().onCompletion(null, null);
    assertEquals(1, invoked.get());
    tp0Future.resolve((RecordMetadata) null);
    callback0.getValue().onCompletion(null, null);
    assertEquals(2, invoked.get());
    // Now we should have to wait for the records to be read back when we call readToEnd()
    final AtomicBoolean getInvoked = new AtomicBoolean(false);
    final FutureCallback<Void> readEndFutureCallback = new FutureCallback<>(new Callback<Void>() {

        @Override
        public void onCompletion(Throwable error, Void result) {
            getInvoked.set(true);
        }
    });
    consumer.schedulePollTask(new Runnable() {

        @Override
        public void run() {
            // Once we're synchronized in a poll, start the read to end and schedule the exact set of poll events
            // that should follow. This readToEnd call will immediately wakeup this consumer.poll() call without
            // returning any data.
            Map<TopicPartition, Long> newEndOffsets = new HashMap<>();
            newEndOffsets.put(TP0, 2L);
            newEndOffsets.put(TP1, 2L);
            consumer.updateEndOffsets(newEndOffsets);
            store.readToEnd(readEndFutureCallback);
            // Should keep polling until it reaches current log end offset for all partitions
            consumer.scheduleNopPollTask();
            consumer.scheduleNopPollTask();
            consumer.scheduleNopPollTask();
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY, TP0_VALUE));
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY, TP0_VALUE_NEW));
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP1_KEY, TP1_VALUE));
                }
            });
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP1_KEY, TP1_VALUE_NEW));
                }
            });
        // Already have a FutureCallback that should be invoked/awaited, so no need for a follow-up finishedLatch
        }
    });
    readEndFutureCallback.get(10000, TimeUnit.MILLISECONDS);
    assertTrue(getInvoked.get());
    assertEquals(2, consumedRecords.size());
    assertEquals(2, consumedRecords.get(TP0).size());
    assertEquals(TP0_VALUE, consumedRecords.get(TP0).get(0).value());
    assertEquals(TP0_VALUE_NEW, consumedRecords.get(TP0).get(1).value());
    assertEquals(2, consumedRecords.get(TP1).size());
    assertEquals(TP1_VALUE, consumedRecords.get(TP1).get(0).value());
    assertEquals(TP1_VALUE_NEW, consumedRecords.get(TP1).get(1).value());
    // Cleanup
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
Also used: HashMap (java.util.HashMap), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), KafkaException (org.apache.kafka.common.KafkaException), LeaderNotAvailableException (org.apache.kafka.common.errors.LeaderNotAvailableException), WakeupException (org.apache.kafka.common.errors.WakeupException), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), TopicPartition (org.apache.kafka.common.TopicPartition), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), Map (java.util.Map), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
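
Outside the EasyMock scaffolding, the send-then-flush pattern this test mocks looks roughly like the following against a real producer. This is a sketch using the standard KafkaProducer API; sendAndFlush is a hypothetical helper, and the broker address and topic name are placeholders.

private void sendAndFlush() {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
        producer.send(new ProducerRecord<>("log-topic", "key", "value"),
                new org.apache.kafka.clients.producer.Callback() {

                    @Override
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        if (exception != null)
                            exception.printStackTrace();
                        else
                            System.out.printf("acked %s-%d@%d%n", metadata.topic(), metadata.partition(), metadata.offset());
                    }
                });
        // Mirrors the flush KafkaBasedLog performs before reading to the log end
        producer.flush();
    }
}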

Example 43 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From the class KafkaBasedLogTest, method testReloadOnStartWithNoNewRecordsPresent:

@Test
public void testReloadOnStartWithNoNewRecordsPresent() throws Exception {
    expectStart();
    expectStop();
    PowerMock.replayAll();
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 7L);
    endOffsets.put(TP1, 7L);
    consumer.updateEndOffsets(endOffsets);
    // Better to test with an advanced offset rather than just 0L
    consumer.updateBeginningOffsets(endOffsets);
    consumer.schedulePollTask(new Runnable() {

        @Override
        public void run() {
            // Trigger a WakeupException so the read does not block here; in real usage such
            // reads can be interrupted by time outs (for instance via ConnectRestException)
            throw new WakeupException();
        }
    });
    store.start();
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(7L, consumer.position(TP0));
    assertEquals(7L, consumer.position(TP1));
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
Also used: HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), WakeupException (org.apache.kafka.common.errors.WakeupException), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
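
The updateEndOffsets/updateBeginningOffsets calls above come from Kafka's MockConsumer test utility (org.apache.kafka.clients.consumer.MockConsumer). Here is a standalone sketch of that seeding pattern; mockConsumerSketch is a hypothetical method, and the topic, offsets, and values are illustrative.

private void mockConsumerSketch() {
    MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
    TopicPartition tp = new TopicPartition("log-topic", 0);
    mock.assign(Collections.singletonList(tp));
    // Seed both ends of the simulated log so position lookups have answers
    mock.updateBeginningOffsets(Collections.singletonMap(tp, 0L));
    mock.updateEndOffsets(Collections.singletonMap(tp, 1L));
    mock.seekToBeginning(Collections.singletonList(tp));
    // Queue a record; the next poll() returns it and advances the position
    mock.addRecord(new ConsumerRecord<>("log-topic", 0, 0L, "k", "v"));
    ConsumerRecords<String, String> records = mock.poll(0);
    System.out.println(records.count()); // 1
}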

Example 44 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From the class KafkaBasedLogTest, method testConsumerError:

@Test
public void testConsumerError() throws Exception {
    expectStart();
    expectStop();
    PowerMock.replayAll();
    final CountDownLatch finishedLatch = new CountDownLatch(1);
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 1L);
    endOffsets.put(TP1, 1L);
    consumer.updateEndOffsets(endOffsets);
    consumer.schedulePollTask(new Runnable() {

        @Override
        public void run() {
            // Trigger exception
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    consumer.setException(Errors.GROUP_COORDINATOR_NOT_AVAILABLE.exception());
                }
            });
            // Should keep polling until it reaches current log end offset for all partitions
            consumer.scheduleNopPollTask();
            consumer.scheduleNopPollTask();
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY, TP0_VALUE_NEW));
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY, TP0_VALUE_NEW));
                }
            });
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    finishedLatch.countDown();
                }
            });
        }
    });
    store.start();
    assertTrue(finishedLatch.await(10000, TimeUnit.MILLISECONDS));
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(1L, consumer.position(TP0));
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
Also used: HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), CountDownLatch (java.util.concurrent.CountDownLatch), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
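
The injected failure comes from Kafka's protocol-error enum (org.apache.kafka.common.protocol.Errors), which maps error codes to exception types. A small sketch of what setException hands to the mock consumer; errorsSketch is a hypothetical method, and the retriable check is consistent with the test continuing to poll after the error.

private void errorsSketch() {
    // exception() materializes the exception class behind a protocol error code;
    // GROUP_COORDINATOR_NOT_AVAILABLE maps to a RetriableException subclass
    KafkaException e = Errors.GROUP_COORDINATOR_NOT_AVAILABLE.exception();
    boolean retriable = e instanceof org.apache.kafka.common.errors.RetriableException;
    System.out.println(e.getClass().getSimpleName() + " retriable=" + retriable); // true
}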

Example 45 with TopicPartition

Use of org.apache.kafka.common.TopicPartition in project kafka by apache.

From the class KafkaBasedLogTest, method testReloadOnStart:

@Test
public void testReloadOnStart() throws Exception {
    expectStart();
    expectStop();
    PowerMock.replayAll();
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 1L);
    endOffsets.put(TP1, 1L);
    consumer.updateEndOffsets(endOffsets);
    final CountDownLatch finishedLatch = new CountDownLatch(1);
    // Use first poll task to set up the sequence of remaining responses to polls
    consumer.schedulePollTask(new Runnable() {

        @Override
        public void run() {
            // Should keep polling until it reaches current log end offset for all partitions. Should handle
            // as many empty polls as needed
            consumer.scheduleNopPollTask();
            consumer.scheduleNopPollTask();
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY, TP0_VALUE));
                }
            });
            consumer.scheduleNopPollTask();
            consumer.scheduleNopPollTask();
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP1_KEY, TP1_VALUE));
                }
            });
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    finishedLatch.countDown();
                }
            });
        }
    });
    store.start();
    assertTrue(finishedLatch.await(10000, TimeUnit.MILLISECONDS));
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(2, consumedRecords.size());
    assertEquals(TP0_VALUE, consumedRecords.get(TP0).get(0).value());
    assertEquals(TP1_VALUE, consumedRecords.get(TP1).get(0).value());
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
Also used: HashMap (java.util.HashMap), TopicPartition (org.apache.kafka.common.TopicPartition), CountDownLatch (java.util.concurrent.CountDownLatch), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)

Aggregations

TopicPartition (org.apache.kafka.common.TopicPartition): 257
HashMap (java.util.HashMap): 135
Test (org.junit.Test): 97
Map (java.util.Map): 64
ArrayList (java.util.ArrayList): 50
HashSet (java.util.HashSet): 44
LinkedHashMap (java.util.LinkedHashMap): 39
PartitionInfo (org.apache.kafka.common.PartitionInfo): 34
TaskId (org.apache.kafka.streams.processor.TaskId): 33
OffsetAndMetadata (org.apache.kafka.clients.consumer.OffsetAndMetadata): 29
Set (java.util.Set): 28
List (java.util.List): 26
Metrics (org.apache.kafka.common.metrics.Metrics): 20
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 19
Node (org.apache.kafka.common.Node): 19
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 19
Properties (java.util.Properties): 18
MockTime (org.apache.kafka.common.utils.MockTime): 17
PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest): 15
Collection (java.util.Collection): 13
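
A closing note on the counts above: TopicPartition pairs with HashMap in roughly half of these usages because TopicPartition implements value-based equals() and hashCode() over its (topic, partition) pair, which makes it a reliable map key. A minimal illustration (topicPartitionAsKey is a hypothetical method):

private void topicPartitionAsKey() {
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(new TopicPartition("test", 0), 42L);
    // A separately constructed instance with the same topic and partition is the same key
    System.out.println(offsets.get(new TopicPartition("test", 0))); // 42
}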