Search in sources:

Example 6 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project apache-kafka-on-k8s by banzaicloud.

From the class AbstractTaskTest, method shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenKafkaException:

@Test(expected = ProcessorStateException.class)
public void shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenKafkaException() {
    final Consumer consumer = mockConsumer(new KafkaException("blah"));
    final AbstractTask task = createTask(consumer, Collections.<StateStore, String>emptyMap());
    task.updateOffsetLimits();
}
Also used: MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) Consumer(org.apache.kafka.clients.consumer.Consumer) KafkaException(org.apache.kafka.common.KafkaException) Test(org.junit.Test)
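
The mockConsumer helper is not shown in this snippet; a plausible sketch, assuming it returns a MockConsumer whose committed() lookup throws the supplied exception, is:

private Consumer mockConsumer(final RuntimeException toThrow) {
    return new MockConsumer(OffsetResetStrategy.EARLIEST) {
        // Fail the committed-offset lookup that updateOffsetLimits performs
        @Override
        public OffsetAndMetadata committed(final TopicPartition partition) {
            throw toThrow;
        }
    };
}

Uses: OffsetResetStrategy(org.apache.kafka.clients.consumer.OffsetResetStrategy) OffsetAndMetadata(org.apache.kafka.clients.consumer.OffsetAndMetadata) TopicPartition(org.apache.kafka.common.TopicPartition)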

Example 7 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project apache-kafka-on-k8s by banzaicloud.

From the class AbstractTaskTest, method shouldThrowWakeupExceptionOnInitializeOffsetsWhenWakeupException:

@Test(expected = WakeupException.class)
public void shouldThrowWakeupExceptionOnInitializeOffsetsWhenWakeupException() {
    final Consumer consumer = mockConsumer(new WakeupException());
    final AbstractTask task = createTask(consumer, Collections.<StateStore, String>emptyMap());
    task.updateOffsetLimits();
}
Also used: MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) Consumer(org.apache.kafka.clients.consumer.Consumer) WakeupException(org.apache.kafka.common.errors.WakeupException) Test(org.junit.Test)
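
Together with Example 6, this pins down the contract of updateOffsetLimits: a WakeupException must propagate unchanged (it signals that the consumer is being shut down), while other KafkaExceptions are wrapped in a ProcessorStateException. A minimal sketch of that handling, with hypothetical field names (partitions, stateMgr), could be:

protected void updateOffsetLimits() {
    for (final TopicPartition partition : partitions) {
        try {
            final OffsetAndMetadata metadata = consumer.committed(partition);
            stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L);
        } catch (final WakeupException e) {
            // rethrow as-is so the shutdown signal is not swallowed
            throw e;
        } catch (final KafkaException e) {
            throw new ProcessorStateException("Failed to read committed offsets", e);
        }
    }
}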

Example 8 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project apache-kafka-on-k8s by banzaicloud.

From the class AbstractTaskTest, method shouldThrowLockExceptionIfFailedToLockStateDirectoryWhenTopologyHasStores:

@Test
public void shouldThrowLockExceptionIfFailedToLockStateDirectoryWhenTopologyHasStores() throws IOException {
    final Consumer consumer = EasyMock.createNiceMock(Consumer.class);
    final StateStore store = EasyMock.createNiceMock(StateStore.class);
    expect(stateDirectory.lock(id)).andReturn(false);
    EasyMock.replay(stateDirectory);
    final AbstractTask task = createTask(consumer, Collections.singletonMap(store, "dummy"));
    try {
        task.registerStateStores();
        fail("Should have thrown LockException");
    } catch (final LockException e) {
    // ok
    }
}
Also used: MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) Consumer(org.apache.kafka.clients.consumer.Consumer) LockException(org.apache.kafka.streams.errors.LockException) StateStore(org.apache.kafka.streams.processor.StateStore) Test(org.junit.Test)
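
The test stubs stateDirectory.lock(id) to return false and expects registerStateStores to turn that into a LockException. A plausible sketch of the guard under test (illustrative, not the actual AbstractTask code):

void registerStateStores() throws IOException {
    // If another thread or process holds the task's state directory,
    // fail fast rather than registering stores against a contested directory
    if (!stateDirectory.lock(id)) {
        throw new LockException(
            String.format("Failed to lock the state directory for task %s", id));
    }
    // ... register each store with the state manager ...
}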

Example 9 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project apache-kafka-on-k8s by banzaicloud.

From the class AbstractTaskTest, method shouldDeleteAndRecreateStoreDirectoryOnReinitialize:

@Test
public void shouldDeleteAndRecreateStoreDirectoryOnReinitialize() throws IOException {
    final StreamsConfig streamsConfig = new StreamsConfig(new Properties() {

        {
            put(StreamsConfig.APPLICATION_ID_CONFIG, "app-id");
            put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            put(StreamsConfig.STATE_DIR_CONFIG, TestUtils.tempDirectory().getAbsolutePath());
        }
    });
    final Consumer consumer = EasyMock.createNiceMock(Consumer.class);
    final StateStore store1 = EasyMock.createNiceMock(StateStore.class);
    final StateStore store2 = EasyMock.createNiceMock(StateStore.class);
    final StateStore store3 = EasyMock.createNiceMock(StateStore.class);
    final StateStore store4 = EasyMock.createNiceMock(StateStore.class);
    final String storeName1 = "storeName1";
    final String storeName2 = "storeName2";
    final String storeName3 = "storeName3";
    final String storeName4 = "storeName4";
    expect(store1.name()).andReturn(storeName1).anyTimes();
    EasyMock.replay(store1);
    expect(store2.name()).andReturn(storeName2).anyTimes();
    EasyMock.replay(store2);
    expect(store3.name()).andReturn(storeName3).anyTimes();
    EasyMock.replay(store3);
    expect(store4.name()).andReturn(storeName4).anyTimes();
    EasyMock.replay(store4);
    final StateDirectory stateDirectory = new StateDirectory(streamsConfig, new MockTime());
    final AbstractTask task = createTask(consumer, new HashMap<StateStore, String>() {

        {
            put(store1, storeTopicPartition1.topic());
            put(store2, storeTopicPartition2.topic());
            put(store3, storeTopicPartition3.topic());
            put(store4, storeTopicPartition4.topic());
        }
    }, stateDirectory);
    final String taskDir = stateDirectory.directoryForTask(task.id).getAbsolutePath();
    final File storeDirectory1 = new File(taskDir + File.separator + "rocksdb" + File.separator + storeName1);
    final File storeDirectory2 = new File(taskDir + File.separator + "rocksdb" + File.separator + storeName2);
    final File storeDirectory3 = new File(taskDir + File.separator + storeName3);
    final File storeDirectory4 = new File(taskDir + File.separator + storeName4);
    final File testFile1 = new File(storeDirectory1.getAbsolutePath() + File.separator + "testFile");
    final File testFile2 = new File(storeDirectory2.getAbsolutePath() + File.separator + "testFile");
    final File testFile3 = new File(storeDirectory3.getAbsolutePath() + File.separator + "testFile");
    final File testFile4 = new File(storeDirectory4.getAbsolutePath() + File.separator + "testFile");
    storeDirectory1.mkdirs();
    storeDirectory2.mkdirs();
    storeDirectory3.mkdirs();
    storeDirectory4.mkdirs();
    testFile1.createNewFile();
    assertTrue(testFile1.exists());
    testFile2.createNewFile();
    assertTrue(testFile2.exists());
    testFile3.createNewFile();
    assertTrue(testFile3.exists());
    testFile4.createNewFile();
    assertTrue(testFile4.exists());
    task.processorContext = new InternalMockProcessorContext(stateDirectory.directoryForTask(task.id), streamsConfig);
    task.stateMgr.register(store1, new MockRestoreCallback());
    task.stateMgr.register(store2, new MockRestoreCallback());
    task.stateMgr.register(store3, new MockRestoreCallback());
    task.stateMgr.register(store4, new MockRestoreCallback());
    // only reinitialize store1 and store3 -- store2 and store4 should be untouched
    task.reinitializeStateStoresForPartitions(Utils.mkSet(storeTopicPartition1, storeTopicPartition3));
    assertFalse(testFile1.exists());
    assertTrue(testFile2.exists());
    assertFalse(testFile3.exists());
    assertTrue(testFile4.exists());
}
Also used: MockConsumer(org.apache.kafka.clients.consumer.MockConsumer) Consumer(org.apache.kafka.clients.consumer.Consumer) StateStore(org.apache.kafka.streams.processor.StateStore) MockRestoreCallback(org.apache.kafka.test.MockRestoreCallback) Properties(java.util.Properties) InternalMockProcessorContext(org.apache.kafka.test.InternalMockProcessorContext) File(java.io.File) MockTime(org.apache.kafka.common.utils.MockTime) StreamsConfig(org.apache.kafka.streams.StreamsConfig) Test(org.junit.Test)
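
Note the two layouts exercised here: store1 and store2 live under a "rocksdb" subdirectory of the task directory, while store3 and store4 sit at the task directory root; in both cases only the stores whose changelog partitions are passed in get wiped. A simplified sketch of that selection logic (illustrative names, not the actual Streams code):

void reinitializeStateStoresForPartitions(final Collection<TopicPartition> partitions) throws IOException {
    // storeToChangelogTopic maps store name -> changelog topic; invert it so a
    // requested partition can be traced back to the store it backs
    final Map<String, String> changelogTopicToStore = new HashMap<>();
    for (final Map.Entry<String, String> entry : storeToChangelogTopic.entrySet()) {
        changelogTopicToStore.put(entry.getValue(), entry.getKey());
    }
    for (final TopicPartition partition : partitions) {
        final String storeName = changelogTopicToStore.get(partition.topic());
        if (storeName != null) {
            // Recursively delete the store directory so the store is rebuilt
            // from its changelog on the next restore (segmented RocksDB stores
            // may nest under a "rocksdb" subdirectory; omitted here)
            Utils.delete(new File(stateDirectory.directoryForTask(id), storeName));
        }
    }
}

Uses: Utils(org.apache.kafka.common.utils.Utils)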

Example 10 with Consumer

Use of org.apache.kafka.clients.consumer.Consumer in project incubator-pulsar by apache.

From the class KafkaConsumerTest, method testPartitions:

@Test
public void testPartitions() throws Exception {
    String topic = "persistent://sample/standalone/ns/testPartitions";
    // Create 8 partitions in topic
    admin.properties().createProperty("sample", new PropertyAdmin());
    admin.persistentTopics().createPartitionedTopic(topic, 8);
    Properties props = new Properties();
    props.put("bootstrap.servers", lookupUrl.toString());
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "true");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());
    Producer<byte[]> pulsarProducer = pulsarClient.newProducer().topic(topic).messageRoutingMode(org.apache.pulsar.client.api.MessageRoutingMode.RoundRobinPartition).create();
    // Create 2 Kafka consumers and verify each gets half of the messages
    List<Consumer<String, String>> consumers = new ArrayList<>();
    for (int c = 0; c < 2; c++) {
        Consumer<String, String> consumer = new PulsarKafkaConsumer<>(props);
        consumer.subscribe(Arrays.asList(topic));
        consumers.add(consumer);
    }
    int N = 8 * 3;
    for (int i = 0; i < N; i++) {
        Message<byte[]> msg = MessageBuilder.create().setKey(Integer.toString(i)).setContent(("hello-" + i).getBytes()).build();
        pulsarProducer.send(msg);
    }
    consumers.forEach(consumer -> {
        int expectedMessages = N / consumers.size();
        for (int i = 0; i < expectedMessages; ) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            i += records.count();
        }
        // No more messages for this consumer
        ConsumerRecords<String, String> records = consumer.poll(100);
        assertEquals(records.count(), 0);
    });
    consumers.forEach(Consumer::close);
}
Also used: StringDeserializer(org.apache.kafka.common.serialization.StringDeserializer) ArrayList(java.util.ArrayList) PulsarKafkaConsumer(org.apache.kafka.clients.consumer.PulsarKafkaConsumer) Properties(java.util.Properties) PropertyAdmin(org.apache.pulsar.common.policies.data.PropertyAdmin) Consumer(org.apache.kafka.clients.consumer.Consumer) Test(org.testng.annotations.Test)
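
Because PulsarKafkaConsumer implements the standard org.apache.kafka.clients.consumer.Consumer interface, the consuming code above would work unchanged against a stock Kafka cluster. For comparison, a minimal sketch of the same setup with a plain KafkaConsumer (broker address and topic name are illustrative):

Properties props = new Properties();
// plain Kafka broker instead of the Pulsar lookup URL
props.put("bootstrap.servers", "localhost:9092");
props.put("group.id", "my-subscription-name");
props.put("enable.auto.commit", "true");
props.put("key.deserializer", StringDeserializer.class.getName());
props.put("value.deserializer", StringDeserializer.class.getName());
// Consumer extends Closeable, so try-with-resources closes it cleanly
try (Consumer<String, String> consumer = new KafkaConsumer<>(props)) {
    consumer.subscribe(Arrays.asList("my-topic"));
    ConsumerRecords<String, String> records = consumer.poll(100);
    records.forEach(r -> System.out.printf("%s -> %s%n", r.key(), r.value()));
}

Uses: KafkaConsumer(org.apache.kafka.clients.consumer.KafkaConsumer) ConsumerRecords(org.apache.kafka.clients.consumer.ConsumerRecords)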

Aggregations

Consumer (org.apache.kafka.clients.consumer.Consumer): 35
Test (org.junit.Test): 22
Map (java.util.Map): 20
TopicPartition (org.apache.kafka.common.TopicPartition): 20
ArrayList (java.util.ArrayList): 17
HashMap (java.util.HashMap): 17
List (java.util.List): 17
Collections (java.util.Collections): 16
Set (java.util.Set): 16
Properties (java.util.Properties): 15
Collection (java.util.Collection): 14
MockConsumer (org.apache.kafka.clients.consumer.MockConsumer): 14
Collectors (java.util.stream.Collectors): 13
KafkaException (org.apache.kafka.common.KafkaException): 11
HashSet (java.util.HashSet): 10
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 10
StreamsConfig (org.apache.kafka.streams.StreamsConfig): 10
Logger (org.slf4j.Logger): 10
ConsumerRecords (org.apache.kafka.clients.consumer.ConsumerRecords): 9
PartitionInfo (org.apache.kafka.common.PartitionInfo): 9