
Example 11 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.

From the class RocksDBSessionStoreSupplierTest, method shouldCreateLoggingEnabledStoreWhenStoreLogged.

@Test
public void shouldCreateLoggingEnabledStoreWhenStoreLogged() throws Exception {
    store = createStore(true, false); // logged = true, cached = false
    final List<ProducerRecord> logged = new ArrayList<>();
    // Collector stub that captures change-log sends instead of producing to Kafka
    final NoOpRecordCollector collector = new NoOpRecordCollector() {

        @Override
        public <K, V> void send(final String topic, K key, V value, Integer partition, Long timestamp, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
            logged.add(new ProducerRecord<K, V>(topic, partition, timestamp, key, value));
        }
    };
    final MockProcessorContext context = new MockProcessorContext(TestUtils.tempDirectory(), Serdes.String(), Serdes.String(), collector, cache);
    context.setTime(1);
    store.init(context, store);
    store.put(new Windowed<>("a", new SessionWindow(0, 10)), "b");
    assertFalse(logged.isEmpty());
}
Also used: NoOpRecordCollector (org.apache.kafka.test.NoOpRecordCollector), ArrayList (java.util.ArrayList), MockProcessorContext (org.apache.kafka.test.MockProcessorContext), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), SessionWindow (org.apache.kafka.streams.kstream.internals.SessionWindow), Serializer (org.apache.kafka.common.serialization.Serializer), Test (org.junit.Test)
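
Both this test and the next rely on a createStore(logged, cached) helper that the excerpt omits. A plausible reconstruction, assuming the supplier's constructor in this Kafka version takes the store name, retention period, serdes, logging flag, changelog config, and caching flag (the signature and the placeholder values are assumptions):

private SessionStore<String, String> createStore(final boolean logged, final boolean cached) {
    // Assumed shape of the omitted helper; the constructor arguments are a best guess.
    return new RocksDBSessionStoreSupplier<>(
            "session-store",                        // store name (placeholder)
            60_000L,                                // retention period in ms (placeholder)
            Serdes.String(), Serdes.String(),       // serdes matching the MockProcessorContext
            logged,                                 // drives the change-logging behavior under test
            Collections.<String, String>emptyMap(), // changelog topic config
            cached)                                 // wrap with the caching layer if true
            .get();
}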

Example 12 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.

From the class RocksDBSessionStoreSupplierTest, method shouldNotBeLoggingEnabledStoreWhenLoggingNotEnabled.

@Test
public void shouldNotBeLoggingEnabledStoreWhenLoggingNotEnabled() throws Exception {
    store = createStore(false, false); // logged = false, cached = false
    final List<ProducerRecord> logged = new ArrayList<>();
    // Collector stub that captures change-log sends instead of producing to Kafka
    final NoOpRecordCollector collector = new NoOpRecordCollector() {

        @Override
        public <K, V> void send(final String topic, K key, V value, Integer partition, Long timestamp, Serializer<K> keySerializer, Serializer<V> valueSerializer) {
            logged.add(new ProducerRecord<K, V>(topic, partition, timestamp, key, value));
        }
    };
    final MockProcessorContext context = new MockProcessorContext(TestUtils.tempDirectory(), Serdes.String(), Serdes.String(), collector, cache);
    context.setTime(1);
    store.init(context, store);
    store.put(new Windowed<>("a", new SessionWindow(0, 10)), "b");
    assertTrue(logged.isEmpty());
}
Also used: NoOpRecordCollector (org.apache.kafka.test.NoOpRecordCollector), ArrayList (java.util.ArrayList), MockProcessorContext (org.apache.kafka.test.MockProcessorContext), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), SessionWindow (org.apache.kafka.streams.kstream.internals.SessionWindow), Serializer (org.apache.kafka.common.serialization.Serializer), Test (org.junit.Test)
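
Taken together, Examples 11 and 12 pin down the contract: the logged flag alone decides whether the store writes through the RecordCollector. In the logging-enabled case one could also inspect the captured record; the assertions below are a hedged sketch (the -changelog suffix follows Kafka Streams' <application.id>-<storeName>-changelog naming convention, and treating it as a stable assertion target is an assumption):

// Hypothetical extra assertions for the logging-enabled test (sketch only)
final ProducerRecord record = logged.get(0);
assertTrue(record.topic().endsWith("-changelog")); // Streams changelog naming convention
assertNotNull(record.key());                       // the serialized session-windowed key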

Example 13 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project storm by apache.

From the class KafkaUtilsTest, method createTopicAndSendMessage.

private void createTopicAndSendMessage(String key, String value) {
    Properties p = new Properties();
    p.put("acks", "1");
    p.put("bootstrap.servers", broker.getBrokerConnectionString());
    p.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    p.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    p.put("metadata.fetch.timeout.ms", 1000);
    KafkaProducer<String, String> producer = new KafkaProducer<String, String>(p);
    try {
        producer.send(new ProducerRecord<String, String>(config.topic, key, value)).get();
    } catch (Exception e) {
        // Log before failing: Assert.fail throws, so any statement after it is unreachable
        LOG.error("Failed to do synchronous sending due to " + e, e);
        Assert.fail(e.getMessage());
    } finally {
        producer.close();
    }
}
Also used: KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), Properties (java.util.Properties)
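
Calling get() on the returned Future blocks until the broker acknowledges the record, which is what makes this send synchronous and lets the test fail immediately on error. For contrast, a minimal sketch of the non-blocking form using the producer's Callback API (the logging shown is illustrative):

producer.send(new ProducerRecord<String, String>(config.topic, key, value),
    new org.apache.kafka.clients.producer.Callback() {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            if (exception != null) {
                LOG.error("Asynchronous send failed", exception); // illustrative handling
            } else {
                LOG.debug("Sent to {}-{} at offset {}",
                        metadata.topic(), metadata.partition(), metadata.offset());
            }
        }
    });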

Example 14 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.

From the class ProducerInterceptorsTest, method testOnAcknowledgementWithErrorChain.

@Test
public void testOnAcknowledgementWithErrorChain() {
    List<ProducerInterceptor<Integer, String>> interceptorList = new ArrayList<>();
    AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One");
    interceptorList.add(interceptor1);
    ProducerInterceptors<Integer, String> interceptors = new ProducerInterceptors<>(interceptorList);
    // verify that metadata contains both topic and partition
    interceptors.onSendError(producerRecord, new TopicPartition(producerRecord.topic(), producerRecord.partition()), new KafkaException("Test"));
    assertEquals(1, onErrorAckCount);
    assertEquals(1, onErrorAckWithTopicPartitionSetCount);
    // verify that metadata contains both topic and partition (because record already contains partition)
    interceptors.onSendError(producerRecord, null, new KafkaException("Test"));
    assertEquals(2, onErrorAckCount);
    assertEquals(2, onErrorAckWithTopicPartitionSetCount);
    // if producer record does not contain partition, interceptor should get partition == -1
    ProducerRecord<Integer, String> record2 = new ProducerRecord<>("test2", null, 1, "value");
    interceptors.onSendError(record2, null, new KafkaException("Test"));
    assertEquals(3, onErrorAckCount);
    assertEquals(3, onErrorAckWithTopicSetCount);
    assertEquals(2, onErrorAckWithTopicPartitionSetCount);
    // if producer record does not contain partition, but topic/partition is passed to
    // onSendError, then interceptor should get valid partition
    int reassignedPartition = producerRecord.partition() + 1;
    interceptors.onSendError(record2, new TopicPartition(record2.topic(), reassignedPartition), new KafkaException("Test"));
    assertEquals(4, onErrorAckCount);
    assertEquals(4, onErrorAckWithTopicSetCount);
    assertEquals(3, onErrorAckWithTopicPartitionSetCount);
    // if both record and topic/partition are null, interceptor should not receive metadata
    interceptors.onSendError(null, null, new KafkaException("Test"));
    assertEquals(5, onErrorAckCount);
    assertEquals(4, onErrorAckWithTopicSetCount);
    assertEquals(3, onErrorAckWithTopicPartitionSetCount);
    interceptors.close();
}
Also used: ArrayList (java.util.ArrayList), TopicPartition (org.apache.kafka.common.TopicPartition), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), ProducerInterceptor (org.apache.kafka.clients.producer.ProducerInterceptor), KafkaException (org.apache.kafka.common.KafkaException), Test (org.junit.Test)
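
AppendProducerInterceptor and the onErrorAck* counters are fixtures defined elsewhere in ProducerInterceptorsTest. A hypothetical interceptor that would drive the counters the way these assertions expect (the class name, fields, and logic are assumptions, not the real fixture):

// Hypothetical stand-in for the test's interceptor fixture (sketch only).
class CountingInterceptor implements ProducerInterceptor<Integer, String> {

    int onErrorAckCount;
    int onErrorAckWithTopicSetCount;
    int onErrorAckWithTopicPartitionSetCount;

    @Override
    public ProducerRecord<Integer, String> onSend(ProducerRecord<Integer, String> record) {
        return record; // pass-through; this sketch only counts acknowledgements
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            onErrorAckCount++;                  // every onSendError call lands here
            if (metadata != null) {
                onErrorAckWithTopicSetCount++;  // the topic was recoverable
                if (metadata.partition() != -1) // -1 marks an unknown partition, per the test's comment
                    onErrorAckWithTopicPartitionSetCount++;
            }
        }
    }

    @Override
    public void close() {
    }

    @Override
    public void configure(java.util.Map<String, ?> configs) {
    }
}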

Example 15 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project kafka by apache.

From the class KafkaBasedLogTest, method testSendAndReadToEnd.

@Test
public void testSendAndReadToEnd() throws Exception {
    expectStart();
    TestFuture<RecordMetadata> tp0Future = new TestFuture<>();
    ProducerRecord<String, String> tp0Record = new ProducerRecord<>(TOPIC, TP0_KEY, TP0_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback0 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp0Record), EasyMock.capture(callback0))).andReturn(tp0Future);
    TestFuture<RecordMetadata> tp1Future = new TestFuture<>();
    ProducerRecord<String, String> tp1Record = new ProducerRecord<>(TOPIC, TP1_KEY, TP1_VALUE);
    Capture<org.apache.kafka.clients.producer.Callback> callback1 = EasyMock.newCapture();
    EasyMock.expect(producer.send(EasyMock.eq(tp1Record), EasyMock.capture(callback1))).andReturn(tp1Future);
    // Producer flushes when read to log end is called
    producer.flush();
    PowerMock.expectLastCall();
    expectStop();
    PowerMock.replayAll();
    Map<TopicPartition, Long> endOffsets = new HashMap<>();
    endOffsets.put(TP0, 0L);
    endOffsets.put(TP1, 0L);
    consumer.updateEndOffsets(endOffsets);
    store.start();
    assertEquals(CONSUMER_ASSIGNMENT, consumer.assignment());
    assertEquals(0L, consumer.position(TP0));
    assertEquals(0L, consumer.position(TP1));
    // Set some keys
    final AtomicInteger invoked = new AtomicInteger(0);
    org.apache.kafka.clients.producer.Callback producerCallback = new org.apache.kafka.clients.producer.Callback() {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            invoked.incrementAndGet();
        }
    };
    store.send(TP0_KEY, TP0_VALUE, producerCallback);
    store.send(TP1_KEY, TP1_VALUE, producerCallback);
    assertEquals(0, invoked.get());
    // The callbacks ignore the metadata, so resolving the futures with null is safe in this test
    tp1Future.resolve((RecordMetadata) null);
    callback1.getValue().onCompletion(null, null);
    assertEquals(1, invoked.get());
    tp0Future.resolve((RecordMetadata) null);
    callback0.getValue().onCompletion(null, null);
    assertEquals(2, invoked.get());
    // Now we should have to wait for the records to be read back when we call readToEnd()
    final AtomicBoolean getInvoked = new AtomicBoolean(false);
    final FutureCallback<Void> readEndFutureCallback = new FutureCallback<>(new Callback<Void>() {

        @Override
        public void onCompletion(Throwable error, Void result) {
            getInvoked.set(true);
        }
    });
    consumer.schedulePollTask(new Runnable() {

        @Override
        public void run() {
            // Once we're synchronized in a poll, start the read to end and schedule the exact set of poll events
            // that should follow. This readToEnd call will immediately wake up this consumer.poll() call without
            // returning any data.
            Map<TopicPartition, Long> newEndOffsets = new HashMap<>();
            newEndOffsets.put(TP0, 2L);
            newEndOffsets.put(TP1, 2L);
            consumer.updateEndOffsets(newEndOffsets);
            store.readToEnd(readEndFutureCallback);
            // Should keep polling until it reaches current log end offset for all partitions
            consumer.scheduleNopPollTask();
            consumer.scheduleNopPollTask();
            consumer.scheduleNopPollTask();
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY, TP0_VALUE));
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP0_KEY, TP0_VALUE_NEW));
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 0, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP1_KEY, TP1_VALUE));
                }
            });
            consumer.schedulePollTask(new Runnable() {

                @Override
                public void run() {
                    consumer.addRecord(new ConsumerRecord<>(TOPIC, 1, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, TP1_KEY, TP1_VALUE_NEW));
                }
            });
        // Already have a FutureCallback that will be invoked/awaited, so no follow-up finishedLatch is needed
        }
    });
    readEndFutureCallback.get(10000, TimeUnit.MILLISECONDS);
    assertTrue(getInvoked.get());
    assertEquals(2, consumedRecords.size());
    assertEquals(2, consumedRecords.get(TP0).size());
    assertEquals(TP0_VALUE, consumedRecords.get(TP0).get(0).value());
    assertEquals(TP0_VALUE_NEW, consumedRecords.get(TP0).get(1).value());
    assertEquals(2, consumedRecords.get(TP1).size());
    assertEquals(TP1_VALUE, consumedRecords.get(TP1).get(0).value());
    assertEquals(TP1_VALUE_NEW, consumedRecords.get(TP1).get(1).value());
    // Cleanup
    store.stop();
    assertFalse(Whitebox.<Thread>getInternalState(store, "thread").isAlive());
    assertTrue(consumer.closed());
    PowerMock.verifyAll();
}
Also used: HashMap (java.util.HashMap), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), KafkaException (org.apache.kafka.common.KafkaException), LeaderNotAvailableException (org.apache.kafka.common.errors.LeaderNotAvailableException), WakeupException (org.apache.kafka.common.errors.WakeupException), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), TopicPartition (org.apache.kafka.common.TopicPartition), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), Map (java.util.Map), PrepareForTest (org.powermock.core.classloader.annotations.PrepareForTest), Test (org.junit.Test)
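
The scheduled poll tasks above emulate what KafkaBasedLog.readToEnd has to guarantee: keep consuming until every partition's position reaches the end offset captured when the call was made. A conceptual sketch of that loop, using only standard KafkaConsumer methods (a simplification, not the Connect implementation; process is a placeholder for the log's consumed-records callback):

// Simplified read-to-end loop (sketch; the real KafkaBasedLog drives this from a work thread)
Map<TopicPartition, Long> remaining = new HashMap<>(consumer.endOffsets(consumer.assignment()));
while (!remaining.isEmpty()) {
    for (ConsumerRecord<String, String> record : consumer.poll(100)) {
        process(record); // placeholder for the consumed-records callback
    }
    Iterator<Map.Entry<TopicPartition, Long>> it = remaining.entrySet().iterator();
    while (it.hasNext()) {
        Map.Entry<TopicPartition, Long> entry = it.next();
        // A partition is done once our position has caught up to its captured end offset
        if (consumer.position(entry.getKey()) >= entry.getValue())
            it.remove();
    }
}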

Aggregations

ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 51
Test (org.junit.Test): 29
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 14
Future (java.util.concurrent.Future): 11
ArrayList (java.util.ArrayList): 10
Properties (java.util.Properties): 10
Callback (org.apache.kafka.clients.producer.Callback): 10
TopicPartition (org.apache.kafka.common.TopicPartition): 8
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 7
ExecutionException (java.util.concurrent.ExecutionException): 6
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 6
Serializer (org.apache.kafka.common.serialization.Serializer): 6
MockProcessorContext (org.apache.kafka.test.MockProcessorContext): 6
NoOpRecordCollector (org.apache.kafka.test.NoOpRecordCollector): 6
MockProducer (org.apache.kafka.clients.producer.MockProducer): 5
DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner): 5
SourceRecord (org.apache.kafka.connect.source.SourceRecord): 5
HashMap (java.util.HashMap): 4
Endpoint (org.apache.camel.Endpoint): 4
MockEndpoint (org.apache.camel.component.mock.MockEndpoint): 4