
Example 1 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project apache/kafka.

Class WorkerSourceTaskTest, method expectSendRecord.

private Capture<ProducerRecord<byte[], byte[]>> expectSendRecord(boolean anyTimes, boolean isRetry, boolean succeed) throws InterruptedException {
    expectConvertKeyValue(anyTimes);
    expectApplyTransformationChain(anyTimes);
    Capture<ProducerRecord<byte[], byte[]>> sent = EasyMock.newCapture();
    // 1. Offset data is passed to the offset storage.
    if (!isRetry) {
        offsetWriter.offset(PARTITION, OFFSET);
        if (anyTimes)
            PowerMock.expectLastCall().anyTimes();
        else
            PowerMock.expectLastCall();
    }
    // 2. Converted data is passed to the producer, which needs its callbacks invoked for flush to work
    IExpectationSetters<Future<RecordMetadata>> expect = EasyMock.expect(producer.send(EasyMock.capture(sent), EasyMock.capture(producerCallbacks)));
    IAnswer<Future<RecordMetadata>> expectResponse = new IAnswer<Future<RecordMetadata>>() {

        @Override
        public Future<RecordMetadata> answer() throws Throwable {
            synchronized (producerCallbacks) {
                for (org.apache.kafka.clients.producer.Callback cb : producerCallbacks.getValues()) {
                    cb.onCompletion(new RecordMetadata(new TopicPartition("foo", 0), 0, 0, 0L, 0L, 0, 0), null);
                }
                producerCallbacks.reset();
            }
            return sendFuture;
        }
    };
    if (anyTimes)
        expect.andStubAnswer(expectResponse);
    else
        expect.andAnswer(expectResponse);
    // 3. As a result of a successful producer send callback, we'll notify the source task of the record commit
    expectTaskCommitRecord(anyTimes, succeed);
    return sent;
}
Also used: RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), IAnswer (org.easymock.IAnswer), TopicPartition (org.apache.kafka.common.TopicPartition), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), Future (java.util.concurrent.Future)
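In the test body, the returned capture is inspected once the task has run. A minimal follow-up sketch, assuming TOPIC, SERIALIZED_KEY, and SERIALIZED_RECORD are test constants defined elsewhere in WorkerSourceTaskTest (they are assumptions here, not shown in the snippet):

// Hypothetical assertion helper over the captured record; TOPIC,
// SERIALIZED_KEY and SERIALIZED_RECORD are assumed test fixtures.
private void assertSendRecord(Capture<ProducerRecord<byte[], byte[]>> sent) {
    ProducerRecord<byte[], byte[]> record = sent.getValue();
    assertEquals(TOPIC, record.topic());
    assertArrayEquals(SERIALIZED_KEY, record.key());
    assertArrayEquals(SERIALIZED_RECORD, record.value());
}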

Example 2 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project apache/hadoop.

Class TestKafkaMetrics, method testPutMetrics.

@Test
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testPutMetrics() throws Exception {
    // Create a record by mocking MetricsRecord class.
    MetricsRecord record = mock(MetricsRecord.class);
    when(record.tags()).thenReturn(Lists.newArrayList(new MetricsTag(KafkaMetricsInfo.KafkaTag, "test_tag")));
    when(record.timestamp()).thenReturn(System.currentTimeMillis());
    // Create a metric using AbstractMetric class.
    AbstractMetric metric = new AbstractMetric(KafkaMetricsInfo.KafkaCounter) {

        @Override
        public Number value() {
            return 123;
        }

        @Override
        public MetricType type() {
            return null;
        }

        @Override
        public void visit(MetricsVisitor visitor) {
        }
    };
    // Create a list of metrics.
    Iterable<AbstractMetric> metrics = Lists.newArrayList(metric);
    when(record.name()).thenReturn("Kafka record name");
    when(record.metrics()).thenReturn(metrics);
    SubsetConfiguration conf = mock(SubsetConfiguration.class);
    when(conf.getString(KafkaSink.BROKER_LIST)).thenReturn("localhost:9092");
    String topic = "myTestKafkaTopic";
    when(conf.getString(KafkaSink.TOPIC)).thenReturn(topic);
    // Create the KafkaSink object and initialize it.
    kafkaSink = new KafkaSink();
    kafkaSink.init(conf);
    // Create a mock KafkaProducer as a producer for KafkaSink.
    Producer<Integer, byte[]> mockProducer = mock(KafkaProducer.class);
    kafkaSink.setProducer(mockProducer);
    // Create the json object from the record.
    StringBuilder jsonLines = recordToJson(record);
    if (LOG.isDebugEnabled()) {
        LOG.debug("kafka message: " + jsonLines.toString());
    }
    // Send the record and store the result in a mock Future.
    Future<RecordMetadata> f = mock(Future.class);
    when(mockProducer.send((ProducerRecord) anyObject())).thenReturn(f);
    kafkaSink.putMetrics(record);
    // Capture the sent record and verify it.
    ArgumentCaptor<ProducerRecord> argument = ArgumentCaptor.forClass(ProducerRecord.class);
    verify(mockProducer).send(argument.capture());
    // Compare the received data with the original one.
    ProducerRecord<Integer, byte[]> data = argument.getValue();
    String jsonResult = new String(data.value());
    if (LOG.isDebugEnabled()) {
        LOG.debug("kafka result: " + jsonResult);
    }
    assertEquals(jsonLines.toString(), jsonResult);
}
Also used: MetricsRecord (org.apache.hadoop.metrics2.MetricsRecord), AbstractMetric (org.apache.hadoop.metrics2.AbstractMetric), MetricsTag (org.apache.hadoop.metrics2.MetricsTag), MetricsVisitor (org.apache.hadoop.metrics2.MetricsVisitor), SubsetConfiguration (org.apache.commons.configuration2.SubsetConfiguration), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), KafkaSink (org.apache.hadoop.metrics2.sink.KafkaSink), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), Test (org.junit.Test)
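The recordToJson helper is not shown in this snippet. As an illustration only, a simplified sketch of what such a helper could look like; the real helper in TestKafkaMetrics mirrors KafkaSink's exact JSON layout (including hostname and date fields), so treat this as an assumption-laden approximation:

// Simplified sketch of a recordToJson helper; the actual test helper
// reproduces KafkaSink's full JSON layout, field for field.
private StringBuilder recordToJson(MetricsRecord record) {
    StringBuilder json = new StringBuilder("{");
    json.append("\"name\": \"").append(record.name()).append("\", ");
    json.append("\"timestamp\": ").append(record.timestamp());
    for (MetricsTag tag : record.tags()) {
        json.append(", \"").append(tag.name()).append("\": \"").append(tag.value()).append("\"");
    }
    for (AbstractMetric metric : record.metrics()) {
        json.append(", \"").append(metric.name()).append("\": \"").append(metric.value()).append("\"");
    }
    return json.append("}");
}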

Example 3 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project apache/kafka.

Class RecordCollectorTest, method shouldThrowStreamsExceptionOnSubsequentCallIfASendFails.

@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowStreamsExceptionOnSubsequentCallIfASendFails() throws Exception {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            callback.onCompletion(null, new Exception());
            return null;
        }
    }, "test");
    collector.send("topic1", "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send("topic1", "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
}
Also used: MockProducer (org.apache.kafka.clients.producer.MockProducer), Callback (org.apache.kafka.clients.producer.Callback), DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), Future (java.util.concurrent.Future), TimeoutException (org.apache.kafka.common.errors.TimeoutException), StreamsException (org.apache.kafka.streams.errors.StreamsException), Test (org.junit.Test)
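Overriding send() is one way to simulate a failed delivery. A sketch of an alternative using MockProducer's built-in failure injection (the constructor and errorNext() are part of the Kafka clients test utilities; the collector wiring mirrors the test above and is otherwise an assumption):

// With autoComplete = false, each send stays pending until completed
// explicitly; errorNext() fails the oldest pending send.
MockProducer<byte[], byte[]> mockProducer =
        new MockProducer<>(false, byteArraySerializer, byteArraySerializer);
RecordCollector collector = new RecordCollectorImpl(mockProducer, "test");
collector.send("topic1", "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
// Fail the in-flight record; a subsequent collector call should then throw StreamsException.
mockProducer.errorNext(new TimeoutException("simulated send failure"));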

Example 4 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project apache/kafka.

Class RecordCollectorTest, method shouldThrowStreamsExceptionOnCloseIfASendFailed.

@SuppressWarnings("unchecked")
@Test(expected = StreamsException.class)
public void shouldThrowStreamsExceptionOnCloseIfASendFailed() throws Exception {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            callback.onCompletion(null, new Exception());
            return null;
        }
    }, "test");
    collector.send("topic1", "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
    collector.close();
}
Also used: MockProducer (org.apache.kafka.clients.producer.MockProducer), Callback (org.apache.kafka.clients.producer.Callback), DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), Future (java.util.concurrent.Future), TimeoutException (org.apache.kafka.common.errors.TimeoutException), StreamsException (org.apache.kafka.streams.errors.StreamsException), Test (org.junit.Test)
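The design choice verified here: the collector records an asynchronous send failure and rethrows it on the next interaction, including close(). The same check written with a try/catch instead of the expected annotation, as a sketch:

// Sketch: the deferred async failure surfaces when close() is called;
// fail() is org.junit.Assert.fail.
collector.send("topic1", "3", "0", null, null, stringSerializer, stringSerializer, streamPartitioner);
try {
    collector.close();
    fail("expected a StreamsException from the earlier failed send");
} catch (final StreamsException expected) {
    // the exception passed to the producer callback is rethrown here
}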

Example 5 with ProducerRecord

Use of org.apache.kafka.clients.producer.ProducerRecord in project apache/kafka.

Class SmokeTestDriver, method generate.

public static Map<String, Set<Integer>> generate(String kafka, final int numKeys, final int maxRecordsPerKey) throws Exception {
    Properties props = new Properties();
    props.put(ProducerConfig.CLIENT_ID_CONFIG, "SmokeTest");
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props);
    int numRecordsProduced = 0;
    Map<String, Set<Integer>> allData = new HashMap<>();
    ValueList[] data = new ValueList[numKeys];
    for (int i = 0; i < numKeys; i++) {
        data[i] = new ValueList(i, i + maxRecordsPerKey - 1);
        allData.put(data[i].key, new HashSet<Integer>());
    }
    Random rand = new Random();
    int remaining = data.length;
    while (remaining > 0) {
        int index = rand.nextInt(remaining);
        String key = data[index].key;
        int value = data[index].next();
        if (value < 0) {
            remaining--;
            data[index] = data[remaining];
        } else {
            ProducerRecord<byte[], byte[]> record = new ProducerRecord<>("data", stringSerde.serializer().serialize("", key), intSerde.serializer().serialize("", value));
            producer.send(record, new Callback() {

                @Override
                public void onCompletion(final RecordMetadata metadata, final Exception exception) {
                    if (exception != null) {
                        exception.printStackTrace();
                        Exit.exit(1);
                    }
                }
            });
            numRecordsProduced++;
            allData.get(key).add(value);
            if (numRecordsProduced % 100 == 0)
                System.out.println(numRecordsProduced + " records produced");
            Utils.sleep(2);
        }
    }
    producer.close();
    return Collections.unmodifiableMap(allData);
}
Also used: KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer), HashSet (java.util.HashSet), Set (java.util.Set), HashMap (java.util.HashMap), Properties (java.util.Properties), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Callback (org.apache.kafka.clients.producer.Callback), Random (java.util.Random), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord)
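A hypothetical way to drive generate(); the broker address and the key/record counts below are illustrative assumptions, not taken from the original:

// Hypothetical standalone driver; "localhost:9092", 10 keys and
// 100 records per key are assumptions for illustration.
public static void main(String[] args) throws Exception {
    Map<String, Set<Integer>> produced = generate("localhost:9092", 10, 100);
    for (Map.Entry<String, Set<Integer>> entry : produced.entrySet()) {
        System.out.println(entry.getKey() + ": " + entry.getValue().size() + " distinct values");
    }
}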

Aggregations

ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 202
Test (org.junit.Test): 95
Properties (java.util.Properties): 57
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 57
ArrayList (java.util.ArrayList): 46
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 41
Callback (org.apache.kafka.clients.producer.Callback): 31
TopicPartition (org.apache.kafka.common.TopicPartition): 31
Future (java.util.concurrent.Future): 26
HashMap (java.util.HashMap): 22
KafkaConsumer (org.apache.kafka.clients.consumer.KafkaConsumer): 22
StringSerializer (org.apache.kafka.common.serialization.StringSerializer): 22
List (java.util.List): 20
Random (java.util.Random): 19
ConsumerRecord (org.apache.kafka.clients.consumer.ConsumerRecord): 17
IOException (java.io.IOException): 16
KafkaException (org.apache.kafka.common.KafkaException): 16
MockProducer (org.apache.kafka.clients.producer.MockProducer): 14
Producer (org.apache.kafka.clients.producer.Producer): 14
DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner): 12