Example 1 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.

From the class RecordAccumulatorTest, the method testAppendInExpiryCallback:

@Test
public void testAppendInExpiryCallback() throws InterruptedException {
    long retryBackoffMs = 100L;
    long lingerMs = 3000L;
    int requestTimeout = 60;
    int messagesPerBatch = 1024 / msgSize;
    final RecordAccumulator accum = new RecordAccumulator(1024, 10 * 1024, CompressionType.NONE, lingerMs, retryBackoffMs, metrics, time);
    final AtomicInteger expiryCallbackCount = new AtomicInteger();
    final AtomicReference<Exception> unexpectedException = new AtomicReference<Exception>();
    Callback callback = new Callback() {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            if (exception instanceof TimeoutException) {
                expiryCallbackCount.incrementAndGet();
                try {
                    accum.append(tp1, 0L, key, value, null, maxBlockTimeMs);
                } catch (InterruptedException e) {
                    throw new RuntimeException("Unexpected interruption", e);
                }
            } else if (exception != null)
                unexpectedException.compareAndSet(null, exception);
        }
    };
    // Fill the first batch, forcing a second one to be created.
    for (int i = 0; i < messagesPerBatch + 1; i++) {
        accum.append(tp1, 0L, key, value, callback, maxBlockTimeMs);
    }
    assertEquals(2, accum.batches().get(tp1).size());
    assertTrue("First batch not full", accum.batches().get(tp1).peekFirst().isFull());
    // Advance the clock to expire the first batch.
    time.sleep(requestTimeout + 1);
    List<ProducerBatch> expiredBatches = accum.abortExpiredBatches(requestTimeout, time.milliseconds());
    assertEquals("The batch was not expired", 1, expiredBatches.size());
    assertEquals("Callbacks not invoked for expiry", messagesPerBatch, expiryCallbackCount.get());
    assertNull("Unexpected exception", unexpectedException.get());
    assertEquals("Some messages not appended from expiry callbacks", 2, accum.batches().get(tp1).size());
    assertTrue("First batch not full after expiry callbacks with appends", accum.batches().get(tp1).peekFirst().isFull());
}
Also used : AtomicReference(java.util.concurrent.atomic.AtomicReference) TimeoutException(org.apache.kafka.common.errors.TimeoutException) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Callback(org.apache.kafka.clients.producer.Callback) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Test(org.junit.Test)
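
The pattern this test exercises, re-queueing a record from an expiry callback, looks roughly like the following against the public producer API. This is a minimal sketch using the same Callback and TimeoutException types as the test; producer and record are hypothetical placeholders for a configured KafkaProducer and a ProducerRecord.

Callback resendOnExpiry = new Callback() {

    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception instanceof TimeoutException) {
            // The batch expired before delivery; re-queue the record once more.
            producer.send(record, this);
        } else if (exception != null) {
            // Any other failure is unexpected here; surface it.
            exception.printStackTrace();
        }
    }
};
producer.send(record, resendOnExpiry);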

Example 2 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.

From the class WorkerSourceTaskTest, the method expectSendRecord:

private Capture<ProducerRecord<byte[], byte[]>> expectSendRecord(boolean anyTimes, boolean isRetry, boolean succeed) throws InterruptedException {
    expectConvertKeyValue(anyTimes);
    expectApplyTransformationChain(anyTimes);
    Capture<ProducerRecord<byte[], byte[]>> sent = EasyMock.newCapture();
    // 1. Offset data is passed to the offset storage.
    if (!isRetry) {
        offsetWriter.offset(PARTITION, OFFSET);
        if (anyTimes)
            PowerMock.expectLastCall().anyTimes();
        else
            PowerMock.expectLastCall();
    }
    // 2. Converted data passed to the producer, which will need callbacks invoked for flush to work
    IExpectationSetters<Future<RecordMetadata>> expect = EasyMock.expect(producer.send(EasyMock.capture(sent), EasyMock.capture(producerCallbacks)));
    IAnswer<Future<RecordMetadata>> expectResponse = new IAnswer<Future<RecordMetadata>>() {

        @Override
        public Future<RecordMetadata> answer() throws Throwable {
            synchronized (producerCallbacks) {
                for (org.apache.kafka.clients.producer.Callback cb : producerCallbacks.getValues()) {
                    cb.onCompletion(new RecordMetadata(new TopicPartition("foo", 0), 0, 0, 0L, 0L, 0, 0), null);
                }
                producerCallbacks.reset();
            }
            return sendFuture;
        }
    };
    if (anyTimes)
        expect.andStubAnswer(expectResponse);
    else
        expect.andAnswer(expectResponse);
    // 3. As a result of a successful producer send callback, we'll notify the source task of the record commit
    expectTaskCommitRecord(anyTimes, succeed);
    return sent;
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) IAnswer(org.easymock.IAnswer) TopicPartition(org.apache.kafka.common.TopicPartition) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Future(java.util.concurrent.Future)
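
For newer tests, the client library's own MockProducer can often stand in for this EasyMock scaffolding: constructed with autoComplete set to false, it holds each send pending until completeNext() resolves it and fires the registered callback with a synthetic RecordMetadata. A minimal sketch, assuming byte-array serializers:

MockProducer<byte[], byte[]> producer =
        new MockProducer<>(false, new ByteArraySerializer(), new ByteArraySerializer());

Future<RecordMetadata> future = producer.send(new ProducerRecord<>("foo", "value".getBytes()));

// completeNext() resolves the oldest pending send and fires its callback
// with a synthetic RecordMetadata.
producer.completeNext();
assertTrue(future.isDone());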

Example 3 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in project hadoop by apache.

From the class TestKafkaMetrics, the method testPutMetrics:

@Test
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testPutMetrics() throws Exception {
    // Create a record by mocking MetricsRecord class.
    MetricsRecord record = mock(MetricsRecord.class);
    when(record.tags()).thenReturn(Lists.newArrayList(new MetricsTag(KafkaMetricsInfo.KafkaTag, "test_tag")));
    when(record.timestamp()).thenReturn(System.currentTimeMillis());
    // Create a metric using AbstractMetric class.
    AbstractMetric metric = new AbstractMetric(KafkaMetricsInfo.KafkaCounter) {

        @Override
        public Number value() {
            return 123;
        }

        @Override
        public MetricType type() {
            return null;
        }

        @Override
        public void visit(MetricsVisitor visitor) {
        }
    };
    // Create a list of metrics.
    Iterable<AbstractMetric> metrics = Lists.newArrayList(metric);
    when(record.name()).thenReturn("Kafka record name");
    when(record.metrics()).thenReturn(metrics);
    SubsetConfiguration conf = mock(SubsetConfiguration.class);
    when(conf.getString(KafkaSink.BROKER_LIST)).thenReturn("localhost:9092");
    String topic = "myTestKafkaTopic";
    when(conf.getString(KafkaSink.TOPIC)).thenReturn(topic);
    // Create the KafkaSink object and initialize it.
    kafkaSink = new KafkaSink();
    kafkaSink.init(conf);
    // Create a mock KafkaProducer as a producer for KafkaSink.
    Producer<Integer, byte[]> mockProducer = mock(KafkaProducer.class);
    kafkaSink.setProducer(mockProducer);
    // Create the json object from the record.
    StringBuilder jsonLines = recordToJson(record);
    if (LOG.isDebugEnabled()) {
        LOG.debug("kafka message: " + jsonLines.toString());
    }
    // Send the record and store the result in a mock Future.
    Future<RecordMetadata> f = mock(Future.class);
    when(mockProducer.send((ProducerRecord) anyObject())).thenReturn(f);
    kafkaSink.putMetrics(record);
    // Capture the sent record and verify it.
    ArgumentCaptor<ProducerRecord> argument = ArgumentCaptor.forClass(ProducerRecord.class);
    verify(mockProducer).send(argument.capture());
    // Compare the received data with the original one.
    ProducerRecord<Integer, byte[]> data = argument.getValue();
    String jsonResult = new String(data.value());
    if (LOG.isDebugEnabled()) {
        LOG.debug("kafka result: " + jsonResult);
    }
    assertEquals(jsonLines.toString(), jsonResult);
}
Also used : MetricsRecord(org.apache.hadoop.metrics2.MetricsRecord) AbstractMetric(org.apache.hadoop.metrics2.AbstractMetric) MetricsTag(org.apache.hadoop.metrics2.MetricsTag) MetricsVisitor(org.apache.hadoop.metrics2.MetricsVisitor) SubsetConfiguration(org.apache.commons.configuration2.SubsetConfiguration) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) KafkaSink(org.apache.hadoop.metrics2.sink.KafkaSink) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) Test(org.junit.Test)
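
If a later assertion needed the send result itself, the mocked Future could be stubbed to return a concrete RecordMetadata, using the same seven-argument constructor seen in Example 2. A sketch, reusing the f and topic variables from the test above (TopicPartition from org.apache.kafka.common):

RecordMetadata stubbed = new RecordMetadata(new TopicPartition(topic, 0), 0, 0, 0L, 0L, 0, 0);
when(f.get()).thenReturn(stubbed);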

Example 4 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.

From the class SmokeTestDriver, the method generate:

public static Map<String, Set<Integer>> generate(String kafka, final int numKeys, final int maxRecordsPerKey) throws Exception {
    Properties props = new Properties();
    props.put(ProducerConfig.CLIENT_ID_CONFIG, "SmokeTest");
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafka);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
    KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props);
    int numRecordsProduced = 0;
    Map<String, Set<Integer>> allData = new HashMap<>();
    ValueList[] data = new ValueList[numKeys];
    for (int i = 0; i < numKeys; i++) {
        data[i] = new ValueList(i, i + maxRecordsPerKey - 1);
        allData.put(data[i].key, new HashSet<Integer>());
    }
    Random rand = new Random();
    int remaining = data.length;
    while (remaining > 0) {
        int index = rand.nextInt(remaining);
        String key = data[index].key;
        int value = data[index].next();
        if (value < 0) {
            remaining--;
            data[index] = data[remaining];
        } else {
            ProducerRecord<byte[], byte[]> record = new ProducerRecord<>("data", stringSerde.serializer().serialize("", key), intSerde.serializer().serialize("", value));
            producer.send(record, new Callback() {

                @Override
                public void onCompletion(final RecordMetadata metadata, final Exception exception) {
                    if (exception != null) {
                        exception.printStackTrace();
                        Exit.exit(1);
                    }
                }
            });
            numRecordsProduced++;
            allData.get(key).add(value);
            if (numRecordsProduced % 100 == 0) {
                System.out.println(numRecordsProduced + " records produced");
            }
            Utils.sleep(2);
        }
    }
    producer.close();
    return Collections.unmodifiableMap(allData);
}
Also used : KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) HashSet(java.util.HashSet) Set(java.util.Set) HashMap(java.util.HashMap) Properties(java.util.Properties) RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Callback(org.apache.kafka.clients.producer.Callback) Random(java.util.Random) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord)
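
On success, the callback receives the broker-assigned position of the record in its RecordMetadata argument; a variant of the callback above could log it for debugging. A sketch:

@Override
public void onCompletion(final RecordMetadata metadata, final Exception exception) {
    if (exception != null) {
        exception.printStackTrace();
        Exit.exit(1);
    } else {
        // topic(), partition() and offset() locate the record on the broker.
        System.out.println("acked " + metadata.topic() + "-" + metadata.partition()
                + " @ offset " + metadata.offset());
    }
}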

Example 5 with RecordMetadata

Use of org.apache.kafka.clients.producer.RecordMetadata in project logging-log4j2 by apache.

From the class KafkaManager, the method send:

public void send(final byte[] msg) throws ExecutionException, InterruptedException, TimeoutException {
    if (producer != null) {
        ProducerRecord<byte[], byte[]> newRecord = new ProducerRecord<>(topic, msg);
        if (syncSend) {
            Future<RecordMetadata> response = producer.send(newRecord);
            response.get(timeoutMillis, TimeUnit.MILLISECONDS);
        } else {
            producer.send(newRecord, new Callback() {

                @Override
                public void onCompletion(RecordMetadata metadata, Exception e) {
                    if (e != null) {
                        LOGGER.error("Unable to write to Kafka in appender [" + getName() + "]", e);
                    }
                }
            });
        }
    }
}
Also used : RecordMetadata(org.apache.kafka.clients.producer.RecordMetadata) Callback(org.apache.kafka.clients.producer.Callback) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) TimeoutException(java.util.concurrent.TimeoutException) ExecutionException(java.util.concurrent.ExecutionException)
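
In the synchronous branch, the blocking get() also returns the RecordMetadata, which could be logged before returning. A sketch, assuming the LOGGER declared by the manager class:

RecordMetadata metadata = response.get(timeoutMillis, TimeUnit.MILLISECONDS);
LOGGER.debug("Kafka ack in appender [{}]: {}-{} @ offset {}", getName(),
        metadata.topic(), metadata.partition(), metadata.offset());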

Aggregations

RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata) 189
Test (org.junit.Test) 64
Node (org.apache.kafka.common.Node) 50
Test (org.junit.jupiter.api.Test) 50
TopicPartition (org.apache.kafka.common.TopicPartition) 48
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord) 45
ExecutionException (java.util.concurrent.ExecutionException) 33
Callback (org.apache.kafka.clients.producer.Callback) 32
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer) 31
Properties (java.util.Properties) 30
HashMap (java.util.HashMap) 24
TimeoutException (org.apache.kafka.common.errors.TimeoutException) 23
ArrayList (java.util.ArrayList) 21
KafkaException (org.apache.kafka.common.KafkaException) 19
List (java.util.List) 15
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 15
Metrics (org.apache.kafka.common.metrics.Metrics) 15
LinkedHashMap (java.util.LinkedHashMap) 13
Future (java.util.concurrent.Future) 13
Map (java.util.Map) 12