Example 66 with Callback

Use of org.apache.kafka.clients.producer.Callback in the project apache-kafka-on-k8s by banzaicloud.

From the class RecordCollectorTest, method shouldNotThrowStreamsExceptionOnCloseIfASendFailedWithContinueExceptionHandler:

@SuppressWarnings("unchecked")
@Test
public void shouldNotThrowStreamsExceptionOnCloseIfASendFailedWithContinueExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            // Simulate a broker-side failure: complete the callback immediately with an error.
            callback.onCompletion(null, new Exception());
            return null;
        }
    }, "test", logContext, new AlwaysContinueProductionExceptionHandler());
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    collector.close();
}
Also used: AlwaysContinueProductionExceptionHandler (org.apache.kafka.streams.errors.AlwaysContinueProductionExceptionHandler), MockProducer (org.apache.kafka.clients.producer.MockProducer), Callback (org.apache.kafka.clients.producer.Callback), DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), Future (java.util.concurrent.Future), KafkaException (org.apache.kafka.common.KafkaException), StreamsException (org.apache.kafka.streams.errors.StreamsException), Test (org.junit.Test)
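
In a real Kafka Streams application the production exception handler is not handed to the collector directly; it is configured through StreamsConfig. A minimal sketch, assuming Kafka Streams 1.1+ (where the default.production.exception.handler setting exists); the application id, bootstrap address, and ContinueOnErrorHandler class are placeholders (the handler itself is sketched after Example 67):

import java.util.Properties;
import org.apache.kafka.streams.StreamsConfig;

Properties props = new Properties();
props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-app");        // placeholder
props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder
// Keep processing on send failures instead of surfacing a StreamsException.
props.put(StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG,
        ContinueOnErrorHandler.class);  // hypothetical handler, see sketch after Example 67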

Example 67 with Callback

Use of org.apache.kafka.clients.producer.Callback in the project apache-kafka-on-k8s by banzaicloud.

From the class RecordCollectorTest, method shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler:

@SuppressWarnings("unchecked")
@Test
public void shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {

        @Override
        public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
            // Simulate a broker-side failure: complete the callback immediately with an error.
            callback.onCompletion(null, new Exception());
            return null;
        }
    }, "test", logContext, new AlwaysContinueProductionExceptionHandler());
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
}
Also used: AlwaysContinueProductionExceptionHandler (org.apache.kafka.streams.errors.AlwaysContinueProductionExceptionHandler), MockProducer (org.apache.kafka.clients.producer.MockProducer), Callback (org.apache.kafka.clients.producer.Callback), DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner), ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord), Future (java.util.concurrent.Future), KafkaException (org.apache.kafka.common.KafkaException), StreamsException (org.apache.kafka.streams.errors.StreamsException), Test (org.junit.Test)
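
The AlwaysContinueProductionExceptionHandler used in these two tests is a test utility; the equivalent for application code is a small ProductionExceptionHandler implementation. A minimal sketch (the class name ContinueOnErrorHandler is an assumption, not part of Kafka):

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

public class ContinueOnErrorHandler implements ProductionExceptionHandler {

    @Override
    public ProductionExceptionHandlerResponse handle(final ProducerRecord<byte[], byte[]> record,
                                                     final Exception exception) {
        // Swallow the error and keep the task running, mirroring the "always continue" behavior.
        return ProductionExceptionHandlerResponse.CONTINUE;
    }

    @Override
    public void configure(final Map<String, ?> configs) {
        // No configuration needed for this sketch.
    }
}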

Example 68 with Callback

Use of org.apache.kafka.clients.producer.Callback in the project apache-kafka-on-k8s by banzaicloud.

From the class RecordAccumulatorTest, method testSplitAndReenqueue:

@Test
public void testSplitAndReenqueue() throws ExecutionException, InterruptedException {
    long now = time.milliseconds();
    RecordAccumulator accum = createTestRecordAccumulator(1024, 10 * 1024, CompressionType.GZIP, 10);
    // Create a big batch
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    ProducerBatch batch = new ProducerBatch(tp1, builder, now, true);
    byte[] value = new byte[1024];
    final AtomicInteger acked = new AtomicInteger(0);
    Callback cb = new Callback() {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            acked.incrementAndGet();
        }
    };
    // Append two messages so the batch is too big.
    Future<RecordMetadata> future1 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    Future<RecordMetadata> future2 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    assertNotNull(future1);
    assertNotNull(future2);
    batch.close();
    // Enqueue the batch to the accumulator as if the batch was created by the accumulator.
    accum.reenqueue(batch, now);
    time.sleep(101L);
    // Drain the batch.
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertTrue("The batch should be ready", result.readyNodes.size() > 0);
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertEquals("Only node1 should be drained", 1, drained.size());
    assertEquals("Only one batch should be drained", 1, drained.get(node1.id()).size());
    // Split and reenqueue the batch.
    accum.splitAndReenqueue(drained.get(node1.id()).get(0));
    time.sleep(101L);
    drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    drained.get(node1.id()).get(0).done(acked.get(), 100L, null);
    assertEquals("The first message should have been acked.", 1, acked.get());
    assertTrue(future1.isDone());
    assertEquals(0, future1.get().offset());
    drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    drained.get(node1.id()).get(0).done(acked.get(), 100L, null);
    assertEquals("Both message should have been acked.", 2, acked.get());
    assertTrue(future2.isDone());
    assertEquals(1, future2.get().offset());
}
Also used: ByteBuffer (java.nio.ByteBuffer), KafkaException (org.apache.kafka.common.KafkaException), ExecutionException (java.util.concurrent.ExecutionException), UnsupportedVersionException (org.apache.kafka.common.errors.UnsupportedVersionException), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Callback (org.apache.kafka.clients.producer.Callback), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), ArrayList (java.util.ArrayList), Arrays.asList (java.util.Arrays.asList), List (java.util.List), Test (org.junit.Test)
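
Outside the test harness, the same Callback interface is the hook application code passes to KafkaProducer.send to learn whether a record was acknowledged or failed. A minimal, self-contained sketch; the topic name and bootstrap address are placeholders:

import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // placeholder
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
    producer.send(new ProducerRecord<>("example-topic", "key", "value"), new Callback() {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            // The callback fires once per record, on the producer's I/O thread.
            if (exception != null)
                exception.printStackTrace();
            else
                System.out.printf("acked: partition %d, offset %d%n",
                        metadata.partition(), metadata.offset());
        }
    });
}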

Example 69 with Callback

Use of org.apache.kafka.clients.producer.Callback in the project apache-kafka-on-k8s by banzaicloud.

From the class RecordAccumulatorTest, method testAbortIncompleteBatches:

@Test
public void testAbortIncompleteBatches() throws Exception {
    long lingerMs = Long.MAX_VALUE;
    int numRecords = 100;
    final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0);
    final RecordAccumulator accum = createTestRecordAccumulator(128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs);
    class TestCallback implements Callback {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            assertEquals("Producer is closed forcefully.", exception.getMessage());
            numExceptionReceivedInCallback.incrementAndGet();
        }
    }
    for (int i = 0; i < numRecords; i++) accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs);
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertFalse(result.readyNodes.isEmpty());
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertTrue(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
    int numDrainedRecords = 0;
    for (Map.Entry<Integer, List<ProducerBatch>> drainedEntry : drained.entrySet()) {
        for (ProducerBatch batch : drainedEntry.getValue()) {
            assertTrue(batch.isClosed());
            assertFalse(batch.produceFuture.completed());
            numDrainedRecords += batch.recordCount;
        }
    }
    assertTrue(numDrainedRecords > 0 && numDrainedRecords < numRecords);
    accum.abortIncompleteBatches();
    assertEquals(numRecords, numExceptionReceivedInCallback.get());
    assertFalse(accum.hasUndrained());
    assertFalse(accum.hasIncomplete());
}
Also used: KafkaException (org.apache.kafka.common.KafkaException), ExecutionException (java.util.concurrent.ExecutionException), UnsupportedVersionException (org.apache.kafka.common.errors.UnsupportedVersionException), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Callback (org.apache.kafka.clients.producer.Callback), TopicPartition (org.apache.kafka.common.TopicPartition), ArrayList (java.util.ArrayList), Arrays.asList (java.util.Arrays.asList), List (java.util.List), Map (java.util.Map), Test (org.junit.Test)
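
Callback declares a single method, so under Java 8 it can also be written as a lambda; the counting pattern from TestCallback above then becomes compact. A sketch (the helper method, topic name, and record contents are illustrative, not from the test):

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;

static int sendAndCountFailures(final Producer<String, String> producer, final int numRecords) {
    final AtomicInteger failures = new AtomicInteger(0);
    for (int i = 0; i < numRecords; i++) {
        // The lambda is the Callback: count every send that completes with an exception.
        producer.send(new ProducerRecord<>("example-topic", "k" + i, "v" + i),
                (metadata, exception) -> {
                    if (exception != null)
                        failures.incrementAndGet();
                });
    }
    producer.flush();  // block until all in-flight sends complete before reading the counter
    return failures.get();
}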

Example 70 with Callback

Use of org.apache.kafka.clients.producer.Callback in the project apache-kafka-on-k8s by banzaicloud.

From the class SenderTest, method testAppendInExpiryCallback:

@Test
public void testAppendInExpiryCallback() throws InterruptedException {
    int messagesPerBatch = 10;
    final AtomicInteger expiryCallbackCount = new AtomicInteger(0);
    final AtomicReference<Exception> unexpectedException = new AtomicReference<>();
    final byte[] key = "key".getBytes();
    final byte[] value = "value".getBytes();
    final long maxBlockTimeMs = 1000;
    Callback callback = new Callback() {

        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            if (exception instanceof TimeoutException) {
                expiryCallbackCount.incrementAndGet();
                try {
                    accumulator.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
                } catch (InterruptedException e) {
                    throw new RuntimeException("Unexpected interruption", e);
                }
            } else if (exception != null)
                unexpectedException.compareAndSet(null, exception);
        }
    };
    for (int i = 0; i < messagesPerBatch; i++) accumulator.append(tp1, 0L, key, value, null, callback, maxBlockTimeMs);
    // Advance the clock to expire the first batch.
    time.sleep(10000);
    // Disconnect the target node for the pending produce request. This will ensure that sender will try to
    // expire the batch.
    Node clusterNode = this.cluster.nodes().get(0);
    client.disconnect(clusterNode.idString());
    client.blackout(clusterNode, 100);
    // We should try to flush the batch, but we expire it instead without sending anything.
    sender.run(time.milliseconds());
    assertEquals("Callbacks not invoked for expiry", messagesPerBatch, expiryCallbackCount.get());
    assertNull("Unexpected exception", unexpectedException.get());
    // Make sure that the records were appended back to the batch.
    assertTrue(accumulator.batches().containsKey(tp1));
    assertEquals(1, accumulator.batches().get(tp1).size());
    assertEquals(messagesPerBatch, accumulator.batches().get(tp1).peekFirst().recordCount);
}
Also used: Node (org.apache.kafka.common.Node), AtomicReference (java.util.concurrent.atomic.AtomicReference), ClusterAuthorizationException (org.apache.kafka.common.errors.ClusterAuthorizationException), OutOfOrderSequenceException (org.apache.kafka.common.errors.OutOfOrderSequenceException), UnsupportedForMessageFormatException (org.apache.kafka.common.errors.UnsupportedForMessageFormatException), RecordTooLargeException (org.apache.kafka.common.errors.RecordTooLargeException), NetworkException (org.apache.kafka.common.errors.NetworkException), TimeoutException (org.apache.kafka.common.errors.TimeoutException), ExecutionException (java.util.concurrent.ExecutionException), TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException), UnsupportedVersionException (org.apache.kafka.common.errors.UnsupportedVersionException), RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata), Callback (org.apache.kafka.clients.producer.Callback), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Test (org.junit.Test)
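
The append-from-callback pattern this test exercises has a production analogue: resending a record from its own callback when delivery times out. A hedged sketch (the class name and the unbounded retry are illustrative only; real code should cap the retry count):

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.errors.TimeoutException;

public class ResendOnTimeoutCallback implements Callback {

    private final Producer<byte[], byte[]> producer;
    private final ProducerRecord<byte[], byte[]> record;

    public ResendOnTimeoutCallback(final Producer<byte[], byte[]> producer,
                                   final ProducerRecord<byte[], byte[]> record) {
        this.producer = producer;
        this.record = record;
    }

    @Override
    public void onCompletion(final RecordMetadata metadata, final Exception exception) {
        // Re-enqueue the expired record, much as the test does via accumulator.append.
        if (exception instanceof TimeoutException)
            producer.send(record, this);
    }
}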

Aggregations (usage counts across the indexed examples)

Callback (org.apache.kafka.clients.producer.Callback): 81
Test (org.junit.Test): 47
ProducerRecord (org.apache.kafka.clients.producer.ProducerRecord): 39
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 37
KafkaException (org.apache.kafka.common.KafkaException): 21
Future (java.util.concurrent.Future): 18
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 18
ExecutionException (java.util.concurrent.ExecutionException): 15
ArrayList (java.util.ArrayList): 14
List (java.util.List): 13
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 13
MockProducer (org.apache.kafka.clients.producer.MockProducer): 13
HashMap (java.util.HashMap): 12
Properties (java.util.Properties): 12
DefaultPartitioner (org.apache.kafka.clients.producer.internals.DefaultPartitioner): 12
TopicPartition (org.apache.kafka.common.TopicPartition): 12
Schema (org.apache.kafka.connect.data.Schema): 12
Struct (org.apache.kafka.connect.data.Struct): 12
KafkaProducer (org.apache.kafka.clients.producer.KafkaProducer): 11
StreamsException (org.apache.kafka.streams.errors.StreamsException): 11