Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
In class RecordCollectorTest, the method shouldNotThrowStreamsExceptionOnCloseIfASendFailedWithContinueExceptionHandler:
@SuppressWarnings("unchecked")
@Test
public void shouldNotThrowStreamsExceptionOnCloseIfASendFailedWithContinueExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(
        new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
            @Override
            public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
                callback.onCompletion(null, new Exception());
                return null;
            }
        },
        "test", logContext, new AlwaysContinueProductionExceptionHandler());
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    collector.close();
}
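The pattern here is failure injection: MockProducer.send is overridden so the Callback is completed with an exception before any record leaves the producer, and the continue-style exception handler lets the collector swallow the failure on close. A minimal standalone sketch of that failure-injection pattern against the public producer API, with illustrative class and message names, might look like:

import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class FailingSendSketch {
    public static void main(String[] args) {
        // MockProducer whose send() reports a failure to the callback immediately.
        MockProducer<String, String> producer =
                new MockProducer<String, String>(true, new StringSerializer(), new StringSerializer()) {
            @Override
            public synchronized Future<RecordMetadata> send(ProducerRecord<String, String> record, Callback callback) {
                callback.onCompletion(null, new Exception("simulated send failure"));
                return null;
            }
        };
        // The callback observes the injected exception; nothing is actually sent.
        producer.send(new ProducerRecord<>("topic1", "key", "value"),
                (metadata, exception) -> System.out.println("send completed with: " + exception));
    }
}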
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
In class RecordCollectorTest, the method shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler:
@SuppressWarnings("unchecked")
@Test
public void shouldNotThrowStreamsExceptionOnSubsequentCallIfASendFailsWithContinueExceptionHandler() {
    final RecordCollector collector = new RecordCollectorImpl(
        new MockProducer(cluster, true, new DefaultPartitioner(), byteArraySerializer, byteArraySerializer) {
            @Override
            public synchronized Future<RecordMetadata> send(final ProducerRecord record, final Callback callback) {
                callback.onCompletion(null, new Exception());
                return null;
            }
        },
        "test", logContext, new AlwaysContinueProductionExceptionHandler());
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
    collector.send("topic1", "3", "0", null, stringSerializer, stringSerializer, streamPartitioner);
}
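AlwaysContinueProductionExceptionHandler is a test helper; in an application, the analogous knob is Kafka Streams' default.production.exception.handler configuration. A rough sketch of a continue-on-error handler, assuming the standard ProductionExceptionHandler interface and an illustrative class name:

import java.util.Map;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.streams.errors.ProductionExceptionHandler;

public class ContinueOnSendErrorHandler implements ProductionExceptionHandler {
    @Override
    public ProductionExceptionHandlerResponse handle(ProducerRecord<byte[], byte[]> record, Exception exception) {
        // Log the failed record and tell Streams to keep processing.
        System.err.println("Dropping record after send failure: " + exception);
        return ProductionExceptionHandlerResponse.CONTINUE;
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // no configuration needed
    }
}

// Registered via Streams configuration, e.g.:
// props.put(StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG, ContinueOnSendErrorHandler.class);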
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
In class RecordAccumulatorTest, the method testSplitAndReenqueue:
@Test
public void testSplitAndReenqueue() throws ExecutionException, InterruptedException {
    long now = time.milliseconds();
    RecordAccumulator accum = createTestRecordAccumulator(1024, 10 * 1024, CompressionType.GZIP, 10);
    // Create a big batch
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    ProducerBatch batch = new ProducerBatch(tp1, builder, now, true);
    byte[] value = new byte[1024];
    final AtomicInteger acked = new AtomicInteger(0);
    Callback cb = new Callback() {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            acked.incrementAndGet();
        }
    };
    // Append two messages so the batch is too big.
    Future<RecordMetadata> future1 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    Future<RecordMetadata> future2 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    assertNotNull(future1);
    assertNotNull(future2);
    batch.close();
    // Enqueue the batch to the accumulator as if the batch was created by the accumulator.
    accum.reenqueue(batch, now);
    time.sleep(101L);
    // Drain the batch.
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertTrue("The batch should be ready", result.readyNodes.size() > 0);
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertEquals("Only node1 should be drained", 1, drained.size());
    assertEquals("Only one batch should be drained", 1, drained.get(node1.id()).size());
    // Split and reenqueue the batch.
    accum.splitAndReenqueue(drained.get(node1.id()).get(0));
    time.sleep(101L);
    drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    drained.get(node1.id()).get(0).done(acked.get(), 100L, null);
    assertEquals("The first message should have been acked.", 1, acked.get());
    assertTrue(future1.isDone());
    assertEquals(0, future1.get().offset());
    drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    drained.get(node1.id()).get(0).done(acked.get(), 100L, null);
    assertEquals("Both messages should have been acked.", 2, acked.get());
    assertTrue(future2.isDone());
    assertEquals(1, future2.get().offset());
}
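The shared acked counter is a common Callback idiom for verifying deliveries in tests. A small self-contained sketch of the same idiom using only the public MockProducer API (names are illustrative; with autoComplete enabled each send is acknowledged synchronously):

import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class AckCountingSketch {
    public static void main(String[] args) throws Exception {
        MockProducer<String, String> producer =
                new MockProducer<>(true, new StringSerializer(), new StringSerializer());
        final AtomicInteger acked = new AtomicInteger(0);
        // One shared callback counts successful acknowledgements across all sends.
        Callback cb = (metadata, exception) -> {
            if (exception == null)
                acked.incrementAndGet();
        };
        Future<RecordMetadata> future1 = producer.send(new ProducerRecord<>("topic1", "k1", "v1"), cb);
        Future<RecordMetadata> future2 = producer.send(new ProducerRecord<>("topic1", "k2", "v2"), cb);
        // autoComplete=true acknowledges synchronously, so both futures are already done here.
        System.out.println("acked=" + acked.get()
                + " offsets=" + future1.get().offset() + "," + future2.get().offset());
    }
}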
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
In class RecordAccumulatorTest, the method testAbortIncompleteBatches:
@Test
public void testAbortIncompleteBatches() throws Exception {
    long lingerMs = Long.MAX_VALUE;
    int numRecords = 100;
    final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0);
    final RecordAccumulator accum = createTestRecordAccumulator(128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs);
    class TestCallback implements Callback {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            assertTrue(exception.getMessage().equals("Producer is closed forcefully."));
            numExceptionReceivedInCallback.incrementAndGet();
        }
    }
    for (int i = 0; i < numRecords; i++)
        accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs);
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertFalse(result.readyNodes.isEmpty());
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertTrue(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
    int numDrainedRecords = 0;
    for (Map.Entry<Integer, List<ProducerBatch>> drainedEntry : drained.entrySet()) {
        for (ProducerBatch batch : drainedEntry.getValue()) {
            assertTrue(batch.isClosed());
            assertFalse(batch.produceFuture.completed());
            numDrainedRecords += batch.recordCount;
        }
    }
    assertTrue(numDrainedRecords > 0 && numDrainedRecords < numRecords);
    accum.abortIncompleteBatches();
    assertEquals(numRecords, numExceptionReceivedInCallback.get());
    assertFalse(accum.hasUndrained());
    assertFalse(accum.hasIncomplete());
}
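When incomplete batches are aborted, user code only sees the failure through the Callback's exception argument. A compact sketch of that callback-sees-the-abort behaviour using the public MockProducer API, with the failure injected explicitly via errorNext (names and message text are illustrative):

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class AbortedSendsSketch {
    public static void main(String[] args) {
        // autoComplete=false keeps sends pending until they are completed or failed explicitly.
        MockProducer<String, String> producer =
                new MockProducer<>(false, new StringSerializer(), new StringSerializer());
        final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0);
        Callback cb = (metadata, exception) -> {
            if (exception != null)
                numExceptionReceivedInCallback.incrementAndGet();
        };
        producer.send(new ProducerRecord<>("topic1", "k1", "v1"), cb);
        producer.send(new ProducerRecord<>("topic1", "k2", "v2"), cb);
        // Fail both pending sends; each callback observes the exception.
        producer.errorNext(new RuntimeException("Producer is closed forcefully."));
        producer.errorNext(new RuntimeException("Producer is closed forcefully."));
        System.out.println(numExceptionReceivedInCallback.get()); // expected: 2
    }
}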
Use of org.apache.kafka.clients.producer.Callback in project apache-kafka-on-k8s by banzaicloud.
In class SenderTest, the method testAppendInExpiryCallback:
@Test
public void testAppendInExpiryCallback() throws InterruptedException {
    int messagesPerBatch = 10;
    final AtomicInteger expiryCallbackCount = new AtomicInteger(0);
    final AtomicReference<Exception> unexpectedException = new AtomicReference<>();
    final byte[] key = "key".getBytes();
    final byte[] value = "value".getBytes();
    final long maxBlockTimeMs = 1000;
    Callback callback = new Callback() {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            if (exception instanceof TimeoutException) {
                expiryCallbackCount.incrementAndGet();
                try {
                    accumulator.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs);
                } catch (InterruptedException e) {
                    throw new RuntimeException("Unexpected interruption", e);
                }
            } else if (exception != null)
                unexpectedException.compareAndSet(null, exception);
        }
    };
    for (int i = 0; i < messagesPerBatch; i++)
        accumulator.append(tp1, 0L, key, value, null, callback, maxBlockTimeMs);
    // Advance the clock to expire the first batch.
    time.sleep(10000);
    // Disconnect the target node for the pending produce request. This ensures that the sender will try to
    // expire the batch.
    Node clusterNode = this.cluster.nodes().get(0);
    client.disconnect(clusterNode.idString());
    client.blackout(clusterNode, 100);
    // We should try to flush the batch, but we expire it instead without sending anything.
    sender.run(time.milliseconds());
    assertEquals("Callbacks not invoked for expiry", messagesPerBatch, expiryCallbackCount.get());
    assertNull("Unexpected exception", unexpectedException.get());
    // Make sure that the records were appended back to the batch.
    assertTrue(accumulator.batches().containsKey(tp1));
    assertEquals(1, accumulator.batches().get(tp1).size());
    assertEquals(messagesPerBatch, accumulator.batches().get(tp1).peekFirst().recordCount);
}
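The callback above reacts to batch expiry by appending the record again from inside onCompletion. A self-contained sketch of the same resend-on-TimeoutException idea against the public MockProducer API (names are illustrative; the real test re-appends to the internal accumulator instead):

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.serialization.StringSerializer;

public class ResendOnExpirySketch {
    public static void main(String[] args) {
        // autoComplete=false so completions are driven manually via errorNext()/completeNext().
        final MockProducer<String, String> producer =
                new MockProducer<>(false, new StringSerializer(), new StringSerializer());
        final ProducerRecord<String, String> record = new ProducerRecord<>("topic1", "key", "value");
        final AtomicInteger resends = new AtomicInteger(0);
        Callback resendOnTimeout = new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception instanceof TimeoutException) {
                    // The record expired before delivery; hand it back to the producer once more.
                    resends.incrementAndGet();
                    producer.send(record, this);
                }
            }
        };
        producer.send(record, resendOnTimeout);
        producer.errorNext(new TimeoutException("batch expired")); // first attempt times out, callback re-sends
        producer.completeNext();                                   // second attempt succeeds
        System.out.println("resends=" + resends.get());            // expected: 1
    }
}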