Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.
From the class RecordAccumulatorTest, method testSplitAndReenqueue.
@Test
public void testSplitAndReenqueue() throws ExecutionException, InterruptedException {
    long now = time.milliseconds();
    RecordAccumulator accum = createTestRecordAccumulator(1024, 10 * 1024, CompressionType.GZIP, 10);
    // Create a big batch
    ByteBuffer buffer = ByteBuffer.allocate(4096);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
    ProducerBatch batch = new ProducerBatch(tp1, builder, now, true);
    byte[] value = new byte[1024];
    final AtomicInteger acked = new AtomicInteger(0);
    Callback cb = new Callback() {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            acked.incrementAndGet();
        }
    };
    // Append two messages so the batch is too big.
    Future<RecordMetadata> future1 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    Future<RecordMetadata> future2 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now);
    assertNotNull(future1);
    assertNotNull(future2);
    batch.close();
    // Enqueue the batch to the accumulator as if the batch was created by the accumulator.
    accum.reenqueue(batch, now);
    time.sleep(101L);
    // Drain the batch.
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertTrue(result.readyNodes.size() > 0, "The batch should be ready");
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertEquals(1, drained.size(), "Only node1 should be drained");
    assertEquals(1, drained.get(node1.id()).size(), "Only one batch should be drained");
    // Split and reenqueue the batch.
    accum.splitAndReenqueue(drained.get(node1.id()).get(0));
    time.sleep(101L);
    drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    // Complete the first split batch; its record should be acked at offset 0.
    drained.get(node1.id()).get(0).complete(acked.get(), 100L);
    assertEquals(1, acked.get(), "The first message should have been acked.");
    assertTrue(future1.isDone());
    assertEquals(0, future1.get().offset());
    drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertFalse(drained.isEmpty());
    assertFalse(drained.get(node1.id()).isEmpty());
    // Complete the second split batch; its record should be acked at offset 1.
    drained.get(node1.id()).get(0).complete(acked.get(), 100L);
    assertEquals(2, acked.get(), "Both messages should have been acked.");
    assertTrue(future2.isDone());
    assertEquals(1, future2.get().offset());
}
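The test above exercises the Callback by driving ProducerBatch directly. For contrast, here is a minimal sketch of how application code passes the same org.apache.kafka.clients.producer.Callback to KafkaProducer.send; the broker address and topic name are assumptions for illustration, not part of the test:

import java.util.Properties;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class CallbackSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            // Callback has a single method, so a lambda works.
            Callback cb = (metadata, exception) -> {
                if (exception != null)
                    System.err.println("send failed: " + exception);
                else
                    System.out.printf("acked %s-%d@%d%n",
                        metadata.topic(), metadata.partition(), metadata.offset());
            };
            producer.send(new ProducerRecord<>("my-topic", "key", "value"), cb); // topic is an assumption
        }
    }
}

The callback fires on the producer's I/O thread once the broker responds, which is exactly the hook the test's acked counter relies on.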
Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.
From the class RecordAccumulatorTest, method testAbortUnsentBatches.
@Test
public void testAbortUnsentBatches() throws Exception {
    int lingerMs = Integer.MAX_VALUE;
    int numRecords = 100;
    final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0);
    final RecordAccumulator accum = createTestRecordAccumulator(128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs);
    final KafkaException cause = new KafkaException();
    class TestCallback implements Callback {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            assertEquals(cause, exception);
            numExceptionReceivedInCallback.incrementAndGet();
        }
    }
    for (int i = 0; i < numRecords; i++)
        accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs, false, time.milliseconds());
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertFalse(result.readyNodes.isEmpty());
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertTrue(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
    // Abort only the batches that have not yet been drained; drained batches stay incomplete.
    accum.abortUndrainedBatches(cause);
    int numDrainedRecords = 0;
    for (Map.Entry<Integer, List<ProducerBatch>> drainedEntry : drained.entrySet()) {
        for (ProducerBatch batch : drainedEntry.getValue()) {
            assertTrue(batch.isClosed());
            assertFalse(batch.produceFuture.completed());
            numDrainedRecords += batch.recordCount;
        }
    }
    assertTrue(numDrainedRecords > 0);
    assertTrue(numExceptionReceivedInCallback.get() > 0);
    // Every record was either drained or failed with the abort exception.
    assertEquals(numRecords, numExceptionReceivedInCallback.get() + numDrainedRecords);
    assertFalse(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
}
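Because Callback declares a single abstract method, the local TestCallback class above could equivalently be written as a lambda. A fragment sketch, assuming the same cause and numExceptionReceivedInCallback variables from the test:

Callback cb = (metadata, exception) -> {
    // Same behavior as TestCallback: verify the abort cause, count the failure.
    assertEquals(cause, exception);
    numExceptionReceivedInCallback.incrementAndGet();
};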
Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.
From the class RecordAccumulatorTest, method testAbortIncompleteBatches.
@Test
public void testAbortIncompleteBatches() throws Exception {
    int lingerMs = Integer.MAX_VALUE;
    int numRecords = 100;
    final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0);
    final RecordAccumulator accum = createTestRecordAccumulator(128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs);
    class TestCallback implements Callback {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            assertEquals("Producer is closed forcefully.", exception.getMessage());
            numExceptionReceivedInCallback.incrementAndGet();
        }
    }
    for (int i = 0; i < numRecords; i++)
        accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs, false, time.milliseconds());
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertFalse(result.readyNodes.isEmpty());
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertTrue(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
    int numDrainedRecords = 0;
    for (Map.Entry<Integer, List<ProducerBatch>> drainedEntry : drained.entrySet()) {
        for (ProducerBatch batch : drainedEntry.getValue()) {
            assertTrue(batch.isClosed());
            assertFalse(batch.produceFuture.completed());
            numDrainedRecords += batch.recordCount;
        }
    }
    assertTrue(numDrainedRecords > 0 && numDrainedRecords < numRecords);
    // Abort everything still incomplete, including the already-drained batches.
    accum.abortIncompleteBatches();
    assertEquals(numRecords, numExceptionReceivedInCallback.get());
    assertFalse(accum.hasUndrained());
    assertFalse(accum.hasIncomplete());
}
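The "Producer is closed forcefully." message asserted above is what callbacks observe when the producer is force-closed before in-flight records complete. A hedged sketch of reaching that path from application code, assuming a broker at localhost:9092 and a topic named my-topic; note that in practice the send may still succeed if the broker responds before the close takes effect:

import java.time.Duration;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class ForcedCloseSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        KafkaProducer<String, String> producer = new KafkaProducer<>(props);
        producer.send(new ProducerRecord<>("my-topic", "value"), (metadata, exception) -> {
            // If the close wins the race, this receives a KafkaException
            // whose message is "Producer is closed forcefully."
            if (exception != null)
                System.err.println(exception.getMessage());
        });
        // A zero timeout forces an immediate close; incomplete batches are aborted.
        producer.close(Duration.ZERO);
    }
}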
Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.
From the class KafkaStatusBackingStoreTest, method putConnectorStateShouldOverride.
@Test
public void putConnectorStateShouldOverride() {
    final byte[] value = new byte[0];
    String otherWorkerId = "anotherhost:8083";
    // The persisted state came from a different host and has a newer generation.
    Map<String, Object> firstStatusRead = new HashMap<>();
    firstStatusRead.put("worker_id", otherWorkerId);
    firstStatusRead.put("state", "RUNNING");
    firstStatusRead.put("generation", 1L);
    Map<String, Object> secondStatusRead = new HashMap<>();
    secondStatusRead.put("worker_id", WORKER_ID);
    secondStatusRead.put("state", "UNASSIGNED");
    secondStatusRead.put("generation", 0L);
    when(converter.toConnectData(STATUS_TOPIC, value))
        .thenReturn(new SchemaAndValue(null, firstStatusRead))
        .thenReturn(new SchemaAndValue(null, secondStatusRead));
    when(converter.fromConnectData(eq(STATUS_TOPIC), any(Schema.class), any(Struct.class))).thenReturn(value);
    doAnswer(invocation -> {
        // Complete the send, then simulate reading the new status record back from the log.
        ((Callback) invocation.getArgument(2)).onCompletion(null, null);
        store.read(consumerRecord(1, "status-connector-conn", value));
        return null;
    }).when(kafkaBasedLog).send(eq("status-connector-conn"), eq(value), any(Callback.class));
    store.read(consumerRecord(0, "status-connector-conn", value));
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.UNASSIGNED, WORKER_ID, 0);
    store.put(status);
    assertEquals(status, store.get(CONNECTOR));
}
Use of org.apache.kafka.clients.producer.Callback in project kafka by apache.
From the class KafkaStatusBackingStoreTest, method putConnectorState.
@Test
public void putConnectorState() {
    byte[] value = new byte[0];
    when(converter.fromConnectData(eq(STATUS_TOPIC), any(Schema.class), any(Struct.class))).thenReturn(value);
    doAnswer(invocation -> {
        ((Callback) invocation.getArgument(2)).onCompletion(null, null);
        return null;
    }).when(kafkaBasedLog).send(eq("status-connector-conn"), eq(value), any(Callback.class));
    ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.RUNNING, WORKER_ID, 0);
    store.put(status);
    // state is not visible until read back from the log
    assertNull(store.get(CONNECTOR));
}
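Both Connect tests stub kafkaBasedLog.send so that the producer Callback is completed synchronously. Under the hood, the status store's write amounts to a plain producer send with a callback; a minimal sketch of that shape, where the broker address, the connect-status topic name, and the exact key format are all assumptions for illustration:

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringSerializer;

public class StatusWriteSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", ByteArraySerializer.class.getName());
        try (KafkaProducer<String, byte[]> producer = new KafkaProducer<>(props)) {
            byte[] value = new byte[0];
            // "connect-status" stands in for the configured status topic;
            // the key encodes the record type and connector name.
            producer.send(new ProducerRecord<>("connect-status", "status-connector-conn", value),
                (metadata, exception) -> {
                    if (exception != null) {
                        // a real store would retry or log the failure here
                    }
                    // even on success, store.get(...) does not reflect the new
                    // state until the record is read back from the topic
                });
        }
    }
}

This read-back requirement is why the last test asserts that store.get(CONNECTOR) is still null immediately after put.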