Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.
From the class SenderTest, the method testNoDoubleDeallocation:
@Test
public void testNoDoubleDeallocation() throws Exception {
    long totalSize = 1024 * 1024;
    String metricGrpName = "producer-custom-metrics";
    MatchingBufferPool pool = new MatchingBufferPool(totalSize, batchSize, metrics, time, metricGrpName);
    setupWithTransactionState(null, false, pool);
    // Send first ProduceRequest
    Future<RecordMetadata> request1 = appendToAccumulator(tp0);
    // send request
    sender.runOnce();
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, sender.inFlightBatches(tp0).size());
    time.sleep(REQUEST_TIMEOUT);
    assertFalse(pool.allMatch());
    // expire the batch
    sender.runOnce();
    assertTrue(request1.isDone());
    assertTrue(pool.allMatch(), "The batch should have been de-allocated");
    assertTrue(pool.allMatch());
    sender.runOnce();
    assertTrue(pool.allMatch(), "The batch should have been de-allocated");
    assertEquals(0, client.inFlightRequestCount());
    assertEquals(0, sender.inFlightBatches(tp0).size());
}
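MatchingBufferPool is a test-only helper defined elsewhere in SenderTest and not included in this excerpt. As a rough sketch of the idea behind its allMatch() check (the class name, fields, and method bodies below are assumptions for illustration, not the actual Kafka test code), a pool can record every buffer it hands out and flag a buffer that is returned twice or never returned:

import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.IdentityHashMap;
import java.util.Set;

// Hypothetical illustration only: tracks outstanding allocations so that a
// double deallocation (or a leaked buffer) becomes detectable, which is the
// invariant the assertions on pool.allMatch() above are checking.
public class TrackingPool {
    private final Set<ByteBuffer> outstanding =
            Collections.newSetFromMap(new IdentityHashMap<>());

    public synchronized ByteBuffer allocate(int size) {
        ByteBuffer buffer = ByteBuffer.allocate(size);
        outstanding.add(buffer);
        return buffer;
    }

    public synchronized void deallocate(ByteBuffer buffer) {
        if (!outstanding.remove(buffer)) {
            throw new IllegalStateException("buffer was already deallocated");
        }
    }

    // True once every allocated buffer has been returned exactly once.
    public synchronized boolean allMatch() {
        return outstanding.isEmpty();
    }
}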
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.
From the class SenderTest, the method testIdempotenceWithMultipleInflightsRetriedInOrder:
@Test
public void testIdempotenceWithMultipleInflightsRetriedInOrder() throws Exception {
    // Send multiple in flight requests, retry them all one at a time, in the correct order.
    final long producerId = 343434L;
    TransactionManager transactionManager = createTransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
    // Send first ProduceRequest
    Future<RecordMetadata> request1 = appendToAccumulator(tp0);
    sender.runOnce();
    String nodeId = client.requests().peek().destination();
    Node node = new Node(Integer.valueOf(nodeId), "localhost", 0);
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
    // Send second ProduceRequest
    Future<RecordMetadata> request2 = appendToAccumulator(tp0);
    sender.runOnce();
    // Send third ProduceRequest
    Future<RecordMetadata> request3 = appendToAccumulator(tp0);
    sender.runOnce();
    assertEquals(3, client.inFlightRequestCount());
    assertEquals(3, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
    assertFalse(request1.isDone());
    assertFalse(request2.isDone());
    assertFalse(request3.isDone());
    assertTrue(client.isReady(node, time.milliseconds()));
    sendIdempotentProducerResponse(0, tp0, Errors.LEADER_NOT_AVAILABLE, -1L);
    // receive response 0
    sender.runOnce();
    // Queue the fourth request, it shouldn't be sent until the first 3 complete.
    Future<RecordMetadata> request4 = appendToAccumulator(tp0);
    assertEquals(2, client.inFlightRequestCount());
    assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
    sendIdempotentProducerResponse(1, tp0, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, -1L);
    // re send request 1, receive response 2
    sender.runOnce();
    sendIdempotentProducerResponse(2, tp0, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, -1L);
    // receive response 3
    sender.runOnce();
    assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
    assertEquals(1, client.inFlightRequestCount());
    // Do nothing, we are reduced to one in flight request during retries.
    sender.runOnce();
    // the batch for request 4 shouldn't have been drained, and hence the sequence should not have been incremented.
    assertEquals(3, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
    sendIdempotentProducerResponse(0, tp0, Errors.NONE, 0L);
    // receive response 1
    sender.runOnce();
    assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));
    assertTrue(request1.isDone());
    assertEquals(0, request1.get().offset());
    assertFalse(client.hasInFlightRequests());
    assertEquals(0, sender.inFlightBatches(tp0).size());
    // send request 2
    sender.runOnce();
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, sender.inFlightBatches(tp0).size());
    sendIdempotentProducerResponse(1, tp0, Errors.NONE, 1L);
    // receive response 2
    sender.runOnce();
    assertEquals(OptionalInt.of(1), transactionManager.lastAckedSequence(tp0));
    assertTrue(request2.isDone());
    assertEquals(1, request2.get().offset());
    assertFalse(client.hasInFlightRequests());
    assertEquals(0, sender.inFlightBatches(tp0).size());
    // send request 3
    sender.runOnce();
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, sender.inFlightBatches(tp0).size());
    sendIdempotentProducerResponse(2, tp0, Errors.NONE, 2L);
    // receive response 3, send request 4 since we are out of 'retry' mode.
    sender.runOnce();
    assertEquals(OptionalInt.of(2), transactionManager.lastAckedSequence(tp0));
    assertTrue(request3.isDone());
    assertEquals(2, request3.get().offset());
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, sender.inFlightBatches(tp0).size());
    sendIdempotentProducerResponse(3, tp0, Errors.NONE, 3L);
    // receive response 4
    sender.runOnce();
    assertEquals(OptionalInt.of(3), transactionManager.lastAckedSequence(tp0));
    assertTrue(request4.isDone());
    assertEquals(3, request4.get().offset());
}
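The test above drives the Sender directly through its internals (appendToAccumulator, sendIdempotentProducerResponse, and the mock client are SenderTest helpers). For comparison, here is a minimal application-level sketch of the behaviour being verified: with idempotence enabled, the producer attaches a producer id and per-partition sequence numbers to each batch, so retried batches are neither duplicated nor reordered by the broker. The broker address and topic name below are placeholders.

import java.util.Properties;
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;

public class IdempotentProducerExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Idempotence requires acks=all and tolerates several in-flight requests per connection.
        props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 5);

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            Future<RecordMetadata> future =
                    producer.send(new ProducerRecord<>("my-topic", "key", "value"));
            RecordMetadata metadata = future.get();
            System.out.printf("partition=%d offset=%d%n", metadata.partition(), metadata.offset());
        }
    }
}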
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.
From the class SenderTest, the method testExpiredBatchesInMultiplePartitions:
@SuppressWarnings("deprecation")
@Test
public void testExpiredBatchesInMultiplePartitions() throws Exception {
    long deliveryTimeoutMs = 1500L;
    setupWithTransactionState(null, true, null);
    // Send multiple ProduceRequests across multiple partitions.
    Future<RecordMetadata> request1 = appendToAccumulator(tp0, time.milliseconds(), "k1", "v1");
    Future<RecordMetadata> request2 = appendToAccumulator(tp1, time.milliseconds(), "k2", "v2");
    // Send request.
    sender.runOnce();
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, sender.inFlightBatches(tp0).size(), "Expect one in-flight batch in accumulator");
    Map<TopicPartition, ProduceResponse.PartitionResponse> responseMap = new HashMap<>();
    responseMap.put(tp0, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L));
    client.respond(new ProduceResponse(responseMap));
    // Successfully expire both batches.
    time.sleep(deliveryTimeoutMs);
    sender.runOnce();
    assertEquals(0, sender.inFlightBatches(tp0).size(), "Expect zero in-flight batch in accumulator");
    ExecutionException e = assertThrows(ExecutionException.class, request1::get);
    assertTrue(e.getCause() instanceof TimeoutException);
    e = assertThrows(ExecutionException.class, request2::get);
    assertTrue(e.getCause() instanceof TimeoutException);
}
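Outside the test harness, the expiry asserted here is governed by delivery.timeout.ms: once a batch has waited longer than that (including retries), its future completes exceptionally. A minimal sketch of how an application would configure and observe this, with the broker address, topic name, and timeout values as placeholders:

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.serialization.StringSerializer;

public class DeliveryTimeoutExample {
    public static void main(String[] args) throws InterruptedException {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Total time a record may spend between send() and acknowledgement, retries included.
        // Must be at least request.timeout.ms + linger.ms.
        props.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, 1500);
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 1000);
        props.put(ProducerConfig.LINGER_MS_CONFIG, 0);

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            Future<RecordMetadata> future = producer.send(new ProducerRecord<>("my-topic", "k1", "v1"));
            try {
                RecordMetadata metadata = future.get();
                System.out.println("delivered at offset " + metadata.offset());
            } catch (ExecutionException e) {
                // Expired batches surface as a TimeoutException cause, as the test asserts.
                if (e.getCause() instanceof TimeoutException) {
                    System.err.println("batch expired: " + e.getCause().getMessage());
                }
            }
        }
    }
}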
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.
From the class SenderTest, the method testInflightBatchesExpireOnDeliveryTimeout:
@SuppressWarnings("deprecation")
@Test
public void testInflightBatchesExpireOnDeliveryTimeout() throws InterruptedException {
    long deliveryTimeoutMs = 1500L;
    setupWithTransactionState(null, true, null);
    // Send first ProduceRequest
    Future<RecordMetadata> request = appendToAccumulator(tp0);
    // send request
    sender.runOnce();
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, sender.inFlightBatches(tp0).size(), "Expect one in-flight batch in accumulator");
    Map<TopicPartition, ProduceResponse.PartitionResponse> responseMap = new HashMap<>();
    responseMap.put(tp0, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L));
    client.respond(new ProduceResponse(responseMap));
    time.sleep(deliveryTimeoutMs);
    // receive first response
    sender.runOnce();
    assertEquals(0, sender.inFlightBatches(tp0).size(), "Expect zero in-flight batch in accumulator");
    try {
        request.get();
        fail("The expired batch should throw a TimeoutException");
    } catch (ExecutionException e) {
        assertTrue(e.getCause() instanceof TimeoutException);
    }
}
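The try/catch on request.get() above is the blocking way to observe an expired batch; the same failure is also delivered asynchronously to a send callback. A brief sketch under the assumption that an idempotent producer has already been configured as in the earlier example (the method name and topic are placeholders):

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class CallbackExample {
    // Assumes an already-configured producer is passed in; see the earlier configuration sketch.
    static void sendWithCallback(KafkaProducer<String, String> producer) {
        producer.send(new ProducerRecord<>("my-topic", "key", "value"), (metadata, exception) -> {
            if (exception != null) {
                // For an expired batch this is a TimeoutException, not wrapped in the
                // ExecutionException seen when blocking on Future.get().
                System.err.println("send failed: " + exception);
            } else {
                System.out.println("delivered to partition " + metadata.partition()
                        + " at offset " + metadata.offset());
            }
        });
    }
}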
Use of org.apache.kafka.clients.producer.RecordMetadata in project kafka by apache.
From the class SenderTest, the method testExpiryOfAllSentBatchesShouldCauseUnresolvedSequences:
@Test
public void testExpiryOfAllSentBatchesShouldCauseUnresolvedSequences() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = createTransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());
    // Send first ProduceRequest
    Future<RecordMetadata> request1 = appendToAccumulator(tp0, 0L, "key", "value");
    // send request
    sender.runOnce();
    sendIdempotentProducerResponse(0, tp0, Errors.NOT_LEADER_OR_FOLLOWER, -1);
    // receive response
    sender.runOnce();
    assertEquals(1L, transactionManager.sequenceNumber(tp0).longValue());
    Node node = metadata.fetch().nodes().get(0);
    time.sleep(15000L);
    client.disconnect(node.idString());
    client.backoff(node, 10);
    // now expire the batch.
    sender.runOnce();
    assertFutureFailure(request1, TimeoutException.class);
    assertTrue(transactionManager.hasUnresolvedSequence(tp0));
    assertFalse(client.hasInFlightRequests());
    Deque<ProducerBatch> batches = accumulator.batches().get(tp0);
    assertEquals(0, batches.size());
    assertEquals(producerId, transactionManager.producerIdAndEpoch().producerId);
    // In the next run loop, we bump the epoch and clear the unresolved sequences
    sender.runOnce();
    assertEquals(1, transactionManager.producerIdAndEpoch().epoch);
    assertFalse(transactionManager.hasUnresolvedSequence(tp0));
}
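assertFutureFailure is another SenderTest helper that is not part of this excerpt. A plausible sketch of the check it performs, written against the JUnit 5 assertions the tests above already use (the name, signature, and body below are assumptions, not the actual helper):

import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

final class FutureAssertions {
    // The future must already be complete, and calling get() must fail with an
    // ExecutionException whose cause is exactly the expected exception type.
    static void assertFutureFailure(Future<?> future, Class<? extends Exception> expectedCause) {
        assertTrue(future.isDone(), "Future should already be complete");
        ExecutionException e = assertThrows(ExecutionException.class, future::get);
        assertEquals(expectedCause, e.getCause().getClass());
    }
}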