use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.
the class SenderTest method testUnsupportedVersionInProduceRequest.
@Test
public void testUnsupportedVersionInProduceRequest() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    client.setNode(new Node(1, "localhost", 33343));
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());

    Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    client.prepareUnsupportedVersionResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof ProduceRequest && ((ProduceRequest) body).isIdempotent();
        }
    });

    sender.run(time.milliseconds());
    assertFutureFailure(future, UnsupportedVersionException.class);

    // Unsupported version errors are fatal, so we should continue seeing them on future sends.
    assertTrue(transactionManager.hasFatalError());
    assertSendFailure(UnsupportedVersionException.class);
}
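The test relies on two SenderTest helpers that this snippet does not show, assertFutureFailure and assertSendFailure. A minimal sketch of how they might be implemented, reusing the fixture fields above (accumulator, sender, client, time, tp0, MAX_BLOCK_TIMEOUT); the fork's actual helpers may differ in detail:

private void assertFutureFailure(Future<?> future, Class<? extends Exception> expectedExceptionType) throws InterruptedException {
    assertTrue(future.isDone());
    try {
        future.get();
        fail("Future should have raised " + expectedExceptionType.getSimpleName());
    } catch (ExecutionException e) {
        // The failure cause should match the expected error type.
        assertTrue(expectedExceptionType.isAssignableFrom(e.getCause().getClass()));
    }
}

private void assertSendFailure(Class<? extends RuntimeException> expectedError) throws Exception {
    // A fresh append should fail with the same fatal error on the next sender loop.
    Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    assertTrue(future.isDone());
    try {
        future.get();
        fail("Future should have raised " + expectedError.getSimpleName());
    } catch (ExecutionException e) {
        assertTrue(expectedError.isAssignableFrom(e.getCause().getClass()));
    }
}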
use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.
the class SenderTest method testCorrectHandlingOfOutOfOrderResponsesWhenSecondSucceeds.
@Test
public void testCorrectHandlingOfOutOfOrderResponsesWhenSecondSucceeds() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());

    // Send first ProduceRequest
    Future<RecordMetadata> request1 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    String nodeId = client.requests().peek().destination();
    Node node = new Node(Integer.valueOf(nodeId), "localhost", 0);
    assertEquals(1, client.inFlightRequestCount());

    // Send second ProduceRequest
    Future<RecordMetadata> request2 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    assertEquals(2, client.inFlightRequestCount());
    assertFalse(request1.isDone());
    assertFalse(request2.isDone());
    assertTrue(client.isReady(node, time.milliseconds()));

    ClientRequest firstClientRequest = client.requests().peek();
    ClientRequest secondClientRequest = (ClientRequest) client.requests().toArray()[1];
    client.respondToRequest(secondClientRequest, produceResponse(tp0, 1, Errors.NONE, 1));
    sender.run(time.milliseconds()); // receive response 1

    assertTrue(request2.isDone());
    assertEquals(1, request2.get().offset());
    assertFalse(request1.isDone());

    Deque<ProducerBatch> queuedBatches = accumulator.batches().get(tp0);
    assertEquals(0, queuedBatches.size());
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));

    client.respondToRequest(firstClientRequest, produceResponse(tp0, -1, Errors.REQUEST_TIMED_OUT, -1));
    sender.run(time.milliseconds()); // receive response 0

    // Make sure we requeued the failed first batch at the head of the queue.
    assertEquals(1, queuedBatches.size());
    assertEquals(0, queuedBatches.peekFirst().baseSequence());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    assertEquals(0, client.inFlightRequestCount());

    sender.run(time.milliseconds()); // resend request 0
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));

    // Make sure we handle the out of order successful responses correctly.
    sendIdempotentProducerResponse(0, tp0, Errors.NONE, 0L);
    sender.run(time.milliseconds()); // receive response 0

    assertEquals(0, queuedBatches.size());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    assertEquals(0, client.inFlightRequestCount());
    assertFalse(client.hasInFlightRequests());
    assertTrue(request1.isDone());
    assertEquals(0, request1.get().offset());
}
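Both out-of-order tests answer in-flight requests with a produceResponse(...) helper. A plausible sketch, inferred from the call sites produceResponse(tp0, offset, error, throttleTimeMs); the exact PartitionResponse constructor arguments (in particular the log start offset, assumed -1 here) may differ in the fork:

private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs) {
    // Build a response covering the single partition the test produced to.
    ProduceResponse.PartitionResponse resp =
        new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, -1L);
    Map<TopicPartition, ProduceResponse.PartitionResponse> partResp = Collections.singletonMap(tp, resp);
    return new ProduceResponse(partResp, throttleTimeMs);
}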
use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.
the class SenderTest method testCorrectHandlingOfOutOfOrderResponses.
@Test
public void testCorrectHandlingOfOutOfOrderResponses() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());

    // Send first ProduceRequest
    Future<RecordMetadata> request1 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    String nodeId = client.requests().peek().destination();
    Node node = new Node(Integer.valueOf(nodeId), "localhost", 0);
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));

    // Send second ProduceRequest
    Future<RecordMetadata> request2 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    assertEquals(2, client.inFlightRequestCount());
    assertEquals(2, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    assertFalse(request1.isDone());
    assertFalse(request2.isDone());
    assertTrue(client.isReady(node, time.milliseconds()));

    ClientRequest firstClientRequest = client.requests().peek();
    ClientRequest secondClientRequest = (ClientRequest) client.requests().toArray()[1];
    client.respondToRequest(secondClientRequest, produceResponse(tp0, -1, Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, -1));
    sender.run(time.milliseconds()); // receive response 1

    Deque<ProducerBatch> queuedBatches = accumulator.batches().get(tp0);
    // Make sure the second batch was re-queued first.
    assertEquals(1, queuedBatches.size());
    assertEquals(1, queuedBatches.peekFirst().baseSequence());
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));

    client.respondToRequest(firstClientRequest, produceResponse(tp0, -1, Errors.NOT_LEADER_FOR_PARTITION, -1));
    sender.run(time.milliseconds()); // receive response 0

    // Make sure we requeued both batches in the correct order.
    assertEquals(2, queuedBatches.size());
    assertEquals(0, queuedBatches.peekFirst().baseSequence());
    assertEquals(1, queuedBatches.peekLast().baseSequence());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));
    assertEquals(0, client.inFlightRequestCount());
    assertFalse(request1.isDone());
    assertFalse(request2.isDone());

    sender.run(time.milliseconds()); // send request 0
    assertEquals(1, client.inFlightRequestCount());

    // Nothing should happen: only one in-flight request is allowed once we are retrying.
    sender.run(time.milliseconds());
    assertEquals(1, client.inFlightRequestCount());
    assertEquals(-1, transactionManager.lastAckedSequence(tp0));

    // Make sure that the requests are sent in order, even though the previous responses were not in order.
    sendIdempotentProducerResponse(0, tp0, Errors.NONE, 0L);
    sender.run(time.milliseconds()); // receive response 0
    assertEquals(0, transactionManager.lastAckedSequence(tp0));
    assertEquals(0, client.inFlightRequestCount());
    assertTrue(request1.isDone());
    assertEquals(0, request1.get().offset());

    sender.run(time.milliseconds()); // send request 1
    assertEquals(1, client.inFlightRequestCount());
    sendIdempotentProducerResponse(1, tp0, Errors.NONE, 1L);
    sender.run(time.milliseconds()); // receive response 1

    assertFalse(client.hasInFlightRequests());
    assertEquals(1, transactionManager.lastAckedSequence(tp0));
    assertTrue(request2.isDone());
    assertEquals(1, request2.get().offset());
}
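sendIdempotentProducerResponse(...) is what actually enforces the "requests are sent in order" claim: it only answers a ProduceRequest whose first batch carries the expected base sequence, then replies with the given error and offset. A sketch under those assumptions; partitionRecordsOrFail and the exact matcher logic are inferred from the call sites, not confirmed by this snippet:

private void sendIdempotentProducerResponse(final int expectedSequence, final TopicPartition tp,
                                            Errors responseError, long responseOffset) {
    client.respond(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            ProduceRequest produceRequest = (ProduceRequest) body;
            assertTrue(produceRequest.isIdempotent());
            // The batch being (re)sent must carry the sequence number we expect next.
            MemoryRecords records = produceRequest.partitionRecordsOrFail().get(tp);
            RecordBatch firstBatch = records.batches().iterator().next();
            assertEquals(expectedSequence, firstBatch.baseSequence());
            return true;
        }
    }, produceResponse(tp, responseOffset, responseError, 0));
}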
use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.
the class SenderTest method testExpiryOfUnsentBatchesShouldNotCauseUnresolvedSequences.
@Test
public void testExpiryOfUnsentBatchesShouldNotCauseUnresolvedSequences() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());

    // Append a record, but black out the node so the batch expires before it can be sent.
    Future<RecordMetadata> request1 = accumulator.append(tp0, 0L, "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    Node node = this.cluster.nodes().get(0);
    time.sleep(10000L);
    client.disconnect(node.idString());
    client.blackout(node, 10);

    sender.run(time.milliseconds());
    assertFutureFailure(request1, TimeoutException.class);
    assertFalse(transactionManager.hasUnresolvedSequence(tp0));
}
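prepareAndReceiveInitProducerId(...), used by every test here, presumably primes the MockClient with an InitProducerIdResponse and runs the sender loop once so the TransactionManager picks up a producer id. A minimal sketch under that assumption; the response constructor arguments may differ in the fork:

private void prepareAndReceiveInitProducerId(long producerId, Errors error) {
    short producerEpoch = 0;
    if (error != Errors.NONE)
        producerEpoch = RecordBatch.NO_PRODUCER_EPOCH;
    // Answer the InitProducerId request that the Sender issues for idempotent producers.
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof InitProducerIdRequest
                && ((InitProducerIdRequest) body).transactionalId() == null;
        }
    }, new InitProducerIdResponse(0, error, producerId, producerEpoch));
    sender.run(time.milliseconds());
}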
use of org.apache.kafka.clients.producer.RecordMetadata in project apache-kafka-on-k8s by banzaicloud.
the class SenderTest method testAbortRetryWhenProducerIdChanges.
@Test
@SuppressWarnings("deprecation")
public void testAbortRetryWhenProducerIdChanges() throws InterruptedException {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    transactionManager.setProducerIdAndEpoch(new ProducerIdAndEpoch(producerId, (short) 0));
    setupWithTransactionState(transactionManager);
    client.setNode(new Node(1, "localhost", 33343));

    int maxRetries = 10;
    Metrics m = new Metrics();
    SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
    Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, senderMetrics, time, REQUEST_TIMEOUT, 50, transactionManager, apiVersions);

    Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds()); // connect
    sender.run(time.milliseconds()); // send

    String id = client.requests().peek().destination();
    Node node = new Node(Integer.valueOf(id), "localhost", 0);
    assertEquals(1, client.inFlightRequestCount());
    assertTrue("Client ready status should be true", client.isReady(node, 0L));

    client.disconnect(id);
    assertEquals(0, client.inFlightRequestCount());
    assertFalse("Client ready status should be false", client.isReady(node, 0L));

    transactionManager.resetProducerId();
    transactionManager.setProducerIdAndEpoch(new ProducerIdAndEpoch(producerId + 1, (short) 0));

    sender.run(time.milliseconds()); // receive error
    sender.run(time.milliseconds()); // reconnect
    sender.run(time.milliseconds()); // nothing to do, since the pid has changed; check the metrics for errors

    assertEquals("Expected requests to be aborted after pid change", 0, client.inFlightRequestCount());
    KafkaMetric recordErrors = m.metrics().get(senderMetrics.recordErrorRate);
    assertTrue("Expected non-zero value for record send errors", recordErrors.value() > 0);
    assertTrue(responseFuture.isDone());
    assertEquals(0, (long) transactionManager.sequenceNumber(tp0));
}
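Finally, setupWithTransactionState(...) wires the TransactionManager into the accumulator/sender pair so sequence numbers are assigned on append and validated on completion. A rough sketch, assuming fixture fields (logContext, client, metadata, cluster, apiVersions, batchSize) and constants (MAX_REQUEST_SIZE, ACKS_ALL, REQUEST_TIMEOUT) defined elsewhere in SenderTest; the RecordAccumulator constructor argument order is an assumption:

private void setupWithTransactionState(TransactionManager transactionManager) {
    Metrics metrics = new Metrics(new MetricConfig(), time);
    // Both the accumulator and the sender need the same TransactionManager instance.
    this.accumulator = new RecordAccumulator(logContext, batchSize, 1024 * 1024, CompressionType.NONE,
        0L, 0L, metrics, time, apiVersions, transactionManager);
    this.sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE,
        ACKS_ALL, Integer.MAX_VALUE, new SenderMetricsRegistry(metrics), time, REQUEST_TIMEOUT,
        50, transactionManager, apiVersions);
    this.metadata.update(this.cluster, Collections.<String>emptySet(), time.milliseconds());
}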