Use of org.apache.kafka.common.requests.ProduceRequest in project apache-kafka-on-k8s by banzaicloud.
Class SenderTest, method testSequenceNumberIncrement.
@Test
public void testSequenceNumberIncrement() throws InterruptedException {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    transactionManager.setProducerIdAndEpoch(new ProducerIdAndEpoch(producerId, (short) 0));
    setupWithTransactionState(transactionManager);
    client.setNode(new Node(1, "localhost", 33343));
    int maxRetries = 10;
    Metrics m = new Metrics();
    SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
    Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL,
            maxRetries, senderMetrics, time, REQUEST_TIMEOUT, 50, transactionManager, apiVersions);
    Future<RecordMetadata> responseFuture = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            if (body instanceof ProduceRequest) {
                ProduceRequest request = (ProduceRequest) body;
                MemoryRecords records = request.partitionRecordsOrFail().get(tp0);
                Iterator<MutableRecordBatch> batchIterator = records.batches().iterator();
                assertTrue(batchIterator.hasNext());
                RecordBatch batch = batchIterator.next();
                assertFalse(batchIterator.hasNext());
                assertEquals(0, batch.baseSequence());
                assertEquals(producerId, batch.producerId());
                assertEquals(0, batch.producerEpoch());
                return true;
            }
            return false;
        }
    }, produceResponse(tp0, 0, Errors.NONE, 0));
    // connect
    sender.run(time.milliseconds());
    // send
    sender.run(time.milliseconds());
    // receive response
    sender.run(time.milliseconds());
    assertTrue(responseFuture.isDone());
    assertEquals(0L, (long) transactionManager.lastAckedSequence(tp0));
    assertEquals(1L, (long) transactionManager.sequenceNumber(tp0));
}
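The produceResponse(tp0, 0, Errors.NONE, 0) helper used above is not shown in this snippet. Below is a minimal sketch of how such a helper could build the mocked broker reply; it assumes the Kafka 1.x ProduceResponse.PartitionResponse constructor (error, baseOffset, logAppendTime, logStartOffset) and may differ from the actual SenderTest code.

// Hypothetical sketch of the produceResponse(...) helper referenced above.
private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs) {
    ProduceResponse.PartitionResponse resp =
            new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, -1L);
    // Single-partition response map; java.util.Collections is assumed to be imported.
    return new ProduceResponse(Collections.singletonMap(tp, resp), throttleTimeMs);
}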
Use of org.apache.kafka.common.requests.ProduceRequest in project apache-kafka-on-k8s by banzaicloud.
Class SenderTest, method testUnsupportedVersionInProduceRequest.
@Test
public void testUnsupportedVersionInProduceRequest() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    client.setNode(new Node(1, "localhost", 33343));
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    client.prepareUnsupportedVersionResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof ProduceRequest && ((ProduceRequest) body).isIdempotent();
        }
    });
    sender.run(time.milliseconds());
    assertFutureFailure(future, UnsupportedVersionException.class);
    // unsupported version errors are fatal, so we should continue seeing it on future sends
    assertTrue(transactionManager.hasFatalError());
    assertSendFailure(UnsupportedVersionException.class);
}
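assertFutureFailure(...) is another SenderTest helper that this snippet relies on but does not show. One plausible shape for it, using only plain JUnit assertions and java.util.concurrent; the real helper may differ in detail:

// Hypothetical sketch of the assertFutureFailure(...) helper used above.
private void assertFutureFailure(Future<?> future, Class<? extends Exception> expectedExceptionClass)
        throws InterruptedException {
    assertTrue(future.isDone());
    try {
        future.get();
        fail("Future should have raised " + expectedExceptionClass.getName());
    } catch (ExecutionException e) {
        // The producer wraps the original error as the cause of the ExecutionException.
        assertTrue(expectedExceptionClass.isAssignableFrom(e.getCause().getClass()));
    }
}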
Use of org.apache.kafka.common.requests.ProduceRequest in project apache-kafka-on-k8s by banzaicloud.
Class SenderTest, method testCancelInFlightRequestAfterFatalError.
@Test
public void testCancelInFlightRequestAfterFatalError() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    client.setNode(new Node(1, "localhost", 33343));
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    // cluster authorization is a fatal error for the producer
    Future<RecordMetadata> future1 = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    Future<RecordMetadata> future2 = accumulator.append(tp1, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    sender.run(time.milliseconds());
    client.respond(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof ProduceRequest && ((ProduceRequest) body).isIdempotent();
        }
    }, produceResponse(tp0, -1, Errors.CLUSTER_AUTHORIZATION_FAILED, 0));
    sender.run(time.milliseconds());
    assertTrue(transactionManager.hasFatalError());
    assertFutureFailure(future1, ClusterAuthorizationException.class);
    sender.run(time.milliseconds());
    assertFutureFailure(future2, ClusterAuthorizationException.class);
    // Should be fine if the second response eventually returns
    client.respond(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof ProduceRequest && ((ProduceRequest) body).isIdempotent();
        }
    }, produceResponse(tp1, 0, Errors.NONE, 0));
    sender.run(time.milliseconds());
}
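prepareAndReceiveInitProducerId(producerId, Errors.NONE) is also referenced here without being shown. A rough sketch of what such a helper could look like, assuming the Kafka 1.x InitProducerIdRequest/InitProducerIdResponse API; the response constructor arguments are an assumption, not the confirmed implementation:

// Hypothetical sketch: enqueue an InitProducerId response so the transaction manager
// obtains a producer id, then run one sender loop to receive it. Signatures are assumed.
private void prepareAndReceiveInitProducerId(long producerId, Errors error) {
    short producerEpoch = error == Errors.NONE ? 0 : RecordBatch.NO_PRODUCER_EPOCH;
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof InitProducerIdRequest;
        }
    }, new InitProducerIdResponse(0, error, producerId, producerEpoch));
    sender.run(time.milliseconds());
}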
Use of org.apache.kafka.common.requests.ProduceRequest in project apache-kafka-on-k8s by banzaicloud.
Class SenderTest, method produceRequestMatcher.
private MockClient.RequestMatcher produceRequestMatcher(final TopicPartition tp, final ProducerIdAndEpoch producerIdAndEpoch,
                                                        final int sequence, final boolean isTransactional) {
    return new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            if (!(body instanceof ProduceRequest))
                return false;
            ProduceRequest request = (ProduceRequest) body;
            Map<TopicPartition, MemoryRecords> recordsMap = request.partitionRecordsOrFail();
            MemoryRecords records = recordsMap.get(tp);
            if (records == null)
                return false;
            List<MutableRecordBatch> batches = TestUtils.toList(records.batches());
            if (batches.isEmpty() || batches.size() > 1)
                return false;
            MutableRecordBatch batch = batches.get(0);
            return batch.baseOffset() == 0L
                    && batch.baseSequence() == sequence
                    && batch.producerId() == producerIdAndEpoch.producerId
                    && batch.producerEpoch() == producerIdAndEpoch.epoch
                    && batch.isTransactional() == isTransactional;
        }
    };
}
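For context, a matcher built this way is typically handed to the mock client together with a canned response, mirroring the prepareResponse(...) pattern in the tests above. The values below (producerId, sequence 0, non-transactional) are illustrative:

// Illustrative usage of produceRequestMatcher(...), following the same pattern as the tests above.
client.prepareResponse(
        produceRequestMatcher(tp0, new ProducerIdAndEpoch(producerId, (short) 0), 0, false),
        produceResponse(tp0, 0, Errors.NONE, 0));
sender.run(time.milliseconds());   // drains the batch; the matcher checks its producer id, epoch and sequence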
Use of org.apache.kafka.common.requests.ProduceRequest in project apache-kafka-on-k8s by banzaicloud.
Class SenderTest, method testUnsupportedForMessageFormatInProduceRequest.
@Test
public void testUnsupportedForMessageFormatInProduceRequest() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager();
    setupWithTransactionState(transactionManager);
    client.setNode(new Node(1, "localhost", 33343));
    prepareAndReceiveInitProducerId(producerId, Errors.NONE);
    assertTrue(transactionManager.hasProducerId());
    Future<RecordMetadata> future = accumulator.append(tp0, time.milliseconds(), "key".getBytes(),
            "value".getBytes(), null, null, MAX_BLOCK_TIMEOUT).future;
    client.prepareResponse(new MockClient.RequestMatcher() {
        @Override
        public boolean matches(AbstractRequest body) {
            return body instanceof ProduceRequest && ((ProduceRequest) body).isIdempotent();
        }
    }, produceResponse(tp0, -1, Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT, 0));
    sender.run(time.milliseconds());
    assertFutureFailure(future, UnsupportedForMessageFormatException.class);
    // unsupported for message format is not a fatal error
    assertFalse(transactionManager.hasError());
}
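The snippets above also rely on shared SenderTest fixtures such as tp0, tp1, MAX_BLOCK_TIMEOUT, MAX_REQUEST_SIZE, ACKS_ALL and REQUEST_TIMEOUT, which are defined once on the test class. A hedged sketch of plausible definitions follows; the field names match the usage above, but the concrete values are assumptions:

// Hypothetical fixture fields; names match the tests above, values are assumed.
private static final int MAX_REQUEST_SIZE = 1024 * 1024;
private static final short ACKS_ALL = -1;
private static final int MAX_BLOCK_TIMEOUT = 1000;
private static final int REQUEST_TIMEOUT = 1000;
private final TopicPartition tp0 = new TopicPartition("test", 0);
private final TopicPartition tp1 = new TopicPartition("test", 1);
private final MockTime time = new MockTime();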