Use of org.apache.kafka.common.requests.AddPartitionsToTxnResponse in project kafka by apache.
From the class TransactionManagerTest, the method prepareAddPartitionsToTxn:
private void prepareAddPartitionsToTxn(final Map<TopicPartition, Errors> errors) {
    client.prepareResponse(body -> {
        AddPartitionsToTxnRequest request = (AddPartitionsToTxnRequest) body;
        // The request must name exactly the partitions we expect errors for.
        assertEquals(new HashSet<>(request.partitions()), new HashSet<>(errors.keySet()));
        return true;
    }, new AddPartitionsToTxnResponse(0, errors));
}
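As a rough usage sketch, the helper might be driven from a test body along these lines; the tp0 and tp1 partitions, the MockClient field client, and the sender loop are assumed to come from the surrounding TransactionManagerTest fixture rather than being shown here.

Map<TopicPartition, Errors> expected = new HashMap<>();
expected.put(tp0, Errors.NONE);
expected.put(tp1, Errors.TOPIC_AUTHORIZATION_FAILED);
// Queue the mocked AddPartitionsToTxn response, then let the sender poll it.
prepareAddPartitionsToTxn(expected);
sender.runOnce();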
Use of org.apache.kafka.common.requests.AddPartitionsToTxnResponse in project kafka by apache.
From the class SenderTest, the method testTooLargeBatchesAreSafelyRemoved:
@Test
public void testTooLargeBatchesAreSafelyRemoved() throws InterruptedException {
    ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
    TransactionManager txnManager = new TransactionManager(logContext, "testSplitBatchAndSend", 60000, 100, apiVersions);

    setupWithTransactionState(txnManager, false, null);
    doInitTransactions(txnManager, producerIdAndEpoch);

    txnManager.beginTransaction();
    txnManager.maybeAddPartition(tp0);
    client.prepareResponse(new AddPartitionsToTxnResponse(0, Collections.singletonMap(tp0, Errors.NONE)));
    sender.runOnce();

    // create a producer batch with more than one record so it is eligible for splitting
    appendToAccumulator(tp0, time.milliseconds(), "key1", "value1");
    appendToAccumulator(tp0, time.milliseconds(), "key2", "value2");

    // send request
    sender.runOnce();
    assertEquals(1, sender.inFlightBatches(tp0).size());

    // return a MESSAGE_TOO_LARGE error
    client.respond(produceResponse(tp0, -1, Errors.MESSAGE_TOO_LARGE, -1));
    sender.runOnce();

    // process retried response
    sender.runOnce();
    client.respond(produceResponse(tp0, 0, Errors.NONE, 0));
    sender.runOnce();

    // In-flight batches should be empty. Sleep past the expiration time of the batch and run once, no error should be thrown
    assertEquals(0, sender.inFlightBatches(tp0).size());
    time.sleep(2000);
    sender.runOnce();
}
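For reference, the response object prepared in both tests can also be constructed and inspected directly. This is only a sketch, assuming the constructor taking a throttle time and a per-partition error map (as used above) and an errors() accessor on the response.

TopicPartition tp = new TopicPartition("test-topic", 0);
AddPartitionsToTxnResponse response =
    new AddPartitionsToTxnResponse(0, Collections.singletonMap(tp, Errors.NONE));
// The per-partition error map mirrors what a broker reply would carry.
assertEquals(Errors.NONE, response.errors().get(tp));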