Use of org.apache.kafka.common.requests.EndTxnResponse in project kafka by apache.
The class SenderTest, method testIncompleteTransactionAbortOnShutdown.
@Test
public void testIncompleteTransactionAbortOnShutdown() {
    // Create a sender with maxRetries = 1.
    int maxRetries = 1;
    Metrics m = new Metrics();
    SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
    try {
        TransactionManager txnManager = new TransactionManager(logContext, "testIncompleteTransactionAbortOnShutdown", 6000, 100, apiVersions);
        Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL,
            maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager, apiVersions);
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
        TopicPartition tp = new TopicPartition("testIncompleteTransactionAbortOnShutdown", 1);

        setupWithTransactionState(txnManager);
        doInitTransactions(txnManager, producerIdAndEpoch);

        // Begin a transaction and add a partition, but never commit or abort it.
        txnManager.beginTransaction();
        txnManager.maybeAddPartition(tp);
        client.prepareResponse(new AddPartitionsToTxnResponse(0, Collections.singletonMap(tp, Errors.NONE)));
        sender.runOnce();

        // Shutting down with an incomplete transaction should send EndTxnRequest(ABORT).
        sender.initiateClose();
        AssertEndTxnRequestMatcher endTxnMatcher = new AssertEndTxnRequestMatcher(TransactionResult.ABORT);
        client.prepareResponse(endTxnMatcher, new EndTxnResponse(new EndTxnResponseData()
            .setErrorCode(Errors.NONE.code())
            .setThrottleTimeMs(0)));
        sender.run();
        assertTrue(endTxnMatcher.matched, "Response didn't match in test");
    } finally {
        m.close();
    }
}
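Both Sender tests intercept the outgoing EndTxnRequest with an AssertEndTxnRequestMatcher, which is defined elsewhere in SenderTest and not shown on this page. A minimal sketch of such a matcher, assuming the MockClient.RequestMatcher interface and the public EndTxnRequest.result() accessor (the exact field names are illustrative):

private static class AssertEndTxnRequestMatcher implements MockClient.RequestMatcher {

    private final TransactionResult requiredResult;
    private boolean matched = false;

    AssertEndTxnRequestMatcher(TransactionResult requiredResult) {
        this.requiredResult = requiredResult;
    }

    @Override
    public boolean matches(AbstractRequest body) {
        // Only EndTxnRequests count; verify the commit/abort flag and remember the hit.
        if (body instanceof EndTxnRequest) {
            assertSame(requiredResult, ((EndTxnRequest) body).result());
            matched = true;
            return true;
        }
        return false;
    }
}

MockClient runs the matcher against each outgoing request before handing back the prepared EndTxnResponse, so asserting endTxnMatcher.matched at the end proves the abort (or commit) actually went on the wire.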
Use of org.apache.kafka.common.requests.EndTxnResponse in project kafka by apache.
The class SenderTest, method testTransactionalRequestsSentOnShutdown.
@Test
public void testTransactionalRequestsSentOnShutdown() {
    // Create a sender with maxRetries = 1.
    int maxRetries = 1;
    Metrics m = new Metrics();
    SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
    try {
        TransactionManager txnManager = new TransactionManager(logContext, "testTransactionalRequestsSentOnShutdown", 6000, 100, apiVersions);
        Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL,
            maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager, apiVersions);
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
        TopicPartition tp = new TopicPartition("testTransactionalRequestsSentOnShutdown", 1);

        setupWithTransactionState(txnManager);
        doInitTransactions(txnManager, producerIdAndEpoch);

        // Begin a transaction and add a partition to it.
        txnManager.beginTransaction();
        txnManager.maybeAddPartition(tp);
        client.prepareResponse(new AddPartitionsToTxnResponse(0, Collections.singletonMap(tp, Errors.NONE)));
        sender.runOnce();

        // A commit begun before shutdown must still be sent as EndTxnRequest(COMMIT).
        sender.initiateClose();
        txnManager.beginCommit();
        AssertEndTxnRequestMatcher endTxnMatcher = new AssertEndTxnRequestMatcher(TransactionResult.COMMIT);
        client.prepareResponse(endTxnMatcher, new EndTxnResponse(new EndTxnResponseData()
            .setErrorCode(Errors.NONE.code())
            .setThrottleTimeMs(0)));
        sender.run();
        assertTrue(endTxnMatcher.matched, "Response didn't match in test");
    } finally {
        m.close();
    }
}
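In both tests the canned broker reply is an EndTxnResponse wrapped around the auto-generated EndTxnResponseData message. A self-contained sketch of that round trip (the class name is ours; error() and throttleTimeMs() are the wrapper's public accessors):

import org.apache.kafka.common.message.EndTxnResponseData;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.requests.EndTxnResponse;

public class EndTxnResponseSketch {
    public static void main(String[] args) {
        // Build the response exactly as the tests above prepare it for MockClient.
        EndTxnResponse response = new EndTxnResponse(
            new EndTxnResponseData()
                .setErrorCode(Errors.NONE.code())
                .setThrottleTimeMs(0));

        // The wrapper maps the raw error code back to the Errors enum.
        System.out.println(response.error());          // NONE
        System.out.println(response.throttleTimeMs()); // 0
    }
}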
Use of org.apache.kafka.common.requests.EndTxnResponse in project kafka by apache.
The class KafkaProducerTest, method testCommitTransactionWithRecordTooLargeException.
@Test
public void testCommitTransactionWithRecordTooLargeException() throws Exception {
    Map<String, Object> configs = new HashMap<>();
    configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id");
    configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000");
    configs.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 1000);

    Time time = new MockTime(1);
    MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1));
    ProducerMetadata metadata = mock(ProducerMetadata.class);
    MockClient client = new MockClient(time, metadata);
    client.updateMetadata(initialUpdateResponse);
    client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some.id", NODE));
    client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE));
    when(metadata.fetch()).thenReturn(onePartitionCluster);

    // A record whose value alone exceeds max.request.size (1000 bytes).
    String largeString = IntStream.range(0, 1000).mapToObj(i -> "*").collect(Collectors.joining());
    ProducerRecord<String, String> largeRecord = new ProducerRecord<>(topic, "large string", largeString);

    try (KafkaProducer<String, String> producer = kafkaProducer(configs, new StringSerializer(),
            new StringSerializer(), metadata, client, null, time)) {
        producer.initTransactions();
        client.prepareResponse(endTxnResponse(Errors.NONE));
        producer.beginTransaction();
        // The oversized send fails client-side, so the subsequent commit must fail too.
        TestUtils.assertFutureError(producer.send(largeRecord), RecordTooLargeException.class);
        assertThrows(KafkaException.class, producer::commitTransaction);
    }
}
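The endTxnResponse helper used above is a private factory in KafkaProducerTest and is not shown on this page. A hypothetical reconstruction, mirroring how the Sender tests build the same response:

// Hypothetical helper: the real KafkaProducerTest defines its own version,
// but any EndTxnResponse carrying the given error code satisfies the test.
private static EndTxnResponse endTxnResponse(Errors error) {
    return new EndTxnResponse(new EndTxnResponseData()
        .setErrorCode(error.code())
        .setThrottleTimeMs(0));
}

The oversized record fails client-side with RecordTooLargeException, which marks the transaction as failed, so commitTransaction() throws a KafkaException instead of sending the commit.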