Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.
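ProducerIdAndEpoch is a small value type pairing a producer id (a long) with a producer epoch (a short). The excerpts below rely only on its (producerId, epoch) constructor and its isValid() check; a minimal usage sketch under that assumption:

    // Minimal usage sketch; only the constructor and isValid() are assumed here.
    ProducerIdAndEpoch idAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
    if (!idAndEpoch.isValid()) {
        // producer id not yet assigned; the sender must wait for InitProducerId to complete
    }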
The class ProducerBatch, method split.
public Deque<ProducerBatch> split(int splitBatchSize) {
    Deque<ProducerBatch> batches = new ArrayDeque<>();
    MemoryRecords memoryRecords = recordsBuilder.build();

    Iterator<MutableRecordBatch> recordBatchIter = memoryRecords.batches().iterator();
    if (!recordBatchIter.hasNext())
        throw new IllegalStateException("Cannot split an empty producer batch.");

    RecordBatch recordBatch = recordBatchIter.next();
    if (recordBatch.magic() < MAGIC_VALUE_V2 && !recordBatch.isCompressed())
        throw new IllegalArgumentException("Batch splitting cannot be used with non-compressed messages " +
                "with version v0 and v1");

    if (recordBatchIter.hasNext())
        throw new IllegalArgumentException("A producer batch should only have one record batch.");

    Iterator<Thunk> thunkIter = thunks.iterator();
    // We always allocate batch size because we are already splitting a big batch.
    // And we also retain the create time of the original batch.
    ProducerBatch batch = null;

    for (Record record : recordBatch) {
        assert thunkIter.hasNext();
        Thunk thunk = thunkIter.next();
        if (batch == null)
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);

        // A newly created batch can always host the first message.
        if (!batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk)) {
            batches.add(batch);
            batch.closeForRecordAppends();
            batch = createBatchOffAccumulatorForRecord(record, splitBatchSize);
            batch.tryAppendForSplit(record.timestamp(), record.key(), record.value(), record.headers(), thunk);
        }
    }

    // Close the last batch and add it to the batch list after split.
    if (batch != null) {
        batches.add(batch);
        batch.closeForRecordAppends();
    }

    produceFuture.set(ProduceResponse.INVALID_OFFSET, NO_TIMESTAMP, index -> new RecordBatchTooLargeException());
    produceFuture.done();

    if (hasSequence()) {
        int sequence = baseSequence();
        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId(), producerEpoch());
        for (ProducerBatch newBatch : batches) {
            newBatch.setProducerState(producerIdAndEpoch, sequence, isTransactional());
            sequence += newBatch.recordCount;
        }
    }
    return batches;
}
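The key detail in split() is the final loop: every split batch inherits the original batch's producer id and epoch, and base sequences are assigned contiguously starting from the original base sequence, so the broker still sees a single gap-free sequence range. A sketch of just that arithmetic, using hypothetical record counts instead of real ProducerBatch objects:

    // Illustrative only: contiguous base-sequence assignment across split batches.
    // The numbers below are hypothetical; the loop mirrors the one over `batches` above.
    int sequence = 5;                 // baseSequence() of the original batch
    int[] splitRecordCounts = {3, 4, 2};
    for (int recordCount : splitRecordCounts) {
        System.out.printf("split batch: baseSequence=%d, recordCount=%d%n", sequence, recordCount);
        sequence += recordCount;      // the next split batch continues where this one ends
    }
    // Prints base sequences 5, 8 and 12 for the three split batches.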
Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.
The class RecordAccumulator, method shouldStopDrainBatchesForPartition.
private boolean shouldStopDrainBatchesForPartition(ProducerBatch first, TopicPartition tp) {
    ProducerIdAndEpoch producerIdAndEpoch = null;
    if (transactionManager != null) {
        if (!transactionManager.isSendToPartitionAllowed(tp))
            return true;

        producerIdAndEpoch = transactionManager.producerIdAndEpoch();
        if (!producerIdAndEpoch.isValid())
            // we cannot send the batch until we have refreshed the producer id
            return true;

        if (!first.hasSequence()) {
            if (transactionManager.hasInflightBatches(tp) && transactionManager.hasStaleProducerIdAndEpoch(tp)) {
                // Don't drain any new batches while the partition has in-flight batches with a different epoch
                // and/or producer ID. Otherwise, a batch with a new epoch and sequence number
                // 0 could be written before earlier batches complete, which would cause out of sequence errors
                return true;
            }

            if (transactionManager.hasUnresolvedSequence(first.topicPartition))
                // Don't drain any new batches while the state of previous sequence numbers
                // is unknown. The previous batches would be unknown if they were aborted
                // on the client after being sent to the broker at least once.
                return true;
        }

        int firstInFlightSequence = transactionManager.firstInFlightSequence(first.topicPartition);
        if (firstInFlightSequence != RecordBatch.NO_SEQUENCE && first.hasSequence()
                && first.baseSequence() != firstInFlightSequence)
            // If the queued batch already has an assigned sequence, then it is being retried.
            // In this case, we wait until the next immediate batch is ready and drain that.
            // We only move on when the next in line batch is complete (either successfully or due to
            // a fatal broker error). This effectively reduces our in flight request count to 1.
            return true;
    }
    return false;
}
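The method is a chain of guards, each of which can veto draining a batch for the partition. A compact stand-in that captures the same precedence with plain booleans (all names below are hypothetical and not part of the Kafka API):

    // Hypothetical stand-in for the guard chain above, with plain booleans
    // replacing the TransactionManager and ProducerBatch lookups.
    static boolean shouldStopDrain(boolean sendToPartitionAllowed,
                                   boolean producerIdAndEpochValid,
                                   boolean batchHasSequence,
                                   boolean staleEpochWithInflightBatches,
                                   boolean hasUnresolvedSequence,
                                   boolean retriedBatchNotNextInLine) {
        if (!sendToPartitionAllowed)
            return true;                          // partition not (yet) part of the transaction
        if (!producerIdAndEpochValid)
            return true;                          // wait until the producer id has been refreshed
        if (!batchHasSequence) {
            if (staleEpochWithInflightBatches)
                return true;                      // older-epoch batches are still in flight
            if (hasUnresolvedSequence)
                return true;                      // fate of previously sent sequences is unknown
        }
        return retriedBatchNotNextInLine;         // a retried batch must be drained in order
    }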
Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.
The class TransactionManager, method adjustSequencesDueToFailedBatch.
// If a batch fails fatally, the sequence numbers for future batches bound for the partition must be adjusted
// so that they don't fail with an OutOfOrderSequenceException.
//
// This method must only be called when we know that the batch in question has been unequivocally failed by the broker,
// ie. it has received a confirmed fatal status code like 'Message Too Large' or something similar.
private void adjustSequencesDueToFailedBatch(ProducerBatch batch) {
    if (!topicPartitionBookkeeper.contains(batch.topicPartition))
        // Sequence numbers are not being tracked for this partition. This could happen if the producer id was just
        // reset due to a previous OutOfOrderSequenceException.
        return;

    log.debug("producerId: {}, send to partition {} failed fatally. Reducing future sequence numbers by {}",
            batch.producerId(), batch.topicPartition, batch.recordCount);

    int currentSequence = sequenceNumber(batch.topicPartition);
    currentSequence -= batch.recordCount;
    if (currentSequence < 0)
        throw new IllegalStateException("Sequence number for partition " + batch.topicPartition + " is going to become negative: " + currentSequence);

    setNextSequence(batch.topicPartition, currentSequence);

    topicPartitionBookkeeper.getPartition(batch.topicPartition).resetSequenceNumbers(inFlightBatch -> {
        if (inFlightBatch.baseSequence() < batch.baseSequence())
            return;

        int newSequence = inFlightBatch.baseSequence() - batch.recordCount;
        if (newSequence < 0)
            throw new IllegalStateException("Sequence number for batch with sequence " + inFlightBatch.baseSequence() +
                    " for partition " + batch.topicPartition + " is going to become negative: " + newSequence);

        log.info("Resetting sequence number of batch with current sequence {} for partition {} to {}",
                inFlightBatch.baseSequence(), batch.topicPartition, newSequence);
        inFlightBatch.resetProducerState(new ProducerIdAndEpoch(inFlightBatch.producerId(), inFlightBatch.producerEpoch()), newSequence, inFlightBatch.isTransactional());
    });
}
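The arithmetic is simple but worth spelling out: when a batch of N records fails fatally, the partition's next sequence drops by N, and every in-flight batch whose base sequence is at or after the failed batch shifts down by N. A worked sketch with hypothetical numbers:

    // Hypothetical numbers: a batch of 3 records starting at sequence 5 fails fatally.
    int failedBaseSequence = 5;
    int failedRecordCount = 3;

    int nextSequence = 12;                         // next sequence the partition would have used
    nextSequence -= failedRecordCount;             // 12 -> 9

    int[] inFlightBaseSequences = {2, 8, 10};      // other in-flight batches for the partition
    for (int i = 0; i < inFlightBaseSequences.length; i++) {
        if (inFlightBaseSequences[i] < failedBaseSequence)
            continue;                              // batches sent before the failed one are untouched
        inFlightBaseSequences[i] -= failedRecordCount;   // 8 -> 5, 10 -> 7
    }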
Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.
The class SenderTest, method testUnresolvedSequencesAreNotFatal.
@Test
public void testUnresolvedSequencesAreNotFatal() throws Exception {
    ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
    apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3));
    TransactionManager txnManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions);

    setupWithTransactionState(txnManager);
    doInitTransactions(txnManager, producerIdAndEpoch);

    txnManager.beginTransaction();
    txnManager.maybeAddPartition(tp0);
    client.prepareResponse(new AddPartitionsToTxnResponse(0, Collections.singletonMap(tp0, Errors.NONE)));
    sender.runOnce();

    // Send first ProduceRequest
    Future<RecordMetadata> request1 = appendToAccumulator(tp0);
    // send request
    sender.runOnce();

    time.sleep(1000L);
    appendToAccumulator(tp0);
    // send request
    sender.runOnce();

    assertEquals(2, client.inFlightRequestCount());

    sendIdempotentProducerResponse(0, tp0, Errors.NOT_LEADER_OR_FOLLOWER, -1);
    // receive first response
    sender.runOnce();

    Node node = metadata.fetch().nodes().get(0);
    time.sleep(1000L);
    client.disconnect(node.idString());
    client.backoff(node, 10);

    // now expire the first batch.
    sender.runOnce();
    assertFutureFailure(request1, TimeoutException.class);
    assertTrue(txnManager.hasUnresolvedSequence(tp0));

    // Loop once and confirm that the transaction manager does not enter a fatal error state
    sender.runOnce();
    assertTrue(txnManager.hasAbortableError());
}
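The point of the test is that an unresolved sequence leaves the transaction manager in an abortable, not fatal, state. For applications, that distinction decides whether the producer can keep going. A minimal sketch of the standard handling pattern, assuming `producer` is a transactional KafkaProducer<String, String> that has already called initTransactions() and "my-topic" is a placeholder topic:

    // Sketch of the standard transactional error-handling pattern: abortable
    // errors are rolled back and retried, fatal errors require closing the producer.
    try {
        producer.beginTransaction();
        producer.send(new ProducerRecord<>("my-topic", "key", "value"));
        producer.commitTransaction();
    } catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException fatalError) {
        producer.close();                 // the producer cannot recover from these
    } catch (KafkaException abortableError) {
        producer.abortTransaction();      // roll back and start a new transaction
    }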
Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.
The class SenderTest, method testIncompleteTransactionAbortOnShutdown.
@Test
public void testIncompleteTransactionAbortOnShutdown() {
    // create a sender with retries = 1
    int maxRetries = 1;
    Metrics m = new Metrics();
    SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m);
    try {
        TransactionManager txnManager = new TransactionManager(logContext, "testIncompleteTransactionAbortOnShutdown", 6000, 100, apiVersions);
        Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL,
                maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager, apiVersions);

        ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
        TopicPartition tp = new TopicPartition("testIncompleteTransactionAbortOnShutdown", 1);

        setupWithTransactionState(txnManager);
        doInitTransactions(txnManager, producerIdAndEpoch);

        txnManager.beginTransaction();
        txnManager.maybeAddPartition(tp);
        client.prepareResponse(new AddPartitionsToTxnResponse(0, Collections.singletonMap(tp, Errors.NONE)));
        sender.runOnce();

        sender.initiateClose();
        AssertEndTxnRequestMatcher endTxnMatcher = new AssertEndTxnRequestMatcher(TransactionResult.ABORT);
        client.prepareResponse(endTxnMatcher,
                new EndTxnResponse(new EndTxnResponseData().setErrorCode(Errors.NONE.code()).setThrottleTimeMs(0)));

        sender.run();
        assertTrue(endTxnMatcher.matched, "Response didn't match in test");
    } finally {
        m.close();
    }
}
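The test drives Sender.initiateClose() directly and asserts that the open transaction ends with an EndTxn ABORT. The application-side analogue is shutting down while a transaction may still be open; a sketch of a shutdown hook under the assumption that `producer` is the transactional KafkaProducer in use and the five-second timeout is arbitrary:

    // Hypothetical shutdown hook: closing the producer while a transaction may
    // still be open goes through the close path that the test above verifies.
    Runtime.getRuntime().addShutdownHook(new Thread(() ->
        producer.close(Duration.ofSeconds(5))    // java.time.Duration; bounded wait for in-flight work
    ));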