Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.
The class RecordAccumulator, method drainBatchesForOneNode.
private List<ProducerBatch> drainBatchesForOneNode(Cluster cluster, Node node, int maxSize, long now) {
    int size = 0;
    List<PartitionInfo> parts = cluster.partitionsForNode(node.id());
    List<ProducerBatch> ready = new ArrayList<>();
    /* to make starvation less likely this loop doesn't start at 0 */
    int start = drainIndex = drainIndex % parts.size();
    do {
        PartitionInfo part = parts.get(drainIndex);
        TopicPartition tp = new TopicPartition(part.topic(), part.partition());
        this.drainIndex = (this.drainIndex + 1) % parts.size();

        // Only proceed if the partition has no in-flight batches.
        if (isMuted(tp))
            continue;

        Deque<ProducerBatch> deque = getDeque(tp);
        if (deque == null)
            continue;

        synchronized (deque) {
            // invariant: !isMuted(tp) && deque != null
            ProducerBatch first = deque.peekFirst();
            if (first == null)
                continue;

            // first != null
            boolean backoff = first.attempts() > 0 && first.waitedTimeMs(now) < retryBackoffMs;
            // Only drain the batch if it is not during backoff period.
            if (backoff)
                continue;

            if (size + first.estimatedSizeInBytes() > maxSize && !ready.isEmpty()) {
                // there is a rare case that a single batch size is larger than the request size due to
                // compression; in this case we will still eventually send this batch in a single request
                break;
            } else {
                if (shouldStopDrainBatchesForPartition(first, tp))
                    break;

                boolean isTransactional = transactionManager != null && transactionManager.isTransactional();
                ProducerIdAndEpoch producerIdAndEpoch =
                    transactionManager != null ? transactionManager.producerIdAndEpoch() : null;
                ProducerBatch batch = deque.pollFirst();
                if (producerIdAndEpoch != null && !batch.hasSequence()) {
                    // If the producer id/epoch of the partition do not match the latest one
                    // of the producer, we update it and reset the sequence. This should be
                    // only done when all its in-flight batches have completed. This is guaranteed
                    // in `shouldStopDrainBatchesForPartition`.
                    transactionManager.maybeUpdateProducerIdAndEpoch(batch.topicPartition);

                    // If the batch already has an assigned sequence, then we should not change the producer id and
                    // sequence number, since this may introduce duplicates. In particular, the previous attempt
                    // may actually have been accepted, and if we change the producer id and sequence here, this
                    // attempt will also be accepted, causing a duplicate.
                    //
                    // Additionally, we update the next sequence number bound for the partition, and also have
                    // the transaction manager track the batch so as to ensure that sequence ordering is maintained
                    // even if we receive out of order responses.
                    batch.setProducerState(producerIdAndEpoch, transactionManager.sequenceNumber(batch.topicPartition), isTransactional);
                    transactionManager.incrementSequenceNumber(batch.topicPartition, batch.recordCount);
                    log.debug("Assigned producerId {} and producerEpoch {} to batch with base sequence " +
                        "{} being sent to partition {}", producerIdAndEpoch.producerId,
                        producerIdAndEpoch.epoch, batch.baseSequence(), tp);

                    transactionManager.addInFlightBatch(batch);
                }
                batch.close();
                size += batch.records().sizeInBytes();
                ready.add(batch);

                batch.drained(now);
            }
        }
    } while (start != drainIndex);
    return ready;
}
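For reference, ProducerIdAndEpoch itself is a small immutable value type pairing a producer id with its epoch. The following is a minimal sketch, not the authoritative source: the field names match the usages above, while the NONE sentinel values are an assumption mirroring RecordBatch.NO_PRODUCER_ID and RecordBatch.NO_PRODUCER_EPOCH.

public class ProducerIdAndEpoch {
    // Sentinel for "no producer id assigned yet"; the -1 values are assumed here.
    public static final ProducerIdAndEpoch NONE = new ProducerIdAndEpoch(-1L, (short) -1);

    public final long producerId;
    public final short epoch;

    public ProducerIdAndEpoch(long producerId, short epoch) {
        this.producerId = producerId;
        this.epoch = epoch;
    }

    // A producer id is only meaningful once the broker has assigned a non-negative value.
    public boolean isValid() {
        return producerId > -1L;
    }

    @Override
    public String toString() {
        return "(producerId=" + producerId + ", epoch=" + epoch + ")";
    }
}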
Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.
The class TransactionManager, method bumpIdempotentProducerEpoch.
private void bumpIdempotentProducerEpoch() {
    if (this.producerIdAndEpoch.epoch == Short.MAX_VALUE) {
        resetIdempotentProducerId();
    } else {
        setProducerIdAndEpoch(new ProducerIdAndEpoch(this.producerIdAndEpoch.producerId, (short) (this.producerIdAndEpoch.epoch + 1)));
        log.debug("Incremented producer epoch, current producer ID and epoch are now {}", this.producerIdAndEpoch);
    }

    // When the epoch is bumped, rewrite all in-flight sequences for the partition(s) that triggered the epoch bump
    for (TopicPartition topicPartition : this.partitionsToRewriteSequences) {
        this.topicPartitionBookkeeper.startSequencesAtBeginning(topicPartition, this.producerIdAndEpoch);
        this.partitionsWithUnresolvedSequences.remove(topicPartition);
    }
    this.partitionsToRewriteSequences.clear();

    epochBumpRequired = false;
}
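The overflow guard is the key detail here: the epoch is a Java short, so once it reaches Short.MAX_VALUE the producer id itself must be reset instead of bumping the epoch further. A standalone sketch of that rule, reusing the value-type sketch above (bumpEpoch is an illustrative name, not TransactionManager API):

// Illustrative model of the epoch-bump rule in bumpIdempotentProducerEpoch:
// increment the short epoch until it saturates, then fall back to acquiring a new producer id.
static ProducerIdAndEpoch bumpEpoch(ProducerIdAndEpoch current) {
    if (current.epoch == Short.MAX_VALUE) {
        // Epoch exhausted: the real code calls resetIdempotentProducerId(), which forces a fresh
        // InitProducerId round trip to the broker; NONE stands in for that outcome here.
        return ProducerIdAndEpoch.NONE;
    }
    return new ProducerIdAndEpoch(current.producerId, (short) (current.epoch + 1));
}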
Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.
The class SenderTest, method testTransactionalSplitBatchAndSend.
@Test
public void testTransactionalSplitBatchAndSend() throws Exception {
    ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0);
    TopicPartition tp = new TopicPartition("testSplitBatchAndSend", 1);
    TransactionManager txnManager = new TransactionManager(logContext, "testSplitBatchAndSend", 60000, 100, apiVersions);

    setupWithTransactionState(txnManager);
    doInitTransactions(txnManager, producerIdAndEpoch);

    txnManager.beginTransaction();
    txnManager.maybeAddPartition(tp);
    client.prepareResponse(new AddPartitionsToTxnResponse(0, Collections.singletonMap(tp, Errors.NONE)));
    sender.runOnce();

    testSplitBatchAndSend(txnManager, producerIdAndEpoch, tp);
}
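The test drives the internal TransactionManager directly; at the public API level, the same init/begin/send sequence corresponds to the standard KafkaProducer transactional calls. A hedged client-side sketch (the broker address and topic name are placeholders):

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "testSplitBatchAndSend");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
    producer.initTransactions();   // obtains the ProducerIdAndEpoch from the transaction coordinator
    producer.beginTransaction();   // mirrors txnManager.beginTransaction() in the test
    // The partition is added to the transaction on first send, as maybeAddPartition does above.
    producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
    producer.commitTransaction();
}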
Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.
The class SenderTest, method testTransactionalUnknownProducerHandlingWhenRetentionLimitReached.
@Test
public void testTransactionalUnknownProducerHandlingWhenRetentionLimitReached() throws Exception {
    final long producerId = 343434L;
    TransactionManager transactionManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions);

    setupWithTransactionState(transactionManager);
    doInitTransactions(transactionManager, new ProducerIdAndEpoch(producerId, (short) 0));
    assertTrue(transactionManager.hasProducerId());

    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    client.prepareResponse(new AddPartitionsToTxnResponse(0, Collections.singletonMap(tp0, Errors.NONE)));
    // Receive AddPartitions response
    sender.runOnce();

    assertEquals(0, transactionManager.sequenceNumber(tp0).longValue());

    // Send first ProduceRequest
    Future<RecordMetadata> request1 = appendToAccumulator(tp0);
    sender.runOnce();

    assertEquals(1, client.inFlightRequestCount());
    assertEquals(1, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));

    sendIdempotentProducerResponse(0, tp0, Errors.NONE, 1000L, 10L);

    // Receive the response for the first request.
    sender.runOnce();
    assertTrue(request1.isDone());
    assertEquals(1000L, request1.get().offset());
    assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));
    assertEquals(OptionalLong.of(1000L), transactionManager.lastAckedOffset(tp0));

    // Send second ProduceRequest, a single batch with 2 records.
    appendToAccumulator(tp0);
    Future<RecordMetadata> request2 = appendToAccumulator(tp0);
    sender.runOnce();
    assertEquals(3, transactionManager.sequenceNumber(tp0).longValue());
    assertEquals(OptionalInt.of(0), transactionManager.lastAckedSequence(tp0));

    assertFalse(request2.isDone());

    sendIdempotentProducerResponse(1, tp0, Errors.UNKNOWN_PRODUCER_ID, -1L, 1010L);

    // Receive the UNKNOWN_PRODUCER_ID response for the second request; it should be retried
    // since the logStartOffset > lastAckedOffset.
    sender.runOnce();

    // We should have reset the sequence number state of the partition because the state was lost on the broker.
    assertEquals(OptionalInt.empty(), transactionManager.lastAckedSequence(tp0));
    assertEquals(2, transactionManager.sequenceNumber(tp0).longValue());
    assertFalse(request2.isDone());
    assertFalse(client.hasInFlightRequests());

    // Retry the second request.
    sender.runOnce();

    // Respond to the resent request. Note that the expected sequence is 0, since we have lost producer state on the broker.
    sendIdempotentProducerResponse(0, tp0, Errors.NONE, 1011L, 1010L);

    // Receive the response for the retried request.
    sender.runOnce();
    assertEquals(OptionalInt.of(1), transactionManager.lastAckedSequence(tp0));
    assertEquals(2, transactionManager.sequenceNumber(tp0).longValue());
    assertFalse(client.hasInFlightRequests());

    assertTrue(request2.isDone());
    assertEquals(1012L, request2.get().offset());
    assertEquals(OptionalLong.of(1012L), transactionManager.lastAckedOffset(tp0));
}
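The pivotal condition in this test is the log start offset check: UNKNOWN_PRODUCER_ID is treated as retriable only when the broker's logStartOffset has moved past the last acknowledged offset, meaning the producer's state was plausibly deleted by retention rather than never existing. A simplified model of that decision (the method and parameter names are illustrative, not the actual Sender API):

import java.util.OptionalLong;

// Illustrative decision rule for UNKNOWN_PRODUCER_ID, modeled on the scenario above.
// If retention has deleted everything this producer previously wrote, the broker has
// legitimately forgotten the producer: reset sequences to 0 and retry the batch.
// Otherwise the error indicates a genuine problem and the batch should be failed.
static boolean canRecoverFromUnknownProducer(long logStartOffset, OptionalLong lastAckedOffset) {
    return logStartOffset > lastAckedOffset.orElse(-1L);
}

// In the test: lastAckedOffset is 1000 and the error response carries logStartOffset 1010,
// so the check passes, sequences restart at 0, and the batch is resent instead of failed.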
Use of org.apache.kafka.common.utils.ProducerIdAndEpoch in project kafka by apache.
The class SenderTest, method testInitProducerIdWithMaxInFlightOne.
/**
 * Verifies that InitProducerId of transactional producer succeeds even if metadata requests
 * are pending with only one bootstrap node available and maxInFlight=1, where multiple
 * polls are necessary to send requests.
 */
@Test
public void testInitProducerIdWithMaxInFlightOne() throws Exception {
    final long producerId = 123456L;
    createMockClientWithMaxFlightOneMetadataPending();

    // Initialize transaction manager. InitProducerId will be queued up until metadata response
    // is processed and FindCoordinator can be sent to `leastLoadedNode`.
    TransactionManager transactionManager = new TransactionManager(new LogContext(),
        "testInitProducerIdWithPendingMetadataRequest", 60000, 100L, new ApiVersions());
    setupWithTransactionState(transactionManager, false, null, false);
    ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId, (short) 0);
    transactionManager.initializeTransactions();
    sender.runOnce();

    // Process metadata response, prepare FindCoordinator and InitProducerId responses.
    // Verify producerId after the sender is run to process responses.
    MetadataResponse metadataUpdate = RequestTestUtils.metadataUpdateWith(1, Collections.emptyMap());
    client.respond(metadataUpdate);
    prepareFindCoordinatorResponse(Errors.NONE, "testInitProducerIdWithPendingMetadataRequest");
    prepareInitProducerResponse(Errors.NONE, producerIdAndEpoch.producerId, producerIdAndEpoch.epoch);
    waitForProducerId(transactionManager, producerIdAndEpoch);
}
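At the configuration level, the constraint this test models corresponds to max.in.flight.requests.per.connection=1 on the producer: initTransactions() must still complete even though the metadata, FindCoordinator, and InitProducerId requests all have to be serialized through a single in-flight slot. A hedged configuration sketch (the bootstrap address is a placeholder):

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");
props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "testInitProducerIdWithPendingMetadataRequest");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
    // Requests are sent one at a time over the single in-flight slot; initTransactions()
    // blocks until the producer id and epoch have been assigned by the coordinator.
    producer.initTransactions();
}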