Use of org.apache.kafka.common.errors.FencedInstanceIdException in the apache/kafka project.
From the class TransactionManagerTest, method testFencedInstanceIdInTxnOffsetCommitByGroupMetadata:
@Test
public void testFencedInstanceIdInTxnOffsetCommitByGroupMetadata() {
    final TopicPartition tp = new TopicPartition("foo", 0);
    final String fencedMemberId = "fenced_member";
    doInitTransactions();
    transactionManager.beginTransaction();
    TransactionalRequestResult sendOffsetsResult = transactionManager.sendOffsetsToTransaction(
        singletonMap(tp, new OffsetAndMetadata(39L)),
        new ConsumerGroupMetadata(consumerGroupId, 5, fencedMemberId, Optional.of(groupInstanceId)));
    prepareAddOffsetsToTxnResponse(Errors.NONE, consumerGroupId, producerId, epoch);
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.GROUP, consumerGroupId);
    runUntil(() -> transactionManager.coordinator(CoordinatorType.GROUP) != null);

    // Answer the TxnOffsetCommit request with FENCED_INSTANCE_ID once the request
    // carries the expected group.instance.id but a different member.id.
    client.prepareResponse(request -> {
        TxnOffsetCommitRequest txnOffsetCommitRequest = (TxnOffsetCommitRequest) request;
        assertEquals(consumerGroupId, txnOffsetCommitRequest.data().groupId());
        assertEquals(producerId, txnOffsetCommitRequest.data().producerId());
        assertEquals(epoch, txnOffsetCommitRequest.data().producerEpoch());
        return txnOffsetCommitRequest.data().groupInstanceId().equals(groupInstanceId)
            && !txnOffsetCommitRequest.data().memberId().equals(memberId);
    }, new TxnOffsetCommitResponse(0, singletonMap(tp, Errors.FENCED_INSTANCE_ID)));

    runUntil(transactionManager::hasError);

    // The fenced instance id surfaces as an abortable FencedInstanceIdException.
    assertTrue(transactionManager.lastError() instanceof FencedInstanceIdException);
    assertTrue(sendOffsetsResult.isCompleted());
    assertFalse(sendOffsetsResult.isSuccessful());
    assertTrue(sendOffsetsResult.error() instanceof FencedInstanceIdException);
    assertAbortableError(FencedInstanceIdException.class);
}
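This test exercises the producer-side path. On the consumer side, the same error appears when a static group member (one that sets group.instance.id) is fenced by a newer instance using the same id. Below is a minimal sketch of surfacing it on commit; the bootstrap server, group id, instance id, and topic names are illustrative assumptions, not taken from the Kafka sources.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.FencedInstanceIdException;

public class StaticMemberCommitExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed address
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // assumed group
        props.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, "instance-1");     // static membership
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
            "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singleton("example-topic"));      // assumed topic
            while (true) {
                consumer.poll(Duration.ofMillis(200));
                try {
                    consumer.commitSync();
                } catch (FencedInstanceIdException e) {
                    // Another member claimed this group.instance.id; this instance
                    // is now a zombie and must not keep committing.
                    throw e;
                }
            }
        }
    }
}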
Use of org.apache.kafka.common.errors.FencedInstanceIdException in the apache/kafka project.
From the class ExactlyOnceMessageProcessor, method run:
@Override
public void run() {
    // Init transactions call should always happen first in order to clear zombie transactions from previous generation.
    producer.initTransactions();
    final AtomicLong messageRemaining = new AtomicLong(Long.MAX_VALUE);
    consumer.subscribe(Collections.singleton(inputTopic), new ConsumerRebalanceListener() {
        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            printWithTxnId("Revoked partition assignment to kick-off rebalancing: " + partitions);
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            printWithTxnId("Received partition assignment after rebalancing: " + partitions);
            messageRemaining.set(messagesRemaining(consumer));
        }
    });
    int messageProcessed = 0;
    while (messageRemaining.get() > 0) {
        try {
            ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofMillis(200));
            if (records.count() > 0) {
                // Begin a new transaction session.
                producer.beginTransaction();
                for (ConsumerRecord<Integer, String> record : records) {
                    // Process the record and send to downstream.
                    ProducerRecord<Integer, String> customizedRecord = transform(record);
                    producer.send(customizedRecord);
                }
                Map<TopicPartition, OffsetAndMetadata> offsets = consumerOffsets();
                // Checkpoint the progress by sending offsets to group coordinator broker.
                // Note that this API is only available for broker >= 2.5.
                producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
                // Finish the transaction. All sent records should be visible for consumption now.
                producer.commitTransaction();
                messageProcessed += records.count();
            }
        } catch (ProducerFencedException e) {
            throw new KafkaException(String.format("The transactional.id %s has been claimed by another process", transactionalId));
        } catch (FencedInstanceIdException e) {
            throw new KafkaException(String.format("The group.instance.id %s has been claimed by another process", groupInstanceId));
        } catch (KafkaException e) {
            // If we have not been fenced, try to abort the transaction and continue. This will raise immediately
            // if the producer has hit a fatal error.
            producer.abortTransaction();
            // The consumer fetch position needs to be restored to the committed offset
            // before the transaction started.
            resetToLastCommittedPositions(consumer);
        }
        messageRemaining.set(messagesRemaining(consumer));
        printWithTxnId("Message remaining: " + messageRemaining);
    }
    printWithTxnId("Finished processing " + messageProcessed + " records");
    latch.countDown();
}
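The helpers messagesRemaining and resetToLastCommittedPositions are defined elsewhere in ExactlyOnceMessageProcessor and are not shown above. As a sketch of the reset logic, assuming the same KafkaConsumer<Integer, String> as in the loop, it rewinds each assigned partition to its last committed offset, or to the beginning if nothing has been committed yet:

private static void resetToLastCommittedPositions(KafkaConsumer<Integer, String> consumer) {
    // Look up the last committed offset for every currently assigned partition.
    Map<TopicPartition, OffsetAndMetadata> committed = consumer.committed(consumer.assignment());
    consumer.assignment().forEach(tp -> {
        OffsetAndMetadata offsetAndMetadata = committed.get(tp);
        if (offsetAndMetadata != null)
            consumer.seek(tp, offsetAndMetadata.offset());        // rewind to the committed position
        else
            consumer.seekToBeginning(Collections.singleton(tp));  // nothing committed yet
    });
}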
Use of org.apache.kafka.common.errors.FencedInstanceIdException in the apache/kafka project.
From the class ConsumerCoordinator, method doCommitOffsetsAsync:
private RequestFuture<Void> doCommitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback) {
    RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
    final OffsetCommitCallback cb = callback == null ? defaultOffsetCommitCallback : callback;
    future.addListener(new RequestFutureListener<Void>() {
        @Override
        public void onSuccess(Void value) {
            if (interceptors != null)
                interceptors.onCommit(offsets);
            completedOffsetCommits.add(new OffsetCommitCompletion(cb, offsets, null));
        }

        @Override
        public void onFailure(RuntimeException e) {
            Exception commitException = e;
            if (e instanceof RetriableException) {
                commitException = new RetriableCommitFailedException(e);
            }
            completedOffsetCommits.add(new OffsetCommitCompletion(cb, offsets, commitException));
            if (commitException instanceof FencedInstanceIdException) {
                asyncCommitFenced.set(true);
            }
        }
    });
    return future;
}
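Note that onFailure only records the fencing in the asyncCommitFenced flag; the exception itself is rethrown later on the application thread. The following is a sketch of that consuming side, simplified from ConsumerCoordinator (the message text and the queue-draining details are abbreviated assumptions here):

void invokeCompletedOffsetCommitCallbacks() {
    if (asyncCommitFenced.get()) {
        // Rethrow on the user thread: this consumer's group.instance.id was fenced.
        throw new FencedInstanceIdException("Got fenced exception for group.instance.id "
            + rebalanceConfig.groupInstanceId.orElse("unset_instance_id"));
    }
    // Drain and invoke the callbacks queued by doCommitOffsetsAsync.
    OffsetCommitCompletion completion;
    while ((completion = completedOffsetCommits.poll()) != null)
        completion.invoke();
}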