Use of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in project kafka by apache.
From class TransactionManagerTest, method testBasicTransaction.
@Test
public void testBasicTransaction() throws InterruptedException {
    doInitTransactions();
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> responseFuture = appendToAccumulator(tp0);
    assertFalse(responseFuture.isDone());
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, epoch, producerId);
    prepareProduceResponse(Errors.NONE, producerId, epoch);
    assertFalse(transactionManager.transactionContainsPartition(tp0));
    assertFalse(transactionManager.isSendToPartitionAllowed(tp0));
    runUntil(() -> transactionManager.transactionContainsPartition(tp0));
    assertTrue(transactionManager.isSendToPartitionAllowed(tp0));
    assertFalse(responseFuture.isDone());
    runUntil(responseFuture::isDone);

    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(tp1, new OffsetAndMetadata(1));
    TransactionalRequestResult addOffsetsResult = transactionManager.sendOffsetsToTransaction(
        offsets, new ConsumerGroupMetadata(consumerGroupId));
    assertFalse(transactionManager.hasPendingOffsetCommits());

    prepareAddOffsetsToTxnResponse(Errors.NONE, consumerGroupId, producerId, epoch);
    runUntil(transactionManager::hasPendingOffsetCommits);
    // The result doesn't complete until TxnOffsetCommit returns.
    assertFalse(addOffsetsResult.isCompleted());

    Map<TopicPartition, Errors> txnOffsetCommitResponse = new HashMap<>();
    txnOffsetCommitResponse.put(tp1, Errors.NONE);
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.GROUP, consumerGroupId);
    prepareTxnOffsetCommitResponse(consumerGroupId, producerId, epoch, txnOffsetCommitResponse);

    assertNull(transactionManager.coordinator(CoordinatorType.GROUP));
    runUntil(() -> transactionManager.coordinator(CoordinatorType.GROUP) != null);
    assertTrue(transactionManager.hasPendingOffsetCommits());
    runUntil(() -> !transactionManager.hasPendingOffsetCommits());
    // We should only be done after both RPCs complete.
    assertTrue(addOffsetsResult.isCompleted());

    transactionManager.beginCommit();
    prepareEndTxnResponse(Errors.NONE, TransactionResult.COMMIT, producerId, epoch);
    runUntil(() -> !transactionManager.hasOngoingTransaction());
    assertFalse(transactionManager.isCompleting());
    assertFalse(transactionManager.transactionContainsPartition(tp0));
}
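The test above drives the internal TransactionManager against mocked broker responses. For orientation, below is a minimal application-level sketch of the same consume-transform-produce flow using only the public client APIs. The class name, the output topic parameter, and the assumption that the producer was configured with a transactional.id and initTransactions() has already been called are illustrative, not taken from the Kafka source.

import java.time.Duration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;

public class TransactionalCopyExample {
    // Copies one batch of records to outputTopic and commits the consumed
    // offsets in the same transaction. Assumes the producer was created with
    // a transactional.id and initTransactions() has already been called.
    static void copyBatch(KafkaConsumer<String, String> consumer,
                          KafkaProducer<String, String> producer,
                          String outputTopic) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
        if (records.isEmpty())
            return;
        producer.beginTransaction();
        for (ConsumerRecord<String, String> record : records)
            producer.send(new ProducerRecord<>(outputTopic, record.key(), record.value()));
        // Offsets to commit: one past the last consumed offset per partition.
        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> partRecords = records.records(partition);
            long lastOffset = partRecords.get(partRecords.size() - 1).offset();
            offsets.put(partition, new OffsetAndMetadata(lastOffset + 1));
        }
        // groupMetadata() carries the group id, generation, member id, and
        // group.instance.id, which lets the broker fence zombie instances.
        producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
        producer.commitTransaction();
    }
}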
Use of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in project kafka by apache.
From class TransactionManagerTest, method testFencedInstanceIdInTxnOffsetCommitByGroupMetadata.
@Test
public void testFencedInstanceIdInTxnOffsetCommitByGroupMetadata() {
    final TopicPartition tp = new TopicPartition("foo", 0);
    final String fencedMemberId = "fenced_member";
    doInitTransactions();
    transactionManager.beginTransaction();
    TransactionalRequestResult sendOffsetsResult = transactionManager.sendOffsetsToTransaction(
        singletonMap(tp, new OffsetAndMetadata(39L)),
        new ConsumerGroupMetadata(consumerGroupId, 5, fencedMemberId, Optional.of(groupInstanceId)));

    prepareAddOffsetsToTxnResponse(Errors.NONE, consumerGroupId, producerId, epoch);
    prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.GROUP, consumerGroupId);
    runUntil(() -> transactionManager.coordinator(CoordinatorType.GROUP) != null);

    client.prepareResponse(request -> {
        TxnOffsetCommitRequest txnOffsetCommitRequest = (TxnOffsetCommitRequest) request;
        assertEquals(consumerGroupId, txnOffsetCommitRequest.data().groupId());
        assertEquals(producerId, txnOffsetCommitRequest.data().producerId());
        assertEquals(epoch, txnOffsetCommitRequest.data().producerEpoch());
        return txnOffsetCommitRequest.data().groupInstanceId().equals(groupInstanceId)
            && !txnOffsetCommitRequest.data().memberId().equals(memberId);
    }, new TxnOffsetCommitResponse(0, singletonMap(tp, Errors.FENCED_INSTANCE_ID)));

    runUntil(transactionManager::hasError);
    assertTrue(transactionManager.lastError() instanceof FencedInstanceIdException);
    assertTrue(sendOffsetsResult.isCompleted());
    assertFalse(sendOffsetsResult.isSuccessful());
    assertTrue(sendOffsetsResult.error() instanceof FencedInstanceIdException);
    assertAbortableError(FencedInstanceIdException.class);
}
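This test asserts that a FENCED_INSTANCE_ID error from the TxnOffsetCommit response surfaces as an abortable FencedInstanceIdException. A hedged sketch of how an application using static group membership might react follows; the helper name and recovery policy are assumptions rather than a prescribed Kafka pattern.

import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.FencedInstanceIdException;

// Hypothetical helper: commit offsets within the current transaction and
// treat a fenced static member as fatal for this instance.
static void commitOffsetsOrDie(KafkaProducer<String, String> producer,
                               KafkaConsumer<String, String> consumer,
                               Map<TopicPartition, OffsetAndMetadata> offsets) {
    try {
        producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
        producer.commitTransaction();
    } catch (FencedInstanceIdException e) {
        // FENCED_INSTANCE_ID is abortable: another consumer with the same
        // group.instance.id has joined, so abort and let this instance exit.
        producer.abortTransaction();
        throw e;
    }
}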
Use of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in project kafka by apache.
From class TransactionManagerTest, method testBumpTransactionalEpochOnRecoverableAddOffsetsRequestError.
@Test
public void testBumpTransactionalEpochOnRecoverableAddOffsetsRequestError() throws InterruptedException {
    final short initialEpoch = 1;
    final short bumpedEpoch = 2;
    doInitTransactions(producerId, initialEpoch);
    transactionManager.beginTransaction();
    transactionManager.maybeAddPartition(tp0);
    Future<RecordMetadata> responseFuture = appendToAccumulator(tp0);
    assertFalse(responseFuture.isDone());
    prepareAddPartitionsToTxnResponse(Errors.NONE, tp0, initialEpoch, producerId);
    prepareProduceResponse(Errors.NONE, producerId, initialEpoch);
    runUntil(responseFuture::isDone);

    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
    offsets.put(tp0, new OffsetAndMetadata(1));
    transactionManager.sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(consumerGroupId));
    assertFalse(transactionManager.hasPendingOffsetCommits());
    prepareAddOffsetsToTxnResponse(Errors.INVALID_PRODUCER_ID_MAPPING, consumerGroupId, producerId, initialEpoch);
    // Send the AddOffsetsToTxn request; the error comes back as abortable.
    runUntil(transactionManager::hasAbortableError);

    TransactionalRequestResult abortResult = transactionManager.beginAbort();
    prepareEndTxnResponse(Errors.NONE, TransactionResult.ABORT, producerId, initialEpoch);
    prepareInitPidResponse(Errors.NONE, false, producerId, bumpedEpoch);
    runUntil(abortResult::isCompleted);
    assertEquals(bumpedEpoch, transactionManager.producerIdAndEpoch().epoch);
    assertTrue(abortResult.isSuccessful());
    // Make sure we are ready for a transaction now.
    assertTrue(transactionManager.isReady());
}
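This test exercises the KIP-360 behavior: after a recoverable error such as INVALID_PRODUCER_ID_MAPPING, aborting the transaction triggers a fresh InitProducerId round trip that bumps the epoch and leaves the producer ready for a new transaction. From the application's point of view this reduces to abort-and-retry, roughly as in the hypothetical wrapper below.

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.ProducerFencedException;

// Hypothetical wrapper illustrating the abort-and-retry contract: with a
// client and broker supporting KIP-360, abortTransaction() internally bumps
// the producer epoch after recoverable errors, so a retry is safe.
static void runTransaction(KafkaProducer<String, String> producer, Runnable work) {
    try {
        producer.beginTransaction();
        work.run();                  // send records and offsets here
        producer.commitTransaction();
    } catch (ProducerFencedException e) {
        producer.close();            // another producer owns this transactional.id
        throw e;
    } catch (KafkaException e) {
        producer.abortTransaction(); // abortable: epoch bumped, state reset
        // The caller may retry from the last committed offsets.
    }
}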
Use of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in project kafka by apache.
From class TransactionManagerTest, method testGroupAuthorizationFailureInFindCoordinator.
@Test
public void testGroupAuthorizationFailureInFindCoordinator() {
    doInitTransactions();
    transactionManager.beginTransaction();
    TransactionalRequestResult sendOffsetsResult = transactionManager.sendOffsetsToTransaction(
        singletonMap(new TopicPartition("foo", 0), new OffsetAndMetadata(39L)),
        new ConsumerGroupMetadata(consumerGroupId));

    prepareAddOffsetsToTxnResponse(Errors.NONE, consumerGroupId, producerId, epoch);
    runUntil(() -> !transactionManager.hasPartitionsToAdd());

    prepareFindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, false, CoordinatorType.GROUP, consumerGroupId);
    runUntil(transactionManager::hasError);
    assertTrue(transactionManager.lastError() instanceof GroupAuthorizationException);

    runUntil(sendOffsetsResult::isCompleted);
    assertFalse(sendOffsetsResult.isSuccessful());
    assertTrue(sendOffsetsResult.error() instanceof GroupAuthorizationException);

    GroupAuthorizationException exception = (GroupAuthorizationException) sendOffsetsResult.error();
    assertEquals(consumerGroupId, exception.groupId());
    assertAbortableError(GroupAuthorizationException.class);
}
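Because GroupAuthorizationException reports the offending group id, an application can surface it in diagnostics before aborting. The helper below is a hypothetical illustration, not part of the Kafka API.

import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.GroupAuthorizationException;

// Hypothetical handler: report which group lacked authorization, then abort.
static void sendOffsetsWithDiagnostics(KafkaProducer<String, String> producer,
                                       KafkaConsumer<String, String> consumer,
                                       Map<TopicPartition, OffsetAndMetadata> offsets) {
    try {
        producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
    } catch (GroupAuthorizationException e) {
        // The exception carries the group id that failed authorization.
        System.err.println("Not authorized for group: " + e.groupId());
        producer.abortTransaction();
        throw e;
    }
}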
Use of org.apache.kafka.clients.consumer.ConsumerGroupMetadata in project kafka by apache.
From class TransactionalMessageCopier, method runEventLoop.
public static void runEventLoop(Namespace parsedArgs) {
    final String transactionalId = parsedArgs.getString("transactionalId");
    final String outputTopic = parsedArgs.getString("outputTopic");
    String consumerGroup = parsedArgs.getString("consumerGroup");
    final KafkaProducer<String, String> producer = createProducer(parsedArgs);
    final KafkaConsumer<String, String> consumer = createConsumer(parsedArgs);
    final AtomicLong remainingMessages = new AtomicLong(
        parsedArgs.getInt("maxMessages") == -1 ? Long.MAX_VALUE : parsedArgs.getInt("maxMessages"));
    boolean groupMode = parsedArgs.getBoolean("groupMode");
    String topicName = parsedArgs.getString("inputTopic");
    final AtomicLong numMessagesProcessedSinceLastRebalance = new AtomicLong(0);
    final AtomicLong totalMessageProcessed = new AtomicLong(0);
    if (groupMode) {
        consumer.subscribe(Collections.singleton(topicName), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                remainingMessages.set(partitions.stream()
                    .mapToLong(partition -> messagesRemaining(consumer, partition)).sum());
                numMessagesProcessedSinceLastRebalance.set(0);
                // We use message cap for remaining here as the remainingMessages are not set yet.
                System.out.println(statusAsJson(totalMessageProcessed.get(),
                    numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(),
                    transactionalId, "RebalanceComplete"));
            }
        });
    } else {
        TopicPartition inputPartition = new TopicPartition(topicName, parsedArgs.getInt("inputPartition"));
        consumer.assign(singleton(inputPartition));
        remainingMessages.set(Math.min(messagesRemaining(consumer, inputPartition), remainingMessages.get()));
    }
    final boolean enableRandomAborts = parsedArgs.getBoolean("enableRandomAborts");
    producer.initTransactions();
    final AtomicBoolean isShuttingDown = new AtomicBoolean(false);
    Exit.addShutdownHook("transactional-message-copier-shutdown-hook", () -> {
        isShuttingDown.set(true);
        consumer.wakeup();
        System.out.println(shutDownString(totalMessageProcessed.get(),
            numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId));
    });
    final boolean useGroupMetadata = parsedArgs.getBoolean("useGroupMetadata");
    try {
        Random random = new Random();
        while (!isShuttingDown.get() && remainingMessages.get() > 0) {
            System.out.println(statusAsJson(totalMessageProcessed.get(),
                numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(),
                transactionalId, "ProcessLoop"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
            if (records.count() > 0) {
                try {
                    producer.beginTransaction();
                    for (ConsumerRecord<String, String> record : records) {
                        producer.send(producerRecordFromConsumerRecord(outputTopic, record));
                    }
                    long messagesSentWithinCurrentTxn = records.count();
                    ConsumerGroupMetadata groupMetadata = useGroupMetadata
                        ? consumer.groupMetadata()
                        : new ConsumerGroupMetadata(consumerGroup);
                    producer.sendOffsetsToTransaction(consumerPositions(consumer), groupMetadata);
                    if (enableRandomAborts && random.nextInt() % 3 == 0) {
                        abortTransactionAndResetPosition(producer, consumer);
                    } else {
                        producer.commitTransaction();
                        remainingMessages.getAndAdd(-messagesSentWithinCurrentTxn);
                        numMessagesProcessedSinceLastRebalance.getAndAdd(messagesSentWithinCurrentTxn);
                        totalMessageProcessed.getAndAdd(messagesSentWithinCurrentTxn);
                    }
                } catch (ProducerFencedException e) {
                    throw new KafkaException(String.format(
                        "The transactional.id %s has been claimed by another process", transactionalId), e);
                } catch (KafkaException e) {
                    log.debug("Aborting transaction after catching exception", e);
                    abortTransactionAndResetPosition(producer, consumer);
                }
            }
        }
    } catch (WakeupException e) {
        if (!isShuttingDown.get()) {
            // Let the exception propagate if the wakeup was not triggered as part of shutdown.
            throw e;
        }
    } finally {
        Utils.closeQuietly(producer, "producer");
        Utils.closeQuietly(consumer, "consumer");
    }
}
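runEventLoop calls helpers such as messagesRemaining(...) and consumerPositions(...) that are not shown in this excerpt. The sketches below are plausible reconstructions, assuming messagesRemaining is the gap between the partition's end offset and the consumer's current position, and consumerPositions snapshots the next-fetch position for each assigned partition.

import static java.util.Collections.singleton;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

// Plausible shape of messagesRemaining: how far the current position lags
// behind the log end offset for one partition.
private static long messagesRemaining(KafkaConsumer<String, String> consumer, TopicPartition partition) {
    long currentPosition = consumer.position(partition);
    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(singleton(partition));
    if (endOffsets.containsKey(partition)) {
        return endOffsets.get(partition) - currentPosition;
    }
    return 0;
}

// Plausible shape of consumerPositions: the offsets committed with the
// transaction are the consumer's current positions, i.e. the offsets of
// the next records that would be fetched.
private static Map<TopicPartition, OffsetAndMetadata> consumerPositions(KafkaConsumer<String, String> consumer) {
    Map<TopicPartition, OffsetAndMetadata> positions = new HashMap<>();
    for (TopicPartition topicPartition : consumer.assignment()) {
        positions.put(topicPartition, new OffsetAndMetadata(consumer.position(topicPartition), null));
    }
    return positions;
}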