Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.
The class MockProducer, method send.
/**
 * Adds the record to the list of sent records.
 *
 * @see #history()
 */
@Override
public synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback) {
    if (this.closed) {
        throw new IllegalStateException("MockProducer is already closed.");
    }
    if (this.producerFenced) {
        throw new KafkaException("MockProducer is fenced.", new ProducerFencedException("Fenced"));
    }
    if (this.sendException != null) {
        throw this.sendException;
    }
    int partition = 0;
    if (!this.cluster.partitionsForTopic(record.topic()).isEmpty()) {
        partition = partition(record, this.cluster);
    } else {
        // just to throw ClassCastException if serializers are not the proper ones to serialize key/value
        keySerializer.serialize(record.topic(), record.key());
        valueSerializer.serialize(record.topic(), record.value());
    }
    TopicPartition topicPartition = new TopicPartition(record.topic(), partition);
    ProduceRequestResult result = new ProduceRequestResult(topicPartition);
    FutureRecordMetadata future = new FutureRecordMetadata(result, 0, RecordBatch.NO_TIMESTAMP, 0, 0, Time.SYSTEM);
    long offset = nextOffset(topicPartition);
    long baseOffset = Math.max(0, offset - Integer.MAX_VALUE);
    int batchIndex = (int) Math.min(Integer.MAX_VALUE, offset);
    Completion completion = new Completion(offset,
        new RecordMetadata(topicPartition, baseOffset, batchIndex, RecordBatch.NO_TIMESTAMP, 0, 0),
        result, callback, topicPartition);
    if (!this.transactionInFlight)
        this.sent.add(record);
    else
        this.uncommittedSends.add(record);
    if (autoComplete)
        completion.complete(null);
    else
        this.completions.addLast(completion);
    return future;
}
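To see this fencing path from the caller's side, a small sketch can fence the mock and check the wrapped cause. This is a minimal sketch, assuming MockProducer's fenceProducer() helper and the (autoComplete, keySerializer, valueSerializer) constructor; both exist in recent Kafka client versions, but verify against yours.

import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.StringSerializer;

public class MockProducerFencingSketch {
    public static void main(String[] args) {
        // autoComplete = true so send() completes futures immediately
        MockProducer<String, String> producer =
            new MockProducer<>(true, new StringSerializer(), new StringSerializer());
        producer.initTransactions();
        // fenceProducer() flips the internal producerFenced flag that send() checks above
        producer.fenceProducer();
        try {
            producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
        } catch (KafkaException e) {
            // note that send() wraps ProducerFencedException rather than throwing it directly
            System.out.println("fenced: " + (e.getCause() instanceof ProducerFencedException));
        }
    }
}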
Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.
The class TransactionalMessageCopier, method runEventLoop.
public static void runEventLoop(Namespace parsedArgs) {
    final String transactionalId = parsedArgs.getString("transactionalId");
    final String outputTopic = parsedArgs.getString("outputTopic");
    String consumerGroup = parsedArgs.getString("consumerGroup");
    final KafkaProducer<String, String> producer = createProducer(parsedArgs);
    final KafkaConsumer<String, String> consumer = createConsumer(parsedArgs);
    final AtomicLong remainingMessages = new AtomicLong(
        parsedArgs.getInt("maxMessages") == -1 ? Long.MAX_VALUE : parsedArgs.getInt("maxMessages"));
    boolean groupMode = parsedArgs.getBoolean("groupMode");
    String topicName = parsedArgs.getString("inputTopic");
    final AtomicLong numMessagesProcessedSinceLastRebalance = new AtomicLong(0);
    final AtomicLong totalMessageProcessed = new AtomicLong(0);
    if (groupMode) {
        consumer.subscribe(Collections.singleton(topicName), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                remainingMessages.set(partitions.stream()
                    .mapToLong(partition -> messagesRemaining(consumer, partition)).sum());
                numMessagesProcessedSinceLastRebalance.set(0);
                // We use message cap for remaining here as the remainingMessages are not set yet.
                System.out.println(statusAsJson(totalMessageProcessed.get(),
                    numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(),
                    transactionalId, "RebalanceComplete"));
            }
        });
    } else {
        TopicPartition inputPartition = new TopicPartition(topicName, parsedArgs.getInt("inputPartition"));
        consumer.assign(singleton(inputPartition));
        remainingMessages.set(Math.min(messagesRemaining(consumer, inputPartition), remainingMessages.get()));
    }
    final boolean enableRandomAborts = parsedArgs.getBoolean("enableRandomAborts");
    producer.initTransactions();
    final AtomicBoolean isShuttingDown = new AtomicBoolean(false);
    Exit.addShutdownHook("transactional-message-copier-shutdown-hook", () -> {
        isShuttingDown.set(true);
        consumer.wakeup();
        System.out.println(shutDownString(totalMessageProcessed.get(),
            numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(), transactionalId));
    });
    final boolean useGroupMetadata = parsedArgs.getBoolean("useGroupMetadata");
    try {
        Random random = new Random();
        while (!isShuttingDown.get() && remainingMessages.get() > 0) {
            System.out.println(statusAsJson(totalMessageProcessed.get(),
                numMessagesProcessedSinceLastRebalance.get(), remainingMessages.get(),
                transactionalId, "ProcessLoop"));
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
            if (records.count() > 0) {
                try {
                    producer.beginTransaction();
                    for (ConsumerRecord<String, String> record : records) {
                        producer.send(producerRecordFromConsumerRecord(outputTopic, record));
                    }
                    long messagesSentWithinCurrentTxn = records.count();
                    ConsumerGroupMetadata groupMetadata = useGroupMetadata
                        ? consumer.groupMetadata()
                        : new ConsumerGroupMetadata(consumerGroup);
                    producer.sendOffsetsToTransaction(consumerPositions(consumer), groupMetadata);
                    if (enableRandomAborts && random.nextInt() % 3 == 0) {
                        abortTransactionAndResetPosition(producer, consumer);
                    } else {
                        producer.commitTransaction();
                        remainingMessages.getAndAdd(-messagesSentWithinCurrentTxn);
                        numMessagesProcessedSinceLastRebalance.getAndAdd(messagesSentWithinCurrentTxn);
                        totalMessageProcessed.getAndAdd(messagesSentWithinCurrentTxn);
                    }
                } catch (ProducerFencedException e) {
                    throw new KafkaException(String.format(
                        "The transactional.id %s has been claimed by another process", transactionalId), e);
                } catch (KafkaException e) {
                    log.debug("Aborting transaction after catching exception", e);
                    abortTransactionAndResetPosition(producer, consumer);
                }
            }
        }
    } catch (WakeupException e) {
        if (!isShuttingDown.get()) {
            // Let the exception propagate if the wakeup was not raised as part of shutdown.
            throw e;
        }
    } finally {
        Utils.closeQuietly(producer, "producer");
        Utils.closeQuietly(consumer, "consumer");
    }
}
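Stripped of the tooling concerns, this loop is the standard transactional consume-transform-produce pattern: ProducerFencedException is fatal (another instance now owns the transactional.id), while other KafkaExceptions are aborted and retried. The sketch below is a hedged condensation, not the tool itself; copyBatch() is hypothetical, and the real copier also rewinds the consumer to the last committed offsets after an abort.

import java.time.Duration;
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.ProducerFencedException;

final class CopyBatchSketch {
    // copyBatch() condenses one iteration of the event loop above
    static void copyBatch(KafkaConsumer<String, String> consumer,
                          KafkaProducer<String, String> producer,
                          String outputTopic) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(200));
        if (records.isEmpty())
            return;
        try {
            producer.beginTransaction();
            for (ConsumerRecord<String, String> record : records)
                producer.send(new ProducerRecord<>(outputTopic, record.key(), record.value()));
            // commit the consumed positions atomically with the produced records
            Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
            for (TopicPartition tp : records.partitions())
                offsets.put(tp, new OffsetAndMetadata(consumer.position(tp)));
            producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());
            producer.commitTransaction();
        } catch (ProducerFencedException e) {
            // fatal: the transactional.id has been claimed by another process
            throw new KafkaException("transactional.id claimed by another process", e);
        } catch (KafkaException e) {
            // abortable: roll back; the real tool also resets the consumer
            // position to the last committed offsets before re-polling
            producer.abortTransaction();
        }
    }
}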
Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.
The class StreamThreadTest, method shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerGotFencedInCommitTransactionWhenSuspendingTasks.
@Test
public void shouldNotCloseTaskAndRemoveFromTaskManagerIfProducerGotFencedInCommitTransactionWhenSuspendingTasks() {
    final StreamThread thread = createStreamThread(CLIENT_ID, new StreamsConfig(configProps(true)), true);
    internalTopologyBuilder.addSource(null, "name", null, null, null, topic1);
    internalTopologyBuilder.addSink("out", "output", null, null, null, "name");
    thread.setState(StreamThread.State.STARTING);
    thread.rebalanceListener().onPartitionsRevoked(Collections.emptySet());
    final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>();
    final List<TopicPartition> assignedPartitions = new ArrayList<>();
    // assign single partition
    assignedPartitions.add(t1p1);
    activeTasks.put(task1, Collections.singleton(t1p1));
    thread.taskManager().handleAssignment(activeTasks, emptyMap());
    final MockConsumer<byte[], byte[]> mockConsumer = (MockConsumer<byte[], byte[]>) thread.mainConsumer();
    mockConsumer.assign(assignedPartitions);
    mockConsumer.updateBeginningOffsets(Collections.singletonMap(t1p1, 0L));
    thread.rebalanceListener().onPartitionsAssigned(assignedPartitions);
    thread.runOnce();
    assertThat(thread.activeTasks().size(), equalTo(1));
    // need to process a record to enable committing
    addRecord(mockConsumer, 0L);
    thread.runOnce();
    clientSupplier.producers.get(0).commitTransactionException = new ProducerFencedException("Producer is fenced");
    assertThrows(TaskMigratedException.class, () -> thread.rebalanceListener().onPartitionsRevoked(assignedPartitions));
    assertFalse(clientSupplier.producers.get(0).transactionCommitted());
    assertFalse(clientSupplier.producers.get(0).closed());
    assertEquals(1, thread.activeTasks().size());
}
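The interesting trick in this test is how the fence is injected: MockProducer exposes a public commitTransactionException field that commitTransaction() rethrows. A minimal standalone sketch of just that injection, isolated from the Streams machinery (the class name is hypothetical):

import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.serialization.StringSerializer;

public class CommitFenceInjectionSketch {
    public static void main(String[] args) {
        MockProducer<String, String> producer =
            new MockProducer<>(true, new StringSerializer(), new StringSerializer());
        producer.initTransactions();
        producer.beginTransaction();
        // inject the fence exactly as the test above does
        producer.commitTransactionException = new ProducerFencedException("Producer is fenced");
        try {
            producer.commitTransaction();
        } catch (ProducerFencedException e) {
            // Streams maps this into TaskMigratedException, which the test asserts
            System.out.println("commit fenced: " + e.getMessage());
        }
        // the transaction was never committed and the producer is still open,
        // matching the assertFalse checks in the test above
        System.out.println("committed=" + producer.transactionCommitted()
            + " closed=" + producer.closed());
    }
}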
Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.
The class RecordCollectorImpl, method recordSendError.
private void recordSendError(final String topic, final Exception exception, final ProducerRecord<byte[], byte[]> serializedRecord) {
    String errorMessage = String.format(SEND_EXCEPTION_MESSAGE, topic, taskId, exception.toString());
    if (isFatalException(exception)) {
        errorMessage += "\nWritten offsets would not be recorded and no more records would be sent since this is a fatal error.";
        sendException.set(new StreamsException(errorMessage, exception));
    } else if (exception instanceof ProducerFencedException ||
               exception instanceof InvalidProducerEpochException ||
               exception instanceof OutOfOrderSequenceException) {
        errorMessage += "\nWritten offsets would not be recorded and no more records would be sent since the producer is fenced, " +
            "indicating the task may be migrated out";
        sendException.set(new TaskMigratedException(errorMessage, exception));
    } else {
        if (exception instanceof RetriableException) {
            errorMessage += "\nThe broker is either slow or in bad state (like not having enough replicas) in responding the request, " +
                "or the connection to broker was interrupted sending the request or receiving the response. " +
                "\nConsider overwriting `max.block.ms` and /or " +
                "`delivery.timeout.ms` to a larger value to wait longer for such scenarios and avoid timeout errors";
            sendException.set(new TaskCorruptedException(Collections.singleton(taskId)));
        } else {
            if (productionExceptionHandler.handle(serializedRecord, exception) == ProductionExceptionHandlerResponse.FAIL) {
                errorMessage += "\nException handler choose to FAIL the processing, no more records would be sent.";
                sendException.set(new StreamsException(errorMessage, exception));
            } else {
                errorMessage += "\nException handler choose to CONTINUE processing in spite of this error but written offsets would not be recorded.";
                droppedRecordsSensor.record();
            }
        }
    }
    log.error(errorMessage, exception);
}
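Ignoring the message formatting, the method is a four-way classification from exception type to the exception that eventually surfaces to the application. A hedged sketch of that mapping as a pure function; the enum, classify(), and the fatal flag parameter are illustrative, not Streams API, but the instanceof checks and their order mirror the snippet above:

import org.apache.kafka.common.errors.InvalidProducerEpochException;
import org.apache.kafka.common.errors.OutOfOrderSequenceException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.common.errors.RetriableException;

final class SendErrorClassifierSketch {
    enum Outcome { FATAL_STREAMS_EXCEPTION, TASK_MIGRATED, TASK_CORRUPTED, ASK_HANDLER }

    static Outcome classify(Exception exception, boolean fatal) {
        if (fatal)
            return Outcome.FATAL_STREAMS_EXCEPTION;  // stops the task; offsets not recorded
        if (exception instanceof ProducerFencedException
                || exception instanceof InvalidProducerEpochException
                || exception instanceof OutOfOrderSequenceException)
            return Outcome.TASK_MIGRATED;            // producer fenced; task may have migrated out
        if (exception instanceof RetriableException)
            return Outcome.TASK_CORRUPTED;           // timed out despite retries; task state reset
        return Outcome.ASK_HANDLER;                  // ProductionExceptionHandler picks FAIL or CONTINUE
    }
}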
Use of org.apache.kafka.common.errors.ProducerFencedException in project kafka by apache.
The class StreamsProducer, method maybeBeginTransaction.
private void maybeBeginTransaction() {
    if (eosEnabled() && !transactionInFlight) {
        try {
            producer.beginTransaction();
            transactionInFlight = true;
        } catch (final ProducerFencedException | InvalidProducerEpochException error) {
            throw new TaskMigratedException(
                formatException("Producer got fenced trying to begin a new transaction"), error);
        } catch (final KafkaException error) {
            throw new StreamsException(
                formatException("Error encountered trying to begin a new transaction"), error);
        }
    }
}
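The same translate-and-rethrow pattern applies to any transactional producer call: fencing-related exceptions become the recoverable TaskMigratedException (the thread rejoins the group and the task is reassigned), everything else becomes a fatal StreamsException. A minimal self-contained sketch; beginGuarded() is hypothetical, and StreamsProducer's private formatException() helper is replaced by a plain message:

import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.InvalidProducerEpochException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.streams.errors.StreamsException;
import org.apache.kafka.streams.errors.TaskMigratedException;

final class TxnTranslateSketch {
    static void beginGuarded(Producer<byte[], byte[]> producer) {
        try {
            producer.beginTransaction();
        } catch (final ProducerFencedException | InvalidProducerEpochException e) {
            // recoverable at the Streams level: the thread rejoins the group
            // and the task is reassigned instead of crashing the application
            throw new TaskMigratedException("Producer got fenced trying to begin a new transaction", e);
        } catch (final KafkaException e) {
            // anything else is treated as fatal for this producer
            throw new StreamsException("Error encountered trying to begin a new transaction", e);
        }
    }
}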