Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
The class RecordAccumulatorTest, method testAbortUnsentBatches:
@Test
public void testAbortUnsentBatches() throws Exception {
    long lingerMs = Long.MAX_VALUE;
    int numRecords = 100;
    final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0);
    final RecordAccumulator accum = createTestRecordAccumulator(128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs);
    final KafkaException cause = new KafkaException();
    class TestCallback implements Callback {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            assertEquals(cause, exception);
            numExceptionReceivedInCallback.incrementAndGet();
        }
    }
    for (int i = 0; i < numRecords; i++)
        accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs);
    RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds());
    assertFalse(result.readyNodes.isEmpty());
    Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds());
    assertTrue(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
    accum.abortUndrainedBatches(cause);
    int numDrainedRecords = 0;
    for (Map.Entry<Integer, List<ProducerBatch>> drainedEntry : drained.entrySet()) {
        for (ProducerBatch batch : drainedEntry.getValue()) {
            assertTrue(batch.isClosed());
            assertFalse(batch.produceFuture.completed());
            numDrainedRecords += batch.recordCount;
        }
    }
    assertTrue(numDrainedRecords > 0);
    assertTrue(numExceptionReceivedInCallback.get() > 0);
    assertEquals(numRecords, numExceptionReceivedInCallback.get() + numDrainedRecords);
    assertFalse(accum.hasUndrained());
    assertTrue(accum.hasIncomplete());
}
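A note on the assertion above: the exception delivered to each callback is the very cause instance passed to abortUndrainedBatches, which is why assertEquals holds (KafkaException does not override equals). The same exception surfaces through ordinary producer callbacks; a minimal sketch, where handleFailure is a hypothetical application method, not part of the Kafka API:

producer.send(record, new Callback() {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            // For records in aborted batches, this is the cause supplied to the abort call.
            handleFailure(exception);
        }
    }
});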
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
The class TransactionManagerTest, method testMaybeAddPartitionToTransactionAfterFatalError:
@Test(expected = KafkaException.class)
public void testMaybeAddPartitionToTransactionAfterFatalError() {
    long pid = 13131L;
    short epoch = 1;
    doInitTransactions(pid, epoch);
    transactionManager.transitionToFatalError(new KafkaException());
    transactionManager.maybeAddPartitionToTransaction(new TopicPartition("foo", 0));
}
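Once transitionToFatalError has run, the manager rejects all further transactional work; the test asserts this via expected = KafkaException.class. For context, a minimal sketch of how client code conventionally separates fatal from abortable transaction errors (producer and record are assumed to exist; the pattern follows the standard KafkaProducer transactional API):

try {
    producer.beginTransaction();
    producer.send(record);
    producer.commitTransaction();
} catch (ProducerFencedException e) {
    // Fatal: another producer with the same transactional.id has taken over.
    // The only recovery is to close this producer and create a new one.
    producer.close();
} catch (KafkaException e) {
    // Abortable: roll the transaction back and retry on the same producer instance.
    producer.abortTransaction();
}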
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
The class TransactionalMessageCopier, method main:
public static void main(String[] args) throws IOException {
    Namespace parsedArgs = argParser().parseArgsOrFail(args);
    Integer numMessagesPerTransaction = parsedArgs.getInt("messagesPerTransaction");
    final String transactionalId = parsedArgs.getString("transactionalId");
    final String outputTopic = parsedArgs.getString("outputTopic");
    String consumerGroup = parsedArgs.getString("consumerGroup");
    TopicPartition inputPartition = new TopicPartition(parsedArgs.getString("inputTopic"), parsedArgs.getInt("inputPartition"));
    final KafkaProducer<String, String> producer = createProducer(parsedArgs);
    final KafkaConsumer<String, String> consumer = createConsumer(parsedArgs);
    consumer.assign(singleton(inputPartition));
    long maxMessages = parsedArgs.getInt("maxMessages") == -1 ? Long.MAX_VALUE : parsedArgs.getInt("maxMessages");
    maxMessages = Math.min(messagesRemaining(consumer, inputPartition), maxMessages);
    final boolean enableRandomAborts = parsedArgs.getBoolean("enableRandomAborts");
    producer.initTransactions();
    final AtomicBoolean isShuttingDown = new AtomicBoolean(false);
    final AtomicLong remainingMessages = new AtomicLong(maxMessages);
    final AtomicLong numMessagesProcessed = new AtomicLong(0);
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            isShuttingDown.set(true);
            // Flush any remaining messages
            producer.close();
            synchronized (consumer) {
                consumer.close();
            }
            System.out.println(shutDownString(numMessagesProcessed.get(), remainingMessages.get(), transactionalId));
        }
    });
    try {
        Random random = new Random();
        while (0 < remainingMessages.get()) {
            System.out.println(statusAsJson(numMessagesProcessed.get(), remainingMessages.get(), transactionalId));
            if (isShuttingDown.get())
                break;
            int messagesInCurrentTransaction = 0;
            long numMessagesForNextTransaction = Math.min(numMessagesPerTransaction, remainingMessages.get());
            try {
                producer.beginTransaction();
                while (messagesInCurrentTransaction < numMessagesForNextTransaction) {
                    ConsumerRecords<String, String> records = consumer.poll(200L);
                    for (ConsumerRecord<String, String> record : records) {
                        producer.send(producerRecordFromConsumerRecord(outputTopic, record));
                        messagesInCurrentTransaction++;
                    }
                }
                producer.sendOffsetsToTransaction(consumerPositions(consumer), consumerGroup);
                if (enableRandomAborts && random.nextInt() % 3 == 0) {
                    throw new KafkaException("Aborting transaction");
                } else {
                    producer.commitTransaction();
                    remainingMessages.set(maxMessages - numMessagesProcessed.addAndGet(messagesInCurrentTransaction));
                }
            } catch (ProducerFencedException | OutOfOrderSequenceException e) {
                // We cannot recover from these errors, so just rethrow them and let the process fail
                throw e;
            } catch (KafkaException e) {
                producer.abortTransaction();
                resetToLastCommittedPositions(consumer);
            }
        }
    } finally {
        producer.close();
        synchronized (consumer) {
            consumer.close();
        }
    }
    System.exit(0);
}
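The helper resetToLastCommittedPositions is called but not shown in this snippet. A plausible sketch of what it does, assuming it rewinds each assigned partition to its last committed offset so that an aborted transaction's input is consumed again:

private static void resetToLastCommittedPositions(KafkaConsumer<String, String> consumer) {
    for (TopicPartition topicPartition : consumer.assignment()) {
        OffsetAndMetadata offsetAndMetadata = consumer.committed(topicPartition);
        if (offsetAndMetadata != null)
            // Resume from the offset recorded by the last committed transaction.
            consumer.seek(topicPartition, offsetAndMetadata.offset());
        else
            // Nothing committed yet for this partition: start from the beginning.
            consumer.seekToBeginning(singleton(topicPartition));
    }
}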
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
The class WorkerSourceTask, method sendRecords:
/**
 * Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can
 * be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException.
 *
 * @return true if all messages were sent, false if some need to be retried
 */
private boolean sendRecords() {
    int processed = 0;
    recordBatch(toSend.size());
    final SourceRecordWriteCounter counter = new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
    for (final SourceRecord preTransformRecord : toSend) {
        final SourceRecord record = transformationChain.apply(preTransformRecord);
        if (record == null) {
            counter.skipRecord();
            commitTaskRecord(preTransformRecord);
            continue;
        }
        RecordHeaders headers = convertHeaderFor(record);
        byte[] key = keyConverter.fromConnectData(record.topic(), record.keySchema(), record.key());
        byte[] value = valueConverter.fromConnectData(record.topic(), record.valueSchema(), record.value());
        final ProducerRecord<byte[], byte[]> producerRecord = new ProducerRecord<>(record.topic(), record.kafkaPartition(), ConnectUtils.checkAndConvertTimestamp(record.timestamp()), key, value, headers);
        log.trace("{} Appending record with key {}, value {}", this, record.key(), record.value());
        // Queue the record before sending, since the send callback can fire immediately (even
        // synchronously in some cases). On a retry, the previously attempted record is still in
        // toSend, and the lastSendFailed flag tracks whether to add it again to the outstanding
        // messages and update the offsets.
        synchronized (this) {
            if (!lastSendFailed) {
                if (!flushing) {
                    outstandingMessages.put(producerRecord, producerRecord);
                } else {
                    outstandingMessagesBacklog.put(producerRecord, producerRecord);
                }
                // Offsets are converted & serialized in the OffsetWriter
                offsetWriter.offset(record.sourcePartition(), record.sourceOffset());
            }
        }
        try {
            final String topic = producerRecord.topic();
            producer.send(producerRecord, new Callback() {
                @Override
                public void onCompletion(RecordMetadata recordMetadata, Exception e) {
                    if (e != null) {
                        // Given the default settings for zero data loss, this should basically never happen --
                        // between "infinite" retries, indefinite blocking on full buffers, and "infinite" request
                        // timeouts, callbacks with exceptions should never be invoked in practice. If the
                        // user overrode these settings, the best we can do is notify them of the failure via
                        // logging.
                        log.error("{} failed to send record to {}: {}", this, topic, e);
                        log.debug("{} Failed record: {}", this, preTransformRecord);
                    } else {
                        log.trace("{} Wrote record successfully: topic {} partition {} offset {}", this, recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        commitTaskRecord(preTransformRecord);
                    }
                    recordSent(producerRecord);
                    counter.completeRecord();
                }
            });
            lastSendFailed = false;
        } catch (RetriableException e) {
            log.warn("{} Failed to send {}, backing off before retrying:", this, producerRecord, e);
            toSend = toSend.subList(processed, toSend.size());
            lastSendFailed = true;
            counter.retryRemaining();
            return false;
        } catch (KafkaException e) {
            throw new ConnectException("Unrecoverable exception trying to send", e);
        }
        processed++;
    }
    toSend = null;
    return true;
}
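The boolean return value lets the caller distinguish "batch done" from "retry after backoff". A minimal sketch of a caller loop, assuming a hypothetical backoffMs constant (the real Connect runtime drives retries from its own work loop with its own timing):

// Hypothetical caller: keep retrying until the whole batch is sent.
while (!sendRecords()) {
    try {
        Thread.sleep(backoffMs);  // backoffMs is illustrative, not from the source
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        break;
    }
}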
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
The class KafkaBasedLog, method stop:
public void stop() {
    log.info("Stopping KafkaBasedLog for topic " + topic);
    synchronized (this) {
        stopRequested = true;
    }
    // Wake up the work thread if it is blocked in poll(), then wait for it to exit.
    consumer.wakeup();
    try {
        thread.join();
    } catch (InterruptedException e) {
        throw new ConnectException("Failed to stop KafkaBasedLog. Exiting without cleanly shutting down its producer and consumer.", e);
    }
    try {
        producer.close();
    } catch (KafkaException e) {
        log.error("Failed to stop KafkaBasedLog producer", e);
    }
    try {
        consumer.close();
    } catch (KafkaException e) {
        log.error("Failed to stop KafkaBasedLog consumer", e);
    }
    log.info("Stopped KafkaBasedLog for topic " + topic);
}
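stop() depends on consumer.wakeup() to break the work thread out of a blocking poll. A minimal sketch of the consuming side of that handshake (stopRequested mirrors the field above; the loop body is illustrative):

try {
    while (true) {
        synchronized (this) {
            if (stopRequested)
                break;
        }
        // wakeup() makes a blocking poll throw WakeupException instead of hanging here.
        ConsumerRecords<byte[], byte[]> records = consumer.poll(Integer.MAX_VALUE);
        // ... hand records to the log's read callback ...
    }
} catch (WakeupException e) {
    // Expected during shutdown: stop() called consumer.wakeup(); exit the loop.
}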