Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
From the class AbstractTaskTest, method shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenKafkaException:
@Test(expected = ProcessorStateException.class)
public void shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenKafkaException() {
    final Consumer consumer = mockConsumer(new KafkaException("blah"));
    final AbstractTask task = createTask(consumer, Collections.<StateStore, String>emptyMap());
    task.updateOffsetLimits();
}
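The mockConsumer helper is defined elsewhere in AbstractTaskTest. A minimal sketch of what it plausibly looks like, assuming it extends Kafka's MockConsumer and fails the committed-offset lookup that updateOffsetLimits() performs; the override shown here is an assumption, not the verified original:

private Consumer mockConsumer(final RuntimeException toThrow) {
    // Hypothetical sketch: throw the given exception from committed(), so that
    // AbstractTask.updateOffsetLimits() wraps it in a ProcessorStateException.
    return new MockConsumer(OffsetResetStrategy.EARLIEST) {
        @Override
        public OffsetAndMetadata committed(final TopicPartition partition) {
            throw toThrow;
        }
    };
}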
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
From the class FetcherTest, method testParseInvalidRecordBatch:
@Test
public void testParseInvalidRecordBatch() throws Exception {
    MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
        CompressionType.NONE, TimestampType.CREATE_TIME,
        new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
        new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
        new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
    ByteBuffer buffer = records.buffer();
    // flip some bits to fail the crc
    buffer.putInt(32, buffer.get(32) ^ 87238423);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(0);
    try {
        fetcher.fetchedRecords();
        fail("fetchedRecords should have raised");
    } catch (KafkaException e) {
        // the position should not advance since no data has been returned
        assertEquals(0, subscriptions.position(tp0).longValue());
    }
}
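The corruption is caught by the batch-level CRC check when the fetcher parses the response. A sketch of verifying the same failure directly against the buffer, assuming the RecordBatch interface's isValid()/ensureValid() methods; the exact exception subtype ensureValid() throws is left as the broad KafkaException here:

// Sketch: validate the garbled batch directly instead of going through the Fetcher.
RecordBatch batch = MemoryRecords.readableRecords(buffer.duplicate()).batches().iterator().next();
assertFalse(batch.isValid()); // stored CRC no longer matches the recomputed one
try {
    batch.ensureValid();
    fail("ensureValid should have raised");
} catch (KafkaException e) {
    // same exception family that fetchedRecords() surfaces in the test above
}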
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
From the class FetcherTest, method testInvalidDefaultRecordBatch:
@Test
public void testInvalidDefaultRecordBatch() {
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    ByteBufferOutputStream out = new ByteBufferOutputStream(buffer);
    MemoryRecordsBuilder builder = new MemoryRecordsBuilder(out, DefaultRecordBatch.CURRENT_MAGIC_VALUE,
        CompressionType.NONE, TimestampType.CREATE_TIME, 0L, 10L, 0L, (short) 0, 0, false, false, 0, 1024);
    builder.append(10L, "key".getBytes(), "value".getBytes());
    builder.close();
    buffer.flip();
    // garble the CRC
    buffer.position(17);
    buffer.put("beef".getBytes());
    buffer.position(0);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(0);
    // fetchedRecords() should always throw an exception due to the bad batch
    for (int i = 0; i < 2; i++) {
        try {
            fetcher.fetchedRecords();
            fail("fetchedRecords should have raised KafkaException");
        } catch (KafkaException e) {
            assertEquals(0, subscriptions.position(tp0).longValue());
        }
    }
}
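Position 17 is not arbitrary: in the v2 batch format the four CRC bytes sit immediately after the magic byte, so writing "beef" there overwrites exactly the stored checksum. Illustrative constants for the header layout the test relies on (DefaultRecordBatch defines equivalent offsets internally; the names below are for exposition):

// v2 RecordBatch header layout (byte offsets):
static final int MAGIC_OFFSET = 16;                  // 8 (baseOffset) + 4 (batchLength) + 4 (partitionLeaderEpoch)
static final int CRC_OFFSET = MAGIC_OFFSET + 1;      // = 17, the four bytes overwritten above
static final int ATTRIBUTES_OFFSET = CRC_OFFSET + 4; // = 21, the CRC covers from here to the end of the batch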
Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.
From the class StreamTask, method process:
/**
 * Process one record.
 *
 * @return true if this method processed a record, false otherwise
 * @throws TaskMigratedException if the task producer got fenced (EOS only)
 */
@SuppressWarnings("unchecked")
public boolean process() {
    // get the next record to process
    final StampedRecord record = partitionGroup.nextRecord(recordInfo);
    // if there is no record to process, return immediately
    if (record == null) {
        return false;
    }
    try {
        // process the record by passing it to the source node of the topology
        final ProcessorNode currNode = recordInfo.node();
        final TopicPartition partition = recordInfo.partition();
        log.trace("Start processing one record [{}]", record);
        updateProcessorContext(record, currNode);
        currNode.process(record.key(), record.value());
        log.trace("Completed processing one record [{}]", record);
        // update the consumed offset map after processing is done
        consumedOffsets.put(partition, record.offset());
        commitOffsetNeeded = true;
        // once the buffered queue has shrunk back to the threshold, resume consumption on this partition
        if (recordInfo.queue().size() == maxBufferedSize) {
            consumer.resume(singleton(partition));
        }
    } catch (final ProducerFencedException fatal) {
        throw new TaskMigratedException(this, fatal);
    } catch (final KafkaException e) {
        throw new StreamsException(format("Exception caught in process. taskId=%s, processor=%s, topic=%s, partition=%d, offset=%d",
            id(), processorContext.currentNode().name(), record.topic(), record.partition(), record.offset()), e);
    } finally {
        processorContext.setCurrentNode(null);
    }
    return true;
}
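The boolean return value drives a draining loop one level up the call stack. A simplified sketch of such a caller, assuming a collection of StreamTask instances; the method and parameter names here are illustrative, not the actual StreamThread code:

// Illustrative caller loop: keep invoking process() until every task reports
// that its partition group has no buffered record left (process() returned false).
int processAll(final Collection<StreamTask> tasks) {
    int processed = 0;
    boolean anyProcessed = true;
    while (anyProcessed) {
        anyProcessed = false;
        for (final StreamTask task : tasks) {
            if (task.process()) {
                anyProcessed = true;
                processed++;
            }
        }
    }
    return processed;
}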
Use of org.apache.kafka.common.KafkaException in project cdap by caskdata.
From the class KafkaLogProcessorPipeline, method run:
@Override
protected void run() {
    runThread = Thread.currentThread();
    try {
        initializeOffsets();
        LOG.info("Kafka offsets initialized for pipeline {} as {}", name, offsets);
        Map<Integer, Future<Iterable<MessageAndOffset>>> futures = new HashMap<>();
        String topic = config.getTopic();
        lastCheckpointTime = System.currentTimeMillis();
        while (!stopped) {
            boolean hasMessageProcessed = false;
            for (Map.Entry<Integer, Future<Iterable<MessageAndOffset>>> entry : fetchAll(offsets, futures).entrySet()) {
                int partition = entry.getKey();
                try {
                    if (processMessages(topic, partition, entry.getValue())) {
                        hasMessageProcessed = true;
                    }
                } catch (IOException | KafkaException e) {
                    OUTAGE_LOG.warn("Failed to fetch or process messages from {}:{}. Will be retried in next iteration.",
                        topic, partition, e);
                }
            }
            long now = System.currentTimeMillis();
            unSyncedEvents += appendEvents(now, false);
            long nextCheckpointDelay = trySyncAndPersistCheckpoints(now);
            // If nothing was processed, sleep until the earliest event in the buffer is due to be written out.
            if (!hasMessageProcessed) {
                long sleepMillis = config.getEventDelayMillis();
                if (!eventQueue.isEmpty()) {
                    sleepMillis += eventQueue.first().getTimeStamp() - now;
                }
                sleepMillis = Math.min(sleepMillis, nextCheckpointDelay);
                if (sleepMillis > 0) {
                    TimeUnit.MILLISECONDS.sleep(sleepMillis);
                }
            }
        }
    } catch (InterruptedException e) {
        // Interruption means stopping the service.
    }
}
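fetchAll is not shown on this page; judging from how it is called, it issues one asynchronous fetch per partition and returns the pending futures keyed by partition id. A rough sketch under that assumption — fetchExecutor and fetchMessages are hypothetical names, the parameter types may differ from the real field types, and the real method presumably also reuses futures that have not completed yet:

private Map<Integer, Future<Iterable<MessageAndOffset>>> fetchAll(
        final Map<Integer, Long> offsets,
        final Map<Integer, Future<Iterable<MessageAndOffset>>> futures) {
    for (final Map.Entry<Integer, Long> entry : offsets.entrySet()) {
        final int partition = entry.getKey();
        final long offset = entry.getValue();
        // submit an async fetch for this partition starting at its current offset
        futures.put(partition, fetchExecutor.submit(new Callable<Iterable<MessageAndOffset>>() {
            @Override
            public Iterable<MessageAndOffset> call() throws Exception {
                return fetchMessages(partition, offset);
            }
        }));
    }
    return futures;
}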