Example 31 with KafkaException

Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.

From class AbstractTaskTest, method shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenKafkaException:

@Test(expected = ProcessorStateException.class)
public void shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenKafkaException() {
    final Consumer consumer = mockConsumer(new KafkaException("blah"));
    final AbstractTask task = createTask(consumer, Collections.<StateStore, String>emptyMap());
    task.updateOffsetLimits();
}
Also used: MockConsumer (org.apache.kafka.clients.consumer.MockConsumer), Consumer (org.apache.kafka.clients.consumer.Consumer), KafkaException (org.apache.kafka.common.KafkaException), Test (org.junit.Test)
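
The contract under test is wrap-and-rethrow: a KafkaException raised while reading committed offsets must surface to the caller as a ProcessorStateException. A minimal, self-contained sketch of that pattern, assuming a hypothetical OffsetReader stand-in for the consumer call (not part of the Kafka API):

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.streams.errors.ProcessorStateException;

public final class OffsetLimitSketch {

    // Hypothetical stand-in for the consumer call the real task makes.
    interface OffsetReader {
        long committedOffset();
    }

    static long updateOffsetLimit(final OffsetReader reader) {
        try {
            return reader.committedOffset();
        } catch (final KafkaException fatal) {
            // Translate the client-level failure into the Streams-level exception.
            throw new ProcessorStateException("Failed to initialize offsets", fatal);
        }
    }

    public static void main(final String[] args) {
        try {
            updateOffsetLimit(() -> {
                throw new KafkaException("blah");
            });
        } catch (final ProcessorStateException expected) {
            System.out.println("wrapped as expected: " + expected);
        }
    }
}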

Example 32 with KafkaException

Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.

From class FetcherTest, method testParseInvalidRecordBatch:

@Test
public void testParseInvalidRecordBatch() throws Exception {
    MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L, CompressionType.NONE,
            TimestampType.CREATE_TIME,
            new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
            new SimpleRecord(2L, "b".getBytes(), "2".getBytes()),
            new SimpleRecord(3L, "c".getBytes(), "3".getBytes()));
    ByteBuffer buffer = records.buffer();
    // flip some bits so the batch fails its CRC check
    buffer.putInt(32, buffer.get(32) ^ 87238423);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(0);
    try {
        fetcher.fetchedRecords();
        fail("fetchedRecords should have raised");
    } catch (KafkaException e) {
        // the position should not advance since no data has been returned
        assertEquals(0, subscriptions.position(tp0).longValue());
    }
}
Also used: SimpleRecord (org.apache.kafka.common.record.SimpleRecord), KafkaException (org.apache.kafka.common.KafkaException), ByteBuffer (java.nio.ByteBuffer), MemoryRecords (org.apache.kafka.common.record.MemoryRecords), Test (org.junit.Test)
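
The assertion inside the catch block is the point of the test: because the corrupt batch is never handed out, the consumer's position stays at 0, so a plain retry would fetch and fail on the same batch again. A hedged sketch of one application-level response using only the public Consumer API (skipping offsets silently loses data, so treat this as an illustration, not a recommendation):

import java.time.Duration;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;

static void pollSkippingCorruptRecords(final Consumer<byte[], byte[]> consumer,
                                       final TopicPartition tp) {
    try {
        final ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(100));
        // ... process records (elided) ...
    } catch (final KafkaException e) {
        final long stuckAt = consumer.position(tp); // unchanged, as the test asserts
        // Coarse recovery: step past the offset we are stuck at. This skips a
        // single offset, which may or may not clear the entire corrupt batch.
        consumer.seek(tp, stuckAt + 1);
    }
}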

Example 33 with KafkaException

Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.

From class FetcherTest, method testInvalidDefaultRecordBatch:

@Test
public void testInvalidDefaultRecordBatch() {
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    ByteBufferOutputStream out = new ByteBufferOutputStream(buffer);
    MemoryRecordsBuilder builder = new MemoryRecordsBuilder(out, DefaultRecordBatch.CURRENT_MAGIC_VALUE,
            CompressionType.NONE, TimestampType.CREATE_TIME, 0L, 10L, 0L, (short) 0, 0, false, false, 0, 1024);
    builder.append(10L, "key".getBytes(), "value".getBytes());
    builder.close();
    buffer.flip();
    // Garble the CRC
    buffer.position(17);
    buffer.put("beef".getBytes());
    buffer.position(0);
    subscriptions.assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, fetcher.sendFetches());
    client.prepareResponse(fullFetchResponse(tp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(0);
    // fetchedRecords() should always throw an exception due to the bad batch.
    for (int i = 0; i < 2; i++) {
        try {
            fetcher.fetchedRecords();
            fail("fetchedRecords should have raised KafkaException");
        } catch (KafkaException e) {
            assertEquals(0, subscriptions.position(tp0).longValue());
        }
    }
}
Also used: ByteBufferOutputStream (org.apache.kafka.common.utils.ByteBufferOutputStream), MemoryRecordsBuilder (org.apache.kafka.common.record.MemoryRecordsBuilder), KafkaException (org.apache.kafka.common.KafkaException), ByteBuffer (java.nio.ByteBuffer), Test (org.junit.Test)
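
The magic position 17 is not arbitrary: in the v2 batch format the 4-byte CRC field starts at offset 17 (after the 8-byte base offset, 4-byte batch length, 4-byte partition leader epoch, and 1-byte magic), and the checksum covers everything from the attributes field at offset 21 to the end of the batch. Overwriting the stored CRC with "beef" therefore guarantees a mismatch. A small sketch of the underlying validation step using the JDK's CRC32C (the field offsets are parameters here, not the exact Kafka layout):

import java.nio.ByteBuffer;
import java.util.zip.CRC32C;

// Returns true when the checksum stored at crcOffset matches the value
// recomputed over the bytes from dataOffset to the end of the buffer.
static boolean crcMatches(final ByteBuffer batch, final int crcOffset, final int dataOffset) {
    final int stored = batch.getInt(crcOffset);
    final CRC32C crc = new CRC32C();
    final ByteBuffer data = batch.duplicate();
    data.position(dataOffset);
    crc.update(data); // consumes the remaining bytes of the duplicate
    return stored == (int) crc.getValue();
}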

Example 34 with KafkaException

Use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.

From class StreamTask, method process:

/**
 * Process one record.
 *
 * @return true if this method processes a record, false if it does not process a record.
 * @throws TaskMigratedException if the task producer got fenced (EOS only)
 */
@SuppressWarnings("unchecked")
public boolean process() {
    // get the next record to process
    final StampedRecord record = partitionGroup.nextRecord(recordInfo);
    // if there is no record to process, return immediately
    if (record == null) {
        return false;
    }
    try {
        // process the record by passing to the source node of the topology
        final ProcessorNode currNode = recordInfo.node();
        final TopicPartition partition = recordInfo.partition();
        log.trace("Start processing one record [{}]", record);
        updateProcessorContext(record, currNode);
        currNode.process(record.key(), record.value());
        log.trace("Completed processing one record [{}]", record);
        // update the consumed offset map after processing is done
        consumedOffsets.put(partition, record.offset());
        commitOffsetNeeded = true;
        // after processing this record, if its partition queue's buffered size has
        // decreased to the threshold, we can then resume the consumption on this partition
        if (recordInfo.queue().size() == maxBufferedSize) {
            consumer.resume(singleton(partition));
        }
    } catch (final ProducerFencedException fatal) {
        throw new TaskMigratedException(this, fatal);
    } catch (final KafkaException e) {
        throw new StreamsException(format("Exception caught in process. taskId=%s, processor=%s, topic=%s, partition=%d, offset=%d",
                id(), processorContext.currentNode().name(), record.topic(), record.partition(), record.offset()), e);
    } finally {
        processorContext.setCurrentNode(null);
    }
    return true;
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), StreamsException (org.apache.kafka.streams.errors.StreamsException), KafkaException (org.apache.kafka.common.KafkaException), ProducerFencedException (org.apache.kafka.common.errors.ProducerFencedException), TaskMigratedException (org.apache.kafka.streams.errors.TaskMigratedException)
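
The catch ordering above is load-bearing: ProducerFencedException is itself a subclass of KafkaException, so if the generic KafkaException clause came first, a fenced producer would be misreported as an ordinary processing error instead of a task migration. A condensed sketch of the same translation; TaskMigratedException's constructor signature varies across Kafka versions, so its parent StreamsException stands in for it here:

import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.errors.ProducerFencedException;
import org.apache.kafka.streams.errors.StreamsException;

static void runStep(final Runnable step) {
    try {
        step.run();
    } catch (final ProducerFencedException fatal) {
        // Most specific first: fencing means the task now belongs elsewhere.
        // The real code throws TaskMigratedException (a StreamsException subclass).
        throw new StreamsException("task migrated; rejoin required", fatal);
    } catch (final KafkaException e) {
        // Everything else is surfaced as an ordinary processing failure.
        throw new StreamsException("exception caught in process", e);
    }
}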

Example 35 with KafkaException

Use of org.apache.kafka.common.KafkaException in project cdap by caskdata.

From class KafkaLogProcessorPipeline, method run:

@Override
protected void run() {
    runThread = Thread.currentThread();
    try {
        initializeOffsets();
        LOG.info("Kafka offsets initialize for pipeline {} as {}", name, offsets);
        Map<Integer, Future<Iterable<MessageAndOffset>>> futures = new HashMap<>();
        String topic = config.getTopic();
        lastCheckpointTime = System.currentTimeMillis();
        while (!stopped) {
            boolean hasMessageProcessed = false;
            for (Map.Entry<Integer, Future<Iterable<MessageAndOffset>>> entry : fetchAll(offsets, futures).entrySet()) {
                int partition = entry.getKey();
                try {
                    if (processMessages(topic, partition, entry.getValue())) {
                        hasMessageProcessed = true;
                    }
                } catch (IOException | KafkaException e) {
                    OUTAGE_LOG.warn("Failed to fetch or process messages from {}:{}. Will be retried in next iteration.", topic, partition, e);
                }
            }
            long now = System.currentTimeMillis();
            unSyncedEvents += appendEvents(now, false);
            long nextCheckpointDelay = trySyncAndPersistCheckpoints(now);
            // Sleep until the earliest event in the buffer is time to be written out.
            if (!hasMessageProcessed) {
                long sleepMillis = config.getEventDelayMillis();
                if (!eventQueue.isEmpty()) {
                    sleepMillis += eventQueue.first().getTimeStamp() - now;
                }
                sleepMillis = Math.min(sleepMillis, nextCheckpointDelay);
                if (sleepMillis > 0) {
                    TimeUnit.MILLISECONDS.sleep(sleepMillis);
                }
            }
        }
    } catch (InterruptedException e) {
        // Interruption means stopping the service.
    }
}
Also used: HashMap (java.util.HashMap), Int2LongOpenHashMap (it.unimi.dsi.fastutil.ints.Int2LongOpenHashMap), Int2ObjectOpenHashMap (it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap), MessageAndOffset (kafka.message.MessageAndOffset), IOException (java.io.IOException), Checkpoint (co.cask.cdap.logging.meta.Checkpoint), Future (java.util.concurrent.Future), KafkaException (org.apache.kafka.common.KafkaException), Int2LongMap (it.unimi.dsi.fastutil.ints.Int2LongMap), Map (java.util.Map), Int2ObjectMap (it.unimi.dsi.fastutil.ints.Int2ObjectMap)
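
In contrast to the Streams example above, this pipeline treats KafkaException as recoverable: the failure is logged per partition and simply retried on the loop's next pass, and only InterruptedException ends the thread. A minimal sketch of that fault-tolerance shape (the names and the back-off constant are illustrative):

import java.util.List;
import java.util.concurrent.TimeUnit;
import org.apache.kafka.common.KafkaException;

static void runLoop(final List<Runnable> partitionWork) {
    try {
        while (!Thread.currentThread().isInterrupted()) {
            boolean progressed = false;
            for (final Runnable work : partitionWork) {
                try {
                    work.run();
                    progressed = true;
                } catch (final KafkaException e) {
                    // Transient fetch/processing failure: retry on the next pass.
                }
            }
            if (!progressed) {
                TimeUnit.MILLISECONDS.sleep(200); // back off when nothing was processed
            }
        }
    } catch (final InterruptedException e) {
        // Interruption means the service is stopping; fall through and exit.
    }
}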

Aggregations

Classes that appear together with KafkaException across the indexed examples, with occurrence counts:

KafkaException (org.apache.kafka.common.KafkaException): 262
Test (org.junit.Test): 69
TopicPartition (org.apache.kafka.common.TopicPartition): 56
Test (org.junit.jupiter.api.Test): 47
HashMap (java.util.HashMap): 40
IOException (java.io.IOException): 39
StreamsException (org.apache.kafka.streams.errors.StreamsException): 34
Map (java.util.Map): 32
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 28
ArrayList (java.util.ArrayList): 27
List (java.util.List): 21
ByteBuffer (java.nio.ByteBuffer): 19
ExecutionException (java.util.concurrent.ExecutionException): 19
ConfigException (org.apache.kafka.common.config.ConfigException): 16
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 14
HashSet (java.util.HashSet): 13
Properties (java.util.Properties): 13
Set (java.util.Set): 11
Collectors (java.util.stream.Collectors): 11
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 11