Example 36 with KafkaException

use of org.apache.kafka.common.KafkaException in project nakadi by zalando.

the class NakadiKafkaConsumerTest method whenReadEventsThenNakadiException.

@Test
@SuppressWarnings("unchecked")
public void whenReadEventsThenNakadiException() {
    // ARRANGE //
    final ImmutableList<RuntimeException> exceptions = ImmutableList.of(new NoOffsetForPartitionException(new TopicPartition("", 0)), new KafkaException());
    int numberOfNakadiExceptions = 0;
    for (final Exception exception : exceptions) {
        final KafkaConsumer<byte[], byte[]> kafkaConsumerMock = mock(KafkaConsumer.class);
        when(kafkaConsumerMock.poll(POLL_TIMEOUT)).thenThrow(exception);
        try {
            // ACT //
            final NakadiKafkaConsumer consumer = new NakadiKafkaConsumer(kafkaConsumerMock, ImmutableList.of(), createTpTimelineMap(), POLL_TIMEOUT);
            consumer.readEvents();
            // ASSERT //
            fail("An Exception was expected to be be thrown");
        } catch (final Exception e) {
            numberOfNakadiExceptions++;
        }
    }
    assertThat("We should get a NakadiException for every call", numberOfNakadiExceptions, equalTo(exceptions.size()));
}
Also used : TopicPartition(org.apache.kafka.common.TopicPartition) KafkaException(org.apache.kafka.common.KafkaException) NoOffsetForPartitionException(org.apache.kafka.clients.consumer.NoOffsetForPartitionException) Test(org.junit.Test)
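
For context, here is a minimal sketch of the behavior this test pins down: readEvents() is expected to translate any KafkaException raised by poll() into Nakadi's domain exception. The wrapper class, the message, and the stand-in NakadiException below are illustrative assumptions, not the actual Nakadi sources.

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.KafkaException;

// Hypothetical stand-in for org.zalando.nakadi.exceptions.NakadiException.
class NakadiException extends Exception {
    NakadiException(final String message, final Throwable cause) {
        super(message, cause);
    }
}

class ExceptionTranslatingConsumer {

    private final Consumer<byte[], byte[]> kafkaConsumer;
    private final long pollTimeoutMs;

    ExceptionTranslatingConsumer(final Consumer<byte[], byte[]> kafkaConsumer, final long pollTimeoutMs) {
        this.kafkaConsumer = kafkaConsumer;
        this.pollTimeoutMs = pollTimeoutMs;
    }

    // Any KafkaException (including subclasses such as NoOffsetForPartitionException)
    // is rethrown as the domain-level exception, which is exactly what the test counts.
    ConsumerRecords<byte[], byte[]> readEvents() throws NakadiException {
        try {
            return kafkaConsumer.poll(pollTimeoutMs);
        } catch (final KafkaException e) {
            throw new NakadiException("Error occurred when polling from kafka", e);
        }
    }
}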

Example 37 with KafkaException

use of org.apache.kafka.common.KafkaException in project nakadi by zalando.

the class KafkaTopicRepositoryTest method createKafkaFactory.

@SuppressWarnings("unchecked")
private KafkaFactory createKafkaFactory() {
    // Consumer
    final Consumer consumer = mock(Consumer.class);
    allTopics().forEach(topic -> when(consumer.partitionsFor(topic)).thenReturn(partitionsOfTopic(topic)));
    doAnswer(invocation -> {
        offsetMode = ConsumerOffsetMode.EARLIEST;
        return null;
    }).when(consumer).seekToBeginning(anyVararg());
    doAnswer(invocation -> {
        offsetMode = ConsumerOffsetMode.LATEST;
        return null;
    }).when(consumer).seekToEnd(anyVararg());
    when(consumer.position(any())).thenAnswer(invocation -> {
        final org.apache.kafka.common.TopicPartition tp = (org.apache.kafka.common.TopicPartition) invocation.getArguments()[0];
        return PARTITIONS.stream().filter(ps -> ps.topic.equals(tp.topic()) && ps.partition == tp.partition()).findFirst().map(ps -> offsetMode == ConsumerOffsetMode.LATEST ? ps.latestOffset : ps.earliestOffset).orElseThrow(KafkaException::new);
    });
    // KafkaProducer
    when(kafkaProducer.send(EXPECTED_PRODUCER_RECORD)).thenReturn(mock(Future.class));
    // KafkaFactory
    final KafkaFactory kafkaFactory = mock(KafkaFactory.class);
    when(kafkaFactory.getConsumer(KAFKA_CLIENT_ID)).thenReturn(consumer);
    when(kafkaFactory.getConsumer()).thenReturn(consumer);
    when(kafkaFactory.takeProducer()).thenReturn(kafkaProducer);
    return kafkaFactory;
}
Also used : EventPublishingException(org.zalando.nakadi.exceptions.EventPublishingException) GetChildrenBuilder(org.apache.curator.framework.api.GetChildrenBuilder) KafkaException(org.apache.kafka.common.KafkaException) NakadiException(org.zalando.nakadi.exceptions.NakadiException) Assert.assertThat(org.junit.Assert.assertThat) Future(java.util.concurrent.Future) Arrays.asList(java.util.Arrays.asList) Mockito.doAnswer(org.mockito.Mockito.doAnswer) Sets.newHashSet(com.google.common.collect.Sets.newHashSet) Assert.fail(org.junit.Assert.fail) Matchers.anyVararg(org.mockito.Matchers.anyVararg) Consumer(org.apache.kafka.clients.consumer.Consumer) ZooKeeperHolder(org.zalando.nakadi.repository.zookeeper.ZooKeeperHolder) PartitionStatistics(org.zalando.nakadi.domain.PartitionStatistics) Set(java.util.Set) PartitionInfo(org.apache.kafka.common.PartitionInfo) InvalidCursorException(org.zalando.nakadi.exceptions.InvalidCursorException) Collectors(java.util.stream.Collectors) Matchers.any(org.mockito.Matchers.any) List(java.util.List) CuratorFramework(org.apache.curator.framework.CuratorFramework) TestUtils.buildTimelineWithTopic(org.zalando.nakadi.utils.TestUtils.buildTimelineWithTopic) Timeline(org.zalando.nakadi.domain.Timeline) ZookeeperSettings(org.zalando.nakadi.repository.zookeeper.ZookeeperSettings) Matchers.containsInAnyOrder(org.hamcrest.Matchers.containsInAnyOrder) Matchers.equalTo(org.hamcrest.Matchers.equalTo) BatchItem(org.zalando.nakadi.domain.BatchItem) Node(org.apache.kafka.common.Node) Matchers.is(org.hamcrest.Matchers.is) Callback(org.apache.kafka.clients.producer.Callback) Mockito.mock(org.mockito.Mockito.mock) ProducerRecord(org.apache.kafka.clients.producer.ProducerRecord) NakadiCursor(org.zalando.nakadi.domain.NakadiCursor) NakadiSettings(org.zalando.nakadi.config.NakadiSettings) Cursor(org.zalando.nakadi.view.Cursor) Matchers.anyString(org.mockito.Matchers.anyString) ArrayList(java.util.ArrayList) UUIDGenerator(org.zalando.nakadi.util.UUIDGenerator) HashSet(java.util.HashSet) KafkaProducer(org.apache.kafka.clients.producer.KafkaProducer) ImmutableList(com.google.common.collect.ImmutableList) LinkedList(java.util.LinkedList) TimeoutException(org.apache.kafka.common.errors.TimeoutException) PartitionEndStatistics(org.zalando.nakadi.domain.PartitionEndStatistics) Test(org.junit.Test) BufferExhaustedException(org.apache.kafka.clients.producer.BufferExhaustedException) Mockito.when(org.mockito.Mockito.when) Mockito(org.mockito.Mockito) Collectors.toList(java.util.stream.Collectors.toList) EventPublishingStatus(org.zalando.nakadi.domain.EventPublishingStatus) CursorError(org.zalando.nakadi.domain.CursorError) Assert(org.junit.Assert) Collections(java.util.Collections)
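
A minimal sketch (an assumed test body, not taken from the Nakadi sources) of how the factory built above can be exercised: seekToEnd() flips the mock into LATEST mode, so position() answers with the partition's latest offset from the PARTITIONS fixture. The topic name, the vararg seekToEnd(TopicPartition...) signature that the anyVararg() stubbing implies, and the latestOffset field access are assumptions.

@Test
public void obtainsLatestOffsetAfterSeekToEnd() {
    final KafkaFactory factory = createKafkaFactory();
    final Consumer consumer = factory.getConsumer();

    // "my-topic"/partition 0 is a placeholder; it must match an entry in PARTITIONS.
    final org.apache.kafka.common.TopicPartition tp =
            new org.apache.kafka.common.TopicPartition("my-topic", 0);

    consumer.seekToEnd(tp); // the doAnswer above switches offsetMode to LATEST
    final long latest = consumer.position(tp); // answered from the PARTITIONS fixture

    assertThat(latest, equalTo(PARTITIONS.get(0).latestOffset));
}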

Example 38 with KafkaException

use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.

the class KafkaProducer method waitOnMetadata.

/**
 * Wait for cluster metadata including partitions for the given topic to be available.
 * @param topic The topic we want metadata for
 * @param partition A specific partition expected to exist in metadata, or null if there's no preference
 * @param maxWaitMs The maximum time in ms for waiting on the metadata
 * @return The cluster containing topic metadata and the amount of time we waited in ms
 */
private ClusterAndWaitTime waitOnMetadata(String topic, Integer partition, long maxWaitMs) throws InterruptedException {
    // add topic to metadata topic list if it is not there already and reset expiry
    metadata.add(topic);
    Cluster cluster = metadata.fetch();
    Integer partitionsCount = cluster.partitionCountForTopic(topic);
    // Return cached metadata if we have it, and if the record's partition is either undefined
    // or within the known partition range
    if (partitionsCount != null && (partition == null || partition < partitionsCount))
        return new ClusterAndWaitTime(cluster, 0);
    long begin = time.milliseconds();
    long remainingWaitMs = maxWaitMs;
    long elapsed;
    // Issue metadata requests until we have metadata for the topic and the requested partition,
    // or until maxWaitMs is exceeded. This is necessary in case the metadata
    // is stale and the number of partitions for this topic has increased in the meantime.
    do {
        log.trace("Requesting metadata update for topic {}.", topic);
        metadata.add(topic);
        int version = metadata.requestUpdate();
        sender.wakeup();
        try {
            metadata.awaitUpdate(version, remainingWaitMs);
        } catch (TimeoutException ex) {
            // Rethrow with original maxWaitMs to prevent logging exception with remainingWaitMs
            throw new TimeoutException("Failed to update metadata after " + maxWaitMs + " ms.");
        }
        cluster = metadata.fetch();
        elapsed = time.milliseconds() - begin;
        if (elapsed >= maxWaitMs)
            throw new TimeoutException("Failed to update metadata after " + maxWaitMs + " ms.");
        if (cluster.unauthorizedTopics().contains(topic))
            throw new TopicAuthorizationException(topic);
        remainingWaitMs = maxWaitMs - elapsed;
        partitionsCount = cluster.partitionCountForTopic(topic);
    } while (partitionsCount == null);
    if (partition != null && partition >= partitionsCount) {
        throw new KafkaException(String.format("Invalid partition given with record: %d is not in the range [0...%d).", partition, partitionsCount));
    }
    return new ClusterAndWaitTime(cluster, elapsed);
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Cluster(org.apache.kafka.common.Cluster) KafkaException(org.apache.kafka.common.KafkaException) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) TimeoutException(org.apache.kafka.common.errors.TimeoutException)
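
The maxWaitMs bound is driven by the producer's max.block.ms setting, and because TimeoutException is an ApiException it is reported through the returned future rather than thrown from send() (see the doSend() example below). A minimal, self-contained sketch of observing this timeout from the caller's side; the broker address and topic name are assumptions.

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.serialization.StringSerializer;

public class MetadataWaitDemo {
    public static void main(final String[] args) throws InterruptedException {
        final Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        // Bounds maxWaitMs in waitOnMetadata(): fail fast instead of blocking indefinitely.
        props.put("max.block.ms", "5000");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            final Future<RecordMetadata> future =
                    producer.send(new ProducerRecord<>("demo-topic", "key", "value"));
            try {
                future.get();
            } catch (final ExecutionException e) {
                if (e.getCause() instanceof TimeoutException) {
                    // Message would read: "Failed to update metadata after 5000 ms."
                    System.err.println("Metadata not available in time: " + e.getCause().getMessage());
                }
            }
        }
    }
}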

Example 39 with KafkaException

use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.

the class KafkaProducer method doSend.

/**
 * Implementation of asynchronously sending a record to a topic.
 */
private Future<RecordMetadata> doSend(ProducerRecord<K, V> record, Callback callback) {
    TopicPartition tp = null;
    try {
        // first make sure the metadata for the topic is available
        ClusterAndWaitTime clusterAndWaitTime = waitOnMetadata(record.topic(), record.partition(), maxBlockTimeMs);
        long remainingWaitMs = Math.max(0, maxBlockTimeMs - clusterAndWaitTime.waitedOnMetadataMs);
        Cluster cluster = clusterAndWaitTime.cluster;
        byte[] serializedKey;
        try {
            serializedKey = keySerializer.serialize(record.topic(), record.headers(), record.key());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() + " specified in key.serializer", cce);
        }
        byte[] serializedValue;
        try {
            serializedValue = valueSerializer.serialize(record.topic(), record.headers(), record.value());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() + " specified in value.serializer", cce);
        }
        int partition = partition(record, serializedKey, serializedValue, cluster);
        tp = new TopicPartition(record.topic(), partition);
        setReadOnly(record.headers());
        Header[] headers = record.headers().toArray();
        int serializedSize = AbstractRecords.estimateSizeInBytesUpperBound(apiVersions.maxUsableProduceMagic(), compressionType, serializedKey, serializedValue, headers);
        ensureValidRecordSize(serializedSize);
        long timestamp = record.timestamp() == null ? time.milliseconds() : record.timestamp();
        log.trace("Sending record {} with callback {} to topic {} partition {}", record, callback, record.topic(), partition);
        // producer callback will make sure to call both 'callback' and interceptor callback
        Callback interceptCallback = new InterceptorCallback<>(callback, this.interceptors, tp);
        if (transactionManager != null && transactionManager.isTransactional())
            transactionManager.maybeAddPartitionToTransaction(tp);
        RecordAccumulator.RecordAppendResult result = accumulator.append(tp, timestamp, serializedKey, serializedValue, headers, interceptCallback, remainingWaitMs);
        if (result.batchIsFull || result.newBatchCreated) {
            log.trace("Waking up the sender since topic {} partition {} is either full or getting a new batch", record.topic(), partition);
            this.sender.wakeup();
        }
        return result.future;
    // handle exceptions and record the errors;
    // API exceptions are returned in the future,
    // other exceptions are thrown directly
    } catch (ApiException e) {
        log.debug("Exception occurred during message send:", e);
        if (callback != null)
            callback.onCompletion(null, e);
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        return new FutureFailure(e);
    } catch (InterruptedException e) {
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        throw new InterruptException(e);
    } catch (BufferExhaustedException e) {
        this.errors.record();
        this.metrics.sensor("buffer-exhausted-records").record();
        this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (KafkaException e) {
        this.errors.record();
        this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (Exception e) {
        // we notify interceptor about all exceptions, since onSend is called before anything else in this method
        this.interceptors.onSendError(record, tp, e);
        throw e;
    }
}
Also used : SerializationException(org.apache.kafka.common.errors.SerializationException) InterruptException(org.apache.kafka.common.errors.InterruptException) Cluster(org.apache.kafka.common.Cluster) RecordAccumulator(org.apache.kafka.clients.producer.internals.RecordAccumulator) KafkaException(org.apache.kafka.common.KafkaException) ProducerFencedException(org.apache.kafka.common.errors.ProducerFencedException) RecordTooLargeException(org.apache.kafka.common.errors.RecordTooLargeException) AuthenticationException(org.apache.kafka.common.errors.AuthenticationException) TimeoutException(org.apache.kafka.common.errors.TimeoutException) AuthorizationException(org.apache.kafka.common.errors.AuthorizationException) ConfigException(org.apache.kafka.common.config.ConfigException) ExecutionException(java.util.concurrent.ExecutionException) TopicAuthorizationException(org.apache.kafka.common.errors.TopicAuthorizationException) ApiException(org.apache.kafka.common.errors.ApiException) OffsetCommitCallback(org.apache.kafka.clients.consumer.OffsetCommitCallback) Header(org.apache.kafka.common.header.Header) TopicPartition(org.apache.kafka.common.TopicPartition)
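
A minimal sketch of the caller-side contract these catch blocks produce: ApiExceptions complete the returned future exceptionally and are handed to the callback, while non-API failures such as a SerializationException from a mismatched serializer propagate synchronously out of send(). The producer and record are assumed to be constructed as in the previous sketch.

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.errors.SerializationException;

class SendErrorPaths {
    static void demo(final KafkaProducer<String, String> producer,
                     final ProducerRecord<String, String> record) {
        // Asynchronous path: API exceptions (e.g. TimeoutException, RecordTooLargeException)
        // complete the future exceptionally and are passed to the callback.
        producer.send(record, (metadata, exception) -> {
            if (exception != null) {
                System.err.println("Send failed asynchronously: " + exception);
            }
        });

        // Synchronous path: SerializationException is a KafkaException but not an
        // ApiException, so doSend() rethrows it from send() itself.
        try {
            producer.send(record);
        } catch (final SerializationException e) {
            System.err.println("Send failed synchronously: " + e);
        }
    }
}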

Example 40 with KafkaException

use of org.apache.kafka.common.KafkaException in project apache-kafka-on-k8s by banzaicloud.

the class Sender method maybeSendTransactionalRequest.

private boolean maybeSendTransactionalRequest(long now) {
    if (transactionManager.isCompleting() && accumulator.hasIncomplete()) {
        if (transactionManager.isAborting())
            accumulator.abortUndrainedBatches(new KafkaException("Failing batch since transaction was aborted"));
        // There may still be requests left which are being retried. Since we do not know whether they
        // were successfully appended to the broker log, we must resend them until their final status is
        // clear; otherwise the sequence numbers would no longer
        // be correct, which would lead to an OutOfSequenceException.
        if (!accumulator.flushInProgress())
            accumulator.beginFlush();
    }
    TransactionManager.TxnRequestHandler nextRequestHandler = transactionManager.nextRequestHandler(accumulator.hasIncomplete());
    if (nextRequestHandler == null)
        return false;
    AbstractRequest.Builder<?> requestBuilder = nextRequestHandler.requestBuilder();
    while (running) {
        Node targetNode = null;
        try {
            if (nextRequestHandler.needsCoordinator()) {
                targetNode = transactionManager.coordinator(nextRequestHandler.coordinatorType());
                if (targetNode == null) {
                    transactionManager.lookupCoordinator(nextRequestHandler);
                    break;
                }
                if (!NetworkClientUtils.awaitReady(client, targetNode, time, requestTimeout)) {
                    transactionManager.lookupCoordinator(nextRequestHandler);
                    break;
                }
            } else {
                targetNode = awaitLeastLoadedNodeReady(requestTimeout);
            }
            if (targetNode != null) {
                if (nextRequestHandler.isRetry())
                    time.sleep(nextRequestHandler.retryBackoffMs());
                ClientRequest clientRequest = client.newClientRequest(targetNode.idString(), requestBuilder, now, true, nextRequestHandler);
                transactionManager.setInFlightTransactionalRequestCorrelationId(clientRequest.correlationId());
                log.debug("Sending transactional request {} to node {}", requestBuilder, targetNode);
                client.send(clientRequest, now);
                return true;
            }
        } catch (IOException e) {
            log.debug("Disconnect from {} while trying to send request {}. Going " + "to back off and retry", targetNode, requestBuilder);
            if (nextRequestHandler.needsCoordinator()) {
                // We break here so that we pick up the FindCoordinator request immediately.
                transactionManager.lookupCoordinator(nextRequestHandler);
                break;
            }
        }
        time.sleep(retryBackoffMs);
        metadata.requestUpdate();
    }
    transactionManager.retry(nextRequestHandler);
    return true;
}
Also used : AbstractRequest(org.apache.kafka.common.requests.AbstractRequest) Node(org.apache.kafka.common.Node) KafkaException(org.apache.kafka.common.KafkaException) IOException(java.io.IOException) ClientRequest(org.apache.kafka.clients.ClientRequest)
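
A minimal sketch of the client-side flow that drives maybeSendTransactionalRequest(): commitTransaction() or abortTransaction() puts the TransactionManager into a completing state, and an abort fails any undrained batches with the "Failing batch since transaction was aborted" KafkaException seen above. The broker address, topic, and transactional.id are assumptions.

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.serialization.StringSerializer;

public class TransactionalSendDemo {
    public static void main(final String[] args) {
        final Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed broker address
        props.put("key.serializer", StringSerializer.class.getName());
        props.put("value.serializer", StringSerializer.class.getName());
        // Setting transactional.id enables the TransactionManager (and implies idempotence).
        props.put("transactional.id", "demo-txn-1");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.initTransactions(); // FindCoordinator + InitProducerId via the Sender loop
            try {
                producer.beginTransaction();
                producer.send(new ProducerRecord<>("demo-topic", "k", "v"));
                producer.commitTransaction(); // marks the transaction completing; Sender sends EndTxn
            } catch (final KafkaException e) {
                // Aborting also marks the transaction completing; batches that were never
                // drained fail with "Failing batch since transaction was aborted".
                // (Fatal errors such as ProducerFencedException should instead close the producer.)
                producer.abortTransaction();
            }
        }
    }
}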

Aggregations

KafkaException (org.apache.kafka.common.KafkaException): 262
Test (org.junit.Test): 69
TopicPartition (org.apache.kafka.common.TopicPartition): 56
Test (org.junit.jupiter.api.Test): 47
HashMap (java.util.HashMap): 40
IOException (java.io.IOException): 39
StreamsException (org.apache.kafka.streams.errors.StreamsException): 34
Map (java.util.Map): 32
TimeoutException (org.apache.kafka.common.errors.TimeoutException): 28
ArrayList (java.util.ArrayList): 27
List (java.util.List): 21
ByteBuffer (java.nio.ByteBuffer): 19
ExecutionException (java.util.concurrent.ExecutionException): 19
ConfigException (org.apache.kafka.common.config.ConfigException): 16
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 14
HashSet (java.util.HashSet): 13
Properties (java.util.Properties): 13
Set (java.util.Set): 11
Collectors (java.util.stream.Collectors): 11
RecordMetadata (org.apache.kafka.clients.producer.RecordMetadata): 11