Uses of InterruptException (org.apache.kafka.common.errors.InterruptException) in project kafka by apache

Example 1 with InterruptException

Use of org.apache.kafka.common.errors.InterruptException in project kafka by apache.

Class KafkaConsumerTest, method consumerCloseTest:

private void consumerCloseTest(final long closeTimeoutMs, List<? extends AbstractResponse> responses, long waitMs, boolean interrupt) throws Exception {
    int rebalanceTimeoutMs = 60000;
    int sessionTimeoutMs = 30000;
    int heartbeatIntervalMs = 5000;
    Time time = new MockTime();
    Cluster cluster = TestUtils.singletonCluster(topic, 1);
    Node node = cluster.nodes().get(0);
    Metadata metadata = new Metadata(0, Long.MAX_VALUE);
    metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds());
    MockClient client = new MockClient(time, metadata);
    client.setNode(node);
    PartitionAssignor assignor = new RoundRobinAssignor();
    final KafkaConsumer<String, String> consumer = newConsumer(time, client, metadata, assignor, rebalanceTimeoutMs, sessionTimeoutMs, heartbeatIntervalMs, false, 1000);
    consumer.subscribe(Arrays.asList(topic), getConsumerRebalanceListener(consumer));
    Node coordinator = prepareRebalance(client, node, assignor, Arrays.asList(tp0), null);
    client.prepareMetadataUpdate(cluster, Collections.<String>emptySet());
    // Poll with responses
    client.prepareResponseFrom(fetchResponse(tp0, 0, 1), node);
    client.prepareResponseFrom(fetchResponse(tp0, 1, 0), node);
    consumer.poll(0);
    // Initiate close() after a commit request on another thread.
    // Kafka consumer is single-threaded, but the implementation allows calls on a
    // different thread as long as the calls are not executed concurrently. So this is safe.
    ExecutorService executor = Executors.newSingleThreadExecutor();
    final AtomicReference<Exception> closeException = new AtomicReference<Exception>();
    try {
        Future<?> future = executor.submit(new Runnable() {

            @Override
            public void run() {
                consumer.commitAsync();
                try {
                    consumer.close(closeTimeoutMs, TimeUnit.MILLISECONDS);
                } catch (Exception e) {
                    closeException.set(e);
                }
            }
        });
        // The close task should not complete until the commit succeeds or the
        // close times out, if the close timeout is not zero.
        try {
            future.get(100, TimeUnit.MILLISECONDS);
            if (closeTimeoutMs != 0)
                fail("Close completed without waiting for commit or leave response");
        } catch (TimeoutException e) {
            // Expected exception
        }
        // Ensure close has started and queued at least one more request after commitAsync
        client.waitForRequests(2, 1000);
        // In non-graceful mode, close() times out without an exception even though commit response is pending
        for (int i = 0; i < responses.size(); i++) {
            client.waitForRequests(1, 1000);
            client.respondFrom(responses.get(i), coordinator);
            if (i != responses.size() - 1) {
                try {
                    future.get(100, TimeUnit.MILLISECONDS);
                    fail("Close completed without waiting for response");
                } catch (TimeoutException e) {
                    // Expected exception
                }
            }
        }
        if (waitMs > 0)
            time.sleep(waitMs);
        if (interrupt)
            assertTrue("Close terminated prematurely", future.cancel(true));
        // Make sure that close task completes and another task can be run on the single threaded executor
        executor.submit(new Runnable() {

            @Override
            public void run() {
            }
        }).get(500, TimeUnit.MILLISECONDS);
        if (!interrupt) {
            // Should succeed without TimeoutException or ExecutionException
            future.get(500, TimeUnit.MILLISECONDS);
            assertNull("Unexpected exception during close", closeException.get());
        } else
            assertTrue("Expected exception not thrown " + closeException, closeException.get() instanceof InterruptException);
    } finally {
        executor.shutdownNow();
    }
}
Also used: Node (org.apache.kafka.common.Node), Metadata (org.apache.kafka.clients.Metadata), InterruptException (org.apache.kafka.common.errors.InterruptException), Cluster (org.apache.kafka.common.Cluster), MockTime (org.apache.kafka.common.utils.MockTime), Time (org.apache.kafka.common.utils.Time), AtomicReference (java.util.concurrent.atomic.AtomicReference), KafkaException (org.apache.kafka.common.KafkaException), TimeoutException (java.util.concurrent.TimeoutException), WakeupException (org.apache.kafka.common.errors.WakeupException), ExpectedException (org.junit.rules.ExpectedException), ExecutorService (java.util.concurrent.ExecutorService), PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor), MockClient (org.apache.kafka.clients.MockClient)
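
The pattern this test exercises carries over to application code: close() may be called from a thread other than the polling thread, and interrupting that thread surfaces as the unchecked InterruptException. A minimal sketch, assuming the same imports as the test above and an already-subscribed final consumer variable (the timeout is illustrative):

ExecutorService executor = Executors.newSingleThreadExecutor();
Future<?> closeFuture = executor.submit(new Runnable() {

    @Override
    public void run() {
        try {
            // Blocks until pending commits and the leave-group request complete,
            // or until the timeout elapses.
            consumer.close(30, TimeUnit.SECONDS);
        } catch (InterruptException e) {
            // Raised when this thread is interrupted; Kafka wraps the
            // underlying InterruptedException.
        }
    }
});
// Later, if we decide not to wait any longer, interrupting the close thread
// makes close() abort with InterruptException:
closeFuture.cancel(true);
executor.shutdownNow();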

Example 2 with InterruptException

Use of org.apache.kafka.common.errors.InterruptException in project kafka by apache.

Class ConsumerCoordinator, method onJoinPrepare:

@Override
protected void onJoinPrepare(int generation, String memberId) {
    // commit offsets prior to rebalance if auto-commit enabled
    maybeAutoCommitOffsetsSync(rebalanceTimeoutMs);
    // execute the user's callback before rebalance
    ConsumerRebalanceListener listener = subscriptions.listener();
    log.info("Revoking previously assigned partitions {} for group {}", subscriptions.assignedPartitions(), groupId);
    try {
        Set<TopicPartition> revoked = new HashSet<>(subscriptions.assignedPartitions());
        listener.onPartitionsRevoked(revoked);
    } catch (WakeupException | InterruptException e) {
        throw e;
    } catch (Exception e) {
        log.error("User provided listener {} for group {} failed on partition revocation", listener.getClass().getName(), groupId, e);
    }
    isLeader = false;
    subscriptions.resetGroupSubscription();
}
Also used: TopicPartition (org.apache.kafka.common.TopicPartition), InterruptException (org.apache.kafka.common.errors.InterruptException), ConsumerRebalanceListener (org.apache.kafka.clients.consumer.ConsumerRebalanceListener), WakeupException (org.apache.kafka.common.errors.WakeupException), GroupAuthorizationException (org.apache.kafka.common.errors.GroupAuthorizationException), RetriableCommitFailedException (org.apache.kafka.clients.consumer.RetriableCommitFailedException), KafkaException (org.apache.kafka.common.KafkaException), RetriableException (org.apache.kafka.common.errors.RetriableException), TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException), CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException), HashSet (java.util.HashSet)
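
Note the contract this gives user callbacks: only WakeupException and InterruptException escape onJoinPrepare and abort the rebalance, while any other exception thrown from onPartitionsRevoked is logged and swallowed. A hedged sketch of a listener written against that contract, assuming the usual consumer imports (flushOffsets is a hypothetical application helper):

consumer.subscribe(Arrays.asList("my-topic"), new ConsumerRebalanceListener() {

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // A failure here is logged by ConsumerCoordinator but does not abort
        // the rebalance, unless it is a WakeupException or InterruptException.
        flushOffsets(partitions); // hypothetical helper: persist progress before losing ownership
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // See Example 3 for the assignment side of the callback.
    }
});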

Example 3 with InterruptException

Use of org.apache.kafka.common.errors.InterruptException in project kafka by apache.

Class ConsumerCoordinator, method onJoinComplete:

@Override
protected void onJoinComplete(int generation, String memberId, String assignmentStrategy, ByteBuffer assignmentBuffer) {
    // only the leader is responsible for monitoring for metadata changes (i.e. partition changes)
    if (!isLeader)
        assignmentSnapshot = null;
    PartitionAssignor assignor = lookupAssignor(assignmentStrategy);
    if (assignor == null)
        throw new IllegalStateException("Coordinator selected invalid assignment protocol: " + assignmentStrategy);
    Assignment assignment = ConsumerProtocol.deserializeAssignment(assignmentBuffer);
    // set the flag to refresh last committed offsets
    subscriptions.needRefreshCommits();
    // update partition assignment
    subscriptions.assignFromSubscribed(assignment.partitions());
    // check if the assignment contains some topics that were not in the original
    // subscription, if yes we will obey what leader has decided and add these topics
    // into the subscriptions as long as they still match the subscribed pattern
    //
    // TODO this part of the logic should be removed once we allow regex on leader assign
    Set<String> addedTopics = new HashSet<>();
    for (TopicPartition tp : subscriptions.assignedPartitions()) {
        if (!joinedSubscription.contains(tp.topic()))
            addedTopics.add(tp.topic());
    }
    if (!addedTopics.isEmpty()) {
        Set<String> newSubscription = new HashSet<>(subscriptions.subscription());
        Set<String> newJoinedSubscription = new HashSet<>(joinedSubscription);
        newSubscription.addAll(addedTopics);
        newJoinedSubscription.addAll(addedTopics);
        this.subscriptions.subscribeFromPattern(newSubscription);
        this.joinedSubscription = newJoinedSubscription;
    }
    // update the metadata and enforce a refresh to make sure the fetcher can start
    // fetching data in the next iteration
    this.metadata.setTopics(subscriptions.groupSubscription());
    client.ensureFreshMetadata();
    // give the assignor a chance to update internal state based on the received assignment
    assignor.onAssignment(assignment);
    // reschedule the auto commit starting from now
    this.nextAutoCommitDeadline = time.milliseconds() + autoCommitIntervalMs;
    // execute the user's callback after rebalance
    ConsumerRebalanceListener listener = subscriptions.listener();
    log.info("Setting newly assigned partitions {} for group {}", subscriptions.assignedPartitions(), groupId);
    try {
        Set<TopicPartition> assigned = new HashSet<>(subscriptions.assignedPartitions());
        listener.onPartitionsAssigned(assigned);
    } catch (WakeupException | InterruptException e) {
        throw e;
    } catch (Exception e) {
        log.error("User provided listener {} for group {} failed on partition assignment", listener.getClass().getName(), groupId, e);
    }
}
Also used: InterruptException (org.apache.kafka.common.errors.InterruptException), ConsumerRebalanceListener (org.apache.kafka.clients.consumer.ConsumerRebalanceListener), WakeupException (org.apache.kafka.common.errors.WakeupException), GroupAuthorizationException (org.apache.kafka.common.errors.GroupAuthorizationException), RetriableCommitFailedException (org.apache.kafka.clients.consumer.RetriableCommitFailedException), KafkaException (org.apache.kafka.common.KafkaException), RetriableException (org.apache.kafka.common.errors.RetriableException), TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException), CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException), Assignment (org.apache.kafka.clients.consumer.internals.PartitionAssignor.Assignment), TopicPartition (org.apache.kafka.common.TopicPartition), HashSet (java.util.HashSet)
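
Since onPartitionsAssigned runs only after the coordinator has applied the assignment, refreshed committed offsets, and rescheduled auto-commit, it is a safe place to reposition the consumer. A hedged sketch that seeks to offsets kept in an external store (offsetStore and its offsetFor method are hypothetical):

consumer.subscribe(Pattern.compile("my-.*"), new ConsumerRebalanceListener() {

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        for (TopicPartition tp : partitions)
            consumer.seek(tp, offsetStore.offsetFor(tp)); // hypothetical external offset store
    }

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
    }
});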

Example 4 with InterruptException

Use of org.apache.kafka.common.errors.InterruptException in project kafka by apache.

Class KafkaProducer, method doSend:

/**
     * Implementation of asynchronously sending a record to a topic.
     */
private Future<RecordMetadata> doSend(ProducerRecord<K, V> record, Callback callback) {
    TopicPartition tp = null;
    try {
        // first make sure the metadata for the topic is available
        ClusterAndWaitTime clusterAndWaitTime = waitOnMetadata(record.topic(), record.partition(), maxBlockTimeMs);
        long remainingWaitMs = Math.max(0, maxBlockTimeMs - clusterAndWaitTime.waitedOnMetadataMs);
        Cluster cluster = clusterAndWaitTime.cluster;
        byte[] serializedKey;
        try {
            serializedKey = keySerializer.serialize(record.topic(), record.key());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() + " specified in key.serializer");
        }
        byte[] serializedValue;
        try {
            serializedValue = valueSerializer.serialize(record.topic(), record.value());
        } catch (ClassCastException cce) {
            throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() + " specified in value.serializer");
        }
        int partition = partition(record, serializedKey, serializedValue, cluster);
        int serializedSize = Records.LOG_OVERHEAD + Record.recordSize(serializedKey, serializedValue);
        ensureValidRecordSize(serializedSize);
        tp = new TopicPartition(record.topic(), partition);
        long timestamp = record.timestamp() == null ? time.milliseconds() : record.timestamp();
        log.trace("Sending record {} with callback {} to topic {} partition {}", record, callback, record.topic(), partition);
        // producer callback will make sure to call both 'callback' and interceptor callback
        Callback interceptCallback = this.interceptors == null ? callback : new InterceptorCallback<>(callback, this.interceptors, tp);
        RecordAccumulator.RecordAppendResult result = accumulator.append(tp, timestamp, serializedKey, serializedValue, interceptCallback, remainingWaitMs);
        if (result.batchIsFull || result.newBatchCreated) {
            log.trace("Waking up the sender since topic {} partition {} is either full or getting a new batch", record.topic(), partition);
            this.sender.wakeup();
        }
        return result.future;
    // Handle exceptions and record the errors: API exceptions are returned
    // in the future, while other exceptions are thrown directly.
    } catch (ApiException e) {
        log.debug("Exception occurred during message send:", e);
        if (callback != null)
            callback.onCompletion(null, e);
        this.errors.record();
        if (this.interceptors != null)
            this.interceptors.onSendError(record, tp, e);
        return new FutureFailure(e);
    } catch (InterruptedException e) {
        this.errors.record();
        if (this.interceptors != null)
            this.interceptors.onSendError(record, tp, e);
        throw new InterruptException(e);
    } catch (BufferExhaustedException e) {
        this.errors.record();
        this.metrics.sensor("buffer-exhausted-records").record();
        if (this.interceptors != null)
            this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (KafkaException e) {
        this.errors.record();
        if (this.interceptors != null)
            this.interceptors.onSendError(record, tp, e);
        throw e;
    } catch (Exception e) {
        // we notify interceptor about all exceptions, since onSend is called before anything else in this method
        if (this.interceptors != null)
            this.interceptors.onSendError(record, tp, e);
        throw e;
    }
}
Also used: SerializationException (org.apache.kafka.common.errors.SerializationException), InterruptException (org.apache.kafka.common.errors.InterruptException), Cluster (org.apache.kafka.common.Cluster), RecordAccumulator (org.apache.kafka.clients.producer.internals.RecordAccumulator), KafkaException (org.apache.kafka.common.KafkaException), TimeoutException (org.apache.kafka.common.errors.TimeoutException), ConfigException (org.apache.kafka.common.config.ConfigException), ExecutionException (java.util.concurrent.ExecutionException), TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException), RecordTooLargeException (org.apache.kafka.common.errors.RecordTooLargeException), ApiException (org.apache.kafka.common.errors.ApiException), TopicPartition (org.apache.kafka.common.TopicPartition)
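
The catch clauses split failures into two delivery paths: an ApiException is reported through the returned future (and the callback), while an InterruptedException caught while blocking on metadata or buffer space is rethrown synchronously as the unchecked InterruptException. A hedged sketch of a caller distinguishing the two (topic, key, and value are illustrative):

try {
    Future<RecordMetadata> future =
        producer.send(new ProducerRecord<String, String>("my-topic", "key", "value"));
    future.get(); // API-level failures surface here, wrapped in ExecutionException
} catch (InterruptException e) {
    // Thrown synchronously by send() when the calling thread is interrupted;
    // Kafka re-sets the thread's interrupt status before throwing.
} catch (InterruptedException | ExecutionException e) {
    // InterruptedException comes from get(); ExecutionException wraps the send error.
}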

Example 5 with InterruptException

Use of org.apache.kafka.common.errors.InterruptException in project kafka by apache.

Class KafkaProducer, method flush:

/**
     * Invoking this method makes all buffered records immediately available to send (even if <code>linger.ms</code> is
     * greater than 0) and blocks on the completion of the requests associated with these records. The post-condition
     * of <code>flush()</code> is that any previously sent record will have completed (e.g. <code>Future.isDone() == true</code>).
     * A request is considered completed when it is successfully acknowledged
     * according to the <code>acks</code> configuration you have specified or else it results in an error.
     * <p>
     * Other threads can continue sending records while one thread is blocked waiting for a flush call to complete,
     * however no guarantee is made about the completion of records sent after the flush call begins.
     * <p>
     * This method can be useful when consuming from some input system and producing into Kafka. The <code>flush()</code> call
     * gives a convenient way to ensure all previously sent messages have actually completed.
     * <p>
     * This example shows how to consume from one Kafka topic and produce to another Kafka topic:
     * <pre>
     * {@code
     * for (ConsumerRecord<String, String> record : consumer.poll(100))
     *     producer.send(new ProducerRecord("my-topic", record.key(), record.value()));
     * producer.flush();
     * consumer.commitSync();
     * }
     * </pre>
     *
     * Note that the above example may drop records if the produce request fails. If we want to ensure that this does not occur
     * we need to set <code>retries=&lt;large_number&gt;</code> in our config.
     *
     * @throws InterruptException If the thread is interrupted while blocked
     */
@Override
public void flush() {
    log.trace("Flushing accumulated records in producer.");
    this.accumulator.beginFlush();
    this.sender.wakeup();
    try {
        this.accumulator.awaitFlushCompletion();
    } catch (InterruptedException e) {
        throw new InterruptException("Flush interrupted.", e);
    }
}
Also used: InterruptException (org.apache.kafka.common.errors.InterruptException)
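
Because InterruptException is unchecked, callers are not forced to handle it, but a consume-and-produce loop like the one in the Javadoc above can catch it to shut down cleanly. A hedged sketch (topic names and the poll timeout are illustrative):

try {
    for (ConsumerRecord<String, String> record : consumer.poll(100))
        producer.send(new ProducerRecord<String, String>("out-topic", record.key(), record.value()));
    producer.flush(); // may throw InterruptException if this thread is interrupted
    consumer.commitSync();
} catch (InterruptException e) {
    // The flush was interrupted before all records completed; close without waiting.
    producer.close(0, TimeUnit.MILLISECONDS);
}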

Aggregations

InterruptException (org.apache.kafka.common.errors.InterruptException): 5 usages
KafkaException (org.apache.kafka.common.KafkaException): 4 usages
TopicPartition (org.apache.kafka.common.TopicPartition): 3 usages
TopicAuthorizationException (org.apache.kafka.common.errors.TopicAuthorizationException): 3 usages
WakeupException (org.apache.kafka.common.errors.WakeupException): 3 usages
HashSet (java.util.HashSet): 2 usages
CommitFailedException (org.apache.kafka.clients.consumer.CommitFailedException): 2 usages
ConsumerRebalanceListener (org.apache.kafka.clients.consumer.ConsumerRebalanceListener): 2 usages
RetriableCommitFailedException (org.apache.kafka.clients.consumer.RetriableCommitFailedException): 2 usages
Cluster (org.apache.kafka.common.Cluster): 2 usages
GroupAuthorizationException (org.apache.kafka.common.errors.GroupAuthorizationException): 2 usages
RetriableException (org.apache.kafka.common.errors.RetriableException): 2 usages
ExecutionException (java.util.concurrent.ExecutionException): 1 usage
ExecutorService (java.util.concurrent.ExecutorService): 1 usage
TimeoutException (java.util.concurrent.TimeoutException): 1 usage
AtomicReference (java.util.concurrent.atomic.AtomicReference): 1 usage
Metadata (org.apache.kafka.clients.Metadata): 1 usage
MockClient (org.apache.kafka.clients.MockClient): 1 usage
PartitionAssignor (org.apache.kafka.clients.consumer.internals.PartitionAssignor): 1 usage
Assignment (org.apache.kafka.clients.consumer.internals.PartitionAssignor.Assignment): 1 usage