Example 1 with BlockingChannel

Use of kafka.network.BlockingChannel in project voltdb by VoltDB.

In class KafkaTopicPartitionImporter, method commitOffset.

public boolean commitOffset(boolean usePausedOffset) {
    final short version = 1;
    long safe = m_gapTracker.commit(-1L);
    final long pausedOffset = usePausedOffset ? m_pauseOffset.get() : -1;
    if (m_lastCommittedOffset != pausedOffset && (safe > m_lastCommittedOffset || pausedOffset != -1)) {
        long now = System.currentTimeMillis();
        OffsetCommitResponse offsetCommitResponse = null;
        try {
            BlockingChannel channel = null;
            int retries = 3;
            if (pausedOffset != -1) {
                rateLimitedLog(Level.INFO, null, m_topicAndPartition + " is using paused offset to commit: " + pausedOffset);
            }
            while (channel == null && --retries >= 0) {
                if ((channel = m_offsetManager.get()) == null) {
                    getOffsetCoordinator();
                    rateLimitedLog(Level.ERROR, null, "Commit offset: failed to get offset coordinator for " + m_topicAndPartition);
                    continue;
                }
                safe = (pausedOffset != -1 ? pausedOffset : safe);
                OffsetCommitRequest offsetCommitRequest = new OffsetCommitRequest(m_config.getGroupId(), singletonMap(m_topicAndPartition, new OffsetAndMetadata(safe, "commit", now)), nextCorrelationId(), KafkaStreamImporterConfig.CLIENT_ID, version);
                channel.send(offsetCommitRequest.underlying());
                offsetCommitResponse = OffsetCommitResponse.readFrom(channel.receive().buffer());
                final short code = ((Short) offsetCommitResponse.errors().get(m_topicAndPartition)).shortValue();
                if (code == ErrorMapping.NotCoordinatorForConsumerCode() || code == ErrorMapping.ConsumerCoordinatorNotAvailableCode()) {
                    info(null, "Not coordinator for committing offset for " + m_topicAndPartition + ". Updating coordinator.");
                    getOffsetCoordinator();
                    channel = null;
                    continue;
                }
            }
            if (retries < 0 || offsetCommitResponse == null) {
                return false;
            }
        } catch (Exception e) {
            rateLimitedLog(Level.ERROR, e, "Failed to commit Offset for " + m_topicAndPartition);
            if (e instanceof IOException) {
                getOffsetCoordinator();
            }
            return false;
        }
        final short code = ((Short) offsetCommitResponse.errors().get(m_topicAndPartition)).shortValue();
        if (code != ErrorMapping.NoError()) {
            final String msg = "Commit offset failed for " + m_topicAndPartition;
            rateLimitedLog(Level.ERROR, ErrorMapping.exceptionFor(code), msg);
            return false;
        }
        m_lastCommittedOffset = safe;
        resetCounters();
        return true;
    }
    return false;
}
Also used: OffsetCommitRequest (kafka.javaapi.OffsetCommitRequest), OffsetCommitResponse (kafka.javaapi.OffsetCommitResponse), OffsetAndMetadata (kafka.common.OffsetAndMetadata), IOException (java.io.IOException), BlockingChannel (kafka.network.BlockingChannel), FormatException (org.voltdb.importer.formatter.FormatException)
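
The method above relies on m_offsetManager, an atomic reference to a BlockingChannel connected to the consumer group's offset coordinator; getOffsetCoordinator() (not shown) refreshes that channel when it is missing or stale. A minimal sketch of such a coordinator lookup with the Kafka 0.8.x ConsumerMetadataRequest API might look like the following; the class name, broker address, timeout, and null-on-failure contract are illustrative assumptions, not part of the VoltDB source.

import kafka.api.ConsumerMetadataRequest;
import kafka.cluster.Broker;
import kafka.common.ErrorMapping;
import kafka.javaapi.ConsumerMetadataResponse;
import kafka.network.BlockingChannel;

public final class OffsetCoordinatorLocator {

    // Hypothetical helper: ask any broker for this group's offset coordinator,
    // then reconnect the BlockingChannel directly to that coordinator.
    public static BlockingChannel connectToCoordinator(String bootstrapHost, int bootstrapPort,
            String groupId, String clientId, int correlationId) {
        BlockingChannel channel = new BlockingChannel(bootstrapHost, bootstrapPort,
                BlockingChannel.UseDefaultBufferSize(),
                BlockingChannel.UseDefaultBufferSize(),
                5000 /* read timeout, ms; placeholder */);
        channel.connect();
        channel.send(new ConsumerMetadataRequest(groupId,
                ConsumerMetadataRequest.CurrentVersion(), correlationId, clientId));
        ConsumerMetadataResponse response =
                ConsumerMetadataResponse.readFrom(channel.receive().buffer());
        if (response.errorCode() != ErrorMapping.NoError()) {
            channel.disconnect();
            return null; // callers retry, as the commit loop above does
        }
        Broker coordinator = response.coordinator();
        channel.disconnect();
        // Offset commit/fetch traffic must go to the coordinator broker itself.
        BlockingChannel coordinatorChannel = new BlockingChannel(coordinator.host(), coordinator.port(),
                BlockingChannel.UseDefaultBufferSize(),
                BlockingChannel.UseDefaultBufferSize(),
                5000);
        coordinatorChannel.connect();
        return coordinatorChannel;
    }
}

Returning null on error mirrors how commitOffset() treats an empty m_offsetManager: it logs, re-resolves the coordinator, and retries up to its retry budget.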

Example 2 with BlockingChannel

Use of kafka.network.BlockingChannel in project voltdb by VoltDB.

In class KafkaTopicPartitionImporter, method accept.

@Override
protected void accept() {
    info(null, "Starting partition fetcher for " + m_topicAndPartition);
    long submitCount = 0;
    PendingWorkTracker callbackTracker = new PendingWorkTracker();
    Formatter formatter = m_config.getFormatterBuilder().create();
    try {
        // Start with the initial leader.
        resetLeader();
        int sleepCounter = 1;
        while (shouldRun()) {
            if (m_currentOffset.get() < 0) {
                getOffsetCoordinator();
                if (m_offsetManager.get() == null) {
                    sleepCounter = backoffSleep(sleepCounter);
                    continue;
                }
                long lastOffset = getLastOffset();
                if (lastOffset == -1) {
                    sleepCounter = backoffSleep(sleepCounter);
                    continue;
                }
                m_gapTracker.resetTo(lastOffset);
                m_lastCommittedOffset = lastOffset;
                m_currentOffset.set(lastOffset);
                if (m_currentOffset.get() < 0) {
                    // We don't know the offset; back off and retry.
                    sleepCounter = backoffSleep(sleepCounter);
                    info(null, "No valid offset found for " + m_topicAndPartition);
                    continue;
                }
                info(null, "Starting offset for " + m_topicAndPartition + " is " + m_currentOffset.get());
            }
            long currentFetchCount = 0;
            // Build a fetch request if we have a valid offset and not too many requests are pending.
            FetchRequest req = m_fetchRequestBuilder.addFetch(m_topicAndPartition.topic(), m_topicAndPartition.partition(), m_currentOffset.get(), m_config.getFetchSize()).build();
            FetchResponse fetchResponse = null;
            try {
                fetchResponse = m_consumer.fetch(req);
                if (fetchResponse == null) {
                    sleepCounter = backoffSleep(sleepCounter);
                    continue;
                }
            } catch (Exception ex) {
                rateLimitedLog(Level.WARN, ex, "Failed to fetch from " + m_topicAndPartition);
                // If it is a network error, find a new leader for this partition.
                if (ex instanceof IOException) {
                    resetLeader();
                    // resetLeader() sleeps and backs off while finding the leader.
                    continue;
                }
                sleepCounter = backoffSleep(sleepCounter);
                continue;
            }
            if (fetchResponse.hasError()) {
                // Something went wrong!
                short code = fetchResponse.errorCode(m_topicAndPartition.topic(), m_topicAndPartition.partition());
                warn(ErrorMapping.exceptionFor(code), "Failed to fetch messages for %s", m_topicAndPartition);
                sleepCounter = backoffSleep(sleepCounter);
                if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                    // We asked for an invalid offset. For the simple case, ask for the last element to reset.
                    info(null, "Invalid offset requested for " + m_topicAndPartition);
                    getOffsetCoordinator();
                    m_currentOffset.set(-1L);
                    continue;
                }
                resetLeader();
                continue;
            }
            sleepCounter = 1;
            for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(m_topicAndPartition.topic(), m_topicAndPartition.partition())) {
                // We may be catching up, so don't sleep.
                currentFetchCount++;
                long currentOffset = messageAndOffset.offset();
                // If currentOffset is lower than m_currentOffset, this message was already pushed; skip it.
                if (currentOffset < m_currentOffset.get()) {
                    continue;
                }
                ByteBuffer payload = messageAndOffset.message().payload();
                Object[] params = null;
                try {
                    m_gapTracker.submit(messageAndOffset.nextOffset());
                    params = formatter.transform(payload);
                    Invocation invocation = new Invocation(m_config.getProcedure(), params);
                    TopicPartitionInvocationCallback cb = new TopicPartitionInvocationCallback(messageAndOffset.offset(), messageAndOffset.nextOffset(), callbackTracker, m_gapTracker, m_dead, m_pauseOffset);
                    if (!noTransaction) {
                        if (callProcedure(invocation, cb)) {
                            callbackTracker.produceWork();
                        } else {
                            if (isDebugEnabled()) {
                                debug(null, "Failed to process invocation, possibly bad data: " + Arrays.toString(params));
                            }
                            m_gapTracker.commit(messageAndOffset.nextOffset());
                        }
                    }
                } catch (FormatException e) {
                    rateLimitedLog(Level.WARN, e, "Failed to transform data: %s", Arrays.toString(params));
                    m_gapTracker.commit(messageAndOffset.nextOffset());
                }
                submitCount++;
                m_currentOffset.set(messageAndOffset.nextOffset());
                if (!shouldRun()) {
                    break;
                }
            }
            if (!shouldRun()) {
                break;
            }
            // Wait before fetching more if we read nothing last time.
            if (currentFetchCount == 0) {
                try {
                    Thread.sleep(m_waitSleepMs);
                } catch (InterruptedException ie) {
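                    // Ignore the interrupt; shouldRun() is re-checked at the top of the loop.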
                }
            }
            if (shouldCommit()) {
                commitOffset(false);
            }
        }
    } catch (Exception ex) {
        error(ex, "Failed to start topic partition fetcher for " + m_topicAndPartition);
    } finally {
        final boolean usePausedOffset = m_pauseOffset.get() != -1;
        boolean skipCommit = false;
        if (usePausedOffset) {
            // Paused offset is not guaranteed reliable until all callbacks have been called.
            if (!callbackTracker.waitForWorkToFinish()) {
                if (m_pauseOffset.get() < m_lastCommittedOffset) {
                    warn(null, "Committing paused offset even though a timeout occurred waiting for pending stored procedures to finish.");
                } else {
                    warn(null, "Refusing to commit paused offset because a timeout occurred waiting for pending stored procedures to finish.");
                    skipCommit = true;
                }
            }
        }
        if (!skipCommit) {
            // Force a commit. Paused offset will be re-acquired if needed.
            commitOffset(usePausedOffset);
        }
        KafkaStreamImporterConfig.closeConsumer(m_consumer);
        m_consumer = null;
        BlockingChannel channel = m_offsetManager.getAndSet(null);
        if (channel != null) {
            try {
                channel.disconnect();
            } catch (Exception ignoreIt) {
            }
        }
    }
    m_dead.compareAndSet(false, true);
    info(null, "Partition fetcher stopped for " + m_topicAndPartition + " Last commit point is: " + m_lastCommittedOffset + " Callback Rcvd: " + callbackTracker.getCallbackCount() + " Submitted: " + submitCount);
}
Also used: Invocation (org.voltdb.importer.Invocation), Formatter (org.voltdb.importer.formatter.Formatter), OffsetFetchResponse (kafka.javaapi.OffsetFetchResponse), FetchResponse (kafka.javaapi.FetchResponse), IOException (java.io.IOException), MessageAndOffset (kafka.message.MessageAndOffset), ByteBuffer (java.nio.ByteBuffer), FormatException (org.voltdb.importer.formatter.FormatException), BlockingChannel (kafka.network.BlockingChannel), OffsetFetchRequest (kafka.javaapi.OffsetFetchRequest), FetchRequest (kafka.api.FetchRequest)
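
accept() layers offset tracking, gap tracking, and callback accounting over the basic simple-consumer fetch cycle: build a FetchRequest at the current offset, fetch, iterate the message set, and advance to nextOffset(). A stripped-down sketch of just that cycle is below; the broker address, topic, client id, fetch sizes, and iteration bound are placeholders, not values from the VoltDB source.

import java.nio.ByteBuffer;
import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

public final class MinimalFetchLoop {
    public static void main(String[] args) {
        // Placeholder broker, timeout, buffer size, and client id.
        SimpleConsumer consumer = new SimpleConsumer("broker-host", 9092,
                100000 /* socket timeout, ms */, 64 * 1024 /* buffer */, "demo-client");
        String topic = "demo-topic";
        int partition = 0;
        long offset = 0L;
        try {
            for (int i = 0; i < 10; i++) {
                FetchRequest req = new FetchRequestBuilder()
                        .clientId("demo-client")
                        .addFetch(topic, partition, offset, 64 * 1024)
                        .build();
                FetchResponse response = consumer.fetch(req);
                if (response.hasError()) {
                    // Production code inspects errorCode(topic, partition),
                    // resets the leader or offset, and backs off, as accept() does above.
                    break;
                }
                for (MessageAndOffset messageAndOffset : response.messageSet(topic, partition)) {
                    ByteBuffer payload = messageAndOffset.message().payload();
                    byte[] bytes = new byte[payload.limit()];
                    payload.get(bytes);
                    System.out.println(messageAndOffset.offset() + ": " + new String(bytes));
                    // Advance past this message, mirroring m_currentOffset.set(messageAndOffset.nextOffset()).
                    offset = messageAndOffset.nextOffset();
                }
            }
        } finally {
            consumer.close();
        }
    }
}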

Example 3 with BlockingChannel

Use of kafka.network.BlockingChannel in project voltdb by VoltDB.

In class KafkaTopicPartitionImporter, method getClientTopicOffset.

private OffsetFetchResponse getClientTopicOffset() {
    final short version = 1;
    OffsetFetchResponse rsp = null;
    Throwable fault = null;
    for (int attempts = 0; attempts < 3; ++attempts) {
        try {
            final OffsetFetchRequest rq = new OffsetFetchRequest(m_config.getGroupId(), singletonList(m_topicAndPartition), version, nextCorrelationId(), KafkaStreamImporterConfig.CLIENT_ID);
            BlockingChannel channel = m_offsetManager.get();
            channel.send(rq.underlying());
            rsp = OffsetFetchResponse.readFrom(channel.receive().buffer());
            short code = rsp.offsets().get(m_topicAndPartition).error();
            if (code == ErrorMapping.NoError()) {
                fault = null;
                break;
            }
            fault = ErrorMapping.exceptionFor(code);
            backoffSleep(attempts + 1);
            if (code == ErrorMapping.NotCoordinatorForConsumerCode() || code == ErrorMapping.ConsumerCoordinatorNotAvailableCode()) {
                getOffsetCoordinator();
            } else if (code == ErrorMapping.UnknownTopicOrPartitionCode()) {
                getOffsetCoordinator();
                fault = null;
                continue;
            }
        } catch (Exception e) {
            if (e instanceof IOException) {
                getOffsetCoordinator();
            }
            fault = e;
        }
    }
    if (fault != null) {
        rateLimitedLog(Level.WARN, fault, "Unable to fetch committed offset for " + m_topicAndPartition);
        rsp = null;
    }
    return rsp;
}
Also used: OffsetFetchResponse (kafka.javaapi.OffsetFetchResponse), OffsetFetchRequest (kafka.javaapi.OffsetFetchRequest), IOException (java.io.IOException), BlockingChannel (kafka.network.BlockingChannel), FormatException (org.voltdb.importer.formatter.FormatException)
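
getClientTopicOffset() asks the coordinator for the group's last committed offset via an OffsetFetchRequest over the cached BlockingChannel. A self-contained sketch of the same call is below, assuming an already connected coordinator channel (see the lookup sketch under Example 1); the group id, client id, topic, and partition are placeholders, not values from the VoltDB source.

import static java.util.Collections.singletonList;

import kafka.common.ErrorMapping;
import kafka.common.OffsetMetadataAndError;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetFetchRequest;
import kafka.javaapi.OffsetFetchResponse;
import kafka.network.BlockingChannel;

public final class CommittedOffsetReader {

    // Hypothetical helper: fetch the committed offset for one partition,
    // returning -1 when the coordinator reports an error.
    public static long readCommittedOffset(BlockingChannel coordinatorChannel,
            String groupId, String clientId, int correlationId) {
        TopicAndPartition partition = new TopicAndPartition("demo-topic", 0);
        // Version 1 reads offsets from Kafka's internal offsets topic;
        // version 0 would read ZooKeeper-stored offsets instead.
        OffsetFetchRequest request = new OffsetFetchRequest(groupId,
                singletonList(partition), (short) 1, correlationId, clientId);
        coordinatorChannel.send(request.underlying());
        OffsetFetchResponse response =
                OffsetFetchResponse.readFrom(coordinatorChannel.receive().buffer());
        OffsetMetadataAndError result = response.offsets().get(partition);
        if (result.error() != ErrorMapping.NoError()) {
            // Callers retry or refresh the coordinator, as getClientTopicOffset() does above.
            return -1L;
        }
        return result.offset();
    }
}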

Aggregations

IOException (java.io.IOException): 3 uses
BlockingChannel (kafka.network.BlockingChannel): 3 uses
FormatException (org.voltdb.importer.formatter.FormatException): 3 uses
OffsetFetchRequest (kafka.javaapi.OffsetFetchRequest): 2 uses
OffsetFetchResponse (kafka.javaapi.OffsetFetchResponse): 2 uses
ByteBuffer (java.nio.ByteBuffer): 1 use
FetchRequest (kafka.api.FetchRequest): 1 use
OffsetAndMetadata (kafka.common.OffsetAndMetadata): 1 use
FetchResponse (kafka.javaapi.FetchResponse): 1 use
OffsetCommitRequest (kafka.javaapi.OffsetCommitRequest): 1 use
OffsetCommitResponse (kafka.javaapi.OffsetCommitResponse): 1 use
MessageAndOffset (kafka.message.MessageAndOffset): 1 use
Invocation (org.voltdb.importer.Invocation): 1 use
Formatter (org.voltdb.importer.formatter.Formatter): 1 use