Example 1 with Invocation

Use of org.voltdb.importer.Invocation in project voltdb by VoltDB.

The class PullSocketImporter, method susceptibleRun:

private void susceptibleRun() {
    if (m_eos.get())
        return;
    info(null, "Starting socket puller for " + m_config.getResourceID());
    m_thread = Optional.of(Thread.currentThread());
    Optional<BufferedReader> reader = Optional.empty();
    Formatter formatter = m_config.getFormatterBuilder().create();
    while (!m_eos.get()) {
        try {
            reader = attemptBufferedReader();
            if (!reader.isPresent()) {
                sleep(2_000);
                continue;
            }
            BufferedReader br = reader.get();
            String csv = null;
            while ((csv = br.readLine()) != null) {
                try {
                    Object[] params = formatter.transform(ByteBuffer.wrap(csv.getBytes()));
                    Invocation invocation = new Invocation(m_config.getProcedure(), params);
                    if (!callProcedure(invocation)) {
                        if (isDebugEnabled()) {
                            debug(null, "Failed to process Invocation possibly bad data: " + csv);
                        }
                    }
                } catch (FormatException e) {
                    rateLimitedLog(Level.ERROR, e, "Failed to transform data: %s", csv);
                }
            }
            if (csv == null) {
                warn(null, m_config.getResourceID() + " peer terminated stream");
            }
        } catch (EOFException e) {
            rateLimitedLog(Level.WARN, e, m_config.getResourceID() + " peer terminated stream");
        } catch (InterruptedException e) {
            if (m_eos.get())
                return;
            rateLimitedLog(Level.ERROR, e, "Socket puller %s was interrupted", m_config.getResourceID());
        } catch (InterruptedIOException e) {
            if (m_eos.get())
                return;
            rateLimitedLog(Level.ERROR, e, "Socket puller for %s was interrupted", m_config.getResourceID());
        } catch (IOException e) {
            rateLimitedLog(Level.ERROR, e, "Read fault for %s", m_config.getResourceID());
        }
    }
    info(null, "Stopping socket puller for " + m_config.getResourceID());
}
Also used : InterruptedIOException(java.io.InterruptedIOException) Invocation(org.voltdb.importer.Invocation) Formatter(org.voltdb.importer.formatter.Formatter) BufferedReader(java.io.BufferedReader) EOFException(java.io.EOFException) IOException(java.io.IOException) FormatException(org.voltdb.importer.formatter.FormatException)
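
The heart of susceptibleRun is a transform-and-invoke step: each line read from the socket is parsed by the Formatter into procedure parameters, wrapped in an Invocation, and handed to callProcedure. Below is a minimal sketch of that step in isolation. The class name LineHandler, the submitter parameter (standing in for the callProcedure method the importer inherits from its base class), and the explicit UTF-8 charset are illustrative assumptions, not part of the VoltDB source.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.function.Predicate;

import org.voltdb.importer.Invocation;
import org.voltdb.importer.formatter.FormatException;
import org.voltdb.importer.formatter.Formatter;

final class LineHandler {
    // Transform one line of input into an Invocation and submit it.
    // Returns false when the invocation could not be submitted.
    static boolean handleLine(Formatter formatter, String procedure, String line,
                              Predicate<Invocation> submitter) throws FormatException {
        // The formatter parses the raw bytes into procedure parameters.
        Object[] params = formatter.transform(ByteBuffer.wrap(line.getBytes(StandardCharsets.UTF_8)));
        // An Invocation pairs the target procedure name with its parameters.
        return submitter.test(new Invocation(procedure, params));
    }
}

In the importer above, a false return from callProcedure only produces a debug log: bad rows are skipped rather than retried.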

Example 2 with Invocation

Use of org.voltdb.importer.Invocation in project voltdb by VoltDB.

The class KafkaTopicPartitionImporter, method accept:

@Override
protected void accept() {
    info(null, "Starting partition fetcher for " + m_topicAndPartition);
    long submitCount = 0;
    PendingWorkTracker callbackTracker = new PendingWorkTracker();
    Formatter formatter = m_config.getFormatterBuilder().create();
    try {
        //Start with the current partition leader.
        resetLeader();
        int sleepCounter = 1;
        while (shouldRun()) {
            if (m_currentOffset.get() < 0) {
                getOffsetCoordinator();
                if (m_offsetManager.get() == null) {
                    sleepCounter = backoffSleep(sleepCounter);
                    continue;
                }
                long lastOffset = getLastOffset();
                if (lastOffset == -1) {
                    sleepCounter = backoffSleep(sleepCounter);
                    continue;
                }
                m_gapTracker.resetTo(lastOffset);
                m_lastCommittedOffset = lastOffset;
                m_currentOffset.set(lastOffset);
                if (m_currentOffset.get() < 0) {
                    //If we don't know the offset, back off and retry.
                    sleepCounter = backoffSleep(sleepCounter);
                    info(null, "No valid offset found for " + m_topicAndPartition);
                    continue;
                }
                info(null, "Starting offset for " + m_topicAndPartition + " is " + m_currentOffset.get());
            }
            long currentFetchCount = 0;
            //Build fetch request if we have a valid offset and not too many requests are pending.
            FetchRequest req = m_fetchRequestBuilder.addFetch(m_topicAndPartition.topic(), m_topicAndPartition.partition(), m_currentOffset.get(), m_config.getFetchSize()).build();
            FetchResponse fetchResponse = null;
            try {
                fetchResponse = m_consumer.fetch(req);
                if (fetchResponse == null) {
                    sleepCounter = backoffSleep(sleepCounter);
                    continue;
                }
            } catch (Exception ex) {
                rateLimitedLog(Level.WARN, ex, "Failed to fetch from " + m_topicAndPartition);
                //If it's a network error, find a new leader for this partition.
                if (ex instanceof IOException) {
                    resetLeader();
                    //resetLeader sleeps and backs off while locating the new leader.
                    continue;
                }
                sleepCounter = backoffSleep(sleepCounter);
                continue;
            }
            if (fetchResponse.hasError()) {
                // Something went wrong!
                short code = fetchResponse.errorCode(m_topicAndPartition.topic(), m_topicAndPartition.partition());
                warn(ErrorMapping.exceptionFor(code), "Failed to fetch messages for %s", m_topicAndPartition);
                sleepCounter = backoffSleep(sleepCounter);
                if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                    // We asked for an invalid offset. In the simple case, ask for the last element to reset.
                    info(null, "Invalid offset requested for " + m_topicAndPartition);
                    getOffsetCoordinator();
                    m_currentOffset.set(-1L);
                    continue;
                }
                resetLeader();
                continue;
            }
            sleepCounter = 1;
            for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(m_topicAndPartition.topic(), m_topicAndPartition.partition())) {
                //We may be catching up, so don't sleep.
                currentFetchCount++;
                long currentOffset = messageAndOffset.offset();
                //If currentOffset is lower, we have already pushed this message.
                if (currentOffset < m_currentOffset.get()) {
                    continue;
                }
                ByteBuffer payload = messageAndOffset.message().payload();
                Object[] params = null;
                try {
                    m_gapTracker.submit(messageAndOffset.nextOffset());
                    params = formatter.transform(payload);
                    Invocation invocation = new Invocation(m_config.getProcedure(), params);
                    TopicPartitionInvocationCallback cb = new TopicPartitionInvocationCallback(messageAndOffset.offset(), messageAndOffset.nextOffset(), callbackTracker, m_gapTracker, m_dead, m_pauseOffset);
                    if (!noTransaction) {
                        if (callProcedure(invocation, cb)) {
                            callbackTracker.produceWork();
                        } else {
                            if (isDebugEnabled()) {
                                debug(null, "Failed to process Invocation possibly bad data: " + Arrays.toString(params));
                            }
                            m_gapTracker.commit(messageAndOffset.nextOffset());
                        }
                    }
                } catch (FormatException e) {
                    rateLimitedLog(Level.WARN, e, "Failed to tranform data: %s", Arrays.toString(params));
                    m_gapTracker.commit(messageAndOffset.nextOffset());
                }
                submitCount++;
                m_currentOffset.set(messageAndOffset.nextOffset());
                if (!shouldRun()) {
                    break;
                }
            }
            if (!shouldRun()) {
                break;
            }
            //wait to fetch more if we read nothing last time.
            if (currentFetchCount == 0) {
                try {
                    Thread.sleep(m_waitSleepMs);
                } catch (InterruptedException ie) {
                    // Ignore the interrupt; shouldRun() is re-checked at the top of the loop.
                }
            }
            if (shouldCommit()) {
                commitOffset(false);
            }
        }
    } catch (Exception ex) {
        error(ex, "Failed to start topic partition fetcher for " + m_topicAndPartition);
    } finally {
        final boolean usePausedOffset = m_pauseOffset.get() != -1;
        boolean skipCommit = false;
        if (usePausedOffset) {
            // Paused offset is not guaranteed reliable until all callbacks have been called.
            if (!callbackTracker.waitForWorkToFinish()) {
                if (m_pauseOffset.get() < m_lastCommittedOffset) {
                    warn(null, "Committing paused offset even though a timeout occurred waiting for pending stored procedures to finish.");
                } else {
                    warn(null, "Refusing to commit paused offset because a timeout occurred waiting for pending stored procedures to finish.");
                    skipCommit = true;
                }
            }
        }
        if (!skipCommit) {
            // Force a commit. Paused offset will be re-acquired if needed.
            commitOffset(usePausedOffset);
        }
        KafkaStreamImporterConfig.closeConsumer(m_consumer);
        m_consumer = null;
        BlockingChannel channel = m_offsetManager.getAndSet(null);
        if (channel != null) {
            try {
                channel.disconnect();
            } catch (Exception ignoreIt) {
            }
        }
    }
    m_dead.compareAndSet(false, true);
    info(null, "Partition fetcher stopped for " + m_topicAndPartition + " Last commit point is: " + m_lastCommittedOffset + " Callback Rcvd: " + callbackTracker.getCallbackCount() + " Submitted: " + submitCount);
}
Also used : Invocation(org.voltdb.importer.Invocation) Formatter(org.voltdb.importer.formatter.Formatter) OffsetFetchResponse(kafka.javaapi.OffsetFetchResponse) FetchResponse(kafka.javaapi.FetchResponse) IOException(java.io.IOException) MessageAndOffset(kafka.message.MessageAndOffset) ByteBuffer(java.nio.ByteBuffer) FormatException(org.voltdb.importer.formatter.FormatException) BlockingChannel(kafka.network.BlockingChannel) OffsetFetchRequest(kafka.javaapi.OffsetFetchRequest) FetchRequest(kafka.api.FetchRequest)
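
Every failure path in accept funnels through backoffSleep(sleepCounter), which the excerpt does not show. A minimal sketch of a capped exponential backoff in that spirit follows; the constants, the cap, and the interrupt handling are assumptions, not the VoltDB implementation.

final class Backoff {
    static final long BASE_SLEEP_MS = 1_000; // assumed base delay
    static final int MAX_EXPONENT = 5;       // caps the sleep at 16 seconds

    // Sleeps for BASE_SLEEP_MS * 2^(counter - 1), capped, and returns the
    // incremented counter so the caller can feed it back in on the next failure,
    // matching the sleepCounter = backoffSleep(sleepCounter) pattern above.
    static int backoffSleep(int counter) {
        long sleepMs = BASE_SLEEP_MS << (Math.min(counter, MAX_EXPONENT) - 1);
        try {
            Thread.sleep(sleepMs);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve interrupt status for the caller
        }
        return counter + 1;
    }
}

Resetting the counter to 1 after a successful fetch, as the loop above does, keeps delays short under transient hiccups while still throttling a persistently failing partition.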

Aggregations

IOException (java.io.IOException): 2
Invocation (org.voltdb.importer.Invocation): 2
FormatException (org.voltdb.importer.formatter.FormatException): 2
Formatter (org.voltdb.importer.formatter.Formatter): 2
BufferedReader (java.io.BufferedReader): 1
EOFException (java.io.EOFException): 1
InterruptedIOException (java.io.InterruptedIOException): 1
ByteBuffer (java.nio.ByteBuffer): 1
FetchRequest (kafka.api.FetchRequest): 1
FetchResponse (kafka.javaapi.FetchResponse): 1
OffsetFetchRequest (kafka.javaapi.OffsetFetchRequest): 1
OffsetFetchResponse (kafka.javaapi.OffsetFetchResponse): 1
MessageAndOffset (kafka.message.MessageAndOffset): 1
BlockingChannel (kafka.network.BlockingChannel): 1