Example 11 with EventDeliveryException

Use of org.apache.flume.EventDeliveryException in project rocketmq-externals by apache.

The class RocketMQSink, method process():

@Override
public Status process() throws EventDeliveryException {
    Channel channel = getChannel();
    Transaction transaction = null;
    try {
        transaction = channel.getTransaction();
        transaction.begin();
        /*
         * batch take
         */
        List<Event> events = new ArrayList<>();
        long beginTime = System.currentTimeMillis();
        while (true) {
            Event event = channel.take();
            if (event != null) {
                events.add(event);
            }
            if (events.size() == batchSize || System.currentTimeMillis() - beginTime > maxProcessTime) {
                break;
            }
        }
        if (events.size() == 0) {
            sinkCounter.incrementBatchEmptyCount();
            transaction.rollback();
            return Status.BACKOFF;
        }
        /*
         * async send
         */
        CountDownLatch latch = new CountDownLatch(events.size());
        AtomicInteger errorNum = new AtomicInteger();
        for (Event event : events) {
            byte[] body = event.getBody();
            Message message = new Message(topic, tag, body);
            if (log.isDebugEnabled()) {
                log.debug("Processing event,body={}", new String(body, "UTF-8"));
            }
            producer.send(message, new SendCallBackHandler(message, latch, errorNum));
        }
        latch.await();
        sinkCounter.addToEventDrainAttemptCount(events.size());
        if (errorNum.get() > 0) {
            log.error("errorNum=" + errorNum + ",transaction will rollback");
            transaction.rollback();
            return Status.BACKOFF;
        } else {
            transaction.commit();
            sinkCounter.addToEventDrainSuccessCount(events.size());
            return Status.READY;
        }
    } catch (Throwable e) {
        log.error("Failed to processing event", e);
        if (transaction != null) {
            try {
                transaction.rollback();
            } catch (Throwable ex) {
                log.error("Failed to rollback transaction", ex);
                throw new EventDeliveryException("Failed to rollback transaction", ex);
            }
        }
        return Status.BACKOFF;
    } finally {
        if (transaction != null) {
            transaction.close();
        }
    }
}
Also used: Message (org.apache.rocketmq.common.message.Message), Channel (org.apache.flume.Channel), ArrayList (java.util.ArrayList), CountDownLatch (java.util.concurrent.CountDownLatch), Transaction (org.apache.flume.Transaction), EventDeliveryException (org.apache.flume.EventDeliveryException), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), Event (org.apache.flume.Event)
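
The SendCallBackHandler passed to producer.send(...) above is defined elsewhere in rocketmq-externals and is not shown in this listing. A minimal sketch of such a handler, assuming it implements RocketMQ's SendCallback and does nothing beyond releasing the latch and counting failures so that process() can choose between commit and rollback:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.rocketmq.client.producer.SendCallback;
import org.apache.rocketmq.client.producer.SendResult;
import org.apache.rocketmq.common.message.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Illustrative sketch only; the real handler in rocketmq-externals may differ.
public class SendCallBackHandler implements SendCallback {

    private static final Logger log = LoggerFactory.getLogger(SendCallBackHandler.class);

    private final Message message;
    private final CountDownLatch latch;
    private final AtomicInteger errorNum;

    public SendCallBackHandler(Message message, CountDownLatch latch, AtomicInteger errorNum) {
        this.message = message;
        this.latch = latch;
        this.errorNum = errorNum;
    }

    @Override
    public void onSuccess(SendResult sendResult) {
        // send succeeded: just release the waiting process() thread
        latch.countDown();
    }

    @Override
    public void onException(Throwable e) {
        // send failed: record the failure so process() rolls the transaction back
        errorNum.incrementAndGet();
        log.error("Asynchronous send failed, topic={}", message.getTopic(), e);
        latch.countDown();
    }
}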

Example 12 with EventDeliveryException

Use of org.apache.flume.EventDeliveryException in project apex-malhar by apache.

The class FlumeSink, method process():

/* Begin implementing Flume Sink interface */
@Override
@SuppressWarnings({ "BroadCatchBlock", "TooBroadCatch", "UseSpecificCatch", "SleepWhileInLoop" })
public Status process() throws EventDeliveryException {
    Slice slice;
    synchronized (server.requests) {
        for (Request r : server.requests) {
            logger.debug("found {}", r);
            switch(r.type) {
                case SEEK:
                    lastCommitEventTimeMillis = System.currentTimeMillis();
                    slice = r.getAddress();
                    playback = storage.retrieve(Arrays.copyOfRange(slice.buffer, slice.offset, slice.offset + slice.length));
                    client = r.client;
                    break;
                case COMMITTED:
                    lastCommitEventTimeMillis = System.currentTimeMillis();
                    slice = r.getAddress();
                    storage.clean(Arrays.copyOfRange(slice.buffer, slice.offset, slice.offset + slice.length));
                    break;
                case CONNECTED:
                    logger.debug("Connected received, ignoring it!");
                    break;
                case DISCONNECTED:
                    if (r.client == client) {
                        client = null;
                        outstandingEventsCount = 0;
                    }
                    break;
                case WINDOWED:
                    lastConsumedEventsCount = r.getEventCount();
                    idleCount = r.getIdleCount();
                    outstandingEventsCount -= lastConsumedEventsCount;
                    break;
                case SERVER_ERROR:
                    throw new IOError(null);
                default:
                    logger.debug("Cannot understand the request {}", r);
                    break;
            }
        }
        server.requests.clear();
    }
    if (client == null) {
        logger.info("No client expressed interest yet to consume the events.");
        return Status.BACKOFF;
    } else if (System.currentTimeMillis() - lastCommitEventTimeMillis > commitEventTimeoutMillis) {
        logger.info("Client has not processed the workload given for the last {} milliseconds, so backing off.", System.currentTimeMillis() - lastCommitEventTimeMillis);
        return Status.BACKOFF;
    }
    int maxTuples;
    // the following logic needs to be fixed... this is a quick put together.
    if (outstandingEventsCount < 0) {
        if (idleCount > 1) {
            maxTuples = (int) ((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
        } else {
            maxTuples = (int) ((1 + throughputAdjustmentFactor) * lastConsumedEventsCount);
        }
    } else if (outstandingEventsCount > lastConsumedEventsCount) {
        maxTuples = (int) ((1 - throughputAdjustmentFactor) * lastConsumedEventsCount);
    } else {
        if (idleCount > 0) {
            maxTuples = (int) ((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
            if (maxTuples <= 0) {
                maxTuples = minimumEventsPerTransaction;
            }
        } else {
            maxTuples = lastConsumedEventsCount;
        }
    }
    if (maxTuples >= maximumEventsPerTransaction) {
        maxTuples = maximumEventsPerTransaction;
    } else if (maxTuples <= 0) {
        maxTuples = minimumEventsPerTransaction;
    }
    if (maxTuples > 0) {
        if (playback != null) {
            try {
                int i = 0;
                do {
                    if (!client.write(playback)) {
                        retryWrite(playback, null);
                    }
                    outstandingEventsCount++;
                    playback = storage.retrieveNext();
                } while (++i < maxTuples && playback != null);
            } catch (Exception ex) {
                logger.warn("Playback Failed", ex);
                if (ex instanceof NetletThrowable) {
                    try {
                        eventloop.disconnect(client);
                    } finally {
                        client = null;
                        outstandingEventsCount = 0;
                    }
                }
                return Status.BACKOFF;
            }
        } else {
            int storedTuples = 0;
            Transaction t = getChannel().getTransaction();
            try {
                t.begin();
                Event e;
                while (storedTuples < maxTuples && (e = getChannel().take()) != null) {
                    Slice event = codec.toByteArray(e);
                    byte[] address = storage.store(event);
                    if (address != null) {
                        if (!client.write(address, event)) {
                            retryWrite(address, event);
                        }
                        outstandingEventsCount++;
                    } else {
                        logger.debug("Detected the condition of recovery from flume crash!");
                    }
                    storedTuples++;
                }
                if (storedTuples > 0) {
                    storage.flush();
                }
                t.commit();
                if (storedTuples > 0) {
                    /* log less frequently */
                    logger.debug("Transaction details maxTuples = {}, storedTuples = {}, outstanding = {}", maxTuples, storedTuples, outstandingEventsCount);
                }
            } catch (Error er) {
                t.rollback();
                throw er;
            } catch (Exception ex) {
                logger.error("Transaction Failed", ex);
                if (ex instanceof NetletRuntimeException && client != null) {
                    try {
                        eventloop.disconnect(client);
                    } finally {
                        client = null;
                        outstandingEventsCount = 0;
                    }
                }
                t.rollback();
                return Status.BACKOFF;
            } finally {
                t.close();
            }
            if (storedTuples == 0) {
                sleep();
            }
        }
    }
    return Status.READY;
}
Also used: NetletThrowable (com.datatorrent.netlet.NetletThrowable), NetletRuntimeException (com.datatorrent.netlet.NetletThrowable.NetletRuntimeException), Slice (com.datatorrent.netlet.util.Slice), Request (org.apache.apex.malhar.flume.sink.Server.Request), Transaction (org.apache.flume.Transaction), Event (org.apache.flume.Event), EventDeliveryException (org.apache.flume.EventDeliveryException), IOError (java.io.IOError), IOException (java.io.IOException), ServiceConfigurationError (java.util.ServiceConfigurationError)
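
The adaptive batch sizing in the middle of process() above (the block the author flags as "a quick put together") is easier to follow when pulled out into a standalone function. The following is an illustration-only restatement, not apex-malhar code; parameter names mirror the FlumeSink fields:

// Illustration-only restatement of FlumeSink's adaptive batch sizing; not apex-malhar code.
public final class BatchSizing {

    static int computeMaxTuples(int outstandingEventsCount, int lastConsumedEventsCount,
                                int idleCount, double throughputAdjustmentFactor,
                                int minimumEventsPerTransaction, int maximumEventsPerTransaction) {
        int maxTuples;
        if (outstandingEventsCount < 0) {
            // nothing known to be outstanding: grow, faster if the reader reported idle cycles
            maxTuples = (int) ((1 + throughputAdjustmentFactor * Math.max(idleCount, 1)) * lastConsumedEventsCount);
        } else if (outstandingEventsCount > lastConsumedEventsCount) {
            // the reader is falling behind: shrink the next batch
            maxTuples = (int) ((1 - throughputAdjustmentFactor) * lastConsumedEventsCount);
        } else if (idleCount > 0) {
            // the reader kept up and had spare cycles: grow
            maxTuples = (int) ((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
            if (maxTuples <= 0) {
                maxTuples = minimumEventsPerTransaction;
            }
        } else {
            // steady state: repeat the last batch size
            maxTuples = lastConsumedEventsCount;
        }
        // clamp to the configured per-transaction window
        if (maxTuples >= maximumEventsPerTransaction) {
            maxTuples = maximumEventsPerTransaction;
        } else if (maxTuples <= 0) {
            maxTuples = minimumEventsPerTransaction;
        }
        return maxTuples;
    }

    public static void main(String[] args) {
        // 500 consumed last window, 200 still outstanding, no idle cycles -> 500
        System.out.println(computeMaxTuples(200, 500, 0, 0.05, 1, 10000));
        // reader falling behind (800 outstanding vs 500 consumed) -> (1 - 0.05) * 500 = 475
        System.out.println(computeMaxTuples(800, 500, 0, 0.05, 1, 10000));
    }
}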

Aggregations: types used together with EventDeliveryException across the examples, with usage counts.

EventDeliveryException (org.apache.flume.EventDeliveryException): 12
Event (org.apache.flume.Event): 11
Transaction (org.apache.flume.Transaction): 9
Channel (org.apache.flume.Channel): 8
SQLException (java.sql.SQLException): 4
ArrayList (java.util.ArrayList): 4
MemoryChannel (org.apache.flume.channel.MemoryChannel): 4
Test (org.junit.Test): 4
PhoenixSink (org.apache.phoenix.flume.sink.PhoenixSink): 3
ChannelException (org.apache.flume.ChannelException): 2
FlumeException (org.apache.flume.FlumeException): 2
PollableSource (org.apache.flume.PollableSource): 2
MQClientException (org.apache.rocketmq.client.exception.MQClientException): 2
Message (org.apache.rocketmq.common.message.Message): 2
NetletThrowable (com.datatorrent.netlet.NetletThrowable): 1
NetletRuntimeException (com.datatorrent.netlet.NetletThrowable.NetletRuntimeException): 1
Slice (com.datatorrent.netlet.util.Slice): 1
IOError (java.io.IOError): 1
IOException (java.io.IOException): 1
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 1
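
The MemoryChannel and Test entries above reflect how sinks like these are typically exercised: events are staged into a channel inside a channel transaction, and the sink's process() is then invoked directly. A minimal, self-contained sketch of that driver pattern, using Flume's built-in NullSink as a stand-in for the sinks shown above:

import java.nio.charset.StandardCharsets;
import org.apache.flume.Channel;
import org.apache.flume.Context;
import org.apache.flume.Sink;
import org.apache.flume.Transaction;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.sink.NullSink;

public class SinkProcessHarness {

    public static void main(String[] args) throws Exception {
        // A MemoryChannel feeds the sink; NullSink stands in for RocketMQSink/FlumeSink here.
        Channel channel = new MemoryChannel();
        channel.setName("memory-channel");
        Configurables.configure(channel, new Context());
        channel.start();

        Sink sink = new NullSink();
        sink.setName("null-sink");
        Configurables.configure(sink, new Context());
        sink.setChannel(channel);
        sink.start();

        // Stage a few events in the channel inside a channel transaction.
        Transaction tx = channel.getTransaction();
        tx.begin();
        for (int i = 0; i < 10; i++) {
            channel.put(EventBuilder.withBody(("event-" + i).getBytes(StandardCharsets.UTF_8)));
        }
        tx.commit();
        tx.close();

        // process() opens its own transaction, drains a batch, and returns READY or BACKOFF.
        Sink.Status status = sink.process();
        System.out.println("sink.process() returned " + status);

        sink.stop();
        channel.stop();
    }
}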