
Example 36 with Transaction

use of org.apache.flume.Transaction in project ignite by apache.

the class IgniteSink method process.

/**
 * Processes Flume events.
 */
@Override
public Status process() throws EventDeliveryException {
    Channel channel = getChannel();
    Transaction transaction = channel.getTransaction();
    int eventCount = 0;
    try {
        transaction.begin();
        List<Event> batch = new ArrayList<>(batchSize);
        for (; eventCount < batchSize; ++eventCount) {
            Event event = channel.take();
            if (event == null) {
                break;
            }
            batch.add(event);
        }
        if (!batch.isEmpty()) {
            ignite.cache(cacheName).putAll(eventTransformer.transform(batch));
            if (batch.size() < batchSize)
                sinkCounter.incrementBatchUnderflowCount();
            else
                sinkCounter.incrementBatchCompleteCount();
        } else {
            sinkCounter.incrementBatchEmptyCount();
        }
        sinkCounter.addToEventDrainAttemptCount(batch.size());
        transaction.commit();
        sinkCounter.addToEventDrainSuccessCount(batch.size());
    } catch (Exception e) {
        log.error("Failed to process events", e);
        try {
            transaction.rollback();
        } catch (Throwable e1) {
            e.addSuppressed(e1);
        }
        throw new EventDeliveryException(e);
    } finally {
        transaction.close();
    }
    return eventCount == 0 ? Status.BACKOFF : Status.READY;
}
Also used : Transaction(org.apache.flume.Transaction) EventDeliveryException(org.apache.flume.EventDeliveryException) Channel(org.apache.flume.Channel) ArrayList(java.util.ArrayList) Event(org.apache.flume.Event) FlumeException(org.apache.flume.FlumeException)
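
The one piece process() relies on but does not show is the transformer: the drained batch goes through eventTransformer.transform(batch) and the result is passed straight to IgniteCache#putAll, so the transformer must map a List<Event> to cache entries. A minimal sketch under that assumption follows; the class name, key header, and entry types are illustrative, not the Ignite module's actual interface.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.flume.Event;

/** Illustrative transformer: one cache entry per Flume event, keyed by a header. */
public class HeaderKeyedTransformer {
    /** Header whose value becomes the cache key (hypothetical header name). */
    private static final String KEY_HEADER = "key";

    /** Turns a drained batch into entries suitable for IgniteCache#putAll. */
    public Map<String, String> transform(List<Event> batch) {
        Map<String, String> entries = new HashMap<>(batch.size());
        for (Event evt : batch) {
            String key = evt.getHeaders().get(KEY_HEADER);
            if (key != null)
                entries.put(key, new String(evt.getBody()));
        }
        return entries;
    }
}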

Example 37 with Transaction

use of org.apache.flume.Transaction in project ignite by apache.

the class IgniteSinkTest method testSink.

/**
 * @throws Exception If the test fails.
 */
public void testSink() throws Exception {
    IgniteConfiguration cfg = loadConfiguration("modules/flume/src/test/resources/example-ignite.xml");
    cfg.setClientMode(false);
    final Ignite grid = startGrid("igniteServerNode", cfg);
    Context channelContext = new Context();
    channelContext.put("capacity", String.valueOf(EVENT_CNT));
    channelContext.put("transactionCapacity", String.valueOf(EVENT_CNT));
    Channel memoryChannel = new MemoryChannel();
    Configurables.configure(memoryChannel, channelContext);
    final CountDownLatch latch = new CountDownLatch(EVENT_CNT);
    final IgnitePredicate<Event> putLsnr = new IgnitePredicate<Event>() {

        @Override
        public boolean apply(Event evt) {
            assert evt != null;
            latch.countDown();
            return true;
        }
    };
    IgniteSink sink = new IgniteSink() {

        // Setting the listener on cache before sink processing starts.
        @Override
        public synchronized void start() {
            super.start();
            grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).localListen(putLsnr, EVT_CACHE_OBJECT_PUT);
        }
    };
    sink.setName("IgniteSink");
    sink.setChannel(memoryChannel);
    Context ctx = new Context();
    ctx.put(IgniteSinkConstants.CFG_CACHE_NAME, CACHE_NAME);
    ctx.put(IgniteSinkConstants.CFG_PATH, "example-ignite.xml");
    ctx.put(IgniteSinkConstants.CFG_EVENT_TRANSFORMER, "org.apache.ignite.stream.flume.TestEventTransformer");
    Configurables.configure(sink, ctx);
    sink.start();
    try {
        Transaction tx = memoryChannel.getTransaction();
        tx.begin();
        for (int i = 0; i < EVENT_CNT; i++)
            memoryChannel.put(EventBuilder.withBody((String.valueOf(i) + ": " + i).getBytes()));
        tx.commit();
        tx.close();
        Sink.Status status = Sink.Status.READY;
        while (status != Sink.Status.BACKOFF) {
            status = sink.process();
        }
    } finally {
        sink.stop();
    }
    // Checks that all EVENT_CNT events were successfully processed within 10 seconds.
    assertTrue(latch.await(10, TimeUnit.SECONDS));
    grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).stopLocalListen(putLsnr);
    IgniteCache<String, Integer> cache = grid.cache(CACHE_NAME);
    // Checks that each event was processed properly.
    for (int i = 0; i < EVENT_CNT; i++) {
        assertEquals(i, (int) cache.get(String.valueOf(i)));
    }
    assertEquals(EVENT_CNT, cache.size(CachePeekMode.PRIMARY));
}
Also used : Context(org.apache.flume.Context) MemoryChannel(org.apache.flume.channel.MemoryChannel) Channel(org.apache.flume.Channel) IgnitePredicate(org.apache.ignite.lang.IgnitePredicate) CountDownLatch(java.util.concurrent.CountDownLatch) IgniteConfiguration(org.apache.ignite.configuration.IgniteConfiguration) Transaction(org.apache.flume.Transaction) Sink(org.apache.flume.Sink) Event(org.apache.ignite.events.Event) Ignite(org.apache.ignite.Ignite)
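
The test points CFG_EVENT_TRANSFORMER at org.apache.ignite.stream.flume.TestEventTransformer without showing it. Its required behaviour can be read off the assertions: each body of the form "i: i" must end up as the String key i mapped to the Integer i. A sketch consistent with those assertions (not the actual test class):

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.flume.Event;

/** Sketch of a transformer matching the "<i>: <i>" bodies produced by testSink(). */
public class ColonSplittingTransformer {
    /** Parses each body "key: value" into a (String, Integer) cache entry. */
    public Map<String, Integer> transform(List<Event> events) {
        Map<String, Integer> entries = new HashMap<>(events.size());
        for (Event evt : events) {
            String[] parts = new String(evt.getBody()).split(":");
            entries.put(parts[0].trim(), Integer.parseInt(parts[1].trim()));
        }
        return entries;
    }
}

With EVENT_CNT entries produced this way, both the per-key assertEquals loop and the final cache size check pass.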

Example 38 with Transaction

use of org.apache.flume.Transaction in project apex-malhar by apache.

the class FlumeSink method process.

/* Begin implementing Flume Sink interface */
@Override
@SuppressWarnings({ "BroadCatchBlock", "TooBroadCatch", "UseSpecificCatch", "SleepWhileInLoop" })
public Status process() throws EventDeliveryException {
    Slice slice;
    synchronized (server.requests) {
        for (Request r : server.requests) {
            logger.debug("found {}", r);
            switch(r.type) {
                case SEEK:
                    lastCommitEventTimeMillis = System.currentTimeMillis();
                    slice = r.getAddress();
                    playback = storage.retrieve(Arrays.copyOfRange(slice.buffer, slice.offset, slice.offset + slice.length));
                    client = r.client;
                    break;
                case COMMITTED:
                    lastCommitEventTimeMillis = System.currentTimeMillis();
                    slice = r.getAddress();
                    storage.clean(Arrays.copyOfRange(slice.buffer, slice.offset, slice.offset + slice.length));
                    break;
                case CONNECTED:
                    logger.debug("Connected received, ignoring it!");
                    break;
                case DISCONNECTED:
                    if (r.client == client) {
                        client = null;
                        outstandingEventsCount = 0;
                    }
                    break;
                case WINDOWED:
                    lastConsumedEventsCount = r.getEventCount();
                    idleCount = r.getIdleCount();
                    outstandingEventsCount -= lastConsumedEventsCount;
                    break;
                case SERVER_ERROR:
                    throw new IOError(null);
                default:
                    logger.debug("Cannot understand the request {}", r);
                    break;
            }
        }
        server.requests.clear();
    }
    if (client == null) {
        logger.info("No client expressed interest yet to consume the events.");
        return Status.BACKOFF;
    } else if (System.currentTimeMillis() - lastCommitEventTimeMillis > commitEventTimeoutMillis) {
        logger.info("Client has not processed the workload given for the last {} milliseconds, so backing off.", System.currentTimeMillis() - lastCommitEventTimeMillis);
        return Status.BACKOFF;
    }
    int maxTuples;
    // the following logic needs to be fixed... this is a quick put together.
    if (outstandingEventsCount < 0) {
        if (idleCount > 1) {
            maxTuples = (int) ((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
        } else {
            maxTuples = (int) ((1 + throughputAdjustmentFactor) * lastConsumedEventsCount);
        }
    } else if (outstandingEventsCount > lastConsumedEventsCount) {
        maxTuples = (int) ((1 - throughputAdjustmentFactor) * lastConsumedEventsCount);
    } else {
        if (idleCount > 0) {
            maxTuples = (int) ((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
            if (maxTuples <= 0) {
                maxTuples = minimumEventsPerTransaction;
            }
        } else {
            maxTuples = lastConsumedEventsCount;
        }
    }
    if (maxTuples >= maximumEventsPerTransaction) {
        maxTuples = maximumEventsPerTransaction;
    } else if (maxTuples <= 0) {
        maxTuples = minimumEventsPerTransaction;
    }
    if (maxTuples > 0) {
        if (playback != null) {
            try {
                int i = 0;
                do {
                    if (!client.write(playback)) {
                        retryWrite(playback, null);
                    }
                    outstandingEventsCount++;
                    playback = storage.retrieveNext();
                } while (++i < maxTuples && playback != null);
            } catch (Exception ex) {
                logger.warn("Playback Failed", ex);
                if (ex instanceof NetletThrowable) {
                    try {
                        eventloop.disconnect(client);
                    } finally {
                        client = null;
                        outstandingEventsCount = 0;
                    }
                }
                return Status.BACKOFF;
            }
        } else {
            int storedTuples = 0;
            Transaction t = getChannel().getTransaction();
            try {
                t.begin();
                Event e;
                while (storedTuples < maxTuples && (e = getChannel().take()) != null) {
                    Slice event = codec.toByteArray(e);
                    byte[] address = storage.store(event);
                    if (address != null) {
                        if (!client.write(address, event)) {
                            retryWrite(address, event);
                        }
                        outstandingEventsCount++;
                    } else {
                        logger.debug("Detected the condition of recovery from flume crash!");
                    }
                    storedTuples++;
                }
                if (storedTuples > 0) {
                    storage.flush();
                }
                t.commit();
                if (storedTuples > 0) {
                    /* log less frequently */
                    logger.debug("Transaction details maxTuples = {}, storedTuples = {}, outstanding = {}", maxTuples, storedTuples, outstandingEventsCount);
                }
            } catch (Error er) {
                t.rollback();
                throw er;
            } catch (Exception ex) {
                logger.error("Transaction Failed", ex);
                if (ex instanceof NetletRuntimeException && client != null) {
                    try {
                        eventloop.disconnect(client);
                    } finally {
                        client = null;
                        outstandingEventsCount = 0;
                    }
                }
                t.rollback();
                return Status.BACKOFF;
            } finally {
                t.close();
            }
            if (storedTuples == 0) {
                sleep();
            }
        }
    }
    return Status.READY;
}
Also used : NetletThrowable(com.datatorrent.netlet.NetletThrowable) Transaction(org.apache.flume.Transaction) IOError(java.io.IOError) Slice(com.datatorrent.netlet.util.Slice) Request(org.apache.apex.malhar.flume.sink.Server.Request) Event(org.apache.flume.Event) ServiceConfigurationError(java.util.ServiceConfigurationError) NetletRuntimeException(com.datatorrent.netlet.NetletThrowable.NetletRuntimeException) EventDeliveryException(org.apache.flume.EventDeliveryException) IOException(java.io.IOException)
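
The adaptive maxTuples calculation in the middle of process() (which the original comment admits is a quick put-together) is easier to follow in isolation. The sketch below extracts the same branch logic into a standalone method; the sample factor and bounds in main are assumed values, not the sink's actual configuration.

/** Stand-alone rendering of the maxTuples heuristic used by FlumeSink#process() above. */
public class BatchSizeHeuristic {
    static int maxTuples(int outstanding, int idleCount, int lastConsumed,
                         double factor, int minPerTx, int maxPerTx) {
        int maxTuples;
        if (outstanding < 0) {
            // Client is ahead of the sink: grow the batch, more so if it has been idle.
            maxTuples = (int) ((1 + factor * (idleCount > 1 ? idleCount : 1)) * lastConsumed);
        } else if (outstanding > lastConsumed) {
            // Client is falling behind: shrink the batch.
            maxTuples = (int) ((1 - factor) * lastConsumed);
        } else if (idleCount > 0) {
            maxTuples = (int) ((1 + factor * idleCount) * lastConsumed);
            if (maxTuples <= 0)
                maxTuples = minPerTx;
        } else {
            maxTuples = lastConsumed;
        }
        // Clamp to the configured per-transaction bounds.
        if (maxTuples >= maxPerTx)
            return maxPerTx;
        return maxTuples <= 0 ? minPerTx : maxTuples;
    }

    public static void main(String[] args) {
        // Assumed sample values: factor 0.05, last batch 1000 events, bounds 100..10000.
        System.out.println(maxTuples(-5, 0, 1000, 0.05, 100, 10000));   // client ahead: grows (~1050)
        System.out.println(maxTuples(2000, 0, 1000, 0.05, 100, 10000)); // client behind: shrinks (~950)
        System.out.println(maxTuples(500, 3, 1000, 0.05, 100, 10000));  // idle cycles: grows faster (~1150)
    }
}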

Example 39 with Transaction

use of org.apache.flume.Transaction in project rocketmq-externals by apache.

the class RocketMQSink method process.

@Override
public Status process() throws EventDeliveryException {
    Channel channel = getChannel();
    Transaction transaction = null;
    try {
        transaction = channel.getTransaction();
        transaction.begin();
        /*
            batch take
             */
        List<Event> events = new ArrayList<>();
        long beginTime = System.currentTimeMillis();
        while (true) {
            Event event = channel.take();
            if (event != null) {
                events.add(event);
            }
            if (events.size() == batchSize || System.currentTimeMillis() - beginTime > maxProcessTime) {
                break;
            }
        }
        if (events.size() == 0) {
            sinkCounter.incrementBatchEmptyCount();
            transaction.rollback();
            return Status.BACKOFF;
        }
        /*
            async send
             */
        CountDownLatch latch = new CountDownLatch(events.size());
        AtomicInteger errorNum = new AtomicInteger();
        for (Event event : events) {
            byte[] body = event.getBody();
            Message message = new Message(topic, tag, body);
            if (log.isDebugEnabled()) {
                log.debug("Processing event,body={}", new String(body, "UTF-8"));
            }
            producer.send(message, new SendCallBackHandler(message, latch, errorNum));
        }
        latch.await();
        sinkCounter.addToEventDrainAttemptCount(events.size());
        if (errorNum.get() > 0) {
            log.error("errorNum=" + errorNum + ",transaction will rollback");
            transaction.rollback();
            return Status.BACKOFF;
        } else {
            transaction.commit();
            sinkCounter.addToEventDrainSuccessCount(events.size());
            return Status.READY;
        }
    } catch (Exception e) {
        log.error("Failed to processing event", e);
        if (transaction != null) {
            try {
                transaction.rollback();
            } catch (Exception ex) {
                log.error("Failed to rollback transaction", ex);
                throw new EventDeliveryException("Failed to rollback transaction", ex);
            }
        }
        return Status.BACKOFF;
    } finally {
        if (transaction != null) {
            transaction.close();
        }
    }
}
Also used : Message(org.apache.rocketmq.common.message.Message) Channel(org.apache.flume.Channel) ArrayList(java.util.ArrayList) CountDownLatch(java.util.concurrent.CountDownLatch) MQClientException(org.apache.rocketmq.client.exception.MQClientException) EventDeliveryException(org.apache.flume.EventDeliveryException) ConfigurationException(org.apache.flume.conf.ConfigurationException) FlumeException(org.apache.flume.FlumeException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) Transaction(org.apache.flume.Transaction) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) Event(org.apache.flume.Event)
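
SendCallBackHandler is constructed with the message, the latch, and the shared error counter, but its body is not shown. Based on RocketMQ's asynchronous SendCallback contract, it plausibly looks like the sketch below (an illustration, not the project's actual class):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.rocketmq.client.producer.SendCallback;
import org.apache.rocketmq.client.producer.SendResult;
import org.apache.rocketmq.common.message.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Sketch of the async send callback: releases the latch and counts failures. */
public class SendCallBackHandler implements SendCallback {
    private static final Logger log = LoggerFactory.getLogger(SendCallBackHandler.class);

    private final Message message;
    private final CountDownLatch latch;
    private final AtomicInteger errorNum;

    public SendCallBackHandler(Message message, CountDownLatch latch, AtomicInteger errorNum) {
        this.message = message;
        this.latch = latch;
        this.errorNum = errorNum;
    }

    @Override
    public void onSuccess(SendResult sendResult) {
        latch.countDown();
    }

    @Override
    public void onException(Throwable e) {
        log.error("Failed to send message, topic=" + message.getTopic(), e);
        errorNum.incrementAndGet();
        latch.countDown();
    }
}

Counting down the latch on both paths keeps process() from blocking forever on latch.await(); the error counter then decides between commit and rollback.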

Example 40 with Transaction

use of org.apache.flume.Transaction in project rocketmq-externals by apache.

the class RocketMQSinkTest method testEvent.

@Test
public void testEvent() throws MQClientException, InterruptedException, EventDeliveryException, RemotingException, MQBrokerException, UnsupportedEncodingException {
    /*
        start sink
         */
    Context context = new Context();
    context.put(NAME_SERVER_CONFIG, nameServer);
    context.put(TAG_CONFIG, tag);
    RocketMQSink sink = new RocketMQSink();
    Configurables.configure(sink, context);
    MemoryChannel channel = new MemoryChannel();
    Configurables.configure(channel, context);
    sink.setChannel(channel);
    sink.start();
    /*
        mock flume source
         */
    String sendMsg = "\"Hello RocketMQ\"" + "," + DateFormatUtils.format(new Date(), "yyyy-MM-DD hh:mm:ss");
    Transaction tx = channel.getTransaction();
    tx.begin();
    Event event = EventBuilder.withBody(sendMsg.getBytes(), null);
    channel.put(event);
    tx.commit();
    tx.close();
    log.info("publish message : {}", sendMsg);
    Sink.Status status = sink.process();
    if (status == Sink.Status.BACKOFF) {
        fail("Error");
    }
    sink.stop();
    /*
        consume messages
         */
    consumer = new DefaultMQPullConsumer(consumerGroup);
    consumer.setNamesrvAddr(nameServer);
    consumer.setMessageModel(MessageModel.valueOf("BROADCASTING"));
    consumer.registerMessageQueueListener(TOPIC_DEFAULT, null);
    consumer.start();
    String receiveMsg = null;
    Set<MessageQueue> queues = consumer.fetchSubscribeMessageQueues(TOPIC_DEFAULT);
    for (MessageQueue queue : queues) {
        long offset = getMessageQueueOffset(queue);
        PullResult pullResult = consumer.pull(queue, tag, offset, 32);
        if (pullResult.getPullStatus() == PullStatus.FOUND) {
            for (MessageExt message : pullResult.getMsgFoundList()) {
                byte[] body = message.getBody();
                receiveMsg = new String(body, "UTF-8");
                log.info("receive message : {}", receiveMsg);
            }
            long nextBeginOffset = pullResult.getNextBeginOffset();
            putMessageQueueOffset(queue, nextBeginOffset);
        }
    }
    /*
        wait for processQueueTable init
         */
    Thread.sleep(1000);
    consumer.shutdown();
    assertEquals(sendMsg, receiveMsg);
}
Also used : Context(org.apache.flume.Context) MemoryChannel(org.apache.flume.channel.MemoryChannel) Date(java.util.Date) DefaultMQPullConsumer(org.apache.rocketmq.client.consumer.DefaultMQPullConsumer) PullResult(org.apache.rocketmq.client.consumer.PullResult) MessageExt(org.apache.rocketmq.common.message.MessageExt) Transaction(org.apache.flume.Transaction) Sink(org.apache.flume.Sink) MessageQueue(org.apache.rocketmq.common.message.MessageQueue) Event(org.apache.flume.Event) Test(org.junit.Test)
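
getMessageQueueOffset and putMessageQueueOffset are helpers of the test class that this snippet does not include. With a DefaultMQPullConsumer they would typically delegate to the consumer's offset store; the sketch below is an assumption along those lines, not the test's actual code.

import org.apache.rocketmq.client.consumer.DefaultMQPullConsumer;
import org.apache.rocketmq.client.exception.MQClientException;
import org.apache.rocketmq.common.message.MessageQueue;

/** Sketch of the offset helpers used by testEvent(), backed by the consumer's offset store. */
public class PullOffsetHelpers {
    private final DefaultMQPullConsumer consumer;

    public PullOffsetHelpers(DefaultMQPullConsumer consumer) {
        this.consumer = consumer;
    }

    /** Reads the last committed offset for the queue, treating "unknown" as the beginning. */
    long getMessageQueueOffset(MessageQueue queue) throws MQClientException {
        long offset = consumer.fetchConsumeOffset(queue, false);
        return offset < 0 ? 0 : offset;
    }

    /** Records how far the test has read so the next pull starts after it. */
    void putMessageQueueOffset(MessageQueue queue, long offset) throws MQClientException {
        consumer.updateConsumeOffset(queue, offset);
    }
}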

Aggregations

Transaction (org.apache.flume.Transaction): 41
Event (org.apache.flume.Event): 38
Test (org.junit.Test): 34
Channel (org.apache.flume.Channel): 29
MemoryChannel (org.apache.flume.channel.MemoryChannel): 26
PhoenixSink (org.apache.phoenix.flume.sink.PhoenixSink): 22
Context (org.apache.flume.Context): 11
EventDeliveryException (org.apache.flume.EventDeliveryException): 9
Connection (java.sql.Connection): 5
ResultSet (java.sql.ResultSet): 5
Properties (java.util.Properties): 5
SQLException (java.sql.SQLException): 4
ChannelException (org.apache.flume.ChannelException): 4
ArrayList (java.util.ArrayList): 3
Date (java.util.Date): 3
Sink (org.apache.flume.Sink): 3
IOException (java.io.IOException): 2
UnsupportedEncodingException (java.io.UnsupportedEncodingException): 2
CountDownLatch (java.util.concurrent.CountDownLatch): 2
FlumeException (org.apache.flume.FlumeException): 2