use of org.apache.flume.Transaction in project ignite by apache.
the class IgniteSink method process.
/**
 * Processes Flume events.
 */
@Override
public Status process() throws EventDeliveryException {
    Channel channel = getChannel();
    Transaction transaction = channel.getTransaction();
    int eventCount = 0;
    try {
        transaction.begin();
        List<Event> batch = new ArrayList<>(batchSize);
        for (; eventCount < batchSize; ++eventCount) {
            Event event = channel.take();
            if (event == null) {
                break;
            }
            batch.add(event);
        }
        if (!batch.isEmpty()) {
            ignite.cache(cacheName).putAll(eventTransformer.transform(batch));
            if (batch.size() < batchSize)
                sinkCounter.incrementBatchUnderflowCount();
            else
                sinkCounter.incrementBatchCompleteCount();
        } else {
            sinkCounter.incrementBatchEmptyCount();
        }
        sinkCounter.addToEventDrainAttemptCount(batch.size());
        transaction.commit();
        sinkCounter.addToEventDrainSuccessCount(batch.size());
    } catch (Exception e) {
        log.error("Failed to process events", e);
        try {
            transaction.rollback();
        } catch (Throwable e1) {
            e.addSuppressed(e1);
        }
        throw new EventDeliveryException(e);
    } finally {
        transaction.close();
    }
    return eventCount == 0 ? Status.BACKOFF : Status.READY;
}
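This method follows the standard Flume sink contract: every take from the channel happens inside a transaction, a successful drain ends in commit(), any failure triggers rollback(), and close() always runs in a finally block. Below is a minimal sketch of that lifecycle with the Ignite-specific cache write stripped out; the class name, BATCH_SIZE constant, and deliver() helper are hypothetical, not part of any of the projects shown here.

import java.util.ArrayList;
import java.util.List;

import org.apache.flume.Channel;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Transaction;
import org.apache.flume.sink.AbstractSink;

public class SkeletonSink extends AbstractSink {
    private static final int BATCH_SIZE = 100; // hypothetical batch size

    @Override
    public Status process() throws EventDeliveryException {
        Channel channel = getChannel();
        Transaction tx = channel.getTransaction();
        try {
            tx.begin();
            List<Event> batch = new ArrayList<>(BATCH_SIZE);
            // take() returns null once the channel is drained.
            for (Event e; batch.size() < BATCH_SIZE && (e = channel.take()) != null; ) {
                batch.add(e);
            }
            deliver(batch); // hypothetical downstream write; throws on failure
            tx.commit();    // events leave the channel only on commit
            return batch.isEmpty() ? Status.BACKOFF : Status.READY;
        } catch (Exception e) {
            tx.rollback();  // events stay in the channel for a retry
            throw new EventDeliveryException("Delivery failed", e);
        } finally {
            tx.close();     // must always run, even after rollback
        }
    }

    private void deliver(List<Event> batch) {
        // Placeholder for the sink-specific write (Ignite putAll, RocketMQ send, ...).
    }
}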
use of org.apache.flume.Transaction in project ignite by apache.
the class IgniteSinkTest method testSink.
/**
 * @throws Exception {@link Exception}.
 */
public void testSink() throws Exception {
    IgniteConfiguration cfg = loadConfiguration("modules/flume/src/test/resources/example-ignite.xml");
    cfg.setClientMode(false);
    final Ignite grid = startGrid("igniteServerNode", cfg);
    Context channelContext = new Context();
    channelContext.put("capacity", String.valueOf(EVENT_CNT));
    channelContext.put("transactionCapacity", String.valueOf(EVENT_CNT));
    Channel memoryChannel = new MemoryChannel();
    Configurables.configure(memoryChannel, channelContext);
    final CountDownLatch latch = new CountDownLatch(EVENT_CNT);
    final IgnitePredicate<Event> putLsnr = new IgnitePredicate<Event>() {
        @Override
        public boolean apply(Event evt) {
            assert evt != null;
            latch.countDown();
            return true;
        }
    };
    IgniteSink sink = new IgniteSink() {
        // Setting the listener on the cache before sink processing starts.
        @Override
        public synchronized void start() {
            super.start();
            grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).localListen(putLsnr, EVT_CACHE_OBJECT_PUT);
        }
    };
    sink.setName("IgniteSink");
    sink.setChannel(memoryChannel);
    Context ctx = new Context();
    ctx.put(IgniteSinkConstants.CFG_CACHE_NAME, CACHE_NAME);
    ctx.put(IgniteSinkConstants.CFG_PATH, "example-ignite.xml");
    ctx.put(IgniteSinkConstants.CFG_EVENT_TRANSFORMER, "org.apache.ignite.stream.flume.TestEventTransformer");
    Configurables.configure(sink, ctx);
    sink.start();
    try {
        Transaction tx = memoryChannel.getTransaction();
        tx.begin();
        for (int i = 0; i < EVENT_CNT; i++) {
            memoryChannel.put(EventBuilder.withBody((String.valueOf(i) + ": " + i).getBytes()));
        }
        tx.commit();
        tx.close();
        Sink.Status status = Sink.Status.READY;
        while (status != Sink.Status.BACKOFF) {
            status = sink.process();
        }
    } finally {
        sink.stop();
    }
    // Checks that 10000 events are successfully processed within 10 seconds.
    assertTrue(latch.await(10, TimeUnit.SECONDS));
    grid.events(grid.cluster().forCacheNodes(CACHE_NAME)).stopLocalListen(putLsnr);
    IgniteCache<String, Integer> cache = grid.cache(CACHE_NAME);
    // Checks that each event was processed properly.
    for (int i = 0; i < EVENT_CNT; i++) {
        assertEquals(i, (int) cache.get(String.valueOf(i)));
    }
    assertEquals(EVENT_CNT, cache.size(CachePeekMode.PRIMARY));
}
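The test wires the sink to org.apache.ignite.stream.flume.TestEventTransformer, which must turn each event body of the form "0: 0" into the cache entry the assertions expect. The module's EventTransformer contract is a single transform(List) call whose result is handed to putAll in the sink's process() above. The following is a plausible sketch of such a transformer, assuming the interface is generic over the event, key, and value types; the class name and parsing details are invented, not the actual test class.

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.flume.Event;
import org.apache.ignite.stream.flume.EventTransformer;

public class ColonSeparatedTransformer implements EventTransformer<Event, String, Integer> {
    /** Parses each event body of the form "key: value" into one cache entry. */
    @Override
    public Map<String, Integer> transform(List<Event> events) {
        Map<String, Integer> entries = new HashMap<>(events.size());
        for (Event event : events) {
            String body = new String(event.getBody()); // e.g. "42: 42"
            String[] kv = body.split(":");
            entries.put(kv[0].trim(), Integer.parseInt(kv[1].trim()));
        }
        return entries;
    }
}

Note that CFG_EVENT_TRANSFORMER takes a class name, so the transformer is instantiated reflectively and needs a public no-arg constructor.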
use of org.apache.flume.Transaction in project apex-malhar by apache.
the class FlumeSink method process.
/* Begin implementing Flume Sink interface */
@Override
@SuppressWarnings({"BroadCatchBlock", "TooBroadCatch", "UseSpecificCatch", "SleepWhileInLoop"})
public Status process() throws EventDeliveryException {
    Slice slice;
    synchronized (server.requests) {
        for (Request r : server.requests) {
            logger.debug("found {}", r);
            switch (r.type) {
                case SEEK:
                    lastCommitEventTimeMillis = System.currentTimeMillis();
                    slice = r.getAddress();
                    playback = storage.retrieve(Arrays.copyOfRange(slice.buffer, slice.offset, slice.offset + slice.length));
                    client = r.client;
                    break;

                case COMMITTED:
                    lastCommitEventTimeMillis = System.currentTimeMillis();
                    slice = r.getAddress();
                    storage.clean(Arrays.copyOfRange(slice.buffer, slice.offset, slice.offset + slice.length));
                    break;

                case CONNECTED:
                    logger.debug("Connected received, ignoring it!");
                    break;

                case DISCONNECTED:
                    if (r.client == client) {
                        client = null;
                        outstandingEventsCount = 0;
                    }
                    break;

                case WINDOWED:
                    lastConsumedEventsCount = r.getEventCount();
                    idleCount = r.getIdleCount();
                    outstandingEventsCount -= lastConsumedEventsCount;
                    break;

                case SERVER_ERROR:
                    throw new IOError(null);

                default:
                    logger.debug("Cannot understand the request {}", r);
                    break;
            }
        }
        server.requests.clear();
    }

    if (client == null) {
        logger.info("No client expressed interest yet to consume the events.");
        return Status.BACKOFF;
    } else if (System.currentTimeMillis() - lastCommitEventTimeMillis > commitEventTimeoutMillis) {
        logger.info("Client has not processed the workload given for the last {} milliseconds, so backing off.",
            System.currentTimeMillis() - lastCommitEventTimeMillis);
        return Status.BACKOFF;
    }

    int maxTuples;
    // The following logic needs to be fixed... this is a quick put-together.
    if (outstandingEventsCount < 0) {
        if (idleCount > 1) {
            maxTuples = (int)((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
        } else {
            maxTuples = (int)((1 + throughputAdjustmentFactor) * lastConsumedEventsCount);
        }
    } else if (outstandingEventsCount > lastConsumedEventsCount) {
        maxTuples = (int)((1 - throughputAdjustmentFactor) * lastConsumedEventsCount);
    } else {
        if (idleCount > 0) {
            maxTuples = (int)((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
            if (maxTuples <= 0) {
                maxTuples = minimumEventsPerTransaction;
            }
        } else {
            maxTuples = lastConsumedEventsCount;
        }
    }

    if (maxTuples >= maximumEventsPerTransaction) {
        maxTuples = maximumEventsPerTransaction;
    } else if (maxTuples <= 0) {
        maxTuples = minimumEventsPerTransaction;
    }

    if (maxTuples > 0) {
        if (playback != null) {
            try {
                int i = 0;
                do {
                    if (!client.write(playback)) {
                        retryWrite(playback, null);
                    }
                    outstandingEventsCount++;
                    playback = storage.retrieveNext();
                } while (++i < maxTuples && playback != null);
            } catch (Exception ex) {
                logger.warn("Playback Failed", ex);
                if (ex instanceof NetletThrowable) {
                    try {
                        eventloop.disconnect(client);
                    } finally {
                        client = null;
                        outstandingEventsCount = 0;
                    }
                }
                return Status.BACKOFF;
            }
        } else {
            int storedTuples = 0;
            Transaction t = getChannel().getTransaction();
            try {
                t.begin();
                Event e;
                while (storedTuples < maxTuples && (e = getChannel().take()) != null) {
                    Slice event = codec.toByteArray(e);
                    byte[] address = storage.store(event);
                    if (address != null) {
                        if (!client.write(address, event)) {
                            retryWrite(address, event);
                        }
                        outstandingEventsCount++;
                    } else {
                        logger.debug("Detected the condition of recovery from flume crash!");
                    }
                    storedTuples++;
                }
                if (storedTuples > 0) {
                    storage.flush();
                }
                t.commit();
                if (storedTuples > 0) {
                    /* log less frequently */
                    logger.debug("Transaction details maxTuples = {}, storedTuples = {}, outstanding = {}",
                        maxTuples, storedTuples, outstandingEventsCount);
                }
            } catch (Error er) {
                t.rollback();
                throw er;
            } catch (Exception ex) {
                logger.error("Transaction Failed", ex);
                if (ex instanceof NetletRuntimeException && client != null) {
                    try {
                        eventloop.disconnect(client);
                    } finally {
                        client = null;
                        outstandingEventsCount = 0;
                    }
                }
                t.rollback();
                return Status.BACKOFF;
            } finally {
                t.close();
            }
            if (storedTuples == 0) {
                sleep();
            }
        }
    }

    return Status.READY;
}
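The middle of this process() is effectively a small feedback controller: maxTuples grows by throughputAdjustmentFactor per idle window and shrinks when the client has more outstanding events than it consumed in the last window. Below is a simplified, self-contained re-statement of that arithmetic for experimentation. The clamp here is tightened to the range [minPerTx, maxPerTx], which is slightly stricter than the branch structure above, and all values in main() are invented examples.

/** Standalone re-statement of FlumeSink's adaptive batch-size arithmetic. */
public class ThrottleMath {
    static int maxTuples(int outstanding, int lastConsumed, int idle,
                         double adjustmentFactor, int minPerTx, int maxPerTx) {
        int maxTuples;
        if (outstanding < 0) {
            // Accounting is behind: grow with idle time (at least one step).
            maxTuples = (int) ((1 + adjustmentFactor * Math.max(idle, 1)) * lastConsumed);
        } else if (outstanding > lastConsumed) {
            // The client is falling behind: shrink the batch.
            maxTuples = (int) ((1 - adjustmentFactor) * lastConsumed);
        } else if (idle > 0) {
            // The client kept up and went idle: grow the batch.
            maxTuples = (int) ((1 + adjustmentFactor * idle) * lastConsumed);
        } else {
            maxTuples = lastConsumed;
        }
        // Clamp into [minPerTx, maxPerTx].
        return Math.min(Math.max(maxTuples, minPerTx), maxPerTx);
    }

    public static void main(String[] args) {
        // Client consumed 1000 events last window, none outstanding, idle for
        // 2 windows, 5% adjustment factor: next transaction may carry 1100.
        System.out.println(maxTuples(0, 1000, 2, 0.05, 100, 10000)); // prints 1100
    }
}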
use of org.apache.flume.Transaction in project rocketmq-externals by apache.
the class RocketMQSink method process.
@Override
public Status process() throws EventDeliveryException {
    Channel channel = getChannel();
    Transaction transaction = null;
    try {
        transaction = channel.getTransaction();
        transaction.begin();

        /*
         * Batch take.
         */
        List<Event> events = new ArrayList<>();
        long beginTime = System.currentTimeMillis();
        while (true) {
            Event event = channel.take();
            if (event != null) {
                events.add(event);
            }
            if (events.size() == batchSize || System.currentTimeMillis() - beginTime > maxProcessTime) {
                break;
            }
        }
        if (events.size() == 0) {
            sinkCounter.incrementBatchEmptyCount();
            transaction.rollback();
            return Status.BACKOFF;
        }

        /*
         * Async send.
         */
        CountDownLatch latch = new CountDownLatch(events.size());
        AtomicInteger errorNum = new AtomicInteger();
        for (Event event : events) {
            byte[] body = event.getBody();
            Message message = new Message(topic, tag, body);
            if (log.isDebugEnabled()) {
                log.debug("Processing event, body={}", new String(body, "UTF-8"));
            }
            producer.send(message, new SendCallBackHandler(message, latch, errorNum));
        }
        latch.await();
        sinkCounter.addToEventDrainAttemptCount(events.size());
        if (errorNum.get() > 0) {
            log.error("errorNum=" + errorNum + ", transaction will roll back");
            transaction.rollback();
            return Status.BACKOFF;
        } else {
            transaction.commit();
            sinkCounter.addToEventDrainSuccessCount(events.size());
            return Status.READY;
        }
    } catch (Exception e) {
        log.error("Failed to process event", e);
        if (transaction != null) {
            try {
                transaction.rollback();
            } catch (Exception ex) {
                log.error("Failed to rollback transaction", ex);
                throw new EventDeliveryException("Failed to rollback transaction", ex);
            }
        }
        return Status.BACKOFF;
    } finally {
        if (transaction != null) {
            transaction.close();
        }
    }
}
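process() treats the whole batch as failed if any asynchronous send reports an error, so SendCallBackHandler must count down the latch on both outcomes and bump errorNum only on failure. The class itself is not shown in this excerpt; the following is a plausible sketch against RocketMQ's SendCallback interface, with invented field names and logging.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.rocketmq.client.producer.SendCallback;
import org.apache.rocketmq.client.producer.SendResult;
import org.apache.rocketmq.common.message.Message;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class SendCallBackHandler implements SendCallback {
    private static final Logger log = LoggerFactory.getLogger(SendCallBackHandler.class);

    private final Message message;
    private final CountDownLatch latch;
    private final AtomicInteger errorNum;

    public SendCallBackHandler(Message message, CountDownLatch latch, AtomicInteger errorNum) {
        this.message = message;
        this.latch = latch;
        this.errorNum = errorNum;
    }

    @Override
    public void onSuccess(SendResult sendResult) {
        // A successful send only releases the latch.
        latch.countDown();
    }

    @Override
    public void onException(Throwable e) {
        // A failed send is recorded so process() can roll the batch back.
        errorNum.incrementAndGet();
        log.error("Failed to send message, topic={}", message.getTopic(), e);
        latch.countDown();
    }
}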
use of org.apache.flume.Transaction in project rocketmq-externals by apache.
the class RocketMQSinkTest method testEvent.
@Test
public void testEvent() throws MQClientException, InterruptedException, EventDeliveryException, RemotingException, MQBrokerException, UnsupportedEncodingException {
    /*
     * Start the sink.
     */
    Context context = new Context();
    context.put(NAME_SERVER_CONFIG, nameServer);
    context.put(TAG_CONFIG, tag);
    RocketMQSink sink = new RocketMQSink();
    Configurables.configure(sink, context);
    MemoryChannel channel = new MemoryChannel();
    Configurables.configure(channel, context);
    sink.setChannel(channel);
    sink.start();

    /*
     * Mock a Flume source.
     */
    String sendMsg = "\"Hello RocketMQ\"" + "," + DateFormatUtils.format(new Date(), "yyyy-MM-dd HH:mm:ss");
    Transaction tx = channel.getTransaction();
    tx.begin();
    Event event = EventBuilder.withBody(sendMsg.getBytes(), null);
    channel.put(event);
    tx.commit();
    tx.close();
    log.info("publish message : {}", sendMsg);
    Sink.Status status = sink.process();
    if (status == Sink.Status.BACKOFF) {
        fail("Error");
    }
    sink.stop();

    /*
     * Consume the message.
     */
    consumer = new DefaultMQPullConsumer(consumerGroup);
    consumer.setNamesrvAddr(nameServer);
    consumer.setMessageModel(MessageModel.valueOf("BROADCASTING"));
    consumer.registerMessageQueueListener(TOPIC_DEFAULT, null);
    consumer.start();
    String receiveMsg = null;
    Set<MessageQueue> queues = consumer.fetchSubscribeMessageQueues(TOPIC_DEFAULT);
    for (MessageQueue queue : queues) {
        long offset = getMessageQueueOffset(queue);
        PullResult pullResult = consumer.pull(queue, tag, offset, 32);
        if (pullResult.getPullStatus() == PullStatus.FOUND) {
            for (MessageExt message : pullResult.getMsgFoundList()) {
                byte[] body = message.getBody();
                receiveMsg = new String(body, "UTF-8");
                log.info("receive message : {}", receiveMsg);
            }
            long nextBeginOffset = pullResult.getNextBeginOffset();
            putMessageQueueOffset(queue, nextBeginOffset);
        }
    }

    /*
     * Wait for processQueueTable to initialize.
     */
    Thread.sleep(1000);
    consumer.shutdown();
    assertEquals(sendMsg, receiveMsg);
}
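getMessageQueueOffset and putMessageQueueOffset are helpers defined elsewhere in the test class. With a DefaultMQPullConsumer they would typically wrap fetchConsumeOffset and updateConsumeOffset, roughly as sketched below; this is an assumption about their shape, not the test's actual code, and it relies on the test's consumer field.

// Assumed shape of the test's offset helpers around DefaultMQPullConsumer.
private long getMessageQueueOffset(MessageQueue queue) throws MQClientException {
    // fetchConsumeOffset(queue, true) reads the offset from the broker store;
    // a fresh consumer group reports -1, which is normalized to 0 here.
    long offset = consumer.fetchConsumeOffset(queue, true);
    return offset < 0 ? 0 : offset;
}

private void putMessageQueueOffset(MessageQueue queue, long offset) throws MQClientException {
    // Persist the next begin offset so a later pull resumes after the
    // messages that were just consumed.
    consumer.updateConsumeOffset(queue, offset);
}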