Use of org.apache.flume.EventDeliveryException in project rocketmq-externals by apache.
The class RocketMQSink, method process().
@Override
public Status process() throws EventDeliveryException {
    Channel channel = getChannel();
    Transaction transaction = null;
    try {
        transaction = channel.getTransaction();
        transaction.begin();
        /*
         * Batch take: drain events from the channel until the batch is full
         * or maxProcessTime has elapsed. Note that an empty channel keeps
         * this loop polling until the timeout fires.
         */
        List<Event> events = new ArrayList<>();
        long beginTime = System.currentTimeMillis();
        while (true) {
            Event event = channel.take();
            if (event != null) {
                events.add(event);
            }
            if (events.size() == batchSize || System.currentTimeMillis() - beginTime > maxProcessTime) {
                break;
            }
        }
        if (events.isEmpty()) {
            sinkCounter.incrementBatchEmptyCount();
            transaction.rollback();
            return Status.BACKOFF;
        }
        /*
         * Async send: each callback counts the latch down, so the await
         * below blocks until every send has either succeeded or failed.
         */
        CountDownLatch latch = new CountDownLatch(events.size());
        AtomicInteger errorNum = new AtomicInteger();
        for (Event event : events) {
            byte[] body = event.getBody();
            Message message = new Message(topic, tag, body);
            if (log.isDebugEnabled()) {
                log.debug("Processing event, body={}", new String(body, "UTF-8"));
            }
            producer.send(message, new SendCallBackHandler(message, latch, errorNum));
        }
        latch.await();
        sinkCounter.addToEventDrainAttemptCount(events.size());
        if (errorNum.get() > 0) {
            // Any failed send poisons the whole batch: roll back and retry.
            log.error("errorNum={}, transaction will rollback", errorNum.get());
            transaction.rollback();
            return Status.BACKOFF;
        } else {
            transaction.commit();
            sinkCounter.addToEventDrainSuccessCount(events.size());
            return Status.READY;
        }
    } catch (Throwable e) {
        log.error("Failed to process event", e);
        if (transaction != null) {
            try {
                transaction.rollback();
            } catch (Throwable ex) {
                log.error("Failed to rollback transaction", ex);
                throw new EventDeliveryException("Failed to rollback transaction", ex);
            }
        }
        return Status.BACKOFF;
    } finally {
        if (transaction != null) {
            transaction.close();
        }
    }
}
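The method above ties RocketMQ's asynchronous send API back to the latch and error counter through SendCallBackHandler, whose source is not shown here. The following is a minimal sketch of what that handler plausibly looks like, built on RocketMQ's public SendCallback interface; the class body is an assumption, not the project's actual code.

// Hypothetical sketch of SendCallBackHandler; the real class lives in
// rocketmq-externals and may differ. It implements RocketMQ's SendCallback.
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

import org.apache.rocketmq.client.producer.SendCallback;
import org.apache.rocketmq.client.producer.SendResult;
import org.apache.rocketmq.common.message.Message;

public class SendCallBackHandler implements SendCallback {
    private final Message message;
    private final CountDownLatch latch;
    private final AtomicInteger errorNum;

    public SendCallBackHandler(Message message, CountDownLatch latch, AtomicInteger errorNum) {
        this.message = message;
        this.latch = latch;
        this.errorNum = errorNum;
    }

    @Override
    public void onSuccess(SendResult sendResult) {
        // Count down on success so process() can stop waiting.
        latch.countDown();
    }

    @Override
    public void onException(Throwable e) {
        // A failed send marks the batch for rollback in process(),
        // but still counts down so latch.await() cannot hang.
        errorNum.incrementAndGet();
        latch.countDown();
    }
}

Counting the latch down on both paths is what keeps latch.await() from blocking forever; a variant using latch.await(timeout, unit) would additionally guard against callbacks that never fire.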
Use of org.apache.flume.EventDeliveryException in project apex-malhar by apache.
The class FlumeSink, method process().
/* Begin implementing Flume Sink interface */
@Override
@SuppressWarnings({"BroadCatchBlock", "TooBroadCatch", "UseSpecificCatch", "SleepWhileInLoop"})
public Status process() throws EventDeliveryException {
    Slice slice;
    // First, drain the control requests the downstream client has queued up.
    synchronized (server.requests) {
        for (Request r : server.requests) {
            logger.debug("found {}", r);
            switch (r.type) {
                case SEEK:
                    lastCommitEventTimeMillis = System.currentTimeMillis();
                    slice = r.getAddress();
                    playback = storage.retrieve(Arrays.copyOfRange(slice.buffer, slice.offset, slice.offset + slice.length));
                    client = r.client;
                    break;
                case COMMITTED:
                    lastCommitEventTimeMillis = System.currentTimeMillis();
                    slice = r.getAddress();
                    storage.clean(Arrays.copyOfRange(slice.buffer, slice.offset, slice.offset + slice.length));
                    break;
                case CONNECTED:
                    logger.debug("Connected received, ignoring it!");
                    break;
                case DISCONNECTED:
                    if (r.client == client) {
                        client = null;
                        outstandingEventsCount = 0;
                    }
                    break;
                case WINDOWED:
                    lastConsumedEventsCount = r.getEventCount();
                    idleCount = r.getIdleCount();
                    outstandingEventsCount -= lastConsumedEventsCount;
                    break;
                case SERVER_ERROR:
                    throw new IOError(null);
                default:
                    logger.debug("Cannot understand the request {}", r);
                    break;
            }
        }
        server.requests.clear();
    }
    if (client == null) {
        logger.info("No client has expressed interest in consuming the events yet.");
        return Status.BACKOFF;
    } else if (System.currentTimeMillis() - lastCommitEventTimeMillis > commitEventTimeoutMillis) {
        logger.info("Client has not processed the workload given for the last {} milliseconds, so backing off.", System.currentTimeMillis() - lastCommitEventTimeMillis);
        return Status.BACKOFF;
    }
    int maxTuples;
    // the following logic needs to be fixed... this is a quick put together.
    if (outstandingEventsCount < 0) {
        if (idleCount > 1) {
            maxTuples = (int) ((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
        } else {
            maxTuples = (int) ((1 + throughputAdjustmentFactor) * lastConsumedEventsCount);
        }
    } else if (outstandingEventsCount > lastConsumedEventsCount) {
        maxTuples = (int) ((1 - throughputAdjustmentFactor) * lastConsumedEventsCount);
    } else {
        if (idleCount > 0) {
            maxTuples = (int) ((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
            if (maxTuples <= 0) {
                maxTuples = minimumEventsPerTransaction;
            }
        } else {
            maxTuples = lastConsumedEventsCount;
        }
    }
    // Clamp the computed batch size to the configured bounds.
    if (maxTuples >= maximumEventsPerTransaction) {
        maxTuples = maximumEventsPerTransaction;
    } else if (maxTuples <= 0) {
        maxTuples = minimumEventsPerTransaction;
    }
    if (maxTuples > 0) {
        if (playback != null) {
            // A SEEK request is pending: replay previously stored events.
            try {
                int i = 0;
                do {
                    if (!client.write(playback)) {
                        retryWrite(playback, null);
                    }
                    outstandingEventsCount++;
                    playback = storage.retrieveNext();
                } while (++i < maxTuples && playback != null);
            } catch (Exception ex) {
                logger.warn("Playback Failed", ex);
                if (ex instanceof NetletThrowable) {
                    try {
                        eventloop.disconnect(client);
                    } finally {
                        client = null;
                        outstandingEventsCount = 0;
                    }
                }
                return Status.BACKOFF;
            }
        } else {
            // Normal path: drain the Flume channel inside a transaction.
            int storedTuples = 0;
            Transaction t = getChannel().getTransaction();
            try {
                t.begin();
                Event e;
                while (storedTuples < maxTuples && (e = getChannel().take()) != null) {
                    Slice event = codec.toByteArray(e);
                    byte[] address = storage.store(event);
                    if (address != null) {
                        if (!client.write(address, event)) {
                            retryWrite(address, event);
                        }
                        outstandingEventsCount++;
                    } else {
                        logger.debug("Detected the condition of recovery from flume crash!");
                    }
                    storedTuples++;
                }
                if (storedTuples > 0) {
                    storage.flush();
                }
                t.commit();
                if (storedTuples > 0) {
                    /* log less frequently */
                    logger.debug("Transaction details maxTuples = {}, storedTuples = {}, outstanding = {}", maxTuples, storedTuples, outstandingEventsCount);
                }
            } catch (Error er) {
                t.rollback();
                throw er;
            } catch (Exception ex) {
                logger.error("Transaction Failed", ex);
                if (ex instanceof NetletRuntimeException && client != null) {
                    try {
                        eventloop.disconnect(client);
                    } finally {
                        client = null;
                        outstandingEventsCount = 0;
                    }
                }
                t.rollback();
                return Status.BACKOFF;
            } finally {
                t.close();
            }
            if (storedTuples == 0) {
                sleep();
            }
        }
    }
    return Status.READY;
}
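The adaptive batch-sizing block in the middle of process() (which the author flags as "a quick put together") is easier to reason about in isolation. The following is an illustrative extraction into a pure helper, not code from apex-malhar; the parameter names simply mirror the FlumeSink fields.

// Illustrative extraction of FlumeSink's batch-sizing logic; this helper is
// not part of apex-malhar. Parameter names mirror the sink's fields.
static int computeMaxTuples(long outstandingEventsCount, int lastConsumedEventsCount,
        int idleCount, double throughputAdjustmentFactor,
        int minimumEventsPerTransaction, int maximumEventsPerTransaction) {
    int maxTuples;
    if (outstandingEventsCount < 0) {
        // Consumer is ahead of what the sink thinks it sent: grow the batch,
        // and grow faster the longer the consumer has been idle. Math.max
        // collapses the original's idleCount > 1 / else split.
        int idle = Math.max(idleCount, 1);
        maxTuples = (int) ((1 + throughputAdjustmentFactor * idle) * lastConsumedEventsCount);
    } else if (outstandingEventsCount > lastConsumedEventsCount) {
        // The sink is outrunning the consumer: shrink the batch.
        maxTuples = (int) ((1 - throughputAdjustmentFactor) * lastConsumedEventsCount);
    } else if (idleCount > 0) {
        // Consumer is keeping up and has idle capacity: grow proportionally.
        maxTuples = (int) ((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
        if (maxTuples <= 0) {
            maxTuples = minimumEventsPerTransaction;
        }
    } else {
        // Steady state: repeat the last batch size.
        maxTuples = lastConsumedEventsCount;
    }
    // Final clamp, as in process(): cap at the maximum, and fall back to the
    // minimum only when the computed value is non-positive.
    if (maxTuples >= maximumEventsPerTransaction) {
        return maximumEventsPerTransaction;
    }
    return maxTuples <= 0 ? minimumEventsPerTransaction : maxTuples;
}

Given the same inputs this reproduces the decisions in process(). Note that the final clamp does not enforce a floor of minimumEventsPerTransaction in general: a small positive value such as 1 passes through unchanged, and only non-positive results are bumped up to the minimum.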