Use of com.datatorrent.netlet.NetletThrowable in project apex-malhar by Apache: the FlumeSink class, method process().
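process() drains the control requests queued by the server thread, backs off when no client is connected or the connected client has stalled, sizes the next batch from the consumption feedback it has received, and then either replays persisted events or takes fresh events from the Flume channel. A NetletThrowable raised while writing to the client signals a transport failure, in which case the sink disconnects the client and backs off.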
/* Begin implementing Flume Sink interface */
@Override
@SuppressWarnings({"BroadCatchBlock", "TooBroadCatch", "UseSpecificCatch", "SleepWhileInLoop"})
public Status process() throws EventDeliveryException {
  Slice slice;
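
  // Drain every control request the server thread queued since the last call.
  // SEEK and COMMITTED also refresh the commit timestamp used by the stall check below.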
  synchronized (server.requests) {
    for (Request r : server.requests) {
      logger.debug("found {}", r);
      switch (r.type) {
        case SEEK:
          lastCommitEventTimeMillis = System.currentTimeMillis();
          slice = r.getAddress();
          playback = storage.retrieve(Arrays.copyOfRange(slice.buffer, slice.offset, slice.offset + slice.length));
          client = r.client;
          break;

        case COMMITTED:
          lastCommitEventTimeMillis = System.currentTimeMillis();
          slice = r.getAddress();
          storage.clean(Arrays.copyOfRange(slice.buffer, slice.offset, slice.offset + slice.length));
          break;

        case CONNECTED:
          logger.debug("Connected received, ignoring it!");
          break;

        case DISCONNECTED:
          if (r.client == client) {
            client = null;
            outstandingEventsCount = 0;
          }
          break;

        case WINDOWED:
          lastConsumedEventsCount = r.getEventCount();
          idleCount = r.getIdleCount();
          outstandingEventsCount -= lastConsumedEventsCount;
          break;

        case SERVER_ERROR:
          throw new IOError(null);

        default:
          logger.debug("Cannot understand the request {}", r);
          break;
      }
    }
    server.requests.clear();
  }
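
  // Back off when no client is connected, or when the connected client has not
  // committed anything within the configured timeout.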
  if (client == null) {
    logger.info("No client expressed interest yet to consume the events.");
    return Status.BACKOFF;
  } else if (System.currentTimeMillis() - lastCommitEventTimeMillis > commitEventTimeoutMillis) {
    logger.info("Client has not processed the workload given for the last {} milliseconds, so backing off.",
        System.currentTimeMillis() - lastCommitEventTimeMillis);
    return Status.BACKOFF;
  }
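
  // Size the next batch from the feedback delivered by WINDOWED requests:
  // grow it while the client reports idle windows, shrink it while events back up.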
  int maxTuples;
  // the following logic needs to be fixed... this is a quick put together.
  if (outstandingEventsCount < 0) {
    if (idleCount > 1) {
      maxTuples = (int)((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
    } else {
      maxTuples = (int)((1 + throughputAdjustmentFactor) * lastConsumedEventsCount);
    }
  } else if (outstandingEventsCount > lastConsumedEventsCount) {
    maxTuples = (int)((1 - throughputAdjustmentFactor) * lastConsumedEventsCount);
  } else {
    if (idleCount > 0) {
      maxTuples = (int)((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
      if (maxTuples <= 0) {
        maxTuples = minimumEventsPerTransaction;
      }
    } else {
      maxTuples = lastConsumedEventsCount;
    }
  }
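
  // Clamp the computed batch size to the configured per-transaction bounds.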
  if (maxTuples >= maximumEventsPerTransaction) {
    maxTuples = maximumEventsPerTransaction;
  } else if (maxTuples <= 0) {
    maxTuples = minimumEventsPerTransaction;
  }

  if (maxTuples > 0) {
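    // A SEEK request initialized playback: replay the events already persisted in
    // storage before taking anything new from the Flume channel.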
    if (playback != null) {
      try {
        int i = 0;
        do {
          if (!client.write(playback)) {
            retryWrite(playback, null);
          }
          outstandingEventsCount++;
          playback = storage.retrieveNext();
        } while (++i < maxTuples && playback != null);
      } catch (Exception ex) {
        logger.warn("Playback Failed", ex);
        if (ex instanceof NetletThrowable) {
          try {
            eventloop.disconnect(client);
          } finally {
            client = null;
            outstandingEventsCount = 0;
          }
        }
        return Status.BACKOFF;
      }
    } else {
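      // Normal path: take events from the Flume channel inside a transaction,
      // persist each one to storage, and forward it to the connected client.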
      int storedTuples = 0;
      Transaction t = getChannel().getTransaction();
      try {
        t.begin();
        Event e;
        while (storedTuples < maxTuples && (e = getChannel().take()) != null) {
          Slice event = codec.toByteArray(e);
          byte[] address = storage.store(event);
          if (address != null) {
            if (!client.write(address, event)) {
              retryWrite(address, event);
            }
            outstandingEventsCount++;
          } else {
            logger.debug("Detected the condition of recovery from flume crash!");
          }
          storedTuples++;
        }
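        // Flush storage before committing the channel transaction so the
        // persisted events are durable once Flume considers them delivered.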
        if (storedTuples > 0) {
          storage.flush();
        }
        t.commit();

        if (storedTuples > 0) {
          /* log less frequently */
          logger.debug("Transaction details maxTuples = {}, storedTuples = {}, outstanding = {}",
              maxTuples, storedTuples, outstandingEventsCount);
        }
      } catch (Error er) {
        t.rollback();
        throw er;
      } catch (Exception ex) {
        logger.error("Transaction Failed", ex);
        if (ex instanceof NetletRuntimeException && client != null) {
          try {
            eventloop.disconnect(client);
          } finally {
            client = null;
            outstandingEventsCount = 0;
          }
        }
        t.rollback();
        return Status.BACKOFF;
      } finally {
        t.close();
      }

      if (storedTuples == 0) {
        sleep();
      }
    }
  }

  return Status.READY;
}
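
Both catch blocks above react to a transport failure (NetletThrowable during playback, NetletRuntimeException during a channel transaction) the same way: disconnect the client and reset the outstanding-event count so that a later reconnect can resume via a SEEK request. A minimal sketch of that shared pattern as a helper method (dropClient is a hypothetical name, not part of the original class):

// Hypothetical helper extracting the duplicated disconnect-and-reset logic.
private void dropClient() {
  try {
    eventloop.disconnect(client);  // tear down the failed connection
  } finally {
    client = null;                 // the next process() call will back off
    outstandingEventsCount = 0;    // the count is meaningless without a client
  }
}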