Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.
The class AbstractFlumeInputOperator, method emitTuples.
@Override
public void emitTuples()
{
  int i = handoverBuffer.size();
  if (i > 0 && eventCounter < maxEventsPerWindow) {
    // Emit all but one of the available slices (bounded by maxEventsPerWindow);
    // the final slice is handled below so its recovery address can be captured.
    while (--i > 0 && eventCounter < maxEventsPerWindow - 1) {
      final Slice slice = handoverBuffer.poll();
      // The first 8 bytes of each slice carry the storage address, not event data.
      slice.offset += 8;
      slice.length -= 8;
      T convert = convert((Event)codec.fromByteArray(slice));
      if (convert == null) {
        drop.emit(slice);
      } else {
        output.emit(convert);
      }
      eventCounter++;
    }
    final Slice slice = handoverBuffer.poll();
    slice.offset += 8;
    slice.length -= 8;
    T convert = convert((Event)codec.fromByteArray(slice));
    if (convert == null) {
      drop.emit(slice);
    } else {
      output.emit(convert);
    }
    eventCounter++;
    // Remember the address of the last emitted event for committed-window recovery.
    address = Arrays.copyOfRange(slice.buffer, slice.offset - 8, slice.offset);
  }
}
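Each slice in handoverBuffer is framed as an 8-byte storage address followed by the codec-encoded Flume event, which is why the operator advances offset by 8 before decoding and reads offset - 8 to recover the address. A minimal sketch making that framing explicit; AddressedPayloadSketch and its method names are hypothetical helpers, not part of apex-malhar:

import java.util.Arrays;

// Hypothetical helper making the framing explicit: an 8-byte storage
// address followed by the serialized Flume event.
public class AddressedPayloadSketch
{
  public static byte[] addressOf(byte[] buffer, int offset)
  {
    // The first 8 bytes at offset are the recovery address written by the sink.
    return Arrays.copyOfRange(buffer, offset, offset + 8);
  }

  public static byte[] eventBytesOf(byte[] buffer, int offset, int length)
  {
    // The remainder is the serialized event, exactly what offset += 8 skips to above.
    return Arrays.copyOfRange(buffer, offset + 8, offset + length);
  }
}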
Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.
The class FlumeSink, method configure.
/* End implementing Flume Sink interface */

/* Begin Configurable Interface */
@Override
public void configure(Context context)
{
  hostname = context.getString(HOSTNAME_STRING, HOSTNAME_DEFAULT);
  port = context.getInteger("port", 0);
  id = context.getString("id");
  if (id == null) {
    id = getName();
  }
  acceptedTolerance = context.getLong("acceptedTolerance", ACCEPTED_TOLERANCE);
  sleepMillis = context.getLong("sleepMillis", 5L);
  throughputAdjustmentFactor = context.getInteger("throughputAdjustmentPercent", 5) / 100.0;
  maximumEventsPerTransaction = context.getInteger("maximumEventsPerTransaction", 10000);
  minimumEventsPerTransaction = context.getInteger("minimumEventsPerTransaction", 100);
  commitEventTimeoutMillis = context.getLong("commitEventTimeoutMillis", Long.MAX_VALUE);

  @SuppressWarnings("unchecked")
  Discovery<byte[]> ldiscovery = configure("discovery", Discovery.class, context);
  if (ldiscovery == null) {
    logger.warn("Discovery agent not configured for the sink!");
    // Fall back to a no-op discovery that only logs advertise/unadvertise calls.
    discovery = new Discovery<byte[]>()
    {
      @Override
      public void unadvertise(Service<byte[]> service)
      {
        logger.debug("Sink {} stopped listening on {}:{}", service.getId(), service.getHost(), service.getPort());
      }

      @Override
      public void advertise(Service<byte[]> service)
      {
        logger.debug("Sink {} started listening on {}:{}", service.getId(), service.getHost(), service.getPort());
      }

      @Override
      @SuppressWarnings("unchecked")
      public Collection<Service<byte[]>> discover()
      {
        return Collections.EMPTY_SET;
      }
    };
  } else {
    discovery = ldiscovery;
  }

  storage = configure("storage", Storage.class, context);
  if (storage == null) {
    logger.warn("storage key missing... FlumeSink may lose data!");
    // Fall back to a no-op storage: events are forwarded but never persisted.
    storage = new Storage()
    {
      @Override
      public byte[] store(Slice slice)
      {
        return null;
      }

      @Override
      public byte[] retrieve(byte[] identifier)
      {
        return null;
      }

      @Override
      public byte[] retrieveNext()
      {
        return null;
      }

      @Override
      public void clean(byte[] identifier)
      {
      }

      @Override
      public void flush()
      {
      }
    };
  }

  @SuppressWarnings("unchecked")
  StreamCodec<Event> lCodec = configure("codec", StreamCodec.class, context);
  if (lCodec == null) {
    codec = new EventCodec();
  } else {
    codec = lCodec;
  }
}
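Every key read above maps to a property on the sink's stanza in the Flume agent configuration. A minimal wiring sketch, assuming FlumeSink and setName (from Flume's NamedComponent) are on the classpath and that HOSTNAME_STRING resolves to "hostname", which this excerpt does not show:

import org.apache.flume.Context;

public class FlumeSinkWiringSketch
{
  public static void main(String[] args)
  {
    // Keys mirror the ones read in configure() above; values are illustrative.
    Context context = new Context();
    context.put("hostname", "flume-host.example.com"); // assumes HOSTNAME_STRING is "hostname"
    context.put("port", "9098");
    context.put("id", "sink1");
    context.put("sleepMillis", "5");
    context.put("throughputAdjustmentPercent", "5");
    context.put("maximumEventsPerTransaction", "10000");
    context.put("minimumEventsPerTransaction", "100");
    // Leaving out "discovery" and "storage" exercises the no-op fallbacks above.

    FlumeSink sink = new FlumeSink();
    sink.setName("sink1");
    sink.configure(context);
  }
}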
Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.
The class EventCodec, method toByteArray.
@Override
public Slice toByteArray(Event event)
{
  ByteArrayOutputStream os = new ByteArrayOutputStream();
  Output output = new Output(os);

  Map<String, String> headers = event.getHeaders();
  // Kryo is told to write a HashMap, so copy any other Map implementation first.
  if (headers != null && headers.getClass() != HashMap.class) {
    HashMap<String, String> tmp = new HashMap<String, String>(headers.size());
    tmp.putAll(headers);
    headers = tmp;
  }
  kryo.writeObjectOrNull(output, headers, HashMap.class);
  kryo.writeObjectOrNull(output, event.getBody(), byte[].class);
  output.flush();
  final byte[] bytes = os.toByteArray();
  return new Slice(bytes, 0, bytes.length);
}
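The decode side is symmetric: read the headers HashMap, then the body, in the same order they were written. A sketch of what fromByteArray plausibly does under that assumption; this is not the verbatim apex-malhar implementation:

import java.util.HashMap;

import org.apache.flume.Event;
import org.apache.flume.event.EventBuilder;

import com.datatorrent.netlet.util.Slice;
import com.esotericsoftware.kryo.Kryo;
import com.esotericsoftware.kryo.io.Input;

// Sketch of the inverse of toByteArray, mirroring the field order written above.
public class EventDecodeSketch
{
  private final Kryo kryo = new Kryo();

  public Event fromByteArraySketch(Slice fragment)
  {
    Input input = new Input(fragment.buffer, fragment.offset, fragment.length);
    @SuppressWarnings("unchecked")
    HashMap<String, String> headers = kryo.readObjectOrNull(input, HashMap.class);
    byte[] body = kryo.readObjectOrNull(input, byte[].class);
    return EventBuilder.withBody(body, headers);
  }
}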
Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.
The class FlumeSink, method process.
/* Begin implementing Flume Sink interface */
@Override
@SuppressWarnings({"BroadCatchBlock", "TooBroadCatch", "UseSpecificCatch", "SleepWhileInLoop"})
public Status process() throws EventDeliveryException
{
  Slice slice;
  synchronized (server.requests) {
    for (Request r : server.requests) {
      logger.debug("found {}", r);
      switch (r.type) {
        case SEEK:
          lastCommitEventTimeMillis = System.currentTimeMillis();
          slice = r.getAddress();
          playback = storage.retrieve(Arrays.copyOfRange(slice.buffer, slice.offset, slice.offset + slice.length));
          client = r.client;
          break;

        case COMMITTED:
          lastCommitEventTimeMillis = System.currentTimeMillis();
          slice = r.getAddress();
          storage.clean(Arrays.copyOfRange(slice.buffer, slice.offset, slice.offset + slice.length));
          break;

        case CONNECTED:
          logger.debug("Connected received, ignoring it!");
          break;

        case DISCONNECTED:
          if (r.client == client) {
            client = null;
            outstandingEventsCount = 0;
          }
          break;

        case WINDOWED:
          lastConsumedEventsCount = r.getEventCount();
          idleCount = r.getIdleCount();
          outstandingEventsCount -= lastConsumedEventsCount;
          break;

        case SERVER_ERROR:
          throw new IOError(null);

        default:
          logger.debug("Cannot understand the request {}", r);
          break;
      }
    }
    server.requests.clear();
  }

  if (client == null) {
    logger.info("No client expressed interest yet to consume the events.");
    return Status.BACKOFF;
  } else if (System.currentTimeMillis() - lastCommitEventTimeMillis > commitEventTimeoutMillis) {
    logger.info("Client has not processed the workload given for the last {} milliseconds, so backing off.",
        System.currentTimeMillis() - lastCommitEventTimeMillis);
    return Status.BACKOFF;
  }

  int maxTuples;
  // the following logic needs to be fixed... this is a quick put together.
  if (outstandingEventsCount < 0) {
    if (idleCount > 1) {
      maxTuples = (int)((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
    } else {
      maxTuples = (int)((1 + throughputAdjustmentFactor) * lastConsumedEventsCount);
    }
  } else if (outstandingEventsCount > lastConsumedEventsCount) {
    maxTuples = (int)((1 - throughputAdjustmentFactor) * lastConsumedEventsCount);
  } else {
    if (idleCount > 0) {
      maxTuples = (int)((1 + throughputAdjustmentFactor * idleCount) * lastConsumedEventsCount);
      if (maxTuples <= 0) {
        maxTuples = minimumEventsPerTransaction;
      }
    } else {
      maxTuples = lastConsumedEventsCount;
    }
  }

  // Clamp the batch size to the configured per-transaction bounds.
  if (maxTuples >= maximumEventsPerTransaction) {
    maxTuples = maximumEventsPerTransaction;
  } else if (maxTuples <= 0) {
    maxTuples = minimumEventsPerTransaction;
  }

  if (maxTuples > 0) {
    if (playback != null) {
      // Replay path: resend previously stored events requested by a SEEK.
      try {
        int i = 0;
        do {
          if (!client.write(playback)) {
            retryWrite(playback, null);
          }
          outstandingEventsCount++;
          playback = storage.retrieveNext();
        } while (++i < maxTuples && playback != null);
      } catch (Exception ex) {
        logger.warn("Playback Failed", ex);
        if (ex instanceof NetletThrowable) {
          try {
            eventloop.disconnect(client);
          } finally {
            client = null;
            outstandingEventsCount = 0;
          }
        }
        return Status.BACKOFF;
      }
    } else {
      // Normal path: drain events from the channel inside a transaction.
      int storedTuples = 0;
      Transaction t = getChannel().getTransaction();
      try {
        t.begin();
        Event e;
        while (storedTuples < maxTuples && (e = getChannel().take()) != null) {
          Slice event = codec.toByteArray(e);
          byte[] address = storage.store(event);
          if (address != null) {
            if (!client.write(address, event)) {
              retryWrite(address, event);
            }
            outstandingEventsCount++;
          } else {
            logger.debug("Detected the condition of recovery from flume crash!");
          }
          storedTuples++;
        }
        if (storedTuples > 0) {
          storage.flush();
        }
        t.commit();
        if (storedTuples > 0) {
          /* log less frequently */
          logger.debug("Transaction details maxTuples = {}, storedTuples = {}, outstanding = {}",
              maxTuples, storedTuples, outstandingEventsCount);
        }
      } catch (Error er) {
        t.rollback();
        throw er;
      } catch (Exception ex) {
        logger.error("Transaction Failed", ex);
        if (ex instanceof NetletRuntimeException && client != null) {
          try {
            eventloop.disconnect(client);
          } finally {
            client = null;
            outstandingEventsCount = 0;
          }
        }
        t.rollback();
        return Status.BACKOFF;
      } finally {
        t.close();
      }
      if (storedTuples == 0) {
        sleep();
      }
    }
  }
  return Status.READY;
}
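The batch-sizing arithmetic above adapts maxTuples to how well the client kept up during the last control window: grow when the client is ahead or idle, shrink when it falls behind, then clamp. The same policy extracted into a standalone sketch; the method and parameter names are illustrative, not from apex-malhar:

// Illustrative extraction of the adaptive batch-size policy used in process().
public static int nextBatchSize(int outstanding, int lastConsumed, int idleCount,
    double adjustmentFactor, int minPerTransaction, int maxPerTransaction)
{
  int maxTuples;
  if (outstanding < 0) {
    // The client consumed more events than are on the books: grow,
    // scaled by how many idle cycles the client reported.
    double growth = idleCount > 1 ? adjustmentFactor * idleCount : adjustmentFactor;
    maxTuples = (int)((1 + growth) * lastConsumed);
  } else if (outstanding > lastConsumed) {
    // The client is falling behind: shrink the next batch.
    maxTuples = (int)((1 - adjustmentFactor) * lastConsumed);
  } else if (idleCount > 0) {
    // The client had idle cycles: it can absorb more.
    maxTuples = (int)((1 + adjustmentFactor * idleCount) * lastConsumed);
  } else {
    maxTuples = lastConsumed;
  }
  // Clamp to the configured per-transaction bounds.
  if (maxTuples >= maxPerTransaction) {
    return maxPerTransaction;
  }
  return maxTuples <= 0 ? minPerTransaction : maxTuples;
}

For example, with lastConsumed = 1000, outstanding = 1500, and adjustmentFactor = 0.05 (the 5% default), the next batch shrinks to 950 events; two idle cycles with no backlog would grow it to 1100.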
Use of com.datatorrent.netlet.util.Slice in project apex-malhar by apache.
The class RawEvent, method from.
public static RawEvent from(byte[] row, byte separator)
{
  final int rowsize = row.length;

  /*
   * Let's get the guid out of the current record.
   */
  int sliceLengh = -1;
  while (++sliceLengh < rowsize) {
    if (row[sliceLengh] == separator) {
      break;
    }
  }
  int i = sliceLengh + 1;

  /* Let's parse the date. */
  int dateStart = i;
  while (i < rowsize) {
    if (row[i++] == separator) {
      long time = DATE_PARSER.parseMillis(new String(row, dateStart, i - dateStart - 1));
      RawEvent event = new RawEvent();
      // The guid Slice references the original row buffer; no copy is made.
      event.guid = new Slice(row, 0, sliceLengh);
      event.time = time;
      event.dimensionsOffset = i;
      return event;
    }
  }
  return null;
}
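A row is expected to look like <guid><separator><date><separator><dimensions...>. A usage sketch under two assumptions this excerpt does not confirm: the separator is the byte 0x01, and the date string matches whatever pattern DATE_PARSER was built with (otherwise parseMillis throws):

import java.nio.charset.StandardCharsets;

public class RawEventUsageSketch
{
  public static void main(String[] args)
  {
    // Hypothetical row; the real date pattern depends on how DATE_PARSER is configured.
    byte separator = 0x01;
    byte[] row = "user-42\u00012015-01-02 03:04:05\u0001country=US,device=mobile"
        .getBytes(StandardCharsets.UTF_8);

    RawEvent event = RawEvent.from(row, separator);
    if (event != null) {
      // event.guid is a zero-copy Slice over row[0..6] ("user-42");
      // event.dimensionsOffset points just past the second separator.
      System.out.println("time=" + event.time + " dimensionsOffset=" + event.dimensionsOffset);
    }
  }
}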