Example usage of com.linkedin.databus.core.DbusEventBuffer.DbusEventIterator in the LinkedIn databus project: class EventLogReader, method read().
/**
 * Replays the configured event-log files into the event buffer and rebuilds a
 * {@link Checkpoint} reflecting what was consumed.
 *
 * <p>While reading, both the checkpoint and this reader are registered as internal
 * listeners so they observe every event. Afterwards, if events were seen, the first
 * (possibly partial) window may be deleted and the buffer tail is trimmed back to the
 * last end-of-period (EOP) marker so the buffer ends on a complete window.
 *
 * @return the checkpoint built from the replayed events; a flexible checkpoint with no
 *         further state if reading is disabled
 * @throws RuntimeException if a log file is missing, the first window cannot be
 *         deleted, or the recorded EOP offset does not point at an EOP marker
 */
public Checkpoint read() {
  Checkpoint checkPoint = new Checkpoint();
  checkPoint.setFlexible();
  if (_enabled) {
    // Both the checkpoint and this reader observe every event as it is read in.
    _eventBuffer.addInternalListener(checkPoint);
    _eventBuffer.addInternalListener(this);
    if (_filesToRead != null) {
      for (File f : _filesToRead) {
        FileChannel readChannel;
        try {
          readChannel = new FileInputStream(f).getChannel();
        } catch (FileNotFoundException e) {
          throw new RuntimeException(e);
        }
        int numEvents = 0;
        try {
          numEvents = _eventBuffer.readEvents(readChannel, getEncoding(f));
        } catch (InvalidEventException e) {
          // FIX: was an auto-generated printStackTrace() TODO; log through LOG and
          // continue with the next file (best-effort replay, same control flow as before)
          LOG.error("invalid event while replaying " + f.getAbsolutePath(), e);
        } finally {
          // FIX: the channel (and its underlying FileInputStream) was leaked
          try {
            readChannel.close();
          } catch (IOException ioe) {
            LOG.warn("could not close channel for " + f.getAbsolutePath(), ioe);
          }
        }
        LOG.info("Read " + numEvents + " events");
      }
      _eventBuffer.removeInternalListener(checkPoint);
      _eventBuffer.removeInternalListener(this);
      LOG.info("Checkpoint = " + checkPoint);
      if (_eventSeen) {
        DbusEventIterator iter = _eventBuffer.acquireIterator("EventLogReader:firstEvent");
        try {
          DbusEvent event = iter.next();
          String firstEventContent = event.toString();
          // if we didn't wrap the buffer, and the first event is not an eop marker, delete the first window
          if (_firstEventContent.equalsIgnoreCase(firstEventContent) && !event.isEndOfPeriodMarker()) {
            long result = _eventBuffer.deleteFirstWindow();
            if (result < 0) {
              throw new RuntimeException("eventBuffer could not delete first window");
            }
          }
        } finally {
          _eventBuffer.releaseIterator(iter);
        }
        if (_lastEopOffset >= 0) {
          // Re-position at the last EOP marker seen during replay; anything after it is
          // an incomplete window, so the tail is moved to just past the marker.
          iter = _eventBuffer.new DbusEventIterator(this._lastEopOffset, _eventBuffer.getTail(), "EventLogReader:lastEOP");
          try {
            DbusEvent event = iter.next();
            if (!event.isEndOfPeriodMarker()) {
              throw new RuntimeException("Tried reading end of period marker, but failed");
            }
            if (iter.hasNext()) {
              _eventBuffer.setTail(iter.getCurrentPosition());
            }
          } catch (NoSuchElementException e) {
            LOG.error("NoSuchElementException at : " + _eventBuffer.getBufferPositionParser().toString(_lastEopOffset));
            throw e;
          } finally {
            _eventBuffer.releaseIterator(iter);
          }
        }
      }
    }
  }
  return checkPoint;
}
Example usage of com.linkedin.databus.core.DbusEventBuffer.DbusEventIterator in the LinkedIn databus project: class ReadEventsTestParams, method testInternalIteratorSingleBufFull().
/**
 * Verify that Iterator's CP never gets ahead of Iterator's tail, even at
 * the end of the buffer (client, NO-OVERWRITE policy).
 *
 * Scenario: fill the buffer, read through one iterator, append more windows,
 * read them through a second iterator without removing, then remove via the
 * first iterator and confirm the remaining events drain the buffer cleanly.
 */
@Test
public void testInternalIteratorSingleBufFull() throws Exception {
final Logger log = Logger.getLogger("TestDbusEventBufferIterator.testInternalIteratorSingleBufFull");
//log.setLevel(Level.DEBUG);
log.info("starting");
// Small single buffer (845 bytes) so the generated windows fill it completely.
final DbusEventBuffer dbusBuf = new DbusEventBuffer(TestDbusEventBuffer.getConfig(845, 100000, 256, 500, AllocationPolicy.HEAP_MEMORY, QueuePolicy.BLOCK_ON_WRITE, AssertLevel.NONE));
dbusBuf.start(0);
log.info("append a full buffer");
DbusEventGenerator generator = new DbusEventGenerator(10);
final Vector<DbusEvent> events = new Vector<DbusEvent>();
// 6 events across 2 windows, 120-byte payloads — sized to fill the 845-byte buffer.
generator.generateEvents(6, 2, 120, 38, events);
log.debug(dbusBuf.toShortString());
dbusBuf.assertBuffersLimits();
DbusEventAppender appender = new DbusEventAppender(events, dbusBuf, null, 1.0, false, -1);
appender.run();
log.info("verify new iterator");
// A freshly acquired iterator must start at the buffer head with its tail at the buffer tail.
DbusEventIterator iter1 = dbusBuf.acquireIterator("testInternalIteratorSingleBufFull");
log.debug("it1=" + iter1);
Assert.assertEquals(iter1.getCurrentPosition(), dbusBuf.getHead());
Assert.assertEquals(iter1._iteratorTail.getPosition(), dbusBuf.getTail());
Assert.assertEquals(dbusBuf._busyIteratorPool.size(), 1);
Assert.assertTrue(iter1.hasNext());
// First event at the head is an end-of-period marker (head points at a window boundary here).
DbusEvent e = iter1.next();
Assert.assertTrue(e.isEndOfPeriodMarker());
Assert.assertTrue(iter1.hasNext());
dbusBuf.assertBuffersLimits();
log.info("make sure we can read some events");
readAndCompareIteratorEvents(iter1, events, 0, 6, true, true, true);
log.debug("after read: " + dbusBuf.toShortString());
log.debug(iter1);
log.info("append more windows");
final Vector<DbusEvent> events2 = new Vector<DbusEvent>();
generator = new DbusEventGenerator(200);
generator.generateEvents(2, 1, 120, 39, events2);
appender = new DbusEventAppender(events2, dbusBuf, null, 1.0, false, -1);
appender.run();
log.debug("after 2 more events added: " + dbusBuf.toShortString());
log.debug(iter1);
// Read the new events but do not remove them from the buffer (remove flag is false).
readAndCompareIteratorEvents(iter1, events2, 0, 2, true, false, true);
log.debug("after 2 more events read: " + dbusBuf.toShortString());
log.debug(iter1);
dbusBuf.assertBuffersLimits();
// create another iterator - make sure it can read too
DbusEventIterator iter2 = dbusBuf.acquireIterator("testInternalIteratorSingleBufFull2");
long iCWP = iter2.getCurrentPosition();
// Sanitize because the raw head position may need normalization before comparison.
long head = dbusBuf.getBufferPositionParser().sanitize(dbusBuf.getHead(), dbusBuf.getBuffer());
Assert.assertEquals(iCWP, head);
Assert.assertEquals(iter2._iteratorTail.getPosition(), dbusBuf.getTail());
Assert.assertEquals(dbusBuf._busyIteratorPool.size(), 2);
Assert.assertTrue(iter2.hasNext());
log.debug("iter2=" + iter2);
// read same events and don't remove
readAndCompareIteratorEvents(iter2, events2, 0, 2, true, false, true);
dbusBuf.releaseIterator(iter2);
dbusBuf.assertBuffersLimits();
log.debug("iter1=" + iter1);
// Now actually free the space consumed so far via the first iterator.
iter1.remove();
log.debug("buf (after read)=" + dbusBuf);
generator = new DbusEventGenerator(300);
final Vector<DbusEvent> events3 = new Vector<DbusEvent>();
generator.generateEvents(4, 2, 120, 39, events3);
appender = new DbusEventAppender(events3, dbusBuf, null, 1.0, false, -1);
appender.run();
dbusBuf.assertBuffersLimits();
log.info("make sure we can read remainder of events");
readAndCompareIteratorEvents(iter1, events3, 0, 4, false, true, true);
dbusBuf.assertBuffersLimits();
// Everything was read and removed, so the buffer must be empty again.
Assert.assertTrue(dbusBuf.empty());
dbusBuf.releaseIterator(iter1);
log.info("done");
}
Example usage of com.linkedin.databus.core.DbusEventBuffer.DbusEventIterator in the LinkedIn databus project: class ReadEventsTestParams, method testGetStreamedEventsWithRegression().
/**
 * Streams a batch of events from the buffer into a temp file, reads them back into a
 * check-buffer, then "regresses" the checkpoint to an earlier window SCN and verifies
 * that streaming from the regressed checkpoint still succeeds and stays within the
 * requested batch size.
 *
 * Review fixes (assertions untouched):
 * - the first {@code catch (ScnNotFoundException)} was empty (silent swallow) — now logged
 * - the second catch logged only a message — now includes the exception
 * - read/write channels were leaked — now closed when no longer needed
 */
@Test
public void testGetStreamedEventsWithRegression() throws IOException, InvalidEventException, InvalidConfigException, OffsetNotFoundException {
  DbusEventBuffer dbuf = new DbusEventBuffer(getConfig(10000000, DbusEventBuffer.Config.DEFAULT_INDIVIDUAL_BUFFER_SIZE, 100000, 1000000, AllocationPolicy.HEAP_MEMORY, QueuePolicy.OVERWRITE_ON_WRITE, AssertLevel.ALL));
  int numEntries = 50000;
  int eventWindowSize = 20;
  HashMap<Long, KeyValue> testDataMap = new HashMap<Long, KeyValue>(20000);
  dbuf.start(0);
  // Populate the buffer: one window of eventWindowSize events per SCN step of 20.
  for (long i = 1; i < numEntries; i += 20) {
    //LOG.info("Iteration:"+i);
    DbusEventKey key = new DbusEventKey(RngUtils.randomLong());
    String value = RngUtils.randomString(20);
    dbuf.startEvents();
    for (int j = 0; j < eventWindowSize; ++j) {
      assertTrue(dbuf.appendEvent(key, pPartitionId, lPartitionId, timeStamp, srcId, schemaId, value.getBytes(Charset.defaultCharset()), false));
      testDataMap.put(i, new KeyValue(key, value));
    }
    dbuf.endEvents(i);
  }
  long minDbusEventBufferScn = dbuf.getMinScn();
  //TODO (medium) try out corner cases, more batches, etc.
  int batchFetchSize = 5000;
  // Checkpoint at the oldest available window.
  Checkpoint cp = new Checkpoint();
  cp.setWindowScn(minDbusEventBufferScn);
  cp.setWindowOffset(0);
  cp.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
  WritableByteChannel writeChannel = null;
  File directory = new File(".");
  File writeFile = File.createTempFile("test", ".dbus", directory);
  try {
    writeChannel = Utils.openChannel(writeFile, true);
    StreamEventsArgs args = new StreamEventsArgs(batchFetchSize);
    dbuf.streamEvents(cp, writeChannel, args);
  } catch (ScnNotFoundException e) {
    // FIX: was an empty catch block — record the failure instead of hiding it
    LOG.error("streamEvents threw ScnNotFoundException", e);
  } finally {
    // FIX: close in finally so the channel is released even if streamEvents throws
    if (writeChannel != null) {
      writeChannel.close();
    }
  }
  LOG.debug(writeFile.canRead());
  ReadableByteChannel readChannel = Utils.openChannel(writeFile, false);
  DbusEventBuffer checkDbusEventBuffer = new DbusEventBuffer(getConfig(50000, DbusEventBuffer.Config.DEFAULT_INDIVIDUAL_BUFFER_SIZE, 100000, 10000, AllocationPolicy.HEAP_MEMORY, QueuePolicy.OVERWRITE_ON_WRITE, AssertLevel.ALL));
  int messageSize = 0;
  long lastWindowScn = 0;
  checkDbusEventBuffer.clear();
  checkDbusEventBuffer.readEvents(readChannel);
  // FIX: first read channel was never closed
  readChannel.close();
  LOG.debug("Reading events");
  DbusEventIterator eventIterator = checkDbusEventBuffer.acquireIterator("check");
  DbusEventInternalWritable e = null;
  while (eventIterator.hasNext()) {
    e = eventIterator.next();
    //LOG.info(e.scn()+"," + e.windowScn());
    messageSize += e.size();
    lastWindowScn = e.sequence();
  }
  // The streamed batch must respect the requested fetch size.
  assertTrue(messageSize <= batchFetchSize);
  LOG.debug("Reading events 2");
  // now we regress
  cp.setWindowScn(lastWindowScn - 5);
  cp.setWindowOffset(0);
  checkDbusEventBuffer.releaseIterator(eventIterator);
  LOG.debug("Reading events 3");
  writeFile.delete();
  writeFile = File.createTempFile("test", ".dbus", directory);
  try {
    writeChannel = Utils.openChannel(writeFile, true);
    StreamEventsArgs args = new StreamEventsArgs(batchFetchSize);
    dbuf.streamEvents(cp, writeChannel, args);
  } catch (ScnNotFoundException e1) {
    // FIX: include the exception so the stack trace is preserved in the log
    LOG.error("mainDbus threw ScnNotFound exception", e1);
  } finally {
    // FIX: second write channel was never closed
    if (writeChannel != null) {
      writeChannel.close();
    }
  }
  LOG.debug("mainDbus Read status a = " + dbuf.getReadStatus());
  assertEquals(0, dbuf.getReadStatus());
  LOG.debug("Reading events 4");
  LOG.debug(writeFile.canRead());
  readChannel = Utils.openChannel(writeFile, false);
  checkDbusEventBuffer.clear();
  messageSize = 0;
  lastWindowScn = 0;
  checkDbusEventBuffer.readEvents(readChannel);
  // FIX: second read channel was never closed
  readChannel.close();
  LOG.debug("Reading events 5");
  eventIterator = checkDbusEventBuffer.acquireIterator("eventIterator");
  LOG.debug("Reading events 6");
  while (eventIterator.hasNext()) {
    e = eventIterator.next();
    //LOG.info(e.scn()+"," + e.windowScn());
    //assertEquals(startScn+messageOffset + messageNum, e.scn());
    messageSize += e.size();
    lastWindowScn = e.sequence();
    //LOG.info("Reading events...");
  }
  assertTrue(messageSize <= batchFetchSize);
  LOG.debug("Reading events 7");
  checkDbusEventBuffer.releaseIterator(eventIterator);
  LOG.debug("mainDbus Read status = " + dbuf.getReadStatus());
  writeFile.delete();
}
Example usage of com.linkedin.databus.core.DbusEventBuffer.DbusEventIterator in the LinkedIn databus project: class ReadEventsTestParams, method testReadEventOverlap().
/*
 * This testcase is to recreate the bug where pull thread incorrectly writes to the head of
 * the iterator when in BLOCK_ON_WRITE mode. The error was because readEvents() incorrectly
 * relies on remaining() to give an accurate value.
 *
 * Review fixes: the block comment above was misplaced between the `public` modifier and the
 * return type; InterruptedException now re-interrupts the thread; redundant printStackTrace()
 * removed where the exception is already rethrown with its cause.
 */
@Test
public void testReadEventOverlap() throws Exception {
  /*
   * Recreate the head and tail position such that the eventBuffer is in the below state
   * --------------------------------------------------------------
   * ^ ^ ^ ^
   * | | | |
   * 0 head tail capacity
   *
   * The space between tail and capacity is such that with no internal fragmentation, the free space
   * will be sufficient to store 2 events but with internal fragmentation, the free space will not be
   * enough. In this case, the readEvents should block until an event is removed by the other thread.
   */
  final DbusEventBuffer dbusBuf = new DbusEventBuffer(getConfig(1000, 1000, 100, 500, AllocationPolicy.HEAP_MEMORY, QueuePolicy.BLOCK_ON_WRITE, AssertLevel.NONE));
  BufferPositionParser parser = dbusBuf.getBufferPositionParser();
  DbusEventGenerator generator = new DbusEventGenerator();
  Vector<DbusEvent> events = new Vector<DbusEvent>();
  generator.generateEvents(11, 11, 100, 10, events);
  // Add events to the EventBuffer
  DbusEventAppender appender = new DbusEventAppender(events, dbusBuf, null);
  // running in the same thread
  appender.run();
  LOG.info("Head:" + dbusBuf.getHead() + ",Tail:" + dbusBuf.getTail());
  assertEquals("Head Check", 0, dbusBuf.getHead());
  assertEquals("Tail Check", 903, dbusBuf.getTail());
  // Remove the first event
  DbusEventIterator itr = dbusBuf.acquireIterator("dummy");
  assertTrue(itr.hasNext());
  DbusEvent event = itr.next();
  assertTrue(event.isValid());
  itr.remove();
  LOG.info("Head:" + parser.toString(dbusBuf.getHead()) + ",Tail:" + parser.toString(dbusBuf.getTail()));
  assertEquals("Head Check", 61, dbusBuf.getHead());
  assertEquals("Tail Check", 903, dbusBuf.getTail());
  for (DbusEvent e : events) {
    assertTrue("invalid event", e.isValid());
  }
  // set up the ReadChannel with 2 events
  ByteArrayOutputStream oStream = new ByteArrayOutputStream();
  WritableByteChannel oChannel = Channels.newChannel(oStream);
  for (int i = 0; i < 2; ++i) {
    ((DbusEventInternalReadable) events.get(i)).writeTo(oChannel, Encoding.BINARY);
  }
  byte[] writeBytes = oStream.toByteArray();
  ByteArrayInputStream iStream = new ByteArrayInputStream(writeBytes);
  final ReadableByteChannel rChannel = Channels.newChannel(iStream);
  // Create a Thread to call readEvents on the channel
  Runnable writer = new Runnable() {
    @Override
    public void run() {
      try {
        dbusBuf.readEvents(rChannel);
      } catch (InvalidEventException ie) {
        // FIX: removed redundant printStackTrace(); the cause is preserved in the rethrow
        throw new RuntimeException(ie);
      }
    }
  };
  Thread writerThread = new Thread(writer);
  writerThread.start();
  //Check if the thread is alive (blocked) and head/tail is not overlapped
  trySleep(1000);
  assertTrue(writerThread.isAlive());
  LOG.info("Head:" + parser.toString(dbusBuf.getHead()) + ",Tail:" + parser.toString(dbusBuf.getTail()));
  assertEquals("Head Check", 61, dbusBuf.getHead());
  //GenId set here but tail is not yet overlapped
  assertEquals("Tail Check", 2048, dbusBuf.getTail());
  //Read the next event to unblock the writer
  event = itr.next();
  assertTrue(event.isValid());
  itr.remove();
  try {
    writerThread.join(1000);
  } catch (InterruptedException ie) {
    // FIX: restore the interrupt status instead of just printing the trace
    Thread.currentThread().interrupt();
  }
  LOG.info("Head:" + parser.toString(dbusBuf.getHead()) + ",Tail:" + parser.toString(dbusBuf.getTail()));
  assertFalse(writerThread.isAlive());
  assertEquals("Head Check", 132, dbusBuf.getHead());
  assertEquals("Tail Check", 2119, dbusBuf.getTail());
  while (itr.hasNext()) {
    assertTrue(itr.next().isValid(true));
    itr.remove();
  }
  LOG.info("Head:" + parser.toString(dbusBuf.getHead()) + ",Tail:" + parser.toString(dbusBuf.getTail()));
  // Buffer fully drained: head has caught up to tail.
  assertEquals("Head Check", dbusBuf.getHead(), dbusBuf.getTail());
}
Example usage of com.linkedin.databus.core.DbusEventBuffer.DbusEventIterator in the LinkedIn databus project: class TestDbusEvent, method testAppendToEventBuffer_many().
/**
 * Round-trip test: serialize 10 V1 events to JSON, append the JSON back into a fresh
 * event buffer, and verify that every field (key, partition, timestamp, srcId, schemaId,
 * value) survives the round trip, followed by a single end-of-window marker.
 */
@Test
public void testAppendToEventBuffer_many() throws Exception {
  ByteArrayOutputStream jsonStream = new ByteArrayOutputStream();
  ByteBuffer serBuf = ByteBuffer.allocate(1000).order(_eventV1Factory.getByteOrder());
  WritableByteChannel jsonChannel = Channels.newChannel(jsonStream);
  // Serialize each event into serBuf, then re-read it from the buffer and emit it as JSON.
  for (int idx = 0; idx < 10; ++idx) {
    final int startPos = serBuf.position();
    final String payload = "eventValue" + idx;
    DbusEventInfo info = new DbusEventInfo(DbusOpcode.UPSERT, 0L, (short) 0, (short) (idx + 3), 4L + idx, (short) (5 + idx), schemaId, payload.getBytes(Charset.defaultCharset()), false, false);
    // make this explicit
    info.setEventSerializationVersion(DbusEventFactory.DBUS_EVENT_V1);
    DbusEventFactory.serializeEvent(new DbusEventKey(1L + idx), serBuf, info);
    DbusEventInternalReadable readable = _eventV1Factory.createReadOnlyDbusEventFromBuffer(serBuf, startPos);
    readable.writeTo(jsonChannel, Encoding.JSON);
  }
  // Feed the JSON text back through appendToEventBuffer.
  String jsonText = new String(jsonStream.toByteArray());
  BufferedReader jsonReader = new BufferedReader(new StringReader(jsonText));
  DbusEventBuffer buf = new DbusEventBuffer(getConfig(100000, DbusEventBuffer.Config.DEFAULT_INDIVIDUAL_BUFFER_SIZE, 10000, 1000, AllocationPolicy.HEAP_MEMORY, QueuePolicy.OVERWRITE_ON_WRITE));
  buf.startEvents();
  assertEquals("jsonDeserialization", 10, DbusEventSerializable.appendToEventBuffer(jsonReader, buf, null, false));
  buf.endEvents(2);
  // Walk the buffer and check every field of every event.
  DbusEventIterator it = buf.acquireIterator("it1");
  for (int idx = 0; idx < 10; ++idx) {
    String expected = "eventValue" + idx;
    assertTrue("buffer has event " + idx, it.hasNext());
    DbusEvent evt = it.next();
    assertEquals("key correct for event " + idx, 1L + idx, evt.key());
    assertEquals("partitionId correct for event " + idx, 3 + idx, evt.logicalPartitionId());
    assertEquals("timestamp correct for event " + idx, 4L + idx, evt.timestampInNanos());
    assertEquals("srcId correct for event " + idx, 5 + idx, evt.srcId());
    assertEquals("schemaId correct for event " + idx, new String(schemaId), new String(evt.schemaId()));
    assertEquals("value correct for event " + idx, expected, Utils.byteBufferToString(evt.value()));
  }
  // The window must terminate with an end-of-period marker.
  assertTrue("buffer has event", it.hasNext());
  DbusEvent last = it.next();
  assertTrue("end of window", last.isEndOfPeriodMarker());
}
Aggregations