Use of com.linkedin.databus.core.test.DbusEventAppender in project databus by linkedin.
From the class ReadEventsTestParams, method testHeadDrift.
/**
* Test case to recreate the bug (DDSDBUS-387) where the SCNIndex head and the EVB head do not match.
*/
@Test
public void testHeadDrift() throws Exception {
// DbusEventBuffer.LOG.setLevel(Level.DEBUG);
DbusEventBuffer dbusBuf = new DbusEventBuffer(getConfig(10000, 10000, 320, 500, AllocationPolicy.HEAP_MEMORY, QueuePolicy.OVERWRITE_ON_WRITE, AssertLevel.ALL));
DbusEventGenerator generator = new DbusEventGenerator();
Vector<DbusEvent> events = new Vector<DbusEvent>();
generator.generateEvents(215, 5, 100, 10, events);
// Add events to the EventBuffer
DbusEventAppender appender = new DbusEventAppender(events, dbusBuf, null);
appender.run();
LOG.info("Dbus Event Buffer is :" + dbusBuf);
LOG.info("SCNIndex is :" + dbusBuf.getScnIndex());
assertEquals("ScnIndex Head Location", 256, dbusBuf.getScnIndex().getHead());
assertEquals("ScnIndex Tail Location", 256, dbusBuf.getScnIndex().getTail());
assertEquals("EVB Head Location", 8381, dbusBuf.getHead());
long oldEVBTail = 40733;
assertEquals("EVB Tail Location", oldEVBTail, dbusBuf.getTail());
dbusBuf.getScnIndex().printVerboseString(LOG, Level.DEBUG);
long lastScn = events.get(events.size() - 1).sequence();
generator = new DbusEventGenerator(lastScn + 1);
events = new Vector<DbusEvent>();
generator.generateEvents(3, 3, 80, 10, events);
appender = new DbusEventAppender(events, dbusBuf, null);
appender.run();
// Ensure the ScnIndex tail did not move.
assertEquals("ScnIndex Head Location", 256, dbusBuf.getScnIndex().getHead());
assertEquals("ScnIndex Tail Location", 256, dbusBuf.getScnIndex().getTail());
assertEquals("EVB Head Location", 8381, dbusBuf.getHead());
long newEVBTail = 41068;
assertEquals("EVB Tail Location", newEVBTail, dbusBuf.getTail());
// Make sure the EVB tail did move: the old EVB tail belongs to ScnIndex block number 15 and the new EVB tail to block number 16.
assertEquals("Old EVB Tail's index block", 15, dbusBuf.getScnIndex().getBlockNumber(oldEVBTail));
assertEquals("New EVB Tail's index block", 16, dbusBuf.getScnIndex().getBlockNumber(newEVBTail));
}
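The generate-then-append pattern above recurs in every test in this section. Below is a minimal sketch of it, assuming it runs inside the same test class (so the getConfig(...) helper and the imports are in scope) and that the databus test utilities shown above are on the classpath; sizes and counts are illustrative only.
// Minimal sketch of the generate/append/inspect pattern (assumptions noted above).
DbusEventBuffer buf = new DbusEventBuffer(getConfig(10000, 10000, 320, 500,
    AllocationPolicy.HEAP_MEMORY, QueuePolicy.OVERWRITE_ON_WRITE, AssertLevel.ALL));
Vector<DbusEvent> evts = new Vector<DbusEvent>();
// 215 events, window size 5, payload size 10; the third argument controls event sizing
// (argument meaning inferred from the runConstEventsReaderWriter call further below).
new DbusEventGenerator().generateEvents(215, 5, 100, 10, evts);
new DbusEventAppender(evts, buf, null).run(); // appends synchronously in this thread
// Both the event buffer and its SCN index expose head/tail positions, which is what
// the assertions in testHeadDrift compare in order to detect drift.
LOG.info("EVB head=" + buf.getHead() + " tail=" + buf.getTail());
LOG.info("ScnIndex head=" + buf.getScnIndex().getHead() + " tail=" + buf.getScnIndex().getTail());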
Use of com.linkedin.databus.core.test.DbusEventAppender in project databus by linkedin.
From the class ReadEventsTestParams, method testAppendEventOverlapNgt0.
// Case when n > 0
@Test
public void testAppendEventOverlapNgt0() throws Exception {
final DbusEventBuffer dbusBuf = new DbusEventBuffer(getConfig(1145, 5000, 100, 500, AllocationPolicy.HEAP_MEMORY, QueuePolicy.OVERWRITE_ON_WRITE, AssertLevel.ALL));
BufferPositionParser parser = dbusBuf.getBufferPositionParser();
DbusEventGenerator generator = new DbusEventGenerator();
Vector<DbusEvent> events = new Vector<DbusEvent>();
generator.generateEvents(9, 3, 120, 39, events);
// Add events to the EventBuffer. Now the buffer is full
DbusEventAppender appender = new DbusEventAppender(events, dbusBuf, null);
// running in the same thread
appender.run();
LOG.info("Head:" + parser.toString(dbusBuf.getHead()) + ",Tail:" + parser.toString(dbusBuf.getTail()));
long headPos = dbusBuf.getHead();
long tailPos = dbusBuf.getTail();
long scnIndexHead = dbusBuf.getScnIndex().getHead();
long scnIndexTail = dbusBuf.getScnIndex().getTail();
long headGenId = parser.bufferGenId(headPos);
long headIndexId = parser.bufferIndex(headPos);
long headOffset = parser.bufferOffset(headPos);
long tailGenId = parser.bufferGenId(tailPos);
long tailIndexId = parser.bufferIndex(tailPos);
long tailOffset = parser.bufferOffset(tailPos);
assertEquals("Head GenId", 0, headGenId);
assertEquals("Head Index", 0, headIndexId);
assertEquals("Head Offset", 0, headOffset);
assertEquals("Tail GenId", 0, tailGenId);
assertEquals("Tail Index", 0, tailIndexId);
assertEquals("Tail Offset", 1144, tailOffset);
assertEquals("SCNIndex Head", 0, scnIndexHead);
assertEquals("SCNIndex Tail", 80, scnIndexTail);
headPos = parser.setGenId(headPos, 300);
tailPos = parser.setGenId(tailPos, 300);
dbusBuf.setHead(headPos);
dbusBuf.setTail(tailPos);
dbusBuf.recreateIndex();
events = new Vector<DbusEvent>();
generator = new DbusEventGenerator(1000);
/*
 * The event sizes are chosen so that after the 2nd event is added, the current write
 * position (CWP) and the tail point to the same location. The 3rd event then corrupts
 * the EVB and the index (in the presence of the bug).
 */
generator.generateEvents(3, 2, 150, 89, events);
appender = new DbusEventAppender(events, dbusBuf, null);
LOG.info("1");
// Logger.getRootLogger().setLevel(Level.ALL);
// running in the same thread
appender.run();
LOG.info("2");
LOG.info("Head:" + parser.toString(dbusBuf.getHead()) + ",Tail:" + parser.toString(dbusBuf.getTail()));
headPos = dbusBuf.getHead();
tailPos = dbusBuf.getTail();
headGenId = parser.bufferGenId(headPos);
headIndexId = parser.bufferIndex(headPos);
headOffset = parser.bufferOffset(headPos);
tailGenId = parser.bufferGenId(tailPos);
tailIndexId = parser.bufferIndex(tailPos);
tailOffset = parser.bufferOffset(tailPos);
scnIndexHead = dbusBuf.getScnIndex().getHead();
scnIndexTail = dbusBuf.getScnIndex().getTail();
assertEquals("Head GenId", 300, headGenId);
assertEquals("Head Index", 0, headIndexId);
assertEquals("Head Offset", 783, headOffset);
assertEquals("Tail GenId", 301, tailGenId);
assertEquals("Tail Index", 0, tailIndexId);
assertEquals("Tail Offset", 633, tailOffset);
assertEquals("SCNIndex Head", 64, scnIndexHead);
assertEquals("SCNIndex Tail", 48, scnIndexTail);
}
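The head/tail values asserted above are packed positions. The following sketch isolates the BufferPositionParser arithmetic the test relies on, using only the parser calls that already appear in it; evb is a placeholder name for any already-populated DbusEventBuffer, and the comments describing each component are an interpretation rather than documentation.
BufferPositionParser p = evb.getBufferPositionParser();
long tail = evb.getTail();
long genId  = p.bufferGenId(tail);   // generation id: roughly, how often the ring has wrapped
long index  = p.bufferIndex(tail);   // which ByteBuffer of getBuffer() the position falls in
long offset = p.bufferOffset(tail);  // byte offset within that ByteBuffer
// The test fast-forwards the generation id on both head and tail to simulate a buffer
// that has already wrapped many times, then rebuilds the SCN index for the new positions.
evb.setHead(p.setGenId(evb.getHead(), 300));
evb.setTail(p.setGenId(tail, 300));
evb.recreateIndex();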
Use of com.linkedin.databus.core.test.DbusEventAppender in project databus by linkedin.
From the class ReadEventsTestParams, method testBufferOverFlow.
/*
* Test case to recreate the BufferOverFlowException issue tracked in DDS-793.
*/
@Test
public void testBufferOverFlow() throws Exception {
// DbusEventBuffer.LOG.setLevel(Level.DEBUG);
final Logger log = Logger.getLogger("TestDbusEventBuffer.testBufferOverflow");
// log.setLevel(Level.INFO);
log.info("starting");
DbusEventBuffer dbusBuf = new DbusEventBuffer(getConfig(1000, 1000, 100, 500, AllocationPolicy.HEAP_MEMORY, QueuePolicy.BLOCK_ON_WRITE, AssertLevel.ALL));
final DbusEventBuffer dbusBuf2 = new DbusEventBuffer(getConfig(2000, 2000, 100, 1000, AllocationPolicy.HEAP_MEMORY, QueuePolicy.BLOCK_ON_WRITE, AssertLevel.NONE));
BufferPositionParser parser = dbusBuf.getBufferPositionParser();
final BufferPositionParser parser2 = dbusBuf2.getBufferPositionParser();
DbusEventGenerator generator = new DbusEventGenerator();
Vector<DbusEvent> events = new Vector<DbusEvent>();
generator.generateEvents(12, 12, 100, 10, events);
log.info("generate sample events to the EventBuffer");
DbusEventAppender appender = new DbusEventAppender(events, dbusBuf, null);
appender.run();
log.info("dbusBuf : Head:" + parser.toString(dbusBuf.getHead()) + ",Tail:" + parser.toString(dbusBuf.getTail()));
ByteBuffer[] buf = dbusBuf.getBuffer();
byte[] b = new byte[(int) dbusBuf.getTail()];
buf[0].position(0);
buf[0].get(b);
log.info("copy data to the destination buffer: 1");
ReadableByteChannel rChannel = Channels.newChannel(new ByteArrayInputStream(b));
dbusBuf2.readEvents(rChannel);
log.info("dbusBuf2 : Head:" + parser2.toString(dbusBuf2.getHead()) + ",Tail:" + parser2.toString(dbusBuf2.getTail()));
rChannel.close();
log.info("copy data to the destination buffer: 2");
rChannel = Channels.newChannel(new ByteArrayInputStream(b));
dbusBuf2.readEvents(rChannel);
log.info("dbusBuf2 : Head:" + parser2.toString(dbusBuf2.getHead()) + ",Tail:" + parser2.toString(dbusBuf2.getTail()));
log.info("Buffer Size is :" + dbusBuf2.getBuffer().length);
rChannel.close();
log.info("process data in destination buffer: 1");
DbusEventBuffer.DbusEventIterator itr = dbusBuf2.acquireIterator("dummy1");
for (int i = 0; i < 15; i++) {
itr.next();
itr.remove();
}
itr.close();
log.info("dbusBuf2 : Head:" + parser2.toString(dbusBuf2.getHead()) + ",Tail:" + parser2.toString(dbusBuf2.getTail()));
log.info("copy data to the destination buffer: 3");
rChannel = Channels.newChannel(new ByteArrayInputStream(b));
dbusBuf2.readEvents(rChannel);
ByteBuffer[] buf2 = dbusBuf2.getBuffer();
log.info("dbusBuf2 : Head:" + parser2.toString(dbusBuf2.getHead()) + ",Tail:" + parser2.toString(dbusBuf2.getTail()));
log.info("dbusBuf2 : Buffer :" + buf2[0]);
rChannel.close();
log.info("process data in destination buffer: 2");
itr = dbusBuf2.acquireIterator("dummy2");
for (int i = 0; i < 15; i++) {
itr.next();
itr.remove();
}
itr.close();
log.info("dbusBuf2 : Head:" + parser2.toString(dbusBuf2.getHead()) + ",Tail:" + parser2.toString(dbusBuf2.getTail()));
log.info("dbusBuf2 : Buffer :" + buf2[0]);
log.info("generate more sample events to the EventBuffer");
dbusBuf = new DbusEventBuffer(getConfig(2000, 2000, 100, 500, AllocationPolicy.HEAP_MEMORY, QueuePolicy.BLOCK_ON_WRITE, AssertLevel.ALL));
events = new Vector<DbusEvent>();
generator.generateEvents(8, 9, 150, 52, events);
log.info("Events Size is :" + events.get(0).size());
appender = new DbusEventAppender(events, dbusBuf, null);
appender.run();
final AtomicBoolean stopReader = new AtomicBoolean(false);
Runnable reader = new Runnable() {
@Override
public void run() {
try {
Thread.sleep(5 * 1000);
} catch (InterruptedException ie) {
}
DbusEventBuffer.DbusEventIterator itr = dbusBuf2.acquireIterator("dummy3");
log.info("Reader iterator:" + itr);
while (!stopReader.get() || itr.hasNext()) {
while (itr.hasNext()) {
itr.next();
itr.remove();
}
itr.await(100, TimeUnit.MILLISECONDS);
}
itr.close();
log.info("Reader Thread: dbusBuf2 : Head:" + parser2.toString(dbusBuf2.getHead()) + ",Tail:" + parser2.toString(dbusBuf2.getTail()));
ByteBuffer[] buf = dbusBuf2.getBuffer();
log.info("Reader Tread : dbusBuf2 : Buffer :" + buf[0]);
log.info("Reader iterator:" + itr);
}
};
log.info("generate sample events to the EventBuffer");
Thread t = new Thread(reader, "BufferOverflowReader");
b = new byte[(int) dbusBuf.getTail()];
buf = dbusBuf.getBuffer();
buf[0].position(0);
buf[0].get(b);
log.info("copy data to the destination buffer: 4");
log.info("Size is :" + b.length);
rChannel = Channels.newChannel(new ByteArrayInputStream(b));
// <=== Overflow happened at this point
dbusBuf2.readEvents(rChannel);
rChannel.close();
log.info("dbusBuf2 : Head:" + parser2.toString(dbusBuf2.getHead()) + ",Tail:" + parser2.toString(dbusBuf2.getTail()));
log.info("dbusBuf2 : Buffer :" + buf2[0]);
log.info("test if the readEvents can allow reader to proceed while it is blocked");
rChannel = Channels.newChannel(new ByteArrayInputStream(b));
log.info("start reader thread");
t.start();
log.info("copy data to the destination buffer: 5");
dbusBuf2.readEvents(rChannel);
rChannel.close();
log.info("data copied to the destination buffer: 5");
stopReader.set(true);
t.join(20000);
log.info("check if dbusBuf2 is empty");
Assert.assertTrue(!t.isAlive());
if (!dbusBuf2.empty()) {
log.error("dbusBuf2 not empty: " + dbusBuf2);
}
Assert.assertTrue(dbusBuf2.toString(), dbusBuf2.empty());
log.info("dbusBuf2 : Head:" + parser2.toString(dbusBuf2.getHead()) + ",Tail:" + parser2.toString(dbusBuf2.getTail()));
log.info("dbusBuf2 : Buffer :" + buf2[0]);
log.info("done");
}
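The copy-and-replay step that testBufferOverFlow repeats can be isolated into a short sketch. It uses only calls already shown above (getBuffer, getTail, readEvents, acquireIterator) plus plain JDK channels; srcBuf and dstBuf are placeholder names for an already-filled source buffer and a destination buffer, and it assumes the source data still fits in the first ByteBuffer without wrapping.
ByteBuffer[] srcBufs = srcBuf.getBuffer();
byte[] raw = new byte[(int) srcBuf.getTail()];   // valid only while the source buffer has not wrapped
srcBufs[0].position(0);
srcBufs[0].get(raw);
ReadableByteChannel ch = Channels.newChannel(new ByteArrayInputStream(raw));
dstBuf.readEvents(ch);                           // replay the serialized events into dstBuf
ch.close();
// Drain what was just read so a subsequent readEvents() call has free space.
DbusEventBuffer.DbusEventIterator it = dstBuf.acquireIterator("drain");
while (it.hasNext()) {
  it.next();
  it.remove();
}
it.close();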
Use of com.linkedin.databus.core.test.DbusEventAppender in project databus by linkedin.
From the class ReadEventsTestParams, method testBigReadEventBuffer.
/*
 * The read buffer size is bigger than the overall EVB size:
 * a single read into the EVB is larger than the EVB itself.
 */
@Test
public void testBigReadEventBuffer() throws Exception {
final Logger log = Logger.getLogger("TestDbusEventBuffer.testBigReadEventBuffer");
// log.setLevel(Level.INFO);
log.info("starting");
final DbusEventBuffer dbusBuf = new DbusEventBuffer(getConfig(4000, 4000, 100, 500, AllocationPolicy.HEAP_MEMORY, QueuePolicy.BLOCK_ON_WRITE, AssertLevel.ALL));
final DbusEventBuffer dbusBuf2 = new DbusEventBuffer(getConfig(1000, 1000, 100, 3000, AllocationPolicy.HEAP_MEMORY, QueuePolicy.BLOCK_ON_WRITE, AssertLevel.NONE));
// dbusBuf2.getLog().setLevel(Level.DEBUG);
BufferPositionParser parser = dbusBuf.getBufferPositionParser();
final BufferPositionParser parser2 = dbusBuf2.getBufferPositionParser();
DbusEventGenerator generator = new DbusEventGenerator();
Vector<DbusEvent> events = new Vector<DbusEvent>();
generator.generateEvents(24, 24, 100, 10, events);
log.info("Num Events :" + events.size());
// Add events to the EventBuffer
DbusEventAppender appender = new DbusEventAppender(events, dbusBuf, null);
appender.run();
final AtomicBoolean stopReader = new AtomicBoolean(false);
log.info("dbusBuf : Head:" + parser.toString(dbusBuf.getHead()) + ",Tail:" + parser.toString(dbusBuf.getTail()));
class EvbReader implements Runnable {
private int _count = 0;
public EvbReader() {
_count = 0;
}
public int getCount() {
return _count;
}
@Override
public void run() {
try {
Thread.sleep(5 * 1000);
} catch (InterruptedException ie) {
}
DbusEventBuffer.DbusEventIterator itr = dbusBuf2.acquireIterator("dummy");
log.info("Reader iterator:" + itr);
while (!stopReader.get() || itr.hasNext()) {
while (itr.hasNext()) {
itr.next();
itr.remove();
_count++;
}
itr.await(100, TimeUnit.MILLISECONDS);
}
log.info("Reader Thread: dbusBuf2 : Head:" + parser2.toString(dbusBuf2.getHead()) + ",Tail:" + parser2.toString(dbusBuf2.getTail()));
ByteBuffer[] buf = dbusBuf2.getBuffer();
log.info("Reader Thread : dbusBuf2 : Buffer :" + buf[0]);
log.info("Count is :" + _count);
log.info("Reader iterator:" + itr);
}
}
EvbReader reader = new EvbReader();
Thread t = new Thread(reader, "BigReadEventReader");
ByteBuffer[] buf = dbusBuf.getBuffer();
byte[] b = new byte[(int) dbusBuf.getTail()];
buf[0].position(0);
buf[0].get(b);
ReadableByteChannel rChannel = Channels.newChannel(new ByteArrayInputStream(b));
t.start();
dbusBuf2.readEvents(rChannel);
stopReader.set(true);
t.join(20000);
Assert.assertTrue(!t.isAlive());
DbusEventBuffer.DbusEventIterator itr2 = dbusBuf2.acquireIterator("dummy");
int count = 0;
while (itr2.hasNext()) {
itr2.next();
itr2.remove();
count++;
}
log.info("Total Count :" + (count + reader.getCount()));
log.info("Head :" + dbusBuf2.getHead() + ", Tail :" + dbusBuf2.getTail());
assertEquals("Total Count", 26, (count + reader.getCount()));
assertEquals("Head == Tail", dbusBuf2.getHead(), dbusBuf2.getTail());
assertEquals("Head Check:", 2890, dbusBuf2.getHead());
log.info("done");
}
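Both threaded tests drain the destination buffer with the same iterator loop. Below is that loop as a hypothetical helper; drainUntilStopped is not a databus method, it only re-uses the DbusEventIterator calls shown in the reader threads above.
// Hypothetical helper; iterator API exactly as used in the reader threads above.
static void drainUntilStopped(DbusEventBuffer buffer, AtomicBoolean stop) {
  DbusEventBuffer.DbusEventIterator it = buffer.acquireIterator("drainer");
  while (!stop.get() || it.hasNext()) {
    while (it.hasNext()) {
      it.next();
      it.remove();                         // freeing consumed space lets a blocked writer proceed
    }
    it.await(100, TimeUnit.MILLISECONDS);  // wait briefly for new events to arrive
  }
  it.close();
}
// Typical use: start a thread running drainUntilStopped(dbusBuf2, stopReader), call
// dbusBuf2.readEvents(channel) from the main thread, then set stopReader and join.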
Use of com.linkedin.databus.core.test.DbusEventAppender in project databus by linkedin.
From the class ReadEventsTestParams, method runConstEventsReaderWriter.
// data flow: emitter/producer/appender -> DEB -> writer -> pipe -> reader -> DEB -> consumer
protected boolean runConstEventsReaderWriter(Vector<DbusEvent> srcTestEvents, Vector<DbusEvent> dstTestEvents, EventBufferTestInput input, DbusEventsStatisticsCollector emitterStats, DbusEventsStatisticsCollector streamStats, DbusEventsStatisticsCollector clientStats, boolean autoStartBuffer) throws Exception {
LOG.info("starting runConstEventsReaderWriter for " + input.getTestName());
int numEvents = input.getNumEvents();
int maxWindowSize = input.getWindowSize();
DbusEventGenerator evGen = new DbusEventGenerator();
if (evGen.generateEvents(numEvents, maxWindowSize, 512, input.getPayloadSize(), srcTestEvents) <= 0) {
return false;
}
int eventSize = srcTestEvents.get(0).size();
long producerBufferSize = input.getProducerBufferSize() * eventSize;
long sharedBufferSize = input.getSharedBufferSize() * eventSize;
int stagingBufferSize = input.getStagingBufferSize() * eventSize;
int individualBufferSize = input.getIndividualBufferSize() * eventSize;
int indexSize = input.getIndexSize() * eventSize;
QueuePolicy prodQueuePolicy = input.getProdQueuePolicy();
QueuePolicy consQueuePolicy = input.getConsQueuePolicy();
// create the main event buffers
DbusEventBuffer prodEventBuffer = new DbusEventBuffer(getConfig(producerBufferSize, individualBufferSize, indexSize, stagingBufferSize, AllocationPolicy.HEAP_MEMORY, prodQueuePolicy, input.getProdBufferAssertLevel()));
DbusEventBuffer consEventBuffer = new DbusEventBuffer(getConfig(sharedBufferSize, individualBufferSize, indexSize, stagingBufferSize, AllocationPolicy.HEAP_MEMORY, consQueuePolicy, input.getConsBufferAssertLevel()));
// Producer of events, a.k.a. "emitter"
DbusEventAppender eventProducer = new DbusEventAppender(srcTestEvents, prodEventBuffer, emitterStats, autoStartBuffer);
// communication channels (a java.nio Pipe) between the writer and the reader
Pipe pipe = Pipe.open();
Pipe.SinkChannel writerStream = pipe.sink();
Pipe.SourceChannel readerStream = pipe.source();
writerStream.configureBlocking(true);
readerStream.configureBlocking(false);
// Event writer - Relay in the real world
int batchSize = input.getBatchSize() * eventSize;
DbusEventBufferWriter writer = new DbusEventBufferWriter(prodEventBuffer, writerStream, batchSize, streamStats);
// Event consumer - a client in the real world
DbusEventBufferConsumer consumer = new DbusEventBufferConsumer(consEventBuffer, numEvents, input.getDeleteInterval(), dstTestEvents);
Vector<EventBufferConsumer> consList = new Vector<EventBufferConsumer>();
consList.add(consumer);
// Event readers - Clients in the real world
DbusEventBufferReader reader = new DbusEventBufferReader(consEventBuffer, readerStream, consList, clientStats);
UncaughtExceptionTrackingThread tEmitter = new UncaughtExceptionTrackingThread(eventProducer, "EventProducer");
UncaughtExceptionTrackingThread tWriter = new UncaughtExceptionTrackingThread(writer, "Writer");
UncaughtExceptionTrackingThread tReader = new UncaughtExceptionTrackingThread(reader, "Reader");
UncaughtExceptionTrackingThread tConsumer = new UncaughtExceptionTrackingThread(consumer, "Consumer");
long emitterWaitms = 20000;
long writerWaitms = 10000;
long readerWaitms = 10000;
long consumerWaitms = readerWaitms;
// start emitter;
tEmitter.start();
// tarnish events written to buffer;
int[] corruptIndexList = input.getCorruptIndexList();
if (corruptIndexList.length > 0) {
tEmitter.join(emitterWaitms);
EventCorruptionType corruptionType = input.getCorruptionType();
eventProducer.tarnishEventsInBuffer(corruptIndexList, corruptionType);
}
// start consumer / reader /writer
tConsumer.start();
tWriter.start();
tReader.start();
// wait until all events have been written;
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): waiting up to " + (emitterWaitms / 1000) + " sec for appender/producer/emitter thread to terminate");
tEmitter.join(emitterWaitms);
// try to set the number of expected events so the writer knows when to finish
long eventsEmitted = eventProducer.eventsEmitted();
writer.setExpectedEvents(eventsEmitted);
// wait for writer to finish;
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): waiting up to " + (writerWaitms / 1000) + " sec for writer thread to terminate");
tWriter.join(writerWaitms);
// close the writer Stream;
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): signalling writer to stop");
writer.stop();
if (!tReader.isAlive()) {
LOG.error("runConstEventsReaderWriter(): reader thread died unexpectedly");
}
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): waiting up to " + (consumerWaitms / 1000) + " sec for consumer thread to terminate");
tConsumer.join(consumerWaitms);
// stop the consumer thread; may or may not have got all events;
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): signalling consumer to stop");
consumer.stop();
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): signalling reader to stop");
reader.stop();
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): all stop.");
assertEquals(null, consumer.getExceptionThrown());
LOG.info("runConstEventsReaderWriter() consumer thread: " + (consumer.hasInvalidEvent() ? "DID" : "did NOT") + " receive invalid event(s); num events emitted=" + eventsEmitted + ", events written=" + writer.eventsWritten() + ", events read=" + reader.eventsRead());
return true;
}
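The writer-to-reader channel in runConstEventsReaderWriter is a plain java.nio.channels.Pipe. A standalone, JDK-only sketch of the same wiring (blocking sink, non-blocking source) follows; no databus classes are involved and the payload is illustrative.
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.Pipe;

public class PipeWiringSketch {
  public static void main(String[] args) throws IOException {
    Pipe pipe = Pipe.open();
    Pipe.SinkChannel writerStream = pipe.sink();
    Pipe.SourceChannel readerStream = pipe.source();
    writerStream.configureBlocking(true);    // writer side blocks, as configured in the test above
    readerStream.configureBlocking(false);   // reader side is non-blocking, as configured in the test above

    writerStream.write(ByteBuffer.wrap("payload".getBytes("UTF-8")));
    ByteBuffer in = ByteBuffer.allocate(64);
    int n = readerStream.read(in);           // may legitimately return 0 on a non-blocking source
    System.out.println("read " + n + " bytes");

    writerStream.close();
    readerStream.close();
  }
}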