Use of com.linkedin.databus.core.test.DbusEventAppender in project databus by LinkedIn:
class TestGenericDispatcher, method testPartialWindowRollback.
/**
 * Tests the case where the dispatcher exits the main processing loop in
 * {@link GenericDispatcher#doDispatchEvents()} with a partial window and the flushing of the
 * outstanding callbacks fails. We want to make sure that a rollback is correctly triggered.
 *
 * <p>The test simulates the following case: e1_1 e1_2 e1_3 EOW e2_1 e2_2 e2_3 EOW ... with a
 * failure in the e2_2 callback:
 * <ol>
 *   <li>Read full first window: e1_1 e1_2 e1_3 EOW</li>
 *   <li>Read partial second window: e2_1 e2_2</li>
 *   <li>The above should fail -- verify that rollback is called</li>
 *   <li>Read the rest</li>
 * </ol>
 *
 * <p>NOTE: this Javadoc previously sat between the {@code public} modifier and the return type,
 * where the javadoc tool does not pick it up; it has been moved above the annotation.
 */
@Test(groups = { "small", "functional" })
public void testPartialWindowRollback() throws Exception {
  final Logger log = Logger.getLogger("TestGenericDispatcher.testPartialWindowRollback");
  //log.setLevel(Level.INFO);
  log.info("start");
  final Level saveLevel = Logger.getLogger("com.linkedin.databus.client").getLevel();
  //Logger.getLogger("com.linkedin.databus.client").setLevel(Level.DEBUG);

  // Generate the test events: numWindows windows of numEventsPerWindow events each
  Vector<Short> srcIdList = new Vector<Short>();
  srcIdList.add((short) 1);
  DbusEventGenerator evGen = new DbusEventGenerator(0, srcIdList);
  Vector<DbusEvent> srcTestEvents = new Vector<DbusEvent>();
  final int numEvents = 9;
  // 1-based number of the event callback to fail
  final int numOfFailureEvent = 5;
  final int numEventsPerWindow = 3;
  final int payloadSize = 200;
  final int numWindows = (int) Math.ceil(1.0 * numEvents / numEventsPerWindow);
  Assert.assertTrue(evGen.generateEvents(numEvents, numEventsPerWindow, 500, payloadSize,
                                         srcTestEvents) > 0);

  // Find out how much data we need to stream for the failure:
  // win1Size covers the whole first window (the "payloadSize - 1" seed accounts for the EOW
  // event, whose size is < payloadSize); win2Size covers the second window only up to the
  // failing event.
  int win1Size = payloadSize - 1;
  int win2Size = 0;
  int eventN = 0;
  for (DbusEvent e : srcTestEvents) {
    eventN++;
    if (eventN <= numEventsPerWindow) {
      win1Size += e.size();
    } else if (eventN <= numOfFailureEvent) {
      win2Size += e.size();
    }
  }

  // Serialize the events to a buffer so they can be sent to the client
  final TestGenericDispatcherEventBuffer srcEventsBuf =
      new TestGenericDispatcherEventBuffer(_generic100KBufferStaticConfig);
  DbusEventAppender eventProducer = new DbusEventAppender(srcTestEvents, srcEventsBuf, null, true);
  Thread tEmitter = new Thread(eventProducer);
  tEmitter.start();

  // Create destination (client) buffer
  final TestGenericDispatcherEventBuffer destEventsBuf =
      new TestGenericDispatcherEventBuffer(_generic100KBufferStaticConfig);

  // Create dispatcher; the mock consumer is configured with numOfFailureEvent so that the
  // corresponding data-event callback fails
  final TimeoutTestConsumer mockConsumer = new TimeoutTestConsumer(100, 10, 0, numOfFailureEvent, 0, 1);
  SelectingDatabusCombinedConsumer sdccMockConsumer =
      new SelectingDatabusCombinedConsumer((DatabusStreamConsumer) mockConsumer);
  List<String> sources = new ArrayList<String>();
  Map<Long, IdNamePair> sourcesMap = new HashMap<Long, IdNamePair>();
  for (int i = 1; i <= 3; ++i) {
    IdNamePair sourcePair = new IdNamePair((long) i, "source" + i);
    sources.add(sourcePair.getName());
    sourcesMap.put(sourcePair.getId(), sourcePair);
  }
  DatabusV2ConsumerRegistration consumerReg =
      new DatabusV2ConsumerRegistration(sdccMockConsumer, sources, null);
  List<DatabusV2ConsumerRegistration> allRegistrations = Arrays.asList(consumerReg);
  final ConsumerCallbackStats callbackStats =
      new ConsumerCallbackStats(0, "test", "test", true, false, null);
  final UnifiedClientStats unifiedStats = new UnifiedClientStats(0, "test", "test.unified");
  MultiConsumerCallback callback =
      new MultiConsumerCallback(allRegistrations, Executors.newFixedThreadPool(2), 1000,
                                new StreamConsumerCallbackFactory(callbackStats, unifiedStats),
                                callbackStats, unifiedStats, null, null);
  callback.setSourceMap(sourcesMap);
  List<DatabusSubscription> subs = DatabusSubscription.createSubscriptionList(sources);
  final RelayDispatcher dispatcher =
      new RelayDispatcher("dispatcher", _genericRelayConnStaticConfig, subs,
                          new InMemoryPersistenceProvider(), destEventsBuf, callback,
                          null, null, null, null, null);
  dispatcher.setSchemaIdCheck(false);
  Thread dispatcherThread = new Thread(dispatcher);
  dispatcherThread.setDaemon(true);
  log.info("starting dispatcher thread");
  dispatcherThread.start();

  // Register source ids and schemas with the dispatcher
  HashMap<Long, List<RegisterResponseEntry>> schemaMap =
      new HashMap<Long, List<RegisterResponseEntry>>();
  List<RegisterResponseEntry> l1 = new ArrayList<RegisterResponseEntry>();
  List<RegisterResponseEntry> l2 = new ArrayList<RegisterResponseEntry>();
  List<RegisterResponseEntry> l3 = new ArrayList<RegisterResponseEntry>();
  l1.add(new RegisterResponseEntry(1L, (short) 1, SOURCE1_SCHEMA_STR));
  l2.add(new RegisterResponseEntry(2L, (short) 1, SOURCE2_SCHEMA_STR));
  l3.add(new RegisterResponseEntry(3L, (short) 1, SOURCE3_SCHEMA_STR));
  schemaMap.put(1L, l1);
  schemaMap.put(2L, l2);
  schemaMap.put(3L, l3);
  dispatcher.enqueueMessage(SourcesMessage.createSetSourcesIdsMessage(sourcesMap.values()));
  dispatcher.enqueueMessage(SourcesMessage.createSetSourcesSchemasMessage(schemaMap));

  log.info("starting event dispatch");
  // Stream the events from the source buffer without the EOW.
  // Comm channel between the writer ("relay") and the reader ("client").
  Pipe pipe = Pipe.open();
  Pipe.SinkChannel writerStream = pipe.sink();
  Pipe.SourceChannel readerStream = pipe.source();
  writerStream.configureBlocking(true);
  readerStream.configureBlocking(false);

  // Event writer - Relay in the real world
  Checkpoint cp = Checkpoint.createFlexibleCheckpoint();

  // Event readers - Clients in the real world
  //Checkpoint pullerCheckpoint = Checkpoint.createFlexibleCheckpoint();
  DbusEventsStatisticsCollector clientStats =
      new DbusEventsStatisticsCollector(0, "client", true, false, null);
  DbusEventBufferReader reader =
      new DbusEventBufferReader(destEventsBuf, readerStream, null, clientStats);
  UncaughtExceptionTrackingThread tReader = new UncaughtExceptionTrackingThread(reader, "Reader");
  tReader.setDaemon(true);
  tReader.start();

  try {
    log.info("send first window -- that one should be OK");
    StreamEventsResult streamRes =
        srcEventsBuf.streamEvents(cp, writerStream, new StreamEventsArgs(win1Size));
    // first window data events + its EOW event
    Assert.assertEquals(numEventsPerWindow + 1, streamRes.getNumEventsStreamed());
    TestUtil.assertWithBackoff(new ConditionCheck() {
      @Override
      public boolean check() {
        return 1 == callbackStats.getNumSysEventsProcessed();
      }
    }, "first window processed", 5000, log);

    log.info("send the second partial window -- that one should cause an error");
    streamRes = srcEventsBuf.streamEvents(cp, writerStream, new StreamEventsArgs(win2Size));
    Assert.assertEquals(numOfFailureEvent - numEventsPerWindow, streamRes.getNumEventsStreamed());

    log.info("wait for dispatcher to finish");
    TestUtil.assertWithBackoff(new ConditionCheck() {
      @Override
      public boolean check() {
        log.info("events received: " + callbackStats.getNumDataEventsReceived());
        return numOfFailureEvent <= callbackStats.getNumDataEventsProcessed();
      }
    }, "all events until the error processed", 5000, log);

    log.info("all data events have been received but no EOW");
    Assert.assertEquals(numOfFailureEvent, clientStats.getTotalStats().getNumDataEvents());
    Assert.assertEquals(1, clientStats.getTotalStats().getNumSysEvents());
    // All events up to and including the failing one should have gone through the callbacks.
    // (The original comment here claimed "< numOfFailureEvent", contradicting this assertion.)
    Assert.assertTrue(numOfFailureEvent <= callbackStats.getNumDataEventsProcessed());
    // onDataEvent callbacks for e2_1 and e2_2 get cancelled
    Assert.assertEquals(2, callbackStats.getNumDataErrorsProcessed());
    // only one EOW
    Assert.assertEquals(1, callbackStats.getNumSysEventsProcessed());

    log.info("Send the remainder of the window");
    streamRes = srcEventsBuf.streamEvents(cp, writerStream, new StreamEventsArgs(100000));
    // remaining events + EOWs
    Assert.assertEquals(srcTestEvents.size() + numWindows - (numOfFailureEvent + 1),
                        streamRes.getNumEventsStreamed());

    log.info("wait for the rollback");
    TestUtil.assertWithBackoff(new ConditionCheck() {
      @Override
      public boolean check() {
        return 1 == mockConsumer.getNumRollbacks();
      }
    }, "rollback seen", 5000, log);

    log.info("wait for dispatcher to finish after the rollback");
    TestUtil.assertWithBackoff(new ConditionCheck() {
      @Override
      public boolean check() {
        log.info("num windows processed: " + callbackStats.getNumSysEventsProcessed());
        return numWindows == callbackStats.getNumSysEventsProcessed();
      }
    }, "all events processed", 5000, log);
  } finally {
    reader.stop();
    dispatcher.shutdown();
    log.info("all events processed");
    verifyNoLocks(null, srcEventsBuf);
    verifyNoLocks(null, destEventsBuf);
  }
  Logger.getLogger("com.linkedin.databus.client").setLevel(saveLevel);
  log.info("end\n");
}
Use of com.linkedin.databus.core.test.DbusEventAppender in project databus by LinkedIn:
class ReadEventsTestParams, method testInternalIteratorSingleBufFull.
/**
 * Verify that Iterator's CP never gets ahead of Iterator's tail, even at
 * the end of the buffer (client, NO-OVERWRITE policy).
 */
@Test
public void testInternalIteratorSingleBufFull() throws Exception {
final Logger log = Logger.getLogger("TestDbusEventBufferIterator.testInternalIteratorSingleBufFull");
// log.setLevel(Level.DEBUG);
log.info("starting");
// Small (845-byte) client-style buffer: BLOCK_ON_WRITE queue policy, no internal asserts.
// NOTE(review): getConfig argument order assumed from usage; confirm against its declaration.
final DbusEventBuffer dbusBuf = new DbusEventBuffer(TestDbusEventBuffer.getConfig(845, 100000, 256, 500, AllocationPolicy.HEAP_MEMORY, QueuePolicy.BLOCK_ON_WRITE, AssertLevel.NONE));
dbusBuf.start(0);
log.info("append a full buffer");
DbusEventGenerator generator = new DbusEventGenerator(10);
final Vector<DbusEvent> events = new Vector<DbusEvent>();
generator.generateEvents(6, 2, 120, 38, events);
log.debug(dbusBuf.toShortString());
dbusBuf.assertBuffersLimits();
// Append synchronously on this thread (run(), not a separate Thread)
DbusEventAppender appender = new DbusEventAppender(events, dbusBuf, null, 1.0, false, -1);
appender.run();
log.info("verify new iterator");
DbusEventIterator iter1 = dbusBuf.acquireIterator("testInternalIteratorSingleBufFull");
log.debug("it1=" + iter1);
// A fresh iterator starts at the buffer head, and its tail mirrors the buffer tail
Assert.assertEquals(iter1.getCurrentPosition(), dbusBuf.getHead());
Assert.assertEquals(iter1._iteratorTail.getPosition(), dbusBuf.getTail());
Assert.assertEquals(dbusBuf._busyIteratorPool.size(), 1);
Assert.assertTrue(iter1.hasNext());
// First event read is an end-of-period marker (presumably written by dbusBuf.start(0) --
// TODO confirm)
DbusEvent e = iter1.next();
Assert.assertTrue(e.isEndOfPeriodMarker());
Assert.assertTrue(iter1.hasNext());
dbusBuf.assertBuffersLimits();
log.info("make sure we can read some events");
readAndCompareIteratorEvents(iter1, events, 0, 6, true, true, true);
log.debug("after read: " + dbusBuf.toShortString());
log.debug(iter1);
log.info("append more windows");
final Vector<DbusEvent> events2 = new Vector<DbusEvent>();
generator = new DbusEventGenerator(200);
generator.generateEvents(2, 1, 120, 39, events2);
appender = new DbusEventAppender(events2, dbusBuf, null, 1.0, false, -1);
appender.run();
log.debug("after 2 more events added: " + dbusBuf.toShortString());
log.debug(iter1);
// The existing iterator must see the newly appended events too
readAndCompareIteratorEvents(iter1, events2, 0, 2, true, false, true);
log.debug("after 2 more events read: " + dbusBuf.toShortString());
log.debug(iter1);
dbusBuf.assertBuffersLimits();
// create another iterator - make sure it can read too
DbusEventIterator iter2 = dbusBuf.acquireIterator("testInternalIteratorSingleBufFull2");
long iCWP = iter2.getCurrentPosition();
// The second iterator should start at the (sanitized) buffer head
long head = dbusBuf.getBufferPositionParser().sanitize(dbusBuf.getHead(), dbusBuf.getBuffer());
Assert.assertEquals(iCWP, head);
Assert.assertEquals(iter2._iteratorTail.getPosition(), dbusBuf.getTail());
Assert.assertEquals(dbusBuf._busyIteratorPool.size(), 2);
Assert.assertTrue(iter2.hasNext());
log.debug("iter2=" + iter2);
// read same events and don't remove
readAndCompareIteratorEvents(iter2, events2, 0, 2, true, false, true);
dbusBuf.releaseIterator(iter2);
dbusBuf.assertBuffersLimits();
log.debug("iter1=" + iter1);
// Remove everything iter1 has consumed so far, freeing space for the next windows
iter1.remove();
log.debug("buf (after read)=" + dbusBuf);
generator = new DbusEventGenerator(300);
final Vector<DbusEvent> events3 = new Vector<DbusEvent>();
generator.generateEvents(4, 2, 120, 39, events3);
appender = new DbusEventAppender(events3, dbusBuf, null, 1.0, false, -1);
appender.run();
dbusBuf.assertBuffersLimits();
log.info("make sure we can read remainder of events");
readAndCompareIteratorEvents(iter1, events3, 0, 4, false, true, true);
dbusBuf.assertBuffersLimits();
// After reading-and-removing everything, the buffer should be empty
Assert.assertTrue(dbusBuf.empty());
dbusBuf.releaseIterator(iter1);
log.info("done");
}
Use of com.linkedin.databus.core.test.DbusEventAppender in project databus by LinkedIn:
class ReadEventsTestParams, method testMaxEventSize.
/**
 * Verifies that an event as large as a whole backing byte buffer cannot be appended: the append
 * throws DatabusRuntimeException and leaves all three byte buffers empty. Then verifies that a
 * window exactly filling the byte buffers CAN be appended, but a subsequent append fails while
 * the ScnIndex head is still -1 (rare case; support is TBD per the inline note below).
 */
@Test
public void testMaxEventSize() throws Exception {
// NOTE(review): getConfig argument order assumed from usage; confirm against its declaration.
DbusEventBuffer dbusBuf = new DbusEventBuffer(getConfig(1144, 500, 100, 500, AllocationPolicy.HEAP_MEMORY, QueuePolicy.OVERWRITE_ON_WRITE, AssertLevel.ALL));
DbusEventGenerator generator = new DbusEventGenerator();
// Generate one event that equals the size of the first buffer. We won't be able to
// append that to the buffer.
Vector<DbusEvent> events = new Vector<DbusEvent>();
generator.generateEvents(1, 2, 500, 439, events);
DbusEventAppender appender = new DbusEventAppender(events, dbusBuf, null, false);
int eventCount = 0;
boolean exceptionCaught = false;
dbusBuf.startEvents();
// The oversized event must be rejected with DatabusRuntimeException
try {
eventCount = appender.addEventToBuffer(events.get(0), eventCount);
} catch (DatabusRuntimeException e) {
exceptionCaught = true;
}
Assert.assertTrue(exceptionCaught);
Assert.assertEquals(0, eventCount);
// Nothing should have been written to any of the three backing byte buffers
DbusEventBufferReflector reflector = appender.getDbusEventReflector();
Assert.assertEquals(0, reflector.getBuffer(0).limit());
Assert.assertEquals(0, reflector.getBuffer(1).limit());
Assert.assertEquals(0, reflector.getBuffer(2).limit());
events.clear();
// Fresh buffer with the same configuration for the second scenario
dbusBuf = new DbusEventBuffer(getConfig(1144, 500, 100, 500, AllocationPolicy.HEAP_MEMORY, QueuePolicy.OVERWRITE_ON_WRITE, AssertLevel.ALL));
appender = new DbusEventAppender(events, dbusBuf, null, false);
// total event size = 499
generator.generateEvents(1, 20, 500, 438, events);
// total event size = 71
generator.generateEvents(1, 20, 500, 10, events);
// total event size = 428
generator.generateEvents(1, 20, 500, 367, events);
// event + EOP = 132.
generator.generateEvents(1, 1, 500, 10, events);
// We should be able to append the first three events above, filling the two byte buffers completely.
// And then the last event along with EOP marker in the last byte buffer, filling that to complete
// one window.
appender.run();
// Now try to add one event. Because ScnIndex still has head as -1 for this buffer,
// we end up getting a DatabusRuntimeException and not being able to add an event.
// Since this is a rare case, it is TBD whether we need to support this case or not.
events.clear();
// event size = 71, can append.
generator.generateEvents(1, 20, 500, 10, events);
exceptionCaught = false;
try {
appender.run();
} catch (DatabusRuntimeException e) {
exceptionCaught = true;
}
Assert.assertTrue(exceptionCaught);
}
Use of com.linkedin.databus.core.test.DbusEventAppender in project databus by LinkedIn:
class ReadEventsTestParams, method testCompleteRollback.
// Add a bunch of events to the buffer without EOP, and then issue a rollback.
// Verify that the buffer is now empty, and we can add the same events with EOP again.
@Test
public void testCompleteRollback() throws Exception {
// NOTE(review): getConfig argument order assumed from usage; confirm against its declaration.
DbusEventBuffer dbusBuf = new DbusEventBuffer(getConfig(1144, 500, 100, 500, AllocationPolicy.HEAP_MEMORY, QueuePolicy.OVERWRITE_ON_WRITE, AssertLevel.ALL));
DbusEventGenerator generator = new DbusEventGenerator();
Vector<DbusEvent> events = new Vector<DbusEvent>();
DbusEventAppender appender = new DbusEventAppender(events, dbusBuf, null, false);
events.clear();
// Same test case as before, with the first 3 byte buffers having data now.
// total event size = 499
generator.generateEvents(1, 20, 500, 438, events);
// total event size = 71
generator.generateEvents(1, 20, 500, 10, events);
// total event size = 428
generator.generateEvents(1, 20, 500, 367, events);
// event = 71.
generator.generateEvents(1, 20, 500, 10, events);
dbusBuf.startEvents();
// Append the events one by one without writing an EOP marker
int evCount = 0;
for (DbusEvent e : events) {
evCount = appender.addEventToBuffer(e, evCount);
}
Assert.assertEquals(events.size(), evCount);
DbusEventBufferReflector reflector = appender.getDbusEventReflector();
// Rollback must discard everything appended since startEvents(): write position back to 0
dbusBuf.rollbackEvents();
Assert.assertEquals(0, reflector.getCurrentWritePosition().getPosition());
// Now we should be able to add these events, plus an EOP marker at the end.
appender.run();
long cwp1 = reflector.getCurrentWritePosition().getPosition();
// 1156: presumably the four events plus the EOP marker -- NOTE(review): magic value derived
// from the generated event sizes above; confirm if generator defaults ever change
Assert.assertEquals(1156, cwp1);
long tail = reflector.getTail().getPosition();
// Tail matches the write position; head stays at 0 since nothing was consumed
Assert.assertEquals(1156, tail);
Assert.assertEquals(0, reflector.getHead().getPosition());
}
Use of com.linkedin.databus.core.test.DbusEventAppender in project databus by LinkedIn:
class ReadEventsTestParams, method testReadEventOverlap.
/**
 * Recreates the bug where the pull thread incorrectly writes to the head of the iterator when in
 * BLOCK_ON_WRITE mode. The error was because readEvents() incorrectly relied on remaining() to
 * give an accurate value.
 *
 * <p>NOTE: this description previously sat in a comment wedged between the {@code public}
 * modifier and the return type, where documentation tools do not pick it up; moved above the
 * annotation.
 */
@Test
public void testReadEventOverlap() throws Exception {
  /*
   * Recreate the head and tail position such that the eventBuffer is in the below state:
   *
   *   |--------|----------------------------------------|--------|
   *   0       head                                     tail   capacity
   *
   * The space between tail and capacity is such that with no internal fragmentation, the free
   * space will be sufficient to store 2 events, but with internal fragmentation, the free space
   * will not be enough. In this case, readEvents should block until an event is removed by the
   * other thread.
   */
  final DbusEventBuffer dbusBuf = new DbusEventBuffer(getConfig(1000, 1000, 100, 500,
      AllocationPolicy.HEAP_MEMORY, QueuePolicy.BLOCK_ON_WRITE, AssertLevel.NONE));
  BufferPositionParser parser = dbusBuf.getBufferPositionParser();
  DbusEventGenerator generator = new DbusEventGenerator();
  Vector<DbusEvent> events = new Vector<DbusEvent>();
  generator.generateEvents(11, 11, 100, 10, events);

  // Add events to the EventBuffer; run() executes synchronously on this thread
  DbusEventAppender appender = new DbusEventAppender(events, dbusBuf, null);
  appender.run();
  LOG.info("Head:" + dbusBuf.getHead() + ",Tail:" + dbusBuf.getTail());
  assertEquals("Head Check", 0, dbusBuf.getHead());
  assertEquals("Tail Check", 903, dbusBuf.getTail());

  // Remove the first event to advance the head
  DbusEventIterator itr = dbusBuf.acquireIterator("dummy");
  assertTrue(itr.hasNext());
  DbusEvent event = itr.next();
  assertTrue(event.isValid());
  itr.remove();
  LOG.info("Head:" + parser.toString(dbusBuf.getHead()) + ",Tail:" + parser.toString(dbusBuf.getTail()));
  assertEquals("Head Check", 61, dbusBuf.getHead());
  assertEquals("Tail Check", 903, dbusBuf.getTail());
  for (DbusEvent e : events) {
    assertTrue("invalid event", e.isValid());
  }

  // Set up the ReadChannel with 2 serialized events
  ByteArrayOutputStream oStream = new ByteArrayOutputStream();
  WritableByteChannel oChannel = Channels.newChannel(oStream);
  for (int i = 0; i < 2; ++i) {
    ((DbusEventInternalReadable) events.get(i)).writeTo(oChannel, Encoding.BINARY);
  }
  byte[] writeBytes = oStream.toByteArray();
  ByteArrayInputStream iStream = new ByteArrayInputStream(writeBytes);
  final ReadableByteChannel rChannel = Channels.newChannel(iStream);

  // Thread that calls readEvents on the channel; with BLOCK_ON_WRITE it should block until
  // space is freed by the reader below
  Runnable writer = new Runnable() {
    @Override
    public void run() {
      try {
        dbusBuf.readEvents(rChannel);
      } catch (InvalidEventException ie) {
        ie.printStackTrace();
        throw new RuntimeException(ie);
      }
    }
  };
  Thread writerThread = new Thread(writer);
  writerThread.start();

  // Check if the thread is alive (blocked) and head/tail is not overlapped
  trySleep(1000);
  assertTrue(writerThread.isAlive());
  LOG.info("Head:" + parser.toString(dbusBuf.getHead()) + ",Tail:" + parser.toString(dbusBuf.getTail()));
  assertEquals("Head Check", 61, dbusBuf.getHead());
  // GenId set here but tail is not yet overlapped
  assertEquals("Tail Check", 2048, dbusBuf.getTail());

  // Read the next event to unblock the writer
  event = itr.next();
  assertTrue(event.isValid());
  itr.remove();
  try {
    writerThread.join(1000);
  } catch (InterruptedException ie) {
    ie.printStackTrace();
    // Fix: restore the interrupt status instead of silently swallowing it
    Thread.currentThread().interrupt();
  }
  LOG.info("Head:" + parser.toString(dbusBuf.getHead()) + ",Tail:" + parser.toString(dbusBuf.getTail()));
  assertFalse(writerThread.isAlive());
  assertEquals("Head Check", 132, dbusBuf.getHead());
  assertEquals("Tail Check", 2119, dbusBuf.getTail());

  // Drain the remaining events; the buffer ends up empty (head == tail)
  while (itr.hasNext()) {
    assertTrue(itr.next().isValid(true));
    itr.remove();
  }
  LOG.info("Head:" + parser.toString(dbusBuf.getHead()) + ",Tail:" + parser.toString(dbusBuf.getTail()));
  assertEquals("Head Check", dbusBuf.getHead(), dbusBuf.getTail());
}
End of aggregated DbusEventAppender usage examples.