Use of com.linkedin.databus.core.test.DbusEventBufferConsumer in project databus by LinkedIn.
In class ReadEventsTestParams, method runConstEventsReaderWriter:
// data flow: emitter/producer/appender -> DEB -> writer -> pipe -> reader -> DEB -> consumer
protected boolean runConstEventsReaderWriter(Vector<DbusEvent> srcTestEvents, Vector<DbusEvent> dstTestEvents, EventBufferTestInput input, DbusEventsStatisticsCollector emitterStats, DbusEventsStatisticsCollector streamStats, DbusEventsStatisticsCollector clientStats, boolean autoStartBuffer) throws Exception {
LOG.info("starting runConstEventsReaderWriter for " + input.getTestName());
int numEvents = input.getNumEvents();
int maxWindowSize = input.getWindowSize();
DbusEventGenerator evGen = new DbusEventGenerator();
if (evGen.generateEvents(numEvents, maxWindowSize, 512, input.getPayloadSize(), srcTestEvents) <= 0) {
return false;
}
int eventSize = srcTestEvents.get(0).size();
long producerBufferSize = input.getProducerBufferSize() * eventSize;
long sharedBufferSize = input.getSharedBufferSize() * eventSize;
int stagingBufferSize = input.getStagingBufferSize() * eventSize;
int individualBufferSize = input.getIndividualBufferSize() * eventSize;
int indexSize = input.getIndexSize() * eventSize;
QueuePolicy prodQueuePolicy = input.getProdQueuePolicy();
QueuePolicy consQueuePolicy = input.getConsQueuePolicy();
// create the main event buffers
DbusEventBuffer prodEventBuffer = new DbusEventBuffer(getConfig(producerBufferSize, individualBufferSize, indexSize, stagingBufferSize, AllocationPolicy.HEAP_MEMORY, prodQueuePolicy, input.getProdBufferAssertLevel()));
DbusEventBuffer consEventBuffer = new DbusEventBuffer(getConfig(sharedBufferSize, individualBufferSize, indexSize, stagingBufferSize, AllocationPolicy.HEAP_MEMORY, consQueuePolicy, input.getConsBufferAssertLevel()));
// Producer of events, a.k.a. "emitter"
DbusEventAppender eventProducer = new DbusEventAppender(srcTestEvents, prodEventBuffer, emitterStats, autoStartBuffer);
// communication channel (an NIO pipe) between writer and reader
Pipe pipe = Pipe.open();
Pipe.SinkChannel writerStream = pipe.sink();
Pipe.SourceChannel readerStream = pipe.source();
writerStream.configureBlocking(true);
readerStream.configureBlocking(false);
// Event writer - Relay in the real world
int batchSize = input.getBatchSize() * eventSize;
DbusEventBufferWriter writer = new DbusEventBufferWriter(prodEventBuffer, writerStream, batchSize, streamStats);
// Event consumer - the client application in the real world
DbusEventBufferConsumer consumer = new DbusEventBufferConsumer(consEventBuffer, numEvents, input.getDeleteInterval(), dstTestEvents);
Vector<EventBufferConsumer> consList = new Vector<EventBufferConsumer>();
consList.add(consumer);
// Event readers - Clients in the real world
DbusEventBufferReader reader = new DbusEventBufferReader(consEventBuffer, readerStream, consList, clientStats);
UncaughtExceptionTrackingThread tEmitter = new UncaughtExceptionTrackingThread(eventProducer, "EventProducer");
UncaughtExceptionTrackingThread tWriter = new UncaughtExceptionTrackingThread(writer, "Writer");
UncaughtExceptionTrackingThread tReader = new UncaughtExceptionTrackingThread(reader, "Reader");
UncaughtExceptionTrackingThread tConsumer = new UncaughtExceptionTrackingThread(consumer, "Consumer");
long emitterWaitms = 20000;
long writerWaitms = 10000;
long readerWaitms = 10000;
long consumerWaitms = readerWaitms;
// start emitter;
tEmitter.start();
// tarnish events written to buffer;
int[] corruptIndexList = input.getCorruptIndexList();
if (corruptIndexList.length > 0) {
tEmitter.join(emitterWaitms);
EventCorruptionType corruptionType = input.getCorruptionType();
eventProducer.tarnishEventsInBuffer(corruptIndexList, corruptionType);
}
// start consumer / reader / writer
tConsumer.start();
tWriter.start();
tReader.start();
// wait until all events have been written;
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): waiting up to " + (emitterWaitms / 1000) + " sec for appender/producer/emitter thread to terminate");
tEmitter.join(emitterWaitms);
// tell the writer how many events to expect so it can finish
long eventsEmitted = eventProducer.eventsEmitted();
writer.setExpectedEvents(eventsEmitted);
// wait for writer to finish;
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): waiting up to " + (writerWaitms / 1000) + " sec for writer thread to terminate");
tWriter.join(writerWaitms);
// close the writer Stream;
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): signalling writer to stop");
writer.stop();
if (!tReader.isAlive()) {
LOG.error("runConstEventsReaderWriter(): reader thread died unexpectedly");
}
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): waiting up to " + (consumerWaitms / 1000) + " sec for consumer thread to terminate");
tConsumer.join(consumerWaitms);
// stop the consumer thread; it may or may not have received all events
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): signalling consumer to stop");
consumer.stop();
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): signalling reader to stop");
reader.stop();
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): all stop.");
assertEquals(null, consumer.getExceptionThrown());
LOG.info("runConstEventsReaderWriter() consumer thread: " + (consumer.hasInvalidEvent() ? "DID" : "did NOT") + " receive invalid event(s); num events emitted=" + eventsEmitted + ", events written=" + writer.eventsWritten() + ", events read=" + reader.eventsRead());
return true;
}
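The transport between the relay-side writer and the client-side reader above is a plain java.nio.channels.Pipe: a blocking sink channel for the writer and a non-blocking source channel for the reader. Below is a minimal, self-contained sketch of just that plumbing, assuming nothing about databus itself; the class name PipePlumbingSketch and the string payload are illustrative stand-ins for the serialized DbusEvents that DbusEventBufferWriter and DbusEventBufferReader move over the same channels.

import java.nio.ByteBuffer;
import java.nio.channels.Pipe;
import java.nio.charset.StandardCharsets;

public class PipePlumbingSketch {
  public static void main(String[] args) throws Exception {
    // Same wiring as the test: one in-process pipe, blocking writer end, non-blocking reader end.
    Pipe pipe = Pipe.open();
    Pipe.SinkChannel writerStream = pipe.sink();
    Pipe.SourceChannel readerStream = pipe.source();
    writerStream.configureBlocking(true);
    readerStream.configureBlocking(false);

    // "Writer" side: push a batch of bytes (the real test pushes serialized DbusEvents).
    ByteBuffer batch = ByteBuffer.wrap("event-batch".getBytes(StandardCharsets.UTF_8));
    while (batch.hasRemaining()) {
      writerStream.write(batch);
    }
    writerStream.close();

    // "Reader" side: poll the non-blocking source until the writer end is closed (read() == -1).
    ByteBuffer readBuf = ByteBuffer.allocate(64);
    int n;
    while ((n = readerStream.read(readBuf)) >= 0) {
      if (n == 0) {
        Thread.sleep(1); // nothing available yet; a real reader would select() or retry
        continue;
      }
      readBuf.flip();
      byte[] chunk = new byte[readBuf.remaining()];
      readBuf.get(chunk);
      System.out.println("read " + chunk.length + " bytes: " + new String(chunk, StandardCharsets.UTF_8));
      readBuf.clear();
    }
    readerStream.close();
  }
}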
Use of com.linkedin.databus.core.test.DbusEventBufferConsumer in project databus by LinkedIn.
In class ReadEventsTestParams, method validateDestData:
private void validateDestData() {
_log.info("Verifying the events");
Vector<DbusEvent> destEvents1 = new Vector<DbusEvent>(_srcEvents.size());
DbusEventBufferConsumer consumer = new DbusEventBufferConsumer(_destBuf, _srcEvents.size(), 0, destEvents1);
final long timeout = _debuggingMode ? 100000000 : 1000;
boolean consumerDone = consumer.runWithTimeout(timeout);
Assert.assertTrue(consumerDone);
TestDbusEventBuffer.checkEvents(_srcEvents, destEvents1, _srcEvents.size());
_log.info("Verified events: " + _srcEvents.size());
}
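The verification pattern in validateDestData is: drain everything the destination buffer received into a fresh vector, bounded by a timeout, then compare element-for-element against the source events. A generic sketch of that drain-with-timeout idea using only JDK types is shown below; the queue, the drainWithTimeout helper, and the equality check are illustrative stand-ins for _destBuf, DbusEventBufferConsumer.runWithTimeout, and TestDbusEventBuffer.checkEvents, not databus APIs.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class DrainAndCompareSketch {
  /** Drains up to expectedCount items from the queue, giving up after timeoutMs. */
  static <T> List<T> drainWithTimeout(BlockingQueue<T> queue, int expectedCount, long timeoutMs)
      throws InterruptedException {
    List<T> drained = new ArrayList<>(expectedCount);
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (drained.size() < expectedCount) {
      long remaining = deadline - System.currentTimeMillis();
      if (remaining <= 0) {
        break; // timed out: return whatever arrived, like runWithTimeout returning false
      }
      T item = queue.poll(remaining, TimeUnit.MILLISECONDS);
      if (item != null) {
        drained.add(item);
      }
    }
    return drained;
  }

  public static void main(String[] args) throws Exception {
    List<String> srcEvents = List.of("e1", "e2", "e3");
    BlockingQueue<String> destBuf = new LinkedBlockingQueue<>(srcEvents);

    List<String> destEvents = drainWithTimeout(destBuf, srcEvents.size(), 1000);

    // Element-for-element comparison, the role checkEvents plays in the test.
    if (!srcEvents.equals(destEvents)) {
      throw new AssertionError("source and destination events differ: " + destEvents);
    }
    System.out.println("Verified events: " + destEvents.size());
  }
}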