Use of com.linkedin.databus.core.util.UncaughtExceptionTrackingThread in project databus by linkedin.
The class TestGenericDispatcher, method testPartialWindowRollback.
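Before the full test, here is a minimal sketch of the thread-handling pattern all the usages on this page share: wrap a Runnable in an UncaughtExceptionTrackingThread, run it as a daemon, join with a timeout, and inspect any exception that escaped run(). Only the calls that actually appear below (the (Runnable, String) constructor, setDaemon, start, join, isAlive, getLastException) are taken from the real class; the class name UncaughtExceptionTrackingSketch and the worker body are illustrative.

import com.linkedin.databus.core.util.UncaughtExceptionTrackingThread;

public class UncaughtExceptionTrackingSketch {
    public static void main(String[] args) throws InterruptedException {
        // placeholder workload; the tests below run buffer readers/writers here
        Runnable worker = new Runnable() {
            @Override
            public void run() {
                throw new IllegalStateException("simulated failure");
            }
        };
        UncaughtExceptionTrackingThread t = new UncaughtExceptionTrackingThread(worker, "Worker");
        t.setDaemon(true);
        t.start();
        t.join(10000);
        // unlike a plain Thread, the tracking thread records an exception that
        // escaped run(), so a test can assert on it after the join
        if (t.isAlive() || t.getLastException() != null) {
            System.err.println("worker failed: " + t.getLastException());
        }
    }
}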
/**
* Tests the case where the dispatcher exits the main processing loop in {@link GenericDispatcher#doDispatchEvents()}
* with a partial window and the flushing of the outstanding callbacks fails. We want to make sure that a rollback
* is correctly triggered.
*
* The test simulates the following case: e1_1 e1_2 e1_3 <EOW> e2_1 e2_2 e2_3 <EOW> ... with a failure in the e2_2
* callback.
*
* 1) Read full first window: e1_1 e1_2 e1_3 <EOW>
* 2) Read partial second window: e2_1 e2_2
* 3) The above should fail -- verify that rollback is called
* 4) Read the rest
*/
@Test(groups = { "small", "functional" })
public void testPartialWindowRollback() throws Exception {
final Logger log = Logger.getLogger("TestGenericDispatcher.testPartialWindowRollback");
// log.setLevel(Level.INFO);
log.info("start");
final Level saveLevel = Logger.getLogger("com.linkedin.databus.client").getLevel();
// Logger.getLogger("com.linkedin.databus.client").setLevel(Level.DEBUG);
// generate events
Vector<Short> srcIdList = new Vector<Short>();
srcIdList.add((short) 1);
DbusEventGenerator evGen = new DbusEventGenerator(0, srcIdList);
Vector<DbusEvent> srcTestEvents = new Vector<DbusEvent>();
final int numEvents = 9;
// 1-based number of the event callback to fail
final int numOfFailureEvent = 5;
final int numEventsPerWindow = 3;
final int payloadSize = 200;
final int numWindows = (int) Math.ceil(1.0 * numEvents / numEventsPerWindow);
Assert.assertTrue(evGen.generateEvents(numEvents, numEventsPerWindow, 500, payloadSize, srcTestEvents) > 0);
// find out how much data we need to stream for the failure
// account for the EOW event which is < payload size
int win1Size = payloadSize - 1;
int win2Size = 0;
int eventN = 0;
for (DbusEvent e : srcTestEvents) {
eventN++;
if (eventN <= numEventsPerWindow) {
win1Size += e.size();
} else if (eventN <= numOfFailureEvent) {
win2Size += e.size();
}
}
// serialize the events to a buffer so they can be sent to the client
final TestGenericDispatcherEventBuffer srcEventsBuf = new TestGenericDispatcherEventBuffer(_generic100KBufferStaticConfig);
DbusEventAppender eventProducer = new DbusEventAppender(srcTestEvents, srcEventsBuf, null, true);
Thread tEmitter = new Thread(eventProducer);
tEmitter.start();
// Create destination (client) buffer
final TestGenericDispatcherEventBuffer destEventsBuf = new TestGenericDispatcherEventBuffer(_generic100KBufferStaticConfig);
// Create dispatcher
final TimeoutTestConsumer mockConsumer = new TimeoutTestConsumer(100, 10, 0, numOfFailureEvent, 0, 1);
SelectingDatabusCombinedConsumer sdccMockConsumer = new SelectingDatabusCombinedConsumer((DatabusStreamConsumer) mockConsumer);
List<String> sources = new ArrayList<String>();
Map<Long, IdNamePair> sourcesMap = new HashMap<Long, IdNamePair>();
for (int i = 1; i <= 3; ++i) {
IdNamePair sourcePair = new IdNamePair((long) i, "source" + i);
sources.add(sourcePair.getName());
sourcesMap.put(sourcePair.getId(), sourcePair);
}
DatabusV2ConsumerRegistration consumerReg = new DatabusV2ConsumerRegistration(sdccMockConsumer, sources, null);
List<DatabusV2ConsumerRegistration> allRegistrations = Arrays.asList(consumerReg);
final ConsumerCallbackStats callbackStats = new ConsumerCallbackStats(0, "test", "test", true, false, null);
final UnifiedClientStats unifiedStats = new UnifiedClientStats(0, "test", "test.unified");
MultiConsumerCallback callback = new MultiConsumerCallback(allRegistrations, Executors.newFixedThreadPool(2), 1000, new StreamConsumerCallbackFactory(callbackStats, unifiedStats), callbackStats, unifiedStats, null, null);
callback.setSourceMap(sourcesMap);
List<DatabusSubscription> subs = DatabusSubscription.createSubscriptionList(sources);
final RelayDispatcher dispatcher = new RelayDispatcher("dispatcher", _genericRelayConnStaticConfig, subs, new InMemoryPersistenceProvider(), destEventsBuf, callback, null, null, null, null, null);
dispatcher.setSchemaIdCheck(false);
Thread dispatcherThread = new Thread(dispatcher);
dispatcherThread.setDaemon(true);
log.info("starting dispatcher thread");
dispatcherThread.start();
HashMap<Long, List<RegisterResponseEntry>> schemaMap = new HashMap<Long, List<RegisterResponseEntry>>();
List<RegisterResponseEntry> l1 = new ArrayList<RegisterResponseEntry>();
List<RegisterResponseEntry> l2 = new ArrayList<RegisterResponseEntry>();
List<RegisterResponseEntry> l3 = new ArrayList<RegisterResponseEntry>();
l1.add(new RegisterResponseEntry(1L, (short) 1, SOURCE1_SCHEMA_STR));
l2.add(new RegisterResponseEntry(2L, (short) 1, SOURCE2_SCHEMA_STR));
l3.add(new RegisterResponseEntry(3L, (short) 1, SOURCE3_SCHEMA_STR));
schemaMap.put(1L, l1);
schemaMap.put(2L, l2);
schemaMap.put(3L, l3);
dispatcher.enqueueMessage(SourcesMessage.createSetSourcesIdsMessage(sourcesMap.values()));
dispatcher.enqueueMessage(SourcesMessage.createSetSourcesSchemasMessage(schemaMap));
log.info("starting event dispatch");
// stream the events from the source buffer without the EOW
// comm channels between reader and writer
Pipe pipe = Pipe.open();
Pipe.SinkChannel writerStream = pipe.sink();
Pipe.SourceChannel readerStream = pipe.source();
writerStream.configureBlocking(true);
readerStream.configureBlocking(false);
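// the reader side is non-blocking so that DbusEventBuffer.readEvents() can exit its loop
// when no more data is available (see the fuller comment in testShutdownBeforeRollback below)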
// Event writer - Relay in the real world
Checkpoint cp = Checkpoint.createFlexibleCheckpoint();
// Event readers - Clients in the real world
// Checkpoint pullerCheckpoint = Checkpoint.createFlexibleCheckpoint();
DbusEventsStatisticsCollector clientStats = new DbusEventsStatisticsCollector(0, "client", true, false, null);
DbusEventBufferReader reader = new DbusEventBufferReader(destEventsBuf, readerStream, null, clientStats);
UncaughtExceptionTrackingThread tReader = new UncaughtExceptionTrackingThread(reader, "Reader");
tReader.setDaemon(true);
tReader.start();
try {
log.info("send first window -- that one should be OK");
StreamEventsResult streamRes = srcEventsBuf.streamEvents(cp, writerStream, new StreamEventsArgs(win1Size));
Assert.assertEquals(numEventsPerWindow + 1, streamRes.getNumEventsStreamed());
TestUtil.assertWithBackoff(new ConditionCheck() {
@Override
public boolean check() {
return 1 == callbackStats.getNumSysEventsProcessed();
}
}, "first window processed", 5000, log);
log.info("send the second partial window -- that one should cause an error");
streamRes = srcEventsBuf.streamEvents(cp, writerStream, new StreamEventsArgs(win2Size));
Assert.assertEquals(numOfFailureEvent - numEventsPerWindow, streamRes.getNumEventsStreamed());
log.info("wait for dispatcher to finish");
TestUtil.assertWithBackoff(new ConditionCheck() {
@Override
public boolean check() {
log.info("events received: " + callbackStats.getNumDataEventsReceived());
return numOfFailureEvent <= callbackStats.getNumDataEventsProcessed();
}
}, "all events until the error processed", 5000, log);
log.info("all data events have been received but no EOW");
Assert.assertEquals(numOfFailureEvent, clientStats.getTotalStats().getNumDataEvents());
Assert.assertEquals(1, clientStats.getTotalStats().getNumSysEvents());
// the failing event still reaches the consumer (and may be retried), so at least numOfFailureEvent data events are processed
Assert.assertTrue(numOfFailureEvent <= callbackStats.getNumDataEventsProcessed());
// onDataEvent callbacks for e2_1 and e2_2 get cancelled
Assert.assertEquals(2, callbackStats.getNumDataErrorsProcessed());
// only one EOW
Assert.assertEquals(1, callbackStats.getNumSysEventsProcessed());
log.info("Send the remainder of the window");
streamRes = srcEventsBuf.streamEvents(cp, writerStream, new StreamEventsArgs(100000));
// remaining events + EOWs
Assert.assertEquals(srcTestEvents.size() + numWindows - (numOfFailureEvent + 1), streamRes.getNumEventsStreamed());
log.info("wait for the rollback");
TestUtil.assertWithBackoff(new ConditionCheck() {
@Override
public boolean check() {
return 1 == mockConsumer.getNumRollbacks();
}
}, "rollback seen", 5000, log);
log.info("wait for dispatcher to finish after the rollback");
TestUtil.assertWithBackoff(new ConditionCheck() {
@Override
public boolean check() {
log.info("num windows processed: " + callbackStats.getNumSysEventsProcessed());
return numWindows == callbackStats.getNumSysEventsProcessed();
}
}, "all events processed", 5000, log);
} finally {
reader.stop();
dispatcher.shutdown();
log.info("all events processed");
verifyNoLocks(null, srcEventsBuf);
verifyNoLocks(null, destEventsBuf);
}
Logger.getLogger("com.linkedin.databus.client").setLevel(saveLevel);
log.info("end\n");
}
Use of com.linkedin.databus.core.util.UncaughtExceptionTrackingThread in project databus by linkedin.
The class ReadEventsTestParams, method runConstEventsReaderWriter.
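The method below pushes events through the pipeline emitter/producer/appender -> DbusEventBuffer -> writer -> pipe -> reader -> DbusEventBuffer -> consumer. As a warm-up, here is a stripped-down sketch of just the writer -> pipe -> reader leg, with the databus buffers and statistics replaced by plain byte counting; all names in the sketch are illustrative stand-ins.

import java.nio.ByteBuffer;
import java.nio.channels.Pipe;

public class PipeLegSketch {
    public static void main(String[] args) throws Exception {
        final Pipe pipe = Pipe.open();
        final Pipe.SinkChannel writerStream = pipe.sink();
        final Pipe.SourceChannel readerStream = pipe.source();
        writerStream.configureBlocking(true);   // writer blocks until the reader drains
        readerStream.configureBlocking(false);  // reader polls, mimicking a chunked HTTP channel

        Thread reader = new Thread(new Runnable() {
            @Override
            public void run() {
                ByteBuffer buf = ByteBuffer.allocate(64);
                int total = 0;
                try {
                    while (total < 1024) {
                        int n = readerStream.read(buf);  // may return 0 in non-blocking mode
                        if (n > 0) { total += n; buf.clear(); }
                    }
                    System.out.println("reader drained " + total + " bytes");
                } catch (Exception e) {
                    e.printStackTrace();
                }
            }
        }, "Reader");
        reader.start();

        ByteBuffer payload = ByteBuffer.allocate(1024);  // stand-in for serialized events
        while (payload.hasRemaining()) {
            writerStream.write(payload);
        }
        reader.join(10000);
    }
}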
// data flow: emitter/producer/appender -> DEB -> writer -> pipe -> reader -> DEB -> consumer
protected boolean runConstEventsReaderWriter(Vector<DbusEvent> srcTestEvents, Vector<DbusEvent> dstTestEvents, EventBufferTestInput input, DbusEventsStatisticsCollector emitterStats, DbusEventsStatisticsCollector streamStats, DbusEventsStatisticsCollector clientStats, boolean autoStartBuffer) throws Exception {
LOG.info("starting runConstEventsReaderWriter for " + input.getTestName());
int numEvents = input.getNumEvents();
int maxWindowSize = input.getWindowSize();
DbusEventGenerator evGen = new DbusEventGenerator();
if (evGen.generateEvents(numEvents, maxWindowSize, 512, input.getPayloadSize(), srcTestEvents) <= 0) {
return false;
}
int eventSize = srcTestEvents.get(0).size();
long producerBufferSize = input.getProducerBufferSize() * eventSize;
long sharedBufferSize = input.getSharedBufferSize() * eventSize;
int stagingBufferSize = input.getStagingBufferSize() * eventSize;
int individualBufferSize = input.getIndividualBufferSize() * eventSize;
int indexSize = input.getIndexSize() * eventSize;
QueuePolicy prodQueuePolicy = input.getProdQueuePolicy();
QueuePolicy consQueuePolicy = input.getConsQueuePolicy();
// create the main event buffers
DbusEventBuffer prodEventBuffer = new DbusEventBuffer(getConfig(producerBufferSize, individualBufferSize, indexSize, stagingBufferSize, AllocationPolicy.HEAP_MEMORY, prodQueuePolicy, input.getProdBufferAssertLevel()));
DbusEventBuffer consEventBuffer = new DbusEventBuffer(getConfig(sharedBufferSize, individualBufferSize, indexSize, stagingBufferSize, AllocationPolicy.HEAP_MEMORY, consQueuePolicy, input.getConsBufferAssertLevel()));
// Producer of events, a.k.a. "emitter"
DbusEventAppender eventProducer = new DbusEventAppender(srcTestEvents, prodEventBuffer, emitterStats, autoStartBuffer);
// comm channels between reader and writer
Pipe pipe = Pipe.open();
Pipe.SinkChannel writerStream = pipe.sink();
Pipe.SourceChannel readerStream = pipe.source();
writerStream.configureBlocking(true);
readerStream.configureBlocking(false);
// Event writer - Relay in the real world
int batchSize = input.getBatchSize() * eventSize;
DbusEventBufferWriter writer = new DbusEventBufferWriter(prodEventBuffer, writerStream, batchSize, streamStats);
// Event readers - Clients in the real world
DbusEventBufferConsumer consumer = new DbusEventBufferConsumer(consEventBuffer, numEvents, input.getDeleteInterval(), dstTestEvents);
Vector<EventBufferConsumer> consList = new Vector<EventBufferConsumer>();
consList.add(consumer);
// Event readers - Clients in the real world
DbusEventBufferReader reader = new DbusEventBufferReader(consEventBuffer, readerStream, consList, clientStats);
UncaughtExceptionTrackingThread tEmitter = new UncaughtExceptionTrackingThread(eventProducer, "EventProducer");
UncaughtExceptionTrackingThread tWriter = new UncaughtExceptionTrackingThread(writer, "Writer");
UncaughtExceptionTrackingThread tReader = new UncaughtExceptionTrackingThread(reader, "Reader");
UncaughtExceptionTrackingThread tConsumer = new UncaughtExceptionTrackingThread(consumer, "Consumer");
long emitterWaitms = 20000;
long writerWaitms = 10000;
long readerWaitms = 10000;
long consumerWaitms = readerWaitms;
// start emitter;
tEmitter.start();
// tarnish events written to buffer;
int[] corruptIndexList = input.getCorruptIndexList();
if (corruptIndexList.length > 0) {
tEmitter.join(emitterWaitms);
EventCorruptionType corruptionType = input.getCorruptionType();
eventProducer.tarnishEventsInBuffer(corruptIndexList, corruptionType);
}
// start consumer / reader /writer
tConsumer.start();
tWriter.start();
tReader.start();
// wait until all events have been written;
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): waiting up to " + (emitterWaitms / 1000) + " sec for appender/producer/emitter thread to terminate");
tEmitter.join(emitterWaitms);
// try to set a finish point for the writer
long eventsEmitted = eventProducer.eventsEmitted();
writer.setExpectedEvents(eventsEmitted);
// wait for writer to finish;
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): waiting up to " + (writerWaitms / 1000) + " sec for writer thread to terminate");
tWriter.join(writerWaitms);
// close the writer Stream;
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): signalling writer to stop");
writer.stop();
if (!tReader.isAlive()) {
LOG.error("runConstEventsReaderWriter(): reader thread died unexpectedly");
}
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): waiting up to " + (consumerWaitms / 1000) + " sec for consumer thread to terminate");
tConsumer.join(consumerWaitms);
// stop the consumer thread; may or may not have got all events;
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): signalling consumer to stop");
consumer.stop();
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): signalling reader to stop");
reader.stop();
dumpEmitterWriterReaderConsumerState(eventProducer, writer, reader, consumer, emitterStats, streamStats, clientStats, dstTestEvents, prodEventBuffer, consEventBuffer);
LOG.info("runConstEventsReaderWriter(): all stop.");
assertEquals(null, consumer.getExceptionThrown());
LOG.info("runConstEventsReaderWriter() consumer thread: " + (consumer.hasInvalidEvent() ? "DID" : "did NOT") + " receive invalid event(s); num events emitted=" + eventsEmitted + ", events written=" + writer.eventsWritten() + ", events read=" + reader.eventsRead());
return true;
}
Use of com.linkedin.databus.core.util.UncaughtExceptionTrackingThread in project databus by linkedin.
The class TestDbusEventBufferStreamEvents, method testStreamLargeScn.
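The test coordinates a background streaming thread with an appender that runs in the main thread, using two atomics: the stream thread polls until it either streams something or the generator signals completion. Below is a sketch of just that handshake, with DbusEventBuffer.streamEvents() replaced by an illustrative produce() stub; the surrounding names are hypothetical.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

public class StreamUntilCompleteSketch {
    // stand-in for streamEvents(...).getNumEventsStreamed()
    static int produce() {
        return (int) (Math.random() * 2);  // sometimes 0, sometimes 1
    }

    public static void main(String[] args) throws InterruptedException {
        final AtomicBoolean genComplete = new AtomicBoolean(false);
        final AtomicInteger num = new AtomicInteger(0);

        Thread streamThread = new Thread(new Runnable() {
            @Override
            public void run() {
                // keep polling until something is streamed or generation finishes
                while (!genComplete.get() && 0 >= num.get()) {
                    num.set(produce());
                }
            }
        }, "streamThread");
        streamThread.setDaemon(true);
        streamThread.start();

        // the main thread plays the appender role, then signals completion
        Thread.sleep(50);
        genComplete.set(true);
        streamThread.join(10000);
        System.out.println("events streamed: " + num.get());
    }
}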
/**
* Tests streamEvents with a large scn with concurrent writes.
*/
@Test
public void testStreamLargeScn() throws Exception {
final Logger log = Logger.getLogger("TestDbusEventBuffer.testStreamLargeScn");
// log.setLevel(Level.INFO);
log.info("starting");
final DbusEventBuffer dbusBuf = new DbusEventBuffer(TestDbusEventBuffer.getConfig(120000, 500000, 1024, 500, AllocationPolicy.HEAP_MEMORY, QueuePolicy.OVERWRITE_ON_WRITE, AssertLevel.ALL));
final Checkpoint cp = Checkpoint.createOnlineConsumptionCheckpoint(1000);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
final WritableByteChannel ochannel = Channels.newChannel(baos);
final AtomicBoolean hasError = new AtomicBoolean(false);
final AtomicInteger num = new AtomicInteger(0);
final AtomicBoolean genComplete = new AtomicBoolean(false);
UncaughtExceptionTrackingThread streamThread = new UncaughtExceptionTrackingThread(new Runnable() {
@Override
public void run() {
try {
while (!genComplete.get() && 0 >= num.get()) {
StreamEventsArgs args = new StreamEventsArgs(10000);
num.set(dbusBuf.streamEvents(cp, ochannel, args).getNumEventsStreamed());
}
} catch (ScnNotFoundException e) {
hasError.set(true);
} catch (OffsetNotFoundException e) {
hasError.set(true);
}
}
}, "testGetLargeScn.streamThread");
streamThread.setDaemon(true);
streamThread.start();
log.info("append initial events");
DbusEventGenerator generator = new DbusEventGenerator();
Vector<DbusEvent> events = new Vector<DbusEvent>();
generator.generateEvents(70000, 1, 120, 39, events);
// Add events to the EventBuffer. Now the buffer is full
DbusEventAppender appender = new DbusEventAppender(events, dbusBuf, null);
// running in the same thread
appender.run();
genComplete.set(true);
streamThread.join(10000);
Assert.assertFalse(streamThread.isAlive());
Assert.assertNull(streamThread.getLastException());
Assert.assertFalse(hasError.get());
Assert.assertTrue(num.get() > 0);
}
Use of com.linkedin.databus.core.util.UncaughtExceptionTrackingThread in project databus by linkedin.
The class TestGenericDispatcher, method testShutdownBeforeRollback.
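The choreography under test can be reduced to a latch-gated callback that a shutdown interrupt releases without triggering a rollback. Here is a sketch under that assumption, with plain threads standing in for the dispatcher and consumer; all names are illustrative and none of the databus APIs are used.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicInteger;

public class ShutdownBeforeRollbackSketch {
    public static void main(String[] args) throws InterruptedException {
        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicInteger rollbacks = new AtomicInteger(0);

        Thread dispatcher = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    latch.await();                // callback blocked mid-window
                    rollbacks.incrementAndGet();  // would roll back if allowed to continue
                } catch (InterruptedException e) {
                    // shutdown interrupts the blocked callback: no rollback
                }
            }
        }, "dispatcher");
        dispatcher.start();

        Thread.sleep(100);        // let the callback reach the latch
        dispatcher.interrupt();   // the shutdown path
        latch.countDown();        // remove the barrier, as the test does
        dispatcher.join(5000);
        if (rollbacks.get() != 0) {
            System.err.println("unexpected rollback after shutdown");
        }
        System.out.println("rollbacks after shutdown: " + rollbacks.get());
    }
}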
/**
* 1. Dispatcher is dispatching 2 windows of events.
* 2. First window consumption completes successfully.
* 3. The second window's onStartDataEventSequence() of the registered callback is blocked (interruptibly),
* causing the dispatcher to wait.
* 4. At this instant the dispatcher is shut down. The callback is made to return a failure status, which would
* cause a rollback in the normal scenario.
* 5. As the shutdown message is passed, the blocked callback is expected to be interrupted and no rollback
* calls MUST be made.
*/
@Test(groups = { "small", "functional" })
public void testShutdownBeforeRollback() throws Exception {
final Logger log = Logger.getLogger("TestGenericDispatcher.testShutdownBeforeRollback");
log.setLevel(Level.INFO);
// log.getRoot().setLevel(Level.DEBUG);
LOG.info("start");
// generate events
Vector<Short> srcIdList = new Vector<Short>();
srcIdList.add((short) 1);
DbusEventGenerator evGen = new DbusEventGenerator(0, srcIdList);
Vector<DbusEvent> srcTestEvents = new Vector<DbusEvent>();
final int numEvents = 8;
final int numEventsPerWindow = 4;
final int payloadSize = 200;
Assert.assertTrue(evGen.generateEvents(numEvents, numEventsPerWindow, 500, payloadSize, srcTestEvents) > 0);
// find out how much data we need to stream for the failure
// account for the EOW event which is < payload size
int win1Size = payloadSize - 1;
for (DbusEvent e : srcTestEvents) {
win1Size += e.size();
}
// serialize the events to a buffer so they can be sent to the client
final TestGenericDispatcherEventBuffer srcEventsBuf = new TestGenericDispatcherEventBuffer(_generic100KBufferStaticConfig);
DbusEventAppender eventProducer = new DbusEventAppender(srcTestEvents, srcEventsBuf, null, true);
Thread tEmitter = new Thread(eventProducer);
tEmitter.start();
// Create destination (client) buffer
final TestGenericDispatcherEventBuffer destEventsBuf = new TestGenericDispatcherEventBuffer(_generic100KBufferStaticConfig);
/**
* Consumer with ability to wait for latch during onStartDataEventSequence()
*/
class TimeoutDESConsumer extends TimeoutTestConsumer {
private final CountDownLatch latch = new CountDownLatch(1);
private int _countStartWindow = 0;
private final int _failedRequestNumber;
public TimeoutDESConsumer(int failedRequestNumber) {
super(1, 1, 0, 0, 0, 0);
_failedRequestNumber = failedRequestNumber;
}
public CountDownLatch getLatch() {
return latch;
}
@Override
public ConsumerCallbackResult onStartDataEventSequence(SCN startScn) {
_countStartWindow++;
if (_countStartWindow == _failedRequestNumber) {
// Wait for the latch to open
try {
latch.await();
} catch (InterruptedException e) {
// ignored: shutdown interrupts the blocked callback, which is the expected path
}
return ConsumerCallbackResult.ERROR_FATAL;
}
return super.onStartDataEventSequence(startScn);
}
@Override
public ConsumerCallbackResult onDataEvent(DbusEvent e, DbusEventDecoder eventDecoder) {
return ConsumerCallbackResult.SUCCESS;
}
public int getNumBeginWindowCalls() {
return _countStartWindow;
}
}
// Create dispatcher
// fail on second window
final TimeoutDESConsumer mockConsumer = new TimeoutDESConsumer(2);
SelectingDatabusCombinedConsumer sdccMockConsumer = new SelectingDatabusCombinedConsumer((DatabusStreamConsumer) mockConsumer);
List<String> sources = new ArrayList<String>();
Map<Long, IdNamePair> sourcesMap = new HashMap<Long, IdNamePair>();
for (int i = 1; i <= 3; ++i) {
IdNamePair sourcePair = new IdNamePair((long) i, "source" + i);
sources.add(sourcePair.getName());
sourcesMap.put(sourcePair.getId(), sourcePair);
}
DatabusV2ConsumerRegistration consumerReg = new DatabusV2ConsumerRegistration(sdccMockConsumer, sources, null);
List<DatabusV2ConsumerRegistration> allRegistrations = Arrays.asList(consumerReg);
final ConsumerCallbackStats callbackStats = new ConsumerCallbackStats(0, "test", "test", true, false, null);
final UnifiedClientStats unifiedStats = new UnifiedClientStats(0, "test", "test.unified");
// 100 ms budget for the consumer callbacks
MultiConsumerCallback callback = new MultiConsumerCallback(allRegistrations, Executors.newFixedThreadPool(2), 100, new StreamConsumerCallbackFactory(callbackStats, unifiedStats), callbackStats, unifiedStats, null, null);
callback.setSourceMap(sourcesMap);
List<DatabusSubscription> subs = DatabusSubscription.createSubscriptionList(sources);
final RelayDispatcher dispatcher = new RelayDispatcher("dispatcher", _genericRelayConnStaticConfig, subs, new InMemoryPersistenceProvider(), destEventsBuf, callback, null, null, null, null, null);
final Thread dispatcherThread = new Thread(dispatcher);
log.info("starting dispatcher thread");
dispatcherThread.start();
// Generate RegisterRespone for schema
HashMap<Long, List<RegisterResponseEntry>> schemaMap = new HashMap<Long, List<RegisterResponseEntry>>();
List<RegisterResponseEntry> l1 = new ArrayList<RegisterResponseEntry>();
List<RegisterResponseEntry> l2 = new ArrayList<RegisterResponseEntry>();
List<RegisterResponseEntry> l3 = new ArrayList<RegisterResponseEntry>();
l1.add(new RegisterResponseEntry(1L, (short) 1, SOURCE1_SCHEMA_STR));
l2.add(new RegisterResponseEntry(2L, (short) 1, SOURCE2_SCHEMA_STR));
l3.add(new RegisterResponseEntry(3L, (short) 1, SOURCE3_SCHEMA_STR));
schemaMap.put(1L, l1);
schemaMap.put(2L, l2);
schemaMap.put(3L, l3);
// Enqueue Necessary messages before starting dispatch
dispatcher.enqueueMessage(SourcesMessage.createSetSourcesIdsMessage(sourcesMap.values()));
dispatcher.enqueueMessage(SourcesMessage.createSetSourcesSchemasMessage(schemaMap));
log.info("starting event dispatch");
// comm channels between reader and writer
Pipe pipe = Pipe.open();
Pipe.SinkChannel writerStream = pipe.sink();
Pipe.SourceChannel readerStream = pipe.source();
writerStream.configureBlocking(true);
/*
* Needed for DbusEventBuffer.readEvents() to exit their loops when no more data is available.
* With Pipe mimicking ChunkedBodyReadableByteChannel, we need to make Pipe non-blocking on the
* reader side to achieve the behavior that ChunkedBodyReadableByteChannel provides.
*/
readerStream.configureBlocking(false);
// Event writer - Relay in the real world
Checkpoint cp = Checkpoint.createFlexibleCheckpoint();
// Event readers - Clients in the real world
// Checkpoint pullerCheckpoint = Checkpoint.createFlexibleCheckpoint();
DbusEventsStatisticsCollector clientStats = new DbusEventsStatisticsCollector(0, "client", true, false, null);
DbusEventBufferReader reader = new DbusEventBufferReader(destEventsBuf, readerStream, null, clientStats);
UncaughtExceptionTrackingThread tReader = new UncaughtExceptionTrackingThread(reader, "Reader");
tReader.setDaemon(true);
tReader.start();
try {
log.info("send both windows");
StreamEventsResult streamRes = srcEventsBuf.streamEvents(cp, writerStream, new StreamEventsArgs(win1Size));
// the 2 extra events are the EOP events, presumably
Assert.assertEquals("num events streamed should equal total number of events plus 2", numEvents + 2, streamRes.getNumEventsStreamed());
TestUtil.assertWithBackoff(new ConditionCheck() {
@Override
public boolean check() {
return 2 == mockConsumer.getNumBeginWindowCalls();
}
}, "second window processing started", 5000, log);
dispatcher.shutdown();
// remove the barrier
mockConsumer.getLatch().countDown();
TestUtil.assertWithBackoff(new ConditionCheck() {
@Override
public boolean check() {
return !dispatcherThread.isAlive();
}
}, "Ensure Dispatcher thread is shutdown", 5000, log);
TestUtil.assertWithBackoff(new ConditionCheck() {
@Override
public boolean check() {
return 0 == mockConsumer.getNumRollbacks();
}
}, "Ensure No Rollback is called", 10, log);
} finally {
reader.stop();
}
log.info("end\n");
}