Use of com.linkedin.databus.core.data_model.DatabusSubscription in project databus by linkedin.
In class MockBootstrapConnection, method testBootstrapPendingEvent.
// Make sure that we suspend on error when we get the pending-event-size header with a size that is
// larger than what our event buffer can accommodate.
@Test
public void testBootstrapPendingEvent() throws Exception {
List<String> sources = Arrays.asList("source1");
Properties clientProps = new Properties();
clientProps.setProperty("client.container.httpPort", "0");
clientProps.setProperty("client.container.jmx.rmiEnabled", "false");
clientProps.setProperty("client.runtime.bootstrap.enabled", "true");
clientProps.setProperty("client.runtime.bootstrap.service(1).name", "bs1");
clientProps.setProperty("client.runtime.bootstrap.service(1).host", "localhost");
clientProps.setProperty("client.runtime.bootstrap.service(1).port", "10001");
clientProps.setProperty("client.runtime.bootstrap.service(1).sources", "source1");
clientProps.setProperty("client.runtime.relay(1).name", "relay1");
clientProps.setProperty("client.runtime.relay(1).port", "10001");
clientProps.setProperty("client.runtime.relay(1).sources", "source1");
clientProps.setProperty("client.connectionDefaults.eventBuffer.maxSize", "100000");
clientProps.setProperty("client.connectionDefaults.pullerRetries.maxRetryNum", "3");
DatabusHttpClientImpl.Config clientConfBuilder = new DatabusHttpClientImpl.Config();
ConfigLoader<DatabusHttpClientImpl.StaticConfig> configLoader = new ConfigLoader<DatabusHttpClientImpl.StaticConfig>("client.", clientConfBuilder);
configLoader.loadConfig(clientProps);
DatabusHttpClientImpl.StaticConfig clientConf = clientConfBuilder.build();
DatabusSourcesConnection.StaticConfig srcConnConf = clientConf.getConnectionDefaults();
DatabusHttpClientImpl client = new DatabusHttpClientImpl(clientConf);
client.registerDatabusBootstrapListener(new LoggingConsumer(), null, "source1");
Assert.assertNotNull(client, "client instantiation failed");
DatabusHttpClientImpl.RuntimeConfig clientRtConf = clientConf.getRuntime().build();
//we keep the index of the next server we expect to see
AtomicInteger serverIdx = new AtomicInteger(-1);
List<IdNamePair> sourcesResponse = new ArrayList<IdNamePair>();
sourcesResponse.add(new IdNamePair(1L, "source1"));
Map<Long, List<RegisterResponseEntry>> registerResponse = new HashMap<Long, List<RegisterResponseEntry>>();
List<RegisterResponseEntry> regResponse = new ArrayList<RegisterResponseEntry>();
regResponse.add(new RegisterResponseEntry(1L, (short) 1, SCHEMA$.toString()));
registerResponse.put(1L, regResponse);
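// Mock the HTTP response channel: it advertises a pending event size of 1,000,000 bytes via the response metadata.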
ChunkedBodyReadableByteChannel channel = EasyMock.createMock(ChunkedBodyReadableByteChannel.class);
// getting the pending-event-size header is called twice, once for checking and once for logging.
EasyMock.expect(channel.getMetadata(DatabusHttpHeaders.DATABUS_PENDING_EVENT_SIZE)).andReturn("1000000").times(2);
EasyMock.expect(channel.getMetadata("x-dbus-error-cause")).andReturn(null).times(2);
EasyMock.expect(channel.getMetadata("x-dbus-error")).andReturn(null).times(2);
EasyMock.replay(channel);
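// Mock the client event buffer; its max read buffer capacity (600) is far smaller than the advertised pending event size.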
DbusEventBuffer dbusBuffer = EasyMock.createMock(DbusEventBuffer.class);
dbusBuffer.endEvents(false, -1, false, false, null);
EasyMock.expectLastCall().anyTimes();
EasyMock.expect(dbusBuffer.injectEvent(EasyMock.<DbusEventInternalReadable>notNull())).andReturn(true).anyTimes();
EasyMock.expect(dbusBuffer.getEventSerializationVersion()).andReturn(DbusEventFactory.DBUS_EVENT_V1).anyTimes();
EasyMock.expect(dbusBuffer.getMaxReadBufferCapacity()).andReturn(600).times(2);
EasyMock.expect(dbusBuffer.getBufferFreeReadSpace()).andReturn(600000).times(2);
EasyMock.expect(dbusBuffer.readEvents(EasyMock.<ReadableByteChannel>notNull())).andReturn(1).times(1);
EasyMock.expect(dbusBuffer.readEvents(EasyMock.<ReadableByteChannel>notNull(), EasyMock.<List<InternalDatabusEventsListener>>notNull(), EasyMock.<DbusEventsStatisticsCollector>isNull())).andReturn(0).times(1);
EasyMock.replay(dbusBuffer);
ConnectionStateFactory connStateFactory = new ConnectionStateFactory(sources);
// This connection succeeds on /sources but fails on /register
MockBootstrapConnection mockSuccessConn = new MockBootstrapConnection(10, 10, channel, serverIdx, false);
DatabusBootstrapConnectionFactory mockConnFactory = org.easymock.EasyMock.createMock("mockRelayFactory", DatabusBootstrapConnectionFactory.class);
// Each server should be tried MAX_RETRIES times until all retries are exhausted
EasyMock.expect(mockConnFactory.createConnection(EasyMock.<ServerInfo>notNull(), EasyMock.<ActorMessageQueue>notNull(), EasyMock.<RemoteExceptionHandler>notNull())).andReturn(mockSuccessConn).anyTimes();
List<DatabusSubscription> sourcesSubList = DatabusSubscription.createSubscriptionList(sources);
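// Mock the sources connection that ties the subscriptions, event buffer, configs, and bootstrap connection factory together.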
DatabusSourcesConnection sourcesConn2 = EasyMock.createMock(DatabusSourcesConnection.class);
EasyMock.expect(sourcesConn2.getSourcesNames()).andReturn(Arrays.asList("source1")).anyTimes();
EasyMock.expect(sourcesConn2.getSubscriptions()).andReturn(sourcesSubList).anyTimes();
EasyMock.expect(sourcesConn2.getConnectionConfig()).andReturn(srcConnConf).anyTimes();
EasyMock.expect(sourcesConn2.getConnectionStatus()).andReturn(new DatabusComponentStatus("dummy")).anyTimes();
EasyMock.expect(sourcesConn2.getLocalRelayCallsStatsCollector()).andReturn(null).anyTimes();
EasyMock.expect(sourcesConn2.getRelayCallsStatsCollector()).andReturn(null).anyTimes();
EasyMock.expect(sourcesConn2.getUnifiedClientStats()).andReturn(null).anyTimes();
EasyMock.expect(sourcesConn2.getBootstrapConnFactory()).andReturn(mockConnFactory).anyTimes();
EasyMock.expect(sourcesConn2.loadPersistentCheckpoint()).andReturn(null).anyTimes();
EasyMock.expect(sourcesConn2.getDataEventsBuffer()).andReturn(dbusBuffer).anyTimes();
EasyMock.expect(sourcesConn2.isBootstrapEnabled()).andReturn(true).anyTimes();
EasyMock.expect(sourcesConn2.getBootstrapRegistrations()).andReturn(null).anyTimes();
EasyMock.expect(sourcesConn2.getBootstrapServices()).andReturn(null).anyTimes();
EasyMock.expect(sourcesConn2.getBootstrapEventsStatsCollector()).andReturn(null).anyTimes();
EasyMock.makeThreadSafe(mockConnFactory, true);
EasyMock.makeThreadSafe(sourcesConn2, true);
EasyMock.replay(mockConnFactory);
EasyMock.replay(sourcesConn2);
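// Create the bootstrap puller under test and wire in the mocked connection factory and buffer.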
BootstrapPullThread bsPuller = new BootstrapPullThread("RelayPuller", sourcesConn2, dbusBuffer, connStateFactory, clientRtConf.getBootstrap().getServicesSet(), new ArrayList<DbusKeyCompositeFilterConfig>(), clientConf.getPullerBufferUtilizationPct(), ManagementFactory.getPlatformMBeanServer(), new DbusEventV2Factory(), null, null);
mockSuccessConn.setCallback(bsPuller);
bsPuller.getComponentStatus().start();
Checkpoint cp = _ckptHandlerSource1.createInitialBootstrapCheckpoint(null, 0L);
//TODO remove
//cp.setSnapshotSource("source1");
//cp.setCatchupSource("source1");
//cp.setConsumptionMode(DbusClientMode.BOOTSTRAP_SNAPSHOT);
ConnectionState connState = bsPuller.getConnectionState();
connState.switchToBootstrap(cp);
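// Walk the puller's state machine: request the start SCN, process the response, then issue the stream request.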
testTransitionCase(bsPuller, StateId.BOOTSTRAP, StateId.REQUEST_START_SCN, cp);
bsPuller.getMessageQueue().clear();
testTransitionCase(bsPuller, StateId.REQUEST_START_SCN, StateId.START_SCN_RESPONSE_SUCCESS, null);
bsPuller.getMessageQueue().clear();
Map<Long, List<RegisterResponseEntry>> entries = new HashMap<Long, List<RegisterResponseEntry>>();
entries.put(1L, new ArrayList<RegisterResponseEntry>());
connState.setSourcesSchemas(entries);
connState.setCurrentBSServerInfo(bsPuller.getCurentServer());
testTransitionCase(bsPuller, StateId.START_SCN_RESPONSE_SUCCESS, StateId.REQUEST_STREAM, null);
bsPuller.getMessageQueue().clear();
connState.getSourcesNameMap().put("source1", new IdNamePair(1L, "source1"));
connState.getSourceIdMap().put(1L, new IdNamePair(1L, "source1"));
testTransitionCase(bsPuller, StateId.REQUEST_STREAM, StateId.STREAM_REQUEST_SUCCESS, null);
bsPuller.getMessageQueue().clear();
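// The oversized pending event size reported on the stream response should make the puller suspend on error.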
testTransitionCase(bsPuller, StateId.STREAM_REQUEST_SUCCESS, StateId.STREAM_REQUEST_SUCCESS, "SUSPEND_ON_ERROR", null);
EasyMock.verify(channel);
EasyMock.verify(sourcesConn2);
EasyMock.verify(dbusBuffer);
EasyMock.verify(channel);
EasyMock.verify(mockConnFactory);
}
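A minimal sketch of the decision the test above exercises, with made-up literal values standing in for the mocked header and buffer sizes; the actual check lives in the bootstrap puller's handling of the stream response, and this helper class exists only for illustration:

public class PendingEventSizeSketch {
    public static void main(String[] args) {
        long pendingEventSize = 1_000_000L;   // size advertised by the server in the pending-event-size header
        long maxReadBufferCapacity = 600L;    // how much the client event buffer can read in one pass
        if (pendingEventSize > maxReadBufferCapacity) {
            // The real puller moves to a suspend-on-error state instead of retrying forever.
            System.out.println("suspend on error: pending event larger than the read buffer");
        }
    }
}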
Use of com.linkedin.databus.core.data_model.DatabusSubscription in project databus by linkedin.
In class DummySuccessfulErrorCountingConsumer, method testRegisterDatabusStreamListener.
@Test
public void testRegisterDatabusStreamListener() throws Exception {
DatabusHttpClientImpl.Config clientConfig = new DatabusHttpClientImpl.Config();
clientConfig.getContainer().getJmx().setRmiEnabled(false);
clientConfig.getContainer().setHttpPort(12003);
DatabusHttpClientImpl client = new DatabusHttpClientImpl(clientConfig);
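// Register four relays; relays serving the same source list (relay1 and relay1.1 with "S1,S2") fall into the same relay group.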
registerRelay(1, "relay1", new InetSocketAddress("localhost", 8888), "S1,S2", client);
registerRelay(2, "relay2", new InetSocketAddress("localhost", 7777), "S1,S3", client);
registerRelay(3, "relay1.1", new InetSocketAddress("localhost", 8887), "S1,S2", client);
registerRelay(4, "relay3", new InetSocketAddress("localhost", 6666), "S3,S4,S5", client);
DummyStreamConsumer listener1 = new DummyStreamConsumer("consumer1");
client.registerDatabusStreamListener(listener1, null, "S1");
List<DatabusSubscription> ls1 = DatabusSubscription.createSubscriptionList(Arrays.asList("S1", "S2"));
List<DatabusSubscription> ls2 = DatabusSubscription.createSubscriptionList(Arrays.asList("S1", "S3"));
List<DatabusSubscription> ls3 = DatabusSubscription.createSubscriptionList(Arrays.asList("S3", "S4", "S5"));
assertEquals("expect one consumer in (S1,S2) or (S1,S3)", 1, safeListSize(client.getRelayGroupStreamConsumers().get(ls1)) + safeListSize(client.getRelayGroupStreamConsumers().get(ls2)));
DummyStreamConsumer listener2 = new DummyStreamConsumer("consumer2");
client.registerDatabusStreamListener(listener2, null, "S1");
int consumersNum = safeListSize(client.getRelayGroupStreamConsumers().get(ls1)) + safeListSize(client.getRelayGroupStreamConsumers().get(ls2));
assertEquals("expect two consumers in (S1,S2) or (S1,S3)", 2, consumersNum);
DatabusStreamConsumer listener3 = new LoggingConsumer(clientConfig.getLoggingListener());
client.registerDatabusStreamListener(listener3, null, "S5");
assertEquals("expect one consumer in (S3,S4,S5)", 1, safeListSize(client.getRelayGroupStreamConsumers().get(ls3)));
}
Use of com.linkedin.databus.core.data_model.DatabusSubscription in project databus by linkedin.
In class DummySuccessfulErrorCountingConsumer, method testRegisterRelay.
@Test
public void testRegisterRelay() throws Exception {
DatabusHttpClientImpl.Config clientConfig = new DatabusHttpClientImpl.Config();
clientConfig.getContainer().getJmx().setRmiEnabled(false);
clientConfig.getContainer().setHttpPort(10004);
DatabusHttpClientImpl client = new DatabusHttpClientImpl(clientConfig);
List<DatabusSubscription> ls1 = DatabusSubscription.createSubscriptionList(Arrays.asList("S1", "S2"));
List<DatabusSubscription> sl1 = DatabusSubscription.createSubscriptionList(Arrays.asList("S2", "S1"));
List<DatabusSubscription> ls2 = DatabusSubscription.createSubscriptionList(Arrays.asList("S1", "S3"));
List<DatabusSubscription> ls3 = DatabusSubscription.createSubscriptionList(Arrays.asList("S1"));
List<DatabusSubscription> ls4 = DatabusSubscription.createSubscriptionList(Arrays.asList("S3", "S4", "S5"));
List<DatabusSubscription> ls5 = DatabusSubscription.createSubscriptionList(Arrays.asList("S3", "S4"));
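// Relay groups are keyed by the ordered subscription list, so (S1,S2) and (S2,S1) should map to different groups.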
registerRelay(1, "relay1", new InetSocketAddress("localhost", 8880), "S1,S2", client);
assertEquals("one relay group", 1, client.getRelayGroups().size());
assertEquals("one relay", 1, client.getRelays().size());
assertEquals("relay group S1,S2", true, client.getRelayGroups().containsKey(ls1));
assertEquals("no relay group S2,S1", false, client.getRelayGroups().containsKey(sl1));
registerRelay(2, "relay2", new InetSocketAddress("localhost", 7777), "S1,S3", client);
assertEquals("two relay groups", 2, client.getRelayGroups().size());
assertEquals("two relays", 2, client.getRelays().size());
assertEquals("relay group S1,S2", true, client.getRelayGroups().containsKey(ls1));
assertEquals("relay group S1,S3", true, client.getRelayGroups().containsKey(ls2));
assertEquals("no relay group S1", false, client.getRelayGroups().containsKey(ls3));
registerRelay(3, "relay1.1", new InetSocketAddress("localhost", 8887), "S1,S2", client);
assertEquals("two relay groups", 2, client.getRelayGroups().size());
assertEquals("three relays", 3, client.getRelays().size());
assertEquals("S1,S2 has two relays", 2, client.getRelayGroups().get(ls1).size());
registerRelay(4, "relay3", new InetSocketAddress("localhost", 6666), "S3,S4,S5", client);
assertEquals("three relay groups", 3, client.getRelayGroups().size());
assertEquals("four relays", 4, client.getRelays().size());
assertEquals("relay group S1,S2", true, client.getRelayGroups().containsKey(ls1));
assertEquals("relay group S1,S3", true, client.getRelayGroups().containsKey(ls2));
assertEquals("relay group S3,S4,S5", true, client.getRelayGroups().containsKey(ls4));
assertEquals("no relay group S3,S4", false, client.getRelayGroups().containsKey(ls5));
}
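A minimal, runnable illustration of the order-sensitivity asserted above, assuming only the createSubscriptionList factory shown in these tests; the class name and main method are scaffolding for the sketch. Relay groups are keyed by the subscription List, and List equality is order-sensitive, so (S1,S2) and (S2,S1) are different keys:

import java.util.Arrays;
import java.util.List;

import com.linkedin.databus.core.data_model.DatabusSubscription;

public class SubscriptionOrderSketch {
    public static void main(String[] args) {
        List<DatabusSubscription> ls1 = DatabusSubscription.createSubscriptionList(Arrays.asList("S1", "S2"));
        List<DatabusSubscription> sl1 = DatabusSubscription.createSubscriptionList(Arrays.asList("S2", "S1"));
        // List.equals compares elements in order, so these two lists are distinct relay-group keys.
        System.out.println(ls1.equals(sl1));   // expected: false
    }
}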
Use of com.linkedin.databus.core.data_model.DatabusSubscription in project databus by linkedin.
In class TestGenericDispatcher, method testOneWindowHappyPath.
@Test(groups = { "small", "functional" })
public void testOneWindowHappyPath() {
final Logger log = Logger.getLogger("TestGenericDispatcher.testOneWindowHappyPath");
//log.setLevel(Level.DEBUG);
log.info("start");
int source1EventsNum = 2;
int source2EventsNum = 2;
Hashtable<Long, AtomicInteger> keyCounts = new Hashtable<Long, AtomicInteger>();
Hashtable<Short, AtomicInteger> srcidCounts = new Hashtable<Short, AtomicInteger>();
final TestGenericDispatcherEventBuffer eventsBuf = new TestGenericDispatcherEventBuffer(_generic100KBufferStaticConfig);
eventsBuf.start(0);
final StateVerifyingStreamConsumer svsConsumer = new StateVerifyingStreamConsumer(null);
//svsConsumer.getLog().setLevel(Level.DEBUG);
DatabusStreamConsumer mockConsumer = new EventCountingConsumer(svsConsumer, keyCounts, srcidCounts);
SelectingDatabusCombinedConsumer sdccMockConsumer = new SelectingDatabusCombinedConsumer(mockConsumer);
List<String> sources = new ArrayList<String>();
Map<Long, IdNamePair> sourcesMap = new HashMap<Long, IdNamePair>();
for (int i = 1; i <= 3; ++i) {
IdNamePair sourcePair = new IdNamePair((long) i, "source" + i);
sources.add(sourcePair.getName());
sourcesMap.put(sourcePair.getId(), sourcePair);
}
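// Register a single consumer for all three sources and build the multi-consumer callback that the dispatcher will invoke.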
DatabusV2ConsumerRegistration consumerReg = new DatabusV2ConsumerRegistration(sdccMockConsumer, sources, null);
List<DatabusV2ConsumerRegistration> allRegistrations = Arrays.asList(consumerReg);
MultiConsumerCallback callback = new MultiConsumerCallback(allRegistrations, Executors.newSingleThreadExecutor(), 1000, new StreamConsumerCallbackFactory(null, null), null, null, null, null);
callback.setSourceMap(sourcesMap);
List<DatabusSubscription> subs = DatabusSubscription.createSubscriptionList(sources);
RelayDispatcher dispatcher = new RelayDispatcher("dispatcher", _genericRelayConnStaticConfig, subs, new InMemoryPersistenceProvider(), eventsBuf, callback, null, null, null, null, null);
dispatcher.setSchemaIdCheck(false);
Thread dispatcherThread = new Thread(dispatcher, "testOneWindowHappyPath-dispatcher");
//dispatcherThread.setDaemon(true);
dispatcherThread.start();
HashMap<Long, List<RegisterResponseEntry>> schemaMap = new HashMap<Long, List<RegisterResponseEntry>>();
List<RegisterResponseEntry> l1 = new ArrayList<RegisterResponseEntry>();
List<RegisterResponseEntry> l2 = new ArrayList<RegisterResponseEntry>();
List<RegisterResponseEntry> l3 = new ArrayList<RegisterResponseEntry>();
l1.add(new RegisterResponseEntry(1L, (short) 1, SOURCE1_SCHEMA_STR));
l2.add(new RegisterResponseEntry(2L, (short) 1, SOURCE2_SCHEMA_STR));
l3.add(new RegisterResponseEntry(3L, (short) 1, SOURCE3_SCHEMA_STR));
schemaMap.put(1L, l1);
schemaMap.put(2L, l2);
schemaMap.put(3L, l3);
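// Hand the dispatcher its source-id and schema mappings before dispatching any events.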
dispatcher.enqueueMessage(SourcesMessage.createSetSourcesIdsMessage(sourcesMap.values()));
dispatcher.enqueueMessage(SourcesMessage.createSetSourcesSchemasMessage(schemaMap));
eventsBuf.startEvents();
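// Append two events for source 1 and two for source 2, then close the window at SCN 100.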
initBufferWithEvents(eventsBuf, 1, source1EventsNum, (short) 1, keyCounts, srcidCounts);
initBufferWithEvents(eventsBuf, 1 + source1EventsNum, source2EventsNum, (short) 2, keyCounts, srcidCounts);
eventsBuf.endEvents(100L, null);
try {
Thread.sleep(2000);
} catch (InterruptedException ie) {
}
dispatcher.shutdown();
Checkpoint cp = dispatcher.getDispatcherState().getLastSuccessfulCheckpoint();
Assert.assertEquals(Checkpoint.FULLY_CONSUMED_WINDOW_OFFSET, cp.getWindowOffset());
Assert.assertEquals(cp.getWindowScn(), cp.getPrevScn());
for (long i = 1; i <= source1EventsNum + source2EventsNum; ++i) {
assertEquals("correct amount of callbacks for key " + i, 1, keyCounts.get(i).intValue());
}
assertEquals("correct amount of callbacks for srcid 1", source1EventsNum, srcidCounts.get((short) 1).intValue());
assertEquals("correct amount of callbacks for srcid 2", source2EventsNum, srcidCounts.get((short) 2).intValue());
verifyNoLocks(log, eventsBuf);
log.info("end\n");
}
Use of com.linkedin.databus.core.data_model.DatabusSubscription in project databus by linkedin.
In class TestGenericDispatcher, method testPartialWindowRollback.
/**
 * Tests the case where the dispatcher exits the main processing loop in {@link GenericDispatcher#doDispatchEvents()}
 * with a partial window and the flushing of the outstanding callbacks fails. We want to make sure that a rollback
 * is correctly triggered.
 *
 * The test simulates the following case: e1_1 e1_2 e1_3 <EOW> e2_1 e2_2 e2_3 <EOW> ... with a failure in the e2_2
 * callback.
 *
 * 1) Read full first window: e1_1 e1_2 e1_3 <EOW>
 * 2) Read partial second window: e2_1 e2_2
 * 3) The above should fail -- verify that rollback is called
 * 4) Read the rest
 */
@Test(groups = { "small", "functional" })
public void testPartialWindowRollback() throws Exception {
final Logger log = Logger.getLogger("TestGenericDispatcher.testPartialWindowRollback");
//log.setLevel(Level.INFO);
log.info("start");
final Level saveLevel = Logger.getLogger("com.linkedin.databus.client").getLevel();
//Logger.getLogger("com.linkedin.databus.client").setLevel(Level.DEBUG);
// generate events
Vector<Short> srcIdList = new Vector<Short>();
srcIdList.add((short) 1);
DbusEventGenerator evGen = new DbusEventGenerator(0, srcIdList);
Vector<DbusEvent> srcTestEvents = new Vector<DbusEvent>();
final int numEvents = 9;
//1-based number of the event callback to fail
final int numOfFailureEvent = 5;
final int numEventsPerWindow = 3;
final int payloadSize = 200;
final int numWindows = (int) Math.ceil(1.0 * numEvents / numEventsPerWindow);
Assert.assertTrue(evGen.generateEvents(numEvents, numEventsPerWindow, 500, payloadSize, srcTestEvents) > 0);
// find out how much data we need to stream for the failure
// account for the EOW event which is < payload size
int win1Size = payloadSize - 1;
int win2Size = 0;
int eventN = 0;
for (DbusEvent e : srcTestEvents) {
eventN++;
if (eventN <= numEventsPerWindow) {
win1Size += e.size();
} else if (eventN <= numOfFailureEvent) {
win2Size += e.size();
}
}
//serialize the events to a buffer so they can be sent to the client
final TestGenericDispatcherEventBuffer srcEventsBuf = new TestGenericDispatcherEventBuffer(_generic100KBufferStaticConfig);
DbusEventAppender eventProducer = new DbusEventAppender(srcTestEvents, srcEventsBuf, null, true);
Thread tEmitter = new Thread(eventProducer);
tEmitter.start();
//Create destination (client) buffer
final TestGenericDispatcherEventBuffer destEventsBuf = new TestGenericDispatcherEventBuffer(_generic100KBufferStaticConfig);
//Create dispatcher
final TimeoutTestConsumer mockConsumer = new TimeoutTestConsumer(100, 10, 0, numOfFailureEvent, 0, 1);
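// TimeoutTestConsumer is set up to fail the callback for event number numOfFailureEvent (the 5th event, e2_2), which should eventually trigger the rollback.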
SelectingDatabusCombinedConsumer sdccMockConsumer = new SelectingDatabusCombinedConsumer((DatabusStreamConsumer) mockConsumer);
List<String> sources = new ArrayList<String>();
Map<Long, IdNamePair> sourcesMap = new HashMap<Long, IdNamePair>();
for (int i = 1; i <= 3; ++i) {
IdNamePair sourcePair = new IdNamePair((long) i, "source" + i);
sources.add(sourcePair.getName());
sourcesMap.put(sourcePair.getId(), sourcePair);
}
DatabusV2ConsumerRegistration consumerReg = new DatabusV2ConsumerRegistration(sdccMockConsumer, sources, null);
List<DatabusV2ConsumerRegistration> allRegistrations = Arrays.asList(consumerReg);
final ConsumerCallbackStats callbackStats = new ConsumerCallbackStats(0, "test", "test", true, false, null);
final UnifiedClientStats unifiedStats = new UnifiedClientStats(0, "test", "test.unified");
MultiConsumerCallback callback = new MultiConsumerCallback(allRegistrations, Executors.newFixedThreadPool(2), 1000, new StreamConsumerCallbackFactory(callbackStats, unifiedStats), callbackStats, unifiedStats, null, null);
callback.setSourceMap(sourcesMap);
List<DatabusSubscription> subs = DatabusSubscription.createSubscriptionList(sources);
final RelayDispatcher dispatcher = new RelayDispatcher("dispatcher", _genericRelayConnStaticConfig, subs, new InMemoryPersistenceProvider(), destEventsBuf, callback, null, null, null, null, null);
dispatcher.setSchemaIdCheck(false);
Thread dispatcherThread = new Thread(dispatcher);
dispatcherThread.setDaemon(true);
log.info("starting dispatcher thread");
dispatcherThread.start();
HashMap<Long, List<RegisterResponseEntry>> schemaMap = new HashMap<Long, List<RegisterResponseEntry>>();
List<RegisterResponseEntry> l1 = new ArrayList<RegisterResponseEntry>();
List<RegisterResponseEntry> l2 = new ArrayList<RegisterResponseEntry>();
List<RegisterResponseEntry> l3 = new ArrayList<RegisterResponseEntry>();
l1.add(new RegisterResponseEntry(1L, (short) 1, SOURCE1_SCHEMA_STR));
l2.add(new RegisterResponseEntry(2L, (short) 1, SOURCE2_SCHEMA_STR));
l3.add(new RegisterResponseEntry(3L, (short) 1, SOURCE3_SCHEMA_STR));
schemaMap.put(1L, l1);
schemaMap.put(2L, l2);
schemaMap.put(3L, l3);
dispatcher.enqueueMessage(SourcesMessage.createSetSourcesIdsMessage(sourcesMap.values()));
dispatcher.enqueueMessage(SourcesMessage.createSetSourcesSchemasMessage(schemaMap));
log.info("starting event dispatch");
//stream the events from the source buffer without the EOW
//comm channels between reader and writer
Pipe pipe = Pipe.open();
Pipe.SinkChannel writerStream = pipe.sink();
Pipe.SourceChannel readerStream = pipe.source();
writerStream.configureBlocking(true);
readerStream.configureBlocking(false);
//Event writer - Relay in the real world
Checkpoint cp = Checkpoint.createFlexibleCheckpoint();
//Event readers - Clients in the real world
//Checkpoint pullerCheckpoint = Checkpoint.createFlexibleCheckpoint();
DbusEventsStatisticsCollector clientStats = new DbusEventsStatisticsCollector(0, "client", true, false, null);
DbusEventBufferReader reader = new DbusEventBufferReader(destEventsBuf, readerStream, null, clientStats);
UncaughtExceptionTrackingThread tReader = new UncaughtExceptionTrackingThread(reader, "Reader");
tReader.setDaemon(true);
tReader.start();
try {
log.info("send first window -- that one should be OK");
StreamEventsResult streamRes = srcEventsBuf.streamEvents(cp, writerStream, new StreamEventsArgs(win1Size));
Assert.assertEquals(numEventsPerWindow + 1, streamRes.getNumEventsStreamed());
TestUtil.assertWithBackoff(new ConditionCheck() {
@Override
public boolean check() {
return 1 == callbackStats.getNumSysEventsProcessed();
}
}, "first window processed", 5000, log);
log.info("send the second partial window -- that one should cause an error");
streamRes = srcEventsBuf.streamEvents(cp, writerStream, new StreamEventsArgs(win2Size));
Assert.assertEquals(numOfFailureEvent - numEventsPerWindow, streamRes.getNumEventsStreamed());
log.info("wait for dispatcher to finish");
TestUtil.assertWithBackoff(new ConditionCheck() {
@Override
public boolean check() {
log.info("events received: " + callbackStats.getNumDataEventsReceived());
return numOfFailureEvent <= callbackStats.getNumDataEventsProcessed();
}
}, "all events until the error processed", 5000, log);
log.info("all data events have been received but no EOW");
Assert.assertEquals(numOfFailureEvent, clientStats.getTotalStats().getNumDataEvents());
Assert.assertEquals(1, clientStats.getTotalStats().getNumSysEvents());
//at least one failing event therefore < numOfFailureEvent events can be processed
Assert.assertTrue(numOfFailureEvent <= callbackStats.getNumDataEventsProcessed());
//onDataEvent callbacks for e2_1 and e2_2 get cancelled
Assert.assertEquals(2, callbackStats.getNumDataErrorsProcessed());
//only one EOW
Assert.assertEquals(1, callbackStats.getNumSysEventsProcessed());
log.info("Send the remainder of the window");
streamRes = srcEventsBuf.streamEvents(cp, writerStream, new StreamEventsArgs(100000));
//remaining events + EOWs
Assert.assertEquals(srcTestEvents.size() + numWindows - (numOfFailureEvent + 1), streamRes.getNumEventsStreamed());
log.info("wait for the rollback");
TestUtil.assertWithBackoff(new ConditionCheck() {
@Override
public boolean check() {
return 1 == mockConsumer.getNumRollbacks();
}
}, "rollback seen", 5000, log);
log.info("wait for dispatcher to finish after the rollback");
TestUtil.assertWithBackoff(new ConditionCheck() {
@Override
public boolean check() {
log.info("num windows processed: " + callbackStats.getNumSysEventsProcessed());
return numWindows == callbackStats.getNumSysEventsProcessed();
}
}, "all events processed", 5000, log);
} finally {
reader.stop();
dispatcher.shutdown();
log.info("all events processed");
verifyNoLocks(null, srcEventsBuf);
verifyNoLocks(null, destEventsBuf);
}
Logger.getLogger("com.linkedin.databus.client").setLevel(saveLevel);
log.info("end\n");
}