Example 11 with EventProducer

Use of com.linkedin.databus2.producers.EventProducer in project databus by LinkedIn.

The class TestDatabusRelayEvents, method testV2Events().

/**
   * Stuffs an event buffer with both a v1 and a v2 event, then reads the buffer two ways:
   * first accepting only v1 events (verifying conversion of the v2 event to v1); then accepting
   * both v1 and v2 events.
   *
   * Note that the version of the _EOP_ events must match the version of the event factory,
   * regardless of the versions of any preceding "real" events.  (This matches DbusEventBuffer
   * behavior; see the serializeLongKeyEndOfPeriodMarker() call in endEvents() for details.)
   */
@Test
public void testV2Events() throws KeyTypeNotImplementedException, InvalidEventException, IOException, DatabusException {
    final Logger log = Logger.getLogger("TestDatabusRelayEvents.testV2Events");
    log.setLevel(Level.DEBUG);
    String[] srcs = { "com.linkedin.events.example.fake.FakeSchema" };
    String pSourceName = DatabusRelayTestUtil.getPhysicalSrcName(srcs[0]);
    short srcId = 2;
    short pId = 1;
    int relayPort = Utils.getAvailablePort(11993);
    // create relay
    final DatabusRelayMain relay1 = createRelay(relayPort, pId, srcs);
    DatabusRelayTestUtil.RelayRunner r1 = null;
    ClientRunner cr = null;
    try {
        //EventProducer[] producers = relay1.getProducers();
        r1 = new DatabusRelayTestUtil.RelayRunner(relay1);
        log.info("Relay created");
        DbusEventBufferMult bufMult = relay1.getEventBuffer();
        PhysicalPartition pPartition = new PhysicalPartition((int) pId, pSourceName);
        DbusEventBuffer buf = (DbusEventBuffer) bufMult.getDbusEventBufferAppendable(pPartition);
        log.info("create some events");
        long windowScn = 100L;
        ByteBuffer serializationBuffer = addEvent(windowScn, srcId, relay1.getSchemaRegistryService().fetchSchemaIdForSourceNameAndVersion(srcs[0], 2).getByteArray(), pId, DbusEventFactory.DBUS_EVENT_V2);
        ReadableByteChannel channel = Channels.newChannel(new ByteBufferInputStream(serializationBuffer));
        int readEvents = buf.readEvents(channel);
        log.info("successfully read in " + readEvents + " events ");
        channel.close();
        windowScn = 101L;
        serializationBuffer = addEvent(windowScn, srcId, relay1.getSchemaRegistryService().fetchSchemaIdForSourceNameAndVersion(srcs[0], 2).getByteArray(), pId, DbusEventFactory.DBUS_EVENT_V1);
        channel = Channels.newChannel(new ByteBufferInputStream(serializationBuffer));
        readEvents = buf.readEvents(channel);
        log.info("successfully read in " + readEvents + " events ");
        channel.close();
        log.info("starting relay on port " + relayPort);
        r1.start();
        //TestUtil.sleep(10*1000);
        // wait until relay comes up
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                return relay1.isRunningStatus();
            }
        }, "Relay hasn't come up completely ", 30000, LOG);
        log.info("now create client");
        String srcSubscriptionString = TestUtil.join(srcs, ",");
        String serverName = "localhost:" + relayPort;
        final EventsCountingConsumer countingConsumer = new EventsCountingConsumer();
        int id = (RngUtils.randomPositiveInt() % 10000) + 1;
        DatabusSourcesConnection clientConn = RelayEventProducer.createDatabusSourcesConnection("testProducer", id, serverName, srcSubscriptionString, countingConsumer, 1 * 1024 * 1024, 50000, 30 * 1000, 100, 15 * 1000, 1, true, DatabusClientNettyThreadPools.createNettyThreadPools(id), 0, DbusEventFactory.DBUS_EVENT_V1, 0);
        cr = new ClientRunner(clientConn);
        log.info("starting client");
        cr.start();
        // wait till client gets the event
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                int events = countingConsumer.getNumDataEvents();
                LOG.info("client got " + events + " events");
                return events == 2;
            }
        }, "Consumer didn't get 2 events ", 64 * 1024, LOG);
        // asserts
        Assert.assertEquals(countingConsumer.getNumDataEvents(), 2);
        Assert.assertEquals(countingConsumer.getNumWindows(), 2);
        Assert.assertEquals(countingConsumer.getNumDataEvents(DbusEventFactory.DBUS_EVENT_V1), 2);
        log.info("shutdown first client");
        clientConn.stop();
        cr.shutdown();
        TestUtil.sleep(1000);
        cr = null;
        log.info("start another client who understands V2");
        final EventsCountingConsumer countingConsumer1 = new EventsCountingConsumer();
        clientConn = RelayEventProducer.createDatabusSourcesConnection("testProducer", id, serverName, srcSubscriptionString, countingConsumer1, 1 * 1024 * 1024, 50000, 30 * 1000, 100, 15 * 1000, 1, true, DatabusClientNettyThreadPools.createNettyThreadPools(id), 0, DbusEventFactory.DBUS_EVENT_V2, 0);
        cr = new ClientRunner(clientConn);
        cr.start();
        log.info("wait till client gets the event");
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                int events = countingConsumer1.getNumDataEvents();
                LOG.debug("client got " + events + " events");
                return events == 2;
            }
        }, "Consumer didn't get 2 events ", 64 * 1024, LOG);
        // asserts
        Assert.assertEquals(countingConsumer1.getNumDataEvents(), 2);
        Assert.assertEquals(countingConsumer1.getNumWindows(), 2);
        Assert.assertEquals(countingConsumer1.getNumDataEvents(DbusEventFactory.DBUS_EVENT_V1), 1);
        Assert.assertEquals(countingConsumer1.getNumDataEvents(DbusEventFactory.DBUS_EVENT_V2), 1);
    } finally {
        cleanup(new DatabusRelayTestUtil.RelayRunner[] { r1 }, cr);
    }
}
Also used: ConditionCheck (com.linkedin.databus2.test.ConditionCheck), ReadableByteChannel (java.nio.channels.ReadableByteChannel), ClientRunner (com.linkedin.databus2.relay.TestDatabusRelayMain.ClientRunner), ByteBufferInputStream (org.apache.zookeeper.server.ByteBufferInputStream), Logger (org.apache.log4j.Logger), ByteBuffer (java.nio.ByteBuffer), DbusEventBuffer (com.linkedin.databus.core.DbusEventBuffer), DatabusSourcesConnection (com.linkedin.databus.client.DatabusSourcesConnection), DatabusRelayTestUtil (com.linkedin.databus2.relay.util.test.DatabusRelayTestUtil), DbusEventBufferMult (com.linkedin.databus.core.DbusEventBufferMult), PhysicalPartition (com.linkedin.databus.core.data_model.PhysicalPartition), Test (org.testng.annotations.Test)
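
A note on the channel setup in this test: the pre-serialized ByteBuffer is wrapped in ZooKeeper's ByteBufferInputStream only so that Channels.newChannel() can turn it into a ReadableByteChannel for DbusEventBuffer.readEvents(). The same adaptation can be written with plain JDK classes; the sketch below is a hypothetical stand-in (the class name is illustrative and not part of databus).

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ReadableByteChannel;

/** Minimal read-only channel over a ByteBuffer -- a plain-JDK alternative to
 *  Channels.newChannel(new ByteBufferInputStream(buf)) as used in the test above. */
final class ByteBufferReadableChannel implements ReadableByteChannel {

    private final ByteBuffer _src;

    private boolean _open = true;

    ByteBufferReadableChannel(ByteBuffer src) {
        _src = src;
    }

    @Override
    public int read(ByteBuffer dst) throws IOException {
        if (!_open) {
            throw new ClosedChannelException();
        }
        if (!_src.hasRemaining()) {
            // no more serialized event bytes in the source buffer
            return -1;
        }
        int n = Math.min(_src.remaining(), dst.remaining());
        ByteBuffer slice = _src.duplicate();
        slice.limit(slice.position() + n);
        dst.put(slice);
        _src.position(_src.position() + n);
        return n;
    }

    @Override
    public boolean isOpen() {
        return _open;
    }

    @Override
    public void close() {
        _open = false;
    }
}

With such a wrapper the call site keeps the same shape: buf.readEvents(new ByteBufferReadableChannel(serializationBuffer)).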

Example 12 with EventProducer

Use of com.linkedin.databus2.producers.EventProducer in project databus by LinkedIn.

The class TestDatabusRelayMain, method testRelayEventGenerator().

@Test
public void testRelayEventGenerator() throws InterruptedException, InvalidConfigException {
    DatabusRelayTestUtil.RelayRunner r1 = null;
    //Logger.getRootLogger().setLevel(Level.INFO);
    final Logger log = Logger.getLogger("TestDatabusRelayMain.testRelayEventGenerator");
    //log.setLevel(Level.DEBUG);
    ClientRunner cr = null;
    try {
        String[][] srcNames = { { "com.linkedin.events.example.fake.FakeSchema", "com.linkedin.events.example.person.Person" } };
        log.info("create main relay with random generator");
        PhysicalSourceConfig[] srcConfigs = new PhysicalSourceConfig[srcNames.length];
        int i = 0;
        int eventRatePerSec = 20;
        for (String[] srcs : srcNames) {
            PhysicalSourceConfig src1 = DatabusRelayTestUtil.createPhysicalConfigBuilder((short) (i + 1), DatabusRelayTestUtil.getPhysicalSrcName(srcs[0]), "mock", 500, eventRatePerSec, srcs);
            srcConfigs[i++] = src1;
        }
        int relayPort = Utils.getAvailablePort(11993);
        final DatabusRelayMain relay1 = DatabusRelayTestUtil.createDatabusRelayWithSchemaReg(1001, relayPort, 10 * 1024 * 1024, srcConfigs, SCHEMA_REGISTRY_DIR);
        Assert.assertNotEquals(relay1, null);
        r1 = new DatabusRelayTestUtil.RelayRunner(relay1);
        log.info("async starts");
        r1.start();
        log.info("start client in parallel");
        String srcSubscriptionString = TestUtil.join(srcNames[0], ",");
        String serverName = "localhost:" + relayPort;
        final CountingConsumer countingConsumer = new CountingConsumer();
        DatabusSourcesConnection clientConn = RelayEventProducer.createDatabusSourcesConnection("testProducer", serverName, srcSubscriptionString, countingConsumer, 1 * 1024 * 1024, 50000, 30 * 1000, 100, 15 * 1000, 1, true);
        cr = new ClientRunner(clientConn);
        cr.start();
        log.info("terminating conditions");
        final DbusEventsTotalStats stats = relay1.getInboundEventStatisticsCollector().getTotalStats();
        long totalRunTime = 5000;
        long startTime = System.currentTimeMillis();
        do {
            log.info("numDataEvents=" + stats.getNumDataEvents() + " numWindows=" + stats.getNumSysEvents() + " size=" + stats.getSizeDataEvents());
            Thread.sleep(1000);
        } while ((System.currentTimeMillis() - startTime) < totalRunTime);
        r1.pause();
        log.info("Sending pause to relay!");
        log.info("numDataEvents=" + stats.getNumDataEvents() + " numWindows=" + stats.getNumSysEvents() + " size=" + stats.getSizeDataEvents());
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                boolean success = true;
                for (EventProducer p : relay1.getProducers()) {
                    if (!(success = success && p.isPaused()))
                        break;
                }
                return success;
            }
        }, "waiting for producers to pause", 4000, log);
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                log.debug("countingConsumer.getNumWindows()=" + countingConsumer.getNumWindows());
                return countingConsumer.getNumWindows() == stats.getNumSysEvents();
            }
        }, "wait until client got all events or for maxTimeout", 64 * 1024, log);
        log.info("Client stats=" + countingConsumer);
        log.info("Event windows generated=" + stats.getNumSysEvents());
        cr.shutdown(2000, log);
        log.info("Client cr stopped");
        Assert.assertEquals(countingConsumer.getNumDataEvents(), stats.getNumDataEvents());
        boolean stopped = r1.shutdown(2000);
        Assert.assertTrue(stopped);
        log.info("Relay r1 stopped");
    } finally {
        cleanup(new DatabusRelayTestUtil.RelayRunner[] { r1 }, cr);
    }
}
Also used: ConditionCheck (com.linkedin.databus2.test.ConditionCheck), RelayEventProducer (com.linkedin.databus2.producers.RelayEventProducer), EventProducer (com.linkedin.databus2.producers.EventProducer), Logger (org.apache.log4j.Logger), Checkpoint (com.linkedin.databus.core.Checkpoint), DatabusSourcesConnection (com.linkedin.databus.client.DatabusSourcesConnection), PhysicalSourceConfig (com.linkedin.databus2.relay.config.PhysicalSourceConfig), DatabusRelayTestUtil (com.linkedin.databus2.relay.util.test.DatabusRelayTestUtil), DbusEventsTotalStats (com.linkedin.databus.core.monitoring.mbean.DbusEventsTotalStats), Test (org.testng.annotations.Test)
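
Both this example and the previous one wait for asynchronous conditions (relay up, producers paused, consumer caught up) via TestUtil.assertWithBackoff() and an anonymous ConditionCheck. The pattern underneath is simply poll-until-true-or-timeout; the self-contained sketch below illustrates it (PollUtil and waitFor are hypothetical names, not part of the databus test utilities).

import java.util.concurrent.Callable;

/** Hypothetical helper illustrating the polling pattern behind
 *  TestUtil.assertWithBackoff(ConditionCheck, message, timeoutMs, log). */
public final class PollUtil {

    private PollUtil() {
    }

    /** Polls the condition every pollMs until it returns true or timeoutMs elapses. */
    public static boolean waitFor(Callable<Boolean> condition, long timeoutMs, long pollMs)
            throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            if (Boolean.TRUE.equals(condition.call())) {
                return true;
            }
            Thread.sleep(pollMs);
        }
        // one final check at the deadline
        return Boolean.TRUE.equals(condition.call());
    }
}

A condition such as "all producers paused" then becomes a Callable<Boolean> that loops over relay1.getProducers() and returns false as soon as one producer reports !isPaused(), which is exactly what the anonymous ConditionCheck above does.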

Example 13 with EventProducer

Use of com.linkedin.databus2.producers.EventProducer in project databus by LinkedIn.

The class TestGenericDispatcher, method testPartialWindowRollback().

/**
     * Tests the case where the dispatcher exits the main processing loop in {@link GenericDispatcher#doDispatchEvents()}
     * with a partial window and the flushing of the outstanding callbacks fails. We want to make sure that a rollback
     * is correctly triggered.
     *
     * The test simulates the following case: e1_1 e1_2 e1_3 <EOW> e2_1 e2_2 e2_3 <EOW> ... with a failure in the e2_2
     * callback.
     *
     * 1) Read full first window: e1_1 e1_2 e1_3 <EOW>
     * 2) Read partial second window: e2_1 e2_2
     * 3) The above should fail -- verify that rollback is called
     * 4) Read the rest
     */
@Test(groups = { "small", "functional" })
public void testPartialWindowRollback() throws Exception {
    final Logger log = Logger.getLogger("TestGenericDispatcher.testPartialWindowRollback");
    //log.setLevel(Level.INFO);
    log.info("start");
    final Level saveLevel = Logger.getLogger("com.linkedin.databus.client").getLevel();
    //Logger.getLogger("com.linkedin.databus.client").setLevel(Level.DEBUG);
    // generate events
    Vector<Short> srcIdList = new Vector<Short>();
    srcIdList.add((short) 1);
    DbusEventGenerator evGen = new DbusEventGenerator(0, srcIdList);
    Vector<DbusEvent> srcTestEvents = new Vector<DbusEvent>();
    final int numEvents = 9;
    //1-based number of the event callback to fail
    final int numOfFailureEvent = 5;
    final int numEventsPerWindow = 3;
    final int payloadSize = 200;
    final int numWindows = (int) Math.ceil(1.0 * numEvents / numEventsPerWindow);
    Assert.assertTrue(evGen.generateEvents(numEvents, numEventsPerWindow, 500, payloadSize, srcTestEvents) > 0);
    // find out how much data we need to stream for the failure
    // account for the EOW event which is < payload size
    int win1Size = payloadSize - 1;
    int win2Size = 0;
    int eventN = 0;
    for (DbusEvent e : srcTestEvents) {
        eventN++;
        if (eventN <= numEventsPerWindow) {
            win1Size += e.size();
        } else if (eventN <= numOfFailureEvent) {
            win2Size += e.size();
        }
    }
    //serialize the events to a buffer so they can be sent to the client
    final TestGenericDispatcherEventBuffer srcEventsBuf = new TestGenericDispatcherEventBuffer(_generic100KBufferStaticConfig);
    DbusEventAppender eventProducer = new DbusEventAppender(srcTestEvents, srcEventsBuf, null, true);
    Thread tEmitter = new Thread(eventProducer);
    tEmitter.start();
    //Create destination (client) buffer
    final TestGenericDispatcherEventBuffer destEventsBuf = new TestGenericDispatcherEventBuffer(_generic100KBufferStaticConfig);
    //Create dispatcher
    final TimeoutTestConsumer mockConsumer = new TimeoutTestConsumer(100, 10, 0, numOfFailureEvent, 0, 1);
    SelectingDatabusCombinedConsumer sdccMockConsumer = new SelectingDatabusCombinedConsumer((DatabusStreamConsumer) mockConsumer);
    List<String> sources = new ArrayList<String>();
    Map<Long, IdNamePair> sourcesMap = new HashMap<Long, IdNamePair>();
    for (int i = 1; i <= 3; ++i) {
        IdNamePair sourcePair = new IdNamePair((long) i, "source" + i);
        sources.add(sourcePair.getName());
        sourcesMap.put(sourcePair.getId(), sourcePair);
    }
    DatabusV2ConsumerRegistration consumerReg = new DatabusV2ConsumerRegistration(sdccMockConsumer, sources, null);
    List<DatabusV2ConsumerRegistration> allRegistrations = Arrays.asList(consumerReg);
    final ConsumerCallbackStats callbackStats = new ConsumerCallbackStats(0, "test", "test", true, false, null);
    final UnifiedClientStats unifiedStats = new UnifiedClientStats(0, "test", "test.unified");
    MultiConsumerCallback callback = new MultiConsumerCallback(allRegistrations, Executors.newFixedThreadPool(2), 1000, new StreamConsumerCallbackFactory(callbackStats, unifiedStats), callbackStats, unifiedStats, null, null);
    callback.setSourceMap(sourcesMap);
    List<DatabusSubscription> subs = DatabusSubscription.createSubscriptionList(sources);
    final RelayDispatcher dispatcher = new RelayDispatcher("dispatcher", _genericRelayConnStaticConfig, subs, new InMemoryPersistenceProvider(), destEventsBuf, callback, null, null, null, null, null);
    dispatcher.setSchemaIdCheck(false);
    Thread dispatcherThread = new Thread(dispatcher);
    dispatcherThread.setDaemon(true);
    log.info("starting dispatcher thread");
    dispatcherThread.start();
    HashMap<Long, List<RegisterResponseEntry>> schemaMap = new HashMap<Long, List<RegisterResponseEntry>>();
    List<RegisterResponseEntry> l1 = new ArrayList<RegisterResponseEntry>();
    List<RegisterResponseEntry> l2 = new ArrayList<RegisterResponseEntry>();
    List<RegisterResponseEntry> l3 = new ArrayList<RegisterResponseEntry>();
    l1.add(new RegisterResponseEntry(1L, (short) 1, SOURCE1_SCHEMA_STR));
    l2.add(new RegisterResponseEntry(2L, (short) 1, SOURCE2_SCHEMA_STR));
    l3.add(new RegisterResponseEntry(3L, (short) 1, SOURCE3_SCHEMA_STR));
    schemaMap.put(1L, l1);
    schemaMap.put(2L, l2);
    schemaMap.put(3L, l3);
    dispatcher.enqueueMessage(SourcesMessage.createSetSourcesIdsMessage(sourcesMap.values()));
    dispatcher.enqueueMessage(SourcesMessage.createSetSourcesSchemasMessage(schemaMap));
    log.info("starting event dispatch");
    //stream the events from the source buffer without the EOW
    //comm channels between reader and writer
    Pipe pipe = Pipe.open();
    Pipe.SinkChannel writerStream = pipe.sink();
    Pipe.SourceChannel readerStream = pipe.source();
    writerStream.configureBlocking(true);
    readerStream.configureBlocking(false);
    //Event writer - Relay in the real world
    Checkpoint cp = Checkpoint.createFlexibleCheckpoint();
    //Event readers - Clients in the real world
    //Checkpoint pullerCheckpoint = Checkpoint.createFlexibleCheckpoint();
    DbusEventsStatisticsCollector clientStats = new DbusEventsStatisticsCollector(0, "client", true, false, null);
    DbusEventBufferReader reader = new DbusEventBufferReader(destEventsBuf, readerStream, null, clientStats);
    UncaughtExceptionTrackingThread tReader = new UncaughtExceptionTrackingThread(reader, "Reader");
    tReader.setDaemon(true);
    tReader.start();
    try {
        log.info("send first window -- that one should be OK");
        StreamEventsResult streamRes = srcEventsBuf.streamEvents(cp, writerStream, new StreamEventsArgs(win1Size));
        Assert.assertEquals(numEventsPerWindow + 1, streamRes.getNumEventsStreamed());
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                return 1 == callbackStats.getNumSysEventsProcessed();
            }
        }, "first window processed", 5000, log);
        log.info("send the second partial window -- that one should cause an error");
        streamRes = srcEventsBuf.streamEvents(cp, writerStream, new StreamEventsArgs(win2Size));
        Assert.assertEquals(numOfFailureEvent - numEventsPerWindow, streamRes.getNumEventsStreamed());
        log.info("wait for dispatcher to finish");
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                log.info("events received: " + callbackStats.getNumDataEventsReceived());
                return numOfFailureEvent <= callbackStats.getNumDataEventsProcessed();
            }
        }, "all events until the error processed", 5000, log);
        log.info("all data events have been received but no EOW");
        Assert.assertEquals(numOfFailureEvent, clientStats.getTotalStats().getNumDataEvents());
        Assert.assertEquals(1, clientStats.getTotalStats().getNumSysEvents());
        //at least one failing event therefore < numOfFailureEvent events can be processed
        Assert.assertTrue(numOfFailureEvent <= callbackStats.getNumDataEventsProcessed());
        //onDataEvent callbacks for e2_1 and e2_2 get cancelled
        Assert.assertEquals(2, callbackStats.getNumDataErrorsProcessed());
        //only one EOW
        Assert.assertEquals(1, callbackStats.getNumSysEventsProcessed());
        log.info("Send the remainder of the window");
        streamRes = srcEventsBuf.streamEvents(cp, writerStream, new StreamEventsArgs(100000));
        //remaining events + EOWs
        Assert.assertEquals(srcTestEvents.size() + numWindows - (numOfFailureEvent + 1), streamRes.getNumEventsStreamed());
        log.info("wait for the rollback");
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                return 1 == mockConsumer.getNumRollbacks();
            }
        }, "rollback seen", 5000, log);
        log.info("wait for dispatcher to finish after the rollback");
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                log.info("num windows processed: " + callbackStats.getNumSysEventsProcessed());
                return numWindows == callbackStats.getNumSysEventsProcessed();
            }
        }, "all events processed", 5000, log);
    } finally {
        reader.stop();
        dispatcher.shutdown();
        log.info("all events processed");
        verifyNoLocks(null, srcEventsBuf);
        verifyNoLocks(null, destEventsBuf);
    }
    Logger.getLogger("com.linkedin.databus.client").setLevel(saveLevel);
    log.info("end\n");
}
Also used: DbusEventAppender (com.linkedin.databus.core.test.DbusEventAppender), DatabusV2ConsumerRegistration (com.linkedin.databus.client.consumer.DatabusV2ConsumerRegistration), UncaughtExceptionTrackingThread (com.linkedin.databus.core.util.UncaughtExceptionTrackingThread), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), DbusEventsStatisticsCollector (com.linkedin.databus.core.monitoring.mbean.DbusEventsStatisticsCollector), StreamEventsArgs (com.linkedin.databus.core.StreamEventsArgs), Logger (org.apache.log4j.Logger), IdNamePair (com.linkedin.databus.core.util.IdNamePair), List (java.util.List), Vector (java.util.Vector), DbusEventBufferReader (com.linkedin.databus.core.test.DbusEventBufferReader), ConditionCheck (com.linkedin.databus2.test.ConditionCheck), StreamConsumerCallbackFactory (com.linkedin.databus.client.consumer.StreamConsumerCallbackFactory), UnifiedClientStats (com.linkedin.databus.client.pub.mbean.UnifiedClientStats), DbusEvent (com.linkedin.databus.core.DbusEvent), StreamEventsResult (com.linkedin.databus.core.StreamEventsResult), ConsumerCallbackStats (com.linkedin.databus.client.pub.mbean.ConsumerCallbackStats), MultiConsumerCallback (com.linkedin.databus.client.consumer.MultiConsumerCallback), DbusEventGenerator (com.linkedin.databus.core.test.DbusEventGenerator), Pipe (java.nio.channels.Pipe), DatabusSubscription (com.linkedin.databus.core.data_model.DatabusSubscription), Checkpoint (com.linkedin.databus.core.Checkpoint), SelectingDatabusCombinedConsumer (com.linkedin.databus.client.consumer.SelectingDatabusCombinedConsumer), RegisterResponseEntry (com.linkedin.databus2.core.container.request.RegisterResponseEntry), Level (org.apache.log4j.Level), Test (org.testng.annotations.Test)
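
The writer/reader plumbing in this test (a Pipe whose blocking sink plays the relay and whose non-blocking source plays the client) is standard java.nio and easy to miss among the databus setup. The hypothetical standalone demo below shows just that wiring, with no databus classes involved.

import java.nio.ByteBuffer;
import java.nio.channels.Pipe;
import java.nio.charset.StandardCharsets;

/** Hypothetical standalone demo of the Pipe wiring used in the test above. */
public class PipeWiringDemo {

    public static void main(String[] args) throws Exception {
        Pipe pipe = Pipe.open();
        // "relay" side: blocking writes
        Pipe.SinkChannel writerStream = pipe.sink();
        // "client" side: non-blocking reads
        Pipe.SourceChannel readerStream = pipe.source();
        writerStream.configureBlocking(true);
        readerStream.configureBlocking(false);

        writerStream.write(ByteBuffer.wrap("window-1".getBytes(StandardCharsets.UTF_8)));

        ByteBuffer dst = ByteBuffer.allocate(64);
        // a non-blocking read returns 0 when nothing is available yet, so poll
        while (readerStream.read(dst) == 0) {
            Thread.sleep(10);
        }
        dst.flip();
        // prints "window-1"
        System.out.println(StandardCharsets.UTF_8.decode(dst));

        writerStream.close();
        readerStream.close();
    }
}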

Example 14 with EventProducer

Use of com.linkedin.databus2.producers.EventProducer in project databus by LinkedIn.

The class ControlSourceEventsRequestProcessor, method doPause().

private void doPause(DatabusRequest request) throws IOException {
    Set<String> sources = getSourcesParam(request);
    for (EventProducer producer : _eventProducers) {
        if (sources == null || sources.contains(producer.getName())) {
            producer.pause();
        }
    }
    doStatus(request);
}
Also used: EventProducer (com.linkedin.databus2.producers.EventProducer)

Example 15 with EventProducer

Use of com.linkedin.databus2.producers.EventProducer in project databus by LinkedIn.

The class DatabusRelayMain, method pause().

@Override
public void pause() {
    for (Entry<PhysicalPartition, EventProducer> entry : _producers.entrySet()) {
        EventProducer producer = entry.getValue();
        if (null != producer) {
            if (producer.isRunning()) {
                producer.pause();
                LOG.info("EventProducer :" + producer.getName() + "  pause sent");
            } else if (producer.isPaused()) {
                LOG.info("EventProducer :" + producer.getName() + "  already paused");
            }
        }
    }
}
Also used: OracleEventProducer (com.linkedin.databus2.producers.db.OracleEventProducer), RelayEventProducer (com.linkedin.databus2.producers.RelayEventProducer), EventProducer (com.linkedin.databus2.producers.EventProducer), PhysicalPartition (com.linkedin.databus.core.data_model.PhysicalPartition)
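
The pause() implementation above has an obvious counterpart that resumes the producers. The sketch below mirrors its structure as a standalone helper; ProducerControl and resumeAll() are hypothetical names, and it assumes EventProducer exposes an unpause() method as the counterpart of pause() (isPaused() itself appears in the examples above).

import java.util.Map;

import com.linkedin.databus.core.data_model.PhysicalPartition;
import com.linkedin.databus2.producers.EventProducer;

/** Hypothetical helper mirroring DatabusRelayMain.pause(): resumes every paused producer. */
public final class ProducerControl {

    private ProducerControl() {
    }

    public static void resumeAll(Map<PhysicalPartition, EventProducer> producers) {
        for (Map.Entry<PhysicalPartition, EventProducer> entry : producers.entrySet()) {
            EventProducer producer = entry.getValue();
            if (null == producer) {
                continue;
            }
            if (producer.isPaused()) {
                // unpause() is assumed to be the counterpart of pause() on EventProducer
                producer.unpause();
            }
        }
    }
}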

Aggregations

EventProducer (com.linkedin.databus2.producers.EventProducer): 14 usages
ArrayList (java.util.ArrayList): 13 usages
Checkpoint (com.linkedin.databus.core.Checkpoint): 9 usages
DatabusV2ConsumerRegistration (com.linkedin.databus.client.consumer.DatabusV2ConsumerRegistration): 8 usages
MultiConsumerCallback (com.linkedin.databus.client.consumer.MultiConsumerCallback): 8 usages
StreamConsumerCallbackFactory (com.linkedin.databus.client.consumer.StreamConsumerCallbackFactory): 8 usages
DbusEvent (com.linkedin.databus.core.DbusEvent): 8 usages
DatabusSubscription (com.linkedin.databus.core.data_model.DatabusSubscription): 8 usages
DbusEventAppender (com.linkedin.databus.core.test.DbusEventAppender): 8 usages
DbusEventGenerator (com.linkedin.databus.core.test.DbusEventGenerator): 8 usages
IdNamePair (com.linkedin.databus.core.util.IdNamePair): 8 usages
UncaughtExceptionTrackingThread (com.linkedin.databus.core.util.UncaughtExceptionTrackingThread): 8 usages
SelectingDatabusCombinedConsumer (com.linkedin.databus.client.consumer.SelectingDatabusCombinedConsumer): 7 usages
PhysicalPartition (com.linkedin.databus.core.data_model.PhysicalPartition): 7 usages
RegisterResponseEntry (com.linkedin.databus2.core.container.request.RegisterResponseEntry): 7 usages
RelayEventProducer (com.linkedin.databus2.producers.RelayEventProducer): 7 usages
OracleEventProducer (com.linkedin.databus2.producers.db.OracleEventProducer): 7 usages
HashMap (java.util.HashMap): 7 usages
List (java.util.List): 7 usages
Vector (java.util.Vector): 7 usages