
Example 16 with PhysicalPartition

use of com.linkedin.databus.core.data_model.PhysicalPartition in project databus by linkedin.

the class TestDatabusRelayEvents method testV2Events.

/**
   * Stuffs an event buffer with both a v1 and a v2 event, then reads the buffer two ways:
   * first accepting only v1 events (verifying conversion of the v2 event to v1); then accepting
   * both v1 and v2 events.
   *
   * Note that the version of the _EOP_ events must match the version of the event factory,
   * regardless of the versions of any preceding "real" events.  (This matches DbusEventBuffer
   * behavior; see the serializeLongKeyEndOfPeriodMarker() call in endEvents() for details.)
   */
@Test
public void testV2Events() throws KeyTypeNotImplementedException, InvalidEventException, IOException, DatabusException {
    final Logger log = Logger.getLogger("TestDatabusRelayEvents.testV2Events");
    log.setLevel(Level.DEBUG);
    String[] srcs = { "com.linkedin.events.example.fake.FakeSchema" };
    String pSourceName = DatabusRelayTestUtil.getPhysicalSrcName(srcs[0]);
    short srcId = 2;
    short pId = 1;
    int relayPort = Utils.getAvailablePort(11993);
    // create relay
    final DatabusRelayMain relay1 = createRelay(relayPort, pId, srcs);
    DatabusRelayTestUtil.RelayRunner r1 = null;
    ClientRunner cr = null;
    try {
        //EventProducer[] producers = relay1.getProducers();
        r1 = new DatabusRelayTestUtil.RelayRunner(relay1);
        log.info("Relay created");
        DbusEventBufferMult bufMult = relay1.getEventBuffer();
        PhysicalPartition pPartition = new PhysicalPartition((int) pId, pSourceName);
        DbusEventBuffer buf = (DbusEventBuffer) bufMult.getDbusEventBufferAppendable(pPartition);
        log.info("create some events");
        long windowScn = 100L;
        ByteBuffer serializationBuffer = addEvent(windowScn, srcId, relay1.getSchemaRegistryService().fetchSchemaIdForSourceNameAndVersion(srcs[0], 2).getByteArray(), pId, DbusEventFactory.DBUS_EVENT_V2);
        ReadableByteChannel channel = Channels.newChannel(new ByteBufferInputStream(serializationBuffer));
        int readEvents = buf.readEvents(channel);
        log.info("successfully read in " + readEvents + " events ");
        channel.close();
        windowScn = 101L;
        serializationBuffer = addEvent(windowScn, srcId, relay1.getSchemaRegistryService().fetchSchemaIdForSourceNameAndVersion(srcs[0], 2).getByteArray(), pId, DbusEventFactory.DBUS_EVENT_V1);
        channel = Channels.newChannel(new ByteBufferInputStream(serializationBuffer));
        readEvents = buf.readEvents(channel);
        log.info("successfully read in " + readEvents + " events ");
        channel.close();
        log.info("starting relay on port " + relayPort);
        r1.start();
        //TestUtil.sleep(10*1000);
        // wait until relay comes up
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                return relay1.isRunningStatus();
            }
        }, "Relay hasn't come up completely ", 30000, LOG);
        log.info("now create client");
        String srcSubscriptionString = TestUtil.join(srcs, ",");
        String serverName = "localhost:" + relayPort;
        final EventsCountingConsumer countingConsumer = new EventsCountingConsumer();
        int id = (RngUtils.randomPositiveInt() % 10000) + 1;
        DatabusSourcesConnection clientConn = RelayEventProducer.createDatabusSourcesConnection("testProducer", id, serverName, srcSubscriptionString, countingConsumer, 1 * 1024 * 1024, 50000, 30 * 1000, 100, 15 * 1000, 1, true, DatabusClientNettyThreadPools.createNettyThreadPools(id), 0, DbusEventFactory.DBUS_EVENT_V1, 0);
        cr = new ClientRunner(clientConn);
        log.info("starting client");
        cr.start();
        // wait till client gets the event
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                int events = countingConsumer.getNumDataEvents();
                LOG.info("client got " + events + " events");
                return events == 2;
            }
        }, "Consumer didn't get 2 events ", 64 * 1024, LOG);
        // asserts
        Assert.assertEquals(countingConsumer.getNumDataEvents(), 2);
        Assert.assertEquals(countingConsumer.getNumWindows(), 2);
        Assert.assertEquals(countingConsumer.getNumDataEvents(DbusEventFactory.DBUS_EVENT_V1), 2);
        log.info("shutdown first client");
        clientConn.stop();
        cr.shutdown();
        TestUtil.sleep(1000);
        cr = null;
        log.info("start another client who understands V2");
        final EventsCountingConsumer countingConsumer1 = new EventsCountingConsumer();
        clientConn = RelayEventProducer.createDatabusSourcesConnection("testProducer", id, serverName, srcSubscriptionString, countingConsumer1, 1 * 1024 * 1024, 50000, 30 * 1000, 100, 15 * 1000, 1, true, DatabusClientNettyThreadPools.createNettyThreadPools(id), 0, DbusEventFactory.DBUS_EVENT_V2, 0);
        cr = new ClientRunner(clientConn);
        cr.start();
        log.info("wait till client gets the event");
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                int events = countingConsumer1.getNumDataEvents();
                LOG.debug("client got " + events + " events");
                return events == 2;
            }
        }, "Consumer didn't get 2 events ", 64 * 1024, LOG);
        // asserts
        Assert.assertEquals(countingConsumer1.getNumDataEvents(), 2);
        Assert.assertEquals(countingConsumer1.getNumWindows(), 2);
        Assert.assertEquals(countingConsumer1.getNumDataEvents(DbusEventFactory.DBUS_EVENT_V1), 1);
        Assert.assertEquals(countingConsumer1.getNumDataEvents(DbusEventFactory.DBUS_EVENT_V2), 1);
    } finally {
        cleanup(new DatabusRelayTestUtil.RelayRunner[] { r1 }, cr);
    }
}
Also used : ConditionCheck(com.linkedin.databus2.test.ConditionCheck) ReadableByteChannel(java.nio.channels.ReadableByteChannel) ClientRunner(com.linkedin.databus2.relay.TestDatabusRelayMain.ClientRunner) ByteBufferInputStream(org.apache.zookeeper.server.ByteBufferInputStream) Logger(org.apache.log4j.Logger) ByteBuffer(java.nio.ByteBuffer) DbusEventBuffer(com.linkedin.databus.core.DbusEventBuffer) DatabusSourcesConnection(com.linkedin.databus.client.DatabusSourcesConnection) DatabusRelayTestUtil(com.linkedin.databus2.relay.util.test.DatabusRelayTestUtil) DbusEventBufferMult(com.linkedin.databus.core.DbusEventBufferMult) PhysicalPartition(com.linkedin.databus.core.data_model.PhysicalPartition) Test(org.testng.annotations.Test)
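The test above calls an addEvent(windowScn, srcId, schemaId, pId, version) helper whose body is not shown on this page. The sketch below approximates what such a helper has to do, using only the PhysicalPartition, DbusEventBufferAppendable, DbusEventKey and DbusEventInfo calls that appear in the next example (testEventConversion); the key value, timestamp and flag comments are illustrative, and the real helper serializes into a ByteBuffer rather than appending directly to the relay buffer.

// Sketch only: not the original addEvent(...) helper.
PhysicalPartition pPartition = new PhysicalPartition((int) pId, pSourceName);
DbusEventBufferAppendable appendable = bufMult.getDbusEventBufferAppendable(pPartition);
DbusEventKey key = new DbusEventKey(123L); // illustrative key
byte[] payload = RngUtils.randomString(100).getBytes(Charset.defaultCharset());
DbusEventInfo eventInfo = new DbusEventInfo(DbusOpcode.UPSERT, windowScn, pId, pId, 897L, srcId, schemaId, payload, false, true);
// Choose the on-the-wire serialization version for this event (DBUS_EVENT_V1 or DBUS_EVENT_V2).
eventInfo.setEventSerializationVersion(DbusEventFactory.DBUS_EVENT_V2);
appendable.startEvents();
appendable.appendEvent(key, eventInfo, null);
appendable.endEvents(windowScn, null); // writes the end-of-window marker for windowScn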

Example 17 with PhysicalPartition

use of com.linkedin.databus.core.data_model.PhysicalPartition in project databus by linkedin.

the class TestDatabusRelayEvents method testEventConversion.

/**
   * Appends a V2 event to the buffer and streams it to a client that accepts
   * only V1 events; verifies that the event was converted.
   */
@Test
public void testEventConversion() throws InterruptedException, IOException, DatabusException {
    final Logger log = Logger.getLogger("TestDatabusRelayEvents.testEventConversion");
    log.setLevel(Level.INFO);
    DatabusRelayTestUtil.RelayRunner r1 = null;
    ClientRunner cr = null;
    try {
        String[] srcs = { "com.linkedin.events.example.fake.FakeSchema" };
        int pId = 1;
        int srcId = 2;
        int relayPort = Utils.getAvailablePort(11994);
        final DatabusRelayMain relay1 = createRelay(relayPort, pId, srcs);
        Assert.assertNotNull(relay1);
        r1 = new DatabusRelayTestUtil.RelayRunner(relay1);
        log.info("Relay created");
        DbusEventBufferMult bufMult = relay1.getEventBuffer();
        String pSourceName = DatabusRelayTestUtil.getPhysicalSrcName(srcs[0]);
        PhysicalPartition pPartition = new PhysicalPartition(pId, pSourceName);
        DbusEventBufferAppendable buf = bufMult.getDbusEventBufferAppendable(pPartition);
        DbusEventKey key = new DbusEventKey(123L);
        byte[] schemaId = relay1.getSchemaRegistryService().fetchSchemaIdForSourceNameAndVersion(srcs[0], 2).getByteArray();
        byte[] payload = RngUtils.randomString(100).getBytes(Charset.defaultCharset());
        DbusEventInfo eventInfo = new DbusEventInfo(DbusOpcode.UPSERT, 100L, (short) pId, (short) pId, 897L, (short) srcId, schemaId, payload, false, true);
        eventInfo.setEventSerializationVersion(DbusEventFactory.DBUS_EVENT_V2);
        buf.startEvents();
        buf.appendEvent(key, eventInfo, null);
        buf.endEvents(100L, null);
        r1.start();
        log.info("Relay started");
        // wait until relay comes up
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                return relay1.isRunningStatus();
            }
        }, "Relay hasn't come up completely ", 7000, LOG);
        // now create client:
        String srcSubscriptionString = TestUtil.join(srcs, ",");
        String serverName = "localhost:" + relayPort;
        final EventsCountingConsumer countingConsumer = new EventsCountingConsumer();
        int id = (RngUtils.randomPositiveInt() % 10000) + 1;
        DatabusSourcesConnection clientConn = RelayEventProducer.createDatabusSourcesConnection("testProducer", id, serverName, srcSubscriptionString, countingConsumer, 1 * 1024 * 1024, 50000, 30 * 1000, 100, 15 * 1000, 1, true, DatabusClientNettyThreadPools.createNettyThreadPools(id), 0, DbusEventFactory.DBUS_EVENT_V1, 0);
        cr = new ClientRunner(clientConn);
        cr.start();
        log.info("Consumer started");
        // wait till client gets the event
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                return countingConsumer.getNumDataEvents() == 1;
            }
        }, "Consumer didn't get any events ", 64 * 1024, LOG);
        // asserts
        Assert.assertEquals(1, countingConsumer.getNumDataEvents());
        Assert.assertEquals(1, countingConsumer.getNumWindows());
        Assert.assertEquals(1, countingConsumer.getNumDataEvents(DbusEventFactory.DBUS_EVENT_V1));
    } finally {
        cleanup(new DatabusRelayTestUtil.RelayRunner[] { r1 }, cr);
    }
}
Also used : ConditionCheck(com.linkedin.databus2.test.ConditionCheck) ClientRunner(com.linkedin.databus2.relay.TestDatabusRelayMain.ClientRunner) DbusEventBufferAppendable(com.linkedin.databus.core.DbusEventBufferAppendable) Logger(org.apache.log4j.Logger) DatabusSourcesConnection(com.linkedin.databus.client.DatabusSourcesConnection) DbusEventInfo(com.linkedin.databus.core.DbusEventInfo) DatabusRelayTestUtil(com.linkedin.databus2.relay.util.test.DatabusRelayTestUtil) DbusEventBufferMult(com.linkedin.databus.core.DbusEventBufferMult) DbusEventKey(com.linkedin.databus.core.DbusEventKey) PhysicalPartition(com.linkedin.databus.core.data_model.PhysicalPartition) Test(org.testng.annotations.Test)

Example 18 with PhysicalPartition

use of com.linkedin.databus.core.data_model.PhysicalPartition in project databus by linkedin.

the class RelayPullThread method doRequestStream.

protected void doRequestStream(ConnectionState curState) {
    boolean debugEnabled = _log.isDebugEnabled();
    if (debugEnabled)
        _log.debug("Checking for free space in buffer");
    int freeBufferThreshold = (int) (_sourcesConn.getConnectionConfig().getFreeBufferThreshold() * 100.0 / _pullerBufferUtilizationPct);
    try {
        curState.getDataEventsBuffer().waitForFreeSpace(freeBufferThreshold);
    } catch (InterruptedException ie) {
        //loop
        enqueueMessage(curState);
        return;
    }
    Checkpoint cp = curState.getCheckpoint();
    if (debugEnabled)
        _log.debug("Checkpoint at RequestDataEvents: " + cp.toString());
    if (null == _relayFilter) {
        if (debugEnabled)
            _log.debug("Initializing relay filter config");
        _relayFilter = new DbusKeyCompositeFilter();
        Map<String, IdNamePair> srcNameIdMap = curState.getSourcesNameMap();
        for (DbusKeyCompositeFilterConfig conf : _relayFilterConfigs) {
            Map<String, KeyFilterConfigHolder> cMap = conf.getConfigMap();
            Map<Long, KeyFilterConfigHolder> fConfMap = new HashMap<Long, KeyFilterConfigHolder>();
            for (Entry<String, KeyFilterConfigHolder> e : cMap.entrySet()) {
                IdNamePair idName = srcNameIdMap.get(e.getKey());
                if (null != idName) {
                    fConfMap.put(idName.getId(), e.getValue());
                }
            }
            if (debugEnabled)
                _log.debug("FilterConfMap is :" + fConfMap);
            _relayFilter.merge(new DbusKeyCompositeFilter(fConfMap));
        }
        if (debugEnabled)
            _log.debug("Merged Filter (before deduping) is :" + _relayFilter);
        _relayFilter.dedupe();
        if (debugEnabled)
            _log.debug("Merged Filter (after deduping) is :" + _relayFilter);
    }
    _streamCallStartMs = System.currentTimeMillis();
    if (null != _relayCallsStats)
        _relayCallsStats.registerStreamRequest(cp, EMPTY_STREAM_LIST);
    int fetchSize = (int) ((curState.getDataEventsBuffer().getBufferFreeReadSpace() / 100.0) * _pullerBufferUtilizationPct);
    fetchSize = Math.max(freeBufferThreshold, fetchSize);
    CheckpointMult cpMult = new CheckpointMult();
    String args;
    if (curState.getRelayConnection().getProtocolVersion() >= 3) {
        // for version 3 and higher we pass subscriptions
        args = curState.getSubsListString();
        for (DatabusSubscription sub : curState.getSubscriptions()) {
            PhysicalPartition p = sub.getPhysicalPartition();
            cpMult.addCheckpoint(p, cp);
        }
    } else {
        args = curState.getSourcesIdListString();
        cpMult.addCheckpoint(PhysicalPartition.ANY_PHYSICAL_PARTITION, cp);
    }
    curState.switchToStreamRequestSent();
    sendHeartbeat(_sourcesConn.getUnifiedClientStats());
    curState.getRelayConnection().requestStream(args, _relayFilter, fetchSize, cpMult, _sourcesConn.getConnectionConfig().getKeyRange(), curState);
}
Also used : DbusKeyCompositeFilterConfig(com.linkedin.databus2.core.filter.DbusKeyCompositeFilterConfig) HashMap(java.util.HashMap) CheckpointMult(com.linkedin.databus.core.CheckpointMult) DatabusSubscription(com.linkedin.databus.core.data_model.DatabusSubscription) Checkpoint(com.linkedin.databus.core.Checkpoint) KeyFilterConfigHolder(com.linkedin.databus2.core.filter.KeyFilterConfigHolder) Checkpoint(com.linkedin.databus.core.Checkpoint) DbusKeyCompositeFilter(com.linkedin.databus2.core.filter.DbusKeyCompositeFilter) IdNamePair(com.linkedin.databus.core.util.IdNamePair) PhysicalPartition(com.linkedin.databus.core.data_model.PhysicalPartition)
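In doRequestStream above, the PhysicalPartition-specific work is building the CheckpointMult that accompanies the stream request: with relay protocol version 3 or higher every subscribed physical partition gets its own checkpoint, while older protocols fall back to the single wildcard partition. A minimal restatement of that branch, with the surrounding variables (protocolVersion, subscriptions, cp) named illustratively:

// Illustrative sketch of the checkpoint-per-partition branch above.
CheckpointMult cpMult = new CheckpointMult();
if (protocolVersion >= 3) {
    // v3+: one checkpoint per subscribed physical partition
    for (DatabusSubscription sub : subscriptions) {
        cpMult.addCheckpoint(sub.getPhysicalPartition(), cp);
    }
} else {
    // pre-v3: a single checkpoint keyed on the wildcard partition
    cpMult.addCheckpoint(PhysicalPartition.ANY_PHYSICAL_PARTITION, cp);
}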

Example 19 with PhysicalPartition

use of com.linkedin.databus.core.data_model.PhysicalPartition in project databus by linkedin.

the class DatabusRequestExecutionHandler method messageReceived.

@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {
    if (e.getMessage() instanceof HttpRequest) {
        _httpRequest = (HttpRequest) e.getMessage();
        ctx.sendUpstream(e);
    }
    if (e.getMessage() instanceof DatabusRequest) {
        _dbusRequest = (DatabusRequest) e.getMessage();
        // If there is a physical partition stashed away, then restore it into the request now.
        if (ctx.getAttachment() != null && ctx.getAttachment() instanceof PhysicalPartition) {
            _dbusRequest.setCursorPartition((PhysicalPartition) (ctx.getAttachment()));
        }
        /*NettyStats nettyStats = _configManager.getNettyStats();
      boolean nettyStatsEnabled = nettyStats.isEnabled();
      CallCompletion callCompletion = nettyStatsEnabled ?
          nettyStats.getRequestHandler_writeResponse().startCall() :
          null;
      CallCompletion processRequestCompletion = null;*/
        try {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Creating response for command [" + _dbusRequest.getId() + "] " + _dbusRequest.getName());
            }
            // Decide whether to close the connection or not.
            boolean keepAlive = isKeepAlive(_httpRequest);
            HttpResponse response = generateEmptyResponse();
            if (LOG.isDebugEnabled()) {
                //We are debugging -- let's add some more info to the response
                response.addHeader(DatabusHttpHeaders.DATABUS_REQUEST_ID_HEADER, Long.toString(_dbusRequest.getId()));
            }
            // Write the response.
            ChunkedBodyWritableByteChannel responseChannel = null;
            try {
                responseChannel = new ChunkedBodyWritableByteChannel(e.getChannel(), response);
                _dbusRequest.setResponseContent(responseChannel);
                if (LOG.isDebugEnabled()) {
                    LOG.debug("About to run command [" + _dbusRequest.getId() + "] " + _dbusRequest.getName());
                }
                //FIXME   DDS-305: Rework the netty stats collector to use event-based stats aggregation
                /*if (nettyStatsEnabled)
          {
            processRequestCompletion = nettyStats.getRequestHandler_processRequest().startCall();
          }*/
                Future<DatabusRequest> responseFuture = _processorRegistry.run(_dbusRequest);
                ServerContainer.RuntimeConfig config = _dbusRequest.getConfig();
                int timeoutMs = config.getRequestProcessingBudgetMs();
                boolean done = responseFuture.isDone();
                while (!done) {
                    try {
                        responseFuture.get(timeoutMs, TimeUnit.MILLISECONDS);
                        done = true;
                        ctx.setAttachment(_dbusRequest.getCursorPartition());
                    } catch (InterruptedException ie) {
                        done = responseFuture.isDone();
                    } catch (Exception ex) {
                        done = true;
                        _dbusRequest.setError(ex);
                        // On any error, clear any context saved. We will start afresh in a new request.
                        ctx.setAttachment(null);
                    //FIXME   DDS-305: Rework the netty stats collector to use event-based stats aggregation
                    /*if (null != processRequestCompletion)
              {
                processRequestCompletion.endCallWithError(ex);
                processRequestCompletion = null;
              }*/
                    }
                }
            } finally {
                if (null != responseChannel) {
                    if (LOG.isDebugEnabled()) {
                        //Add some more debugging info
                        long curTimeMs = System.currentTimeMillis();
                        responseChannel.addMetadata(DatabusHttpHeaders.DATABUS_REQUEST_LATENCY_HEADER, Long.toString(curTimeMs - _dbusRequest.getCreateTimestampMs()));
                    }
                    responseChannel.close();
                }
                if (null != _dbusRequest.getResponseThrowable()) {
                    ContainerStatisticsCollector statsCollector = _serverContainer.getContainerStatsCollector();
                    if (null != statsCollector) {
                        statsCollector.registerContainerError(_dbusRequest.getResponseThrowable());
                    }
                }
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("Done runing command [" + _dbusRequest.getId() + "] " + _dbusRequest.getName());
            }
            // Close the non-keep-alive or hard-failed connection after the write operation is done.
            if (!keepAlive || null == responseChannel) {
                e.getChannel().close();
            }
        //FIXME   DDS-305: Rework the netty stats collector to use event-based stats aggregation
        /*if (null != callCompletion)
        {
          callCompletion.endCall();
        }*/
        } catch (RuntimeException ex) {
            LOG.error("HttpRequestHandler.writeResponse error", ex);
            //FIXME   DDS-305: Rework the netty stats collector to use event-based stats aggregation
            /*if (null != callCompletion)
        {
          callCompletion.endCallWithError(ex);
        }*/
            ContainerStatisticsCollector statsCollector = _serverContainer.getContainerStatsCollector();
            if (null != statsCollector)
                statsCollector.registerContainerError(ex);
        }
    } else {
        //Pass on everything else
        ctx.sendUpstream(e);
    }
}
Also used : HttpRequest(org.jboss.netty.handler.codec.http.HttpRequest) DefaultHttpResponse(org.jboss.netty.handler.codec.http.DefaultHttpResponse) HttpResponse(org.jboss.netty.handler.codec.http.HttpResponse) DatabusRequest(com.linkedin.databus2.core.container.request.DatabusRequest) ContainerStatisticsCollector(com.linkedin.databus2.core.container.monitoring.mbean.ContainerStatisticsCollector) PhysicalPartition(com.linkedin.databus.core.data_model.PhysicalPartition)
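The handler above treats the Netty ChannelHandlerContext attachment as a per-connection slot for the cursor PhysicalPartition: a partition stashed by a previous request on the same channel is restored into the incoming DatabusRequest, re-stashed when the request completes, and cleared on error. A condensed sketch of that stash/restore pattern, using only calls that appear above (dbusRequest and ctx stand in for the fields used in messageReceived):

// Restore a previously stashed cursor partition into the new request, if any.
if (ctx.getAttachment() instanceof PhysicalPartition) {
    dbusRequest.setCursorPartition((PhysicalPartition) ctx.getAttachment());
}
try {
    // ... run the request and wait for its response future ...
    ctx.setAttachment(dbusRequest.getCursorPartition()); // stash for the next request on this channel
} catch (Exception ex) {
    ctx.setAttachment(null); // on error, drop saved cursor state so the next request starts afresh
}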

Example 20 with PhysicalPartition

use of com.linkedin.databus.core.data_model.PhysicalPartition in project databus by linkedin.

the class TestCheckpointMult method testCursorPosition.

/**
   * Test that the cursor position can be set/retrieved from the CheckpointMult object,
   * but is never serialized.
   * When we change the serialization function to include this in the map, this test will
   * change.
   */
@Test
public void testCursorPosition() throws Exception {
    final PhysicalPartition ppart = new PhysicalPartition(26, "January");
    CheckpointMult cpMult = makeCpMult();
    cpMult.setCursorPartition(ppart);
    assertEquals(ppart, cpMult.getCursorPartition());
    String serialCpMult = cpMult.toString();
    CheckpointMult cpMultCopy = new CheckpointMult(serialCpMult);
    assertNull(cpMultCopy.getCursorPartition());
    // Make sure we are able to decode it, however.
    ObjectMapper mapper = new ObjectMapper();
    Map<String, String> map = mapper.readValue(new ByteArrayInputStream(serialCpMult.getBytes(Charset.defaultCharset())), new TypeReference<Map<String, String>>() {
    });
    map.put("NonJsonKey", "Some value");
    map.put("cursorPartition", ppart.toJsonString());
    ByteArrayOutputStream bs = new ByteArrayOutputStream();
    mapper.writeValue(bs, map);
    cpMultCopy = new CheckpointMult(bs.toString());
    assertEquals(cpMultCopy.getCursorPartition(), ppart);
}
Also used : ByteArrayInputStream(java.io.ByteArrayInputStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) HashMap(java.util.HashMap) Map(java.util.Map) PhysicalPartition(com.linkedin.databus.core.data_model.PhysicalPartition) ObjectMapper(org.codehaus.jackson.map.ObjectMapper) Test(org.testng.annotations.Test) BeforeTest(org.testng.annotations.BeforeTest)

Aggregations

PhysicalPartition (com.linkedin.databus.core.data_model.PhysicalPartition): 47 usages
Test (org.testng.annotations.Test): 22 usages
BeforeTest (org.testng.annotations.BeforeTest): 13 usages
DbusEventsStatisticsCollector (com.linkedin.databus.core.monitoring.mbean.DbusEventsStatisticsCollector): 8 usages
PhysicalPartitionKey (com.linkedin.databus.core.DbusEventBufferMult.PhysicalPartitionKey): 7 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 7 usages
DatabusSubscription (com.linkedin.databus.core.data_model.DatabusSubscription): 6 usages
LogicalSource (com.linkedin.databus.core.data_model.LogicalSource): 6 usages
AllowAllDbusFilter (com.linkedin.databus2.core.filter.AllowAllDbusFilter): 6 usages
EventProducer (com.linkedin.databus2.producers.EventProducer): 6 usages
RelayEventProducer (com.linkedin.databus2.producers.RelayEventProducer): 6 usages
OracleEventProducer (com.linkedin.databus2.producers.db.OracleEventProducer): 6 usages
HashMap (java.util.HashMap): 6 usages
DbusEventsTotalStats (com.linkedin.databus.core.monitoring.mbean.DbusEventsTotalStats): 5 usages
ArrayList (java.util.ArrayList): 5 usages
HashSet (java.util.HashSet): 5 usages
DbusEventBufferMult (com.linkedin.databus.core.DbusEventBufferMult): 4 usages
AggregatedDbusEventsStatisticsCollector (com.linkedin.databus.core.monitoring.mbean.AggregatedDbusEventsStatisticsCollector): 4 usages
ConjunctionDbusFilter (com.linkedin.databus2.core.filter.ConjunctionDbusFilter): 4 usages
DbusFilter (com.linkedin.databus2.core.filter.DbusFilter): 4 usages