
Example 16 with LogicalSource

Use of com.linkedin.databus.core.data_model.LogicalSource in project databus by linkedin.

The class SchemaMetaDataManager, method persistSrcNameIdMap.

private void persistSrcNameIdMap() throws IOException {
    File tmpFile = File.createTempFile("srcIdToName", ".map.tmp");
    FileWriter oStream = null;
    try {
        oStream = new FileWriter(tmpFile);
        TreeSet<LogicalSource> srcs = new TreeSet<LogicalSource>(new Comparator<LogicalSource>() {

            @Override
            public int compare(LogicalSource o1, LogicalSource o2) {
                return o1.getId().compareTo(o2.getId());
            }
        });
        srcs.addAll(_idNameRegistry.getAllSources());
        for (LogicalSource k : srcs) {
            oStream.append(k.getId().toString());
            oStream.append(":");
            oStream.append(k.getName());
            oStream.append("\n");
        }
    } finally {
        if (null != oStream)
            oStream.close();
    }
    File destFile = new File(_idNameMapFile);
    boolean success = tmpFile.renameTo(destFile);
    if (!success)
        throw new RuntimeException("Unable to persist the mapping file !!");
}
Also used : TreeSet(java.util.TreeSet) FileWriter(java.io.FileWriter) LogicalSource(com.linkedin.databus.core.data_model.LogicalSource) File(java.io.File)
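
A natural companion to persistSrcNameIdMap is reading the map back. The following is a minimal sketch, not part of the databus source: the method name loadSrcNameIdMap and its error handling are illustrative assumptions; only the one-pair-per-line, colon-separated "id:name" format comes from the writer above.

// Hypothetical reader for the file written by persistSrcNameIdMap.
private static Map<Integer, String> loadSrcNameIdMap(String idNameMapFile) throws IOException {
    Map<Integer, String> idToName = new HashMap<Integer, String>();
    BufferedReader reader = new BufferedReader(new FileReader(idNameMapFile));
    try {
        String line;
        while (null != (line = reader.readLine())) {
            // each line is "<numeric id>:<source name>"; split on the first colon only
            int sep = line.indexOf(':');
            if (sep <= 0)
                continue;
            idToName.put(Integer.valueOf(line.substring(0, sep)), line.substring(sep + 1));
        }
    } finally {
        reader.close();
    }
    return idToName;
}
Also used : BufferedReader(java.io.BufferedReader) FileReader(java.io.FileReader) HashMap(java.util.HashMap) Map(java.util.Map)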

Example 17 with LogicalSource

Use of com.linkedin.databus.core.data_model.LogicalSource in project databus by linkedin.

The class PhysicalSourceConfig, method createFromLogicalSources.

/** Create a physical source config with some default values.
   *  CAUTION - this creates a default physical partition (0);
   *  use only when the actual physical partition parameters are unavailable.
   **/
public static PhysicalSourceConfig createFromLogicalSources(Collection<LogicalSource> srcIds) {
    PhysicalSourceConfig res = new PhysicalSourceConfig();
    List<LogicalSourceConfig> newSources = new ArrayList<LogicalSourceConfig>(srcIds.size());
    for (LogicalSource p : srcIds) {
        LogicalSourceConfig source = new LogicalSourceConfig();
        source.setId(p.getId().shortValue());
        source.setName(p.getName());
        // these two shouldn't ever be used
        source.setPartitionFunction("DefaultPartition");
        source.setUri("defaultUri");
        newSources.add(source);
    }
    res.setSources(newSources);
    res.setName(DEFAULT_PHYSICAL_PARTITION_NAME);
    res.setId(DEFAULT_PHYSICAL_PARTITION);
    res.setUri(DEFAULT_PHYSICAL_SOURCE_URI);
    return res;
}
Also used : ArrayList(java.util.ArrayList) LogicalSource(com.linkedin.databus.core.data_model.LogicalSource)
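
A short usage sketch, with made-up source ids and names; the LogicalSource(int, String) constructor is the same one the tests below use.

// Hypothetical helper showing the call with two placeholder sources.
static PhysicalSourceConfig defaultConfigForTwoSources() {
    // the result carries one LogicalSourceConfig per input source, each with
    // the placeholder partition function and URI, under physical partition 0
    return PhysicalSourceConfig.createFromLogicalSources(
            Arrays.asList(new LogicalSource(101, "source-101"),
                          new LogicalSource(102, "source-102")));
}
Also used : Arrays(java.util.Arrays) LogicalSource(com.linkedin.databus.core.data_model.LogicalSource)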

Example 18 with LogicalSource

Use of com.linkedin.databus.core.data_model.LogicalSource in project databus by linkedin.

The class TestRegisterRequestProcessor, method testDatabusExceptionInGetSchemas.

private void testDatabusExceptionInGetSchemas(final int protoVersion) throws Exception {
    LOG.info("Testing DatabusException in getSchemas() call with protocol version " + protoVersion);
    Properties params = new Properties();
    final int srcId1 = 101;
    final String srcName1 = "source-101";
    final String docSchema1 = "docSchema1";
    final String docSchema2 = "docSchema2";
    final short docSchemaV1 = 1;
    final short docSchemaV2 = 2;
    if (protoVersion != 0) {
        params.setProperty(DatabusHttpHeaders.PROTOCOL_VERSION_PARAM, Integer.toString(protoVersion));
    }
    params.setProperty(RegisterRequestProcessor.SOURCES_PARAM, Integer.toString(srcId1));
    final StringBuilder responseStr = new StringBuilder();
    ChunkedWritableByteChannel chunkedWritableByteChannel = EasyMock.createMock(ChunkedWritableByteChannel.class);
    chunkedWritableByteChannel.addMetadata(EasyMock.anyObject(String.class), EasyMock.anyInt());
    EasyMock.expectLastCall().times(1);
    chunkedWritableByteChannel.write(EasyMock.anyObject(ByteBuffer.class));
    EasyMock.expectLastCall().andAnswer(new IAnswer<Object>() {

        @Override
        public Object answer() throws Throwable {
            Charset charset = Charset.forName("UTF-8");
            CharsetDecoder decoder = charset.newDecoder();
            responseStr.append(decoder.decode((ByteBuffer) EasyMock.getCurrentArguments()[0]));
            return responseStr.length();
        }
    });
    EasyMock.replay(chunkedWritableByteChannel);
    DatabusRequest mockReq = EasyMock.createMock(DatabusRequest.class);
    EasyMock.expect(mockReq.getParams()).andReturn(params).anyTimes();
    EasyMock.replay(mockReq);
    LogicalSource lsrc1 = new LogicalSource(srcId1, srcName1);
    SourceIdNameRegistry mockSrcIdReg = EasyMock.createMock(SourceIdNameRegistry.class);
    EasyMock.expect(mockSrcIdReg.getSource(srcId1)).andReturn(lsrc1).anyTimes();
    EasyMock.replay(mockSrcIdReg);
    Map<Short, String> srcSchemaVersions = new HashMap<Short, String>();
    srcSchemaVersions.put(docSchemaV1, docSchema1);
    srcSchemaVersions.put(docSchemaV2, docSchema2);
    DatabusException expectedCause = new DatabusException("FakeException");
    SchemaRegistryService mockSchemaReg = EasyMock.createMock(SchemaRegistryService.class);
    EasyMock.expect(mockSchemaReg.fetchAllSchemaVersionsBySourceName(srcName1)).andThrow(expectedCause);
    EasyMock.replay(mockSchemaReg);
    HttpRelay mockRelay = EasyMock.createMock(HttpRelay.class);
    EasyMock.expect(mockRelay.getHttpStatisticsCollector()).andReturn(null).anyTimes();
    EasyMock.expect(mockRelay.getSourcesIdNameRegistry()).andReturn(mockSrcIdReg).anyTimes();
    EasyMock.expect(mockRelay.getSchemaRegistryService()).andReturn(mockSchemaReg).anyTimes();
    EasyMock.replay(mockRelay);
    RegisterRequestProcessor reqProcessor = new RegisterRequestProcessor(null, mockRelay);
    boolean exceptionCaught = false;
    try {
        reqProcessor.process(mockReq);
    } catch (RequestProcessingException e) {
        exceptionCaught = true;
        Assert.assertEquals(expectedCause, e.getCause());
    }
    // without this assertion the test would pass even if process() never threw
    Assert.assertTrue(exceptionCaught);
    EasyMock.verify(mockRelay);
    EasyMock.verify(mockReq);
    EasyMock.verify(mockSchemaReg);
    EasyMock.verify(mockSrcIdReg);
}
Also used : ChunkedWritableByteChannel(com.linkedin.databus2.core.container.ChunkedWritableByteChannel) HashMap(java.util.HashMap) LogicalSource(com.linkedin.databus.core.data_model.LogicalSource) Properties(java.util.Properties) RequestProcessingException(com.linkedin.databus2.core.container.request.RequestProcessingException) CharsetDecoder(java.nio.charset.CharsetDecoder) SchemaRegistryService(com.linkedin.databus2.schemas.SchemaRegistryService) Charset(java.nio.charset.Charset) SourceIdNameRegistry(com.linkedin.databus2.schemas.SourceIdNameRegistry) ByteBuffer(java.nio.ByteBuffer) DatabusRequest(com.linkedin.databus2.core.container.request.DatabusRequest) DatabusException(com.linkedin.databus2.core.DatabusException) RegisterRequestProcessor(com.linkedin.databus.container.request.RegisterRequestProcessor)
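
The anonymous IAnswer that decodes each written ByteBuffer into responseStr reappears verbatim in the next example; a small helper along these lines (hypothetical, not in the databus source) would factor the capture pattern out:

// Decode whatever ByteBuffer the code under test writes and append it to sink.
private static IAnswer<Object> captureWriteTo(final StringBuilder sink) {
    return new IAnswer<Object>() {
        @Override
        public Object answer() throws Throwable {
            ByteBuffer buf = (ByteBuffer) EasyMock.getCurrentArguments()[0];
            sink.append(Charset.forName("UTF-8").newDecoder().decode(buf));
            return sink.length();
        }
    };
}
// usage: EasyMock.expectLastCall().andAnswer(captureWriteTo(responseStr));
Also used : IAnswer(org.easymock.IAnswer) ByteBuffer(java.nio.ByteBuffer) Charset(java.nio.charset.Charset)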

Example 19 with LogicalSource

Use of com.linkedin.databus.core.data_model.LogicalSource in project databus by linkedin.

The class TestRegisterRequestProcessor, method testV4RegisterRequestProcessor.

// Test the happy path where there are two versions of the document schema of a table and two versions of the
// metadata schema in the registry.
@Test
public void testV4RegisterRequestProcessor() throws Exception {
    Properties params = new Properties();
    final int protoVersion = 4;
    final int srcId1 = 101;
    final String srcName1 = "source-101";
    final String docSchema1 = "docSchema1";
    final String docSchema2 = "docSchema2";
    final String metadataSchema1 = makeMetadataSchema(1);
    final String metadataSchema2 = makeMetadataSchema(2);
    final byte[] metaSchemaDigest1 = new byte[] { 32, 33, 34, 35 };
    final byte[] metaSchemaDigest2 = new byte[] { 35, 34, 33, 32 };
    final short docSchemaV1 = 1;
    final short docSchemaV2 = 2;
    final short metaSchemaV1 = 1;
    final short metaSchemaV2 = 2;
    params.setProperty(DatabusHttpHeaders.PROTOCOL_VERSION_PARAM, Integer.toString(protoVersion));
    params.setProperty(RegisterRequestProcessor.SOURCES_PARAM, Integer.toString(srcId1));
    final StringBuilder responseStr = new StringBuilder();
    ChunkedWritableByteChannel chunkedWritableByteChannel = EasyMock.createMock(ChunkedWritableByteChannel.class);
    chunkedWritableByteChannel.addMetadata(EasyMock.eq(DatabusHttpHeaders.DBUS_CLIENT_RELAY_PROTOCOL_VERSION_HDR), EasyMock.eq(protoVersion));
    chunkedWritableByteChannel.write(EasyMock.anyObject(ByteBuffer.class));
    EasyMock.expectLastCall().andAnswer(new IAnswer<Object>() {

        @Override
        public Object answer() throws Throwable {
            Charset charset = Charset.forName("UTF-8");
            CharsetDecoder decoder = charset.newDecoder();
            responseStr.append(decoder.decode((ByteBuffer) EasyMock.getCurrentArguments()[0]));
            return responseStr.length();
        }
    });
    EasyMock.replay(chunkedWritableByteChannel);
    DatabusRequest mockReq = EasyMock.createMock(DatabusRequest.class);
    EasyMock.expect(mockReq.getParams()).andReturn(params).anyTimes();
    EasyMock.expect(mockReq.getResponseContent()).andReturn(chunkedWritableByteChannel);
    EasyMock.replay(mockReq);
    LogicalSource lsrc1 = new LogicalSource(srcId1, srcName1);
    SourceIdNameRegistry mockSrcIdReg = EasyMock.createMock(SourceIdNameRegistry.class);
    EasyMock.expect(mockSrcIdReg.getSource(srcId1)).andReturn(lsrc1).anyTimes();
    EasyMock.replay(mockSrcIdReg);
    Map<Short, String> srcSchemaVersions = new HashMap<Short, String>();
    srcSchemaVersions.put(docSchemaV1, docSchema1);
    srcSchemaVersions.put(docSchemaV2, docSchema2);
    VersionedSchemaSet metadataSchemaSet = new VersionedSchemaSet();
    metadataSchemaSet.add(SchemaRegistryService.DEFAULT_METADATA_SCHEMA_SOURCE, metaSchemaV1, new SchemaId(metaSchemaDigest1), metadataSchema1, true);
    metadataSchemaSet.add(SchemaRegistryService.DEFAULT_METADATA_SCHEMA_SOURCE, metaSchemaV2, new SchemaId(metaSchemaDigest2), metadataSchema2, true);
    SchemaRegistryService mockSchemaReg = EasyMock.createMock(SchemaRegistryService.class);
    EasyMock.expect(mockSchemaReg.fetchAllSchemaVersionsBySourceName(srcName1)).andReturn(srcSchemaVersions).anyTimes();
    EasyMock.expect(mockSchemaReg.fetchAllMetadataSchemaVersions()).andReturn(metadataSchemaSet).anyTimes();
    EasyMock.replay(mockSchemaReg);
    HttpRelay mockRelay = EasyMock.createMock(HttpRelay.class);
    EasyMock.expect(mockRelay.getHttpStatisticsCollector()).andReturn(null).anyTimes();
    EasyMock.expect(mockRelay.getSourcesIdNameRegistry()).andReturn(mockSrcIdReg).anyTimes();
    EasyMock.expect(mockRelay.getSchemaRegistryService()).andReturn(mockSchemaReg).anyTimes();
    EasyMock.replay(mockRelay);
    RegisterRequestProcessor reqProcessor = new RegisterRequestProcessor(null, mockRelay);
    reqProcessor.process(mockReq);
    // Decode
    ObjectMapper mapper = new ObjectMapper();
    HashMap<String, List<Object>> responseMap = mapper.readValue(responseStr.toString(), new TypeReference<HashMap<String, List<Object>>>() {
    });
    Map<Long, List<RegisterResponseEntry>> sourcesSchemasMap = RegisterResponseEntry.createFromResponse(responseMap, RegisterResponseEntry.SOURCE_SCHEMAS_KEY, false);
    // There should be one entry in the map: a list with both document schema versions.
    Assert.assertEquals(1, sourcesSchemasMap.size());
    Assert.assertEquals(2, sourcesSchemasMap.get(new Long(srcId1)).size());
    for (RegisterResponseEntry r : sourcesSchemasMap.get(new Long(srcId1))) {
        Assert.assertEquals(srcId1, r.getId());
        if (r.getVersion() == docSchemaV1) {
            Assert.assertEquals(docSchema1, r.getSchema());
        } else {
            Assert.assertEquals(docSchema2, r.getSchema());
        }
    }
    Map<Long, List<RegisterResponseEntry>> keysSchemasMap = RegisterResponseEntry.createFromResponse(responseMap, RegisterResponseEntry.KEY_SCHEMAS_KEY, true);
    Assert.assertNull(keysSchemasMap);
    List<RegisterResponseMetadataEntry> metadataSchemasList = RegisterResponseMetadataEntry.createFromResponse(responseMap, RegisterResponseMetadataEntry.METADATA_SCHEMAS_KEY, true);
    // The response should contain the exact string that the schema registry has.
    Assert.assertEquals(2, metadataSchemasList.size());
    for (RegisterResponseMetadataEntry r : metadataSchemasList) {
        if (r.getVersion() == 1) {
            Assert.assertEquals(metadataSchema1, r.getSchema());
            Assert.assertTrue(Arrays.equals(metaSchemaDigest1, r.getCrc32()));
        } else {
            Assert.assertEquals(metadataSchema2, r.getSchema());
            Assert.assertTrue(Arrays.equals(metaSchemaDigest2, r.getCrc32()));
        }
    }
    EasyMock.verify(mockRelay);
    EasyMock.verify(mockReq);
    EasyMock.verify(mockSchemaReg);
    EasyMock.verify(mockSrcIdReg);
}
Also used : ChunkedWritableByteChannel(com.linkedin.databus2.core.container.ChunkedWritableByteChannel) HashMap(java.util.HashMap) LogicalSource(com.linkedin.databus.core.data_model.LogicalSource) Properties(java.util.Properties) VersionedSchemaSet(com.linkedin.databus2.schemas.VersionedSchemaSet) List(java.util.List) ObjectMapper(org.codehaus.jackson.map.ObjectMapper) CharsetDecoder(java.nio.charset.CharsetDecoder) SchemaRegistryService(com.linkedin.databus2.schemas.SchemaRegistryService) Charset(java.nio.charset.Charset) SourceIdNameRegistry(com.linkedin.databus2.schemas.SourceIdNameRegistry) ByteBuffer(java.nio.ByteBuffer) DatabusRequest(com.linkedin.databus2.core.container.request.DatabusRequest) RegisterResponseMetadataEntry(com.linkedin.databus2.core.container.request.RegisterResponseMetadataEntry) RegisterRequestProcessor(com.linkedin.databus.container.request.RegisterRequestProcessor) SchemaId(com.linkedin.databus2.schemas.SchemaId) RegisterResponseEntry(com.linkedin.databus2.core.container.request.RegisterResponseEntry) Test(org.testng.annotations.Test)
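
Isolated from the test above, the metadata-registry setup is just a pair of VersionedSchemaSet calls. A minimal sketch with placeholder schema strings and digests; only the add(...) call already used in the test is assumed, and the final flag presumably asks the set to keep the exact schema string, which the verbatim-response assertion above depends on.

VersionedSchemaSet metadataSchemas = new VersionedSchemaSet();
// one entry per metadata schema version, keyed by the well-known metadata source name
metadataSchemas.add(SchemaRegistryService.DEFAULT_METADATA_SCHEMA_SOURCE,
                    (short) 1,                                // schema version
                    new SchemaId(new byte[] { 1, 2, 3, 4 }),  // digest of the schema text (placeholder)
                    "placeholderMetadataSchemaV1",            // schema string (placeholder)
                    true);                                    // keep the original string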

Example 20 with LogicalSource

Use of com.linkedin.databus.core.data_model.LogicalSource in project databus by linkedin.

The class TestDbusEventBufferMult, method testSubscriptionStream.

@Test
public void testSubscriptionStream() throws Exception {
    final Logger log = Logger.getLogger("TestDbusEventBufferMult.testSubscriptionStream");
    log.info("start");
    TestSetup t = new TestSetup();
    PhysicalPartition pp100 = new PhysicalPartition(100, "multBufferTest1");
    PhysicalPartitionKey pk1 = new PhysicalPartitionKey(pp100);
    PhysicalPartition pp101 = new PhysicalPartition(101, "multBufferTest2");
    PhysicalPartitionKey pk2 = new PhysicalPartitionKey(pp101);
    //generate events in pp100
    byte[] schema = "abcdefghijklmnop".getBytes(Charset.defaultCharset());
    DbusEventBufferAppendable buf100 = t._eventBuffer.getDbusEventBufferAppendable(pp100);
    buf100.startEvents();
    assertTrue(buf100.appendEvent(new DbusEventKey(1), (short) 100, (short) 0, System.currentTimeMillis() * 1000000, (short) 1, schema, new byte[100], false, null));
    assertTrue(buf100.appendEvent(new DbusEventKey(10), (short) 100, (short) 0, System.currentTimeMillis() * 1000000, (short) 1, schema, new byte[100], false, null));
    assertTrue(buf100.appendEvent(new DbusEventKey(11), (short) 100, (short) 0, System.currentTimeMillis() * 1000000, (short) 1, schema, new byte[100], false, null));
    assertTrue(buf100.appendEvent(new DbusEventKey(2), (short) 100, (short) 0, System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    buf100.endEvents(100, null);
    buf100.startEvents();
    assertTrue(buf100.appendEvent(new DbusEventKey(3), (short) 100, (short) 0, System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    assertTrue(buf100.appendEvent(new DbusEventKey(4), (short) 100, (short) 1, System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    buf100.endEvents(200, null);
    //generate events in pp101
    DbusEventBufferAppendable buf101 = t._eventBuffer.getDbusEventBufferAppendable(pp101);
    buf101.startEvents();
    assertTrue(buf101.appendEvent(new DbusEventKey(51), (short) 101, (short) 0, System.currentTimeMillis() * 1000000, (short) 11, schema, new byte[100], false, null));
    assertTrue(buf101.appendEvent(new DbusEventKey(52), (short) 101, (short) 0, System.currentTimeMillis() * 1000000, (short) 12, schema, new byte[100], false, null));
    assertTrue(buf101.appendEvent(new DbusEventKey(53), (short) 101, (short) 2, System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    buf101.endEvents(120, null);
    buf101.startEvents();
    assertTrue(buf101.appendEvent(new DbusEventKey(54), (short) 101, (short) 2, System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    assertTrue(buf101.appendEvent(new DbusEventKey(55), (short) 101, (short) 2, System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    assertTrue(buf101.appendEvent(new DbusEventKey(56), (short) 101, (short) 2, System.currentTimeMillis() * 1000000, (short) 2, schema, new byte[100], false, null));
    buf101.endEvents(200, null);
    //initialization
    DatabusSubscription sub1 = DatabusSubscription.createPhysicalPartitionReplicationSubscription(new PhysicalPartition(100, "multBufferTest1"));
    DbusFilter filter1 = t._eventBuffer.constructFilters(Arrays.asList(sub1));
    assertNotNull(filter1);
    CheckpointMult cpMult1 = new CheckpointMult();
    Checkpoint cp100 = new Checkpoint();
    cp100.init();
    cp100.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
    cp100.setWindowScn(10L);
    cp100.setWindowOffset(-1);
    cpMult1.addCheckpoint(pp100, cp100);
    String[] pnames = { "multBufferTest1:100", "multBufferTest2:101" };
    StatsCollectors<DbusEventsStatisticsCollector> statsColls1 = createStats(pnames);
    DbusEventsStatisticsCollector statsCol1 = statsColls1.getStatsCollector("multBufferTest1:100");
    DbusEventsStatisticsCollector statsCol2 = statsColls1.getStatsCollector("multBufferTest2:101");
    //read an entire buffer
    DbusEventBufferBatchReadable reader1 = t._eventBuffer.getDbusEventBufferBatchReadable(cpMult1, Arrays.asList(pk1), statsColls1);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    // Try a call with a 20-byte fetch size; we should see the full event size reported in the first return, with 0 events read.
    StreamEventsResult result = reader1.streamEvents(false, 20, Channels.newChannel(baos), Encoding.BINARY, filter1);
    assertEquals(0, result.getNumEventsStreamed());
    assertEquals(161, result.getSizeOfPendingEvent());
    result = reader1.streamEvents(false, 1000000, Channels.newChannel(baos), Encoding.BINARY, filter1);
    int eventsRead = result.getNumEventsStreamed();
    //4 events + 1 eop + 2 events + 1 eop
    assertEquals(eventsRead, 8);
    assertEquals(statsColls1.getStatsCollector("multBufferTest1:100").getTotalStats().getNumSysEvents(), 2);
    assertEquals(statsColls1.getStatsCollector("multBufferTest1:100").getTotalStats().getNumDataEvents(), 6);
    assertEquals(result.getSizeOfPendingEvent(), 0, "Size of pending event not zero");
    // Now that we have read all the events, we should not see a pending event even if we offer a small fetch size.
    result = reader1.streamEvents(false, 20, Channels.newChannel(baos), Encoding.BINARY, filter1);
    assertEquals(0, result.getNumEventsStreamed(), "There should be no more events in the buffer now");
    assertEquals(0, result.getSizeOfPendingEvent(), "We should not see pending event size since there are no events in buffer");
    baos.reset();
    statsCol1.reset();
    statsCol2.reset();
    //read from two buffers, filtering out one
    cpMult1 = new CheckpointMult();
    cp100.init();
    cp100.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
    cp100.setWindowScn(10L);
    cp100.setWindowOffset(-1);
    cpMult1.addCheckpoint(pp100, cp100);
    reader1 = t._eventBuffer.getDbusEventBufferBatchReadable(cpMult1, Arrays.asList(pk1, pk2), statsColls1);
    eventsRead = reader1.streamEvents(false, 1000000, Channels.newChannel(baos), Encoding.BINARY, filter1).getNumEventsStreamed();
    //4 events + 1 eop + 1 eop from the other buffer + 2 events + 1 eop + 1 eop from the other buffer
    assertEquals(eventsRead, 10);
    assertEquals(statsColls1.getStatsCollector("multBufferTest1:100").getTotalStats().getNumSysEvents(), 2);
    assertEquals(statsColls1.getStatsCollector("multBufferTest1:100").getTotalStats().getNumDataEvents(), 6);
    baos.reset();
    statsCol1.reset();
    //read from one buffer and one source partition
    DatabusSubscription sub2 = new DatabusSubscription(PhysicalSource.MASTER_PHISYCAL_SOURCE, new PhysicalPartition(101, "multBufferTest2"), new LogicalSourceId(new LogicalSource(2, "srcName2"), (short) 2));
    DbusFilter filter2 = t._eventBuffer.constructFilters(Arrays.asList(sub2));
    assertNotNull(filter2);
    CheckpointMult cpMult2 = new CheckpointMult();
    Checkpoint cp101 = new Checkpoint();
    cp101.init();
    cp101.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
    cp101.setWindowScn(10L);
    cp101.setWindowOffset(-1);
    cpMult2.addCheckpoint(pp101, cp101);
    DbusEventBufferBatchReadable reader2 = t._eventBuffer.getDbusEventBufferBatchReadable(cpMult2, Arrays.asList(pk2), statsColls1);
    eventsRead = reader2.streamEvents(false, 1000000, Channels.newChannel(baos), Encoding.BINARY, filter2).getNumEventsStreamed();
    //1 event + 1 eop + 3 events + 1 eop
    assertEquals(eventsRead, 6);
    baos.reset();
    statsCol1.reset();
    statsCol2.reset();
    //read all partitions for a source
    DatabusSubscription sub3 = new DatabusSubscription(PhysicalSource.MASTER_PHISYCAL_SOURCE, PhysicalPartition.ANY_PHYSICAL_PARTITION, LogicalSourceId.createAllPartitionsWildcard(new LogicalSource(2, "srcName2")));
    DbusFilter filter3 = t._eventBuffer.constructFilters(Arrays.asList(sub3));
    assertNotNull(filter3);
    CheckpointMult cpMult3 = new CheckpointMult();
    cp100.init();
    cp100.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
    cp100.setWindowScn(10L);
    cp100.setWindowOffset(-1);
    // register both checkpoints with cpMult3, which backs reader3 below
    cpMult3.addCheckpoint(pp100, cp100);
    cp101.init();
    cp101.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
    cp101.setWindowScn(10L);
    cp101.setWindowOffset(-1);
    cpMult3.addCheckpoint(pp101, cp101);
    DbusEventBufferBatchReadable reader3 = t._eventBuffer.getDbusEventBufferBatchReadable(cpMult3, Arrays.asList(pk1, pk2), statsColls1);
    eventsRead = reader3.streamEvents(false, 1000000, Channels.newChannel(baos), Encoding.BINARY, filter3).getNumEventsStreamed();
    //1 event + 1 eop + 1 event + 1 eop + 2 events + 1 eop + 3 events + 1 eop
    assertEquals(eventsRead, 11);
    assertEquals(statsColls1.getStatsCollector("multBufferTest1:100").getTotalStats().getNumSysEvents(), 2);
    assertEquals(statsColls1.getStatsCollector("multBufferTest2:101").getTotalStats().getNumSysEvents(), 2);
    assertEquals(statsColls1.getStatsCollector("multBufferTest1:100").getTotalStats().getNumDataEventsFiltered(), 3);
    assertEquals(statsColls1.getStatsCollector("multBufferTest2:101").getTotalStats().getNumDataEventsFiltered(), 4);
    baos.reset();
    statsCol1.reset();
    statsCol2.reset();
    log.info("end");
}
Also used : LogicalSourceId(com.linkedin.databus.core.data_model.LogicalSourceId) DbusEventsStatisticsCollector(com.linkedin.databus.core.monitoring.mbean.DbusEventsStatisticsCollector) AggregatedDbusEventsStatisticsCollector(com.linkedin.databus.core.monitoring.mbean.AggregatedDbusEventsStatisticsCollector) ByteArrayOutputStream(java.io.ByteArrayOutputStream) LogicalSource(com.linkedin.databus.core.data_model.LogicalSource) Logger(org.apache.log4j.Logger) DatabusSubscription(com.linkedin.databus.core.data_model.DatabusSubscription) ConjunctionDbusFilter(com.linkedin.databus2.core.filter.ConjunctionDbusFilter) SourceDbusFilter(com.linkedin.databus2.core.filter.SourceDbusFilter) AllowAllDbusFilter(com.linkedin.databus2.core.filter.AllowAllDbusFilter) LogicalSourceAndPartitionDbusFilter(com.linkedin.databus2.core.filter.LogicalSourceAndPartitionDbusFilter) PhysicalPartitionDbusFilter(com.linkedin.databus2.core.filter.PhysicalPartitionDbusFilter) DbusFilter(com.linkedin.databus2.core.filter.DbusFilter) PhysicalPartitionKey(com.linkedin.databus.core.DbusEventBufferMult.PhysicalPartitionKey) PhysicalPartition(com.linkedin.databus.core.data_model.PhysicalPartition) Test(org.testng.annotations.Test) BeforeTest(org.testng.annotations.BeforeTest)
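
Condensed from the test, these are the three subscription shapes it streams with; every constructor and factory call here appears in the test itself.

// 1) everything in one physical partition
DatabusSubscription wholePartition = DatabusSubscription.createPhysicalPartitionReplicationSubscription(
        new PhysicalPartition(100, "multBufferTest1"));
// 2) one logical source partition within one physical partition
DatabusSubscription onePartitionOneSource = new DatabusSubscription(
        PhysicalSource.MASTER_PHISYCAL_SOURCE, // sic: the identifier is spelled this way in databus
        new PhysicalPartition(101, "multBufferTest2"),
        new LogicalSourceId(new LogicalSource(2, "srcName2"), (short) 2));
// 3) one logical source across all physical partitions
DatabusSubscription allPartitionsOfSource = new DatabusSubscription(
        PhysicalSource.MASTER_PHISYCAL_SOURCE,
        PhysicalPartition.ANY_PHYSICAL_PARTITION,
        LogicalSourceId.createAllPartitionsWildcard(new LogicalSource(2, "srcName2")));
// each subscription list is turned into a DbusFilter via constructFilters(...) before streaming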

Aggregations

LogicalSource (com.linkedin.databus.core.data_model.LogicalSource) 22
ArrayList (java.util.ArrayList) 7
PhysicalPartition (com.linkedin.databus.core.data_model.PhysicalPartition) 6
ObjectMapper (org.codehaus.jackson.map.ObjectMapper) 6
DatabusSubscription (com.linkedin.databus.core.data_model.DatabusSubscription) 5
IdNamePair (com.linkedin.databus.core.util.IdNamePair) 5
ChunkedWritableByteChannel (com.linkedin.databus2.core.container.ChunkedWritableByteChannel) 5
SchemaRegistryService (com.linkedin.databus2.schemas.SchemaRegistryService) 5
SourceIdNameRegistry (com.linkedin.databus2.schemas.SourceIdNameRegistry) 5
HashMap (java.util.HashMap) 5
Test (org.testng.annotations.Test) 5
RegisterRequestProcessor (com.linkedin.databus.container.request.RegisterRequestProcessor) 4
DatabusRequest (com.linkedin.databus2.core.container.request.DatabusRequest) 4
InvalidRequestParamValueException (com.linkedin.databus2.core.container.request.InvalidRequestParamValueException) 4
ByteBuffer (java.nio.ByteBuffer) 4
Charset (java.nio.charset.Charset) 4
CharsetDecoder (java.nio.charset.CharsetDecoder) 4
Properties (java.util.Properties) 4
BeforeTest (org.testng.annotations.BeforeTest) 4
Checkpoint (com.linkedin.databus.core.Checkpoint) 3