
Example 1 with DatabusSubscription

Use of com.linkedin.databus.core.data_model.DatabusSubscription in the databus project by LinkedIn.

Class RelayEventProducer, method createDatabusSourcesConnection:

public static DatabusSourcesConnection createDatabusSourcesConnection(String producerName, int id, String serverName, String subscriptionString, DatabusCombinedConsumer consumer, long internalBufferMaxSize, int largestEventSize, long consumerTimeoutMs, long pollIntervalMs, long connTimeoutMs, int consumerParallelism, boolean blockingBuffer, DatabusClientNettyThreadPools nettyThreadPools, int noEventsTimeoutSec, int maxEventVersion, int initReadBufferSize) throws InvalidConfigException {
    // the assumption here is that the list of subscriptions will become the
    // list of sources hosted by the relay
    Set<ServerInfo> relayServices = createServerInfo(serverName, subscriptionString);
    // null bootstrapService
    Set<ServerInfo> bootstrapServices = null;
    // create subscription objects based on what is required by subscription
    String[] subscriptionList = subscriptionString.split(",");
    List<DatabusSubscription> subsList = DatabusSubscription.createSubscriptionList(Arrays.asList(subscriptionList));
    List<String> sourcesStrList = DatabusSubscription.getStrList(subsList);
    LOG.info("The sourcesList is " + sourcesStrList);
    // create registration objects with consumers
    List<DatabusV2ConsumerRegistration> relayConsumers = createDatabusV2ConsumerRegistration(consumer, sourcesStrList);
    List<DatabusV2ConsumerRegistration> bstConsumers = null;
    // setup sources connection config
    DatabusSourcesConnection.Config confBuilder = new DatabusSourcesConnection.Config();
    confBuilder.setId(id);
    // consume whatever is in relay
    confBuilder.setConsumeCurrent(true);
    // Set to false: when the requested SCN is not found, the client would otherwise read from the latest SCN
    // without clearing the buffer, which can leave gaps in events. Ideally the existing buffer would be
    // cleared first and consumption would then resume from the latest SCN.
    confBuilder.setReadLatestScnOnError(false);
    // consumer time budget, supplied as consumerTimeoutMs
    confBuilder.setConsumerTimeBudgetMs(consumerTimeoutMs);
    // poll interval in ms, with infinite puller retries
    confBuilder.getPullerRetries().setMaxRetryNum(-1);
    confBuilder.getPullerRetries().setInitSleep(pollIntervalMs);
    confBuilder.setConsumerParallelism(consumerParallelism);
    confBuilder.getDispatcherRetries().setMaxRetryNum(1);
    // internal buffer conf
    DbusEventBuffer.Config bufferConf = new DbusEventBuffer.Config();
    bufferConf.setMaxSize(internalBufferMaxSize);
    bufferConf.setAllocationPolicy("DIRECT_MEMORY");
    if (initReadBufferSize > 0) {
        bufferConf.setAverageEventSize(initReadBufferSize);
    }
    bufferConf.setMaxEventSize(largestEventSize);
    //client buffer's scn index- not used
    bufferConf.setScnIndexSize(64 * 1024);
    String queuePolicy = blockingBuffer ? "BLOCK_ON_WRITE" : "OVERWRITE_ON_WRITE";
    bufferConf.setQueuePolicy(queuePolicy);
    //get appropriate checkpointThresholdPct
    double newCkptPct = confBuilder.computeSafeCheckpointThresholdPct(bufferConf);
    if (newCkptPct < 5.0 || newCkptPct > 95.0) {
        LOG.warn("Not setting required checkpointThresholdPct : " + newCkptPct + "to  accommodate largestEventSize= " + largestEventSize + " in buffer of size " + bufferConf.getMaxSize());
        if (newCkptPct <= 0.0) {
            //unlikely to happen: if it does retain default
            newCkptPct = confBuilder.getCheckpointThresholdPct();
        }
        if (newCkptPct < 5.0) {
            newCkptPct = 5.0;
        } else if (newCkptPct > 95.0) {
            newCkptPct = 95.0;
        }
    }
    LOG.info("Setting checkpointThresholdPct:" + newCkptPct);
    confBuilder.setCheckpointThresholdPct(newCkptPct);
    confBuilder.setEventBuffer(bufferConf);
    confBuilder.setNoEventsConnectionResetTimeSec(noEventsTimeoutSec);
    DatabusSourcesConnection.StaticConfig connConfig = confBuilder.build();
    // internal buffers of databus client library
    DbusEventBuffer buffer = new DbusEventBuffer(connConfig.getEventBuffer());
    buffer.start(0);
    DbusEventBuffer bootstrapBuffer = null;
    // Create threadpools and netty managers
    // read - write timeout in ms
    long readTimeoutMs = connTimeoutMs;
    long writeTimeoutMs = connTimeoutMs;
    long bstReadTimeoutMs = connTimeoutMs;
    int protocolVersion = 2;
    // connection factory
    NettyHttpConnectionFactory defaultConnFactory = new NettyHttpConnectionFactory(nettyThreadPools.getBossExecutorService(), nettyThreadPools.getIoExecutorService(), null, nettyThreadPools.getTimer(), writeTimeoutMs, readTimeoutMs, bstReadTimeoutMs, protocolVersion, maxEventVersion, nettyThreadPools.getChannelGroup());
    // Create Thread pool for consumer threads
    int maxThreadsNum = 1;
    int keepAliveMs = 1000;
    ThreadPoolExecutor defaultExecutorService = new OrderedMemoryAwareThreadPoolExecutor(maxThreadsNum, 0, 0, keepAliveMs, TimeUnit.MILLISECONDS);
    ConsumerCallbackStats relayConsumerStats = new ConsumerCallbackStats(id, producerName + ".inbound.cons", producerName + ".inbound.cons", true, false, null, ManagementFactory.getPlatformMBeanServer());
    ConsumerCallbackStats bootstrapConsumerStats = new ConsumerCallbackStats(id, producerName + ".inbound.bs.cons", producerName + ".inbound.bs.cons", true, false, null, ManagementFactory.getPlatformMBeanServer());
    UnifiedClientStats unifiedClientStats = new UnifiedClientStats(id, producerName + ".inbound.unified.cons", producerName + ".inbound.unified.cons", true, false, UnifiedClientStats.DEFAULT_DEADNESS_THRESHOLD_MS, null, ManagementFactory.getPlatformMBeanServer());
    DatabusRelayConnectionFactory relayConnFactory = defaultConnFactory;
    DatabusBootstrapConnectionFactory bootstrapConnFactory = defaultConnFactory;
    ConnectionStateFactory connStateFactory = new ConnectionStateFactory(sourcesStrList);
    DatabusSourcesConnection conn = new DatabusSourcesConnection(
            connConfig, subsList, relayServices, bootstrapServices,
            relayConsumers, bstConsumers, buffer, bootstrapBuffer, defaultExecutorService,
            null,                     // getContainerStatsCollector()
            null,                     // getInboundEventStatisticsCollector()
            null,                     // getBootstrapEventsStatsCollector()
            relayConsumerStats,       // relay callback stats
            bootstrapConsumerStats,   // bootstrap callback stats
            unifiedClientStats,       // combined relay/bootstrap callback stats
            null,                     // getCheckpointPersistenceProvider()
            relayConnFactory, bootstrapConnFactory,
            null,                     // getHttpStatsCollector()
            null,                     // RegistrationId
            null,
            new DbusEventV2Factory(), // TODO Get the ref to factory from HttpRelay.
            connStateFactory);
    return conn;
}
Also used: DatabusV2ConsumerRegistration (com.linkedin.databus.client.consumer.DatabusV2ConsumerRegistration), ServerInfo (com.linkedin.databus.client.pub.ServerInfo), PhysicalSourceStaticConfig (com.linkedin.databus2.relay.config.PhysicalSourceStaticConfig), LogicalSourceStaticConfig (com.linkedin.databus2.relay.config.LogicalSourceStaticConfig), NettyHttpConnectionFactory (com.linkedin.databus.client.netty.NettyHttpConnectionFactory), OrderedMemoryAwareThreadPoolExecutor (org.jboss.netty.handler.execution.OrderedMemoryAwareThreadPoolExecutor), UnifiedClientStats (com.linkedin.databus.client.pub.mbean.UnifiedClientStats), DatabusRelayConnectionFactory (com.linkedin.databus.client.DatabusRelayConnectionFactory), ConsumerCallbackStats (com.linkedin.databus.client.pub.mbean.ConsumerCallbackStats), DatabusSubscription (com.linkedin.databus.core.data_model.DatabusSubscription), DatabusBootstrapConnectionFactory (com.linkedin.databus.client.DatabusBootstrapConnectionFactory), Checkpoint (com.linkedin.databus.core.Checkpoint), DatabusSourcesConnection (com.linkedin.databus.client.DatabusSourcesConnection), DbusEventBuffer (com.linkedin.databus.core.DbusEventBuffer), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), DbusEventV2Factory (com.linkedin.databus.core.DbusEventV2Factory), ConnectionStateFactory (com.linkedin.databus.client.ConnectionStateFactory)
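
For context, here is a minimal sketch (not taken from the databus sources) of how this factory method might be invoked. Every literal value, the producer/server names, and the subscription sources are illustrative assumptions; the consumer and the Netty thread pools are assumed to be supplied by the caller, and the imports are the ones listed above.

public static DatabusSourcesConnection connectToRelay(DatabusCombinedConsumer consumer, DatabusClientNettyThreadPools nettyPools) throws InvalidConfigException {
    // All literal values below are illustrative, not defaults from the databus project.
    return RelayEventProducer.createDatabusSourcesConnection(
            "exampleProducer",          // producerName (assumed)
            1,                          // id
            "localhost:11115",          // serverName: relay host:port (assumed)
            "com.example.source1,com.example.source2", // comma-separated subscription string (assumed)
            consumer,                   // application callback
            10 * 1024 * 1024,           // internalBufferMaxSize: 10 MB
            1024 * 1024,                // largestEventSize: 1 MB
            10000,                      // consumerTimeoutMs: consumer time budget
            100,                        // pollIntervalMs
            5000,                       // connTimeoutMs: read/write timeout
            1,                          // consumerParallelism
            true,                       // blockingBuffer => BLOCK_ON_WRITE queue policy
            nettyPools,                 // shared Netty thread pools
            60,                         // noEventsTimeoutSec
            2,                          // maxEventVersion
            0);                         // initReadBufferSize: <= 0 keeps the default average event size
}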

Example 2 with DatabusSubscription

Use of com.linkedin.databus.core.data_model.DatabusSubscription in the databus project by LinkedIn.

Class DbusModPartitionedFilterFactory, method createServerSideFilter:

@Override
public DbusKeyCompositeFilterConfig createServerSideFilter(DbusClusterInfo cluster, DbusPartitionInfo partition) throws InvalidConfigException {
    DbusKeyCompositeFilterConfig.Config compositeConfig = new DbusKeyCompositeFilterConfig.Config();
    for (DatabusSubscription s : _subscriptions) {
        KeyFilterConfigHolder.Config filterConfig = new KeyFilterConfigHolder.Config();
        filterConfig.setType(PartitionType.MOD.toString());
        filterConfig.getMod().setNumBuckets(cluster.getNumTotalPartitions());
        filterConfig.getMod().setBuckets("[" + partition.getPartitionId() + "]");
        compositeConfig.setFilter(s.getLogicalSource().getName(), filterConfig);
    }
    DbusKeyCompositeFilterConfig c = new DbusKeyCompositeFilterConfig(compositeConfig.build());
    LOG.info("Generated Mod Partitioned Config for partition (" + partition + ") of cluster (" + cluster + ") is :" + c);
    return c;
}
Also used: DbusKeyCompositeFilterConfig (com.linkedin.databus2.core.filter.DbusKeyCompositeFilterConfig), KeyFilterConfigHolder (com.linkedin.databus2.core.filter.KeyFilterConfigHolder), DatabusSubscription (com.linkedin.databus.core.data_model.DatabusSubscription)
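
As a conceptual sketch only (this is not databus code), the mod-partitioned server-side filter configured above keeps an event when its numeric key maps to this partition's bucket, i.e. key mod numTotalPartitions equals the partition id:

// Conceptual illustration of the MOD filter semantics configured above; the real
// DbusKeyCompositeFilter's key extraction and bucket handling may differ in detail.
static boolean acceptedByModFilter(long key, long numTotalPartitions, long partitionId) {
    // Math.floorMod keeps the result non-negative even for negative keys.
    return Math.floorMod(key, numTotalPartitions) == partitionId;
}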

Example 3 with DatabusSubscription

Use of com.linkedin.databus.core.data_model.DatabusSubscription in the databus project by LinkedIn.

Class TestFileSystemCheckpointPersistanceProvider, method testCacheEntryloadCurrentCheckpoint_new:

@Test
public void testCacheEntryloadCurrentCheckpoint_new() throws Exception {
    File checkpointDir = new File("/tmp/databus2-checkpoints-test");
    FileSystemCheckpointPersistenceProvider.Config config = new FileSystemCheckpointPersistenceProvider.Config();
    config.setRootDirectory(checkpointDir.getAbsolutePath());
    config.getRuntime().setHistoryEnabled(false);
    FileSystemCheckpointPersistenceProvider checkpointProvider = new FileSystemCheckpointPersistenceProvider(config, 2);
    List<String> sourceNames = Arrays.asList("source1", "source2");
    List<DatabusSubscription> subs = DatabusSubscription.createSubscriptionList(sourceNames);
    List<String> subsList = checkpointProvider.convertSubsToListOfStrings(subs);
    String streamId = FileSystemCheckpointPersistenceProvider.calcStreamId(subsList);
    assertEquals("cp_source1-source2", streamId);
    File streamFile = new File(checkpointDir, streamId + ".current");
    if (streamFile.exists()) {
        assertTrue(streamFile.delete());
    }
    CacheEntry cacheEntry = checkpointProvider.new CacheEntry(streamId, null);
    Checkpoint checkpoint = cacheEntry.getCheckpoint();
    assertNull(checkpoint);
}
Also used: Checkpoint (com.linkedin.databus.core.Checkpoint), DatabusSubscription (com.linkedin.databus.core.data_model.DatabusSubscription), CacheEntry (com.linkedin.databus.client.pub.FileSystemCheckpointPersistenceProvider.CacheEntry), File (java.io.File), Test (org.testng.annotations.Test)
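
The assertion above pins down the checkpoint stream-id convention: a "cp_" prefix followed by the source names joined with '-'. A hypothetical helper mirroring that convention (not the actual calcStreamId implementation) would be:

// Hypothetical mirror of the naming convention asserted in the test above; the current
// checkpoint file is then expected at <rootDirectory>/<streamId>.current.
static String expectedStreamId(List<String> sourceNames) {
    // e.g. ["source1", "source2"] -> "cp_source1-source2"
    return "cp_" + String.join("-", sourceNames);
}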

Example 4 with DatabusSubscription

Use of com.linkedin.databus.core.data_model.DatabusSubscription in the databus project by LinkedIn.

Class TestFileSystemCheckpointPersistanceProvider, method testCacheEntry_store_load_withroll:

@Test
public void testCacheEntry_store_load_withroll() throws Exception {
    File checkpointDir = new File("/tmp/databus2-checkpoints-test");
    FileSystemCheckpointPersistenceProvider.Config config = new FileSystemCheckpointPersistenceProvider.Config();
    config.setRootDirectory(checkpointDir.getAbsolutePath());
    config.getRuntime().setHistoryEnabled(true);
    config.getRuntime().setHistorySize(10);
    FileSystemCheckpointPersistenceProvider checkpointProvider = new FileSystemCheckpointPersistenceProvider(config, 2);
    List<String> sourceNames = Arrays.asList("source1", "source2", "source3");
    List<DatabusSubscription> subs = DatabusSubscription.createSubscriptionList(sourceNames);
    List<String> subsList = checkpointProvider.convertSubsToListOfStrings(subs);
    String streamId = FileSystemCheckpointPersistenceProvider.calcStreamId(subsList);
    assertEquals("cp_source1-source2-source3", streamId);
    //clean up checkpoint files
    File checkpointFile = new File(checkpointDir, streamId + ".current");
    if (checkpointFile.exists()) {
        assertTrue(checkpointFile.delete());
    }
    for (int i = 0; i < 15; ++i) {
        File f = FileSystemCheckpointPersistenceProvider.StaticConfig.generateCheckpointFile(checkpointDir, streamId + ".", i);
        if (f.exists()) {
            assertTrue(f.delete());
        }
    }
    Checkpoint[] cp = new Checkpoint[15];
    //store checkpoints
    CacheEntry cacheEntry = checkpointProvider.new CacheEntry(streamId, null);
    for (int i = 0; i < 15; ++i) {
        cp[i] = new Checkpoint();
        cp[i].setWindowScn((long) i * i * i);
        cp[i].setWindowOffset(i * i);
        cp[i].setConsumptionMode(DbusClientMode.BOOTSTRAP_SNAPSHOT);
        assertTrue(cacheEntry.setCheckpoint(cp[i]));
        assertTrue(checkpointFile.exists());
        CacheEntry cacheEntry2 = checkpointProvider.new CacheEntry(streamId, null);
        Checkpoint cp2 = cacheEntry2.getCheckpoint();
        assertNotNull(cp2);
        //TODO need to use a Checkpoint.equals() method
        assertEquals(cp[i].getConsumptionMode(), cp2.getConsumptionMode());
        assertEquals(cp[i].getWindowScn(), cp2.getWindowScn());
        assertEquals(cp[i].getPrevScn(), cp2.getPrevScn());
        assertEquals(cp[i].getWindowOffset(), cp2.getWindowOffset());
    }
    //make sure we don't go over history size
    for (int i = 10; i < 15; ++i) {
        File f = FileSystemCheckpointPersistenceProvider.StaticConfig.generateCheckpointFile(checkpointDir, streamId, i);
        assertTrue(!f.exists());
    }
    //check correctness of history
    for (int i = 0; i < 10; ++i) {
        assertTrue(checkpointFile.delete());
        File f = FileSystemCheckpointPersistenceProvider.StaticConfig.generateCheckpointFile(checkpointDir, streamId + ".", i);
        assertTrue(f.renameTo(checkpointFile));
        CacheEntry cacheEntry2 = checkpointProvider.new CacheEntry(streamId, null);
        Checkpoint cp2 = cacheEntry2.getCheckpoint();
        assertNotNull(cp2);
        final int j = 13 - i;
        //TODO need to use a Checkpoint.equals() method
        assertEquals(cp[j].getConsumptionMode(), cp2.getConsumptionMode());
        assertEquals(cp[j].getWindowScn(), cp2.getWindowScn());
        assertEquals(cp[j].getPrevScn(), cp2.getPrevScn());
        assertEquals(cp[j].getWindowOffset(), cp2.getWindowOffset());
    }
}
Also used: Checkpoint (com.linkedin.databus.core.Checkpoint), DatabusSubscription (com.linkedin.databus.core.data_model.DatabusSubscription), CacheEntry (com.linkedin.databus.client.pub.FileSystemCheckpointPersistenceProvider.CacheEntry), File (java.io.File), Test (org.testng.annotations.Test)
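
The repeated field-by-field assertions (and the TODO about a missing Checkpoint.equals()) could be folded into a small test helper. The helper below is hypothetical, not part of databus, and only restates the comparisons already made in the test:

// Hypothetical helper mirroring the per-field checkpoint comparison used in these tests.
static void assertCheckpointsEquivalent(Checkpoint expected, Checkpoint actual) {
    assertNotNull(actual);
    assertEquals(expected.getConsumptionMode(), actual.getConsumptionMode());
    assertEquals(expected.getWindowScn(), actual.getWindowScn());
    assertEquals(expected.getPrevScn(), actual.getPrevScn());
    assertEquals(expected.getWindowOffset(), actual.getWindowOffset());
}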

Example 5 with DatabusSubscription

Use of com.linkedin.databus.core.data_model.DatabusSubscription in the databus project by LinkedIn.

Class TestFileSystemCheckpointPersistanceProvider, method testCacheEntry_store_load_noroll:

@Test
public void testCacheEntry_store_load_noroll() throws Exception {
    File checkpointDir = new File("/tmp/databus2-checkpoints-test");
    FileSystemCheckpointPersistenceProvider.Config config = new FileSystemCheckpointPersistenceProvider.Config();
    config.setRootDirectory(checkpointDir.getAbsolutePath());
    config.getRuntime().setHistoryEnabled(false);
    FileSystemCheckpointPersistenceProvider checkpointProvider = new FileSystemCheckpointPersistenceProvider(config, 2);
    List<String> sourceNames = Arrays.asList("source1", "source2");
    List<DatabusSubscription> subs = DatabusSubscription.createSubscriptionList(sourceNames);
    List<String> subsList = checkpointProvider.convertSubsToListOfStrings(subs);
    String streamId = FileSystemCheckpointPersistenceProvider.calcStreamId(subsList);
    assertEquals("cp_source1-source2", streamId);
    File checkpointFile = new File(checkpointDir, streamId + ".current");
    if (checkpointFile.exists()) {
        assertTrue(checkpointFile.delete());
    }
    //simple checkpoint
    Checkpoint cp1 = new Checkpoint();
    cp1.setFlexible();
    CacheEntry cacheEntry = checkpointProvider.new CacheEntry(streamId, null);
    assertTrue(cacheEntry.setCheckpoint(cp1));
    assertTrue(checkpointFile.exists());
    CacheEntry cacheEntry2 = checkpointProvider.new CacheEntry(streamId, null);
    Checkpoint cp2 = cacheEntry2.getCheckpoint();
    assertNotNull(cp2);
    //TODO need to use a Checkpoint.equals() method
    assertEquals(DbusClientMode.ONLINE_CONSUMPTION, cp2.getConsumptionMode());
    assertTrue(cp2.getFlexible());
    //more complex checkpoint plus overwriting current state
    Checkpoint cp3 = new Checkpoint();
    cp3.setWindowScn(1234L);
    cp3.setWindowOffset(9876);
    cp3.setConsumptionMode(DbusClientMode.ONLINE_CONSUMPTION);
    assertTrue(cacheEntry.setCheckpoint(cp3));
    assertTrue(checkpointFile.exists());
    File cpBackupFile = new File(checkpointDir, streamId + ".oldcurrent");
    assertTrue(cpBackupFile.exists());
    cacheEntry2 = checkpointProvider.new CacheEntry(streamId, null);
    cp2 = cacheEntry2.getCheckpoint();
    assertNotNull(cp2);
    //TODO need to use a Checkpoint.equals() method
    assertEquals(cp3.getConsumptionMode(), cp2.getConsumptionMode());
    assertEquals(cp3.getWindowScn(), cp2.getWindowScn());
    assertEquals(cp3.getPrevScn(), cp2.getPrevScn());
    assertEquals(cp3.getWindowOffset(), cp2.getWindowOffset());
    //make sure the backup still works
    assertTrue(checkpointFile.delete());
    assertTrue(!checkpointFile.exists());
    if (!cpBackupFile.renameTo(checkpointFile))
        LOG.error("file rename failed: " + cpBackupFile.getAbsolutePath() + " --> " + checkpointFile.getAbsolutePath());
    assertTrue(checkpointFile.exists());
    cacheEntry2 = checkpointProvider.new CacheEntry(streamId, null);
    cp2 = cacheEntry2.getCheckpoint();
    assertNotNull(cp2);
    //TODO need to use a Checkpoint.equals() method
    assertEquals(DbusClientMode.ONLINE_CONSUMPTION, cp2.getConsumptionMode());
    assertTrue(cp2.getFlexible());
    //try to keep some stuff around and see if things go south
    Checkpoint cp4 = new Checkpoint();
    cp4.setWindowScn(1111L);
    cp4.setWindowOffset(2222);
    cp4.setConsumptionMode(DbusClientMode.BOOTSTRAP_CATCHUP);
    File newCpFile = new File(checkpointDir, streamId + ".newcheckpoint");
    PrintWriter tmpWriter = new PrintWriter(newCpFile);
    try {
        tmpWriter.println("Dummy");
        assertTrue(cacheEntry.setCheckpoint(cp4));
        assertTrue(checkpointFile.exists());
        cacheEntry2 = checkpointProvider.new CacheEntry(streamId, null);
        cp2 = cacheEntry2.getCheckpoint();
        assertNotNull(cp2);
        //TODO need to use a Checkpoint.equals() method
        assertEquals(cp4.getConsumptionMode(), cp2.getConsumptionMode());
        assertEquals(cp4.getWindowScn(), cp2.getWindowScn());
        assertEquals(cp4.getPrevScn(), cp2.getPrevScn());
        assertEquals(cp4.getWindowOffset(), cp2.getWindowOffset());
    } finally {
        tmpWriter.close();
    }
}
Also used: Checkpoint (com.linkedin.databus.core.Checkpoint), DatabusSubscription (com.linkedin.databus.core.data_model.DatabusSubscription), CacheEntry (com.linkedin.databus.client.pub.FileSystemCheckpointPersistenceProvider.CacheEntry), File (java.io.File), PrintWriter (java.io.PrintWriter), Test (org.testng.annotations.Test)
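
The tests above drive CacheEntry directly to exercise the underlying file handling; client code would normally be expected to go through the provider's checkpoint API instead. A hedged sketch, assuming storeCheckpoint/loadCheckpoint methods on the checkpoint persistence provider (verify the exact signatures against your databus version):

// Hedged sketch: assumes storeCheckpoint/loadCheckpoint exist on the provider.
List<String> sources = Arrays.asList("source1", "source2");
Checkpoint cp = new Checkpoint();
cp.setFlexible();                                 // let the relay pick the starting point
checkpointProvider.storeCheckpoint(sources, cp);  // expected to persist under <rootDirectory>/cp_source1-source2.current
Checkpoint restored = checkpointProvider.loadCheckpoint(sources);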

Aggregations

DatabusSubscription (com.linkedin.databus.core.data_model.DatabusSubscription): 49 usages
Checkpoint (com.linkedin.databus.core.Checkpoint): 30 usages
Test (org.testng.annotations.Test): 28 usages
ArrayList (java.util.ArrayList): 27 usages
IdNamePair (com.linkedin.databus.core.util.IdNamePair): 25 usages
HashMap (java.util.HashMap): 24 usages
List (java.util.List): 23 usages
RegisterResponseEntry (com.linkedin.databus2.core.container.request.RegisterResponseEntry): 21 usages
DatabusV2ConsumerRegistration (com.linkedin.databus.client.consumer.DatabusV2ConsumerRegistration): 20 usages
Logger (org.apache.log4j.Logger): 17 usages
MultiConsumerCallback (com.linkedin.databus.client.consumer.MultiConsumerCallback): 16 usages
StreamConsumerCallbackFactory (com.linkedin.databus.client.consumer.StreamConsumerCallbackFactory): 16 usages
UncaughtExceptionTrackingThread (com.linkedin.databus.core.util.UncaughtExceptionTrackingThread): 16 usages
SelectingDatabusCombinedConsumer (com.linkedin.databus.client.consumer.SelectingDatabusCombinedConsumer): 14 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 13 usages
ServerInfo (com.linkedin.databus.client.pub.ServerInfo): 10 usages
DatabusStreamConsumer (com.linkedin.databus.client.pub.DatabusStreamConsumer): 9 usages
DbusEventBuffer (com.linkedin.databus.core.DbusEventBuffer): 9 usages
DbusKeyCompositeFilterConfig (com.linkedin.databus2.core.filter.DbusKeyCompositeFilterConfig): 9 usages
AbstractDatabusStreamConsumer (com.linkedin.databus.client.consumer.AbstractDatabusStreamConsumer): 8 usages