
Example 11 with SchemaRegistryService

Use of com.linkedin.databus2.schemas.SchemaRegistryService in project databus by linkedin.

In class DatabusRelayMain, method addOneProducer:

/** overrides HTTP relay method */
@Override
public void addOneProducer(PhysicalSourceStaticConfig pConfig) throws DatabusException, EventCreationException, UnsupportedKeyException, SQLException, InvalidConfigException {
    // Register a command to allow start/stop/status of the relay
    List<EventProducer> plist = new ArrayList<EventProducer>();
    PhysicalPartition pPartition = pConfig.getPhysicalPartition();
    MaxSCNReaderWriter maxScnReaderWriters = _maxScnReaderWriters.getOrCreateHandler(pPartition);
    LOG.info("Starting server container with maxScnReaderWriter:" + maxScnReaderWriters);
    // Get the event buffer
    DbusEventBufferAppendable dbusEventBuffer = getEventBuffer().getDbusEventBufferAppendable(pPartition);
    // Get the schema registry service
    SchemaRegistryService schemaRegistryService = getSchemaRegistryService();
    // Get a stats collector per physical source
    addPhysicalPartitionCollectors(pPartition);
    String statsCollectorName = pPartition.toSimpleString();
    /*
     * _inBoundStatsCollectors.addStatsCollector(statsCollectorName, new
     * DbusEventsStatisticsCollector(getContainerStaticConfig().getId(),
     * statsCollectorName + ".inbound", true, false, getMbeanServer()));
     *
     * _outBoundStatsCollectors.addStatsCollector(statsCollectorName, new
     * DbusEventsStatisticsCollector(getContainerStaticConfig().getId(),
     * statsCollectorName + ".outbound", true, false, getMbeanServer()));
     */
    // Create the event producer
    String uri = pConfig.getUri();
    if (uri == null)
        throw new DatabusException("Uri is required to start the relay");
    uri = uri.trim();
    EventProducer producer = null;
    if (uri.startsWith("jdbc:")) {
        SourceType sourceType = pConfig.getReplBitSetter().getSourceType();
        if (SourceType.TOKEN.equals(sourceType))
            throw new DatabusException("Token Source-type for Replication bit setter config cannot be set for trigger-based Databus relay !!");
        producer = new OracleEventProducerFactory().buildEventProducer(pConfig, schemaRegistryService, dbusEventBuffer, getMbeanServer(), _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
    } else if (uri.startsWith("mock")) {
        // Get all relevant pConfig attributes
        //TODO add real instantiation
        EventProducerServiceProvider mockProvider = _producersRegistry.getEventProducerServiceProvider("mock");
        if (null == mockProvider) {
            throw new DatabusRuntimeException("relay event producer not available: " + "mock");
        }
        producer = mockProvider.createProducer(pConfig, schemaRegistryService, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
    } else if (uri.startsWith("gg:")) {
        producer = new GoldenGateEventProducer(pConfig, schemaRegistryService, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
    } else if (uri.startsWith("mysql:")) {
        LOG.info("Adding OpenReplicatorEventProducer for uri :" + uri);
        final String serviceName = "or";
        EventProducerServiceProvider orProvider = _producersRegistry.getEventProducerServiceProvider(serviceName);
        if (null == orProvider) {
            throw new DatabusRuntimeException("relay event producer not available: " + serviceName);
        }
        producer = orProvider.createProducer(pConfig, schemaRegistryService, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters);
    } else {
        // Get all relevant pConfig attributes and initialize the nettyThreadPool objects
        RelayEventProducer.DatabusClientNettyThreadPools nettyThreadPools = new RelayEventProducer.DatabusClientNettyThreadPools(0, getNetworkTimeoutTimer(), getBossExecutorService(), getIoExecutorService(), getHttpChannelGroup());
        producer = new RelayEventProducer(pConfig, dbusEventBuffer, _inBoundStatsCollectors.getStatsCollector(statsCollectorName), maxScnReaderWriters, nettyThreadPools);
    }
    // if a buffer for this partition exists, we are overwriting it
    _producers.put(pPartition, producer);
    plist.add(producer);
    // append 'monitoring event producer'
    if (producer instanceof OracleEventProducer) {
        MonitoringEventProducer monitoringProducer = new MonitoringEventProducer("dbMonitor." + pPartition.toSimpleString(), pConfig.getName(), pConfig.getUri(), ((OracleEventProducer) producer).getMonitoredSourceInfos(), getMbeanServer());
        _monitoringProducers.put(pPartition, monitoringProducer);
        plist.add(monitoringProducer);
    }
    if (_csEventRequestProcessor == null)
        _csEventRequestProcessor = new ControlSourceEventsRequestProcessor(null, this, plist);
    else
        _csEventRequestProcessor.addEventProducers(plist);
    RequestProcessorRegistry processorRegistry = getProcessorRegistry();
    processorRegistry.reregister(ControlSourceEventsRequestProcessor.COMMAND_NAME, _csEventRequestProcessor);
}
Also used: MaxSCNReaderWriter(com.linkedin.databus2.core.seq.MaxSCNReaderWriter) DbusEventBufferAppendable(com.linkedin.databus.core.DbusEventBufferAppendable) SchemaRegistryService(com.linkedin.databus2.schemas.SchemaRegistryService) SourceType(com.linkedin.databus2.relay.config.ReplicationBitSetterStaticConfig.SourceType) ArrayList(java.util.ArrayList) EventProducer(com.linkedin.databus2.producers.EventProducer) OracleEventProducer(com.linkedin.databus2.producers.db.OracleEventProducer) RelayEventProducer(com.linkedin.databus2.producers.RelayEventProducer) EventProducerServiceProvider(com.linkedin.databus2.producers.EventProducerServiceProvider) ControlSourceEventsRequestProcessor(com.linkedin.databus.container.request.ControlSourceEventsRequestProcessor) DatabusException(com.linkedin.databus2.core.DatabusException) RequestProcessorRegistry(com.linkedin.databus2.core.container.request.RequestProcessorRegistry) PhysicalPartition(com.linkedin.databus.core.data_model.PhysicalPartition) DatabusRuntimeException(com.linkedin.databus.core.DatabusRuntimeException)
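
For reference, a minimal, self-contained sketch of the URI-scheme dispatch this method performs. ProducerKind and pickProducerKind are hypothetical names for illustration only; the real method instantiates concrete EventProducer implementations rather than returning an enum.

public final class ProducerUriDispatch {

    enum ProducerKind { ORACLE, MOCK, GOLDEN_GATE, OPEN_REPLICATOR, RELAY_CHAINING }

    static ProducerKind pickProducerKind(String uri) {
        if (uri == null) {
            throw new IllegalArgumentException("Uri is required to start the relay");
        }
        uri = uri.trim();
        if (uri.startsWith("jdbc:")) return ProducerKind.ORACLE;           // trigger-based Oracle source
        if (uri.startsWith("mock")) return ProducerKind.MOCK;              // test producer from the service registry
        if (uri.startsWith("gg:")) return ProducerKind.GOLDEN_GATE;        // GoldenGate trail-file producer
        if (uri.startsWith("mysql:")) return ProducerKind.OPEN_REPLICATOR; // MySQL binlog via Open Replicator
        return ProducerKind.RELAY_CHAINING;                                // otherwise chain off another relay
    }

    public static void main(String[] args) {
        // hypothetical trail-file URI, mirroring the "gg://<dir>:<prefix>" form used in the tests
        System.out.println(pickProducerKind("gg:///mnt/gg/trail:x3")); // prints GOLDEN_GATE
    }
}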

Example 12 with SchemaRegistryService

Use of com.linkedin.databus2.schemas.SchemaRegistryService in project databus by linkedin.

In class BootstrapAvroFileSeederMain, method init:

public static void init(String[] args) throws Exception {
    parseArgs(args);
    File sourcesJson = new File(_sSourcesConfigFile);
    ObjectMapper mapper = new ObjectMapper();
    PhysicalSourceConfig physicalSourceConfig = mapper.readValue(sourcesJson, PhysicalSourceConfig.class);
    physicalSourceConfig.checkForNulls();
    Config config = new Config();
    ConfigLoader<StaticConfig> configLoader = new ConfigLoader<StaticConfig>("databus.seed.", config);
    _sStaticConfig = configLoader.loadConfig(_sBootstrapConfigProps);
    // Make sure the URI from the configuration file identifies an Oracle JDBC source.
    String uri = physicalSourceConfig.getUri();
    if (!uri.startsWith("jdbc:oracle")) {
        throw new InvalidConfigException("Invalid source URI (" + physicalSourceConfig.getUri() + "). Only jdbc:oracle: URIs are supported.");
    }
    OracleEventProducerFactory factory = new BootstrapSeederOracleEventProducerFactory(_sStaticConfig.getController().getPKeyNameMap());
    // Parse each one of the logical sources
    _sources = new ArrayList<OracleTriggerMonitoredSourceInfo>();
    FileSystemSchemaRegistryService schemaRegistryService = FileSystemSchemaRegistryService.build(_sStaticConfig.getSchemaRegistry().getFileSystem());
    for (LogicalSourceConfig sourceConfig : physicalSourceConfig.getSources()) {
        OracleTriggerMonitoredSourceInfo source = factory.buildOracleMonitoredSourceInfo(sourceConfig.build(), physicalSourceConfig.build(), schemaRegistryService);
        _sources.add(source);
    }
    _sSeeder = new BootstrapDBSeeder(_sStaticConfig.getBootstrap(), _sources);
    _sBootstrapBuffer = new BootstrapEventBuffer(_sStaticConfig.getController().getCommitInterval() * 2);
    _sWriterThread = new BootstrapSeederWriterThread(_sBootstrapBuffer, _sSeeder);
    _sReader = new BootstrapAvroFileEventReader(_sStaticConfig.getController(), _sources, _sSeeder.getLastRows(), _sBootstrapBuffer);
}
Also used: ConfigLoader(com.linkedin.databus.core.util.ConfigLoader) LogicalSourceConfig(com.linkedin.databus2.relay.config.LogicalSourceConfig) SchemaRegistryStaticConfig(com.linkedin.databus2.schemas.SchemaRegistryStaticConfig) BootstrapConfig(com.linkedin.databus.bootstrap.common.BootstrapConfig) BootstrapReadOnlyConfig(com.linkedin.databus.bootstrap.common.BootstrapReadOnlyConfig) PhysicalSourceConfig(com.linkedin.databus2.relay.config.PhysicalSourceConfig) InvalidConfigException(com.linkedin.databus.core.util.InvalidConfigException) FileSystemSchemaRegistryService(com.linkedin.databus2.schemas.FileSystemSchemaRegistryService) OracleTriggerMonitoredSourceInfo(com.linkedin.databus2.producers.db.OracleTriggerMonitoredSourceInfo) OracleEventProducerFactory(com.linkedin.databus2.relay.OracleEventProducerFactory) File(java.io.File) ObjectMapper(org.codehaus.jackson.map.ObjectMapper)
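
A minimal sketch of the load-and-validate step above, assuming the same Jackson 1.x (org.codehaus.jackson) ObjectMapper API and the PhysicalSourceConfig methods shown in the example; LoadSourcesConfig is a hypothetical wrapper class, not part of databus.

import java.io.File;

import org.codehaus.jackson.map.ObjectMapper;

import com.linkedin.databus.core.util.InvalidConfigException;
import com.linkedin.databus2.relay.config.PhysicalSourceConfig;

public final class LoadSourcesConfig {
    /** Loads the sources JSON, validates it, and enforces the Oracle-only restriction. */
    public static PhysicalSourceConfig load(String jsonPath) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        PhysicalSourceConfig cfg = mapper.readValue(new File(jsonPath), PhysicalSourceConfig.class);
        cfg.checkForNulls(); // fail fast on missing required fields
        String uri = cfg.getUri();
        if (uri == null || !uri.startsWith("jdbc:oracle")) {
            throw new InvalidConfigException("Invalid source URI (" + uri + "). Only jdbc:oracle: URIs are supported.");
        }
        return cfg;
    }
}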

Example 13 with SchemaRegistryService

Use of com.linkedin.databus2.schemas.SchemaRegistryService in project databus by linkedin.

In class BootstrapSeederMain, method init:

public static void init(String[] args) throws Exception {
    parseArgs(args);
    // Load the source configuration JSON file
    //File sourcesJson = new File("integration-test/config/sources-member2.json");
    File sourcesJson = new File(_sSourcesConfigFile);
    ObjectMapper mapper = new ObjectMapper();
    PhysicalSourceConfig physicalSourceConfig = mapper.readValue(sourcesJson, PhysicalSourceConfig.class);
    physicalSourceConfig.checkForNulls();
    Config config = new Config();
    ConfigLoader<StaticConfig> configLoader = new ConfigLoader<StaticConfig>("databus.seed.", config);
    _sStaticConfig = configLoader.loadConfig(_sBootstrapConfigProps);
    // Make sure the URI from the configuration file identifies an Oracle JDBC source.
    String uri = physicalSourceConfig.getUri();
    if (!uri.startsWith("jdbc:oracle")) {
        throw new InvalidConfigException("Invalid source URI (" + physicalSourceConfig.getUri() + "). Only jdbc:oracle: URIs are supported.");
    }
    String sourceTypeStr = physicalSourceConfig.getReplBitSetter().getSourceType();
    if (SourceType.TOKEN.toString().equalsIgnoreCase(sourceTypeStr))
        throw new InvalidConfigException("Token Source-type for Replication bit setter config cannot be set for trigger-based Databus relay !!");
    // Create the OracleDataSource used to get DB connection(s)
    try {
        Class oracleDataSourceClass = OracleJarUtils.loadClass("oracle.jdbc.pool.OracleDataSource");
        Object ods = oracleDataSourceClass.newInstance();
        Method setURLMethod = oracleDataSourceClass.getMethod("setURL", String.class);
        setURLMethod.invoke(ods, uri);
        _sDataStore = (DataSource) ods;
    } catch (Exception e) {
        String errMsg = "Error creating a data source object ";
        LOG.error(errMsg, e);
        throw e;
    }
    //TODO: Need a better way than relying on RelayFactory for generating MonitoredSourceInfo
    OracleEventProducerFactory factory = new BootstrapSeederOracleEventProducerFactory(_sStaticConfig.getController().getPKeyNameMap());
    // Parse each one of the logical sources
    _sources = new ArrayList<OracleTriggerMonitoredSourceInfo>();
    FileSystemSchemaRegistryService schemaRegistryService = FileSystemSchemaRegistryService.build(_sStaticConfig.getSchemaRegistry().getFileSystem());
    Set<String> seenUris = new HashSet<String>();
    for (LogicalSourceConfig sourceConfig : physicalSourceConfig.getSources()) {
        String srcUri = sourceConfig.getUri();
        if (seenUris.contains(srcUri)) {
            String msg = "Uri (" + srcUri + ") is used for more than one sources. Currently Bootstrap Seeder cannot support seeding sources with the same URI together. Please have them run seperately !!";
            LOG.fatal(msg);
            throw new InvalidConfigException(msg);
        }
        seenUris.add(srcUri);
        OracleTriggerMonitoredSourceInfo source = factory.buildOracleMonitoredSourceInfo(sourceConfig.build(), physicalSourceConfig.build(), schemaRegistryService);
        _sources.add(source);
    }
    _sSeeder = new BootstrapDBSeeder(_sStaticConfig.getBootstrap(), _sources);
    _sBootstrapBuffer = new BootstrapEventBuffer(_sStaticConfig.getController().getCommitInterval() * 2);
    _sWriterThread = new BootstrapSeederWriterThread(_sBootstrapBuffer, _sSeeder);
    _sReader = new BootstrapSrcDBEventReader(_sDataStore, _sBootstrapBuffer, _sStaticConfig.getController(), _sources, _sSeeder.getLastRows(), _sSeeder.getLastKeys(), 0);
}
Also used: LogicalSourceConfig(com.linkedin.databus2.relay.config.LogicalSourceConfig) SchemaRegistryStaticConfig(com.linkedin.databus2.schemas.SchemaRegistryStaticConfig) BootstrapConfig(com.linkedin.databus.bootstrap.common.BootstrapConfig) BootstrapReadOnlyConfig(com.linkedin.databus.bootstrap.common.BootstrapReadOnlyConfig) PhysicalSourceConfig(com.linkedin.databus2.relay.config.PhysicalSourceConfig) FileSystemSchemaRegistryService(com.linkedin.databus2.schemas.FileSystemSchemaRegistryService) OracleTriggerMonitoredSourceInfo(com.linkedin.databus2.producers.db.OracleTriggerMonitoredSourceInfo) OracleEventProducerFactory(com.linkedin.databus2.relay.OracleEventProducerFactory) ObjectMapper(org.codehaus.jackson.map.ObjectMapper) HashSet(java.util.HashSet) ConfigLoader(com.linkedin.databus.core.util.ConfigLoader) InvalidConfigException(com.linkedin.databus.core.util.InvalidConfigException) Method(java.lang.reflect.Method) IOException(java.io.IOException) ParseException(org.apache.commons.cli.ParseException) File(java.io.File)
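
A minimal sketch of the reflective OracleDataSource creation above, which avoids a compile-time dependency on the Oracle JDBC jar. Class.forName stands in here for the example's OracleJarUtils.loadClass, and OracleDataSourceLoader is a hypothetical name; it assumes the Oracle driver jar is on the classpath at runtime.

import java.lang.reflect.Method;

import javax.sql.DataSource;

public final class OracleDataSourceLoader {
    /** Reflectively instantiates oracle.jdbc.pool.OracleDataSource and sets its URL. */
    public static DataSource create(String jdbcUri) throws Exception {
        Class<?> odsClass = Class.forName("oracle.jdbc.pool.OracleDataSource");
        Object ods = odsClass.newInstance();
        Method setUrl = odsClass.getMethod("setURL", String.class);
        setUrl.invoke(ods, jdbcUri);
        return (DataSource) ods;
    }
}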

Example 14 with SchemaRegistryService

Use of com.linkedin.databus2.schemas.SchemaRegistryService in project databus by linkedin.

In class BootstrapTableReader, method init:

public static void init(String[] args) throws Exception {
    parseArgs(args);
    BootstrapSeederMain.Config bsConf = new BootstrapSeederMain.Config();
    ConfigLoader<BootstrapSeederMain.StaticConfig> configLoader = new ConfigLoader<BootstrapSeederMain.StaticConfig>("databus.reader.", bsConf);
    _bsStaticConfig = configLoader.loadConfig(_sBootstrapConfigProps);
    Config qConf = new Config();
    ConfigLoader<StaticConfig> configLoader2 = new ConfigLoader<StaticConfig>("databus.query.", qConf);
    _queryStaticConfig = configLoader2.loadConfig(_sQueryConfigProps);
    SchemaRegistryService schemaRegistry = FileSystemSchemaRegistryService.build(_bsStaticConfig.getSchemaRegistry().getFileSystem());
    LOG.info("Schema = " + schemaRegistry.fetchLatestSchemaBySourceName(_queryStaticConfig.getSourceName()));
    _schema = Schema.parse(schemaRegistry.fetchLatestSchemaBySourceName(_queryStaticConfig.getSourceName()));
    VersionedSchema vs = new VersionedSchema(_schema.getFullName(), (short) 1, _schema, null);
    VersionedSchemaSet schemaSet = new VersionedSchemaSet();
    schemaSet.add(vs);
    _decoder = new DbusEventAvroDecoder(schemaSet);
}
Also used: ConfigLoader(com.linkedin.databus.core.util.ConfigLoader) DbusEventAvroDecoder(com.linkedin.databus.client.DbusEventAvroDecoder) FileSystemSchemaRegistryService(com.linkedin.databus2.schemas.FileSystemSchemaRegistryService) SchemaRegistryService(com.linkedin.databus2.schemas.SchemaRegistryService) VersionedSchemaSet(com.linkedin.databus2.schemas.VersionedSchemaSet) VersionedSchema(com.linkedin.databus2.schemas.VersionedSchema)
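
A minimal sketch of the schema-set and decoder wiring above, using only the constructors and calls shown in the example; DecoderWiring is a hypothetical wrapper, and in practice the schema JSON comes from the file-system schema registry rather than a string argument.

import org.apache.avro.Schema;

import com.linkedin.databus.client.DbusEventAvroDecoder;
import com.linkedin.databus2.schemas.VersionedSchema;
import com.linkedin.databus2.schemas.VersionedSchemaSet;

public final class DecoderWiring {
    /** Wraps a single Avro schema (as version 1) in a schema set and builds a decoder for it. */
    public static DbusEventAvroDecoder buildDecoder(String avroSchemaJson) {
        Schema schema = Schema.parse(avroSchemaJson); // Avro 1.x parse API, as in the example
        VersionedSchema vs = new VersionedSchema(schema.getFullName(), (short) 1, schema, null);
        VersionedSchemaSet schemaSet = new VersionedSchemaSet();
        schemaSet.add(vs);
        return new DbusEventAvroDecoder(schemaSet);
    }
}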

Example 15 with SchemaRegistryService

Use of com.linkedin.databus2.schemas.SchemaRegistryService in project databus by linkedin.

In class TestGoldenGateEventProducer, method testGGParserStats:

/**
   * Tests collection of parser stats, especially the lag between parsed and added files.
   * @throws Exception
   */
@Test
public void testGGParserStats() throws Exception {
    short[] sourceIds = new short[] { 505, 506 };
    String[] sourceNames = new String[] { "source1", "source2" };
    // setup trail Files directory
    File ggTrailDir = FileUtils.createTempDir("testGGParserStats");
    // configure physical source
    String uri = "gg://" + ggTrailDir.getAbsolutePath() + ":x3";
    PhysicalSourceStaticConfig pssc = buildSimplePssc(sourceIds, sourceNames, uri);
    LOG.info("Uri=" + uri);
    // create schema
    Schema s = Schema.parse(sourceAvroSchema);
    VersionedSchema vs = new VersionedSchema(new VersionedSchemaId("source1", (short) 3), s, null);
    // mock for schema registry
    SchemaRegistryService srs = EasyMock.createMock(SchemaRegistryService.class);
    EasyMock.expect(srs.fetchLatestVersionedSchemaBySourceName("source1")).andReturn(vs).anyTimes();
    EasyMock.expect(srs.fetchLatestVersionedSchemaBySourceName("source2")).andReturn(vs).anyTimes();
    EasyMock.expect(srs.fetchLatestVersionedSchemaBySourceName(null)).andReturn(vs);
    // mock for MaxSCNReadWriter
    MaxSCNReaderWriter mscn = EasyMock.createMock(MaxSCNReaderWriter.class);
    EasyMock.expect(mscn.getMaxScn()).andReturn((long) -2).atLeastOnce();
    mscn.saveMaxScn(EasyMock.anyLong());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(mscn);
    EasyMock.replay(srs);
    int totalTransWritten = 0;
    int totalFilesWritten = 0;
    // buffer
    DbusEventBufferAppendable mb = createBufMult(pssc);
    // start GG producer
    GoldenGateEventProducer gg = new GoldenGateEventProducer(pssc, srs, mb, null, mscn);
    // create first 2 files
    addToTrailFile(new File(ggTrailDir.getAbsolutePath() + "/x301"), 100, 4);
    addToTrailFile(new File(ggTrailDir.getAbsolutePath() + "/x302"), 200, 4);
    totalTransWritten = 8;
    totalFilesWritten = 2;
    // get hold of parser stats object
    final GGParserStatistics ggParserStats = gg.getParserStats();
    // all should be 0
    Assert.assertEquals(0, ggParserStats.getNumFilesParsed());
    Assert.assertEquals(0, ggParserStats.getNumFilesAdded());
    Assert.assertEquals(0, ggParserStats.getFilesLag());
    Assert.assertEquals(0, ggParserStats.getTimeLag());
    Assert.assertEquals(0, ggParserStats.getBytesLag());
    try {
        LOG.info("starting event producer");
        // -2 here does nothing; the actual start SCN is supplied through the MaxSCNReaderWriter mock
        gg.start(-2);
        // let it parse first files
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                return ggParserStats.getNumFilesParsed() == 2 && (8 * _transactionPatternSize == ggParserStats.getNumBytesTotalParsed());
            }
        }, "First two files parsed", 2000, LOG);
        // stats in the interim
        Assert.assertEquals(2, ggParserStats.getNumFilesParsed());
        Assert.assertEquals(2, ggParserStats.getNumFilesAdded());
        Assert.assertEquals(0, ggParserStats.getFilesLag());
        Assert.assertEquals(0, ggParserStats.getTimeLag());
        Assert.assertEquals(0, ggParserStats.getBytesLag());
        Assert.assertEquals(totalTransWritten * _transactionPatternSize, ggParserStats.getNumBytesTotalParsed());
        gg.pause();
        // the file will get parsed but not processed
        addToTrailFile(new File(ggTrailDir.getAbsolutePath() + "/x303"), 300, 4);
        totalTransWritten += 4;
        totalFilesWritten++;
        // sleep to get more than a ms of lag time
        TestUtil.sleep(2000);
        addToTrailFile(new File(ggTrailDir.getAbsolutePath() + "/x304"), 400, 4);
        totalTransWritten += 4;
        totalFilesWritten++;
        // sleep to guarantee we picked up the stats update (stats are updated every 5 seconds)
        TestUtil.sleep(6000);
        // now we should be 2 files behind; however, the parser thread gets paused AFTER it
        // starts processing a file, so the actual value will be 1 file behind:
        // x303 has already started being parsed, so only x304 is behind
        int lagFiles = 1;
        // 1 file, 4 transactions each
        long lagBytes = 1 * 4 * _transactionPatternSize;
        /*
         * Assert.assertEquals(totalFilesWritten - 1, ggParserStats.getNumFilesParsed());
         * Assert.assertEquals(totalFilesWritten, ggParserStats.getNumFilesAdded());
         * Assert.assertEquals(lagFiles, ggParserStats.getFilesLag()); // because x303 got parsed
         *
         * // we added 4 files and parsed 3, so the diff should be 1 file size
         * // (4 transactions in 1 file)
         * Assert.assertEquals(lagBytes, ggParserStats.getBytesLag());
         * Assert.assertTrue(ggParserStats.getTimeLag() > 0);
         */
        gg.unpause();
        TestUtil.sleep(5000);
        // now we should have caught up
        Assert.assertEquals(4, ggParserStats.getNumFilesParsed());
        Assert.assertEquals(4, ggParserStats.getNumFilesAdded());
        Assert.assertEquals(0, ggParserStats.getFilesLag());
        Assert.assertEquals(0, ggParserStats.getTimeLag());
        Assert.assertEquals(0, ggParserStats.getBytesLag());
        // append to a file
        LOG.info("pausing again");
        gg.pause();
        addToTrailFile(new File(ggTrailDir.getAbsolutePath() + "/x304"), 410, 4);
        totalTransWritten += 4;
        TestUtil.sleep(1000);
        addToTrailFile(new File(ggTrailDir.getAbsolutePath() + "/x304"), 420, 4);
        totalTransWritten += 4;
        TestUtil.sleep(2000);
        gg.unpause();
        TestUtil.sleep(5500);
        // should still be caught up
        Assert.assertEquals(4, ggParserStats.getNumFilesParsed());
        Assert.assertEquals(4, ggParserStats.getNumFilesAdded());
        Assert.assertEquals(0, ggParserStats.getFilesLag());
        Assert.assertEquals(0, ggParserStats.getTimeLag());
        Assert.assertEquals(0, ggParserStats.getBytesLag());
        // assert the stats
        int totalFilesSize = totalTransWritten * _transactionPatternSize;
        Assert.assertEquals((totalFilesSize / totalFilesWritten), ggParserStats.getAvgFileSize());
        Assert.assertEquals(true, ggParserStats.getAvgParseTransactionTimeNs() > 0);
        Assert.assertEquals("part1", ggParserStats.getPhysicalSourceName());
        Assert.assertEquals(totalFilesSize / totalTransWritten, ggParserStats.getAvgTransactionSize());
        Assert.assertEquals(423, ggParserStats.getMaxScn());
        // 2 events per transaction
        Assert.assertEquals(totalTransWritten * 2, ggParserStats.getNumTotalEvents());
        Assert.assertEquals(totalTransWritten, ggParserStats.getNumTransactionsTotal());
        Assert.assertEquals(totalTransWritten, ggParserStats.getNumTransactionsWithEvents());
        Assert.assertEquals(0, ggParserStats.getNumTransactionsWithoutEvents());
        Assert.assertEquals(true, ggParserStats.getTimeSinceLastAccessMs() > 0);
        Assert.assertEquals(totalTransWritten * _transactionPatternSize, ggParserStats.getNumBytesTotalParsed());
        Assert.assertEquals("NumSCNRegressions", 0, ggParserStats.getNumSCNRegressions());
        Assert.assertEquals("LastSCNRegressed", -1, ggParserStats.getLastRegressedScn());
    } finally {
        gg.shutdown();
    }
}
Also used: ConditionCheck(com.linkedin.databus2.test.ConditionCheck) PhysicalSourceStaticConfig(com.linkedin.databus2.relay.config.PhysicalSourceStaticConfig) MaxSCNReaderWriter(com.linkedin.databus2.core.seq.MaxSCNReaderWriter) GGParserStatistics(com.linkedin.databus.monitoring.mbean.GGParserStatistics) VersionedSchemaId(com.linkedin.databus2.schemas.VersionedSchemaId) Schema(org.apache.avro.Schema) VersionedSchema(com.linkedin.databus2.schemas.VersionedSchema) SchemaRegistryService(com.linkedin.databus2.schemas.SchemaRegistryService) DbusEventBufferAppendable(com.linkedin.databus.core.DbusEventBufferAppendable) File(java.io.File) Test(org.testng.annotations.Test)
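
A minimal sketch of the EasyMock wiring this test relies on, extracted into hypothetical helpers (ProducerTestMocks is not part of databus). It assumes the same SchemaRegistryService and MaxSCNReaderWriter methods that the test mocks above; the matcher-based expectation is a simplification of the per-source-name expectations in the test.

import org.easymock.EasyMock;

import com.linkedin.databus2.core.seq.MaxSCNReaderWriter;
import com.linkedin.databus2.schemas.SchemaRegistryService;
import com.linkedin.databus2.schemas.VersionedSchema;

public final class ProducerTestMocks {
    /** Registry mock that returns the given schema for any source name. */
    public static SchemaRegistryService mockRegistry(VersionedSchema vs) throws Exception {
        SchemaRegistryService srs = EasyMock.createMock(SchemaRegistryService.class);
        EasyMock.expect(srs.fetchLatestVersionedSchemaBySourceName(EasyMock.<String>anyObject()))
                .andReturn(vs).anyTimes();
        EasyMock.replay(srs); // mock is now ready to hand to the producer
        return srs;
    }

    /** SCN store mock that reports a fixed start SCN and swallows saves. */
    public static MaxSCNReaderWriter mockScnStore(long startScn) throws Exception {
        MaxSCNReaderWriter mscn = EasyMock.createMock(MaxSCNReaderWriter.class);
        EasyMock.expect(mscn.getMaxScn()).andReturn(startScn).atLeastOnce();
        mscn.saveMaxScn(EasyMock.anyLong());
        EasyMock.expectLastCall().anyTimes();
        EasyMock.replay(mscn);
        return mscn;
    }
}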

Aggregations

SchemaRegistryService (com.linkedin.databus2.schemas.SchemaRegistryService)11 DatabusException (com.linkedin.databus2.core.DatabusException)9 InvalidConfigException (com.linkedin.databus.core.util.InvalidConfigException)7 LogicalSource (com.linkedin.databus.core.data_model.LogicalSource)5 ChunkedWritableByteChannel (com.linkedin.databus2.core.container.ChunkedWritableByteChannel)5 RegisterResponseEntry (com.linkedin.databus2.core.container.request.RegisterResponseEntry)5 OracleTriggerMonitoredSourceInfo (com.linkedin.databus2.producers.db.OracleTriggerMonitoredSourceInfo)5 FileSystemSchemaRegistryService (com.linkedin.databus2.schemas.FileSystemSchemaRegistryService)5 VersionedSchema (com.linkedin.databus2.schemas.VersionedSchema)5 ArrayList (java.util.ArrayList)5 HashMap (java.util.HashMap)5 ObjectMapper (org.codehaus.jackson.map.ObjectMapper)5 RegisterRequestProcessor (com.linkedin.databus.container.request.RegisterRequestProcessor)4 DatabusRequest (com.linkedin.databus2.core.container.request.DatabusRequest)4 NoSuchSchemaException (com.linkedin.databus2.schemas.NoSuchSchemaException)4 SourceIdNameRegistry (com.linkedin.databus2.schemas.SourceIdNameRegistry)4 VersionedSchemaSet (com.linkedin.databus2.schemas.VersionedSchemaSet)4 IOException (java.io.IOException)4 ByteBuffer (java.nio.ByteBuffer)4 Charset (java.nio.charset.Charset)4