
Example 11 with Transaction

use of com.linkedin.databus2.producers.ds.Transaction in project databus by linkedin.

the class ORListener method endXtion.

/**
   * Per {@link http://code.google.com/p/open-replicator/source/browse/trunk/open-replicator/src/main/java/com/google/code/or/binlog/impl/event/XidEvent.java}
   * XidEvent signals a commit
   */
private void endXtion(AbstractBinlogEventV4 e) {
    _currTxnTimestamp = e.getHeader().getTimestamp() * 1000000L;
    long txnReadLatency = System.nanoTime() - _currTxnStartReadTimestamp;
    boolean em = ((e instanceof QueryEvent) || (e instanceof XidEvent));
    if (!em) {
        throw new DatabusRuntimeException("endXtion should be called with either QueryEvent of XidEvent");
    }
    _transaction.setSizeInBytes(_currTxnSizeInBytes);
    _transaction.setTxnNanoTimestamp(_currTxnTimestamp);
    _transaction.setTxnReadLatencyNanos(txnReadLatency);
    if (_ignoreSource) {
        long scn = scn(_currFileNum, (int) e.getHeader().getPosition());
        _transaction.setIgnoredSourceScn(scn);
    }
    try {
        _txnProcessor.onEndTransaction(_transaction);
    } catch (DatabusException e3) {
        _log.error("Got exception in the transaction handler ", e3);
        throw new DatabusRuntimeException(e3);
    } finally {
        reset();
        if (_log.isDebugEnabled()) {
            _log.debug("endXtion: " + e);
        }
    }
}
Also used : DatabusException(com.linkedin.databus2.core.DatabusException) XidEvent(com.google.code.or.binlog.impl.event.XidEvent) QueryEvent(com.google.code.or.binlog.impl.event.QueryEvent) DatabusRuntimeException(com.linkedin.databus.core.DatabusRuntimeException)
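
A handler on the receiving end of the onEndTransaction(...) call above could look roughly like the following. This is a minimal sketch only: the onEndTransaction(Transaction) signature and the thrown DatabusException mirror the call site in endXtion, but the Transaction getters are assumed counterparts of the setters used there and are not verified against the real class.

import com.linkedin.databus2.core.DatabusException;
import com.linkedin.databus2.producers.ds.Transaction;

public class LoggingTransactionHandler {

    // Invoked once per committed transaction, mirroring _txnProcessor.onEndTransaction(_transaction)
    public void onEndTransaction(Transaction txn) throws DatabusException {
        // Assumed getters, paired with the setters populated in endXtion():
        long sizeInBytes = txn.getSizeInBytes();                 // setSizeInBytes(...)
        long txnNanoTimestamp = txn.getTxnNanoTimestamp();       // setTxnNanoTimestamp(...)
        long readLatencyNanos = txn.getTxnReadLatencyNanos();    // setTxnReadLatencyNanos(...)
        System.out.println("committed txn: bytes=" + sizeInBytes
                + " tsNanos=" + txnNanoTimestamp
                + " readLatencyNanos=" + readLatencyNanos);
    }
}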

Example 12 with Transaction

use of com.linkedin.databus2.producers.ds.Transaction in project databus by linkedin.

the class TestGoldenGateEventProducer method testGGParserStats.

/**
   * test collection of parser stats, especially lag between parsed and added files
   * @throws Exception
   */
@Test
public void testGGParserStats() throws Exception {
    short[] sourceIds = new short[] { 505, 506 };
    String[] sourceNames = new String[] { "source1", "source2" };
    // setup trail Files directory
    File ggTrailDir = FileUtils.createTempDir("testGGParserStats");
    // configure physical source
    String uri = "gg://" + ggTrailDir.getAbsolutePath() + ":x3";
    PhysicalSourceStaticConfig pssc = buildSimplePssc(sourceIds, sourceNames, uri);
    LOG.info("Uri=" + uri);
    // create schema
    Schema s = Schema.parse(sourceAvroSchema);
    VersionedSchema vs = new VersionedSchema(new VersionedSchemaId("source1", (short) 3), s, null);
    // mock for schema registry
    SchemaRegistryService srs = EasyMock.createMock(SchemaRegistryService.class);
    EasyMock.expect(srs.fetchLatestVersionedSchemaBySourceName("source1")).andReturn(vs).anyTimes();
    EasyMock.expect(srs.fetchLatestVersionedSchemaBySourceName("source2")).andReturn(vs).anyTimes();
    EasyMock.expect(srs.fetchLatestVersionedSchemaBySourceName(null)).andReturn(vs);
    // mock for MaxSCNReadWriter
    MaxSCNReaderWriter mscn = EasyMock.createMock(MaxSCNReaderWriter.class);
    EasyMock.expect(mscn.getMaxScn()).andReturn((long) -2).atLeastOnce();
    mscn.saveMaxScn(EasyMock.anyLong());
    EasyMock.expectLastCall().anyTimes();
    EasyMock.replay(mscn);
    EasyMock.replay(srs);
    int totalTransWritten = 0;
    int totalFilesWritten = 0;
    // buffer
    DbusEventBufferAppendable mb = createBufMult(pssc);
    // start GG producer
    GoldenGateEventProducer gg = new GoldenGateEventProducer(pssc, srs, mb, null, mscn);
    // create first 2 files
    addToTrailFile(new File(ggTrailDir.getAbsolutePath() + "/x301"), 100, 4);
    addToTrailFile(new File(ggTrailDir.getAbsolutePath() + "/x302"), 200, 4);
    totalTransWritten = 8;
    totalFilesWritten = 2;
    // get hold of parser stats object
    final GGParserStatistics ggParserStats = gg.getParserStats();
    // all should be 0
    Assert.assertEquals(0, ggParserStats.getNumFilesParsed());
    Assert.assertEquals(0, ggParserStats.getNumFilesAdded());
    Assert.assertEquals(0, ggParserStats.getFilesLag());
    Assert.assertEquals(0, ggParserStats.getTimeLag());
    Assert.assertEquals(0, ggParserStats.getBytesLag());
    try {
        LOG.info("starting event producer");
        // -2 here does nothing; the actual setting happens through the mock of MaxSCNReaderWriter
        gg.start(-2);
        // let it parse the first two files
        TestUtil.assertWithBackoff(new ConditionCheck() {

            @Override
            public boolean check() {
                return ggParserStats.getNumFilesParsed() == 2 && (8 * _transactionPatternSize == ggParserStats.getNumBytesTotalParsed());
            }
        }, "First two files parsed", 2000, LOG);
        // stats in the interim
        Assert.assertEquals(2, ggParserStats.getNumFilesParsed());
        Assert.assertEquals(2, ggParserStats.getNumFilesAdded());
        Assert.assertEquals(0, ggParserStats.getFilesLag());
        Assert.assertEquals(0, ggParserStats.getTimeLag());
        Assert.assertEquals(0, ggParserStats.getBytesLag());
        Assert.assertEquals(totalTransWritten * _transactionPatternSize, ggParserStats.getNumBytesTotalParsed());
        gg.pause();
        // the file will get parsed but not processed
        addToTrailFile(new File(ggTrailDir.getAbsolutePath() + "/x303"), 300, 4);
        totalTransWritten += 4;
        totalFilesWritten++;
        // to get more than a millisecond of lag time
        TestUtil.sleep(2000);
        addToTrailFile(new File(ggTrailDir.getAbsolutePath() + "/x304"), 400, 4);
        totalTransWritten += 4;
        totalFilesWritten++;
        // to guarantee we picked up the stats update (stats are updated every 5 seconds)
        TestUtil.sleep(6000);
        // Now we should be 2 files behind. However, the parser thread gets paused AFTER it starts
        // processing a file, so the actual value will be 1 file behind:
        // x303 has already started being parsed, so only x304 is behind.
        int lagFiles = 1;
        // 1 file behind, 4 transactions per file
        long lagBytes = 1 * 4 * _transactionPatternSize;
        /*
         * Assert.assertEquals(totalFilesWritten - 1, ggParserStats.getNumFilesParsed());
         * Assert.assertEquals(totalFilesWritten, ggParserStats.getNumFilesAdded());
         * Assert.assertEquals(lagFiles, ggParserStats.getFilesLag()); // because 303 got parsed
         *
         * // we added 4 files and parsed 3, so the diff should be 1 file size (4 transactions in 1 file)
         * Assert.assertEquals(lagBytes, ggParserStats.getBytesLag());
         * Assert.assertTrue(ggParserStats.getTimeLag() > 0);
         */
        gg.unpause();
        TestUtil.sleep(5000);
        // now we should have caught up
        Assert.assertEquals(4, ggParserStats.getNumFilesParsed());
        Assert.assertEquals(4, ggParserStats.getNumFilesAdded());
        Assert.assertEquals(0, ggParserStats.getFilesLag());
        Assert.assertEquals(0, ggParserStats.getTimeLag());
        Assert.assertEquals(0, ggParserStats.getBytesLag());
        // append to a file
        LOG.info("pausing again");
        gg.pause();
        addToTrailFile(new File(ggTrailDir.getAbsolutePath() + "/x304"), 410, 4);
        totalTransWritten += 4;
        TestUtil.sleep(1000);
        addToTrailFile(new File(ggTrailDir.getAbsolutePath() + "/x304"), 420, 4);
        totalTransWritten += 4;
        TestUtil.sleep(2000);
        gg.unpause();
        TestUtil.sleep(5500);
        // should still be caught up
        Assert.assertEquals(4, ggParserStats.getNumFilesParsed());
        Assert.assertEquals(4, ggParserStats.getNumFilesAdded());
        Assert.assertEquals(0, ggParserStats.getFilesLag());
        Assert.assertEquals(0, ggParserStats.getTimeLag());
        Assert.assertEquals(0, ggParserStats.getBytesLag());
        // assert the stats
        int totalFilesSize = totalTransWritten * _transactionPatternSize;
        Assert.assertEquals((totalFilesSize / totalFilesWritten), ggParserStats.getAvgFileSize());
        Assert.assertEquals(true, ggParserStats.getAvgParseTransactionTimeNs() > 0);
        Assert.assertEquals("part1", ggParserStats.getPhysicalSourceName());
        Assert.assertEquals(totalFilesSize / totalTransWritten, ggParserStats.getAvgTransactionSize());
        Assert.assertEquals(423, ggParserStats.getMaxScn());
        // 2 events per transaction
        Assert.assertEquals(totalTransWritten * 2, ggParserStats.getNumTotalEvents());
        Assert.assertEquals(totalTransWritten, ggParserStats.getNumTransactionsTotal());
        Assert.assertEquals(totalTransWritten, ggParserStats.getNumTransactionsWithEvents());
        Assert.assertEquals(0, ggParserStats.getNumTransactionsWithoutEvents());
        Assert.assertEquals(true, ggParserStats.getTimeSinceLastAccessMs() > 0);
        Assert.assertEquals(totalTransWritten * _transactionPatternSize, ggParserStats.getNumBytesTotalParsed());
        Assert.assertEquals("NumSCNRegressions", 0, ggParserStats.getNumSCNRegressions());
        Assert.assertEquals("LastSCNRegressed", -1, ggParserStats.getLastRegressedScn());
    } finally {
        gg.shutdown();
    }
    return;
}
Also used : ConditionCheck(com.linkedin.databus2.test.ConditionCheck) PhysicalSourceStaticConfig(com.linkedin.databus2.relay.config.PhysicalSourceStaticConfig) MaxSCNReaderWriter(com.linkedin.databus2.core.seq.MaxSCNReaderWriter) GGParserStatistics(com.linkedin.databus.monitoring.mbean.GGParserStatistics) VersionedSchemaId(com.linkedin.databus2.schemas.VersionedSchemaId) Schema(org.apache.avro.Schema) VersionedSchema(com.linkedin.databus2.schemas.VersionedSchema) SchemaRegistryService(com.linkedin.databus2.schemas.SchemaRegistryService) DbusEventBufferAppendable(com.linkedin.databus.core.DbusEventBufferAppendable) VersionedSchema(com.linkedin.databus2.schemas.VersionedSchema) File(java.io.File) Test(org.testng.annotations.Test)
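
The test avoids fixed-length waits for the parser by polling with TestUtil.assertWithBackoff and a ConditionCheck. Below is a standalone sketch of that polling pattern using only the JDK; the class name and parameters are made up for illustration and this is not databus's TestUtil.

import java.util.function.BooleanSupplier;

public final class PollUntil {

    // Re-check a condition until it holds or the timeout expires, instead of sleeping blindly.
    public static boolean poll(BooleanSupplier check, long timeoutMs, long intervalMs)
            throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            if (check.getAsBoolean()) {
                return true;          // condition met early
            }
            Thread.sleep(intervalMs); // back off before re-checking
        }
        return check.getAsBoolean();  // one final check at the deadline
    }
}

With such a helper, the first wait in the test would read roughly poll(() -> ggParserStats.getNumFilesParsed() == 2, 2000, 50) before asserting the interim stats.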

Example 13 with Transaction

use of com.linkedin.databus2.producers.ds.Transaction in project databus by linkedin.

the class TestGoldenGateEventProducer method testAddEventToBufferRateControl.

private void testAddEventToBufferRateControl(long throttleDurationInSecs) throws InvalidConfigException, UnsupportedKeyException, DatabusException, NoSuchFieldException, IllegalAccessException {
    // 1 event per second required. Send 5 events. Must have 4 sleeps.
    long rate = 1;
    int numEvents = 5;
    PhysicalSourceStaticConfig pssc = buildPssc(rate, throttleDurationInSecs);
    long scn = 10;
    DbusEventBufferAppendable mb = createBufMult(pssc);
    GoldenGateEventProducer gg = new GoldenGateEventProducer(pssc, null, mb, null, null);
    // enable if you want to run with a mocked timer
    // run_with_mock_timer(gg);
    int sourceId = 505;
    HashSet<DBUpdateImage> db = new HashSet<DBUpdateImage>();
    // name1 is the only key
    ColumnsState.KeyPair kp1 = new ColumnsState.KeyPair("name1", Schema.Type.RECORD);
    ArrayList<ColumnsState.KeyPair> keyPairs = new ArrayList<ColumnsState.KeyPair>(numEvents);
    keyPairs.add(kp1);
    Schema s = Schema.parse(avroSchema2);
    GenericRecord gr1 = new GenericData.Record(s);
    gr1.put("name1", "phani1");
    gr1.put("name2", "boris1");
    GenericRecord gr2 = new GenericData.Record(s);
    gr2.put("name1", "phani2");
    gr2.put("name2", "boris2");
    GenericRecord gr3 = new GenericData.Record(s);
    gr3.put("name1", "phani3");
    gr3.put("name2", "boris3");
    GenericRecord gr4 = new GenericData.Record(s);
    gr4.put("name1", "phani4");
    gr4.put("name2", "boris4");
    GenericRecord gr5 = new GenericData.Record(s);
    gr5.put("name1", "phani5");
    gr5.put("name2", "boris5");
    DBUpdateImage dbi1 = new DBUpdateImage(keyPairs, scn, gr1, s, DbUpdateState.DBUpdateImage.OpType.INSERT, false);
    DBUpdateImage dbi2 = new DBUpdateImage(keyPairs, scn, gr2, s, DbUpdateState.DBUpdateImage.OpType.INSERT, false);
    DBUpdateImage dbi3 = new DBUpdateImage(keyPairs, scn, gr3, s, DbUpdateState.DBUpdateImage.OpType.INSERT, false);
    DBUpdateImage dbi4 = new DBUpdateImage(keyPairs, scn, gr4, s, DbUpdateState.DBUpdateImage.OpType.INSERT, false);
    DBUpdateImage dbi5 = new DBUpdateImage(keyPairs, scn, gr5, s, DbUpdateState.DBUpdateImage.OpType.INSERT, false);
    db.add(dbi1);
    db.add(dbi2);
    db.add(dbi3);
    db.add(dbi4);
    db.add(dbi5);
    // For a given transaction and logical source: only 1 update (the last one succeeds)
    Assert.assertEquals(1, db.size());
    // Generate 5 transactions with the same update
    for (int i = 0; i < numEvents; i++) {
        List<TransactionState.PerSourceTransactionalUpdate> dbUpdates = new ArrayList<TransactionState.PerSourceTransactionalUpdate>(10);
        TransactionState.PerSourceTransactionalUpdate dbUpdate = new TransactionState.PerSourceTransactionalUpdate(sourceId, db);
        dbUpdates.add(dbUpdate);
        long timestamp = 60;
        gg.addEventToBuffer(dbUpdates, new TransactionInfo(0, 0, timestamp, scn));
        scn++;
    }
    // It may not sleep the very first time, as 1 second may have elapsed from when the rate control
    // got started to when the event is getting inserted. Subsequently, expect rate control to kick in.
    long numSleeps = Math.min(numEvents, throttleDurationInSecs);
    Assert.assertEquals(gg.getRateControl().getNumSleeps(), numSleeps);
    gg.getRateControl().resetNumSleeps();
    return;
}
Also used : PhysicalSourceStaticConfig(com.linkedin.databus2.relay.config.PhysicalSourceStaticConfig) TransactionState(com.linkedin.databus2.ggParser.XmlStateMachine.TransactionState) DbusEventBufferAppendable(com.linkedin.databus.core.DbusEventBufferAppendable) Schema(org.apache.avro.Schema) VersionedSchema(com.linkedin.databus2.schemas.VersionedSchema) DBUpdateImage(com.linkedin.databus2.ggParser.XmlStateMachine.DbUpdateState.DBUpdateImage) ColumnsState(com.linkedin.databus2.ggParser.XmlStateMachine.ColumnsState) ArrayList(java.util.ArrayList) TransactionInfo(com.linkedin.databus.monitoring.mbean.GGParserStatistics.TransactionInfo) GenericRecord(org.apache.avro.generic.GenericRecord) GenericRecord(org.apache.avro.generic.GenericRecord) HashSet(java.util.HashSet)
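
The assertion on getNumSleeps() relies on the producer sleeping between events to hold the configured rate. The class below is a standalone sketch of that idea (sleep whenever events arrive faster than the events-per-second budget allows); it is illustrative only and is not GoldenGateEventProducer's actual rate-control implementation.

public final class SimpleRateControl {

    private final long _minIntervalNanos;
    private long _lastEventNanos = Long.MIN_VALUE;
    private long _numSleeps = 0;

    public SimpleRateControl(long eventsPerSecond) {
        _minIntervalNanos = 1_000_000_000L / eventsPerSecond;
    }

    // Blocks until at least 1/rate seconds have elapsed since the previous event.
    public void acquire() throws InterruptedException {
        long now = System.nanoTime();
        if (_lastEventNanos != Long.MIN_VALUE) {
            long waitNanos = _minIntervalNanos - (now - _lastEventNanos);
            if (waitNanos > 0) {
                Thread.sleep(waitNanos / 1_000_000L, (int) (waitNanos % 1_000_000L));
                _numSleeps++;   // counterpart of the getNumSleeps() counter checked by the test
            }
        }
        _lastEventNanos = System.nanoTime();
    }

    public long getNumSleeps() {
        return _numSleeps;
    }
}

At 1 event/second, pushing 5 events back-to-back through acquire() forces a sleep before every event after the first, which is the intuition behind "5 events, 4 sleeps"; the test additionally caps the expectation with Math.min(numEvents, throttleDurationInSecs).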

Example 14 with Transaction

use of com.linkedin.databus2.producers.ds.Transaction in project databus by linkedin.

the class TestTrailFilePositionSetter method testFailureMode_MalformedFirstTransactionInFirstFile.

/**
   * Verify that corruption that occurs early in the file causes USE_EARLIEST_SCN to return the
   * first SCN of the first uncorrupted transaction.
   */
@Test
public void testFailureMode_MalformedFirstTransactionInFirstFile() throws Exception {
    final Logger log = Logger.getLogger("TestTrailFilePositionSetter.testFailureMode_MalformedFirstTransactionInFirstFile");
    log.info("starting");
    File dir = createTempDir();
    // first SCN is hardcoded always to be 100
    createTrailFiles(dir.getAbsolutePath(), TRAIL_FILENAME_PREFIX,
                     150,  /* numTxns, 24 lines each */
                     1250, /* numLinesPerFile */
                     1,    /* numLinesPerNewline */
                     "\n", 0,
                     100,  /* corrupt first SCN */
                     "xyzzy", false, "");
    TrailFilePositionSetter posSetter = new TrailFilePositionSetter(dir.getAbsolutePath(), TRAIL_FILENAME_PREFIX);
    GGXMLTrailTransactionFinder finder = new GGXMLTrailTransactionFinder();
    FilePositionResult res;
    // SCN 100 is corrupted, so 101 is the effective oldest SCN => 100 treated as error:
    res = posSetter.locateFilePosition(100, finder);
    Assert.assertEquals(res.getStatus(), FilePositionResult.Status.ERROR, "expected error for exact-match SCN that's corrupted and oldest in all trail files.");
    // SCN 101 is OK (regexQuery() doesn't fully validate XML):
    finder.reset();
    res = posSetter.locateFilePosition(TrailFilePositionSetter.USE_EARLIEST_SCN, finder);
    assertFilePositionResult(res, dir, 101, FilePositionResult.Status.FOUND);
    log.info(DONE_STRING);
}
Also used : GGXMLTrailTransactionFinder(com.linkedin.databus2.producers.db.GGXMLTrailTransactionFinder) Logger(org.apache.log4j.Logger) File(java.io.File) FilePositionResult(com.linkedin.databus.core.TrailFilePositionSetter.FilePositionResult) Test(org.testng.annotations.Test)
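
Putting the two lookups above together, a caller might try an exact SCN first and fall back to USE_EARLIEST_SCN when the requested SCN is older than anything recoverable. The calls below mirror the ones used in the test; the fallback policy itself is only an illustration.

import com.linkedin.databus.core.TrailFilePositionSetter;
import com.linkedin.databus.core.TrailFilePositionSetter.FilePositionResult;
import com.linkedin.databus2.producers.db.GGXMLTrailTransactionFinder;

public class TrailPositionLookup {

    // trailDir and prefix correspond to the temp dir and TRAIL_FILENAME_PREFIX used by the test.
    public static FilePositionResult positionOrEarliest(String trailDir, String prefix, long requestedScn)
            throws Exception {
        TrailFilePositionSetter posSetter = new TrailFilePositionSetter(trailDir, prefix);
        GGXMLTrailTransactionFinder finder = new GGXMLTrailTransactionFinder();
        FilePositionResult res = posSetter.locateFilePosition(requestedScn, finder);
        if (res.getStatus() == FilePositionResult.Status.ERROR) {
            // The requested SCN is older than the oldest usable transaction (e.g. a corrupted
            // first transaction, as above); fall back to the earliest available position.
            finder.reset();
            res = posSetter.locateFilePosition(TrailFilePositionSetter.USE_EARLIEST_SCN, finder);
        }
        return res;
    }
}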

Example 15 with Transaction

use of com.linkedin.databus2.producers.ds.Transaction in project databus by linkedin.

the class TestTrailFilePositionSetter method testFailureMode_MalformedTransactionNoValidScnsInMiddleOfMiddleFile.

/**
   * Verify that a transaction with corruption in all of its SCNs doesn't cause problems.
   * Case 1:  bad transaction at the beginning of the first trail file.
   * Case 2:  bad transaction at the beginning of the middle trail file (partial transaction).
   * Case 3:  bad transaction at the beginning of the middle trail file (first full transaction; preceding partial bad).
   * Case 4:  bad transaction in the middle of the middle trail file.
   * Case 5:  bad transaction at the beginning of the last trail file (first full transaction; preceding partial is OK).
   * Case 6:  bad transaction at the end of the last trail file.
   */
@Test
public void testFailureMode_MalformedTransactionNoValidScnsInMiddleOfMiddleFile() throws Exception {
    final Logger log = Logger.getLogger("TestTrailFilePositionSetter.testFailureMode_MalformedTransactionNoValidScns");
    log.info("starting");
    File dir = createTempDir();
    // corrupt both SCNs in each of six transactions:
    HashSet<Long> corruptedScns = new HashSet<Long>(10);
    corruptedScns.add(new Long(100));  // case 1
    corruptedScns.add(new Long(101));  // case 1
    corruptedScns.add(new Long(204));  // case 2
    corruptedScns.add(new Long(205));  // case 2
    corruptedScns.add(new Long(206));  // case 3
    corruptedScns.add(new Long(207));  // case 3
    corruptedScns.add(new Long(250));  // case 4
    corruptedScns.add(new Long(251));  // case 4
    corruptedScns.add(new Long(310));  // case 5
    corruptedScns.add(new Long(311));  // case 5
    corruptedScns.add(new Long(398));  // case 6
    corruptedScns.add(new Long(399));  // case 6
    createTrailFiles(dir.getAbsolutePath(), TRAIL_FILENAME_PREFIX,
                     150,  /* numTxns, 24 lines each */
                     1250, /* numLinesPerFile */
                     1,    /* numLinesPerNewline */
                     "\n", 0, corruptedScns, "blargh", false, "");
    TrailFilePositionSetter posSetter = new TrailFilePositionSetter(dir.getAbsolutePath(), TRAIL_FILENAME_PREFIX);
    GGXMLTrailTransactionFinder finder = new GGXMLTrailTransactionFinder();
    FilePositionResult res;
    // SCN 101 is before the earliest (valid) SCN present, so expect ERROR:
    res = posSetter.locateFilePosition(101, finder);
    Assert.assertEquals(res.getStatus(), FilePositionResult.Status.ERROR, "expected error for exact-match SCN that's 'too old'.");
    // For SCN <= the earliest transaction's max SCN, we throw an error
    finder.reset();
    res = posSetter.locateFilePosition(102, finder);
    Assert.assertEquals(res.getStatus(), FilePositionResult.Status.ERROR, "expected error for exact-match SCN that's 'too old'.");
    // expect first non-corrupted SCN here, not first "transaction SCN":
    finder.reset();
    res = posSetter.locateFilePosition(TrailFilePositionSetter.USE_EARLIEST_SCN, finder);
    assertFilePositionResult(res, dir, 102, FilePositionResult.Status.FOUND);
    // 107 = max SCN of its transaction = "transaction SCN" => should get FOUND
    finder.reset();
    res = posSetter.locateFilePosition(107, finder);
    assertFilePositionResult(res, dir, 107, FilePositionResult.Status.FOUND);
    // 203 = last valid SCN in first file = max SCN of its transaction = "transaction SCN"
    // => should be FOUND
    finder.reset();
    res = posSetter.locateFilePosition(203, finder);
    assertFilePositionResult(res, dir, 203, FilePositionResult.Status.FOUND);
    // SCN 204 is invalid and is part of a transaction split across first/second files;
    // 209 = next "transaction SCN" and is near the top of the middle file
    finder.reset();
    res = posSetter.locateFilePosition(204, finder);
    assertFilePositionResult(res, dir, 209, FilePositionResult.Status.EXACT_SCN_NOT_FOUND);
    // SCN 250 is invalid (as is 251); expect 253 since max SCN of following transaction
    finder.reset();
    res = posSetter.locateFilePosition(250, finder);
    assertFilePositionResult(res, dir, 253, FilePositionResult.Status.EXACT_SCN_NOT_FOUND);
    // SCN 251 is invalid (as is 250); expect 253 since max SCN of following transaction
    finder.reset();
    res = posSetter.locateFilePosition(251, finder);
    assertFilePositionResult(res, dir, 253, FilePositionResult.Status.EXACT_SCN_NOT_FOUND);
    // SCN 252 is valid and present, but weird corner case => still EXACT_SCN_NOT_FOUND
    finder.reset();
    res = posSetter.locateFilePosition(252, finder);
    assertFilePositionResult(res, dir, 252, FilePositionResult.Status.EXACT_SCN_NOT_FOUND);
    // SCN 253 is valid and present and max SCN of its transaction => FOUND
    finder.reset();
    res = posSetter.locateFilePosition(253, finder);
    assertFilePositionResult(res, dir, 253, FilePositionResult.Status.FOUND);
    // SCN 309 is valid and present and max SCN of its transaction => FOUND (even though
    // split across second/third files, and following transaction is corrupted)
    finder.reset();
    res = posSetter.locateFilePosition(309, finder);
    assertFilePositionResult(res, dir, 309, FilePositionResult.Status.FOUND);
    // SCN 310 is invalid (as is 311); expect 313
    finder.reset();
    res = posSetter.locateFilePosition(310, finder);
    assertFilePositionResult(res, dir, 313, FilePositionResult.Status.EXACT_SCN_NOT_FOUND);
    // SCN 311 is invalid (as is 310); expect 313
    finder.reset();
    res = posSetter.locateFilePosition(311, finder);
    assertFilePositionResult(res, dir, 313, FilePositionResult.Status.EXACT_SCN_NOT_FOUND);
    // SCN 398 is invalid (as is 399) and is in last transaction of last file, but since
    // trail file is expected to continue growing (i.e., eventually to have a valid SCN
    // that's larger than the request), expect EXACT_SCN_NOT_FOUND rather than ERROR.  SCN
    // returned will be that of last valid transaction, i.e., 397.
    // [checks beginning of last valid transaction == 396/397 one at byte offset 35650]
    finder.reset();
    res = posSetter.locateFilePosition(398, finder);
    assertFilePositionResult(res, dir, 397, FilePositionResult.Status.EXACT_SCN_NOT_FOUND);
    // SCN 405 is completely missing (would be after last transaction of last file); expect
    // same behavior as previous case
    finder.reset();
    res = posSetter.locateFilePosition(405, finder);
    assertFilePositionResult(res, dir, 397, FilePositionResult.Status.EXACT_SCN_NOT_FOUND);
    // last valid transaction-SCN is 397
    finder.reset();
    res = posSetter.locateFilePosition(TrailFilePositionSetter.USE_LATEST_SCN, finder);
    assertFilePositionResult(res, dir, 397, FilePositionResult.Status.FOUND);
    log.info(DONE_STRING);
}
Also used : GGXMLTrailTransactionFinder(com.linkedin.databus2.producers.db.GGXMLTrailTransactionFinder) Logger(org.apache.log4j.Logger) File(java.io.File) FilePositionResult(com.linkedin.databus.core.TrailFilePositionSetter.FilePositionResult) HashSet(java.util.HashSet) Test(org.testng.annotations.Test)
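
Most of these expectations follow a single rule: a transaction is addressed by the maximum valid SCN among its records (the "transaction SCN"), and corrupted SCNs are skipped entirely. The snippet below is a tiny illustration of that rule, not the finder's actual parsing code.

import java.util.List;
import java.util.Set;

public final class TransactionScnRule {

    // Returns the "transaction SCN" (max valid SCN among the transaction's records),
    // or -1 if every SCN in the transaction is corrupted (cases 1-6 above).
    public static long transactionScn(List<Long> recordScns, Set<Long> corruptedScns) {
        long max = -1;
        for (long scn : recordScns) {
            if (corruptedScns.contains(scn)) {
                continue;   // e.g. 250 and 251 above contribute nothing
            }
            max = Math.max(max, scn);
        }
        return max;
    }
}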

Aggregations

DatabusException (com.linkedin.databus2.core.DatabusException): 10
File (java.io.File): 9
Test (org.testng.annotations.Test): 8
FilePositionResult (com.linkedin.databus.core.TrailFilePositionSetter.FilePositionResult): 6
GGXMLTrailTransactionFinder (com.linkedin.databus2.producers.db.GGXMLTrailTransactionFinder): 6
Logger (org.apache.log4j.Logger): 6
ArrayList (java.util.ArrayList): 5
DatabusRuntimeException (com.linkedin.databus.core.DatabusRuntimeException): 3
HashSet (java.util.HashSet): 3
Schema (org.apache.avro.Schema): 3
QueryEvent (com.google.code.or.binlog.impl.event.QueryEvent): 2
XidEvent (com.google.code.or.binlog.impl.event.XidEvent): 2
DbusEventBufferAppendable (com.linkedin.databus.core.DbusEventBufferAppendable): 2
TransactionInfo (com.linkedin.databus.monitoring.mbean.GGParserStatistics.TransactionInfo): 2
TransactionState (com.linkedin.databus2.ggParser.XmlStateMachine.TransactionState): 2
PhysicalSourceStaticConfig (com.linkedin.databus2.relay.config.PhysicalSourceStaticConfig): 2
VersionedSchema (com.linkedin.databus2.schemas.VersionedSchema): 2
GenericRecord (org.apache.avro.generic.GenericRecord): 2
BinlogEventV4 (com.google.code.or.binlog.BinlogEventV4): 1
AbstractBinlogEventV4 (com.google.code.or.binlog.impl.event.AbstractBinlogEventV4): 1