
Example 1 with DefaultDatasetManager

Use of co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager in project cdap by caskdata.

From the class LogFileManagerTest, method testLogFileManager.

@Test
public void testLogFileManager() throws Exception {
    int syncInterval = 1024 * 1024;
    long maxLifeTimeMs = 50;
    long maxFileSizeInBytes = 104857600;
    DatasetManager datasetManager = new DefaultDatasetManager(injector.getInstance(DatasetFramework.class),
                                                               NamespaceId.SYSTEM,
                                                               co.cask.cdap.common.service.RetryStrategies.noRetry());
    Transactional transactional = Transactions.createTransactionalWithRetry(
        Transactions.createTransactional(new MultiThreadDatasetCache(
            new SystemDatasetInstantiator(injector.getInstance(DatasetFramework.class)),
            injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM,
            ImmutableMap.<String, String>of(), null, null)),
        RetryStrategies.retryOnConflict(20, 100));
    FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
    LogFileManager logFileManager = new LogFileManager("700", "600", maxLifeTimeMs, maxFileSizeInBytes, syncInterval, fileMetaDataWriter, injector.getInstance(LocationFactory.class));
    LogPathIdentifier logPathIdentifier = new LogPathIdentifier("test", "testApp", "testFlow");
    long timestamp = System.currentTimeMillis();
    LogFileOutputStream outputStream = logFileManager.getLogFileOutputStream(logPathIdentifier, timestamp);
    LoggingEvent event1 = getLoggingEvent("co.cask.Test1", (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME), Level.ERROR, "test message 1");
    outputStream.append(event1);
    // use getActiveOutputStream instead of getLogFileOutputStream to avoid a race if the test machine is slow
    Assert.assertNotNull((logFileManager.getActiveOutputStream(logPathIdentifier)));
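    // sleep longer than maxLifeTimeMs (50 ms) so the active stream becomes eligible for closure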
    TimeUnit.MILLISECONDS.sleep(60);
    logFileManager.flush();
    // the stream has outlived maxLifeTimeMs, so the flush should have closed it and this should return null
    Assert.assertNull((logFileManager.getActiveOutputStream(logPathIdentifier)));
    LogFileOutputStream newLogOutStream = logFileManager.getLogFileOutputStream(logPathIdentifier, timestamp);
    // make sure the new location we got is different
    Assert.assertNotEquals(outputStream.getLocation(), newLogOutStream.getLocation());
}
Also used : FileMetaDataWriter(co.cask.cdap.logging.meta.FileMetaDataWriter) DefaultDatasetManager(co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager) DatasetManager(co.cask.cdap.api.dataset.DatasetManager) LocationFactory(org.apache.twill.filesystem.LocationFactory) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) LoggingEvent(ch.qos.logback.classic.spi.LoggingEvent) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) MultiThreadDatasetCache(co.cask.cdap.data2.dataset2.MultiThreadDatasetCache) SystemDatasetInstantiator(co.cask.cdap.data.dataset.SystemDatasetInstantiator) Transactional(co.cask.cdap.api.Transactional) Test(org.junit.Test)
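All five examples on this page repeat the same dataset and transaction setup. As a rough sketch (not part of the CDAP sources), that boilerplate could be pulled into a pair of test helpers; the injector is assumed to be the Guice com.google.inject.Injector these tests already hold:

private DatasetManager createDatasetManager(Injector injector) {
    // dataset manager for the system namespace with no retries, exactly as in the examples
    return new DefaultDatasetManager(injector.getInstance(DatasetFramework.class), NamespaceId.SYSTEM,
                                     co.cask.cdap.common.service.RetryStrategies.noRetry());
}

private Transactional createTransactional(Injector injector) {
    // the same conflict-retrying Transactional that every example constructs inline
    return Transactions.createTransactionalWithRetry(
        Transactions.createTransactional(new MultiThreadDatasetCache(
            new SystemDatasetInstantiator(injector.getInstance(DatasetFramework.class)),
            injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM,
            ImmutableMap.<String, String>of(), null, null)),
        RetryStrategies.retryOnConflict(20, 100));
}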

Example 2 with DefaultDatasetManager

Use of co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager in project cdap by caskdata.

From the class FileMetadataTest, method testFileMetadataReadWrite.

@Test
public void testFileMetadataReadWrite() throws Exception {
    DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
    DatasetManager datasetManager = new DefaultDatasetManager(datasetFramework, NamespaceId.SYSTEM,
                                                               co.cask.cdap.common.service.RetryStrategies.noRetry());
    Transactional transactional = Transactions.createTransactionalWithRetry(
        Transactions.createTransactional(new MultiThreadDatasetCache(
            new SystemDatasetInstantiator(datasetFramework),
            injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM,
            ImmutableMap.<String, String>of(), null, null)),
        RetryStrategies.retryOnConflict(20, 100));
    FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
    LogPathIdentifier logPathIdentifier = new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow");
    LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
    Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
    long currentTime = System.currentTimeMillis();
    for (int i = 10; i <= 100; i += 10) {
        // i is the event time
        fileMetaDataWriter.writeMetaData(logPathIdentifier, i, currentTime, location.append(Integer.toString(i)));
    }
    // for event timestamp 80, write two more entries for the same log path with later current times
    fileMetaDataWriter.writeMetaData(logPathIdentifier, 80, currentTime + 1, location.append("81"));
    fileMetaDataWriter.writeMetaData(logPathIdentifier, 80, currentTime + 2, location.append("82"));
    // reader test
    FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
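    // expect 12 entries: the 10 event times 10..100 plus the two extra entries written for event time 80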
    Assert.assertEquals(12, fileMetadataReader.listFiles(logPathIdentifier, 0, 100).size());
    Assert.assertEquals(5, fileMetadataReader.listFiles(logPathIdentifier, 20, 50).size());
    Assert.assertEquals(2, fileMetadataReader.listFiles(logPathIdentifier, 100, 150).size());
    // should include the latest file with event start time 80.
    List<LogLocation> locationList = fileMetadataReader.listFiles(logPathIdentifier, 81, 85);
    Assert.assertEquals(1, locationList.size());
    Assert.assertEquals(80, locationList.get(0).getEventTimeMs());
    Assert.assertEquals(location.append("82"), locationList.get(0).getLocation());
    Assert.assertEquals(1, fileMetadataReader.listFiles(logPathIdentifier, 150, 1000).size());
}
Also used : FileMetaDataWriter(co.cask.cdap.logging.meta.FileMetaDataWriter) DefaultDatasetManager(co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager) DatasetManager(co.cask.cdap.api.dataset.DatasetManager) LocationFactory(org.apache.twill.filesystem.LocationFactory) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) MultiThreadDatasetCache(co.cask.cdap.data2.dataset2.MultiThreadDatasetCache) SystemDatasetInstantiator(co.cask.cdap.data.dataset.SystemDatasetInstantiator) LogLocation(co.cask.cdap.logging.write.LogLocation) LogPathIdentifier(co.cask.cdap.logging.appender.system.LogPathIdentifier) FileMetaDataReader(co.cask.cdap.logging.meta.FileMetaDataReader) Transactional(co.cask.cdap.api.Transactional) Location(org.apache.twill.filesystem.Location) Test(org.junit.Test)

Example 3 with DefaultDatasetManager

Use of co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager in project cdap by caskdata.

From the class FileMetadataTest, method testFileMetadataReadWriteAcrossFormats.

@Test
public void testFileMetadataReadWriteAcrossFormats() throws Exception {
    DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
    DatasetManager datasetManager = new DefaultDatasetManager(datasetFramework, NamespaceId.SYSTEM,
                                                               co.cask.cdap.common.service.RetryStrategies.noRetry());
    Transactional transactional = Transactions.createTransactionalWithRetry(
        Transactions.createTransactional(new MultiThreadDatasetCache(
            new SystemDatasetInstantiator(datasetFramework),
            injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM,
            ImmutableMap.<String, String>of(), null, null)),
        RetryStrategies.retryOnConflict(20, 100));
    FileMetaDataManager fileMetaDataManager = injector.getInstance(FileMetaDataManager.class);
    FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
    LogPathIdentifier logPathIdentifier = new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow");
    LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
    Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
    long currentTime = System.currentTimeMillis();
    LoggingContext loggingContext = LoggingContextHelper.getLoggingContext(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow");
    // 10 files in old format
    for (int i = 1; i <= 10; i++) {
        // i is the event time
        fileMetaDataManager.writeMetaData(loggingContext, currentTime + i, location.append("testFile" + Integer.toString(i)));
    }
    long eventTime = currentTime + 20;
    long newCurrentTime = currentTime + 100;
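    // keep the new-format writes in a later, non-overlapping time range than the old-format ones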
    // 10 files in new format
    for (int i = 1; i <= 10; i++) {
        fileMetaDataWriter.writeMetaData(logPathIdentifier, eventTime + i, newCurrentTime + i, location.append("testFileNew" + Integer.toString(i)));
    }
    // reader test
    FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
    // scan only in old files time range
    List<LogLocation> locations = fileMetadataReader.listFiles(logPathIdentifier, currentTime + 2, currentTime + 6);
    // should include old-format files 1..6 (file 1 starts before the scan window but may still contain events in it)
    Assert.assertEquals(6, locations.size());
    for (LogLocation logLocation : locations) {
        Assert.assertEquals(LogLocation.VERSION_0, logLocation.getFrameworkVersion());
    }
    // scan only in new files time range
    locations = fileMetadataReader.listFiles(logPathIdentifier, eventTime + 2, eventTime + 6);
    // should include new-format files 1..6
    Assert.assertEquals(6, locations.size());
    for (LogLocation logLocation : locations) {
        Assert.assertEquals(LogLocation.VERSION_1, logLocation.getFrameworkVersion());
    }
    // scan time range across formats
    locations = fileMetadataReader.listFiles(logPathIdentifier, currentTime + 2, eventTime + 6);
    // should include files from old range (1..10) and new range (1..6)
    Assert.assertEquals(16, locations.size());
    for (int i = 0; i < locations.size(); i++) {
        if (i < 10) {
            Assert.assertEquals(LogLocation.VERSION_0, locations.get(i).getFrameworkVersion());
            Assert.assertEquals(location.append("testFile" + Integer.toString(i + 1)), locations.get(i).getLocation());
        } else {
            Assert.assertEquals(LogLocation.VERSION_1, locations.get(i).getFrameworkVersion());
            Assert.assertEquals(location.append("testFileNew" + Integer.toString(i - 9)), locations.get(i).getLocation());
        }
    }
}
Also used : FileMetaDataWriter(co.cask.cdap.logging.meta.FileMetaDataWriter) LoggingContext(co.cask.cdap.common.logging.LoggingContext) DefaultDatasetManager(co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager) DatasetManager(co.cask.cdap.api.dataset.DatasetManager) LocationFactory(org.apache.twill.filesystem.LocationFactory) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) MultiThreadDatasetCache(co.cask.cdap.data2.dataset2.MultiThreadDatasetCache) SystemDatasetInstantiator(co.cask.cdap.data.dataset.SystemDatasetInstantiator) LogLocation(co.cask.cdap.logging.write.LogLocation) FileMetaDataManager(co.cask.cdap.logging.write.FileMetaDataManager) LogPathIdentifier(co.cask.cdap.logging.appender.system.LogPathIdentifier) FileMetaDataReader(co.cask.cdap.logging.meta.FileMetaDataReader) Transactional(co.cask.cdap.api.Transactional) Location(org.apache.twill.filesystem.Location) Test(org.junit.Test)

Example 4 with DefaultDatasetManager

Use of co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager in project cdap by caskdata.

From the class FileMetadataCleanerTest, method testScanAndDeleteNewMetadata.

@Test
public void testScanAndDeleteNewMetadata() throws Exception {
    // write meta data in the new format with the file meta data writer,
    // then scan and delete entries up to a till time and verify exactly those entries are returned
    DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
    DatasetManager datasetManager = new DefaultDatasetManager(datasetFramework, NamespaceId.SYSTEM,
                                                               co.cask.cdap.common.service.RetryStrategies.noRetry());
    Transactional transactional = Transactions.createTransactionalWithRetry(
        Transactions.createTransactional(new MultiThreadDatasetCache(
            new SystemDatasetInstantiator(datasetFramework),
            injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM,
            ImmutableMap.<String, String>of(), null, null)),
        RetryStrategies.retryOnConflict(20, 100));
    FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
    FileMetadataCleaner fileMetadataCleaner = new FileMetadataCleaner(datasetManager, transactional);
    try {
        long currentTime = System.currentTimeMillis();
        long eventTimestamp = currentTime - 100;
        LogPathIdentifier logPathIdentifier = new LogPathIdentifier("testNs2", "testApp", "testFlow");
        LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
        List<String> expected = new ArrayList<>();
        for (int i = 0; i < 100; i++) {
            Location location = locationFactory.create("testFlowFile" + i);
            // event time is 100 ms behind the current timestamp
            fileMetaDataWriter.writeMetaData(logPathIdentifier, eventTimestamp + i, currentTime + i, location);
            expected.add(location.toURI().getPath());
        }
        long tillTime = currentTime + 50;
        List<FileMetadataCleaner.DeletedEntry> deletedEntries = fileMetadataCleaner.scanAndGetFilesToDelete(tillTime, TRANSACTION_TIMEOUT);
        // we should have deleted 51 rows, till time is inclusive
        Assert.assertEquals(51, deletedEntries.size());
        int count = 0;
        for (FileMetadataCleaner.DeletedEntry deletedEntry : deletedEntries) {
            Assert.assertEquals(expected.get(count), deletedEntry.getPath());
            count += 1;
        }
        // now add 10 entries for spark
        logPathIdentifier = new LogPathIdentifier("testNs2", "testApp", "testSpark");
        expected = new ArrayList<>();
        for (int i = 0; i < 10; i++) {
            Location location = locationFactory.create("testSparkFile" + i);
            // event time is 100 ms behind the current timestamp
            fileMetaDataWriter.writeMetaData(logPathIdentifier, eventTimestamp + i, currentTime + i, location);
            expected.add(location.toURI().getPath());
        }
        // let's keep the same till time; this should delete only the spark entries now
        deletedEntries = fileMetadataCleaner.scanAndGetFilesToDelete(tillTime, TRANSACTION_TIMEOUT);
        // only the 10 new spark rows should be deleted; the earlier flow rows are already gone
        Assert.assertEquals(10, deletedEntries.size());
        count = 0;
        for (FileMetadataCleaner.DeletedEntry deletedEntry : deletedEntries) {
            Assert.assertEquals(expected.get(count), deletedEntry.getPath());
            count += 1;
        }
        // now add 10 entries for the mr context, written at currentTime + 0..9
        logPathIdentifier = new LogPathIdentifier("testNs2", "testApp", "testMr");
        expected = new ArrayList<>();
        // the remaining flow entries (51..70) should come first in the expected list
        for (int i = 51; i <= 70; i++) {
            expected.add(locationFactory.create("testFlowFile" + i).toURI().getPath());
        }
        for (int i = 0; i < 10; i++) {
            Location location = locationFactory.create("testMrFile" + i);
            // event time is 100 ms behind the current timestamp
            fileMetaDataWriter.writeMetaData(logPathIdentifier, eventTimestamp + i, currentTime + i, location);
            expected.add(location.toURI().getPath());
        }
        List<String> nextExpected = new ArrayList<>();
        logPathIdentifier = new LogPathIdentifier("testNs2", "testApp", "testCustomAction");
        for (int i = 90; i < 100; i++) {
            Location location = locationFactory.create("testActionFile" + i);
            // event time is 100 ms behind the current timestamp
            fileMetaDataWriter.writeMetaData(logPathIdentifier, eventTimestamp + i, currentTime + i, location);
            nextExpected.add(location.toURI().getPath());
        }
        tillTime = currentTime + 70;
        // let's delete till currentTime + 70
        deletedEntries = fileMetadataCleaner.scanAndGetFilesToDelete(tillTime, TRANSACTION_TIMEOUT);
        // we should have deleted flow files 51-70 (20) and mr files 0-9 (10), in that order, and none of the action files
        Assert.assertEquals(30, deletedEntries.size());
        count = 0;
        for (FileMetadataCleaner.DeletedEntry deletedEntry : deletedEntries) {
            Assert.assertEquals(expected.get(count), deletedEntry.getPath());
            count += 1;
        }
        // now delete till currentTime + 100, this should delete all remaining entries.
        // custom action should come first and then flow entries
        tillTime = currentTime + 100;
        // let's delete till currentTime + 100
        deletedEntries = fileMetadataCleaner.scanAndGetFilesToDelete(tillTime, TRANSACTION_TIMEOUT);
        // we should have deleted custom action files 90-99 (10) and flow files 71-99 (29)
        for (int i = 71; i < 100; i++) {
            nextExpected.add(locationFactory.create("testFlowFile" + i).toURI().getPath());
        }
        Assert.assertEquals(39, deletedEntries.size());
        count = 0;
        for (FileMetadataCleaner.DeletedEntry deletedEntry : deletedEntries) {
            Assert.assertEquals(nextExpected.get(count), deletedEntry.getPath());
            count += 1;
        }
        // now let's delete with till time = currentTime + 1000; this should return an empty result
        tillTime = currentTime + 1000;
        deletedEntries = fileMetadataCleaner.scanAndGetFilesToDelete(tillTime, TRANSACTION_TIMEOUT);
        Assert.assertEquals(0, deletedEntries.size());
    } finally {
        // cleanup meta
        cleanupMetadata(transactional, datasetManager);
    }
}
Also used : FileMetaDataWriter(co.cask.cdap.logging.meta.FileMetaDataWriter) DefaultDatasetManager(co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager) DatasetManager(co.cask.cdap.api.dataset.DatasetManager) ArrayList(java.util.ArrayList) LocationFactory(org.apache.twill.filesystem.LocationFactory) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) MultiThreadDatasetCache(co.cask.cdap.data2.dataset2.MultiThreadDatasetCache) SystemDatasetInstantiator(co.cask.cdap.data.dataset.SystemDatasetInstantiator) LogPathIdentifier(co.cask.cdap.logging.appender.system.LogPathIdentifier) Transactional(co.cask.cdap.api.Transactional) Location(org.apache.twill.filesystem.Location) LogLocation(co.cask.cdap.logging.write.LogLocation) Test(org.junit.Test)
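Note that scanAndGetFilesToDelete appears to remove only the metadata rows; the returned DeletedEntry objects carry the file paths, presumably so the caller can remove the log files themselves. A hypothetical follow-up sketch (not part of the test above), assuming the paths resolve through the same LocationFactory used to create them:

// Sketch: remove the log files whose metadata entries were just deleted.
for (FileMetadataCleaner.DeletedEntry deletedEntry : deletedEntries) {
    Location logFile = locationFactory.create(deletedEntry.getPath());
    if (logFile.exists()) {
        logFile.delete();
    }
}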

Example 5 with DefaultDatasetManager

Use of co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager in project cdap by caskdata.

From the class FileMetadataCleanerTest, method testScanAndDeleteOldMetadata.

@Test
public void testScanAndDeleteOldMetadata() throws Exception {
    // use file meta data manager to write meta data in old format
    // use file meta writer to write meta data in new format
    // scan and delete the old format entries and verify they are gone
    DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
    DatasetManager datasetManager = new DefaultDatasetManager(datasetFramework, NamespaceId.SYSTEM,
                                                               co.cask.cdap.common.service.RetryStrategies.noRetry());
    Transactional transactional = Transactions.createTransactionalWithRetry(
        Transactions.createTransactional(new MultiThreadDatasetCache(
            new SystemDatasetInstantiator(datasetFramework),
            injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM,
            ImmutableMap.<String, String>of(), null, null)),
        RetryStrategies.retryOnConflict(20, 100));
    FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
    FileMetaDataManager fileMetaDataManager = injector.getInstance(FileMetaDataManager.class);
    LoggingContext flowContext = LoggingContextHelper.getLoggingContext("testNs", "testApp", "testFlow", ProgramType.FLOW);
    long eventTimestamp = System.currentTimeMillis();
    LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
    Location testLocation = locationFactory.create("testFile");
    try {
        // write 50 entries in old format
        for (int i = 0; i < 50; i++) {
            fileMetaDataManager.writeMetaData(flowContext, eventTimestamp + i, testLocation);
        }
        LoggingContext wflowContext = LoggingContextHelper.getLoggingContext("testNs", "testApp", "testWflow", ProgramType.WORKFLOW);
        fileMetaDataManager.writeMetaData(wflowContext, eventTimestamp, testLocation);
        LoggingContext mrContext = LoggingContextHelper.getLoggingContext("testNs", "testApp", "testMR", ProgramType.MAPREDUCE);
        fileMetaDataManager.writeMetaData(mrContext, eventTimestamp, testLocation);
        LoggingContext sparkContext = LoggingContextHelper.getLoggingContext("testNs", "testApp", "testSpark", ProgramType.SPARK);
        fileMetaDataManager.writeMetaData(sparkContext, eventTimestamp, testLocation);
        // write 50 entries in new format
        long newEventTime = eventTimestamp + 1000;
        long currentTime = newEventTime + 1000;
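        // the new-format entries live in a disjoint, later time range than the old-format ones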
        LogPathIdentifier logPathIdentifier = new LogPathIdentifier("testNs", "testApp", "testFlow");
        for (int i = 50; i < 100; i++) {
            fileMetaDataWriter.writeMetaData(logPathIdentifier, newEventTime + i, currentTime + i, testLocation);
        }
        FileMetaDataReader fileMetaDataReader = injector.getInstance(FileMetaDataReader.class);
        Assert.assertEquals(50, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(flowContext), eventTimestamp - 1, eventTimestamp + 100).size());
        Assert.assertEquals(1, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(wflowContext), eventTimestamp - 1, eventTimestamp + 100).size());
        Assert.assertEquals(1, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(mrContext), eventTimestamp - 1, eventTimestamp + 100).size());
        Assert.assertEquals(1, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(sparkContext), eventTimestamp - 1, eventTimestamp + 100).size());
        FileMetadataCleaner fileMetadataCleaner = new FileMetadataCleaner(datasetManager, transactional);
        fileMetadataCleaner.scanAndDeleteOldMetaData(TRANSACTION_TIMEOUT, CUTOFF_TIME_TRANSACTION);
        // deleted all old metadata
        Assert.assertEquals(0, fileMetaDataReader.listFiles(logPathIdentifier, eventTimestamp - 1, eventTimestamp + 100).size());
        Assert.assertEquals(0, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(wflowContext), eventTimestamp - 1, eventTimestamp + 100).size());
        Assert.assertEquals(0, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(mrContext), eventTimestamp - 1, eventTimestamp + 100).size());
        Assert.assertEquals(0, fileMetaDataReader.listFiles(LoggingContextHelper.getLogPathIdentifier(sparkContext), eventTimestamp - 1, eventTimestamp + 100).size());
    } finally {
        // cleanup meta
        cleanupMetadata(transactional, datasetManager);
    }
}
Also used : FileMetaDataWriter(co.cask.cdap.logging.meta.FileMetaDataWriter) LoggingContext(co.cask.cdap.common.logging.LoggingContext) DefaultDatasetManager(co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager) DatasetManager(co.cask.cdap.api.dataset.DatasetManager) LocationFactory(org.apache.twill.filesystem.LocationFactory) DatasetFramework(co.cask.cdap.data2.dataset2.DatasetFramework) TransactionSystemClient(org.apache.tephra.TransactionSystemClient) MultiThreadDatasetCache(co.cask.cdap.data2.dataset2.MultiThreadDatasetCache) SystemDatasetInstantiator(co.cask.cdap.data.dataset.SystemDatasetInstantiator) FileMetaDataManager(co.cask.cdap.logging.write.FileMetaDataManager) LogPathIdentifier(co.cask.cdap.logging.appender.system.LogPathIdentifier) FileMetaDataReader(co.cask.cdap.logging.meta.FileMetaDataReader) Transactional(co.cask.cdap.api.Transactional) Location(org.apache.twill.filesystem.Location) LogLocation(co.cask.cdap.logging.write.LogLocation) Test(org.junit.Test)

Aggregations

Transactional (co.cask.cdap.api.Transactional): 7
DatasetManager (co.cask.cdap.api.dataset.DatasetManager): 7
SystemDatasetInstantiator (co.cask.cdap.data.dataset.SystemDatasetInstantiator): 7
DefaultDatasetManager (co.cask.cdap.data2.datafabric.dataset.DefaultDatasetManager): 7
DatasetFramework (co.cask.cdap.data2.dataset2.DatasetFramework): 7
MultiThreadDatasetCache (co.cask.cdap.data2.dataset2.MultiThreadDatasetCache): 7
FileMetaDataWriter (co.cask.cdap.logging.meta.FileMetaDataWriter): 7
TransactionSystemClient (org.apache.tephra.TransactionSystemClient): 7
LocationFactory (org.apache.twill.filesystem.LocationFactory): 7
Test (org.junit.Test): 7
LogPathIdentifier (co.cask.cdap.logging.appender.system.LogPathIdentifier): 6
Location (org.apache.twill.filesystem.Location): 6
FileMetaDataReader (co.cask.cdap.logging.meta.FileMetaDataReader): 5
LogLocation (co.cask.cdap.logging.write.LogLocation): 5
LoggingContext (co.cask.cdap.common.logging.LoggingContext): 2
FileMetaDataManager (co.cask.cdap.logging.write.FileMetaDataManager): 2
ArrayList (java.util.ArrayList): 2
LoggingEvent (ch.qos.logback.classic.spi.LoggingEvent): 1
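Distilled from the examples above, the basic metadata round trip looks like the sketch below; "myNs", "myApp" and "myProgram" are placeholder names, and datasetManager, transactional, injector and location are assumed to be set up as in the examples:

// Sketch: write one metadata entry and read it back.
FileMetaDataWriter writer = new FileMetaDataWriter(datasetManager, transactional);
LogPathIdentifier id = new LogPathIdentifier("myNs", "myApp", "myProgram");
long eventTs = System.currentTimeMillis();
// parameters: log path id, event time, current time, location of the log file
writer.writeMetaData(id, eventTs, System.currentTimeMillis(), location);
FileMetaDataReader reader = injector.getInstance(FileMetaDataReader.class);
// the scan window covers the event time, so the entry just written should be returned
List<LogLocation> files = reader.listFiles(id, eventTs - 1, eventTs + 1);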