Use of co.cask.cdap.api.Transactional in project cdap by caskdata.
The class LogFileManagerTest, method testLogFileManager.
@Test
public void testLogFileManager() throws Exception {
  int syncInterval = 1024 * 1024;
  long maxLifeTimeMs = 50;
  long maxFileSizeInBytes = 104857600;
  DatasetManager datasetManager = new DefaultDatasetManager(
    injector.getInstance(DatasetFramework.class), NamespaceId.SYSTEM,
    co.cask.cdap.common.service.RetryStrategies.noRetry(), null);
  Transactional transactional = Transactions.createTransactionalWithRetry(
    Transactions.createTransactional(new MultiThreadDatasetCache(
      new SystemDatasetInstantiator(injector.getInstance(DatasetFramework.class)),
      injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM,
      ImmutableMap.<String, String>of(), null, null)),
    RetryStrategies.retryOnConflict(20, 100));
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
  LogFileManager logFileManager = new LogFileManager(
    "700", "600", maxLifeTimeMs, maxFileSizeInBytes, syncInterval,
    fileMetaDataWriter, injector.getInstance(LocationFactory.class));
  LogPathIdentifier logPathIdentifier = new LogPathIdentifier("test", "testApp", "testFlow");
  long timestamp = System.currentTimeMillis();
  LogFileOutputStream outputStream = logFileManager.getLogFileOutputStream(logPathIdentifier, timestamp);
  LoggingEvent event1 = getLoggingEvent(
    "co.cask.Test1", (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME),
    Level.ERROR, "test message 1");
  outputStream.append(event1);
  // check the active stream directly instead of calling getLogFileOutputStream,
  // to avoid a race if the test machine is slow
  Assert.assertNotNull(logFileManager.getActiveOutputStream(logPathIdentifier));
  TimeUnit.MILLISECONDS.sleep(60);
  logFileManager.flush();
  // the stream has outlived maxLifeTimeMs (50 ms), so flush should have closed it
  // and there should be no active stream anymore
  Assert.assertNull(logFileManager.getActiveOutputStream(logPathIdentifier));
  LogFileOutputStream newLogOutStream = logFileManager.getLogFileOutputStream(logPathIdentifier, timestamp);
  // make sure the new location we got is different
  Assert.assertNotEquals(outputStream.getLocation(), newLogOutStream.getLocation());
}
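The getLoggingEvent helper used above belongs to the test class and is not part of this snippet. A minimal sketch of what such a helper might look like, assuming Logback's LoggingEvent(String, Logger, Level, String, Throwable, Object[]) constructor; the actual helper in LogFileManagerTest may differ:

// hypothetical reconstruction of the helper, for illustration only
private LoggingEvent getLoggingEvent(String fqcn, ch.qos.logback.classic.Logger logger,
                                     Level level, String message) {
  // fqcn is recorded as the fully qualified class name of the caller;
  // no throwable and no message arguments are attached here
  return new LoggingEvent(fqcn, logger, level, message, null, null);
}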
Use of co.cask.cdap.api.Transactional in project cdap by caskdata.
The class FileMetadataTest, method testFileMetadataReadWrite.
@Test
public void testFileMetadataReadWrite() throws Exception {
  DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
  DatasetManager datasetManager = new DefaultDatasetManager(
    datasetFramework, NamespaceId.SYSTEM,
    co.cask.cdap.common.service.RetryStrategies.noRetry(), null);
  Transactional transactional = Transactions.createTransactionalWithRetry(
    Transactions.createTransactional(new MultiThreadDatasetCache(
      new SystemDatasetInstantiator(datasetFramework),
      injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM,
      ImmutableMap.<String, String>of(), null, null)),
    RetryStrategies.retryOnConflict(20, 100));
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
  LogPathIdentifier logPathIdentifier =
    new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow");
  LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
  Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
  long currentTime = System.currentTimeMillis();
  for (int i = 10; i <= 100; i += 10) {
    // i is the event time
    fileMetaDataWriter.writeMetaData(logPathIdentifier, i, currentTime, location.append(Integer.toString(i)));
  }
  // for event time 80, add two more entries for the same log path id with later current times
  fileMetaDataWriter.writeMetaData(logPathIdentifier, 80, currentTime + 1, location.append("81"));
  fileMetaDataWriter.writeMetaData(logPathIdentifier, 80, currentTime + 2, location.append("82"));
  // reader test: ten entries at event times 10..100 plus the two extra entries at 80 make 12
  FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
  Assert.assertEquals(12, fileMetadataReader.listFiles(logPathIdentifier, 0, 100).size());
  Assert.assertEquals(5, fileMetadataReader.listFiles(logPathIdentifier, 20, 50).size());
  Assert.assertEquals(2, fileMetadataReader.listFiles(logPathIdentifier, 100, 150).size());
  // should include only the latest file with event start time 80
  List<LogLocation> locationList = fileMetadataReader.listFiles(logPathIdentifier, 81, 85);
  Assert.assertEquals(1, locationList.size());
  Assert.assertEquals(80, locationList.get(0).getEventTimeMs());
  Assert.assertEquals(location.append("82"), locationList.get(0).getLocation());
  Assert.assertEquals(1, fileMetadataReader.listFiles(logPathIdentifier, 150, 1000).size());
}
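A caller consuming the reader's results iterates the returned LogLocation list; a minimal sketch using only the getters the test above already exercises:

// list the metadata entries for the identifier and print each file's
// event start time and filesystem location
List<LogLocation> files = fileMetadataReader.listFiles(logPathIdentifier, 0, 100);
for (LogLocation logLocation : files) {
  System.out.println(logLocation.getEventTimeMs() + " -> " + logLocation.getLocation());
}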
Use of co.cask.cdap.api.Transactional in project cdap by caskdata.
The class AbstractContext, method execute.
/**
* Execute in a transaction with optional retry on conflict.
*/
public void execute(final TxRunnable runnable, boolean retryOnConflict) throws TransactionFailureException {
  ClassLoader oldClassLoader = ClassLoaders.setContextClassLoader(getClass().getClassLoader());
  try {
    Transactional txnl = retryOnConflict
      ? Transactions.createTransactionalWithRetry(transactional, RetryStrategies.retryOnConflict(20, 100))
      : transactional;
    txnl.execute(new TxRunnable() {
      @Override
      public void run(DatasetContext context) throws Exception {
        ClassLoader oldClassLoader = ClassLoaders.setContextClassLoader(getProgramInvocationClassLoader());
        try {
          runnable.run(context);
        } finally {
          ClassLoaders.setContextClassLoader(oldClassLoader);
        }
      }
    });
  } finally {
    ClassLoaders.setContextClassLoader(oldClassLoader);
  }
}
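From a program, this method is typically invoked with an anonymous TxRunnable. A minimal sketch, assuming context is an AbstractContext instance and a Table dataset named "myTable" exists; the dataset name, row, column, and value are illustrative, not from the original:

// runs the body in one transaction; with retryOnConflict = true, conflicts
// are retried per RetryStrategies.retryOnConflict(20, 100) above
context.execute(new TxRunnable() {
  @Override
  public void run(DatasetContext datasetContext) throws Exception {
    Table table = datasetContext.getDataset("myTable");
    table.put(new Put("rowKey").add("column", "value"));
  }
}, true);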
Use of co.cask.cdap.api.Transactional in project cdap by caskdata.
The class FileMetadataCleanerTest, method testFileMetadataWithCommonContextPrefix.
@Test
public void testFileMetadataWithCommonContextPrefix() throws Exception {
  DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
  DatasetManager datasetManager = new DefaultDatasetManager(
    datasetFramework, NamespaceId.SYSTEM,
    co.cask.cdap.common.service.RetryStrategies.noRetry(), null);
  Transactional transactional = Transactions.createTransactionalWithRetry(
    Transactions.createTransactional(new MultiThreadDatasetCache(
      new SystemDatasetInstantiator(datasetFramework),
      injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM,
      ImmutableMap.<String, String>of(), null, null)),
    RetryStrategies.retryOnConflict(20, 100));
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
  FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
  FileMetadataCleaner fileMetadataCleaner = new FileMetadataCleaner(datasetManager, transactional);
  try {
    List<LogPathIdentifier> logPathIdentifiers = new ArrayList<>();
    // the cleaner should correctly scan and delete programs whose names share a
    // common prefix, such as testFlow1 and testFlow10, during cleanup
    for (int i = 1; i <= 20; i++) {
      logPathIdentifiers.add(new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp",
                                                   String.format("testFlow%s", i)));
    }
    LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
    Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
    long currentTime = System.currentTimeMillis();
    long newCurrentTime = currentTime + 100;
    for (int i = 1; i <= 20; i++) {
      LogPathIdentifier identifier = logPathIdentifiers.get(i - 1);
      for (int j = 0; j < 10; j++) {
        fileMetaDataWriter.writeMetaData(identifier, newCurrentTime + j, newCurrentTime + j,
                                         location.append("testFileNew" + Integer.toString(j)));
      }
    }
    List<LogLocation> locations;
    for (int i = 1; i <= 20; i++) {
      locations = fileMetadataReader.listFiles(logPathIdentifiers.get(i - 1), newCurrentTime, newCurrentTime + 10);
      // should include all ten files (testFileNew0..testFileNew9)
      Assert.assertEquals(10, locations.size());
    }
    long tillTime = newCurrentTime + 4;
    List<FileMetadataCleaner.DeletedEntry> deleteEntries =
      fileMetadataCleaner.scanAndGetFilesToDelete(tillTime, TRANSACTION_TIMEOUT);
    // 20 contexts, 5 entries each
    Assert.assertEquals(100, deleteEntries.size());
    for (int i = 1; i <= 20; i++) {
      locations = fileMetadataReader.listFiles(logPathIdentifiers.get(i - 1), newCurrentTime, newCurrentTime + 10);
      // should include only the five remaining files (testFileNew5..testFileNew9)
      Assert.assertEquals(5, locations.size());
      int startIndex = 5;
      for (LogLocation logLocation : locations) {
        Assert.assertEquals(String.format("testFileNew%s", startIndex), logLocation.getLocation().getName());
        startIndex++;
      }
    }
  } finally {
    // clean up metadata
    cleanupMetadata(transactional, datasetManager);
  }
}
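The prefix hazard this test guards against, spelled out: with a naive row-key prefix scan, entries for testFlow1 would also match testFlow10 through testFlow19. An illustration, using the identifiers as constructed in the test:

LogPathIdentifier flow1 = new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow1");
LogPathIdentifier flow10 = new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow10");
// a scan bounded only by the "testFlow1" prefix would pick up both programs'
// entries; the cleaner must delimit its scans so each context is handled
// exactly once, which is what the assertions above verify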
Use of co.cask.cdap.api.Transactional in project cdap by caskdata.
The class FileMetadataCleanerTest, method testScanAndDeleteOldMetadata.
@Test
public void testScanAndDeleteOldMetadata() throws Exception {
  // use the file meta data manager to write metadata in the old format,
  // use the file meta data writer to write metadata in the new format,
  // then scan for old files and make sure only the old metadata entries are deleted
  DatasetFramework datasetFramework = injector.getInstance(DatasetFramework.class);
  DatasetManager datasetManager = new DefaultDatasetManager(
    datasetFramework, NamespaceId.SYSTEM,
    co.cask.cdap.common.service.RetryStrategies.noRetry(), null);
  Transactional transactional = Transactions.createTransactionalWithRetry(
    Transactions.createTransactional(new MultiThreadDatasetCache(
      new SystemDatasetInstantiator(datasetFramework),
      injector.getInstance(TransactionSystemClient.class), NamespaceId.SYSTEM,
      ImmutableMap.<String, String>of(), null, null)),
    RetryStrategies.retryOnConflict(20, 100));
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(datasetManager, transactional);
  FileMetaDataManager fileMetaDataManager = injector.getInstance(FileMetaDataManager.class);
  LoggingContext flowContext =
    LoggingContextHelper.getLoggingContext("testNs", "testApp", "testFlow", ProgramType.FLOW);
  long eventTimestamp = System.currentTimeMillis();
  LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
  Location testLocation = locationFactory.create("testFile");
  try {
    // write 50 entries in the old format
    for (int i = 0; i < 50; i++) {
      fileMetaDataManager.writeMetaData(flowContext, eventTimestamp + i, testLocation);
    }
    LoggingContext wflowContext =
      LoggingContextHelper.getLoggingContext("testNs", "testApp", "testWflow", ProgramType.WORKFLOW);
    fileMetaDataManager.writeMetaData(wflowContext, eventTimestamp, testLocation);
    LoggingContext mrContext =
      LoggingContextHelper.getLoggingContext("testNs", "testApp", "testMR", ProgramType.MAPREDUCE);
    fileMetaDataManager.writeMetaData(mrContext, eventTimestamp, testLocation);
    LoggingContext sparkContext =
      LoggingContextHelper.getLoggingContext("testNs", "testApp", "testSpark", ProgramType.SPARK);
    fileMetaDataManager.writeMetaData(sparkContext, eventTimestamp, testLocation);
    // write 50 entries in the new format
    long newEventTime = eventTimestamp + 1000;
    long currentTime = newEventTime + 1000;
    LogPathIdentifier logPathIdentifier = new LogPathIdentifier("testNs", "testApp", "testFlow");
    for (int i = 50; i < 100; i++) {
      fileMetaDataWriter.writeMetaData(logPathIdentifier, newEventTime + i, currentTime + i, testLocation);
    }
    FileMetaDataReader fileMetaDataReader = injector.getInstance(FileMetaDataReader.class);
    Assert.assertEquals(50, fileMetaDataReader.listFiles(
      LoggingContextHelper.getLogPathIdentifier(flowContext), eventTimestamp - 1, eventTimestamp + 100).size());
    Assert.assertEquals(1, fileMetaDataReader.listFiles(
      LoggingContextHelper.getLogPathIdentifier(wflowContext), eventTimestamp - 1, eventTimestamp + 100).size());
    Assert.assertEquals(1, fileMetaDataReader.listFiles(
      LoggingContextHelper.getLogPathIdentifier(mrContext), eventTimestamp - 1, eventTimestamp + 100).size());
    Assert.assertEquals(1, fileMetaDataReader.listFiles(
      LoggingContextHelper.getLogPathIdentifier(sparkContext), eventTimestamp - 1, eventTimestamp + 100).size());
    FileMetadataCleaner fileMetadataCleaner = new FileMetadataCleaner(datasetManager, transactional);
    fileMetadataCleaner.scanAndDeleteOldMetaData(TRANSACTION_TIMEOUT, CUTOFF_TIME_TRANSACTION);
    // all old-format metadata should be gone; the new-format entries lie outside
    // the queried time range, so every listing below comes back empty
    Assert.assertEquals(0, fileMetaDataReader.listFiles(
      logPathIdentifier, eventTimestamp - 1, eventTimestamp + 100).size());
    Assert.assertEquals(0, fileMetaDataReader.listFiles(
      LoggingContextHelper.getLogPathIdentifier(wflowContext), eventTimestamp - 1, eventTimestamp + 100).size());
    Assert.assertEquals(0, fileMetaDataReader.listFiles(
      LoggingContextHelper.getLogPathIdentifier(mrContext), eventTimestamp - 1, eventTimestamp + 100).size());
    Assert.assertEquals(0, fileMetaDataReader.listFiles(
      LoggingContextHelper.getLogPathIdentifier(sparkContext), eventTimestamp - 1, eventTimestamp + 100).size());
  } finally {
    // clean up metadata
    cleanupMetadata(transactional, datasetManager);
  }
}
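TRANSACTION_TIMEOUT and CUTOFF_TIME_TRANSACTION are constants of the test class that do not appear in this snippet. Hypothetical declarations, purely so the calls above read cleanly; the actual values and semantics live in FileMetadataCleanerTest, and the cutoff presumably stops the scan before the transaction times out:

// hypothetical values, for illustration only
private static final int TRANSACTION_TIMEOUT = 30;     // seconds allowed per cleanup transaction
private static final int CUTOFF_TIME_TRANSACTION = 5;  // seconds before timeout at which scanning stops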