Use of io.cdap.cdap.logging.meta.FileMetaDataWriter in project cdap by caskdata.
The class FileMetadataTest, method testFileMetadataReadWriteAcrossFormats.
@Test
public void testFileMetadataReadWriteAcrossFormats() throws Exception {
  TransactionRunner transactionRunner = injector.getInstance(TransactionRunner.class);
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(transactionRunner);
  LogPathIdentifier logPathIdentifier =
    new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow");
  LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
  Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
  long currentTime = System.currentTimeMillis();
  long eventTime = currentTime + 20;
  long newCurrentTime = currentTime + 100;
  // write 10 files in the new format
  for (int i = 1; i <= 10; i++) {
    fileMetaDataWriter.writeMetaData(logPathIdentifier, eventTime + i, newCurrentTime + i,
                                     location.append("testFileNew" + Integer.toString(i)));
  }
  // reader test
  FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
  // scan only within the new files' time range
  List<LogLocation> locations = fileMetadataReader.listFiles(logPathIdentifier, eventTime + 2, eventTime + 6);
  // should include 6 files (testFileNew1..testFileNew6)
  Assert.assertEquals(6, locations.size());
  for (LogLocation logLocation : locations) {
    Assert.assertEquals(LogLocation.VERSION_1, logLocation.getFrameworkVersion());
  }
  // scan a time range that spans both formats
  locations = fileMetadataReader.listFiles(logPathIdentifier, currentTime + 2, eventTime + 6);
  // should include the 6 files from the new range (testFileNew1..testFileNew6)
  Assert.assertEquals(6, locations.size());
  for (int i = 0; i < locations.size(); i++) {
    Assert.assertEquals(LogLocation.VERSION_1, locations.get(i).getFrameworkVersion());
    Assert.assertEquals(location.append("testFileNew" + Integer.toString(i + 1)), locations.get(i).getLocation());
  }
}
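For orientation, the round trip this test exercises can be reduced to a few calls. The sketch below assumes the same injector-provided bindings the test uses (TransactionRunner, FileMetaDataReader, LocationFactory); the identifier names and the "logs" directory are illustrative, not part of the CDAP API.

// Minimal sketch of the write-then-list round trip, under the assumptions above.
TransactionRunner runner = injector.getInstance(TransactionRunner.class);
FileMetaDataWriter writer = new FileMetaDataWriter(runner);
FileMetaDataReader reader = injector.getInstance(FileMetaDataReader.class);
LogPathIdentifier id = new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "myApp", "myProgram"); // illustrative names
Location logsDir = injector.getInstance(LocationFactory.class).create("logs"); // illustrative directory
long now = System.currentTimeMillis();
// register one log file: (identifier, event time, creation time, file location), as in the tests above
writer.writeMetaData(id, now, now, logsDir.append("file-0"));
// later, list the files whose metadata falls in a time range
for (LogLocation logLocation : reader.listFiles(id, now - 1000, now + 1000)) {
  System.out.println(logLocation.getLocation());
}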
Use of io.cdap.cdap.logging.meta.FileMetaDataWriter in project cdap by caskdata.
The class LogCleanerTest, method testLogCleanup.
@Test
public void testLogCleanup() throws Exception {
  TransactionRunner transactionRunner = injector.getInstance(TransactionRunner.class);
  FileMetadataCleaner fileMetadataCleaner = new FileMetadataCleaner(transactionRunner);
  LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
  long currentTime = System.currentTimeMillis();
  LogPathIdentifier logPathIdentifier = new LogPathIdentifier("testNs", "testApp", "testEntity");
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(transactionRunner);
  long startTime = currentTime - 5000;
  Location dirLocation = locationFactory.create("logs");
  dirLocation.mkdirs();
  // create 20 files, add them in a past time range
  for (int i = 0; i < 20; i++) {
    Location location = dirLocation.append("test" + i);
    location.createNew();
    fileMetaDataWriter.writeMetaData(logPathIdentifier, startTime + i, startTime + i, location);
  }
  Assert.assertEquals(20, dirLocation.list().size());
  LogCleaner logCleaner = new LogCleaner(fileMetadataCleaner, locationFactory, 100, 60);
  logCleaner.run();
  FileMetaDataReader fileMetaDataReader = injector.getInstance(FileMetaDataReader.class);
  // all metadata should be deleted
  Assert.assertEquals(0, fileMetaDataReader.listFiles(logPathIdentifier, 0, System.currentTimeMillis()).size());
  // we are not asserting file existence, as the delete could fail and file deletion is not guaranteed
}
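The test drives a single cleanup pass by calling run() directly. In a deployment the cleaner would presumably run on a schedule; the sketch below only illustrates that wiring with a plain java.util.concurrent ScheduledExecutorService, and the one-minute interval as well as the two numeric constructor arguments (copied verbatim from the test) are assumptions rather than documented CDAP configuration.

// Hypothetical periodic wiring; the test above invokes run() once by hand.
LogCleaner logCleaner = new LogCleaner(fileMetadataCleaner, locationFactory, 100, 60);
ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
// repeat the cleanup pass every minute (interval chosen purely for illustration)
executor.scheduleAtFixedRate(logCleaner::run, 0, 1, TimeUnit.MINUTES);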
Use of io.cdap.cdap.logging.meta.FileMetaDataWriter in project cdap by caskdata.
The class FileMetadataCleanerTest, method testWithBatchSizeLargerThanNumOfFiles.
@Test
public void testWithBatchSizeLargerThanNumOfFiles() throws Exception {
  TransactionRunner transactionRunner = injector.getInstance(TransactionRunner.class);
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(transactionRunner);
  FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
  FileMetadataCleaner fileMetadataCleaner = new FileMetadataCleaner(transactionRunner);
  try {
    LogPathIdentifier identifier =
      new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", String.format("testFlow%s", 0));
    LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
    Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
    long currentTime = System.currentTimeMillis();
    long newCurrentTime = currentTime + 100;
    for (int j = 0; j < 10; j++) {
      fileMetaDataWriter.writeMetaData(identifier, newCurrentTime + j, newCurrentTime + j,
                                       location.append("testFileNew" + Integer.toString(j)));
    }
    List<LogLocation> locations;
    locations = fileMetadataReader.listFiles(identifier, newCurrentTime, newCurrentTime + 10);
    // should include all 10 files written at newCurrentTime + (0..9)
    Assert.assertEquals(10, locations.size());
    long tillTime = newCurrentTime + 4;
    // the batch size (1000) is larger than the number of entries, so one scan covers everything
    List<FileMetadataCleaner.DeletedEntry> deleteEntries = fileMetadataCleaner.scanAndGetFilesToDelete(tillTime, 1000);
    Assert.assertEquals(5, deleteEntries.size());
  } finally {
    // clean up metadata
    deleteAllMetaEntries(transactionRunner);
  }
}
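For reference, the counting behind the final assertion: ten metadata entries are written with timestamps newCurrentTime + 0..9 and tillTime is newCurrentTime + 4, so five entries (+0 through +4) fall at or before tillTime, while the 1000 batch size exceeds the whole table and a single scan returns them all. A minimal sketch of that arithmetic (not the cleaner's actual scan logic), assuming eligibility means timestamp <= tillTime:

// Count the entries that should be returned for deletion under the assumption above.
int expected = 0;
for (int j = 0; j < 10; j++) {
  if (newCurrentTime + j <= tillTime) {
    expected++; // j = 0..4, i.e. 5 entries
  }
}
Assert.assertEquals(5, expected);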
Use of io.cdap.cdap.logging.meta.FileMetaDataWriter in project cdap by caskdata.
The class LogFileManagerTest, method testLogFileManager.
@Test
public void testLogFileManager() throws Exception {
  int syncInterval = 1024 * 1024;
  long maxLifeTimeMs = 50;
  long maxFileSizeInBytes = 104857600;
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(injector.getInstance(TransactionRunner.class));
  LogFileManager logFileManager = new LogFileManager("700", "600", maxLifeTimeMs, maxFileSizeInBytes, syncInterval,
                                                     fileMetaDataWriter, injector.getInstance(LocationFactory.class));
  LogPathIdentifier logPathIdentifier = new LogPathIdentifier("test", "testApp", "testFlow");
  long timestamp = System.currentTimeMillis();
  LogFileOutputStream outputStream = logFileManager.getLogFileOutputStream(logPathIdentifier, timestamp);
  LoggingEvent event1 = getLoggingEvent("io.cdap.Test1",
                                        (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME),
                                        Level.ERROR, "test message 1");
  outputStream.append(event1);
  // check the active stream directly instead of calling getLogFileOutputStream, to avoid a race if the test machine is slow
  Assert.assertNotNull(logFileManager.getActiveOutputStream(logPathIdentifier));
  TimeUnit.MILLISECONDS.sleep(60);
  logFileManager.flush();
  // the stream has outlived maxLifeTimeMs, so flush closes it and no active stream remains
  Assert.assertNull(logFileManager.getActiveOutputStream(logPathIdentifier));
  LogFileOutputStream newLogOutStream = logFileManager.getLogFileOutputStream(logPathIdentifier, timestamp);
  // the new stream must point at a different location
  Assert.assertNotEquals(outputStream.getLocation(), newLogOutStream.getLocation());
}
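The numbers matter here: maxLifeTimeMs is 50 ms and the test sleeps 60 ms, so by the time flush() runs the stream has exceeded its maximum lifetime, is closed, and the next request opens a file at a new location. The loop below is a rough, hedged illustration of how a caller might drive the same API surface (getLogFileOutputStream, append, flush); the 'events' collection is hypothetical and the flush cadence is up to the caller, not prescribed by LogFileManager.

// Illustrative writer loop over the API exercised by the test; not LogFileManager's internals.
for (LoggingEvent event : events) { // 'events' is a hypothetical batch of logback events
  LogFileOutputStream stream = logFileManager.getLogFileOutputStream(logPathIdentifier, event.getTimeStamp());
  stream.append(event);
}
// flush persists buffered data; streams past maxLifeTimeMs are closed, and the next write starts a new file
logFileManager.flush();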
Use of io.cdap.cdap.logging.meta.FileMetaDataWriter in project cdap by caskdata.
The class FileMetadataCleanerTest, method testFileMetadataWithCommonContextPrefix.
@Test
public void testFileMetadataWithCommonContextPrefix() throws Exception {
  TransactionRunner transactionRunner = injector.getInstance(TransactionRunner.class);
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(transactionRunner);
  FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
  FileMetadataCleaner fileMetadataCleaner = new FileMetadataCleaner(transactionRunner);
  try {
    List<LogPathIdentifier> logPathIdentifiers = new ArrayList<>();
    // cleanup should correctly scan and delete programs whose names share a common prefix, e.g. testFlow1 and testFlow10
    for (int i = 1; i <= 20; i++) {
      logPathIdentifiers.add(new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp",
                                                   String.format("testFlow%s", i)));
    }
    LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
    Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
    long currentTime = System.currentTimeMillis();
    long newCurrentTime = currentTime + 100;
    for (int i = 1; i <= 20; i++) {
      LogPathIdentifier identifier = logPathIdentifiers.get(i - 1);
      for (int j = 0; j < 10; j++) {
        fileMetaDataWriter.writeMetaData(identifier, newCurrentTime + j, newCurrentTime + j,
                                         location.append("testFileNew" + Integer.toString(j)));
      }
    }
    List<LogLocation> locations;
    for (int i = 1; i <= 20; i++) {
      locations = fileMetadataReader.listFiles(logPathIdentifiers.get(i - 1), newCurrentTime, newCurrentTime + 10);
      // each context should list all 10 files written at newCurrentTime + (0..9)
      Assert.assertEquals(10, locations.size());
    }
    long tillTime = newCurrentTime + 4;
    List<FileMetadataCleaner.DeletedEntry> deleteEntries = fileMetadataCleaner.scanAndGetFilesToDelete(tillTime, 100);
    // 20 contexts, 5 entries each
    Assert.assertEquals(100, deleteEntries.size());
    for (int i = 1; i <= 20; i++) {
      locations = fileMetadataReader.listFiles(logPathIdentifiers.get(i - 1), newCurrentTime, newCurrentTime + 10);
      // each context should keep the files written at newCurrentTime + (5..9)
      Assert.assertEquals(5, locations.size());
      int startIndex = 5;
      for (LogLocation logLocation : locations) {
        Assert.assertEquals(String.format("testFileNew%s", startIndex), logLocation.getLocation().getName());
        startIndex++;
      }
    }
  } finally {
    // clean up metadata
    deleteAllMetaEntries(transactionRunner);
  }
}
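The detail this test guards against is easy to state: because the program names overlap as raw strings (testFlow1 is a prefix of testFlow10 through testFlow19), a scan keyed on bare string prefixes could bleed one program's metadata into another's, so cleanup has to distinguish contexts by exact key boundaries. A two-line illustration of the pitfall:

// "testFlow1" is a string prefix of "testFlow10", yet they are different program contexts.
Assert.assertTrue("testFlow10".startsWith("testFlow1"));
Assert.assertNotEquals("testFlow1", "testFlow10");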