Use of io.cdap.cdap.logging.meta.FileMetaDataReader in project cdap by caskdata.
The class CDAPLogAppenderTest, method testCDAPLogAppenderSizeBasedRotation:
@Test
public void testCDAPLogAppenderSizeBasedRotation() throws Exception {
  int syncInterval = 1024 * 1024;
  FileMetaDataReader fileMetaDataReader = injector.getInstance(FileMetaDataReader.class);
  CDAPLogAppender cdapLogAppender = new CDAPLogAppender();
  AppenderContext context = new LocalAppenderContext(injector.getInstance(TransactionRunner.class), injector.getInstance(LocationFactory.class), new NoOpMetricsCollectionService());
  context.start();
  cdapLogAppender.setSyncIntervalBytes(syncInterval);
  cdapLogAppender.setMaxFileLifetimeMs(TimeUnit.DAYS.toMillis(1));
  cdapLogAppender.setMaxFileSizeInBytes(500);
  cdapLogAppender.setDirPermissions("750");
  cdapLogAppender.setFilePermissions("640");
  cdapLogAppender.setFileRetentionDurationDays(1);
  cdapLogAppender.setLogCleanupIntervalMins(10);
  cdapLogAppender.setFileCleanupBatchSize(100);
  cdapLogAppender.setContext(context);
  cdapLogAppender.start();
  Map<String, String> properties = new HashMap<>();
  properties.put(NamespaceLoggingContext.TAG_NAMESPACE_ID, "testSizeRotation");
  properties.put(ApplicationLoggingContext.TAG_APPLICATION_ID, "testApp");
  properties.put(UserServiceLoggingContext.TAG_USER_SERVICE_ID, "testService");
  long currentTimeMillisEvent1 = System.currentTimeMillis();
  LoggingEvent event1 = getLoggingEvent("io.cdap.Test1", (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME), Level.ERROR, "test message 1", properties);
  event1.setTimeStamp(currentTimeMillisEvent1);
  cdapLogAppender.doAppend(event1);
  // sync updates the recorded file size, so the next append sees that the 500-byte limit is exceeded
  cdapLogAppender.sync();
  long currentTimeMillisEvent2 = System.currentTimeMillis();
  LoggingEvent event2 = getLoggingEvent("io.cdap.Test2", (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(Logger.ROOT_LOGGER_NAME), Level.ERROR, "test message 2", properties);
  event2.setTimeStamp(currentTimeMillisEvent2);
  // One more append triggers rotation to a new file: the size limit is very low and the first append already exceeded it.
  cdapLogAppender.doAppend(event2);
  cdapLogAppender.stop();
  context.stop();
  try {
    List<LogLocation> files = fileMetaDataReader.listFiles(cdapLogAppender.getLoggingPath(properties), 0, Long.MAX_VALUE);
    Assert.assertEquals(2, files.size());
    assertLogEventDetails(event1, files.get(0));
    assertLogEventDetails(event2, files.get(1));
    Assert.assertEquals(currentTimeMillisEvent1, files.get(0).getEventTimeMs());
    Assert.assertEquals(currentTimeMillisEvent2, files.get(1).getEventTimeMs());
    Assert.assertTrue(files.get(0).getFileCreationTimeMs() >= currentTimeMillisEvent1);
    Assert.assertTrue(files.get(1).getFileCreationTimeMs() >= currentTimeMillisEvent2);
  } catch (Exception e) {
    Assert.fail("Unexpected exception: " + e.getMessage());
  }
}
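The verification pattern at the end of the test generalizes: resolve the LogPathIdentifier from the logging-context properties, list every file, and check the ordering invariants the assertions above rely on. Here is a minimal sketch as a helper on the same test class, assuming the test class's injector field and only the APIs shown above; the helper name assertRotatedInOrder is hypothetical.

private void assertRotatedInOrder(CDAPLogAppender appender, Map<String, String> properties) throws Exception {
  FileMetaDataReader reader = injector.getInstance(FileMetaDataReader.class);
  // getLoggingPath(...) maps the logging-context tags in `properties` to the
  // LogPathIdentifier the appender wrote under; the full time range lists every file.
  List<LogLocation> files = reader.listFiles(appender.getLoggingPath(properties), 0, Long.MAX_VALUE);
  long previousEventTime = Long.MIN_VALUE;
  for (LogLocation file : files) {
    // Rotation is ordered: each file's first event is at or after the previous file's.
    Assert.assertTrue(file.getEventTimeMs() >= previousEventTime);
    // Mirrors the test's assertion: the file was created at or after its first event's timestamp.
    Assert.assertTrue(file.getFileCreationTimeMs() >= file.getEventTimeMs());
    previousEventTime = file.getEventTimeMs();
  }
}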
Use of io.cdap.cdap.logging.meta.FileMetaDataReader in project cdap by caskdata.
The class FileMetadataCleanerTest, method testFileMetadataWithCommonContextPrefix:
@Test
public void testFileMetadataWithCommonContextPrefix() throws Exception {
  TransactionRunner transactionRunner = injector.getInstance(TransactionRunner.class);
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(transactionRunner);
  FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
  FileMetadataCleaner fileMetadataCleaner = new FileMetadataCleaner(transactionRunner);
  try {
    List<LogPathIdentifier> logPathIdentifiers = new ArrayList<>();
    // Cleanup should correctly scan and delete programs whose names share a common prefix, e.g. testFlow1 and testFlow10.
    for (int i = 1; i <= 20; i++) {
      logPathIdentifiers.add(new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", String.format("testFlow%s", i)));
    }
    LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
    Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
    long currentTime = System.currentTimeMillis();
    long newCurrentTime = currentTime + 100;
    for (int i = 1; i <= 20; i++) {
      LogPathIdentifier identifier = logPathIdentifiers.get(i - 1);
      for (int j = 0; j < 10; j++) {
        fileMetaDataWriter.writeMetaData(identifier, newCurrentTime + j, newCurrentTime + j, location.append("testFileNew" + Integer.toString(j)));
      }
    }
    List<LogLocation> locations;
    for (int i = 1; i <= 20; i++) {
      locations = fileMetadataReader.listFiles(logPathIdentifiers.get(i - 1), newCurrentTime, newCurrentTime + 10);
      // should include all ten files, testFileNew0..testFileNew9
      Assert.assertEquals(10, locations.size());
    }
    long tillTime = newCurrentTime + 4;
    List<FileMetadataCleaner.DeletedEntry> deleteEntries = fileMetadataCleaner.scanAndGetFilesToDelete(tillTime, 100);
    // 20 contexts, 5 entries each
    Assert.assertEquals(100, deleteEntries.size());
    for (int i = 1; i <= 20; i++) {
      locations = fileMetadataReader.listFiles(logPathIdentifiers.get(i - 1), newCurrentTime, newCurrentTime + 10);
      // should include the remaining files, testFileNew5..testFileNew9
      Assert.assertEquals(5, locations.size());
      int startIndex = 5;
      for (LogLocation logLocation : locations) {
        Assert.assertEquals(String.format("testFileNew%s", startIndex), logLocation.getLocation().getName());
        startIndex++;
      }
    }
  } finally {
    // clean up the metadata written by this test
    deleteAllMetaEntries(transactionRunner);
  }
}
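scanAndGetFilesToDelete is batched: one call returns at most the requested number of entries, and the test sizes the batch (100) to cover everything in a single pass. A production-style cleanup would loop until a scan comes back empty. Here is a minimal sketch of that loop, assuming (as the test's post-cleanup listFiles results imply) that each scan removes the metadata entries it returns; the helper name cleanUpTo is hypothetical.

private int cleanUpTo(FileMetadataCleaner cleaner, long tillTime, int batchSize) {
  int totalDeleted = 0;
  while (true) {
    // Each scan returns (and, per the test's observed behavior, removes) at most
    // batchSize metadata entries whose event time falls at or before tillTime.
    List<FileMetadataCleaner.DeletedEntry> batch = cleaner.scanAndGetFilesToDelete(tillTime, batchSize);
    if (batch.isEmpty()) {
      return totalDeleted;
    }
    totalDeleted += batch.size();
  }
}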
Use of io.cdap.cdap.logging.meta.FileMetaDataReader in project cdap by caskdata.
The class DistributedLogFrameworkTest, method testFramework:
@Test
public void testFramework() throws Exception {
  DistributedLogFramework framework = injector.getInstance(DistributedLogFramework.class);
  CConfiguration cConf = injector.getInstance(CConfiguration.class);
  framework.startAndWait();
  // Send some logs to Kafka.
  LoggingContext context = new ServiceLoggingContext(NamespaceId.SYSTEM.getNamespace(), Constants.Logging.COMPONENT_NAME, "test");
  // Make sure all events get flushed in the same batch.
  long eventTimeBase = System.currentTimeMillis() + cConf.getInt(Constants.Logging.PIPELINE_EVENT_DELAY_MS);
  final int msgCount = 50;
  for (int i = 0; i < msgCount; i++) {
    // Publish logs in descending timestamp order.
    publishLog(cConf.get(Constants.Logging.KAFKA_TOPIC), context, ImmutableList.of(createLoggingEvent("io.cdap.test." + i, Level.INFO, "Testing " + i, eventTimeBase - i)));
  }
  // Read the logs back. They should come back sorted by timestamp.
  final FileMetaDataReader metaDataReader = injector.getInstance(FileMetaDataReader.class);
  Tasks.waitFor(true, () -> {
    List<LogLocation> locations = metaDataReader.listFiles(new LogPathIdentifier(NamespaceId.SYSTEM.getNamespace(), Constants.Logging.COMPONENT_NAME, "test"), 0, Long.MAX_VALUE);
    if (locations.size() != 1) {
      return false;
    }
    LogLocation location = locations.get(0);
    int i = 0;
    try {
      try (CloseableIterator<LogEvent> iter = location.readLog(Filter.EMPTY_FILTER, 0, Long.MAX_VALUE, msgCount)) {
        while (iter.hasNext()) {
          String expectedMsg = "Testing " + (msgCount - i - 1);
          LogEvent event = iter.next();
          if (!expectedMsg.equals(event.getLoggingEvent().getMessage())) {
            return false;
          }
          i++;
        }
        return i == msgCount;
      }
    } catch (Exception e) {
      // Reading can fail in the window between the metadata write and the time when
      // the actual content is flushed to the file; return false so waitFor retries.
      return false;
    }
  }, 10, TimeUnit.SECONDS, msgCount, TimeUnit.MILLISECONDS);
  framework.stopAndWait();
  String kafkaTopic = cConf.get(Constants.Logging.KAFKA_TOPIC);
  // Check that the checkpoint is persisted correctly. Since all messages are processed,
  // the next offset in the checkpoint should equal the message count.
  CheckpointManager<KafkaOffset> checkpointManager = getCheckpointManager(kafkaTopic);
  Checkpoint<KafkaOffset> checkpoint = checkpointManager.getCheckpoint(0);
  Assert.assertEquals(msgCount, checkpoint.getOffset().getNextOffset());
}
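The read-back loop inside Tasks.waitFor is the reusable piece: open the single LogLocation and drain its iterator. Here it is extracted as a helper, assuming the same imports as the test plus java.util.ArrayList; the helper name readMessages is hypothetical.

private List<String> readMessages(LogLocation location, int maxEvents) throws Exception {
  List<String> messages = new ArrayList<>();
  // Filter.EMPTY_FILTER over the full time range returns every event in the file,
  // capped at maxEvents; try-with-resources closes the iterator and its file handle.
  try (CloseableIterator<LogEvent> iter = location.readLog(Filter.EMPTY_FILTER, 0, Long.MAX_VALUE, maxEvents)) {
    while (iter.hasNext()) {
      messages.add(iter.next().getLoggingEvent().getMessage());
    }
  }
  return messages;
}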
Use of io.cdap.cdap.logging.meta.FileMetaDataReader in project cdap by caskdata.
The class FileMetadataTest, method testFileMetadataReadWrite:
@Test
public void testFileMetadataReadWrite() throws Exception {
  TransactionRunner transactionRunner = injector.getInstance(TransactionRunner.class);
  FileMetaDataWriter fileMetaDataWriter = new FileMetaDataWriter(transactionRunner);
  LogPathIdentifier logPathIdentifier = new LogPathIdentifier(NamespaceId.DEFAULT.getNamespace(), "testApp", "testFlow");
  LocationFactory locationFactory = injector.getInstance(LocationFactory.class);
  Location location = locationFactory.create(TMP_FOLDER.newFolder().getPath()).append("/logs");
  long currentTime = System.currentTimeMillis();
  for (int i = 10; i <= 100; i += 10) {
    // i is the event time
    fileMetaDataWriter.writeMetaData(logPathIdentifier, i, currentTime, location.append(Integer.toString(i)));
  }
  // For event time 80, add two more entries under the same log path identifier with later creation times.
  fileMetaDataWriter.writeMetaData(logPathIdentifier, 80, currentTime + 1, location.append("81"));
  fileMetaDataWriter.writeMetaData(logPathIdentifier, 80, currentTime + 2, location.append("82"));
  // reader test
  FileMetaDataReader fileMetadataReader = injector.getInstance(FileMetaDataReader.class);
  Assert.assertEquals(12, fileMetadataReader.listFiles(logPathIdentifier, 0, 100).size());
  Assert.assertEquals(5, fileMetadataReader.listFiles(logPathIdentifier, 20, 50).size());
  Assert.assertEquals(2, fileMetadataReader.listFiles(logPathIdentifier, 100, 150).size());
  // The (81, 85) query should return only the latest file whose event start time is 80.
  List<LogLocation> locationList = fileMetadataReader.listFiles(logPathIdentifier, 81, 85);
  Assert.assertEquals(1, locationList.size());
  Assert.assertEquals(80, locationList.get(0).getEventTimeMs());
  Assert.assertEquals(location.append("82"), locationList.get(0).getLocation());
  Assert.assertEquals(1, fileMetadataReader.listFiles(logPathIdentifier, 150, 1000).size());
}
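The (81, 85) assertions encode the reader's selection rule: a file whose start time precedes the query window can still contain events inside it, so listFiles keeps the latest file starting before the window in addition to any files starting inside it, and a later creation time wins among entries sharing an event time (which is why "82" beats "81"). The following sketch expresses that rule over an in-memory list; it is inferred from the assertions above, not taken from the actual implementation, and the helper name selectFiles is hypothetical.

// Assumes `sorted` is ordered by (eventTimeMs, fileCreationTimeMs) ascending.
private List<LogLocation> selectFiles(List<LogLocation> sorted, long startTs, long endTs) {
  List<LogLocation> result = new ArrayList<>();
  LogLocation latestBefore = null;
  for (LogLocation file : sorted) {
    if (file.getEventTimeMs() < startTs) {
      // Keep only the last such file: latest start time, then latest creation time.
      latestBefore = file;
    } else if (file.getEventTimeMs() <= endTs) {
      result.add(file);
    }
  }
  if (latestBefore != null) {
    result.add(0, latestBefore);
  }
  return result;
}

Replaying the test's queries against this rule reproduces every assertion: (0, 100) yields all 12 entries, (20, 50) yields file "10" plus "20".."50", (100, 150) yields "90" plus "100", and (81, 85) yields only "82".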