Use of org.apache.hadoop.fs.Path in project hadoop by Apache.
The class EntityGroupFSTimelineStore, method serviceInit.
@Override
protected void serviceInit(Configuration conf) throws Exception {
  metrics = EntityGroupFSTimelineStoreMetrics.create();
  summaryStore = createSummaryStore();
  addService(summaryStore);
  long logRetainSecs = conf.getLong(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETAIN_SECONDS,
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETAIN_SECONDS_DEFAULT);
  logRetainMillis = logRetainSecs * 1000;
  LOG.info("Cleaner set to delete logs older than {} seconds", logRetainSecs);
  long unknownActiveSecs = conf.getLong(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_UNKNOWN_ACTIVE_SECONDS,
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_UNKNOWN_ACTIVE_SECONDS_DEFAULT);
  unknownActiveMillis = unknownActiveSecs * 1000;
  LOG.info("Unknown apps will be treated as complete after {} seconds",
      unknownActiveSecs);
  appCacheMaxSize = conf.getInt(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_APP_CACHE_SIZE,
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_APP_CACHE_SIZE_DEFAULT);
  LOG.info("Application cache size is {}", appCacheMaxSize);
  cachedLogs = Collections.synchronizedMap(
      new LinkedHashMap<TimelineEntityGroupId, EntityCacheItem>(
          appCacheMaxSize + 1, 0.75f, true) {
        @Override
        protected boolean removeEldestEntry(
            Map.Entry<TimelineEntityGroupId, EntityCacheItem> eldest) {
          if (super.size() > appCacheMaxSize) {
            TimelineEntityGroupId groupId = eldest.getKey();
            LOG.debug("Evicting {} due to space limitations", groupId);
            EntityCacheItem cacheItem = eldest.getValue();
            LOG.debug("Force release cache {}.", groupId);
            cacheItem.forceRelease();
            if (cacheItem.getAppLogs().isDone()) {
              appIdLogMap.remove(groupId.getApplicationId());
            }
            metrics.incrCacheEvicts();
            return true;
          }
          return false;
        }
      });
  cacheIdPlugins = loadPlugIns(conf);
  // Initialize yarn client for application status
  yarnClient = createAndInitYarnClient(conf);
  // if non-null, hook its lifecycle up
  addIfService(yarnClient);
  activeRootPath = new Path(conf.get(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR,
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR_DEFAULT));
  doneRootPath = new Path(conf.get(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR,
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR_DEFAULT));
  fs = activeRootPath.getFileSystem(conf);
  CallerContext.setCurrent(
      new CallerContext.Builder(ATS_V15_SERVER_DFS_CALLER_CTXT).build());
  super.serviceInit(conf);
}
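The access-ordered LinkedHashMap above is the standard JDK idiom for a bounded LRU cache: with accessOrder set to true, every get() refreshes an entry's recency, and removeEldestEntry decides eviction on each put. A minimal, self-contained sketch of the same pattern (the String key/value types and the MAX_SIZE constant are illustrative, not part of the store):

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public class LruCacheSketch {
  private static final int MAX_SIZE = 4; // illustrative capacity

  public static void main(String[] args) {
    // accessOrder=true means iteration order runs from least- to
    // most-recently used, so the eldest entry handed to
    // removeEldestEntry is the LRU eviction candidate.
    Map<String, String> cache = Collections.synchronizedMap(
        new LinkedHashMap<String, String>(MAX_SIZE + 1, 0.75f, true) {
          @Override
          protected boolean removeEldestEntry(Map.Entry<String, String> eldest) {
            return size() > MAX_SIZE; // evict once the cap is exceeded
          }
        });
    for (int i = 0; i < 6; i++) {
      cache.put("key" + i, "value" + i);
    }
    System.out.println(cache.keySet()); // key0 and key1 have been evicted
  }
}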
Use of org.apache.hadoop.fs.Path in project hadoop by Apache.
The class TestLogInfo, method testParseEntity.
@Test
public void testParseEntity() throws Exception {
  // Load test data
  TimelineDataManager tdm = PluginStoreTestUtils.getTdmWithMemStore(config);
  EntityLogInfo testLogInfo = new EntityLogInfo(TEST_ATTEMPT_DIR_NAME,
      TEST_ENTITY_FILE_NAME,
      UserGroupInformation.getLoginUser().getUserName());
  testLogInfo.parseForStore(tdm, getTestRootPath(), true, jsonFactory,
      objMapper, fs);
  // Verify the first batch
  PluginStoreTestUtils.verifyTestEntities(tdm);
  // Load new data
  TimelineEntity entityNew = PluginStoreTestUtils.createEntity(
      "id_3", "type_3", 789L, null, null, null, null, "domain_id_1");
  TimelineEntities entityList = new TimelineEntities();
  entityList.addEntity(entityNew);
  writeEntitiesLeaveOpen(entityList,
      new Path(getTestRootPath(TEST_ATTEMPT_DIR_NAME), TEST_ENTITY_FILE_NAME));
  testLogInfo.parseForStore(tdm, getTestRootPath(), true, jsonFactory,
      objMapper, fs);
  // Verify the newly added data
  TimelineEntity entity3 = tdm.getEntity(entityNew.getEntityType(),
      entityNew.getEntityId(), EnumSet.allOf(TimelineReader.Field.class),
      UserGroupInformation.getLoginUser());
  assertNotNull(entity3);
  assertEquals("Failed to read back the new entity",
      entityNew.getStartTime(), entity3.getStartTime());
  tdm.close();
}
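The test locates its entity file purely by Path composition: new Path(parent, child) joins components without touching the file system. A short sketch of that composition against the local file system (the directory and file names here are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PathComposeSketch {
  public static void main(String[] args) throws Exception {
    // With no fs.defaultFS configured this resolves to the local file system.
    FileSystem fs = FileSystem.get(new Configuration());
    Path root = new Path("/tmp/timeline-test");         // illustrative root dir
    Path attemptDir = new Path(root, "attempt_1");      // child under root
    Path entityFile = new Path(attemptDir, "entitylog"); // leaf file
    // create() makes parent directories as needed and overwrites the file.
    try (FSDataOutputStream out = fs.create(entityFile, true)) {
      out.writeBytes("{\"entities\":[]}\n"); // placeholder payload
    }
    System.out.println("exists: " + fs.exists(entityFile));
  }
}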
Use of org.apache.hadoop.fs.Path in project hadoop by Apache.
The class TestEntityGroupFSTimelineStore, method setup.
@Before
public void setup() throws Exception {
  for (ApplicationId appId : sampleAppIds) {
    Path attemptDirPath = new Path(getTestRootPath(appId.toString()),
        getAttemptDirName(appId));
    createTestFiles(appId, attemptDirPath);
  }
  store = new EntityGroupFSTimelineStore();
  if (currTestName.getMethodName().contains("Plugin")) {
    rootDir = GenericTestUtils.getTestDir(getClass().getSimpleName());
    if (!rootDir.exists()) {
      rootDir.mkdirs();
    }
    testJar = JarFinder.makeClassLoaderTestJar(this.getClass(), rootDir,
        "test-runjar.jar", 2048, EntityGroupPlugInForTest.class.getName());
    config.set(YarnConfiguration.TIMELINE_SERVICE_ENTITY_GROUP_PLUGIN_CLASSPATH,
        testJar.getAbsolutePath());
    // add "-org.apache.hadoop." as system classes
    String systemClasses = "-org.apache.hadoop." + ","
        + ApplicationClassLoader.SYSTEM_CLASSES_DEFAULT;
    config.set(YarnConfiguration.TIMELINE_SERVICE_ENTITY_GROUP_PLUGIN_SYSTEM_CLASSES,
        systemClasses);
    config.set(YarnConfiguration.TIMELINE_SERVICE_ENTITY_GROUP_PLUGIN_CLASSES,
        EntityGroupPlugInForTest.class.getName());
  }
  store.init(config);
  store.setFs(fs);
  store.start();
}
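setup() walks the standard Hadoop service lifecycle: populate a Configuration, then init() and start() the service (the serviceInit shown earlier runs during init()). A minimal sketch of that lifecycle with a trivial AbstractService subclass (the service name and config key are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;

public class LifecycleSketch {
  static class DemoService extends AbstractService {
    DemoService() {
      super("DemoService");
    }

    @Override
    protected void serviceInit(Configuration conf) throws Exception {
      // Read configuration during init, mirroring EntityGroupFSTimelineStore
      System.out.println("init: cache size = " + conf.getInt("demo.cache.size", 10));
      super.serviceInit(conf);
    }

    @Override
    protected void serviceStart() throws Exception {
      System.out.println("started");
      super.serviceStart();
    }
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("demo.cache.size", 64); // illustrative key
    DemoService svc = new DemoService();
    svc.init(conf);  // triggers serviceInit
    svc.start();     // triggers serviceStart
    svc.stop();      // triggers serviceStop
  }
}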
Use of org.apache.hadoop.fs.Path in project hadoop by Apache.
The class TestEntityGroupFSTimelineStore, method createTestFiles.
private void createTestFiles(ApplicationId appId, Path attemptDirPath)
    throws IOException {
  TimelineEntities entities = PluginStoreTestUtils.generateTestEntities();
  PluginStoreTestUtils.writeEntities(entities,
      new Path(attemptDirPath, TEST_SUMMARY_LOG_FILE_NAME), fs);
  Map<String, Set<Object>> primaryFilters = new HashMap<>();
  Set<Object> appSet = new HashSet<>();
  appSet.add(appId.toString());
  primaryFilters.put(EntityGroupPlugInForTest.APP_ID_FILTER_NAME, appSet);
  entityNew = PluginStoreTestUtils.createEntity(appId.toString(), "type_3",
      789L, null, null, primaryFilters, null, "domain_id_1");
  TimelineEntities entityList = new TimelineEntities();
  entityList.addEntity(entityNew);
  PluginStoreTestUtils.writeEntities(entityList,
      new Path(attemptDirPath, mainEntityLogFileName), fs);
  // Create an empty domain log file as a marker
  FSDataOutputStream out = fs.create(
      new Path(attemptDirPath, TEST_DOMAIN_LOG_FILE_NAME));
  out.close();
}
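The primaryFilters argument is a plain map from filter name to the set of values the entity carries for that filter. A short sketch of building that structure (the filter name and application id are illustrative):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class PrimaryFilterSketch {
  public static void main(String[] args) {
    Map<String, Set<Object>> primaryFilters = new HashMap<>();
    Set<Object> appIds = new HashSet<>();
    appIds.add("application_1234567890123_0001"); // illustrative app id
    primaryFilters.put("appId", appIds);          // illustrative filter name
    System.out.println(primaryFilters); // {appId=[application_1234567890123_0001]}
  }
}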
Use of org.apache.hadoop.fs.Path in project hadoop by Apache.
The class TestEntityGroupFSTimelineStore, method testCleanLogs.
@Test
public void testCleanLogs() throws Exception {
  // Create test dirs and files
  // Irrelevant file, should not be reclaimed
  String appDirName = mainTestAppId.toString();
  String attemptDirName = ApplicationAttemptId.appAttemptIdStrPrefix
      + appDirName + "_1";
  Path irrelevantFilePath = new Path(testDoneDirPath, "irrelevant.log");
  FSDataOutputStream stream = fs.create(irrelevantFilePath);
  stream.close();
  // Irrelevant directory, should not be reclaimed
  Path irrelevantDirPath = new Path(testDoneDirPath, "irrelevant");
  fs.mkdirs(irrelevantDirPath);
  Path doneAppHomeDir = new Path(new Path(testDoneDirPath, "0000"), "001");
  // First application, untouched after creation
  Path appDirClean = new Path(doneAppHomeDir, appDirName);
  Path attemptDirClean = new Path(appDirClean, attemptDirName);
  fs.mkdirs(attemptDirClean);
  Path filePath = new Path(attemptDirClean, "test.log");
  stream = fs.create(filePath);
  stream.close();
  // Second application, one file touched after creation
  Path appDirHoldByFile = new Path(doneAppHomeDir, appDirName + "1");
  Path attemptDirHoldByFile = new Path(appDirHoldByFile, attemptDirName);
  fs.mkdirs(attemptDirHoldByFile);
  Path filePathHold = new Path(attemptDirHoldByFile, "test1.log");
  stream = fs.create(filePathHold);
  stream.close();
  // Third application, one dir touched after creation
  Path appDirHoldByDir = new Path(doneAppHomeDir, appDirName + "2");
  Path attemptDirHoldByDir = new Path(appDirHoldByDir, attemptDirName);
  fs.mkdirs(attemptDirHoldByDir);
  Path dirPathHold = new Path(attemptDirHoldByDir, "hold");
  fs.mkdirs(dirPathHold);
  // Fourth application, empty dirs
  Path appDirEmpty = new Path(doneAppHomeDir, appDirName + "3");
  Path attemptDirEmpty = new Path(appDirEmpty, attemptDirName);
  fs.mkdirs(attemptDirEmpty);
  Path dirPathEmpty = new Path(attemptDirEmpty, "empty");
  fs.mkdirs(dirPathEmpty);
  // Should retain all logs after this run
  MutableCounterLong dirsCleaned = store.metrics.getLogsDirsCleaned();
  long before = dirsCleaned.value();
  store.cleanLogs(testDoneDirPath, fs, 10000);
  assertTrue(fs.exists(irrelevantDirPath));
  assertTrue(fs.exists(irrelevantFilePath));
  assertTrue(fs.exists(filePath));
  assertTrue(fs.exists(filePathHold));
  assertTrue(fs.exists(dirPathHold));
  assertTrue(fs.exists(dirPathEmpty));
  // Make sure the created dirs are old enough
  Thread.sleep(2000);
  // Touch the second application by appending to its log file
  stream = fs.append(filePathHold);
  stream.writeBytes("append");
  stream.close();
  // Touch the third application by creating a new dir
  fs.mkdirs(new Path(dirPathHold, "holdByMe"));
  store.cleanLogs(testDoneDirPath, fs, 1000);
  // Verification after the second cleaner call
  assertTrue(fs.exists(irrelevantDirPath));
  assertTrue(fs.exists(irrelevantFilePath));
  assertTrue(fs.exists(filePathHold));
  assertTrue(fs.exists(dirPathHold));
  assertTrue(fs.exists(doneAppHomeDir));
  // appDirClean and appDirEmpty should be cleaned up
  assertFalse(fs.exists(appDirClean));
  assertFalse(fs.exists(appDirEmpty));
  assertEquals(before + 2L, dirsCleaned.value());
}
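The cleaner's retain threshold is a modification-time check: an application directory survives as long as something under it was modified within the window, which is why appending to test1.log and creating holdByMe keep the second and third applications alive. A minimal sketch of that age test for a single path (the retention value and path are illustrative; the real cleanLogs walks the done-directory tree and checks children before deleting):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class RetentionCheckSketch {
  public static void main(String[] args) throws Exception {
    long retainMillis = 10_000L; // illustrative retention window
    FileSystem fs = FileSystem.get(new Configuration());
    Path target = new Path("/tmp/timeline-test"); // illustrative path
    if (fs.exists(target)) {
      FileStatus status = fs.getFileStatus(target);
      long ageMillis = System.currentTimeMillis() - status.getModificationTime();
      if (ageMillis > retainMillis) {
        System.out.println(target + " is stale (" + ageMillis + " ms old)");
        // a real cleaner would recurse into children before deleting
      } else {
        System.out.println(target + " is within the retention window");
      }
    }
  }
}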