Use of org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId in project hadoop by Apache.
The class EntityGroupFSTimelineStore, method serviceInit.
@Override
protected void serviceInit(Configuration conf) throws Exception {
  metrics = EntityGroupFSTimelineStoreMetrics.create();
  summaryStore = createSummaryStore();
  addService(summaryStore);
  long logRetainSecs = conf.getLong(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETAIN_SECONDS,
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETAIN_SECONDS_DEFAULT);
  logRetainMillis = logRetainSecs * 1000;
  LOG.info("Cleaner set to delete logs older than {} seconds", logRetainSecs);
  long unknownActiveSecs = conf.getLong(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_UNKNOWN_ACTIVE_SECONDS,
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_UNKNOWN_ACTIVE_SECONDS_DEFAULT);
  unknownActiveMillis = unknownActiveSecs * 1000;
  LOG.info("Unknown apps will be treated as complete after {} seconds",
      unknownActiveSecs);
  appCacheMaxSize = conf.getInt(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_APP_CACHE_SIZE,
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_APP_CACHE_SIZE_DEFAULT);
  LOG.info("Application cache size is {}", appCacheMaxSize);
  cachedLogs = Collections.synchronizedMap(
      new LinkedHashMap<TimelineEntityGroupId, EntityCacheItem>(
          appCacheMaxSize + 1, 0.75f, true) {
        @Override
        protected boolean removeEldestEntry(
            Map.Entry<TimelineEntityGroupId, EntityCacheItem> eldest) {
          if (super.size() > appCacheMaxSize) {
            TimelineEntityGroupId groupId = eldest.getKey();
            LOG.debug("Evicting {} due to space limitations", groupId);
            EntityCacheItem cacheItem = eldest.getValue();
            LOG.debug("Force release cache {}.", groupId);
            cacheItem.forceRelease();
            if (cacheItem.getAppLogs().isDone()) {
              appIdLogMap.remove(groupId.getApplicationId());
            }
            metrics.incrCacheEvicts();
            return true;
          }
          return false;
        }
      });
  cacheIdPlugins = loadPlugIns(conf);
  // Initialize yarn client for application status
  yarnClient = createAndInitYarnClient(conf);
  // if non-null, hook its lifecycle up
  addIfService(yarnClient);
  activeRootPath = new Path(conf.get(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR,
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR_DEFAULT));
  doneRootPath = new Path(conf.get(
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR,
      YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_DONE_DIR_DEFAULT));
  fs = activeRootPath.getFileSystem(conf);
  CallerContext.setCurrent(
      new CallerContext.Builder(ATS_V15_SERVER_DFS_CALLER_CTXT).build());
  super.serviceInit(conf);
}
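The cachedLogs map above gets LRU behavior from LinkedHashMap's access-order constructor (the boolean true argument) plus the removeEldestEntry hook, which also gives the store a chance to release a cache item before it is dropped. A minimal standalone sketch of the same pattern, with a hypothetical CacheValue standing in for EntityCacheItem:

import java.util.Collections;
import java.util.LinkedHashMap;
import java.util.Map;

public class LruEvictionSketch {
  // Hypothetical stand-in for EntityCacheItem.
  static class CacheValue {
    void release() { System.out.println("released"); }
  }

  public static void main(String[] args) {
    final int maxSize = 2;
    Map<String, CacheValue> cache = Collections.synchronizedMap(
        new LinkedHashMap<String, CacheValue>(maxSize + 1, 0.75f, true) {
          @Override
          protected boolean removeEldestEntry(
              Map.Entry<String, CacheValue> eldest) {
            if (size() > maxSize) {
              eldest.getValue().release(); // clean up before eviction
              return true;
            }
            return false;
          }
        });
    cache.put("a", new CacheValue());
    cache.put("b", new CacheValue());
    cache.get("a");                   // touch "a" so "b" becomes eldest
    cache.put("c", new CacheValue()); // evicts "b", printing "released"
    System.out.println(cache.keySet()); // [a, c]
  }
}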
Use of org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId in project hadoop by Apache.
The class EntityGroupFSTimelineStore, method getTimelineStoresFromCacheIds.
private List<TimelineStore> getTimelineStoresFromCacheIds(
    Set<TimelineEntityGroupId> groupIds, String entityType,
    List<EntityCacheItem> cacheItems) throws IOException {
  List<TimelineStore> stores = new LinkedList<TimelineStore>();
  // Collect the non-null stores for the group ids.
  for (TimelineEntityGroupId groupId : groupIds) {
    TimelineStore storeForId = getCachedStore(groupId, cacheItems);
    if (storeForId != null) {
      LOG.debug("Adding {} as a store for the query", storeForId.getName());
      stores.add(storeForId);
      metrics.incrGetEntityToDetailOps();
    }
  }
  if (stores.size() == 0) {
    LOG.debug("Using summary store for {}", entityType);
    stores.add(this.summaryStore);
    metrics.incrGetEntityToSummaryOps();
  }
  return stores;
}
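A minimal sketch of the same fall-back pattern in isolation (Store is a hypothetical stand-in for TimelineStore, and the plain map lookup stands in for getCachedStore, which in the real class opens the group's cache item on demand):

import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class StoreFallbackSketch {
  // Hypothetical stand-in for TimelineStore.
  interface Store {}

  static List<Store> storesFor(Set<String> groupIds, Map<String, Store> cache,
      Store summaryStore) {
    List<Store> stores = new LinkedList<>();
    for (String groupId : groupIds) {
      Store storeForId = cache.get(groupId); // getCachedStore analogue
      if (storeForId != null) {
        stores.add(storeForId); // a detail store exists for this group
      }
    }
    if (stores.isEmpty()) {
      stores.add(summaryStore); // no detail stores: fall back to summary
    }
    return stores;
  }
}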
Use of org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId in project hadoop by Apache.
The class TestTimelineEntityGroupId, method testTimelineEntityGroupId.
@Test
public void testTimelineEntityGroupId() {
  ApplicationId appId1 = ApplicationId.newInstance(1234, 1);
  ApplicationId appId2 = ApplicationId.newInstance(1234, 2);
  TimelineEntityGroupId group1 = TimelineEntityGroupId.newInstance(appId1, "1");
  TimelineEntityGroupId group2 = TimelineEntityGroupId.newInstance(appId1, "2");
  TimelineEntityGroupId group3 = TimelineEntityGroupId.newInstance(appId2, "1");
  TimelineEntityGroupId group4 = TimelineEntityGroupId.newInstance(appId1, "1");
  Assert.assertTrue(group1.equals(group4));
  Assert.assertFalse(group1.equals(group2));
  Assert.assertFalse(group1.equals(group3));
  Assert.assertTrue(group1.compareTo(group4) == 0);
  Assert.assertTrue(group1.compareTo(group2) < 0);
  Assert.assertTrue(group1.compareTo(group3) < 0);
  Assert.assertTrue(group1.hashCode() == group4.hashCode());
  Assert.assertFalse(group1.hashCode() == group2.hashCode());
  Assert.assertFalse(group1.hashCode() == group3.hashCode());
  Assert.assertEquals("timelineEntityGroupId_1234_1_1", group1.toString());
  Assert.assertEquals(
      TimelineEntityGroupId.fromString("timelineEntityGroupId_1234_1_1"),
      group1);
}
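The compareTo assertions above rely on TimelineEntityGroupId being Comparable, so instances can go straight into ordered collections; a brief sketch (the printed format follows the toString assertion in the test):

import java.util.TreeSet;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId;

public class GroupIdOrderingSketch {
  public static void main(String[] args) {
    ApplicationId appId = ApplicationId.newInstance(1234, 1);
    TreeSet<TimelineEntityGroupId> ids = new TreeSet<>();
    ids.add(TimelineEntityGroupId.newInstance(appId, "2"));
    ids.add(TimelineEntityGroupId.newInstance(appId, "1"));
    // Iterates in compareTo order: group "1" before group "2".
    System.out.println(ids.first()); // timelineEntityGroupId_1234_1_1
  }
}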
Use of org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId in project hadoop by Apache.
The class TestTimelineClientForATS1_5, method testPostEntities.
@Test
public void testPostEntities() throws Exception {
  ApplicationId appId =
      ApplicationId.newInstance(System.currentTimeMillis(), 1);
  TimelineEntityGroupId groupId = TimelineEntityGroupId.newInstance(appId, "1");
  TimelineEntityGroupId groupId2 = TimelineEntityGroupId.newInstance(appId, "2");
  // Create two entities: one of an ordinary entity type and one of a
  // summary type.
  TimelineEntity[] entities = new TimelineEntity[2];
  entities[0] = generateEntity("entity_type");
  entities[1] = generateEntity("summary_type");
  try {
    // If attemptId is null, fall back to the original putEntities call and
    // save the entities into the configured leveldb store.
    client.putEntities(null, null, entities);
    verify(spyTimelineWriter, times(1)).putEntities(entities);
    reset(spyTimelineWriter);
    // If attemptId is specified but groupId is null, the entity-type entity
    // falls back to the original putEntities call, while the summary-type
    // entity is written to the file system.
    ApplicationAttemptId attemptId1 = ApplicationAttemptId.newInstance(appId, 1);
    client.putEntities(attemptId1, null, entities);
    TimelineEntity[] entityTDB = new TimelineEntity[1];
    entityTDB[0] = entities[0];
    verify(spyTimelineWriter, times(1)).putEntities(entityTDB);
    Assert.assertTrue(localFS.util().exists(
        new Path(getAppAttemptDir(attemptId1),
            "summarylog-" + attemptId1.toString())));
    reset(spyTimelineWriter);
    // If both attemptId and groupId are specified, the entities are saved
    // into the file system instead of the leveldb store.
    ApplicationAttemptId attemptId2 = ApplicationAttemptId.newInstance(appId, 2);
    client.putEntities(attemptId2, groupId, entities);
    client.putEntities(attemptId2, groupId2, entities);
    verify(spyTimelineWriter, times(0)).putEntities(any(TimelineEntity[].class));
    Assert.assertTrue(localFS.util().exists(
        new Path(getAppAttemptDir(attemptId2),
            "summarylog-" + attemptId2.toString())));
    Assert.assertTrue(localFS.util().exists(
        new Path(getAppAttemptDir(attemptId2),
            "entitylog-" + groupId.toString())));
    Assert.assertTrue(localFS.util().exists(
        new Path(getAppAttemptDir(attemptId2),
            "entitylog-" + groupId2.toString())));
    reset(spyTimelineWriter);
  } catch (Exception e) {
    Assert.fail("Exception is not expected. " + e);
  }
}
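The generateEntity helper belongs to the test class and is not shown in this excerpt; a plausible minimal version, assuming it only needs to set an entity id, the given type, and a start time (the actual helper in the Hadoop test may differ):

private static TimelineEntity generateEntity(String type) {
  TimelineEntity entity = new TimelineEntity();
  entity.setEntityId("entity id");   // any stable id works for the test
  entity.setEntityType(type);        // "entity_type" or "summary_type" above
  entity.setStartTime(System.currentTimeMillis());
  return entity;
}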
Use of org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId in project hadoop by Apache.
The class DistributedShellTimelinePlugin, method toEntityGroupId.
private Set<TimelineEntityGroupId> toEntityGroupId(String strAppId) {
  ApplicationId appId = ApplicationId.fromString(strAppId);
  TimelineEntityGroupId groupId = TimelineEntityGroupId.newInstance(
      appId, ApplicationMaster.CONTAINER_ENTITY_GROUP_ID);
  Set<TimelineEntityGroupId> result = new HashSet<>();
  result.add(groupId);
  return result;
}
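This maps every container entity of one distributed shell application to a single timeline group. A standalone exercise of the same mapping (the literal group id stands in for ApplicationMaster.CONTAINER_ENTITY_GROUP_ID, whose actual value lives in the distributed shell AM):

import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId;

public class ToEntityGroupIdSketch {
  public static void main(String[] args) {
    // ApplicationId.toString() yields strings like "application_1234_0001".
    String strAppId = ApplicationId.newInstance(1234, 1).toString();
    ApplicationId appId = ApplicationId.fromString(strAppId);
    // Literal stand-in for ApplicationMaster.CONTAINER_ENTITY_GROUP_ID.
    TimelineEntityGroupId groupId =
        TimelineEntityGroupId.newInstance(appId, "CONTAINER_ENTITY_GROUP_ID");
    Set<TimelineEntityGroupId> result = new HashSet<>();
    result.add(groupId);
    System.out.println(result); // one group id per application
  }
}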