Use of org.apache.hadoop.yarn.api.records.timeline.TimelineDomain in project hadoop by apache.
From the class TestLogInfo, method setup:
@Before
public void setup() throws Exception {
  // Bring up a one-datanode MiniDFSCluster rooted at the test directory.
  config.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, TEST_ROOT_DIR.toString());
  HdfsConfiguration hdfsConfig = new HdfsConfiguration();
  hdfsCluster = new MiniDFSCluster.Builder(hdfsConfig).numDataNodes(1).build();
  fs = hdfsCluster.getFileSystem();
  fc = FileContext.getFileContext(hdfsCluster.getURI(0), config);
  // Create the application attempt directory with log-dir permissions.
  Path testAppDirPath = getTestRootPath(TEST_ATTEMPT_DIR_NAME);
  fs.mkdirs(testAppDirPath, new FsPermission(FILE_LOG_DIR_PERMISSIONS));
  objMapper = PluginStoreTestUtils.createObjectMapper();
  // Write a test entity file, a test domain file, and a deliberately
  // broken file into the attempt directory.
  TimelineEntities testEntities = PluginStoreTestUtils.generateTestEntities();
  writeEntitiesLeaveOpen(testEntities,
      new Path(testAppDirPath, TEST_ENTITY_FILE_NAME));
  testDomain = new TimelineDomain();
  testDomain.setId("domain_1");
  testDomain.setReaders(UserGroupInformation.getLoginUser().getUserName());
  testDomain.setOwner(UserGroupInformation.getLoginUser().getUserName());
  testDomain.setDescription("description");
  writeDomainLeaveOpen(testDomain,
      new Path(testAppDirPath, TEST_DOMAIN_FILE_NAME));
  writeBrokenFile(new Path(testAppDirPath, TEST_BROKEN_FILE_NAME));
}
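The write*LeaveOpen helpers deliberately leave their output streams open so the tests can exercise files that are still being written. The matching teardown is not part of this excerpt; a minimal sketch, assuming only the hdfsCluster field shown above:

@After
public void tearDown() throws Exception {
  // Sketch, not the actual teardown: shutting down the MiniDFSCluster
  // also releases the streams the write*LeaveOpen helpers left open.
  if (hdfsCluster != null) {
    hdfsCluster.shutdown();
  }
}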
Use of org.apache.hadoop.yarn.api.records.timeline.TimelineDomain in project hive by apache.
From the class ATSHook, method createTimelineDomain:
private void createTimelineDomain(String domainId, String readers, String writers) throws Exception {
  TimelineDomain timelineDomain = new TimelineDomain();
  timelineDomain.setId(domainId);
  timelineDomain.setReaders(readers);
  timelineDomain.setWriters(writers);
  // Register the domain with the Application Timeline Server (ATS).
  timelineClient.putDomain(timelineDomain);
  LOG.info("ATS domain created: " + domainId + " (readers: " + readers
      + ", writers: " + writers + ")");
}
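The reader and writer arguments are plain YARN ACL strings: comma-separated users, a space, then comma-separated groups. A hypothetical call site with illustrative values, not taken from the Hive source:

// Hypothetical invocation; the domain id and ACL strings are illustrative.
createTimelineDomain("hive_atshook_domain_1",
    "alice,bob analysts",  // readers: users alice and bob, group analysts
    "hive");               // writers: the hive service user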
Use of org.apache.hadoop.yarn.api.records.timeline.TimelineDomain in project hadoop by apache.
From the class TestDistributedShell, method checkTimelineV1:
private void checkTimelineV1(boolean haveDomain) throws Exception {
  TimelineDomain domain = null;
  if (haveDomain) {
    // The test domain must exist and carry the expected reader/writer ACLs.
    domain = yarnCluster.getApplicationHistoryServer().getTimelineStore()
        .getDomain("TEST_DOMAIN");
    Assert.assertNotNull(domain);
    Assert.assertEquals("reader_user reader_group", domain.getReaders());
    Assert.assertEquals("writer_user writer_group", domain.getWriters());
  }
  // Exactly one application attempt entity with two events is expected.
  TimelineEntities entitiesAttempts = yarnCluster.getApplicationHistoryServer()
      .getTimelineStore().getEntities(
          ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString(),
          null, null, null, null, null, null, null, null, null);
  Assert.assertNotNull(entitiesAttempts);
  Assert.assertEquals(1, entitiesAttempts.getEntities().size());
  Assert.assertEquals(2, entitiesAttempts.getEntities().get(0).getEvents().size());
  Assert.assertEquals(entitiesAttempts.getEntities().get(0).getEntityType().toString(),
      ApplicationMaster.DSEntity.DS_APP_ATTEMPT.toString());
  if (haveDomain) {
    Assert.assertEquals(domain.getId(),
        entitiesAttempts.getEntities().get(0).getDomainId());
  } else {
    Assert.assertEquals("DEFAULT",
        entitiesAttempts.getEntities().get(0).getDomainId());
  }
  // Look up the containers of this attempt via the app-id primary filter.
  String currAttemptEntityId = entitiesAttempts.getEntities().get(0).getEntityId();
  ApplicationAttemptId attemptId = ApplicationAttemptId.fromString(currAttemptEntityId);
  NameValuePair primaryFilter = new NameValuePair(
      ApplicationMaster.APPID_TIMELINE_FILTER_NAME,
      attemptId.getApplicationId().toString());
  TimelineEntities entities = yarnCluster.getApplicationHistoryServer()
      .getTimelineStore().getEntities(
          ApplicationMaster.DSEntity.DS_CONTAINER.toString(),
          null, null, null, null, null, primaryFilter, null, null, null);
  Assert.assertNotNull(entities);
  Assert.assertEquals(2, entities.getEntities().size());
  Assert.assertEquals(entities.getEntities().get(0).getEntityType().toString(),
      ApplicationMaster.DSEntity.DS_CONTAINER.toString());
  if (haveDomain) {
    Assert.assertEquals(domain.getId(), entities.getEntities().get(0).getDomainId());
  } else {
    Assert.assertEquals("DEFAULT", entities.getEntities().get(0).getDomainId());
  }
}
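The TEST_DOMAIN verified above has to be published before the application runs. A minimal producer-side sketch using org.apache.hadoop.yarn.client.api.TimelineClient; the conf variable is assumed to be a YarnConfiguration with the timeline service enabled, and the ACL values mirror the assertions in the test:

// Sketch of how the checked domain could be published ahead of the run.
TimelineClient timelineClient = TimelineClient.createTimelineClient();
timelineClient.init(conf);  // assumed: YarnConfiguration with ATS enabled
timelineClient.start();
TimelineDomain domain = new TimelineDomain();
domain.setId("TEST_DOMAIN");
domain.setReaders("reader_user reader_group");
domain.setWriters("writer_user writer_group");
// Publish the domain so later entities can be placed into it.
timelineClient.putDomain(domain);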
Use of org.apache.hadoop.yarn.api.records.timeline.TimelineDomain in project hadoop by apache.
From the class TestTimelineClientForATS1_5, method testPutDomain:
@Test
public void testPutDomain() {
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
  ApplicationAttemptId attemptId1 = ApplicationAttemptId.newInstance(appId, 1);
  try {
    TimelineDomain domain = generateDomain();
    // Without an attempt id, the domain goes straight to the timeline writer.
    client.putDomain(null, domain);
    verify(spyTimelineWriter, times(1)).putDomain(domain);
    reset(spyTimelineWriter);
    // With an attempt id, the domain is written to a domain log file in the
    // attempt directory instead of going through the writer.
    client.putDomain(attemptId1, domain);
    verify(spyTimelineWriter, times(0)).putDomain(domain);
    Assert.assertTrue(localFS.util().exists(
        new Path(getAppAttemptDir(attemptId1), "domainlog-" + attemptId1.toString())));
    reset(spyTimelineWriter);
  } catch (Exception e) {
    Assert.fail("Unexpected exception: " + e);
  }
}
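The generateDomain() helper is not shown in this excerpt. A plausible stand-in, consistent with how the domain is used above; every field value here is hypothetical:

// Hypothetical generateDomain(); the field values are illustrative only.
private static TimelineDomain generateDomain() {
  TimelineDomain domain = new TimelineDomain();
  domain.setId("test_domain");
  domain.setDescription("test description");
  domain.setOwner("owner");
  domain.setReaders("reader");
  domain.setWriters("writer");
  domain.setCreatedTime(0L);
  domain.setModifiedTime(1L);
  return domain;
}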
Use of org.apache.hadoop.yarn.api.records.timeline.TimelineDomain in project hadoop by apache.
From the class KeyValueBasedTimelineStore, method put:
public void put(TimelineDomain domain) throws IOException {
  if (getServiceStopped()) {
    LOG.info("Service stopped, skipping the domain update");
    return;
  }
  // Preserve the original creation time when replacing an existing domain.
  TimelineDomain domainToReplace = domainById.get(domain.getId());
  Long currentTimestamp = System.currentTimeMillis();
  TimelineDomain domainToStore = KeyValueBasedTimelineStoreUtils.createTimelineDomain(
      domain.getId(), domain.getDescription(), domain.getOwner(),
      domain.getReaders(), domain.getWriters(),
      (domainToReplace == null ? currentTimestamp : domainToReplace.getCreatedTime()),
      currentTimestamp);
  domainById.put(domainToStore.getId(), domainToStore);
  // Maintain the secondary index from owner to that owner's domains.
  Set<TimelineDomain> domainsByOneOwner = domainsByOwner.get(domainToStore.getOwner());
  if (domainsByOneOwner == null) {
    domainsByOneOwner = new HashSet<TimelineDomain>();
    domainsByOwner.put(domainToStore.getOwner(), domainsByOneOwner);
  }
  if (domainToReplace != null) {
    domainsByOneOwner.remove(domainToReplace);
  }
  domainsByOneOwner.add(domainToStore);
}
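KeyValueBasedTimelineStoreUtils.createTimelineDomain is not shown here; judging from the call above, it assembles a fresh TimelineDomain so the stored copy is decoupled from the caller's object and carries the recomputed timestamps. A sketch inferred from the call site:

// Inferred from the call site above; the real helper may differ in detail.
static TimelineDomain createTimelineDomain(String id, String description,
    String owner, String readers, String writers,
    Long createdTime, Long modifiedTime) {
  TimelineDomain domain = new TimelineDomain();
  domain.setId(id);
  domain.setDescription(description);
  domain.setOwner(owner);
  domain.setReaders(readers);
  domain.setWriters(writers);
  domain.setCreatedTime(createdTime);
  domain.setModifiedTime(modifiedTime);
  return domain;
}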