Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
From the class TestTimelineClientForATS1_5, method testPostEntities:
@Test
public void testPostEntities() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
  TimelineEntityGroupId groupId = TimelineEntityGroupId.newInstance(appId, "1");
  TimelineEntityGroupId groupId2 = TimelineEntityGroupId.newInstance(appId, "2");
  // Create two entities: one of a plain entity type and one of a summary type.
  TimelineEntity[] entities = new TimelineEntity[2];
  entities[0] = generateEntity("entity_type");
  entities[1] = generateEntity("summary_type");
  try {
    // If attemptId is null, fall back to the original putEntities call and
    // save the entities into the configured LevelDB store.
    client.putEntities(null, null, entities);
    verify(spyTimelineWriter, times(1)).putEntities(entities);
    reset(spyTimelineWriter);
    // If attemptId is specified but groupId is null, the plain entity type
    // still goes through the original putEntities call, while the
    // summary-type entity is written to the FileSystem.
    ApplicationAttemptId attemptId1 = ApplicationAttemptId.newInstance(appId, 1);
    client.putEntities(attemptId1, null, entities);
    TimelineEntity[] entityTDB = new TimelineEntity[1];
    entityTDB[0] = entities[0];
    verify(spyTimelineWriter, times(1)).putEntities(entityTDB);
    Assert.assertTrue(localFS.util().exists(
        new Path(getAppAttemptDir(attemptId1), "summarylog-" + attemptId1.toString())));
    reset(spyTimelineWriter);
    // If both attemptId and groupId are specified, the entities are saved to
    // the FileSystem instead of the LevelDB store.
    ApplicationAttemptId attemptId2 = ApplicationAttemptId.newInstance(appId, 2);
    client.putEntities(attemptId2, groupId, entities);
    client.putEntities(attemptId2, groupId2, entities);
    verify(spyTimelineWriter, times(0)).putEntities(any(TimelineEntity[].class));
    Assert.assertTrue(localFS.util().exists(
        new Path(getAppAttemptDir(attemptId2), "summarylog-" + attemptId2.toString())));
    Assert.assertTrue(localFS.util().exists(
        new Path(getAppAttemptDir(attemptId2), "entitylog-" + groupId.toString())));
    Assert.assertTrue(localFS.util().exists(
        new Path(getAppAttemptDir(attemptId2), "entitylog-" + groupId2.toString())));
    reset(spyTimelineWriter);
  } catch (Exception e) {
    Assert.fail("Exception is not expected. " + e);
  }
}
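The test relies on a generateEntity helper that is not shown in this snippet. Below is a minimal sketch of what such a helper could look like, assuming a bare TimelineEntity is enough for the assertions above; the entity id and start time are illustrative choices, not taken from the Hadoop source:

import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;

// Hypothetical helper: builds a minimal TimelineEntity of the given type.
private static TimelineEntity generateEntity(String type) {
  TimelineEntity entity = new TimelineEntity();
  entity.setEntityType(type);                       // "entity_type" or "summary_type"
  entity.setEntityId("entity_" + type);             // illustrative id
  entity.setStartTime(System.currentTimeMillis());  // illustrative start time
  return entity;
}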
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
From the class TestTimelineClientV2Impl, method createTimelineClient:
private TestV2TimelineClient createTimelineClient(YarnConfiguration config) {
  ApplicationId id = ApplicationId.newInstance(0, 0);
  TestV2TimelineClient tc = new TestV2TimelineClient(id);
  tc.init(config);
  tc.start();
  return tc;
}
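A caller of this factory would typically pass in a YarnConfiguration with the timeline service switched on. A minimal, illustrative sketch follows; the exact settings a given test needs may differ:

import org.apache.hadoop.yarn.conf.YarnConfiguration;

YarnConfiguration conf = new YarnConfiguration();
// Enable the timeline service and select version 2 (illustrative settings).
conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
conf.setFloat(YarnConfiguration.TIMELINE_SERVICE_VERSION, 2.0f);
TestV2TimelineClient client = createTimelineClient(conf);
// ... exercise the client ...
client.stop();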
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
From the class TestApplicatonReport, method createApplicationReport:
protected static ApplicationReport createApplicationReport(int appIdInt, int appAttemptIdInt, long timestamp) {
  ApplicationId appId = ApplicationId.newInstance(timestamp, appIdInt);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, appAttemptIdInt);
  ApplicationReport appReport = ApplicationReport.newInstance(appId, appAttemptId,
      "user", "queue", "appname", "host", 124, null, YarnApplicationState.FINISHED,
      "diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null, "N/A",
      0.53789f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null, null, false,
      Priority.newInstance(0), "", "");
  return appReport;
}
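A small, illustrative use of this factory, checking that the generated ids carry the arguments through; the concrete values are assumptions for the example, not part of the test class:

ApplicationReport report = createApplicationReport(1, 1, 1234L);
// The application and attempt ids embed the arguments passed above.
Assert.assertEquals(1234L, report.getApplicationId().getClusterTimestamp());
Assert.assertEquals(1, report.getApplicationId().getId());
Assert.assertEquals(1, report.getCurrentApplicationAttemptId().getAttemptId());
Assert.assertEquals(YarnApplicationState.FINISHED, report.getYarnApplicationState());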
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
From the class NodeStatusUpdaterImpl, method getContainerStatuses:
// Iterate through the NMContext, clone each container's status, and, for
// completed containers, add the container to the recentlyStoppedContainers
// collection.
@VisibleForTesting
protected List<ContainerStatus> getContainerStatuses() throws IOException {
  List<ContainerStatus> containerStatuses = new ArrayList<ContainerStatus>();
  for (Container container : this.context.getContainers().values()) {
    ContainerId containerId = container.getContainerId();
    ApplicationId applicationId = containerId.getApplicationAttemptId().getApplicationId();
    org.apache.hadoop.yarn.api.records.ContainerStatus containerStatus =
        container.cloneAndGetContainerStatus();
    if (containerStatus.getState() == ContainerState.COMPLETE) {
      if (isApplicationStopped(applicationId)) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(applicationId + " is completing, " + " remove " + containerId + " from NM context.");
        }
        context.getContainers().remove(containerId);
        pendingCompletedContainers.put(containerId, containerStatus);
      } else {
        if (!isContainerRecentlyStopped(containerId)) {
          pendingCompletedContainers.put(containerId, containerStatus);
        }
      }
      // Add to the finished-containers cache. The cache keeps the entry around
      // for at least #durationToTrackStoppedContainers; the subsequent call to
      // stop the container removes it from the cache.
      addCompletedContainer(containerId);
    } else {
      containerStatuses.add(containerStatus);
    }
  }
  containerStatuses.addAll(pendingCompletedContainers.values());
  if (LOG.isDebugEnabled()) {
    LOG.debug("Sending out " + containerStatuses.size() + " container statuses: " + containerStatuses);
  }
  return containerStatuses;
}
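The comments above refer to a time-bounded cache of recently stopped containers, kept for at least durationToTrackStoppedContainers before eviction. The following is only an illustrative sketch of that idea, not the actual NodeStatusUpdaterImpl fields or methods:

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ContainerId;

// Illustrative: map each stopped container to the time its entry expires.
private final Map<ContainerId, Long> recentlyStopped = new LinkedHashMap<ContainerId, Long>();
private final long durationToTrackStoppedContainersMs = 10 * 60 * 1000L; // assumed value

private void trackStoppedContainer(ContainerId containerId) {
  recentlyStopped.put(containerId, System.currentTimeMillis() + durationToTrackStoppedContainersMs);
}

private boolean isRecentlyStoppedSketch(ContainerId containerId) {
  Long expiry = recentlyStopped.get(containerId);
  if (expiry == null) {
    return false;
  }
  if (expiry < System.currentTimeMillis()) {
    recentlyStopped.remove(containerId); // evict an expired entry
    return false;
  }
  return true;
}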
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
From the class NodeStatusUpdaterImpl, method getNMContainerStatuses:
// These NMContainerStatus records are sent on NM registration and are used only by YARN.
private List<NMContainerStatus> getNMContainerStatuses() throws IOException {
  List<NMContainerStatus> containerStatuses = new ArrayList<NMContainerStatus>();
  for (Container container : this.context.getContainers().values()) {
    ContainerId containerId = container.getContainerId();
    ApplicationId applicationId = containerId.getApplicationAttemptId().getApplicationId();
    if (!this.context.getApplications().containsKey(applicationId)) {
      context.getContainers().remove(containerId);
      continue;
    }
    NMContainerStatus status = container.getNMContainerStatus();
    containerStatuses.add(status);
    if (status.getContainerState() == ContainerState.COMPLETE) {
      // Add to the finished-containers cache. The cache keeps the entry around
      // for at least #durationToTrackStoppedContainers; the subsequent call to
      // stop the container removes it from the cache.
      addCompletedContainer(containerId);
    }
  }
  LOG.info("Sending out " + containerStatuses.size() + " NM container statuses: " + containerStatuses);
  return containerStatuses;
}
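On the receiving side, each NMContainerStatus can be inspected field by field. Here is a short illustrative consumer of the returned list; the variable names and log message are for the example only:

List<NMContainerStatus> statuses = getNMContainerStatuses();
for (NMContainerStatus status : statuses) {
  if (status.getContainerState() == ContainerState.COMPLETE) {
    // Completed containers are reported once at registration so the RM can
    // finish its bookkeeping for them.
    LOG.info("Completed container reported at registration: " + status.getContainerId());
  }
}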