Use of org.apache.hadoop.yarn.api.records.ApplicationAccessType in project hadoop by apache.
The class ApplicationHistoryManagerOnTimelineStore, method convertToApplicationReport.
private static ApplicationReportExt convertToApplicationReport(TimelineEntity entity, ApplicationReportField field) {
  String user = null;
  String queue = null;
  String name = null;
  String type = null;
  boolean unmanagedApplication = false;
  long createdTime = 0;
  long finishedTime = 0;
  float progress = 0.0f;
  int applicationPriority = 0;
  ApplicationAttemptId latestApplicationAttemptId = null;
  String diagnosticsInfo = null;
  FinalApplicationStatus finalStatus = FinalApplicationStatus.UNDEFINED;
  YarnApplicationState state = YarnApplicationState.ACCEPTED;
  ApplicationResourceUsageReport appResources = null;
  Set<String> appTags = null;
  Map<ApplicationAccessType, String> appViewACLs = new HashMap<ApplicationAccessType, String>();
  String appNodeLabelExpression = null;
  String amNodeLabelExpression = null;
  Map<String, Object> entityInfo = entity.getOtherInfo();
  if (entityInfo != null) {
    if (entityInfo.containsKey(ApplicationMetricsConstants.USER_ENTITY_INFO)) {
      user = entityInfo.get(ApplicationMetricsConstants.USER_ENTITY_INFO).toString();
    }
    if (entityInfo.containsKey(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO)) {
      String appViewACLsStr = entityInfo.get(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO).toString();
      if (appViewACLsStr.length() > 0) {
        appViewACLs.put(ApplicationAccessType.VIEW_APP, appViewACLsStr);
      }
    }
    if (field == ApplicationReportField.USER_AND_ACLS) {
      return new ApplicationReportExt(ApplicationReport.newInstance(ApplicationId.fromString(entity.getEntityId()), latestApplicationAttemptId, user, queue, name, null, -1, null, state, diagnosticsInfo, null, createdTime, finishedTime, finalStatus, null, null, progress, type, null, appTags, unmanagedApplication, Priority.newInstance(applicationPriority), appNodeLabelExpression, amNodeLabelExpression), appViewACLs);
    }
    if (entityInfo.containsKey(ApplicationMetricsConstants.QUEUE_ENTITY_INFO)) {
      queue = entityInfo.get(ApplicationMetricsConstants.QUEUE_ENTITY_INFO).toString();
    }
    if (entityInfo.containsKey(ApplicationMetricsConstants.NAME_ENTITY_INFO)) {
      name = entityInfo.get(ApplicationMetricsConstants.NAME_ENTITY_INFO).toString();
    }
    if (entityInfo.containsKey(ApplicationMetricsConstants.TYPE_ENTITY_INFO)) {
      type = entityInfo.get(ApplicationMetricsConstants.TYPE_ENTITY_INFO).toString();
    }
    if (entityInfo.containsKey(ApplicationMetricsConstants.UNMANAGED_APPLICATION_ENTITY_INFO)) {
      unmanagedApplication = Boolean.parseBoolean(entityInfo.get(ApplicationMetricsConstants.UNMANAGED_APPLICATION_ENTITY_INFO).toString());
    }
    if (entityInfo.containsKey(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO)) {
      applicationPriority = Integer.parseInt(entityInfo.get(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO).toString());
    }
    if (entityInfo.containsKey(ApplicationMetricsConstants.APP_NODE_LABEL_EXPRESSION)) {
      appNodeLabelExpression = entityInfo.get(ApplicationMetricsConstants.APP_NODE_LABEL_EXPRESSION).toString();
    }
    if (entityInfo.containsKey(ApplicationMetricsConstants.AM_NODE_LABEL_EXPRESSION)) {
      amNodeLabelExpression = entityInfo.get(ApplicationMetricsConstants.AM_NODE_LABEL_EXPRESSION).toString();
    }
    if (entityInfo.containsKey(ApplicationMetricsConstants.APP_CPU_METRICS)) {
      long vcoreSeconds = Long.parseLong(entityInfo.get(ApplicationMetricsConstants.APP_CPU_METRICS).toString());
      long memorySeconds = Long.parseLong(entityInfo.get(ApplicationMetricsConstants.APP_MEM_METRICS).toString());
      long preemptedMemorySeconds = Long.parseLong(entityInfo.get(ApplicationMetricsConstants.APP_MEM_PREEMPT_METRICS).toString());
      long preemptedVcoreSeconds = Long.parseLong(entityInfo.get(ApplicationMetricsConstants.APP_CPU_PREEMPT_METRICS).toString());
      appResources = ApplicationResourceUsageReport.newInstance(0, 0, null, null, null, memorySeconds, vcoreSeconds, 0, 0, preemptedMemorySeconds, preemptedVcoreSeconds);
    }
    if (entityInfo.containsKey(ApplicationMetricsConstants.APP_TAGS_INFO)) {
      appTags = new HashSet<String>();
      Object obj = entityInfo.get(ApplicationMetricsConstants.APP_TAGS_INFO);
      if (obj != null && obj instanceof Collection<?>) {
        for (Object o : (Collection<?>) obj) {
          if (o != null) {
            appTags.add(o.toString());
          }
        }
      }
    }
  }
  List<TimelineEvent> events = entity.getEvents();
  long updatedTimeStamp = 0L;
  if (events != null) {
    for (TimelineEvent event : events) {
      if (event.getEventType().equals(ApplicationMetricsConstants.CREATED_EVENT_TYPE)) {
        createdTime = event.getTimestamp();
      } else if (event.getEventType().equals(ApplicationMetricsConstants.UPDATED_EVENT_TYPE)) {
        // Update events can repeat; keep only the newest one by comparing
        // timestamps before overwriting.
        if (event.getTimestamp() > updatedTimeStamp) {
          updatedTimeStamp = event.getTimestamp();
        } else {
          continue;
        }
        Map<String, Object> eventInfo = event.getEventInfo();
        if (eventInfo == null) {
          continue;
        }
        applicationPriority = Integer.parseInt(eventInfo.get(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO).toString());
        queue = eventInfo.get(ApplicationMetricsConstants.QUEUE_ENTITY_INFO).toString();
      } else if (event.getEventType().equals(ApplicationMetricsConstants.STATE_UPDATED_EVENT_TYPE)) {
        Map<String, Object> eventInfo = event.getEventInfo();
        if (eventInfo == null) {
          continue;
        }
        if (eventInfo.containsKey(ApplicationMetricsConstants.STATE_EVENT_INFO)) {
          if (!isFinalState(state)) {
            state = YarnApplicationState.valueOf(eventInfo.get(ApplicationMetricsConstants.STATE_EVENT_INFO).toString());
          }
        }
      } else if (event.getEventType().equals(ApplicationMetricsConstants.FINISHED_EVENT_TYPE)) {
        progress = 1.0F;
        finishedTime = event.getTimestamp();
        Map<String, Object> eventInfo = event.getEventInfo();
        if (eventInfo == null) {
          continue;
        }
        if (eventInfo.containsKey(ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO)) {
          latestApplicationAttemptId = ApplicationAttemptId.fromString(eventInfo.get(ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO).toString());
        }
        if (eventInfo.containsKey(ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)) {
          diagnosticsInfo = eventInfo.get(ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO).toString();
        }
        if (eventInfo.containsKey(ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO)) {
          finalStatus = FinalApplicationStatus.valueOf(eventInfo.get(ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO).toString());
        }
        if (eventInfo.containsKey(ApplicationMetricsConstants.STATE_EVENT_INFO)) {
          state = YarnApplicationState.valueOf(eventInfo.get(ApplicationMetricsConstants.STATE_EVENT_INFO).toString());
        }
      }
    }
  }
  return new ApplicationReportExt(ApplicationReport.newInstance(ApplicationId.fromString(entity.getEntityId()), latestApplicationAttemptId, user, queue, name, null, -1, null, state, diagnosticsInfo, null, createdTime, finishedTime, finalStatus, appResources, null, progress, type, null, appTags, unmanagedApplication, Priority.newInstance(applicationPriority), appNodeLabelExpression, amNodeLabelExpression), appViewACLs);
}
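
convertToApplicationReport reads only well-known keys from the entity's otherInfo map and events list. The fixture below is a minimal sketch, not code from the Hadoop tree, assuming the timeline v1 TimelineEntity and TimelineEvent records plus the ApplicationMetricsConstants keys used above; the application id, user, queue, name, type, and ACL string are made up for illustration.

import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;

// Hypothetical fixture: builds a timeline entity shaped the way the converter expects.
static TimelineEntity createAppEntityFixture() {
  TimelineEntity entity = new TimelineEntity();
  entity.setEntityType(ApplicationMetricsConstants.ENTITY_TYPE);
  entity.setEntityId("application_1500000000000_0001"); // made-up application id
  entity.addOtherInfo(ApplicationMetricsConstants.USER_ENTITY_INFO, "alice");
  entity.addOtherInfo(ApplicationMetricsConstants.QUEUE_ENTITY_INFO, "default");
  entity.addOtherInfo(ApplicationMetricsConstants.NAME_ENTITY_INFO, "wordcount");
  entity.addOtherInfo(ApplicationMetricsConstants.TYPE_ENTITY_INFO, "MAPREDUCE");
  // A non-empty string here becomes the VIEW_APP entry in appViewACLs above.
  entity.addOtherInfo(ApplicationMetricsConstants.APP_VIEW_ACLS_ENTITY_INFO, "alice,admins");
  TimelineEvent created = new TimelineEvent();
  created.setEventType(ApplicationMetricsConstants.CREATED_EVENT_TYPE);
  created.setTimestamp(System.currentTimeMillis()); // becomes createdTime in the report
  entity.addEvent(created);
  return entity;
}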
Use of org.apache.hadoop.yarn.api.records.ApplicationAccessType in project hadoop by apache.
The class TimelineACLsManager, method putDomainIntoCache.
private AccessControlListExt putDomainIntoCache(TimelineDomain domain) {
  Map<ApplicationAccessType, AccessControlList> acls = new HashMap<ApplicationAccessType, AccessControlList>(2);
  acls.put(ApplicationAccessType.VIEW_APP, new AccessControlList(StringHelper.cjoin(domain.getReaders())));
  acls.put(ApplicationAccessType.MODIFY_APP, new AccessControlList(StringHelper.cjoin(domain.getWriters())));
  AccessControlListExt aclExt = new AccessControlListExt(domain.getOwner(), acls);
  aclExts.put(domain.getId(), aclExt);
  return aclExt;
}
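
The cached AccessControlListExt pairs the domain owner with per-access-type AccessControlList instances built from the comma-joined reader and writer lists. Below is a minimal sketch of how such a map is typically consulted; checkAccess is a hypothetical helper, not the TimelineACLsManager API.

import java.util.Map;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;

// Hypothetical helper mirroring how the cached per-type ACLs are consulted.
static boolean checkAccess(Map<ApplicationAccessType, AccessControlList> acls,
    ApplicationAccessType accessType, UserGroupInformation callerUgi) {
  AccessControlList acl = acls.get(accessType);
  // No entry for this access type: deny by default in this sketch.
  return acl != null && acl.isUserAllowed(callerUgi);
}

With the map built in putDomainIntoCache, checkAccess(acls, ApplicationAccessType.VIEW_APP, ugi) would pass for any user in the domain's reader list.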
Use of org.apache.hadoop.yarn.api.records.ApplicationAccessType in project hadoop by apache.
The class ContainerManagerImpl, method recoverApplication.
private void recoverApplication(ContainerManagerApplicationProto p) throws IOException {
  ApplicationId appId = new ApplicationIdPBImpl(p.getId());
  Credentials creds = new Credentials();
  creds.readTokenStorageStream(new DataInputStream(p.getCredentials().newInput()));
  List<ApplicationACLMapProto> aclProtoList = p.getAclsList();
  Map<ApplicationAccessType, String> acls = new HashMap<ApplicationAccessType, String>(aclProtoList.size());
  for (ApplicationACLMapProto aclProto : aclProtoList) {
    acls.put(ProtoUtils.convertFromProtoFormat(aclProto.getAccessType()), aclProto.getAcl());
  }
  LogAggregationContext logAggregationContext = null;
  if (p.getLogAggregationContext() != null) {
    logAggregationContext = new LogAggregationContextPBImpl(p.getLogAggregationContext());
  }
  LOG.info("Recovering application " + appId);
  // TODO: Recover flow and flow run ID
  ApplicationImpl app = new ApplicationImpl(dispatcher, p.getUser(), appId, creds, context, p.getAppLogAggregationInitedTime());
  context.getApplications().put(appId, app);
  app.handle(new ApplicationInitEvent(appId, acls, logAggregationContext));
}
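
recoverApplication maps each ApplicationACLMapProto entry back to an (ApplicationAccessType, acl string) pair. The reverse direction has the same shape; the sketch below is illustrative only, assuming the protobuf-generated ApplicationACLMapProto builder and the ProtoUtils.convertToProtoFormat counterpart to the convertFromProtoFormat call above.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationACLMapProto;

// Sketch: serialize an ACL map the same way recoverApplication deserializes it.
static List<ApplicationACLMapProto> toAclProtoList(Map<ApplicationAccessType, String> acls) {
  List<ApplicationACLMapProto> protos = new ArrayList<>(acls.size());
  for (Map.Entry<ApplicationAccessType, String> e : acls.entrySet()) {
    protos.add(ApplicationACLMapProto.newBuilder()
        .setAccessType(ProtoUtils.convertToProtoFormat(e.getKey()))
        .setAcl(e.getValue())
        .build());
  }
  return protos;
}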
Use of org.apache.hadoop.yarn.api.records.ApplicationAccessType in project hadoop by apache.
The class TestContainerManagerRecovery, method testContainerResizeRecovery.
@Test
public void testContainerResizeRecovery() throws Exception {
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED, true);
  NMStateStoreService stateStore = new NMMemoryStateStoreService();
  stateStore.init(conf);
  stateStore.start();
  Context context = createContext(conf, stateStore);
  ContainerManagerImpl cm = createContainerManager(context, delSrvc);
  cm.init(conf);
  cm.start();
  // add an application by starting a container
  ApplicationId appId = ApplicationId.newInstance(0, 1);
  ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
  ContainerId cid = ContainerId.newContainerId(attemptId, 1);
  Map<String, String> containerEnv = Collections.emptyMap();
  Map<String, ByteBuffer> serviceData = Collections.emptyMap();
  Credentials containerCreds = new Credentials();
  DataOutputBuffer dob = new DataOutputBuffer();
  containerCreds.writeTokenStorageToStream(dob);
  ByteBuffer containerTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  Map<ApplicationAccessType, String> acls = Collections.emptyMap();
  File tmpDir = new File("target", this.getClass().getSimpleName() + "-tmpDir");
  File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  if (Shell.WINDOWS) {
    fileWriter.println("@ping -n 100 127.0.0.1 >nul");
  } else {
    fileWriter.write("\numask 0");
    fileWriter.write("\nexec sleep 100");
  }
  fileWriter.close();
  FileContext localFS = FileContext.getLocalFSFileContext();
  URL resource_alpha = URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map<String, LocalResource> localResources = new HashMap<>();
  localResources.put(destinationFile, rsrc_alpha);
  List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
  ContainerLaunchContext clc = ContainerLaunchContext.newInstance(localResources, containerEnv, commands, serviceData, containerTokens, acls);
  StartContainersResponse startResponse = startContainer(context, cm, cid, clc, null);
  assertTrue(startResponse.getFailedRequests().isEmpty());
  assertEquals(1, context.getApplications().size());
  Application app = context.getApplications().get(appId);
  assertNotNull(app);
  // make sure the container reaches RUNNING state
  waitForNMContainerState(cm, cid, org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.RUNNING);
  Resource targetResource = Resource.newInstance(2048, 2);
  IncreaseContainersResourceResponse increaseResponse = increaseContainersResource(context, cm, cid, targetResource);
  assertTrue(increaseResponse.getFailedRequests().isEmpty());
  // check status
  ContainerStatus containerStatus = getContainerStatus(context, cm, cid);
  assertEquals(targetResource, containerStatus.getCapability());
  // restart and verify container is running and recovered
  // to the correct size
  cm.stop();
  context = createContext(conf, stateStore);
  cm = createContainerManager(context);
  cm.init(conf);
  cm.start();
  assertEquals(1, context.getApplications().size());
  app = context.getApplications().get(appId);
  assertNotNull(app);
  containerStatus = getContainerStatus(context, cm, cid);
  assertEquals(targetResource, containerStatus.getCapability());
}
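
The test deliberately passes an empty ACL map to ContainerLaunchContext.newInstance. If view or modify restrictions were part of the scenario, the map would be populated before building the launch context; a short sketch with made-up user names, reusing the newInstance signature and the other locals from the test above:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;

// Made-up principals; localResources, containerEnv, commands, serviceData,
// and containerTokens are the same locals built in the test above.
Map<ApplicationAccessType, String> acls = new HashMap<>();
acls.put(ApplicationAccessType.VIEW_APP, "alice,bob");
acls.put(ApplicationAccessType.MODIFY_APP, "alice");
ContainerLaunchContext clc = ContainerLaunchContext.newInstance(localResources, containerEnv, commands, serviceData, containerTokens, acls);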
Use of org.apache.hadoop.yarn.api.records.ApplicationAccessType in project hadoop by apache.
The class TestAppLogAggregatorImpl, method createAppLogAggregator.
private static AppLogAggregatorInTest createAppLogAggregator(ApplicationId applicationId, String rootLogDir, YarnConfiguration config, long recoveredLogInitedTimeMillis, DeletionService deletionServiceWithFilesToExpect) throws IOException {
  final Dispatcher dispatcher = createNullDispatcher();
  final NodeId nodeId = NodeId.newInstance("localhost", 0);
  final String userId = "AppLogAggregatorTest";
  final UserGroupInformation ugi = UserGroupInformation.createRemoteUser(userId);
  final LocalDirsHandlerService dirsService = createLocalDirsHandlerService(config, rootLogDir);
  final DeletionService deletionService = deletionServiceWithFilesToExpect;
  final LogAggregationContext logAggregationContext = null;
  final Map<ApplicationAccessType, String> appAcls = new HashMap<>();
  final Context context = createContext(config);
  final FileContext fakeLfs = mock(FileContext.class);
  final Path remoteLogDirForApp = new Path(REMOTE_LOG_FILE.getAbsolutePath());
  return new AppLogAggregatorInTest(dispatcher, deletionService, config, applicationId, ugi, nodeId, dirsService, remoteLogDirForApp, appAcls, logAggregationContext, context, fakeLfs, recoveredLogInitedTimeMillis);
}
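
createAppLogAggregator pins logAggregationContext to null, so the aggregator under test runs with default include/exclude behavior. A non-null context restricting which log files are aggregated could be supplied instead; the patterns below are illustrative only, using the two-argument LogAggregationContext.newInstance(includePattern, excludePattern) factory.

import org.apache.hadoop.yarn.api.records.LogAggregationContext;

// Aggregate only stdout/stderr-style files and skip temp files (illustrative patterns).
LogAggregationContext logAggregationContext = LogAggregationContext.newInstance("std*", "tmp*");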