Use of org.apache.hadoop.yarn.webapp.NotFoundException in project hadoop by apache.
From class RMWebServices, method getApp:
@GET
@Path("/apps/{appid}")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public AppInfo getApp(@Context HttpServletRequest hsr,
    @PathParam("appid") String appId) {
  init();
  ApplicationId id = WebAppUtils.parseApplicationId(recordFactory, appId);
  RMApp app = rm.getRMContext().getRMApps().get(id);
  if (app == null) {
    throw new NotFoundException("app with id: " + appId + " not found");
  }
  return new AppInfo(rm, app, hasAccess(app, hsr), hsr.getScheme() + "://");
}
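For context, this is YARN's own NotFoundException, not the JAX-RS one; a minimal sketch, assuming the class is a thin WebApplicationException wrapper around HTTP 404 (the real class in org.apache.hadoop.yarn.webapp may carry additional constructors):

import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response;

// Sketch (assumption): a thin wrapper so that throwing from a JAX-RS
// resource method yields an HTTP 404 response instead of a 500.
public class NotFoundException extends WebApplicationException {
  private static final long serialVersionUID = 1L;

  public NotFoundException() {
    super(Response.Status.NOT_FOUND);
  }

  public NotFoundException(String msg) {
    super(new Exception(msg), Response.Status.NOT_FOUND);
  }
}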
Use of org.apache.hadoop.yarn.webapp.NotFoundException in project hadoop by apache.
From class RMWebServices, method getNode:
@GET
@Path("/nodes/{nodeId}")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public NodeInfo getNode(@PathParam("nodeId") String nodeId) {
  init();
  if (nodeId == null || nodeId.isEmpty()) {
    throw new NotFoundException("nodeId, " + nodeId + ", is empty or null");
  }
  ResourceScheduler sched = this.rm.getResourceScheduler();
  if (sched == null) {
    throw new NotFoundException("Null ResourceScheduler instance");
  }
  NodeId nid = NodeId.fromString(nodeId);
  RMNode ni = this.rm.getRMContext().getRMNodes().get(nid);
  boolean isInactive = false;
  if (ni == null) {
    ni = this.rm.getRMContext().getInactiveRMNodes().get(nid);
    if (ni == null) {
      throw new NotFoundException("nodeId, " + nodeId + ", is not found");
    }
    isInactive = true;
  }
  NodeInfo nodeInfo = new NodeInfo(ni, sched);
  if (isInactive) {
    nodeInfo.setNodeHTTPAddress(EMPTY);
  }
  return nodeInfo;
}
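A hedged caller-side illustration (findNodeOrNull is hypothetical, not from the Hadoop tree): a node id that is in neither the active nor the inactive map surfaces as NotFoundException, which JAX-RS renders as an HTTP 404 for REST clients.

// Hypothetical helper: fetch a node, translating the 404 into null.
static NodeInfo findNodeOrNull(RMWebServices webServices, String nodeId) {
  try {
    return webServices.getNode(nodeId);
  } catch (NotFoundException e) {
    // thrown when the id is in neither the active nor the inactive map
    return null;
  }
}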
Use of org.apache.hadoop.yarn.webapp.NotFoundException in project hadoop by apache.
From class TimelineCollectorWebService, method putEntities:
/**
 * Accepts writes to the collector and returns a response. It simply routes
 * the request to the app-level collector. It expects an application as a
 * context.
 *
 * @param req Servlet request.
 * @param res Servlet response.
 * @param async flag indicating whether this is an async put. "true" means
 *          the call is asynchronous; if null, it is treated as false.
 * @param appId Application Id to which the entities being put belong. If
 *          appId is missing or cannot be parsed, HTTP 400 is sent back.
 * @param entities timeline entities to be put.
 * @return a Response with the appropriate HTTP status.
 */
@PUT
@Path("/entities")
@Consumes({ MediaType.APPLICATION_JSON })
public Response putEntities(@Context HttpServletRequest req,
    @Context HttpServletResponse res, @QueryParam("async") String async,
    @QueryParam("appid") String appId, TimelineEntities entities) {
  init(res);
  UserGroupInformation callerUgi = getUser(req);
  if (callerUgi == null) {
    String msg = "The owner of the posted timeline entities is not set";
    LOG.error(msg);
    throw new ForbiddenException(msg);
  }
  // TODO how to express async posts and handle them
  boolean isAsync = async != null && async.trim().equalsIgnoreCase("true");
  try {
    ApplicationId appID = parseApplicationId(appId);
    if (appID == null) {
      return Response.status(Response.Status.BAD_REQUEST).build();
    }
    NodeTimelineCollectorManager collectorManager =
        (NodeTimelineCollectorManager) context.getAttribute(
            NodeTimelineCollectorManager.COLLECTOR_MANAGER_ATTR_KEY);
    TimelineCollector collector = collectorManager.get(appID);
    if (collector == null) {
      LOG.error("Application: " + appId + " is not found");
      // different exception?
      throw new NotFoundException();
    }
    collector.putEntities(processTimelineEntities(entities), callerUgi);
    return Response.ok().build();
  } catch (Exception e) {
    LOG.error("Error putting entities", e);
    throw new WebApplicationException(e, Response.Status.INTERNAL_SERVER_ERROR);
  }
}
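The parseApplicationId helper is not shown above; a minimal sketch, assuming it simply maps null or unparseable ids to null so the caller can answer with HTTP 400 as the javadoc describes (the real helper in TimelineCollectorWebService may log differently):

import org.apache.hadoop.yarn.api.records.ApplicationId;

// Sketch (assumption): lenient parser; bad input becomes null so the
// endpoint returns BAD_REQUEST instead of leaking a parse exception.
private static ApplicationId parseApplicationId(String appId) {
  try {
    return appId == null ? null : ApplicationId.fromString(appId.trim());
  } catch (IllegalArgumentException e) {
    return null;
  }
}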
Use of org.apache.hadoop.yarn.webapp.NotFoundException in project hadoop by apache.
From class GenericEntityReader, method lookupFlowContext:
/**
 * Looks up the flow context from the AppToFlow table.
 *
 * @param appToFlowRowKey identifies the cluster and application ids.
 * @param hbaseConf HBase configuration.
 * @param conn HBase connection.
 * @return flow context information.
 * @throws IOException if any problem occurs while fetching flow information.
 */
protected FlowContext lookupFlowContext(AppToFlowRowKey appToFlowRowKey,
    Configuration hbaseConf, Connection conn) throws IOException {
  byte[] rowKey = appToFlowRowKey.getRowKey();
  Get get = new Get(rowKey);
  Result result = appToFlowTable.getResult(hbaseConf, conn, get);
  if (result != null && !result.isEmpty()) {
    return new FlowContext(
        AppToFlowColumn.USER_ID.readResult(result).toString(),
        AppToFlowColumn.FLOW_ID.readResult(result).toString(),
        ((Number) AppToFlowColumn.FLOW_RUN_ID.readResult(result)).longValue());
  } else {
    throw new NotFoundException("Unable to find the context flow ID and flow"
        + " run ID for clusterId=" + appToFlowRowKey.getClusterId()
        + ", appId=" + appToFlowRowKey.getAppId());
  }
}
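The FlowContext constructed above is out of frame; a minimal sketch, assuming it is a plain immutable holder for the three values read from the AppToFlow row (field and accessor names here are illustrative):

// Sketch (assumption): immutable holder for the flow coordinates that
// lookupFlowContext() reads out of the AppToFlow row.
protected static class FlowContext {
  private final String userId;
  private final String flowName;
  private final Long flowRunId;

  FlowContext(String userId, String flowName, Long flowRunId) {
    this.userId = userId;
    this.flowName = flowName;
    this.flowRunId = flowRunId;
  }

  String getUserId() { return userId; }
  String getFlowName() { return flowName; }
  Long getFlowRunId() { return flowRunId; }
}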
Use of org.apache.hadoop.yarn.webapp.NotFoundException in project hadoop by apache.
From class AHSWebServices, method getContainerLogsInfo:
// TODO: YARN-6080: Create WebServiceUtils to have common functions used in
// RMWebService, NMWebService and AHSWebService.
/**
 * Returns the log file's name as well as the current file size for a
 * container.
 *
 * @param req HttpServletRequest
 * @param res HttpServletResponse
 * @param containerIdStr The container ID
 * @param nmId The NodeManager NodeId
 * @param redirected_from_node Whether this is a request redirected from
 *          the NM
 * @return The log file's name and current file size
 */
@GET
@Path("/containers/{containerid}/logs")
@Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
public Response getContainerLogsInfo(@Context HttpServletRequest req,
    @Context HttpServletResponse res,
    @PathParam(YarnWebServiceParams.CONTAINER_ID) String containerIdStr,
    @QueryParam(YarnWebServiceParams.NM_ID) String nmId,
    @QueryParam(YarnWebServiceParams.REDIRECTED_FROM_NODE)
    @DefaultValue("false") boolean redirected_from_node) {
  ContainerId containerId = null;
  init(res);
  try {
    containerId = ContainerId.fromString(containerIdStr);
  } catch (IllegalArgumentException e) {
    throw new BadRequestException("invalid container id, " + containerIdStr);
  }
  ApplicationId appId =
      containerId.getApplicationAttemptId().getApplicationId();
  AppInfo appInfo;
  try {
    appInfo = super.getApp(req, res, appId.toString());
  } catch (Exception ex) {
    // directly find logs from HDFS.
    return getContainerLogMeta(appId, null, null, containerIdStr, false);
  }
  // if the application has finished, find the log metadata directly
  // from HDFS.
  if (isFinishedState(appInfo.getAppState())) {
    return getContainerLogMeta(appId, null, null, containerIdStr, false);
  }
  if (isRunningState(appInfo.getAppState())) {
    String appOwner = appInfo.getUser();
    String nodeHttpAddress = null;
    if (nmId != null && !nmId.isEmpty()) {
      try {
        nodeHttpAddress = getNMWebAddressFromRM(conf, nmId);
      } catch (Exception ex) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(ex.getMessage());
        }
      }
    }
    if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()) {
      ContainerInfo containerInfo;
      try {
        containerInfo = super.getContainer(req, res, appId.toString(),
            containerId.getApplicationAttemptId().toString(),
            containerId.toString());
      } catch (Exception ex) {
        // fall back to the aggregated log meta. It will also return empty
        // log meta for the local logs.
        return getContainerLogMeta(appId, appOwner, null, containerIdStr,
            true);
      }
      nodeHttpAddress = containerInfo.getNodeHttpAddress();
      // if the node address is missing, or the request was already
      // redirected from a node, do not re-direct the request back.
      // Simply output the aggregated log meta.
      if (nodeHttpAddress == null || nodeHttpAddress.isEmpty()
          || redirected_from_node) {
        return getContainerLogMeta(appId, appOwner, null, containerIdStr,
            true);
      }
    }
    String uri = "/" + containerId.toString() + "/logs";
    String resURI = JOINER.join(getAbsoluteNMWebAddress(nodeHttpAddress),
        NM_DOWNLOAD_URI_STR, uri);
    String query = req.getQueryString();
    if (query != null && !query.isEmpty()) {
      resURI += "?" + query;
    }
    ResponseBuilder response =
        Response.status(HttpServletResponse.SC_TEMPORARY_REDIRECT);
    response.header("Location", resURI);
    return response.build();
  } else {
    throw new NotFoundException(
        "The application is not at Running or Finished State.");
  }
}
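The isRunningState and isFinishedState predicates are also out of frame; a hedged sketch, assuming they compare the reported state string against YarnApplicationState names (the real checks in AHSWebServices may cover a different set of states):

import org.apache.hadoop.yarn.api.records.YarnApplicationState;

// Sketch (assumption): state predicates used by getContainerLogsInfo().
private static boolean isRunningState(String appState) {
  return YarnApplicationState.RUNNING.toString().equals(appState);
}

private static boolean isFinishedState(String appState) {
  return YarnApplicationState.FINISHED.toString().equals(appState)
      || YarnApplicationState.FAILED.toString().equals(appState)
      || YarnApplicationState.KILLED.toString().equals(appState);
}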