Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache.
Class TestYarnClient, method testBestEffortTimelineDelegationToken:
@Test
public void testBestEffortTimelineDelegationToken() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  YarnClientImpl client = spy(new YarnClientImpl() {

    @Override
    TimelineClient createTimelineClient() throws IOException, YarnException {
      timelineClient = mock(TimelineClient.class);
      when(timelineClient.getDelegationToken(any(String.class)))
          .thenThrow(new IOException("Best effort test exception"));
      return timelineClient;
    }
  });
  client.init(conf);
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, true);
  client.serviceInit(conf);
  client.getTimelineDelegationToken();
  try {
    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_CLIENT_BEST_EFFORT, false);
    client.serviceInit(conf);
    client.getTimelineDelegationToken();
    Assert.fail("Get delegation token should have thrown an exception");
  } catch (Exception e) {
    // Success
  }
}
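Outside the test, the same configuration switch governs real clients. The following is a rough sketch, not taken from the Hadoop sources (the surrounding application and method are assumptions), of how caller code typically distinguishes the two failure modes:

// Hedged sketch: YarnException signals a YARN-level rejection (for example a
// refused timeline delegation token when best-effort mode is off), while
// IOException signals a transport problem.
void runClient(Configuration conf) {
  YarnClient yarnClient = YarnClient.createYarnClient();
  yarnClient.init(conf);
  yarnClient.start();
  try {
    ApplicationId appId = yarnClient.createApplication()
        .getNewApplicationResponse().getApplicationId();
    // ... build and submit an ApplicationSubmissionContext for appId ...
  } catch (YarnException | IOException e) {
    // With TIMELINE_SERVICE_CLIENT_BEST_EFFORT=false, timeline token failures
    // surface here instead of being logged and ignored.
  } finally {
    yarnClient.stop();
  }
}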
Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache.
Class TestYarnClient, method testCreateReservation:
@Test
public void testCreateReservation() throws Exception {
  MiniYARNCluster cluster = setupMiniYARNCluster();
  YarnClient client = setupYarnClient(cluster);
  try {
    Clock clock = new UTCClock();
    long arrival = clock.getTime();
    long duration = 60000;
    long deadline = (long) (arrival + 1.05 * duration);
    ReservationSubmissionRequest sRequest =
        submitReservationTestHelper(client, arrival, deadline, duration);
    // Submit the reservation again with the same request and make sure it
    // passes.
    client.submitReservation(sRequest);
    // Submit the reservation with the same reservation id but different
    // reservation definition, and ensure YarnException is thrown.
    arrival = clock.getTime();
    ReservationDefinition rDef = sRequest.getReservationDefinition();
    rDef.setArrival(arrival + duration);
    sRequest.setReservationDefinition(rDef);
    try {
      client.submitReservation(sRequest);
      Assert.fail("Reservation submission should fail if a duplicate "
          + "reservation id is used, but the reservation definition has been "
          + "updated.");
    } catch (Exception e) {
      Assert.assertTrue(e instanceof YarnException);
    }
  } finally {
    // clean-up
    if (client != null) {
      client.stop();
    }
    cluster.stop();
  }
}
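The test depends on submitReservation accepting an identical resubmission but rejecting a changed definition under the same reservation id. A minimal caller-side sketch of that contract (sRequest is assumed to be built as in the helper above):

try {
  // Re-submitting the exact same request with the same ReservationId passes.
  client.submitReservation(sRequest);
} catch (YarnException | IOException e) {
  // A YarnException is raised when the same ReservationId is reused with a
  // different ReservationDefinition.
}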
Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache.
Class ContainerManagerImpl, method startContainerInternal:
@SuppressWarnings("unchecked")
protected void startContainerInternal(
    ContainerTokenIdentifier containerTokenIdentifier,
    StartContainerRequest request) throws YarnException, IOException {
  ContainerId containerId = containerTokenIdentifier.getContainerID();
  String containerIdStr = containerId.toString();
  String user = containerTokenIdentifier.getApplicationSubmitter();
  LOG.info("Start request for " + containerIdStr + " by user " + user);
  ContainerLaunchContext launchContext = request.getContainerLaunchContext();
  Credentials credentials = YarnServerSecurityUtils.parseCredentials(launchContext);
  Container container = new ContainerImpl(getConfig(), this.dispatcher,
      launchContext, credentials, metrics, containerTokenIdentifier, context);
  ApplicationId applicationID =
      containerId.getApplicationAttemptId().getApplicationId();
  if (context.getContainers().putIfAbsent(containerId, container) != null) {
    NMAuditLogger.logFailure(user, AuditConstants.START_CONTAINER,
        "ContainerManagerImpl", "Container already running on this node!",
        applicationID, containerId);
    throw RPCUtil.getRemoteException("Container " + containerIdStr
        + " already is running on this node!!");
  }
  this.readLock.lock();
  try {
    if (!isServiceStopped()) {
      // Create the application
      // populate the flow context from the launch context if the timeline
      // service v.2 is enabled
      FlowContext flowContext = null;
      if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
        String flowName = launchContext.getEnvironment()
            .get(TimelineUtils.FLOW_NAME_TAG_PREFIX);
        String flowVersion = launchContext.getEnvironment()
            .get(TimelineUtils.FLOW_VERSION_TAG_PREFIX);
        String flowRunIdStr = launchContext.getEnvironment()
            .get(TimelineUtils.FLOW_RUN_ID_TAG_PREFIX);
        long flowRunId = 0L;
        if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
          flowRunId = Long.parseLong(flowRunIdStr);
        }
        flowContext = new FlowContext(flowName, flowVersion, flowRunId);
      }
      if (!context.getApplications().containsKey(applicationID)) {
        Application application = new ApplicationImpl(dispatcher, user,
            flowContext, applicationID, credentials, context);
        if (context.getApplications().putIfAbsent(applicationID, application) == null) {
          LOG.info("Creating a new application reference for app " + applicationID);
          LogAggregationContext logAggregationContext =
              containerTokenIdentifier.getLogAggregationContext();
          Map<ApplicationAccessType, String> appAcls =
              container.getLaunchContext().getApplicationACLs();
          context.getNMStateStore().storeApplication(applicationID,
              buildAppProto(applicationID, user, credentials, appAcls,
                  logAggregationContext));
          dispatcher.getEventHandler().handle(
              new ApplicationInitEvent(applicationID, appAcls, logAggregationContext));
        }
      }
      this.context.getNMStateStore().storeContainer(containerId,
          containerTokenIdentifier.getVersion(), request);
      dispatcher.getEventHandler().handle(
          new ApplicationContainerInitEvent(container));
      this.context.getContainerTokenSecretManager().startContainerSuccessful(
          containerTokenIdentifier);
      NMAuditLogger.logSuccess(user, AuditConstants.START_CONTAINER,
          "ContainerManageImpl", applicationID, containerId);
      // TODO launchedContainer misplaced -> doesn't necessarily mean a container
      // launch. A finished Application will not launch containers.
      metrics.launchedContainer();
      metrics.allocateContainer(containerTokenIdentifier.getResource());
    } else {
      throw new YarnException("Container start failed as the NodeManager is "
          + "in the process of shutting down");
    }
  } finally {
    this.readLock.unlock();
  }
}
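From the ApplicationMaster side, the exceptions raised here travel back over the container-management protocol. A hedged sketch using the public NMClient API (the allocated container and launch context are assumptions standing in for values produced by the AM's allocation loop):

NMClient nmClient = NMClient.createNMClient();
nmClient.init(conf);
nmClient.start();
try {
  // startContainer() ends up in startContainerInternal() on the NodeManager.
  nmClient.startContainer(allocatedContainer, launchContext);
} catch (YarnException | IOException e) {
  // For example the "already is running on this node" rejection above, or the
  // NodeManager refusing new containers while shutting down.
}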
Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache.
Class ContainerManagerImpl, method preReInitializeOrLocalizeCheck:
private Container preReInitializeOrLocalizeCheck(ContainerId containerId,
    ReInitOp op) throws YarnException {
  UserGroupInformation remoteUgi = getRemoteUgi();
  NMTokenIdentifier nmTokenIdentifier = selectNMTokenIdentifier(remoteUgi);
  authorizeUser(remoteUgi, nmTokenIdentifier);
  if (!nmTokenIdentifier.getApplicationAttemptId().getApplicationId()
      .equals(containerId.getApplicationAttemptId().getApplicationId())) {
    throw new YarnException("ApplicationMaster not authorized to perform "
        + "[" + op + "] on Container [" + containerId + "]!!");
  }
  Container container = context.getContainers().get(containerId);
  if (container == null) {
    throw new YarnException("Specified " + containerId + " does not exist!");
  }
  if (!container.isRunning() || container.isReInitializing()) {
    throw new YarnException("Cannot perform " + op + " on [" + containerId
        + "]. Current state is [" + container.getContainerState() + ", "
        + "isReInitializing=" + container.isReInitializing() + "].");
  }
  return container;
}
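This guard is what an ApplicationMaster runs into when it asks for re-initialization while the container is not in a usable state. A hedged client-side sketch, assuming the NMClient re-initialization API of the same Hadoop line (nmClient, containerId and newLaunchContext are assumed to exist in the caller):

try {
  // autoCommit=true: roll forward automatically if the re-initialized
  // container starts successfully.
  nmClient.reInitializeContainer(containerId, newLaunchContext, true);
} catch (YarnException | IOException e) {
  // Raised when the container is unknown, not running, already
  // re-initializing, or owned by a different ApplicationMaster.
}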
Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache.
Class ContainerManagerImpl, method getContainerStatuses:
/**
 * Get a list of container statuses running on this NodeManager.
 */
@Override
public GetContainerStatusesResponse getContainerStatuses(
    GetContainerStatusesRequest request) throws YarnException, IOException {
  List<ContainerStatus> succeededRequests = new ArrayList<ContainerStatus>();
  Map<ContainerId, SerializedException> failedRequests =
      new HashMap<ContainerId, SerializedException>();
  UserGroupInformation remoteUgi = getRemoteUgi();
  NMTokenIdentifier identifier = selectNMTokenIdentifier(remoteUgi);
  if (identifier == null) {
    throw RPCUtil.getRemoteException(INVALID_NMTOKEN_MSG);
  }
  for (ContainerId id : request.getContainerIds()) {
    try {
      ContainerStatus status = getContainerStatusInternal(id, identifier);
      succeededRequests.add(status);
    } catch (YarnException e) {
      failedRequests.put(id, SerializedException.newInstance(e));
    }
  }
  return GetContainerStatusesResponse.newInstance(succeededRequests, failedRequests);
}
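Per-container failures are returned inside the response rather than failing the whole call. A hedged caller-side sketch (containerIds and the containerManager protocol proxy are assumptions):

GetContainerStatusesResponse response = containerManager.getContainerStatuses(
    GetContainerStatusesRequest.newInstance(containerIds));
for (ContainerStatus status : response.getContainerStatuses()) {
  // Containers the NodeManager could resolve.
}
for (Map.Entry<ContainerId, SerializedException> entry
    : response.getFailedRequests().entrySet()) {
  // Each YarnException caught above arrives here as a SerializedException.
  Throwable cause = entry.getValue().deSerialize();
}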