use of org.apache.hadoop.yarn.api.records.SerializedException in project hadoop by apache.
the class TestContainerManagerSecurity method startContainer.
private void startContainer(final YarnRPC rpc,
    org.apache.hadoop.yarn.api.records.Token nmToken,
    org.apache.hadoop.yarn.api.records.Token containerToken, NodeId nodeId,
    String user) throws Exception {
  ContainerLaunchContext context = Records.newRecord(ContainerLaunchContext.class);
  StartContainerRequest scRequest =
      StartContainerRequest.newInstance(context, containerToken);
  List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  ContainerManagementProtocol proxy = null;
  try {
    proxy = getContainerManagementProtocolProxy(rpc, nmToken, nodeId, user);
    StartContainersResponse response = proxy.startContainers(allRequests);
    for (SerializedException ex : response.getFailedRequests().values()) {
      parseAndThrowException(ex.deSerialize());
    }
  } finally {
    if (proxy != null) {
      rpc.stopProxy(proxy, conf);
    }
  }
}
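The snippet above delegates to a parseAndThrowException helper that is not shown here. A minimal sketch of what such a helper could look like, assuming it only needs to rethrow the deserialized cause under its most specific checked type (an illustration, not the exact test code):

private void parseAndThrowException(Throwable t) throws YarnException, IOException {
  // Rethrow the deserialized remote cause under its most specific known type.
  if (t instanceof YarnException) {
    throw (YarnException) t;
  } else if (t instanceof IOException) {
    throw (IOException) t;
  } else {
    // Unknown type: wrap it so the caller still sees the remote failure.
    throw new IOException(t);
  }
}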
use of org.apache.hadoop.yarn.api.records.SerializedException in project hadoop by apache.
the class ContainerManagerImpl method getContainerStatuses.
/**
 * Get a list of container statuses running on this NodeManager
 */
@Override
public GetContainerStatusesResponse getContainerStatuses(
    GetContainerStatusesRequest request) throws YarnException, IOException {
  List<ContainerStatus> succeededRequests = new ArrayList<ContainerStatus>();
  Map<ContainerId, SerializedException> failedRequests =
      new HashMap<ContainerId, SerializedException>();
  UserGroupInformation remoteUgi = getRemoteUgi();
  NMTokenIdentifier identifier = selectNMTokenIdentifier(remoteUgi);
  if (identifier == null) {
    throw RPCUtil.getRemoteException(INVALID_NMTOKEN_MSG);
  }
  for (ContainerId id : request.getContainerIds()) {
    try {
      ContainerStatus status = getContainerStatusInternal(id, identifier);
      succeededRequests.add(status);
    } catch (YarnException e) {
      failedRequests.put(id, SerializedException.newInstance(e));
    }
  }
  return GetContainerStatusesResponse.newInstance(succeededRequests, failedRequests);
}
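On the caller's side, the failed-requests map built above can be unpacked by deserializing each SerializedException back into a Throwable. A hedged sketch of that consumption pattern, assuming the caller already holds a ContainerManagementProtocol proxy, the request, and a LOG instance:

GetContainerStatusesResponse response = proxy.getContainerStatuses(request);
for (ContainerStatus status : response.getContainerStatuses()) {
  // Successfully looked-up containers.
  LOG.info("Container " + status.getContainerId() + " is " + status.getState());
}
for (Map.Entry<ContainerId, SerializedException> failed :
    response.getFailedRequests().entrySet()) {
  // Each failure travels as a SerializedException and is rebuilt locally.
  Throwable cause = failed.getValue().deSerialize();
  LOG.warn("Status lookup failed for " + failed.getKey(), cause);
}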
use of org.apache.hadoop.yarn.api.records.SerializedException in project hadoop by apache.
the class StopContainersResponsePBImpl method initFailedRequests.
private void initFailedRequests() {
  if (this.failedRequests != null) {
    return;
  }
  StopContainersResponseProtoOrBuilder p = viaProto ? proto : builder;
  List<ContainerExceptionMapProto> protoList = p.getFailedRequestsList();
  this.failedRequests = new HashMap<ContainerId, SerializedException>();
  for (ContainerExceptionMapProto ce : protoList) {
    this.failedRequests.put(convertFromProtoFormat(ce.getContainerId()),
        convertFromProtoFormat(ce.getException()));
  }
}
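initFailedRequests relies on two convertFromProtoFormat overloads that are not shown. In the PB record implementations these are typically thin wrappers around the corresponding *PBImpl classes; a sketch under that assumption:

private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
  // Wrap the protobuf message in its YARN record implementation.
  return new ContainerIdPBImpl(p);
}

private SerializedExceptionPBImpl convertFromProtoFormat(SerializedExceptionProto p) {
  return new SerializedExceptionPBImpl(p);
}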
use of org.apache.hadoop.yarn.api.records.SerializedException in project hadoop by apache.
the class SerializedExceptionPBImpl method deSerialize.
@SuppressWarnings("unchecked")
@Override
public Throwable deSerialize() {
  SerializedException cause = getCause();
  SerializedExceptionProtoOrBuilder p = viaProto ? proto : builder;
  Class<?> realClass = null;
  try {
    realClass = Class.forName(p.getClassName());
  } catch (ClassNotFoundException e) {
    throw new YarnRuntimeException(e);
  }
  Class classType = null;
  if (YarnException.class.isAssignableFrom(realClass)) {
    classType = YarnException.class;
  } else if (IOException.class.isAssignableFrom(realClass)) {
    classType = IOException.class;
  } else if (RuntimeException.class.isAssignableFrom(realClass)) {
    classType = RuntimeException.class;
  } else {
    classType = Throwable.class;
  }
  return instantiateException(realClass.asSubclass(classType), getMessage(),
      cause == null ? null : cause.deSerialize());
}
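Together with SerializedException.newInstance (used earlier in getContainerStatuses), deSerialize gives a full round trip: an exception raised on the NodeManager is serialized into the response and rebuilt on the caller's side under its closest known supertype. A minimal standalone sketch of that round trip, using only the public record API shown above:

import org.apache.hadoop.yarn.api.records.SerializedException;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class SerializedExceptionRoundTrip {
  public static void main(String[] args) {
    // Serialize a YarnException as it would travel inside a *Response record.
    YarnException original = new YarnException("container not found");
    SerializedException wire = SerializedException.newInstance(original);
    // Rebuild it on the receiving side; class name and message are preserved.
    Throwable restored = wire.deSerialize();
    System.out.println(restored.getClass().getName() + ": " + restored.getMessage());
  }
}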
use of org.apache.hadoop.yarn.api.records.SerializedException in project hadoop by apache.
the class TestContainerManager method testIncreaseContainerResourceWithInvalidResource.
@Test
public void testIncreaseContainerResourceWithInvalidResource() throws Exception {
  containerManager.start();
  File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
  PrintWriter fileWriter = new PrintWriter(scriptFile);
  // Construct the Container-id
  ContainerId cId = createContainerId(0);
  if (Shell.WINDOWS) {
    fileWriter.println("@ping -n 100 127.0.0.1 >nul");
  } else {
    fileWriter.write("\numask 0");
    fileWriter.write("\nexec sleep 100");
  }
  fileWriter.close();
  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);
  URL resource_alpha =
      URL.fromPath(localFS.makeQualified(new Path(scriptFile.getAbsolutePath())));
  LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class);
  rsrc_alpha.setResource(resource_alpha);
  rsrc_alpha.setSize(-1);
  rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
  rsrc_alpha.setType(LocalResourceType.FILE);
  rsrc_alpha.setTimestamp(scriptFile.lastModified());
  String destinationFile = "dest_file";
  Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
  localResources.put(destinationFile, rsrc_alpha);
  containerLaunchContext.setLocalResources(localResources);
  List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
  containerLaunchContext.setCommands(commands);
  StartContainerRequest scRequest = StartContainerRequest.newInstance(
      containerLaunchContext,
      createContainerToken(cId, DUMMY_RM_IDENTIFIER, context.getNodeId(), user,
          context.getContainerTokenSecretManager()));
  List<StartContainerRequest> list = new ArrayList<>();
  list.add(scRequest);
  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);
  // Make sure the container reaches RUNNING state
  BaseContainerManagerTest.waitForNMContainerState(containerManager, cId,
      org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState.RUNNING);
  // Construct container resource increase request,
  List<Token> increaseTokens = new ArrayList<>();
  // Add increase request. The increase request should fail
  // as the current resource does not fit in the target resource
  Token containerToken = createContainerToken(cId, DUMMY_RM_IDENTIFIER,
      context.getNodeId(), user, Resource.newInstance(512, 1),
      context.getContainerTokenSecretManager(), null);
  increaseTokens.add(containerToken);
  IncreaseContainersResourceRequest increaseRequest =
      IncreaseContainersResourceRequest.newInstance(increaseTokens);
  IncreaseContainersResourceResponse increaseResponse =
      containerManager.increaseContainersResource(increaseRequest);
  // Check response
  Assert.assertEquals(0, increaseResponse.getSuccessfullyIncreasedContainers().size());
  Assert.assertEquals(1, increaseResponse.getFailedRequests().size());
  for (Map.Entry<ContainerId, SerializedException> entry :
      increaseResponse.getFailedRequests().entrySet()) {
    if (cId.equals(entry.getKey())) {
      Assert.assertNotNull("Failed message", entry.getValue().getMessage());
      Assert.assertTrue(entry.getValue().getMessage().contains(
          "The target resource " + Resource.newInstance(512, 1).toString()
              + " is smaller than the current resource " + Resource.newInstance(1024, 1)));
    } else {
      throw new YarnException("Received failed request from wrong"
          + " container: " + entry.getKey().toString());
    }
  }
}
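The assertions above inspect only the serialized message. The same entries can also be deserialized to recover the remote cause as a Throwable; a hedged follow-on sketch continuing from the increaseResponse in the test (not part of the original test):

for (SerializedException failure : increaseResponse.getFailedRequests().values()) {
  // Rebuild the NodeManager-side exception locally. Class name and message are
  // carried in the serialized form, so the rebuilt Throwable mirrors the original.
  Throwable remoteCause = failure.deSerialize();
  Assert.assertNotNull(remoteCause);
}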