Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache.
The class CleanerService, method writeGlobalCleanerPidFile:
/**
 * To ensure there are not multiple instances of the SCM running on a given
 * cluster, a global pid file is used. This file contains the hostname of the
 * machine that owns the pid file.
 *
 * @return true if the pid file was written, false otherwise
 * @throws YarnException
 */
private boolean writeGlobalCleanerPidFile() throws YarnException {
  String root = conf.get(YarnConfiguration.SHARED_CACHE_ROOT,
      YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT);
  Path pidPath = new Path(root, GLOBAL_CLEANER_PID);
  try {
    FileSystem fs = FileSystem.get(this.conf);
    if (fs.exists(pidPath)) {
      return false;
    }
    FSDataOutputStream os = fs.create(pidPath, false);
    // write the hostname and the process id in the global cleaner pid file
    final String ID = ManagementFactory.getRuntimeMXBean().getName();
    os.writeUTF(ID);
    os.close();
    // add it to delete-on-exit to ensure it gets deleted when the JVM exits
    fs.deleteOnExit(pidPath);
  } catch (IOException e) {
    throw new YarnException(e);
  }
  LOG.info("Created the global cleaner pid file at " + pidPath.toString());
  return true;
}
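A caller typically treats a false return as a fatal startup condition. The fragment below is a hypothetical sketch of that pattern, not the actual CleanerService code; the serviceStart method name and the message text are assumptions.

@Override
protected void serviceStart() throws Exception {
  // Hypothetical sketch: refuse to start a second cleaner if another SCM
  // instance already owns the global pid file.
  if (!writeGlobalCleanerPidFile()) {
    throw new YarnException("The global cleaner pid file already exists! "
        + "Another SCM cleaner is probably running on this cluster.");
  }
  super.serviceStart();
}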
Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache.
The class RemoteAppChecker, method getActiveApplications:
@Override
@Private
public Collection<ApplicationId> getActiveApplications() throws YarnException {
  try {
    List<ApplicationId> activeApps = new ArrayList<ApplicationId>();
    List<ApplicationReport> apps = client.getApplications(ACTIVE_STATES);
    for (ApplicationReport app : apps) {
      activeApps.add(app.getApplicationId());
    }
    return activeApps;
  } catch (IOException e) {
    throw new YarnException(e);
  }
}
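One way a caller might consume this is when deciding whether a cached resource is still referenced by a running application. The helper below is a hypothetical sketch; the method name, the appChecker field, and the resourceReferences parameter are assumptions, not part of the Hadoop source.

private boolean isResourceInUse(Set<ApplicationId> resourceReferences)
    throws YarnException {
  // Hypothetical sketch: a resource is still in use if any active
  // application holds a reference to it.
  for (ApplicationId appId : appChecker.getActiveApplications()) {
    if (resourceReferences.contains(appId)) {
      return true;
    }
  }
  return false;
}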
Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache.
The class TestTokenClientRMService, method testTokenCancellationByWrongUser:
@Test
public void testTokenCancellationByWrongUser() {
  // two sets to test -
  // 1. try to cancel tokens of short and kerberos users as a kerberos UGI
  // 2. try to cancel tokens of short and kerberos users as a simple auth UGI
  RMContext rmContext = mock(RMContext.class);
  final ClientRMService rmService =
      new ClientRMService(rmContext, null, null, null, null, dtsm);
  UserGroupInformation[] kerbTestOwners =
      { owner, other, tester, ownerKerb, otherKerb };
  UserGroupInformation[] kerbTestRenewers =
      { owner, other, ownerKerb, otherKerb };
  for (final UserGroupInformation tokOwner : kerbTestOwners) {
    for (final UserGroupInformation tokRenewer : kerbTestRenewers) {
      try {
        testerKerb.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            try {
              checkTokenCancellation(rmService, tokOwner, tokRenewer);
              Assert.fail("We should not reach here; token owner = "
                  + tokOwner.getUserName() + ", renewer = "
                  + tokRenewer.getUserName());
              return null;
            } catch (YarnException e) {
              Assert.assertTrue(e.getMessage().contains(
                  testerKerb.getUserName()
                      + " is not authorized to cancel the token"));
              return null;
            }
          }
        });
      } catch (Exception e) {
        Assert.fail("Unexpected exception; " + e.getMessage());
      }
    }
  }
  UserGroupInformation[] simpleTestOwners =
      { owner, other, ownerKerb, otherKerb, testerKerb };
  UserGroupInformation[] simpleTestRenewers =
      { owner, other, ownerKerb, otherKerb };
  for (final UserGroupInformation tokOwner : simpleTestOwners) {
    for (final UserGroupInformation tokRenewer : simpleTestRenewers) {
      try {
        tester.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            try {
              checkTokenCancellation(tokOwner, tokRenewer);
              Assert.fail("We should not reach here; token owner = "
                  + tokOwner.getUserName() + ", renewer = "
                  + tokRenewer.getUserName());
              return null;
            } catch (YarnException ex) {
              Assert.assertTrue(ex.getMessage().contains(
                  tester.getUserName()
                      + " is not authorized to cancel the token"));
              return null;
            }
          }
        });
      } catch (Exception e) {
        Assert.fail("Unexpected exception; " + e.getMessage());
      }
    }
  }
}
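The checkTokenCancellation helper is not shown above. The sketch below is a hedged illustration of what such a helper typically does: issue an RM delegation token for the owner/renewer pair, then cancel it as the calling UGI so that ClientRMService raises a YarnException when that user is neither the owner nor the renewer. The helper name and the request-construction details are assumptions, not the actual test code.

private void checkTokenCancellationSketch(final ClientRMService rmService,
    UserGroupInformation owner, UserGroupInformation renewer) throws Exception {
  // Obtain an RM delegation token on behalf of the owner, naming the renewer.
  final GetDelegationTokenRequest getRequest =
      Records.newRecord(GetDelegationTokenRequest.class);
  getRequest.setRenewer(renewer.getUserName());
  Token token = owner.doAs(new PrivilegedExceptionAction<Token>() {
    @Override
    public Token run() throws Exception {
      return rmService.getDelegationToken(getRequest).getRMDelegationToken();
    }
  });
  // Cancel as whatever UGI is current when this helper runs; ClientRMService
  // throws a YarnException if that user is not authorized to cancel the token.
  CancelDelegationTokenRequest cancelRequest =
      Records.newRecord(CancelDelegationTokenRequest.class);
  cancelRequest.setDelegationToken(token);
  rmService.cancelDelegationToken(cancelRequest);
}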
Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache.
The class TestCapacitySchedulerNodeLabelUpdate, method testMoveApplicationWithLabel:
@Test(timeout = 300000)
public void testMoveApplicationWithLabel() throws Exception {
  // set node -> label
  mgr.addToCluserNodeLabelsWithDefaultExclusivity(ImmutableSet.of("x", "y", "z"));
  // set mapping:
  // h1 -> x
  // h2 -> y
  // h4 -> z
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("x")));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h2", 0), toSet("y")));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h4", 0), toSet("z")));
  // inject node label manager
  MockRM rm = new MockRM(getConfigurationWithSubQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm.getRMContext().getContainerTokenSecretManager().rollMasterKey();
  rm.getRMContext().getNMTokenSecretManager().rollMasterKey();
  rm.getRMContext().setNodeLabelManager(mgr);
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 4096 * 2);
  MockNM nm2 = rm.registerNode("h2:1234", 4096 * 2);
  MockNM nm3 = rm.registerNode("h3:1234", 4096 * 2);
  MockNM nm4 = rm.registerNode("h4:1234", 4096 * 2);
  // launch an app in queue a1 (label = x), and check that its "x" containers
  // are allocated on h1
  RMApp app1 = rm.submitApp(GB, "app", "user", null, "a1");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm3);
  am1.allocate("*", GB, 1, new ArrayList<ContainerId>(), "x");
  ContainerId container1 =
      ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
  rm.waitForState(nm1, container1, RMContainerState.ALLOCATED, 10 * 1000);
  am1.allocate("*", GB, 1, new ArrayList<ContainerId>(), "y");
  ContainerId container2 =
      ContainerId.newContainerId(am1.getApplicationAttemptId(), 3);
  rm.waitForState(nm2, container2, RMContainerState.ALLOCATED, 10 * 1000);
  CapacityScheduler scheduler = (CapacityScheduler) rm.getResourceScheduler();
  try {
    scheduler.preValidateMoveApplication(app1.getApplicationId(), "a2");
    scheduler.moveApplication(app1.getApplicationId(), "a2");
    fail("Should throw exception since target queue doesn't have "
        + "required labels");
  } catch (Exception e) {
    Assert.assertTrue("Yarn Exception should be thrown",
        e instanceof YarnException);
    Assert.assertEquals("Specified queue=a2 can't satisfy "
        + "following apps label expressions =[x] accessible "
        + "node labels =[y]", e.getMessage());
  }
  try {
    scheduler.moveApplication(app1.getApplicationId(), "a3");
    scheduler.moveApplication(app1.getApplicationId(), "a4");
    // Check move to queue with accessible label ANY
    scheduler.moveApplication(app1.getApplicationId(), "b");
  } catch (Exception e) {
    fail("Should not throw exception since target queue has "
        + "required labels");
  }
  rm.stop();
}
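Outside the scheduler tests, the same validation surfaces to clients as a YarnException. The fragment below is a hedged client-side sketch using YarnClient; reusing app1 from the test and printing the message are illustrative assumptions.

YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(new YarnConfiguration());
yarnClient.start();
try {
  // The RM rejects the move when the target queue cannot satisfy the
  // application's node label expressions.
  yarnClient.moveApplicationAcrossQueues(app1.getApplicationId(), "a2");
} catch (YarnException e) {
  System.err.println("Move rejected: " + e.getMessage());
} finally {
  yarnClient.stop();
}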
Use of org.apache.hadoop.yarn.exceptions.YarnException in project hadoop by apache.
The class EntityGroupFSTimelineStore, method getAppState:
/**
 * Ask the RM for the state of the application.
 * This method has to be synchronized to control traffic to the RM.
 * @param appId application ID
 * @param yarnClient the client used to query the RM
 * @return the state, or {@link AppState#UNKNOWN} if it could not
 *         be determined
 * @throws IOException if the RM cannot be contacted
 */
private static synchronized AppState getAppState(ApplicationId appId,
    YarnClient yarnClient) throws IOException {
  AppState appState = AppState.ACTIVE;
  try {
    ApplicationReport report = yarnClient.getApplicationReport(appId);
    YarnApplicationState yarnState = report.getYarnApplicationState();
    if (APP_FINAL_STATES.contains(yarnState)) {
      appState = AppState.COMPLETED;
    }
  } catch (ApplicationNotFoundException e) {
    appState = AppState.UNKNOWN;
  } catch (YarnException e) {
    throw new IOException(e);
  }
  return appState;
}
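A caller usually only needs to know whether the application has finished. The helper below is a hypothetical sketch (the method name is an assumption) showing how the IOException-only contract simplifies that check.

private static boolean isAppCompleted(ApplicationId appId, YarnClient yarnClient)
    throws IOException {
  // getAppState already wraps YarnException in IOException, so callers deal
  // with a single checked exception type.
  return getAppState(appId, yarnClient) == AppState.COMPLETED;
}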