Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
From class TestApplicationACLs, method verifyFriendAccess:
private void verifyFriendAccess() throws Exception {
  AccessControlList viewACL = new AccessControlList("");
  viewACL.addGroup(FRIENDLY_GROUP);
  AccessControlList modifyACL = new AccessControlList("");
  modifyACL.addUser(FRIEND);
  ApplicationId applicationId = submitAppAndGetAppId(viewACL, modifyACL);
  final GetApplicationReportRequest appReportRequest =
      recordFactory.newRecordInstance(GetApplicationReportRequest.class);
  appReportRequest.setApplicationId(applicationId);
  final KillApplicationRequest finishAppRequest =
      recordFactory.newRecordInstance(KillApplicationRequest.class);
  finishAppRequest.setApplicationId(applicationId);
  ApplicationClientProtocol friendClient = getRMClientForUser(FRIEND);
  // View as the friend
  friendClient.getApplicationReport(appReportRequest);
  // List apps as friend
  Assert.assertEquals("App view by a friend should list the apps!!", 3,
      friendClient.getApplications(
          recordFactory.newRecordInstance(GetApplicationsRequest.class))
          .getApplicationList().size());
  // Kill app as the friend
  friendClient.forceKillApplication(finishAppRequest);
  resourceManager.waitForState(applicationId, RMAppState.KILLED);
}
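The viewACL and modifyACL built above only matter if submitAppAndGetAppId attaches them to the application at submission time. As a point of reference, here is a minimal sketch of how such ACLs are typically attached in YARN, assuming the helper ultimately populates a ContainerLaunchContext (the withAcls helper is illustrative, not part of the test):

import java.util.EnumMap;
import java.util.Map;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.util.Records;

class AppAclSketch {
  static ContainerLaunchContext withAcls(AccessControlList viewAcl,
      AccessControlList modifyAcl) {
    // ACL spec strings look like "user1,user2 group1,group2"; a string
    // with a leading space means "no users, only these groups".
    Map<ApplicationAccessType, String> acls =
        new EnumMap<>(ApplicationAccessType.class);
    acls.put(ApplicationAccessType.VIEW_APP, viewAcl.getAclString());
    acls.put(ApplicationAccessType.MODIFY_APP, modifyAcl.getAclString());
    ContainerLaunchContext clc = Records.newRecord(ContainerLaunchContext.class);
    clc.setApplicationACLs(acls);
    return clc;
  }
}

VIEW_APP gates getApplicationReport and MODIFY_APP gates forceKillApplication, which is exactly the split the friend is granted in this test.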
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
From class TestApplicationACLs, method verifyEnemyAccess:
private void verifyEnemyAccess() throws Exception {
  AccessControlList viewACL = new AccessControlList("");
  viewACL.addGroup(FRIENDLY_GROUP);
  AccessControlList modifyACL = new AccessControlList("");
  modifyACL.addUser(FRIEND);
  ApplicationId applicationId = submitAppAndGetAppId(viewACL, modifyACL);
  final GetApplicationReportRequest appReportRequest =
      recordFactory.newRecordInstance(GetApplicationReportRequest.class);
  appReportRequest.setApplicationId(applicationId);
  final KillApplicationRequest finishAppRequest =
      recordFactory.newRecordInstance(KillApplicationRequest.class);
  finishAppRequest.setApplicationId(applicationId);
  ApplicationClientProtocol enemyRmClient = getRMClientForUser(ENEMY);
  // View as the enemy
  ApplicationReport appReport =
      enemyRmClient.getApplicationReport(appReportRequest).getApplicationReport();
  verifyEnemyAppReport(appReport);
  // List apps as enemy
  List<ApplicationReport> appReports = enemyRmClient.getApplications(
      recordFactory.newRecordInstance(GetApplicationsRequest.class))
      .getApplicationList();
  Assert.assertEquals("App view by enemy should list the apps!!", 4,
      appReports.size());
  for (ApplicationReport report : appReports) {
    verifyEnemyAppReport(report);
  }
  // Kill app as the enemy
  try {
    enemyRmClient.forceKillApplication(finishAppRequest);
    Assert.fail("App killing by the enemy should fail!!");
  } catch (YarnException e) {
    LOG.info("Got exception while killing app as the enemy", e);
    Assert.assertTrue(e.getMessage().contains(
        "User enemy cannot perform operation MODIFY_APP on " + applicationId));
  }
  rmClient.forceKillApplication(finishAppRequest);
}
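The access-denied message checked above ends with the application ID's canonical string form. A self-contained sketch of how that string is derived from an ApplicationId (the timestamp and sequence number are arbitrary example values):

import org.apache.hadoop.yarn.api.records.ApplicationId;

class AppIdStringSketch {
  public static void main(String[] args) {
    // An ApplicationId is (cluster timestamp, sequence number); toString()
    // renders the form used in logs, globs, and error messages, with the
    // sequence number zero-padded to at least four digits.
    ApplicationId appId = ApplicationId.newInstance(1234567890123L, 42);
    System.out.println(appId); // application_1234567890123_0042
  }
}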
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
From class TestMRJobs, method testThreadDumpOnTaskTimeout:
@Test(timeout = 120000)
public void testThreadDumpOnTaskTimeout()
    throws IOException, InterruptedException, ClassNotFoundException {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
        + " not found. Not running test.");
    return;
  }
  final SleepJob sleepJob = new SleepJob();
  final JobConf sleepConf = new JobConf(mrCluster.getConfig());
  sleepConf.setLong(MRJobConfig.TASK_TIMEOUT, 3 * 1000L);
  sleepConf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1);
  sleepJob.setConf(sleepConf);
  if (this instanceof TestUberAM) {
    sleepConf.setInt(MRJobConfig.MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS, 30 * 1000);
  }
  // The map sleeps for 10 minutes, far beyond the 3-second task timeout,
  // so the attempt is killed after a thread dump is triggered.
  final Job job = sleepJob.createJob(1, 0, 10 * 60 * 1000L, 1, 0L, 0);
  job.setJarByClass(SleepJob.class);
  // The AppMaster jar itself.
  job.addFileToClassPath(APP_JAR);
  job.waitForCompletion(true);
  final JobId jobId = TypeConverter.toYarn(job.getJobID());
  final ApplicationId appID = jobId.getAppId();
  int pollElapsed = 0;
  while (true) {
    Thread.sleep(1000);
    pollElapsed += 1000;
    if (TERMINAL_RM_APP_STATES.contains(
        mrCluster.getResourceManager().getRMContext().getRMApps()
            .get(appID).getState())) {
      break;
    }
    if (pollElapsed >= 60000) {
      LOG.warn("application did not reach terminal state within 60 seconds");
      break;
    }
  }
  // Job finished, verify logs
  final String appIdStr = appID.toString();
  final String appIdSuffix = appIdStr.substring("application_".length());
  final String containerGlob = "container_" + appIdSuffix + "_*_*";
  final String syslogGlob = appIdStr + Path.SEPARATOR + containerGlob
      + Path.SEPARATOR + TaskLog.LogName.SYSLOG;
  int numAppMasters = 0;
  int numMapTasks = 0;
  for (int i = 0; i < NUM_NODE_MGRS; i++) {
    final Configuration nmConf = mrCluster.getNodeManager(i).getConfig();
    for (String logDir : nmConf.getTrimmedStrings(YarnConfiguration.NM_LOG_DIRS)) {
      final Path absSyslogGlob = new Path(logDir + Path.SEPARATOR + syslogGlob);
      LOG.info("Checking for glob: " + absSyslogGlob);
      for (FileStatus syslog : localFs.globStatus(absSyslogGlob)) {
        boolean foundAppMaster = false;
        boolean foundThreadDump = false;
        // Determine the container type
        final BufferedReader syslogReader = new BufferedReader(
            new InputStreamReader(localFs.open(syslog.getPath())));
        try {
          for (String line; (line = syslogReader.readLine()) != null; ) {
            if (line.contains(MRAppMaster.class.getName())) {
              foundAppMaster = true;
              break;
            }
          }
        } finally {
          syslogReader.close();
        }
        // Check for thread dump in stdout
        final Path stdoutPath = new Path(syslog.getPath().getParent(),
            TaskLog.LogName.STDOUT.toString());
        final BufferedReader stdoutReader = new BufferedReader(
            new InputStreamReader(localFs.open(stdoutPath)));
        try {
          for (String line; (line = stdoutReader.readLine()) != null; ) {
            if (line.contains("Full thread dump")) {
              foundThreadDump = true;
              break;
            }
          }
        } finally {
          stdoutReader.close();
        }
        if (foundAppMaster) {
          numAppMasters++;
          if (this instanceof TestUberAM) {
            Assert.assertTrue("No thread dump", foundThreadDump);
          } else {
            Assert.assertFalse("Unexpected thread dump", foundThreadDump);
          }
        } else {
          numMapTasks++;
          Assert.assertTrue("No thread dump", foundThreadDump);
        }
      }
    }
  }
  // Make sure we checked a non-empty set of logs
  Assert.assertEquals("No AppMaster log found!", 1, numAppMasters);
  if (sleepConf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false)) {
    Assert.assertEquals("MapTask log with uber found!", 0, numMapTasks);
  } else {
    Assert.assertEquals("No MapTask log found!", 1, numMapTasks);
  }
}
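The glob built after the polling loop relies on container IDs embedding the application ID's numeric suffix. The same derivation in isolation, as a sketch (containerSyslogGlob is a made-up helper; container IDs that carry an epoch, e.g. container_e17_..., would need a wider pattern):

import org.apache.hadoop.yarn.api.records.ApplicationId;

class LogGlobSketch {
  static String containerSyslogGlob(ApplicationId appId) {
    String appIdStr = appId.toString(); // application_<timestamp>_<seq>
    // Drop the "application_" prefix; containers reuse the numeric suffix.
    String suffix = appIdStr.substring("application_".length());
    // <appId>/container_<suffix>_<attempt>_<container>/syslog
    return appIdStr + "/container_" + suffix + "_*_*/syslog";
  }
}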
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
From class TestMRJobsWithHistoryService, method testJobHistoryData:
@Test(timeout = 90000)
public void testJobHistoryData()
    throws IOException, InterruptedException, AvroRemoteException,
    ClassNotFoundException {
  if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
    LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
        + " not found. Not running test.");
    return;
  }
  SleepJob sleepJob = new SleepJob();
  sleepJob.setConf(mrCluster.getConfig());
  // Job with 3 maps and 2 reduces
  Job job = sleepJob.createJob(3, 2, 1000, 1, 500, 1);
  job.setJarByClass(SleepJob.class);
  // The AppMaster jar itself.
  job.addFileToClassPath(APP_JAR);
  job.waitForCompletion(true);
  Counters counterMR = job.getCounters();
  JobId jobId = TypeConverter.toYarn(job.getJobID());
  ApplicationId appID = jobId.getAppId();
  int pollElapsed = 0;
  while (true) {
    Thread.sleep(1000);
    pollElapsed += 1000;
    if (TERMINAL_RM_APP_STATES.contains(
        mrCluster.getResourceManager().getRMContext().getRMApps()
            .get(appID).getState())) {
      break;
    }
    if (pollElapsed >= 60000) {
      LOG.warn("application did not reach terminal state within 60 seconds");
      break;
    }
  }
  Assert.assertEquals(RMAppState.FINISHED,
      mrCluster.getResourceManager().getRMContext().getRMApps()
          .get(appID).getState());
  Counters counterHS = job.getCounters();
  // TODO: the assert below passes, but decide whether to compare each
  // field or to convert to v2 counters before comparing.
  LOG.info("CounterHS " + counterHS);
  LOG.info("CounterMR " + counterMR);
  Assert.assertEquals(counterHS, counterMR);
  HSClientProtocol historyClient = instantiateHistoryProxy();
  GetJobReportRequest gjReq = Records.newRecord(GetJobReportRequest.class);
  gjReq.setJobId(jobId);
  JobReport jobReport = historyClient.getJobReport(gjReq).getJobReport();
  verifyJobReport(jobReport, jobId);
}
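Both MR tests above reach the YARN ApplicationId by converting the client-side JobID. The conversion in isolation, as a minimal sketch (toAppId is an illustrative helper name):

import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.yarn.api.records.ApplicationId;

class JobIdConversionSketch {
  static ApplicationId toAppId(JobID mrJobId) {
    // A job and its YARN application share the same cluster timestamp
    // and sequence number, so the mapping is lossless in this direction.
    JobId yarnJobId = TypeConverter.toYarn(mrJobId);
    return yarnJobId.getAppId();
  }
}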
Use of org.apache.hadoop.yarn.api.records.ApplicationId in project hadoop by apache.
From class TestShuffleHandler, method testRecoveryFromOtherVersions:
@Test
public void testRecoveryFromOtherVersions() throws IOException {
  final String user = "someuser";
  final ApplicationId appId = ApplicationId.newInstance(12345, 1);
  final File tmpDir = new File(
      System.getProperty("test.build.data", System.getProperty("java.io.tmpdir")),
      TestShuffleHandler.class.getName());
  Configuration conf = new Configuration();
  conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
  conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
  ShuffleHandler shuffle = new ShuffleHandler();
  // emulate aux services startup with recovery enabled
  shuffle.setRecoveryPath(new Path(tmpDir.toString()));
  tmpDir.mkdirs();
  try {
    shuffle.init(conf);
    shuffle.start();
    // set up a shuffle token for an application
    DataOutputBuffer outputBuffer = new DataOutputBuffer();
    outputBuffer.reset();
    Token<JobTokenIdentifier> jt = new Token<JobTokenIdentifier>(
        "identifier".getBytes(), "password".getBytes(),
        new Text(user), new Text("shuffleService"));
    jt.write(outputBuffer);
    shuffle.initializeApplication(new ApplicationInitializationContext(
        user, appId,
        ByteBuffer.wrap(outputBuffer.getData(), 0, outputBuffer.getLength())));
    // verify we are authorized to shuffle
    int rc = getShuffleResponseCode(shuffle, jt);
    Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
    // emulate shuffle handler restart
    shuffle.close();
    shuffle = new ShuffleHandler();
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    shuffle.init(conf);
    shuffle.start();
    // verify we are still authorized to shuffle for the old application
    rc = getShuffleResponseCode(shuffle, jt);
    Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
    Version version = Version.newInstance(1, 0);
    Assert.assertEquals(version, shuffle.getCurrentVersion());
    // emulate a restart with a compatible (minor) version bump
    Version version11 = Version.newInstance(1, 1);
    // update the stored version info before closing the shuffle handler
    shuffle.storeVersion(version11);
    Assert.assertEquals(version11, shuffle.loadVersion());
    shuffle.close();
    shuffle = new ShuffleHandler();
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    shuffle.init(conf);
    shuffle.start();
    // after a successful restart the stored version is overwritten
    // with CURRENT_VERSION_INFO
    Assert.assertEquals(version, shuffle.loadVersion());
    // verify we are still authorized to shuffle for the old application
    rc = getShuffleResponseCode(shuffle, jt);
    Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
    // emulate a restart with an incompatible (major) version bump
    Version version21 = Version.newInstance(2, 1);
    shuffle.storeVersion(version21);
    Assert.assertEquals(version21, shuffle.loadVersion());
    shuffle.close();
    shuffle = new ShuffleHandler();
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    shuffle.init(conf);
    try {
      shuffle.start();
      Assert.fail("Incompatible version, should expect fail here.");
    } catch (ServiceStateException e) {
      Assert.assertTrue("Exception message mismatch", e.getMessage()
          .contains("Incompatible version for state DB schema:"));
    }
  } finally {
    if (shuffle != null) {
      shuffle.close();
    }
    FileUtil.fullyDelete(tmpDir);
  }
}
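The token-serialization steps at the top of the try block recur across shuffle tests; factored out, they look like this sketch (ShuffleTokenSketch and serialize are made-up names mirroring the calls used above):

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
import org.apache.hadoop.security.token.Token;

class ShuffleTokenSketch {
  // Writes the job token in its wire format and wraps only the valid
  // bytes, which is the form ApplicationInitializationContext expects.
  static ByteBuffer serialize(Token<JobTokenIdentifier> jobToken)
      throws IOException {
    DataOutputBuffer out = new DataOutputBuffer();
    jobToken.write(out);
    return ByteBuffer.wrap(out.getData(), 0, out.getLength());
  }
}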