
Example 1 with ApplicationTerminationContext

Use of org.apache.hadoop.yarn.server.api.ApplicationTerminationContext in project hadoop by apache.

From the class TestShuffleHandler, method testRecovery:

@Test
public void testRecovery() throws IOException {
    final String user = "someuser";
    final ApplicationId appId = ApplicationId.newInstance(12345, 1);
    final JobID jobId = JobID.downgrade(TypeConverter.fromYarn(appId));
    final File tmpDir = new File(System.getProperty("test.build.data", System.getProperty("java.io.tmpdir")), TestShuffleHandler.class.getName());
    Configuration conf = new Configuration();
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
    conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
    ShuffleHandler shuffle = new ShuffleHandler();
    // emulate aux services startup with recovery enabled
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    tmpDir.mkdirs();
    try {
        shuffle.init(conf);
        shuffle.start();
        // setup a shuffle token for an application
        DataOutputBuffer outputBuffer = new DataOutputBuffer();
        outputBuffer.reset();
        Token<JobTokenIdentifier> jt = new Token<JobTokenIdentifier>("identifier".getBytes(), "password".getBytes(), new Text(user), new Text("shuffleService"));
        jt.write(outputBuffer);
        shuffle.initializeApplication(new ApplicationInitializationContext(user, appId, ByteBuffer.wrap(outputBuffer.getData(), 0, outputBuffer.getLength())));
        // verify we are authorized to shuffle
        int rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
        // emulate shuffle handler restart
        shuffle.close();
        shuffle = new ShuffleHandler();
        shuffle.setRecoveryPath(new Path(tmpDir.toString()));
        shuffle.init(conf);
        shuffle.start();
        // verify we are still authorized to shuffle to the old application
        rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
        // shutdown app and verify access is lost
        shuffle.stopApplication(new ApplicationTerminationContext(appId));
        rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, rc);
        // emulate shuffle handler restart
        shuffle.close();
        shuffle = new ShuffleHandler();
        shuffle.setRecoveryPath(new Path(tmpDir.toString()));
        shuffle.init(conf);
        shuffle.start();
        // verify we still don't have access
        rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, rc);
    } finally {
        if (shuffle != null) {
            shuffle.close();
        }
        FileUtil.fullyDelete(tmpDir);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), JobTokenIdentifier (org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier), Token (org.apache.hadoop.security.token.Token), Text (org.apache.hadoop.io.Text), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), File (java.io.File), ApplicationInitializationContext (org.apache.hadoop.yarn.server.api.ApplicationInitializationContext), ApplicationTerminationContext (org.apache.hadoop.yarn.server.api.ApplicationTerminationContext), Test (org.junit.Test)
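
To show where ApplicationTerminationContext lands on the receiving side, here is a minimal sketch of a custom auxiliary service. It is not taken from either project: the class name and the internal map are made up for illustration, and the context accessors used (getApplicationId(), getApplicationDataForService()) are assumed to match the installed YARN version.

import java.nio.ByteBuffer;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
import org.apache.hadoop.yarn.server.api.AuxiliaryService;

// Hypothetical aux service used only to illustrate the lifecycle; not part of Hadoop or Tez.
public class TokenTrackingAuxService extends AuxiliaryService {

    // Per-application service data, keyed by application id.
    private final Map<ApplicationId, ByteBuffer> appData = new ConcurrentHashMap<>();

    public TokenTrackingAuxService() {
        super("token_tracking_aux_service");
    }

    @Override
    public void initializeApplication(ApplicationInitializationContext ctx) {
        // Called on APPLICATION_INIT: remember the payload shipped with the application.
        appData.put(ctx.getApplicationId(), ctx.getApplicationDataForService());
    }

    @Override
    public void stopApplication(ApplicationTerminationContext ctx) {
        // Called on APPLICATION_STOP: the context carries only the application id,
        // so this is the hook to release any per-application state.
        appData.remove(ctx.getApplicationId());
    }

    @Override
    public ByteBuffer getMetaData() {
        // Nothing to hand back to application masters in this sketch.
        return ByteBuffer.allocate(0);
    }
}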

Example 2 with ApplicationTerminationContext

Use of org.apache.hadoop.yarn.server.api.ApplicationTerminationContext in project hadoop by apache.

From the class AuxServices, method handle:

@Override
public void handle(AuxServicesEvent event) {
    LOG.info("Got event " + event.getType() + " for appId " + event.getApplicationID());
    switch(event.getType()) {
        case APPLICATION_INIT:
            LOG.info("Got APPLICATION_INIT for service " + event.getServiceID());
            AuxiliaryService service = null;
            try {
                service = serviceMap.get(event.getServiceID());
                service.initializeApplication(new ApplicationInitializationContext(event.getUser(), event.getApplicationID(), event.getServiceData()));
            } catch (Throwable th) {
                logWarningWhenAuxServiceThrowExceptions(service, AuxServicesEventType.APPLICATION_INIT, th);
            }
            break;
        case APPLICATION_STOP:
            for (AuxiliaryService serv : serviceMap.values()) {
                try {
                    serv.stopApplication(new ApplicationTerminationContext(event.getApplicationID()));
                } catch (Throwable th) {
                    logWarningWhenAuxServiceThrowExceptions(serv, AuxServicesEventType.APPLICATION_STOP, th);
                }
            }
            break;
        case CONTAINER_INIT:
            for (AuxiliaryService serv : serviceMap.values()) {
                try {
                    serv.initializeContainer(new ContainerInitializationContext(event.getUser(), event.getContainer().getContainerId(), event.getContainer().getResource(), event.getContainer().getContainerTokenIdentifier().getContainerType()));
                } catch (Throwable th) {
                    logWarningWhenAuxServiceThrowExceptions(serv, AuxServicesEventType.CONTAINER_INIT, th);
                }
            }
            break;
        case CONTAINER_STOP:
            for (AuxiliaryService serv : serviceMap.values()) {
                try {
                    serv.stopContainer(new ContainerTerminationContext(event.getUser(), event.getContainer().getContainerId(), event.getContainer().getResource(), event.getContainer().getContainerTokenIdentifier().getContainerType()));
                } catch (Throwable th) {
                    logWarningWhenAuxServiceThrowExceptions(serv, AuxServicesEventType.CONTAINER_STOP, th);
                }
            }
            break;
        default:
            throw new RuntimeException("Unknown type: " + event.getType());
    }
}
Also used: AuxiliaryService (org.apache.hadoop.yarn.server.api.AuxiliaryService), ContainerTerminationContext (org.apache.hadoop.yarn.server.api.ContainerTerminationContext), ContainerInitializationContext (org.apache.hadoop.yarn.server.api.ContainerInitializationContext), ApplicationInitializationContext (org.apache.hadoop.yarn.server.api.ApplicationInitializationContext), ApplicationTerminationContext (org.apache.hadoop.yarn.server.api.ApplicationTerminationContext)
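
For a quick standalone look at what the APPLICATION_STOP branch above hands to each registered service, the demo below builds the same context the handler constructs. The demo class is hypothetical, and getApplicationId() is assumed to be the accessor a service would use to look up and release per-application state.

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;

// Hypothetical demo class; mirrors the APPLICATION_STOP branch of AuxServices.handle above.
public class TerminationContextDemo {
    public static void main(String[] args) {
        // Same constructor the handler uses: the context wraps only the application id.
        ApplicationId appId = ApplicationId.newInstance(12345L, 1);
        ApplicationTerminationContext stopContext = new ApplicationTerminationContext(appId);
        // A service's stopApplication(...) reads the id back to clean up per-app state.
        System.out.println("Stopping application " + stopContext.getApplicationId());
    }
}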

Example 3 with ApplicationTerminationContext

Use of org.apache.hadoop.yarn.server.api.ApplicationTerminationContext in project tez by apache.

From the class TestShuffleHandler, method testRecovery:

@Test
public void testRecovery() throws IOException {
    final String user = "someuser";
    final ApplicationId appId = ApplicationId.newInstance(12345, 1);
    final JobID jobId = JobID.downgrade(TypeConverter.fromYarn(appId));
    final File tmpDir = new File(System.getProperty("test.build.data", System.getProperty("java.io.tmpdir")), TestShuffleHandler.class.getName());
    Configuration conf = new Configuration();
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
    conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
    ShuffleHandler shuffle = new ShuffleHandler();
    // emulate aux services startup with recovery enabled
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    tmpDir.mkdirs();
    try {
        shuffle.init(conf);
        shuffle.start();
        // setup a shuffle token for an application
        DataOutputBuffer outputBuffer = new DataOutputBuffer();
        outputBuffer.reset();
        Token<JobTokenIdentifier> jt = new Token<JobTokenIdentifier>("identifier".getBytes(), "password".getBytes(), new Text(user), new Text("shuffleService"));
        jt.write(outputBuffer);
        shuffle.initializeApplication(new ApplicationInitializationContext(user, appId, ByteBuffer.wrap(outputBuffer.getData(), 0, outputBuffer.getLength())));
        // verify we are authorized to shuffle
        int rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
        // emulate shuffle handler restart
        shuffle.close();
        shuffle = new ShuffleHandler();
        shuffle.setRecoveryPath(new Path(tmpDir.toString()));
        shuffle.init(conf);
        shuffle.start();
        // verify we are still authorized to shuffle to the old application
        rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
        // shutdown app and verify access is lost
        shuffle.stopApplication(new ApplicationTerminationContext(appId));
        rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, rc);
        // emulate shuffle handler restart
        shuffle.close();
        shuffle = new ShuffleHandler();
        shuffle.setRecoveryPath(new Path(tmpDir.toString()));
        shuffle.init(conf);
        shuffle.start();
        // verify we still don't have access
        rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, rc);
    } finally {
        if (shuffle != null) {
            shuffle.close();
        }
        FileUtil.fullyDelete(tmpDir);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), JobTokenIdentifier (org.apache.tez.common.security.JobTokenIdentifier), Token (org.apache.hadoop.security.token.Token), Text (org.apache.hadoop.io.Text), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), File (java.io.File), JobID (org.apache.hadoop.mapred.JobID), ApplicationInitializationContext (org.apache.hadoop.yarn.server.api.ApplicationInitializationContext), ApplicationTerminationContext (org.apache.hadoop.yarn.server.api.ApplicationTerminationContext), Test (org.junit.Test)
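
The test above runs the same restart sequence twice (close, new ShuffleHandler, setRecoveryPath, init, start). As a convenience sketch only, that sequence could be factored into a helper like the one below. The helper class is hypothetical and not part of either project, and the import package for the Tez fork of the handler is an assumption; only calls already shown in the test are used.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
// Assumed package for the Tez fork of the handler; adjust to the project in use.
import org.apache.tez.auxservices.ShuffleHandler;

// Hypothetical helper; the test inlines these steps instead.
public final class ShuffleHandlerRestarter {

    private ShuffleHandlerRestarter() {
    }

    /** Close the old handler and bring up a fresh one against the same recovery path. */
    public static ShuffleHandler restart(ShuffleHandler old, Path recoveryPath, Configuration conf)
            throws IOException {
        old.close();
        ShuffleHandler fresh = new ShuffleHandler();
        fresh.setRecoveryPath(recoveryPath);
        fresh.init(conf);
        fresh.start();
        return fresh;
    }
}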

Aggregations

ApplicationInitializationContext (org.apache.hadoop.yarn.server.api.ApplicationInitializationContext): 3
ApplicationTerminationContext (org.apache.hadoop.yarn.server.api.ApplicationTerminationContext): 3
File (java.io.File): 2
Configuration (org.apache.hadoop.conf.Configuration): 2
Path (org.apache.hadoop.fs.Path): 2
DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer): 2
Text (org.apache.hadoop.io.Text): 2
Token (org.apache.hadoop.security.token.Token): 2
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 2
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 2
Test (org.junit.Test): 2
JobID (org.apache.hadoop.mapred.JobID): 1
JobTokenIdentifier (org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier): 1
AuxiliaryService (org.apache.hadoop.yarn.server.api.AuxiliaryService): 1
ContainerInitializationContext (org.apache.hadoop.yarn.server.api.ContainerInitializationContext): 1
ContainerTerminationContext (org.apache.hadoop.yarn.server.api.ContainerTerminationContext): 1
JobTokenIdentifier (org.apache.tez.common.security.JobTokenIdentifier): 1