Example 1 with ServiceStateException

Use of org.apache.hadoop.service.ServiceStateException in project hadoop by apache.

From the class TestShuffleHandler, method testRecoveryFromOtherVersions:

@Test
public void testRecoveryFromOtherVersions() throws IOException {
    final String user = "someuser";
    final ApplicationId appId = ApplicationId.newInstance(12345, 1);
    final File tmpDir = new File(System.getProperty("test.build.data", System.getProperty("java.io.tmpdir")), TestShuffleHandler.class.getName());
    Configuration conf = new Configuration();
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
    conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
    ShuffleHandler shuffle = new ShuffleHandler();
    // emulate aux services startup with recovery enabled
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    tmpDir.mkdirs();
    try {
        shuffle.init(conf);
        shuffle.start();
        // setup a shuffle token for an application
        DataOutputBuffer outputBuffer = new DataOutputBuffer();
        outputBuffer.reset();
        Token<JobTokenIdentifier> jt = new Token<JobTokenIdentifier>("identifier".getBytes(), "password".getBytes(), new Text(user), new Text("shuffleService"));
        jt.write(outputBuffer);
        shuffle.initializeApplication(new ApplicationInitializationContext(user, appId, ByteBuffer.wrap(outputBuffer.getData(), 0, outputBuffer.getLength())));
        // verify we are authorized to shuffle
        int rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
        // emulate shuffle handler restart
        shuffle.close();
        shuffle = new ShuffleHandler();
        shuffle.setRecoveryPath(new Path(tmpDir.toString()));
        shuffle.init(conf);
        shuffle.start();
        // verify we are still authorized to shuffle to the old application
        rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
        Version version = Version.newInstance(1, 0);
        Assert.assertEquals(version, shuffle.getCurrentVersion());
        // emulate shuffle handler restart with compatible version
        Version version11 = Version.newInstance(1, 1);
        // update the version info before closing the shuffle handler
        shuffle.storeVersion(version11);
        Assert.assertEquals(version11, shuffle.loadVersion());
        shuffle.close();
        shuffle = new ShuffleHandler();
        shuffle.setRecoveryPath(new Path(tmpDir.toString()));
        shuffle.init(conf);
        shuffle.start();
        // the stored shuffle version is overwritten by CURRENT_VERSION_INFO
        // after a successful restart
        Assert.assertEquals(version, shuffle.loadVersion());
        // verify we are still authorized to shuffle to the old application
        rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
        // emulate shuffle handler restart with incompatible version
        Version version21 = Version.newInstance(2, 1);
        shuffle.storeVersion(version21);
        Assert.assertEquals(version21, shuffle.loadVersion());
        shuffle.close();
        shuffle = new ShuffleHandler();
        shuffle.setRecoveryPath(new Path(tmpDir.toString()));
        shuffle.init(conf);
        try {
            shuffle.start();
            Assert.fail("Incompatible version, should expect fail here.");
        } catch (ServiceStateException e) {
            Assert.assertTrue("Exception message mismatch", e.getMessage().contains("Incompatible version for state DB schema:"));
        }
    } finally {
        if (shuffle != null) {
            shuffle.close();
        }
        FileUtil.fullyDelete(tmpDir);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), JobTokenIdentifier (org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier), Token (org.apache.hadoop.security.token.Token), Text (org.apache.hadoop.io.Text), Version (org.apache.hadoop.yarn.server.records.Version), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), ServiceStateException (org.apache.hadoop.service.ServiceStateException), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), File (java.io.File), ApplicationInitializationContext (org.apache.hadoop.yarn.server.api.ApplicationInitializationContext), Test (org.junit.Test)
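
The compatibility rule this test exercises is the usual Hadoop state-store convention: a stored version with the same major number as the current one is accepted and overwritten, while a different major number aborts startup. Below is a minimal sketch of that check, assuming the names loadVersion, storeVersion, and CURRENT_VERSION_INFO mirror the methods the test calls; the in-memory field stands in for the real leveldb-backed store.

import java.io.IOException;
import org.apache.hadoop.yarn.server.records.Version;

class VersionCheckSketch {
    // assumed stand-in for the handler's current schema version (see the test)
    static final Version CURRENT_VERSION_INFO = Version.newInstance(1, 0);
    // in-memory stand-in for the leveldb-backed state store
    private Version stored;

    Version loadVersion() { return stored; }
    void storeVersion(Version v) { stored = v; }

    void checkVersion() throws IOException {
        Version loaded = loadVersion();
        if (loaded == null || loaded.equals(CURRENT_VERSION_INFO)) {
            // first start, or exact match: record the current version
            storeVersion(CURRENT_VERSION_INFO);
        } else if (loaded.getMajorVersion() == CURRENT_VERSION_INFO.getMajorVersion()) {
            // compatible (e.g. stored 1.1, current 1.0): overwrite with the
            // current version, which is why the test reads back 1.0 after
            // restarting with 1.1 stored
            storeVersion(CURRENT_VERSION_INFO);
        } else {
            // incompatible (e.g. stored 2.1, current 1.0): fail startup; the
            // service wrapper surfaces this as the ServiceStateException that
            // the test catches
            throw new IOException("Incompatible version for state DB schema: "
                + "expecting " + CURRENT_VERSION_INFO + ", found " + loaded);
        }
    }
}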

Example 2 with ServiceStateException

Use of org.apache.hadoop.service.ServiceStateException in project hadoop by apache.

From the class RegistrySecurity, method initSecurity:

/**
   * Init security.
   *
   * After this operation, the {@link #systemACLs} list is valid.
   * @throws IOException if the security settings cannot be initialized
   */
private void initSecurity() throws IOException {
    secureRegistry = getConfig().getBoolean(KEY_REGISTRY_SECURE, DEFAULT_REGISTRY_SECURE);
    systemACLs.clear();
    if (secureRegistry) {
        addSystemACL(ALL_READ_ACCESS);
        // determine the kerberos realm from JVM and settings
        kerberosRealm = getConfig().get(KEY_REGISTRY_KERBEROS_REALM, getDefaultRealmInJVM());
        // System Accounts
        String system = getOrFail(KEY_REGISTRY_SYSTEM_ACCOUNTS, DEFAULT_REGISTRY_SYSTEM_ACCOUNTS);
        usesRealm = system.contains("@");
        systemACLs.addAll(buildACLs(system, kerberosRealm, ZooDefs.Perms.ALL));
        // user accounts (may be empty, but for digest auth at least one
        // user ACL must be built up)
        String user = getConfig().get(KEY_REGISTRY_USER_ACCOUNTS, DEFAULT_REGISTRY_USER_ACCOUNTS);
        List<ACL> userACLs = buildACLs(user, kerberosRealm, ZooDefs.Perms.ALL);
        // add self if the current user can be determined
        ACL self;
        if (UserGroupInformation.isSecurityEnabled()) {
            self = createSaslACLFromCurrentUser(ZooDefs.Perms.ALL);
            if (self != null) {
                userACLs.add(self);
            }
        }
        // here check for UGI having secure on or digest + ID
        switch(access) {
            case sasl:
                // secure + SASL => has to be authenticated
                if (!UserGroupInformation.isSecurityEnabled()) {
                    throw new IOException("Kerberos required for secure registry access");
                }
                UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
                jaasClientContext = getOrFail(KEY_REGISTRY_CLIENT_JAAS_CONTEXT, DEFAULT_REGISTRY_CLIENT_JAAS_CONTEXT);
                jaasClientIdentity = currentUser.getShortUserName();
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Auth is SASL user=\"{}\" JAAS context=\"{}\"", jaasClientIdentity, jaasClientContext);
                }
                break;
            case digest:
                String id = getOrFail(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, "");
                String pass = getOrFail(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, "");
                if (userACLs.isEmpty()) {
                    // digest auth requires at least one user ACL to attach the digest identity to
                    throw new ServiceStateException(E_NO_USER_DETERMINED_FOR_ACLS);
                }
                digest(id, pass);
                ACL acl = new ACL(ZooDefs.Perms.ALL, toDigestId(id, pass));
                userACLs.add(acl);
                digestAuthUser = id;
                digestAuthPassword = pass;
                String authPair = id + ":" + pass;
                digestAuthData = authPair.getBytes("UTF-8");
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Auth is Digest ACL: {}", aclToString(acl));
                }
                break;
            case anon:
                // nothing is needed; account is read only.
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Auth is anonymous");
                }
                userACLs = new ArrayList<ACL>(0);
                break;
        }
        systemACLs.addAll(userACLs);
    } else {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Registry has no security");
        }
        // wide open cluster, adding system acls
        systemACLs.addAll(WorldReadWriteACL);
    }
}
Also used: ACL (org.apache.zookeeper.data.ACL), IOException (java.io.IOException), ServiceStateException (org.apache.hadoop.service.ServiceStateException), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
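
The digest branch above boils down to deriving a ZooKeeper digest identity from an id:password pair and granting it full permissions. Here is a minimal sketch of that step, assuming toDigestId() wraps ZooKeeper's DigestAuthenticationProvider; the real helper in RegistrySecurity may differ in detail.

import java.security.NoSuchAlgorithmException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.data.ACL;
import org.apache.zookeeper.data.Id;
import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;

class DigestAclSketch {
    static ACL digestAcl(String id, String pass) throws NoSuchAlgorithmException {
        // SHA-1 of "id:pass", base64-encoded and prefixed with the id, in the
        // form ZooKeeper expects for its "digest" ACL scheme
        String digest = DigestAuthenticationProvider.generateDigest(id + ":" + pass);
        return new ACL(ZooDefs.Perms.ALL, new Id("digest", digest));
    }
}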

Example 3 with ServiceStateException

Use of org.apache.hadoop.service.ServiceStateException in project hadoop by apache.

From the class TestJvmMetrics, method testStopBeforeInit:

@Test
public void testStopBeforeInit() throws Throwable {
    pauseMonitor = new JvmPauseMonitor();
    try {
        pauseMonitor.stop();
        pauseMonitor.init(new Configuration());
        Assert.fail("Expected an exception, got " + pauseMonitor);
    } catch (ServiceStateException e) {
        GenericTestUtils.assertExceptionContains("cannot enter state", e);
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), ServiceStateException (org.apache.hadoop.service.ServiceStateException), JvmPauseMonitor (org.apache.hadoop.util.JvmPauseMonitor), Test (org.junit.Test)
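
This test leans on the Hadoop service state machine (NOTINITED, INITED, STARTED, STOPPED): stop() is legal from any state, but a STOPPED service may not be re-initialized, so the subsequent init() fails with a ServiceStateException whose message contains "cannot enter state". A minimal standalone sketch of the same transition, using an empty anonymous AbstractService:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.ServiceStateException;

public class LifecycleSketch {
    public static void main(String[] args) {
        // AbstractService has no-op default lifecycle hooks, so an empty
        // anonymous subclass is enough to drive the state machine
        AbstractService svc = new AbstractService("demo") { };
        svc.stop(); // NOTINITED -> STOPPED: permitted from any state
        try {
            svc.init(new Configuration()); // STOPPED -> INITED: rejected
        } catch (ServiceStateException e) {
            System.out.println("rejected as expected: " + e.getMessage());
        }
    }
}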

Example 4 with ServiceStateException

Use of org.apache.hadoop.service.ServiceStateException in project tez by apache.

From the class TestShuffleHandler, method testRecoveryFromOtherVersions:

@Test
public void testRecoveryFromOtherVersions() throws IOException {
    final String user = "someuser";
    final ApplicationId appId = ApplicationId.newInstance(12345, 1);
    final File tmpDir = new File(System.getProperty("test.build.data", System.getProperty("java.io.tmpdir")), TestShuffleHandler.class.getName());
    Configuration conf = new Configuration();
    conf.setInt(ShuffleHandler.SHUFFLE_PORT_CONFIG_KEY, 0);
    conf.setInt(ShuffleHandler.MAX_SHUFFLE_CONNECTIONS, 3);
    ShuffleHandler shuffle = new ShuffleHandler();
    // emulate aux services startup with recovery enabled
    shuffle.setRecoveryPath(new Path(tmpDir.toString()));
    tmpDir.mkdirs();
    try {
        shuffle.init(conf);
        shuffle.start();
        // setup a shuffle token for an application
        DataOutputBuffer outputBuffer = new DataOutputBuffer();
        outputBuffer.reset();
        Token<JobTokenIdentifier> jt = new Token<JobTokenIdentifier>("identifier".getBytes(), "password".getBytes(), new Text(user), new Text("shuffleService"));
        jt.write(outputBuffer);
        shuffle.initializeApplication(new ApplicationInitializationContext(user, appId, ByteBuffer.wrap(outputBuffer.getData(), 0, outputBuffer.getLength())));
        // verify we are authorized to shuffle
        int rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
        // emulate shuffle handler restart
        shuffle.close();
        shuffle = new ShuffleHandler();
        shuffle.setRecoveryPath(new Path(tmpDir.toString()));
        shuffle.init(conf);
        shuffle.start();
        // verify we are still authorized to shuffle to the old application
        rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
        Version version = Version.newInstance(1, 0);
        Assert.assertEquals(version, shuffle.getCurrentVersion());
        // emulate shuffle handler restart with compatible version
        Version version11 = Version.newInstance(1, 1);
        // update the version info before closing the shuffle handler
        shuffle.storeVersion(version11);
        Assert.assertEquals(version11, shuffle.loadVersion());
        shuffle.close();
        shuffle = new ShuffleHandler();
        shuffle.setRecoveryPath(new Path(tmpDir.toString()));
        shuffle.init(conf);
        shuffle.start();
        // the stored shuffle version is overwritten by CURRENT_VERSION_INFO
        // after a successful restart
        Assert.assertEquals(version, shuffle.loadVersion());
        // verify we are still authorized to shuffle to the old application
        rc = getShuffleResponseCode(shuffle, jt);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, rc);
        // emulate shuffle handler restart with incompatible version
        Version version21 = Version.newInstance(2, 1);
        shuffle.storeVersion(version21);
        Assert.assertEquals(version21, shuffle.loadVersion());
        shuffle.close();
        shuffle = new ShuffleHandler();
        shuffle.setRecoveryPath(new Path(tmpDir.toString()));
        shuffle.init(conf);
        try {
            shuffle.start();
            Assert.fail("Incompatible version, should expect fail here.");
        } catch (ServiceStateException e) {
            Assert.assertTrue("Exception message mismatch", e.getMessage().contains("Incompatible version for state DB schema:"));
        }
    } finally {
        if (shuffle != null) {
            shuffle.close();
        }
        FileUtil.fullyDelete(tmpDir);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), JobTokenIdentifier (org.apache.tez.common.security.JobTokenIdentifier), Token (org.apache.hadoop.security.token.Token), Text (org.apache.hadoop.io.Text), Version (org.apache.hadoop.yarn.server.records.Version), DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer), ServiceStateException (org.apache.hadoop.service.ServiceStateException), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), File (java.io.File), ApplicationInitializationContext (org.apache.hadoop.yarn.server.api.ApplicationInitializationContext), Test (org.junit.Test)

Example 5 with ServiceStateException

Use of org.apache.hadoop.service.ServiceStateException in project hadoop by apache.

From the class TestLeveldbTimelineStateStore, method testCheckVersion:

@Test
public void testCheckVersion() throws IOException {
    LeveldbTimelineStateStore store = initAndStartTimelineServiceStateStoreService();
    // default version
    Version defaultVersion = store.getCurrentVersion();
    Assert.assertEquals(defaultVersion, store.loadVersion());
    // compatible version
    Version compatibleVersion = Version.newInstance(defaultVersion.getMajorVersion(), defaultVersion.getMinorVersion() + 2);
    store.storeVersion(compatibleVersion);
    Assert.assertEquals(compatibleVersion, store.loadVersion());
    store.stop();
    // overwrite the compatible version
    store = initAndStartTimelineServiceStateStoreService();
    Assert.assertEquals(defaultVersion, store.loadVersion());
    // incompatible version
    Version incompatibleVersion = Version.newInstance(defaultVersion.getMajorVersion() + 1, defaultVersion.getMinorVersion());
    store.storeVersion(incompatibleVersion);
    store.stop();
    try {
        initAndStartTimelineServiceStateStoreService();
        Assert.fail("Incompatible version, should expect fail here.");
    } catch (ServiceStateException e) {
        Assert.assertTrue("Exception message mismatch", e.getMessage().contains("Incompatible version for timeline state store"));
    }
}
Also used: Version (org.apache.hadoop.yarn.server.records.Version), ServiceStateException (org.apache.hadoop.service.ServiceStateException), Test (org.junit.Test)
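
Note that this test, like the ShuffleHandler tests above, catches ServiceStateException even though the underlying version check throws a plain IOException. That is because AbstractService.init() and start() funnel any failure through ServiceStateException.convert(), which wraps checked exceptions while keeping them as the cause. A minimal sketch of that wrapping; the failing store below is hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.service.ServiceStateException;

public class WrappedFailureSketch {
    // hypothetical store whose startup fails the way a version check would
    static class FailingStore extends AbstractService {
        FailingStore() { super("failing-store"); }
        @Override
        protected void serviceStart() throws Exception {
            throw new IOException("Incompatible version for timeline state store");
        }
    }

    public static void main(String[] args) {
        FailingStore store = new FailingStore();
        store.init(new Configuration());
        try {
            store.start();
        } catch (ServiceStateException e) {
            // the original IOException rides along as the cause, and its text
            // ends up in the message the tests assert on
            System.out.println(e.getMessage() + " (cause: " + e.getCause() + ")");
        }
    }
}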

Aggregations

ServiceStateException (org.apache.hadoop.service.ServiceStateException): 11 usages
Test (org.junit.Test): 9 usages
Version (org.apache.hadoop.yarn.server.records.Version): 7 usages
Configuration (org.apache.hadoop.conf.Configuration): 5 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 3 usages
File (java.io.File): 2 usages
IOException (java.io.IOException): 2 usages
Path (org.apache.hadoop.fs.Path): 2 usages
DataOutputBuffer (org.apache.hadoop.io.DataOutputBuffer): 2 usages
Text (org.apache.hadoop.io.Text): 2 usages
Token (org.apache.hadoop.security.token.Token): 2 usages
JvmPauseMonitor (org.apache.hadoop.util.JvmPauseMonitor): 2 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 2 usages
ApplicationInitializationContext (org.apache.hadoop.yarn.server.api.ApplicationInitializationContext): 2 usages
JobTokenIdentifier (org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier): 1 usage
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 1 usage
JobTokenIdentifier (org.apache.tez.common.security.JobTokenIdentifier): 1 usage
ACL (org.apache.zookeeper.data.ACL): 1 usage