Example 1 with StartupOption

Use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project hadoop by apache.

The class TestBackupNode, method startBackupNodeWithIncorrectAuthentication.

/**
   *  Regression test for HDFS-9249.
   *  This test configures the primary name node with SIMPLE authentication,
   *  and configures the backup node with Kerberos authentication and
   *  invalid keytab settings.
   *
   *  This configuration causes the backup node to throw an NPE when trying
   *  to abort the edit log.
   */
@Test
public void startBackupNodeWithIncorrectAuthentication() throws IOException {
    Configuration c = new HdfsConfiguration();
    StartupOption startupOpt = StartupOption.CHECKPOINT;
    String dirs = getBackupNodeDir(startupOpt, 1);
    c.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:" + ServerSocketUtil.getPort(0, 100));
    c.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
    c.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0");
    // disable block scanner
    c.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
    c.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
    c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
    c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
    c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY, "127.0.0.1:0");
    c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, "127.0.0.1:0");
    NameNode nn;
    try {
        Configuration nnconf = new HdfsConfiguration(c);
        DFSTestUtil.formatNameNode(nnconf);
        nn = NameNode.createNameNode(new String[] {}, nnconf);
    } catch (IOException e) {
        LOG.info("IOException is thrown creating name node");
        throw e;
    }
    c.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    c.set(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY, "");
    BackupNode bn = null;
    try {
        bn = (BackupNode) NameNode.createNameNode(new String[] { startupOpt.getName() }, c);
        assertTrue("Namesystem in BackupNode should be null", bn.getNamesystem() == null);
        fail("Incorrect authentication setting should throw IOException");
    } catch (IOException e) {
        LOG.info("IOException thrown.", e);
        assertTrue(e.getMessage().contains("Running in secure mode"));
    } finally {
        if (nn != null) {
            nn.stop();
        }
        if (bn != null) {
            bn.stop();
        }
        SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.SIMPLE, c);
        // reset security authentication
        UserGroupInformation.setConfiguration(c);
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), StartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption), IOException (java.io.IOException), Test (org.junit.Test)
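A note on the cleanup in the finally block above: once a test flips hadoop.security.authentication to "kerberos", the static UserGroupInformation state must be reset, or later tests in the same JVM inherit the secure-mode setting. A minimal sketch of that reset as a reusable helper (the helper name is illustrative, not part of the Hadoop test):

private static void resetToSimpleAuth(Configuration conf) {
    // Mirror the cleanup in the test: force SIMPLE authentication back into
    // the config, then reload UGI's static state from it.
    SecurityUtil.setAuthenticationMethod(
        UserGroupInformation.AuthenticationMethod.SIMPLE, conf);
    UserGroupInformation.setConfiguration(conf);
}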

Example 2 with StartupOption

Use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project hadoop by apache.

The class TestNameNodeOptionParsing, method testUpgrade.

@Test(timeout = 10000)
public void testUpgrade() {
    StartupOption opt = null;
    // UPGRADE is set, but nothing else
    opt = NameNode.parseArguments(new String[] { "-upgrade" });
    assertEquals(opt, StartupOption.UPGRADE);
    assertNull(opt.getClusterId());
    assertTrue(FSImageFormat.renameReservedMap.isEmpty());
    // cluster ID is set
    opt = NameNode.parseArguments(new String[] { "-upgrade", "-clusterid", "mycid" });
    assertEquals(StartupOption.UPGRADE, opt);
    assertEquals("mycid", opt.getClusterId());
    assertTrue(FSImageFormat.renameReservedMap.isEmpty());
    // Everything is set
    opt = NameNode.parseArguments(new String[] { "-upgrade", "-clusterid", "mycid", "-renameReserved", ".snapshot=.my-snapshot,.reserved=.my-reserved" });
    assertEquals(StartupOption.UPGRADE, opt);
    assertEquals("mycid", opt.getClusterId());
    assertEquals(".my-snapshot", FSImageFormat.renameReservedMap.get(".snapshot"));
    assertEquals(".my-reserved", FSImageFormat.renameReservedMap.get(".reserved"));
    // Reset the map
    FSImageFormat.renameReservedMap.clear();
    // Everything is set, but in a different order
    opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved", ".reserved=.my-reserved,.snapshot=.my-snapshot", "-clusterid", "mycid" });
    assertEquals(StartupOption.UPGRADE, opt);
    assertEquals("mycid", opt.getClusterId());
    assertEquals(".my-snapshot", FSImageFormat.renameReservedMap.get(".snapshot"));
    assertEquals(".my-reserved", FSImageFormat.renameReservedMap.get(".reserved"));
    // Try the default renameReserved
    opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved" });
    assertEquals(StartupOption.UPGRADE, opt);
    assertEquals(".snapshot." + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + ".UPGRADE_RENAMED", FSImageFormat.renameReservedMap.get(".snapshot"));
    assertEquals(".reserved." + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + ".UPGRADE_RENAMED", FSImageFormat.renameReservedMap.get(".reserved"));
    // Try some error conditions
    try {
        opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved", ".reserved=.my-reserved,.not-reserved=.my-not-reserved" });
    } catch (IllegalArgumentException e) {
        assertExceptionContains("Unknown reserved path", e);
    }
    try {
        opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved", ".reserved=.my-reserved,.snapshot=.snapshot" });
    } catch (IllegalArgumentException e) {
        assertExceptionContains("Invalid rename path", e);
    }
    try {
        opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved", ".snapshot=.reserved" });
    } catch (IllegalArgumentException e) {
        assertExceptionContains("Invalid rename path", e);
    }
    opt = NameNode.parseArguments(new String[] { "-upgrade", "-cid" });
    assertNull(opt);
}
Also used: RollingUpgradeStartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption), StartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption), Test (org.junit.Test)
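The -renameReserved value exercised above is a comma-separated list of key=value pairs, each mapping a reserved path to its replacement name. The real parsing and validation happen inside FSImageFormat.setRenameReservedPairs; the following is only a rough sketch of how such a string decomposes (assumes java.util.Map and java.util.HashMap):

// Split ".snapshot=.my-snapshot,.reserved=.my-reserved" into path pairs.
Map<String, String> renames = new HashMap<>();
for (String pair : ".snapshot=.my-snapshot,.reserved=.my-reserved".split(",")) {
    String[] kv = pair.split("=");
    renames.put(kv[0], kv[1]);  // e.g. ".snapshot" -> ".my-snapshot"
}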

Example 3 with StartupOption

Use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project hadoop by apache.

The class DataNode, method initStorage.

/**
   * Initializes the {@link #data}. The initialization is done only once, when
   * the handshake with the first namenode is completed.
   */
private void initStorage(final NamespaceInfo nsInfo) throws IOException {
    final FsDatasetSpi.Factory<? extends FsDatasetSpi<?>> factory = FsDatasetSpi.Factory.getFactory(getConf());
    if (!factory.isSimulated()) {
        final StartupOption startOpt = getStartupOption(getConf());
        if (startOpt == null) {
            throw new IOException("Startup option not set.");
        }
        final String bpid = nsInfo.getBlockPoolID();
        // read storage info, lock data dirs, and transition fs state if necessary
        synchronized (this) {
            storage.recoverTransitionRead(this, nsInfo, dataDirs, startOpt);
        }
        final StorageInfo bpStorage = storage.getBPStorage(bpid);
        LOG.info("Setting up storage: nsid=" + bpStorage.getNamespaceID() + ";bpid=" + bpid + ";lv=" + storage.getLayoutVersion() + ";nsInfo=" + nsInfo + ";dnuuid=" + storage.getDatanodeUuid());
    }
    // If this is a newly formatted DataNode then assign a new DatanodeUuid.
    checkDatanodeUuid();
    synchronized (this) {
        if (data == null) {
            data = factory.newInstance(this, storage, getConf());
        }
    }
}
Also used: FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi), StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo), StartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption), IOException (java.io.IOException)
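The call to getStartupOption(getConf()) above reads back the option that was stashed into the Configuration when the DataNode was launched. A hedged sketch of what such a setter/getter pair can look like; the key name "dfs.datanode.startup" and the enum serialization are assumptions about the implementation, not something the snippet above confirms:

// Assumed companion methods: store the option under a config key at startup,
// read it back (possibly null) during initStorage.
static void setStartupOption(Configuration conf, StartupOption opt) {
    conf.set("dfs.datanode.startup", opt.name());
}

static StartupOption getStartupOption(Configuration conf) {
    String value = conf.get("dfs.datanode.startup");
    return (value == null) ? null : StartupOption.valueOf(value);
}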

Example 4 with StartupOption

Use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project hadoop by apache.

The class NameNode, method parseArguments.

@VisibleForTesting
static StartupOption parseArguments(String[] args) {
    int argsLen = (args == null) ? 0 : args.length;
    StartupOption startOpt = StartupOption.REGULAR;
    for (int i = 0; i < argsLen; i++) {
        String cmd = args[i];
        if (StartupOption.FORMAT.getName().equalsIgnoreCase(cmd)) {
            startOpt = StartupOption.FORMAT;
            for (i = i + 1; i < argsLen; i++) {
                if (args[i].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
                    i++;
                    if (i >= argsLen) {
                        // if no cluster id specified, return null
                        LOG.error("Must specify a valid cluster ID after the " + StartupOption.CLUSTERID.getName() + " flag");
                        return null;
                    }
                    String clusterId = args[i];
                    // Make sure an id is specified and not another flag
                    if (clusterId.isEmpty() || clusterId.equalsIgnoreCase(StartupOption.FORCE.getName()) || clusterId.equalsIgnoreCase(StartupOption.NONINTERACTIVE.getName())) {
                        LOG.error("Must specify a valid cluster ID after the " + StartupOption.CLUSTERID.getName() + " flag");
                        return null;
                    }
                    startOpt.setClusterId(clusterId);
                }
                if (args[i].equalsIgnoreCase(StartupOption.FORCE.getName())) {
                    startOpt.setForceFormat(true);
                }
                if (args[i].equalsIgnoreCase(StartupOption.NONINTERACTIVE.getName())) {
                    startOpt.setInteractiveFormat(false);
                }
            }
        } else if (StartupOption.GENCLUSTERID.getName().equalsIgnoreCase(cmd)) {
            startOpt = StartupOption.GENCLUSTERID;
        } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
            startOpt = StartupOption.REGULAR;
        } else if (StartupOption.BACKUP.getName().equalsIgnoreCase(cmd)) {
            startOpt = StartupOption.BACKUP;
        } else if (StartupOption.CHECKPOINT.getName().equalsIgnoreCase(cmd)) {
            startOpt = StartupOption.CHECKPOINT;
        } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd) || StartupOption.UPGRADEONLY.getName().equalsIgnoreCase(cmd)) {
            startOpt = StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd) ? StartupOption.UPGRADE : StartupOption.UPGRADEONLY;
            /* Can be followed by CLUSTERID with a required parameter or
             * RENAMERESERVED with an optional parameter
             */
            while (i + 1 < argsLen) {
                String flag = args[i + 1];
                if (flag.equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
                    if (i + 2 < argsLen) {
                        i += 2;
                        startOpt.setClusterId(args[i]);
                    } else {
                        LOG.error("Must specify a valid cluster ID after the " + StartupOption.CLUSTERID.getName() + " flag");
                        return null;
                    }
                } else if (flag.equalsIgnoreCase(StartupOption.RENAMERESERVED.getName())) {
                    if (i + 2 < argsLen) {
                        FSImageFormat.setRenameReservedPairs(args[i + 2]);
                        i += 2;
                    } else {
                        FSImageFormat.useDefaultRenameReservedPairs();
                        i += 1;
                    }
                } else {
                    LOG.error("Unknown upgrade flag " + flag);
                    return null;
                }
            }
        } else if (StartupOption.ROLLINGUPGRADE.getName().equalsIgnoreCase(cmd)) {
            startOpt = StartupOption.ROLLINGUPGRADE;
            ++i;
            if (i >= argsLen) {
                LOG.error("Must specify a rolling upgrade startup option " + RollingUpgradeStartupOption.getAllOptionString());
                return null;
            }
            startOpt.setRollingUpgradeStartupOption(args[i]);
        } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
            startOpt = StartupOption.ROLLBACK;
        } else if (StartupOption.IMPORT.getName().equalsIgnoreCase(cmd)) {
            startOpt = StartupOption.IMPORT;
        } else if (StartupOption.BOOTSTRAPSTANDBY.getName().equalsIgnoreCase(cmd)) {
            startOpt = StartupOption.BOOTSTRAPSTANDBY;
            return startOpt;
        } else if (StartupOption.INITIALIZESHAREDEDITS.getName().equalsIgnoreCase(cmd)) {
            startOpt = StartupOption.INITIALIZESHAREDEDITS;
            for (i = i + 1; i < argsLen; i++) {
                if (StartupOption.NONINTERACTIVE.getName().equals(args[i])) {
                    startOpt.setInteractiveFormat(false);
                } else if (StartupOption.FORCE.getName().equals(args[i])) {
                    startOpt.setForceFormat(true);
                } else {
                    LOG.error("Invalid argument: " + args[i]);
                    return null;
                }
            }
            return startOpt;
        } else if (StartupOption.RECOVER.getName().equalsIgnoreCase(cmd)) {
            if (startOpt != StartupOption.REGULAR) {
                throw new RuntimeException("Can't combine -recover with " + "other startup options.");
            }
            startOpt = StartupOption.RECOVER;
            while (++i < argsLen) {
                if (args[i].equalsIgnoreCase(StartupOption.FORCE.getName())) {
                    startOpt.setForce(MetaRecoveryContext.FORCE_FIRST_CHOICE);
                } else {
                    throw new RuntimeException("Error parsing recovery options: " + "can't understand option \"" + args[i] + "\"");
                }
            }
        } else if (StartupOption.METADATAVERSION.getName().equalsIgnoreCase(cmd)) {
            startOpt = StartupOption.METADATAVERSION;
        } else {
            return null;
        }
    }
    return startOpt;
}
Also used: StartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption), RollingUpgradeStartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
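Three consequences of the parsing loop are worth spelling out: with no arguments the default StartupOption.REGULAR is returned, a recognized flag switches the option, and any unrecognized flag makes the whole parse return null (which createNameNode, below, turns into a usage message). An illustrative check of that contract (the method is package-private and @VisibleForTesting, so this only compiles from within the same package):

// Behavior implied by the parsing loop above.
assert NameNode.parseArguments(new String[] {}) == StartupOption.REGULAR;
assert NameNode.parseArguments(new String[] { "-rollback" }) == StartupOption.ROLLBACK;
assert NameNode.parseArguments(new String[] { "-bogusFlag" }) == null;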

Example 5 with StartupOption

Use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project hadoop by apache.

The class NameNode, method createNameNode.

public static NameNode createNameNode(String[] argv, Configuration conf) throws IOException {
    LOG.info("createNameNode " + Arrays.asList(argv));
    if (conf == null)
        conf = new HdfsConfiguration();
    // Parse out some generic args into Configuration.
    GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
    argv = hParser.getRemainingArgs();
    // Parse the rest, NN specific args.
    StartupOption startOpt = parseArguments(argv);
    if (startOpt == null) {
        printUsage(System.err);
        return null;
    }
    setStartupOption(conf, startOpt);
    boolean aborted = false;
    switch(startOpt) {
        case FORMAT:
            aborted = format(conf, startOpt.getForceFormat(), startOpt.getInteractiveFormat());
            terminate(aborted ? 1 : 0);
            // avoid javac warning
            return null;
        case GENCLUSTERID:
            System.err.println("Generating new cluster id:");
            System.out.println(NNStorage.newClusterID());
            terminate(0);
            return null;
        case ROLLBACK:
            aborted = doRollback(conf, true);
            terminate(aborted ? 1 : 0);
            // avoid warning
            return null;
        case BOOTSTRAPSTANDBY:
            String[] toolArgs = Arrays.copyOfRange(argv, 1, argv.length);
            int rc = BootstrapStandby.run(toolArgs, conf);
            terminate(rc);
            // avoid warning
            return null;
        case INITIALIZESHAREDEDITS:
            aborted = initializeSharedEdits(conf, startOpt.getForceFormat(), startOpt.getInteractiveFormat());
            terminate(aborted ? 1 : 0);
            // avoid warning
            return null;
        case BACKUP:
        case CHECKPOINT:
            NamenodeRole role = startOpt.toNodeRole();
            DefaultMetricsSystem.initialize(role.toString().replace(" ", ""));
            return new BackupNode(conf, role);
        case RECOVER:
            NameNode.doRecovery(startOpt, conf);
            return null;
        case METADATAVERSION:
            printMetadataVersion(conf);
            terminate(0);
            // avoid javac warning
            return null;
        case UPGRADEONLY:
            DefaultMetricsSystem.initialize("NameNode");
            new NameNode(conf);
            terminate(0);
            return null;
        default:
            DefaultMetricsSystem.initialize("NameNode");
            return new NameNode(conf);
    }
}
Also used: StartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption), RollingUpgradeStartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), GenericOptionsParser (org.apache.hadoop.util.GenericOptionsParser), NamenodeRole (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole)
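One caller-facing detail of the switch above: FORMAT, GENCLUSTERID, ROLLBACK, BOOTSTRAPSTANDBY, INITIALIZESHAREDEDITS, METADATAVERSION, and UPGRADEONLY all terminate the JVM, RECOVER returns null after running recovery, and only BACKUP, CHECKPOINT, and the default REGULAR path hand back a running node. A minimal regular start, mirroring the default branch:

// Sketch of a REGULAR start; tool-style options would have terminated
// the JVM or returned null instead of reaching this point.
Configuration conf = new HdfsConfiguration();
NameNode nn = NameNode.createNameNode(new String[] {}, conf);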

Aggregations

StartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption): 13 usages
RollingUpgradeStartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption): 6 usages
IOException (java.io.IOException): 4 usages
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 3 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages
Test (org.junit.Test): 2 usages
NamenodeRole (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole): 1 usage
StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo): 1 usage
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 1 usage
NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode): 1 usage
NameNodeMetrics (org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics): 1 usage
GenericOptionsParser (org.apache.hadoop.util.GenericOptionsParser): 1 usage
GenericOptionsParser (org.smartdata.server.utils.GenericOptionsParser): 1 usage