Search in sources :

Example 6 with StartupOption

use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project hadoop by apache.

The class DataNode, method parseArguments.

/**
   * Parses and validates the command line arguments, recording the resulting
   * startup option in the supplied configuration.
   *
   * @param args command line arguments; may be null or empty
   * @param conf configuration in which the parsed startup option is stored
   * @return false if the passed arguments are incorrect
   */
@VisibleForTesting
static boolean parseArguments(String[] args, Configuration conf) {
    StartupOption option = StartupOption.REGULAR;
    int consumed = 0;
    if (args != null && args.length != 0) {
        String arg = args[consumed++];
        if ("-r".equalsIgnoreCase(arg) || "--rack".equalsIgnoreCase(arg)) {
            // Rack resolution now lives in the NameNode; reject the legacy flags.
            LOG.error("-r, --rack arguments are not supported anymore. RackID " + "resolution is handled by the NameNode.");
            return false;
        } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(arg)) {
            option = StartupOption.ROLLBACK;
        } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(arg)) {
            option = StartupOption.REGULAR;
        } else {
            return false;
        }
    }
    setStartupOption(conf, option);
    // At most one command may be given; anything beyond it is an error.
    return (args == null || consumed == args.length);
}
Also used : StartupOption(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption) VisibleForTesting(com.google.common.annotations.VisibleForTesting)

Example 7 with StartupOption

use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project hadoop by apache.

The class FSNamesystem, method loadFromDisk.

/**
   * Builds an FSNamesystem by reading the namespace image and edit logs
   * from the storage directories named in the given Configuration.
   *
   * @param conf the Configuration which specifies the storage directories
   *             from which to load
   * @return an FSNamesystem which contains the loaded namespace
   * @throws IOException if loading fails
   */
static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
    checkConfiguration(conf);
    FSImage image = new FSImage(conf, FSNamesystem.getNamespaceDirs(conf), FSNamesystem.getNamespaceEditsDirs(conf));
    FSNamesystem ns = new FSNamesystem(conf, image, false);
    StartupOption option = NameNode.getStartupOption(conf);
    if (option == StartupOption.RECOVER) {
        // Recovery mode keeps the NameNode in safe mode while the image loads.
        ns.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    }
    final long start = monotonicNow();
    try {
        ns.loadFSImage(option);
    } catch (IOException ioe) {
        LOG.warn("Encountered exception loading fsimage", ioe);
        // Release the image's resources before propagating the failure.
        image.close();
        throw ioe;
    }
    final long elapsedMs = monotonicNow() - start;
    LOG.info("Finished loading FSImage in " + elapsedMs + " msecs");
    NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
    if (metrics != null) {
        metrics.setFsImageLoadTime((int) elapsedMs);
    }
    ns.getFSDirectory().createReservedStatuses(ns.getCTime());
    return ns;
}
Also used : RollingUpgradeStartupOption(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption) StartupOption(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption) IOException(java.io.IOException) NameNodeMetrics(org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics)

Example 8 with StartupOption

use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project hadoop by apache.

The class FSNamesystem, method getStorageDirs.

/**
 * Resolves the directories configured under {@code propertyName} into URIs.
 * For an IMPORT startup the built-in default directories are stripped out so
 * the name-node may start with an empty set of storage directories; otherwise
 * an empty configuration falls back to the default edits directory.
 */
private static Collection<URI> getStorageDirs(Configuration conf, String propertyName) {
    Collection<String> configured = conf.getTrimmedStringCollection(propertyName);
    StartupOption option = NameNode.getStartupOption(conf);
    if (option == StartupOption.IMPORT) {
        // In case of IMPORT this will get rid of default directories
        // but will retain directories specified in hdfs-site.xml.
        // When importing image from a checkpoint, the name-node can
        // start with empty set of storage directories.
        Configuration defaults = new HdfsConfiguration(false);
        defaults.addResource("core-default.xml");
        defaults.addResource("core-site.xml");
        defaults.addResource("hdfs-default.xml");
        Collection<String> defaultNames = defaults.getTrimmedStringCollection(propertyName);
        configured.removeAll(defaultNames);
        if (configured.isEmpty()) {
            LOG.warn("!!! WARNING !!!" + "\n\tThe NameNode currently runs without persistent storage." + "\n\tAny changes to the file system meta-data may be lost." + "\n\tRecommended actions:" + "\n\t\t- shutdown and restart NameNode with configured \"" + propertyName + "\" in hdfs-site.xml;" + "\n\t\t- use Backup Node as a persistent and up-to-date storage " + "of the file system meta-data.");
        }
    } else if (configured.isEmpty()) {
        configured = Collections.singletonList(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT);
    }
    return Util.stringCollectionAsURIs(configured);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) RollingUpgradeStartupOption(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption) StartupOption(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration)

Example 9 with StartupOption

use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project hadoop by apache.

The class MiniDFSCluster, method restartNameNode.

/**
   * Restart the namenode at a given index. Optionally wait for the cluster
   * to become active.
   *
   * @param nnIndex index of the namenode to restart
   * @param waitActive whether to block until the cluster is active again
   * @param args explicit startup arguments; when empty, the namenode's
   *             previously recorded startup option is reused
   */
public synchronized void restartNameNode(int nnIndex, boolean waitActive, String... args) throws IOException {
    NameNodeInfo info = getNN(nnIndex);
    StartupOption startOpt = info.startOpt;
    shutdownNameNode(nnIndex);
    if (args.length == 0) {
        args = createArgs(startOpt);
    } else {
        // Explicit arguments override any previously recorded startup option.
        startOpt = null;
    }
    info.nameNode = NameNode.createNameNode(args, info.conf);
    info.setStartOpt(startOpt);
    if (waitActive) {
        waitClusterUp();
        LOG.info("Restarted the namenode");
        waitActive();
    }
}
Also used : NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) StartupOption(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption)

Example 10 with StartupOption

use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project SSM by Intel-bigdata.

The class SmartServer, method createSSM.

/**
   * Create SSM instance and launch the daemon threads.
   *
   * @param args command line arguments; may be null, treated as empty
   * @param conf the SSM configuration
   * @return the running SmartServer, or null if only help output was requested
   * @throws Exception if argument parsing or daemon startup fails
   */
public static SmartServer createSSM(String[] args, SmartConf conf) throws Exception {
    if (args == null) {
        args = new String[0];
    }
    StringUtils.startupShutdownMessage(SmartServer.class, args, LOG);
    // args is guaranteed non-null here, so the former null re-check was dead code.
    if (parseHelpArgument(args, USAGE, System.out, true)) {
        return null;
    }
    // TODO: handle args
    // Parse out some generic args into Configuration.
    GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
    args = hParser.getRemainingArgs();
    // Parse the rest, NN specific args. The result is currently unused, but
    // the call is kept so invalid remaining arguments are still rejected.
    parseArguments(args);
    SmartServer ssm = new SmartServer(conf);
    try {
        ssm.runSSMDaemons();
    } catch (IOException e) {
        // Tear down a partially started server before propagating the failure.
        ssm.shutdown();
        throw e;
    }
    return ssm;
}
Also used : StartupOption(org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption) IOException(java.io.IOException) GenericOptionsParser(org.smartdata.server.utils.GenericOptionsParser)

Aggregations

StartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption)13 RollingUpgradeStartupOption (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption)6 IOException (java.io.IOException)4 VisibleForTesting (com.google.common.annotations.VisibleForTesting)3 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)3 Configuration (org.apache.hadoop.conf.Configuration)2 Test (org.junit.Test)2 NamenodeRole (org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole)1 StorageInfo (org.apache.hadoop.hdfs.server.common.StorageInfo)1 FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi)1 NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode)1 NameNodeMetrics (org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics)1 GenericOptionsParser (org.apache.hadoop.util.GenericOptionsParser)1 GenericOptionsParser (org.smartdata.server.utils.GenericOptionsParser)1