use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project hadoop by apache.
the class DataNode method parseArguments.
/**
 * Parse and verify command line arguments and set configuration parameters.
 *
 * @return false if the passed arguments are incorrect
 */
@VisibleForTesting
static boolean parseArguments(String[] args, Configuration conf) {
  StartupOption startOpt = StartupOption.REGULAR;
  int i = 0;
  if (args != null && args.length != 0) {
    String cmd = args[i++];
    if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
      LOG.error("-r, --rack arguments are not supported anymore. RackID "
          + "resolution is handled by the NameNode.");
      return false;
    } else if (StartupOption.ROLLBACK.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.ROLLBACK;
    } else if (StartupOption.REGULAR.getName().equalsIgnoreCase(cmd)) {
      startOpt = StartupOption.REGULAR;
    } else {
      return false;
    }
  }
  setStartupOption(conf, startOpt);
  // Fail if more than one cmd specified!
  return (args == null || i == args.length);
}
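The contract is strict: zero arguments or exactly one recognized option. A hedged sketch of that behavior as a hypothetical driver class (ParseArgumentsSketch is not part of Hadoop; it must live in the org.apache.hadoop.hdfs.server.datanode package because parseArguments is package-private, and the assertions need the -ea JVM flag):

package org.apache.hadoop.hdfs.server.datanode;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class ParseArgumentsSketch {
  public static void main(String[] ignored) {
    Configuration conf = new HdfsConfiguration();
    // Zero arguments: accepted, REGULAR is recorded in conf.
    assert DataNode.parseArguments(new String[] {}, conf);
    // One recognized option ("-rollback" is StartupOption.ROLLBACK.getName()).
    assert DataNode.parseArguments(new String[] {"-rollback"}, conf);
    // The retired rack flags are rejected with an error.
    assert !DataNode.parseArguments(new String[] {"-r", "rack1"}, conf);
    // Two options: only args[0] is consumed, so i != args.length and it fails.
    assert !DataNode.parseArguments(new String[] {"-rollback", "-regular"}, conf);
    System.out.println("parseArguments contract holds");
  }
}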
use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project hadoop by apache.
the class FSNamesystem method loadFromDisk.
/**
 * Instantiates an FSNamesystem loaded from the image and edits
 * directories specified in the passed Configuration.
 *
 * @param conf the Configuration which specifies the storage directories
 * from which to load
 * @return an FSNamesystem which contains the loaded namespace
 * @throws IOException if loading fails
 */
static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
  checkConfiguration(conf);
  FSImage fsImage = new FSImage(conf,
      FSNamesystem.getNamespaceDirs(conf),
      FSNamesystem.getNamespaceEditsDirs(conf));
  FSNamesystem namesystem = new FSNamesystem(conf, fsImage, false);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  if (startOpt == StartupOption.RECOVER) {
    namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  }
  long loadStart = monotonicNow();
  try {
    namesystem.loadFSImage(startOpt);
  } catch (IOException ioe) {
    LOG.warn("Encountered exception loading fsimage", ioe);
    fsImage.close();
    throw ioe;
  }
  long timeTakenToLoadFSImage = monotonicNow() - loadStart;
  LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
  NameNodeMetrics nnMetrics = NameNode.getNameNodeMetrics();
  if (nnMetrics != null) {
    nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage);
  }
  namesystem.getFSDirectory().createReservedStatuses(namesystem.getCTime());
  return namesystem;
}
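A hedged usage sketch (LoadFromDiskSketch is hypothetical, not Hadoop code): loadFromDisk is package-private, so a caller has to sit in org.apache.hadoop.hdfs.server.namenode, which is essentially what NameNode.loadNamesystem does. Setting the startup key to RECOVER exercises the safe-mode branch above; the sketch assumes a formatted name directory already exists on disk:

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;

public class LoadFromDiskSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // DFS_NAMENODE_STARTUP_KEY ("dfs.namenode.startup") is the key that
    // NameNode.getStartupOption(conf) reads; name() matches what
    // NameNode.setStartupOption would store there.
    conf.set(DFSConfigKeys.DFS_NAMENODE_STARTUP_KEY,
        StartupOption.RECOVER.name());
    FSNamesystem namesystem = FSNamesystem.loadFromDisk(conf);
    System.out.println("Loaded namespace, cTime=" + namesystem.getCTime());
  }
}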
use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project hadoop by apache.
the class FSNamesystem method getStorageDirs.
private static Collection<URI> getStorageDirs(Configuration conf, String propertyName) {
  Collection<String> dirNames = conf.getTrimmedStringCollection(propertyName);
  StartupOption startOpt = NameNode.getStartupOption(conf);
  if (startOpt == StartupOption.IMPORT) {
    // In case of IMPORT this will get rid of default directories
    // but will retain directories specified in hdfs-site.xml
    // When importing image from a checkpoint, the name-node can
    // start with empty set of storage directories.
    Configuration cE = new HdfsConfiguration(false);
    cE.addResource("core-default.xml");
    cE.addResource("core-site.xml");
    cE.addResource("hdfs-default.xml");
    Collection<String> dirNames2 = cE.getTrimmedStringCollection(propertyName);
    dirNames.removeAll(dirNames2);
    if (dirNames.isEmpty()) {
      LOG.warn("!!! WARNING !!!"
          + "\n\tThe NameNode currently runs without persistent storage."
          + "\n\tAny changes to the file system meta-data may be lost."
          + "\n\tRecommended actions:"
          + "\n\t\t- shutdown and restart NameNode with configured \""
          + propertyName + "\" in hdfs-site.xml;"
          + "\n\t\t- use Backup Node as a persistent and up-to-date storage "
          + "of the file system meta-data.");
    }
  } else if (dirNames.isEmpty()) {
    dirNames = Collections.singletonList(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT);
  }
  return Util.stringCollectionAsURIs(dirNames);
}
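The interesting move is the subtraction: after removeAll, dirNames holds only the entries that hdfs-site.xml added beyond the bundled *-default.xml resources. A self-contained sketch of that set difference using plain collections (the paths are illustrative only):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;

public class DefaultDirSubtractionSketch {
  public static void main(String[] args) {
    // What conf.getTrimmedStringCollection(propertyName) might return:
    // one directory from hdfs-site.xml plus the bundled default.
    Collection<String> dirNames = new ArrayList<>(
        Arrays.asList("/data/dfs/name", "file:///tmp/hadoop/dfs/name"));
    // What the defaults-only Configuration (cE above) would return.
    Collection<String> defaultDirs =
        Arrays.asList("file:///tmp/hadoop/dfs/name");
    dirNames.removeAll(defaultDirs);
    // Only the hdfs-site.xml entry survives. Had the site file configured
    // nothing, the collection would now be empty and IMPORT would run
    // without persistent storage; hence the WARNING above.
    System.out.println(dirNames); // [/data/dfs/name]
  }
}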
use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project hadoop by apache.
the class MiniDFSCluster method restartNameNode.
/**
 * Restart the namenode at a given index. Optionally wait for the cluster
 * to become active.
 */
public synchronized void restartNameNode(int nnIndex, boolean waitActive, String... args) throws IOException {
  NameNodeInfo info = getNN(nnIndex);
  StartupOption startOpt = info.startOpt;
  shutdownNameNode(nnIndex);
  if (args.length != 0) {
    startOpt = null;
  } else {
    args = createArgs(startOpt);
  }
  NameNode nn = NameNode.createNameNode(args, info.conf);
  info.nameNode = nn;
  info.setStartOpt(startOpt);
  if (waitActive) {
    waitClusterUp();
    LOG.info("Restarted the namenode");
    waitActive();
  }
}
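A hedged test sketch of how this is typically driven (the builder settings are illustrative; MiniDFSCluster ships with the hadoop-hdfs test artifacts):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class RestartNameNodeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      // No extra args: the namenode's previous StartupOption is reused
      // (passing args would null it out, per the branch above).
      cluster.restartNameNode(0, true);
    } finally {
      cluster.shutdown();
    }
  }
}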
use of org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption in project SSM by Intel-bigdata.
the class SmartServer method createSSM.
/**
 * Create SSM instance and launch the daemon threads.
 *
 * @param args command line arguments
 * @param conf the SSM configuration
 * @return the started SmartServer, or null if only help was requested
 */
public static SmartServer createSSM(String[] args, SmartConf conf) throws Exception {
  if (args == null) {
    args = new String[0];
  }
  StringUtils.startupShutdownMessage(SmartServer.class, args, LOG);
  if (parseHelpArgument(args, USAGE, System.out, true)) {
    return null;
  }
  // TODO: handle args
  // Parse out some generic args into Configuration.
  GenericOptionsParser hParser = new GenericOptionsParser(conf, args);
  args = hParser.getRemainingArgs();
  // Parse the rest, NN specific args.
  StartupOption startOpt = parseArguments(args);
  SmartServer ssm = new SmartServer(conf);
  try {
    ssm.runSSMDaemons();
  } catch (IOException e) {
    ssm.shutdown();
    throw e;
  }
  return ssm;
}
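A self-contained sketch of the generic-option step used above (the property key and the -format flag are illustrative, not SSM options): GenericOptionsParser folds flags such as -D key=value into the Configuration and hands back the remainder for application-specific parsing.

import java.util.Arrays;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;

public class GenericArgsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String[] cmdline = {"-D", "smart.server.http.port=7045", "-format"};
    GenericOptionsParser parser = new GenericOptionsParser(conf, cmdline);
    String[] remaining = parser.getRemainingArgs();
    System.out.println(conf.get("smart.server.http.port")); // 7045
    System.out.println(Arrays.toString(remaining));         // [-format]
  }
}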