Example usage of org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB in the Apache Hadoop project.
Source: the run method of the NNThroughputBenchmark class.
/**
 * Main entry point of the benchmark (Tool interface).
 *
 * Parses the requested operation name, registers the matching
 * statistics collectors, connects to an existing NameNode or creates an
 * in-process one, then executes each selected benchmark and prints its
 * results.
 *
 * @param aArgs command line parameters
 * @return 0 on success
 * @throws Exception if any benchmark fails; the error is logged before
 *         being rethrown to the Tool runner
 */
// Tool
@Override
public int run(String[] aArgs) throws Exception {
  final List<String> cmdArgs = new ArrayList<String>(Arrays.asList(aArgs));
  if (cmdArgs.size() < 2 || !cmdArgs.get(0).startsWith("-op")) {
    printUsage();
  }
  final String opType = cmdArgs.get(1);
  final boolean runAllOps = OperationStatsBase.OP_ALL_NAME.equals(opType);
  final URI namenodeUri = FileSystem.getDefaultUri(config);
  // Arguments passed to an in-process NameNode, if one must be created.
  final String[] emptyArgv = new String[] {};
  final List<OperationStatsBase> benchmarks =
      new ArrayList<OperationStatsBase>();
  try {
    // Register every operation that matches the requested type
    // (or all of them when -op all was given).
    if (runAllOps || CreateFileStats.OP_CREATE_NAME.equals(opType)) {
      benchmarks.add(new CreateFileStats(cmdArgs));
    }
    if (runAllOps || MkdirsStats.OP_MKDIRS_NAME.equals(opType)) {
      benchmarks.add(new MkdirsStats(cmdArgs));
    }
    if (runAllOps || OpenFileStats.OP_OPEN_NAME.equals(opType)) {
      benchmarks.add(new OpenFileStats(cmdArgs));
    }
    if (runAllOps || DeleteFileStats.OP_DELETE_NAME.equals(opType)) {
      benchmarks.add(new DeleteFileStats(cmdArgs));
    }
    if (runAllOps || FileStatusStats.OP_FILE_STATUS_NAME.equals(opType)) {
      benchmarks.add(new FileStatusStats(cmdArgs));
    }
    if (runAllOps || RenameFileStats.OP_RENAME_NAME.equals(opType)) {
      benchmarks.add(new RenameFileStats(cmdArgs));
    }
    if (runAllOps || BlockReportStats.OP_BLOCK_REPORT_NAME.equals(opType)) {
      benchmarks.add(new BlockReportStats(cmdArgs));
    }
    if (runAllOps || ReplicationStats.OP_REPLICATION_NAME.equals(opType)) {
      // The replication test only works against an in-process NameNode;
      // skip it when a remote hdfs:// NameNode is configured.
      if (namenodeUri.getScheme() != null
          && namenodeUri.getScheme().equals("hdfs")) {
        LOG.warn("The replication test is ignored as it does not support standalone namenode in another process or on another host. ");
      } else {
        benchmarks.add(new ReplicationStats(cmdArgs));
      }
    }
    if (runAllOps || CleanAllStats.OP_CLEAN_NAME.equals(opType)) {
      benchmarks.add(new CleanAllStats(cmdArgs));
    }
    if (benchmarks.isEmpty()) {
      printUsage();
    }
    if (namenodeUri.getScheme() == null
        || namenodeUri.getScheme().equals("file")) {
      // No remote NameNode configured: spin one up inside this process
      // and wire all protocol handles straight to its RPC server.
      LOG.info("Remote NameNode is not specified. Creating one.");
      FileSystem.setDefaultUri(config, "hdfs://localhost:0");
      config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
      nameNode = NameNode.createNameNode(emptyArgv, config);
      NamenodeProtocols rpcServer = nameNode.getRpcServer();
      nameNodeProto = rpcServer;
      clientProto = rpcServer;
      dataNodeProto = rpcServer;
      refreshUserMappingsProto = rpcServer;
      bpid = nameNode.getNamesystem().getBlockPoolId();
    } else {
      // Remote NameNode: build client-side proxies for each protocol.
      DistributedFileSystem dfs =
          (DistributedFileSystem) FileSystem.get(getConf());
      nameNodeProto = DFSTestUtil.getNamenodeProtocolProxy(config,
          namenodeUri, UserGroupInformation.getCurrentUser());
      clientProto = dfs.getClient().getNamenode();
      dataNodeProto = new DatanodeProtocolClientSideTranslatorPB(
          DFSUtilClient.getNNAddress(namenodeUri), config);
      refreshUserMappingsProto =
          DFSTestUtil.getRefreshUserMappingsProtocolProxy(config, namenodeUri);
      getBlockPoolId(dfs);
    }
    // Execute each selected benchmark and clean up after it.
    for (OperationStatsBase benchmark : benchmarks) {
      LOG.info("Starting benchmark: " + benchmark.getOpName());
      benchmark.benchmark();
      benchmark.cleanUp();
    }
    // Report the collected statistics once all benchmarks are done.
    for (OperationStatsBase benchmark : benchmarks) {
      LOG.info("");
      benchmark.printResults();
    }
  } catch (Exception e) {
    // Log the full stack trace before propagating to the Tool runner.
    LOG.error(StringUtils.stringifyException(e));
    throw e;
  }
  return 0;
}
Aggregations