Search in sources :

Example 26 with DatanodeProtocolClientSideTranslatorPB

Use of org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB in the Apache Hadoop project.

From the class NNThroughputBenchmark, the method run.

/**
 * Main entry point of the benchmark (invoked via the Hadoop {@code Tool}
 * interface). Parses the {@code -op} selection, builds the list of benchmark
 * operations to run, wires up NameNode protocol proxies (either an in-process
 * NameNode or a remote one, depending on the configured default URI), then
 * runs each operation and prints its statistics.
 *
 * @param aArgs command-line parameters; expected form is
 *              {@code -op <opName> [op-specific args]}
 * @return 0 on success
 * @throws Exception any failure from benchmark setup or execution
 *                   (logged before being rethrown)
 */
// Tool
@Override
public int run(String[] aArgs) throws Exception {
    List<String> args = new ArrayList<String>(Arrays.asList(aArgs));
    // Require at least "-op <name>". NOTE(review): execution continues past
    // this call only for well-formed input — printUsage() presumably exits or
    // throws; confirm against its definition.
    if (args.size() < 2 || !args.get(0).startsWith("-op"))
        printUsage();
    // The requested operation name; OP_ALL_NAME selects every benchmark.
    String type = args.get(1);
    boolean runAll = OperationStatsBase.OP_ALL_NAME.equals(type);
    final URI nnUri = FileSystem.getDefaultUri(config);
    // Start the NameNode
    // Empty argv: the in-process NameNode (if created below) gets no CLI args.
    String[] argv = new String[] {};
    // Build the set of operations to execute. Each matching branch constructs
    // the op from the remaining args and appends it; with -op all, every
    // branch (except possibly replication, see below) contributes one op.
    List<OperationStatsBase> ops = new ArrayList<OperationStatsBase>();
    OperationStatsBase opStat = null;
    try {
        if (runAll || CreateFileStats.OP_CREATE_NAME.equals(type)) {
            opStat = new CreateFileStats(args);
            ops.add(opStat);
        }
        if (runAll || MkdirsStats.OP_MKDIRS_NAME.equals(type)) {
            opStat = new MkdirsStats(args);
            ops.add(opStat);
        }
        if (runAll || OpenFileStats.OP_OPEN_NAME.equals(type)) {
            opStat = new OpenFileStats(args);
            ops.add(opStat);
        }
        if (runAll || DeleteFileStats.OP_DELETE_NAME.equals(type)) {
            opStat = new DeleteFileStats(args);
            ops.add(opStat);
        }
        if (runAll || FileStatusStats.OP_FILE_STATUS_NAME.equals(type)) {
            opStat = new FileStatusStats(args);
            ops.add(opStat);
        }
        if (runAll || RenameFileStats.OP_RENAME_NAME.equals(type)) {
            opStat = new RenameFileStats(args);
            ops.add(opStat);
        }
        if (runAll || BlockReportStats.OP_BLOCK_REPORT_NAME.equals(type)) {
            opStat = new BlockReportStats(args);
            ops.add(opStat);
        }
        if (runAll || ReplicationStats.OP_REPLICATION_NAME.equals(type)) {
            // The replication benchmark is skipped when the default FS points
            // at a remote NameNode (scheme "hdfs"): per the warning below it
            // only supports an in-process NameNode.
            if (nnUri.getScheme() != null && nnUri.getScheme().equals("hdfs")) {
                LOG.warn("The replication test is ignored as it does not support " + "standalone namenode in another process or on another host. ");
            } else {
                opStat = new ReplicationStats(args);
                ops.add(opStat);
            }
        }
        if (runAll || CleanAllStats.OP_CLEAN_NAME.equals(type)) {
            opStat = new CleanAllStats(args);
            ops.add(opStat);
        }
        // Unknown op name: nothing was selected above.
        if (ops.isEmpty()) {
            printUsage();
        }
        if (nnUri.getScheme() == null || nnUri.getScheme().equals("file")) {
            // No remote NameNode configured: start one in-process on ephemeral
            // ports (":0") and point every protocol field at its RPC server
            // directly, bypassing RPC client plumbing.
            LOG.info("Remote NameNode is not specified. Creating one.");
            FileSystem.setDefaultUri(config, "hdfs://localhost:0");
            config.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
            nameNode = NameNode.createNameNode(argv, config);
            NamenodeProtocols nnProtos = nameNode.getRpcServer();
            nameNodeProto = nnProtos;
            clientProto = nnProtos;
            dataNodeProto = nnProtos;
            refreshUserMappingsProto = nnProtos;
            bpid = nameNode.getNamesystem().getBlockPoolId();
        } else {
            // Remote NameNode: build client-side proxies for each protocol.
            // dataNodeProto uses the datanode-side translator so the benchmark
            // can impersonate datanodes (e.g. for block reports).
            DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(getConf());
            nameNodeProto = DFSTestUtil.getNamenodeProtocolProxy(config, nnUri, UserGroupInformation.getCurrentUser());
            clientProto = dfs.getClient().getNamenode();
            dataNodeProto = new DatanodeProtocolClientSideTranslatorPB(DFSUtilClient.getNNAddress(nnUri), config);
            refreshUserMappingsProto = DFSTestUtil.getRefreshUserMappingsProtocolProxy(config, nnUri);
            getBlockPoolId(dfs);
        }
        // run each benchmark
        for (OperationStatsBase op : ops) {
            LOG.info("Starting benchmark: " + op.getOpName());
            op.benchmark();
            op.cleanUp();
        }
        // print statistics
        for (OperationStatsBase op : ops) {
            LOG.info("");
            op.printResults();
        }
    } catch (Exception e) {
        // Log the full stack trace, then rethrow so the Tool runner still
        // sees the failure.
        LOG.error(StringUtils.stringifyException(e));
        throw e;
    }
    return 0;
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) ArrayList(java.util.ArrayList) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) URI(java.net.URI) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) DatanodeProtocolClientSideTranslatorPB(org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB)

Aggregations

DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB)26 Test (org.junit.Test)16 DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration)14 NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode)7 NamespaceInfo (org.apache.hadoop.hdfs.server.protocol.NamespaceInfo)6 StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport)6 InvocationOnMock (org.mockito.invocation.InvocationOnMock)6 ArrayList (java.util.ArrayList)5 Configuration (org.apache.hadoop.conf.Configuration)5 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)5 SlowPeerReports (org.apache.hadoop.hdfs.server.protocol.SlowPeerReports)5 VolumeFailureSummary (org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary)5 DelayAnswer (org.apache.hadoop.test.GenericTestUtils.DelayAnswer)5 IOException (java.io.IOException)4 InetSocketAddress (java.net.InetSocketAddress)4 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)4 Path (org.apache.hadoop.fs.Path)4 DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode)4 NNHAStatusHeartbeat (org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat)4 File (java.io.File)3