Example 6 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

In class DFSAdmin, method getBalancerBandwidth.

/**
   * Command to get balancer bandwidth for the given datanode. Usage: hdfs
   * dfsadmin -getBalancerBandwidth {@literal <datanode_host:ipc_port>}
   * @param argv List of command line parameters.
   * @param idx The index of the command that is being processed.
   * @exception IOException
   */
public int getBalancerBandwidth(String[] argv, int idx) throws IOException {
    ClientDatanodeProtocol dnProxy = getDataNodeProxy(argv[idx]);
    try {
        long bandwidth = dnProxy.getBalancerBandwidth();
        System.out.println("Balancer bandwidth is " + bandwidth + " bytes per second.");
    } catch (IOException ioe) {
        throw new IOException("Datanode unreachable. " + ioe, ioe);
    }
    return 0;
}
Also used : IOException(java.io.IOException) ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol)
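
The DFSAdmin helper getDataNodeProxy is internal to the admin tool; for context, here is a minimal sketch of issuing the same RPC through a proxy built with DFSUtilClient.createClientDatanodeProtocolProxy, as the later examples do. The readBalancerBandwidth helper name and the way the DatanodeInfo is obtained are assumptions for illustration, not part of DFSAdmin.

/**
 * Hypothetical helper: queries the current balancer bandwidth of one datanode.
 * dnInfo is assumed to come from elsewhere, e.g. the locations of a LocatedBlock.
 */
static long readBalancerBandwidth(DatanodeInfo dnInfo, Configuration conf) throws IOException {
    ClientDatanodeProtocol proxy = DFSUtilClient.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
    try {
        // Same RPC that DFSAdmin issues above.
        return proxy.getBalancerBandwidth();
    } finally {
        // Always release the proxy so the underlying RPC client can shut down.
        RPC.stopProxy(proxy);
    }
}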

Example 7 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

In class ExecuteCommand, method submitPlan.

/**
   * Submits plan to a given data node.
   *
   * @param planFile - Plan file name
   * @param planData - Plan data in json format
   * @throws IOException
   */
private void submitPlan(final String planFile, final String planData) throws IOException {
    Preconditions.checkNotNull(planData);
    NodePlan plan = NodePlan.parseJson(planData);
    String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
    Preconditions.checkNotNull(dataNodeAddress);
    ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
    String planHash = DigestUtils.shaHex(planData);
    try {
        // TODO : Support skipping date check.
        dataNode.submitDiskBalancerPlan(planHash, DiskBalancerCLI.PLAN_VERSION, planFile, planData, false);
    } catch (DiskBalancerException ex) {
        LOG.error("Submitting plan on  {} failed. Result: {}, Message: {}", plan.getNodeName(), ex.getResult().toString(), ex.getMessage());
        throw ex;
    }
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) DiskBalancerException(org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException)
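
The same proxy and plan-hash pattern covers the other disk-balancer RPCs. As a hedged sketch, a cancel operation in the style of the CLI's CancelCommand might look like the following; the cancelDiskBalancerPlan call on ClientDatanodeProtocol and the reuse of getDataNodeProxy are assumptions mirrored from the submit path above.

/**
 * Sketch: cancels a plan previously submitted with submitPlan. The plan is
 * identified by the SHA-1 hex of its JSON, the same hash used when submitting.
 */
private void cancelPlan(final String planData) throws IOException {
    Preconditions.checkNotNull(planData);
    NodePlan plan = NodePlan.parseJson(planData);
    String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
    ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
    String planHash = DigestUtils.shaHex(planData);
    // Assumed companion RPC to submitDiskBalancerPlan.
    dataNode.cancelDiskBalancerPlan(planHash);
}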

Example 8 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

In class TestBlockToken, method testBlockTokenRpcLeak.

/**
   * Test that fast repeated invocations of createClientDatanodeProtocolProxy
   * will not end up using up thousands of sockets. This is a regression test
   * for HDFS-1965.
   */
private void testBlockTokenRpcLeak(boolean enableProtobuf) throws Exception {
    Configuration conf = new Configuration();
    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    Assume.assumeTrue(FD_DIR.exists());
    BlockTokenSecretManager sm = new BlockTokenSecretManager(blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null, enableProtobuf);
    Token<BlockTokenIdentifier> token = sm.generateToken(block3, EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
    final Server server = createMockDatanode(sm, token, conf);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
    fakeBlock.setBlockToken(token);
    // Create another RPC proxy with the same configuration - this will never
    // attempt to connect anywhere -- but it causes the refcount on the
    // RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
    // actually close the TCP connections to the real target DN.
    ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID, new InetSocketAddress("1.1.1.1", 1), UserGroupInformation.createRemoteUser("junk"), conf, NetUtils.getDefaultSocketFactory(conf));
    ClientDatanodeProtocol proxy = null;
    int fdsAtStart = countOpenFileDescriptors();
    try {
        long endTime = Time.now() + 3000;
        while (Time.now() < endTime) {
            proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000, false, fakeBlock);
            assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
            if (proxy != null) {
                RPC.stopProxy(proxy);
            }
            LOG.info("Num open fds:" + countOpenFileDescriptors());
        }
        int fdsAtEnd = countOpenFileDescriptors();
        if (fdsAtEnd - fdsAtStart > 50) {
            fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
        }
    } finally {
        server.stop();
    }
    RPC.stopProxy(proxyToNoWhere);
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) SaslRpcServer(org.apache.hadoop.security.SaslRpcServer) Server(org.apache.hadoop.ipc.Server) InetSocketAddress(java.net.InetSocketAddress) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) Block(org.apache.hadoop.hdfs.protocol.Block)
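
The pattern the test enforces is simply that every proxy created is stopped again. A minimal sketch of the safe call sequence follows; the datanodeId, conf, locatedBlock and block variables are assumed to be prepared as in the test above.

// Create the proxy, issue one RPC, and always release it. HDFS-1965 was
// triggered by callers that skipped the stopProxy step and leaked sockets.
ClientDatanodeProtocol proxy = DFSUtilClient.createClientDatanodeProtocolProxy(
        datanodeId, conf, 1000, false, locatedBlock);
try {
    long visibleLength = proxy.getReplicaVisibleLength(block);
    LOG.info("Replica visible length: " + visibleLength);
} finally {
    RPC.stopProxy(proxy);
}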

Example 9 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

In class TestDFSClientRetries, method testClientDNProtocolTimeout.

/** Test that timeout occurs when DN does not respond to RPC.
   * Start up a server and ask it to sleep for n seconds. Make an
   * RPC to the server and set rpcTimeout to less than n and ensure
   * that socketTimeoutException is obtained
   */
@Test
public void testClientDNProtocolTimeout() throws IOException {
    final Server server = new TestServer(1, true);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
    ClientDatanodeProtocol proxy = null;
    try {
        proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf, 500, false, fakeBlock);
        proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
        fail("Did not get expected exception: SocketTimeoutException");
    } catch (SocketTimeoutException e) {
        LOG.info("Got the expected Exception: SocketTimeoutException");
    } finally {
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        server.stop();
    }
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) SocketTimeoutException(java.net.SocketTimeoutException) Server(org.apache.hadoop.ipc.Server) InetSocketAddress(java.net.InetSocketAddress) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) Test(org.junit.Test)
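
From the caller's point of view, the 500 ms rpcTimeout passed to createClientDatanodeProtocolProxy bounds each call, and a stalled datanode surfaces as a SocketTimeoutException instead of a hang. A hedged sketch of handling it outside a test follows; the datanodeId, conf and fakeBlock variables are assumed to be set up as above.

ClientDatanodeProtocol proxy = DFSUtilClient.createClientDatanodeProtocolProxy(
        datanodeId, conf, 500, false, fakeBlock);
try {
    long len = proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
    LOG.info("Visible length: " + len);
} catch (SocketTimeoutException e) {
    // Translate the low-level timeout into a clearer message for callers.
    throw new IOException("Datanode did not respond within the 500 ms RPC timeout", e);
} finally {
    RPC.stopProxy(proxy);
}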

Example 10 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

In class TestShortCircuitLocalRead, method testDeprecatedGetBlockLocalPathInfoRpc.

@Test(timeout = 60000)
public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(true).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    try {
        DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
        LocatedBlocks lb = cluster.getNameNode().getRpcServer().getBlockLocations("/tmp/x", 0, 16);
        // Create a new block object, because the block inside LocatedBlock at
        // namenode is of type BlockInfo.
        ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
        Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
        final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
        ClientDatanodeProtocol proxy = DFSUtilClient.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
        try {
            proxy.getBlockLocalPathInfo(blk, token);
            Assert.fail("The call should have failed as this user " + " is not configured in " + DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
        } catch (IOException ex) {
            Assert.assertTrue(ex.getMessage().contains("not configured in " + DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
        }
    } finally {
        fs.close();
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) BlockTokenIdentifier(org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) IOException(java.io.IOException) ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) Test(org.junit.Test)
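
For completeness, the success path of this deprecated RPC returns a BlockLocalPathInfo describing where the replica lives on the datanode's local disk; it only succeeds when the calling user is listed under DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY. The getBlockPath and getMetaPath accessors in this sketch are the expected shape of that result, shown as an illustration rather than taken from the test.

// Assumes the client user is whitelisted on the datanode via
// DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY.
BlockLocalPathInfo pathInfo = proxy.getBlockLocalPathInfo(blk, token);
// The legacy short-circuit reader opened these local files directly.
System.out.println("Block file: " + pathInfo.getBlockPath());
System.out.println("Meta file: " + pathInfo.getMetaPath());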

Aggregations

ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 22
IOException (java.io.IOException): 7
InetSocketAddress (java.net.InetSocketAddress): 5
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 4
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 4
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3
Server (org.apache.hadoop.ipc.Server): 3
Test (org.junit.Test): 3
LinkedList (java.util.LinkedList): 2
Path (org.apache.hadoop.fs.Path): 2
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2
Block (org.apache.hadoop.hdfs.protocol.Block): 2
BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo): 2
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 2
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2
BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier): 2
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 2