
Example 21 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in the Apache Hadoop project.

From class TestBlockReaderLocalLegacy, method testBlockReaderLocalLegacyWithAppend:

@Test(timeout = 20000)
public void testBlockReaderLocalLegacyWithAppend() throws Exception {
    final short REPL_FACTOR = 1;
    final HdfsConfiguration conf = getConfiguration(null);
    conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final Path path = new Path("/testBlockReaderLocalLegacy");
    DFSTestUtil.createFile(dfs, path, 10, REPL_FACTOR, 0);
    DFSTestUtil.waitReplication(dfs, path, REPL_FACTOR);
    final ClientDatanodeProtocol proxy;
    final Token<BlockTokenIdentifier> token;
    final ExtendedBlock originalBlock;
    final long originalGS;
    {
        final LocatedBlock lb = cluster.getNameNode().getRpcServer().getBlockLocations(path.toString(), 0, 1).get(0);
        proxy = DFSUtilClient.createClientDatanodeProtocolProxy(lb.getLocations()[0], conf, 60000, false);
        token = lb.getBlockToken();
        // get block and generation stamp
        final ExtendedBlock blk = new ExtendedBlock(lb.getBlock());
        originalBlock = new ExtendedBlock(blk);
        originalGS = originalBlock.getGenerationStamp();
        // test getBlockLocalPathInfo
        final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(blk, token);
        Assert.assertEquals(originalGS, info.getBlock().getGenerationStamp());
    }
    {
        // append one byte; the append bumps the block's generation stamp
        FSDataOutputStream out = dfs.append(path);
        out.write(1);
        out.close();
    }
    {
        // get new generation stamp
        final LocatedBlock lb = cluster.getNameNode().getRpcServer().getBlockLocations(path.toString(), 0, 1).get(0);
        final long newGS = lb.getBlock().getGenerationStamp();
        Assert.assertTrue(newGS > originalGS);
        // getBlockLocalPathInfo using the original block: the datanode accepts the
        // stale generation stamp and reports the block's new one.
        Assert.assertEquals(originalGS, originalBlock.getGenerationStamp());
        final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(originalBlock, token);
        Assert.assertEquals(newGS, info.getBlock().getGenerationStamp());
    }
    cluster.shutdown();
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier), BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
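
Note that the test above never releases its ClientDatanodeProtocol proxy. A minimal sketch of the create/use/release pattern (the helper name is hypothetical; the factory call and arguments are the same as in the test, and RPC.stopProxy is the cleanup Example 22 below uses):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.token.Token;

public class BlockLocalPathHelper {
    // Hypothetical helper: ask a datanode for the local paths of a block,
    // releasing the RPC proxy even if the call throws.
    static BlockLocalPathInfo getLocalPathInfo(DatanodeInfo dn, Configuration conf,
            ExtendedBlock blk, Token<BlockTokenIdentifier> token) throws Exception {
        // Same factory and arguments as Example 21: 60s socket timeout,
        // connect via IP rather than hostname.
        ClientDatanodeProtocol proxy =
                DFSUtilClient.createClientDatanodeProtocolProxy(dn, conf, 60000, false);
        try {
            return proxy.getBlockLocalPathInfo(blk, token);
        } finally {
            RPC.stopProxy(proxy);  // closes the underlying RPC connection
        }
    }
}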

Example 22 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in the Apache Hadoop project.

From class TestBlockToken, method testBlockTokenRpc:

private void testBlockTokenRpc(boolean enableProtobuf) throws Exception {
    Configuration conf = new Configuration();
    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);
    BlockTokenSecretManager sm = new BlockTokenSecretManager(blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null, enableProtobuf);
    Token<BlockTokenIdentifier> token = sm.generateToken(block3, EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));
    final Server server = createMockDatanode(sm, token, conf);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    final UserGroupInformation ticket = UserGroupInformation.createRemoteUser(block3.toString());
    ticket.addToken(token);
    ClientDatanodeProtocol proxy = null;
    try {
        proxy = DFSUtilClient.createClientDatanodeProtocolProxy(addr, ticket, conf, NetUtils.getDefaultSocketFactory(conf));
        // createMockDatanode (defined elsewhere in this test class) stubs
        // getReplicaVisibleLength() to echo the block ID from the verified token,
        // so this assertion shows the token-authenticated RPC round trip worked.
        assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
    } finally {
        server.stop();
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
    }
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), SaslRpcServer (org.apache.hadoop.security.SaslRpcServer), Server (org.apache.hadoop.ipc.Server), InetSocketAddress (java.net.InetSocketAddress), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
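
For context on what Example 22 exercises, here is a minimal standalone sketch of minting a block token with BlockTokenSecretManager and decoding its identifier. The constructor shape and generateToken call are taken from the test above; the interval, lifetime, pool name, and block ID are illustrative values, not from the test:

import java.util.EnumSet;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.security.token.Token;

public class BlockTokenSketch {
    public static void main(String[] args) throws Exception {
        // Constructor arguments as in the test: key update interval, token
        // lifetime, NN index, number of NNs, block pool ID, encryption
        // algorithm (none), and the legacy writable (non-protobuf) format.
        // Interval/lifetime values here are illustrative, not tuned.
        BlockTokenSecretManager sm = new BlockTokenSecretManager(
                10 * 60 * 1000L, 10 * 60 * 1000L, 0, 1, "fake-pool", null, false);
        ExtendedBlock blk = new ExtendedBlock("fake-pool", 42L);
        // Mint a READ-only token for the block...
        Token<BlockTokenIdentifier> token =
                sm.generateToken(blk, EnumSet.of(BlockTokenIdentifier.AccessMode.READ));
        // ...and decode its identifier to see what a datanode would verify.
        BlockTokenIdentifier id = token.decodeIdentifier();
        System.out.println("blockId=" + id.getBlockId()
                + " pool=" + id.getBlockPoolId()
                + " modes=" + id.getAccessModes());
    }
}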

Aggregations

ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 22 usages
IOException (java.io.IOException): 7 usages
InetSocketAddress (java.net.InetSocketAddress): 5 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 5 usages
Configuration (org.apache.hadoop.conf.Configuration): 4 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 4 usages
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 4 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3 usages
Server (org.apache.hadoop.ipc.Server): 3 usages
Test (org.junit.Test): 3 usages
LinkedList (java.util.LinkedList): 2 usages
Path (org.apache.hadoop.fs.Path): 2 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2 usages
Block (org.apache.hadoop.hdfs.protocol.Block): 2 usages
BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo): 2 usages
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 2 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 usages
BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier): 2 usages
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 2 usages