Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.
The class TestBlockReaderLocalLegacy, method testBlockReaderLocalLegacyWithAppend.
@Test(timeout = 20000)
public void testBlockReaderLocalLegacyWithAppend() throws Exception {
  final short REPL_FACTOR = 1;
  final HdfsConfiguration conf = getConfiguration(null);
  conf.setBoolean(
      HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
  final MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  final DistributedFileSystem dfs = cluster.getFileSystem();
  final Path path = new Path("/testBlockReaderLocalLegacy");
  DFSTestUtil.createFile(dfs, path, 10, REPL_FACTOR, 0);
  DFSTestUtil.waitReplication(dfs, path, REPL_FACTOR);

  final ClientDatanodeProtocol proxy;
  final Token<BlockTokenIdentifier> token;
  final ExtendedBlock originalBlock;
  final long originalGS;
  {
    final LocatedBlock lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations(path.toString(), 0, 1).get(0);
    proxy = DFSUtilClient.createClientDatanodeProtocolProxy(
        lb.getLocations()[0], conf, 60000, false);
    token = lb.getBlockToken();

    // get block and generation stamp
    final ExtendedBlock blk = new ExtendedBlock(lb.getBlock());
    originalBlock = new ExtendedBlock(blk);
    originalGS = originalBlock.getGenerationStamp();

    // test getBlockLocalPathInfo
    final BlockLocalPathInfo info = proxy.getBlockLocalPathInfo(blk, token);
    Assert.assertEquals(originalGS, info.getBlock().getGenerationStamp());
  }
  {
    // append one byte
    FSDataOutputStream out = dfs.append(path);
    out.write(1);
    out.close();
  }
  {
    // get new generation stamp
    final LocatedBlock lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations(path.toString(), 0, 1).get(0);
    final long newGS = lb.getBlock().getGenerationStamp();
    Assert.assertTrue(newGS > originalGS);

    // getBlockLocalPathInfo using the original block.
    Assert.assertEquals(originalGS, originalBlock.getGenerationStamp());
    final BlockLocalPathInfo info =
        proxy.getBlockLocalPathInfo(originalBlock, token);
    Assert.assertEquals(newGS, info.getBlock().getGenerationStamp());
  }
  cluster.shutdown();
}
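Outside of the MiniDFSCluster test harness, the same proxy round-trip can be packaged as a small helper. The sketch below is an illustration rather than Hadoop code: the class and method names are invented, the 60-second socket timeout is simply copied from the test, and it assumes the caller has already obtained a LocatedBlock (for example from the NameNode's getBlockLocations RPC, as in the test above).

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.token.Token;

/** Hypothetical helper; not part of the Hadoop code base. */
public class LocalPathInfoExample {

  /**
   * Ask the first datanode hosting {@code lb} for the local paths of its
   * replica. Mirrors the calls made in testBlockReaderLocalLegacyWithAppend.
   */
  public static BlockLocalPathInfo fetchLocalPathInfo(LocatedBlock lb,
      Configuration conf) throws IOException {
    // Open a ClientDatanodeProtocol proxy to the first replica location,
    // with a 60s socket timeout and without connecting via hostname.
    final ClientDatanodeProtocol proxy = DFSUtilClient
        .createClientDatanodeProtocolProxy(lb.getLocations()[0], conf,
            60000, false);
    try {
      final ExtendedBlock blk = new ExtendedBlock(lb.getBlock());
      final Token<BlockTokenIdentifier> token = lb.getBlockToken();
      // The datanode looks the replica up on its local disk and returns the
      // block/meta file paths together with the current generation stamp.
      return proxy.getBlockLocalPathInfo(blk, token);
    } finally {
      RPC.stopProxy(proxy);
    }
  }
}

As in the test, the returned BlockLocalPathInfo carries the replica's current generation stamp, which may be newer than the one in the block the caller passed in (for example after an append).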
Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.
The class TestBlockToken, method testBlockTokenRpc.
private void testBlockTokenRpc(boolean enableProtobuf) throws Exception {
  Configuration conf = new Configuration();
  conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);

  BlockTokenSecretManager sm = new BlockTokenSecretManager(
      blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
      enableProtobuf);
  Token<BlockTokenIdentifier> token = sm.generateToken(block3,
      EnumSet.allOf(BlockTokenIdentifier.AccessMode.class));

  final Server server = createMockDatanode(sm, token, conf);
  server.start();

  final InetSocketAddress addr = NetUtils.getConnectAddress(server);
  final UserGroupInformation ticket =
      UserGroupInformation.createRemoteUser(block3.toString());
  ticket.addToken(token);

  ClientDatanodeProtocol proxy = null;
  try {
    proxy = DFSUtilClient.createClientDatanodeProtocolProxy(addr, ticket,
        conf, NetUtils.getDefaultSocketFactory(conf));
    assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
  } finally {
    server.stop();
    if (proxy != null) {
      RPC.stopProxy(proxy);
    }
  }
}
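The token-based variant follows the same shape: attach the block token to a UserGroupInformation and pass that UGI when building the proxy against the datanode's IPC address. The sketch below is a hypothetical illustration of that pattern with an invented class and method name; it assumes the caller already has the datanode address, an ExtendedBlock, and a matching Token<BlockTokenIdentifier>, and it reuses only the calls shown in the test above.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

/** Hypothetical helper; not part of the Hadoop code base. */
public class VisibleLengthExample {

  /**
   * Query a datanode for the visible length of a replica, authenticating
   * the RPC with a block token carried on the calling UGI, as in
   * testBlockTokenRpc.
   */
  public static long visibleLength(InetSocketAddress dnAddr,
      ExtendedBlock block, Token<BlockTokenIdentifier> token,
      Configuration conf) throws IOException {
    // Attach the block token to a remote-user UGI; the RPC layer presents
    // it to the datanode, which validates it against its
    // BlockTokenSecretManager. The user name mirrors the test's choice of
    // block.toString() and is not what authorizes the call.
    final UserGroupInformation ugi =
        UserGroupInformation.createRemoteUser(block.toString());
    ugi.addToken(token);

    ClientDatanodeProtocol proxy = null;
    try {
      proxy = DFSUtilClient.createClientDatanodeProtocolProxy(dnAddr, ugi,
          conf, NetUtils.getDefaultSocketFactory(conf));
      return proxy.getReplicaVisibleLength(block);
    } finally {
      if (proxy != null) {
        RPC.stopProxy(proxy);
      }
    }
  }
}

In the test the mock datanode simply returns the block ID as the visible length, which is why the assertion compares against block3.getBlockId(); a real datanode would return the number of bytes of the replica that are visible to readers.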