Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From the class TestWriteToReplica, the method testReplicaMapAfterDatanodeRestart.
/**
 * This test checks that the replica map is preserved across a quick
 * datanode restart (less than 5 minutes).
 * @throws Exception
 */
@Test
public void testReplicaMapAfterDatanodeRestart() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
      .build();
  try {
    cluster.waitActive();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    assertNotNull("cannot create nn1", nn1);
    assertNotNull("cannot create nn2", nn2);
    // check the number of volumes in the fsdataset
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetImpl dataSet = (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);
    List<FsVolumeSpi> volumes = null;
    try (FsDatasetSpi.FsVolumeReferences referredVols =
        dataSet.getFsVolumeReferences()) {
      // number of volumes should be 2 - [data1, data2]
      assertEquals("number of volumes is wrong", 2, referredVols.size());
      volumes = new ArrayList<>(referredVols.size());
      for (FsVolumeSpi vol : referredVols) {
        volumes.add(vol);
      }
    }
    ArrayList<String> bpList = new ArrayList<>(Arrays.asList(
        cluster.getNamesystem(0).getBlockPoolId(),
        cluster.getNamesystem(1).getBlockPoolId()));
    Assert.assertTrue("Cluster should have 2 block pools", bpList.size() == 2);
    createReplicas(bpList, volumes, cluster.getFsDatasetTestUtils(dn));
    ReplicaMap oldReplicaMap = new ReplicaMap(new AutoCloseableLock());
    oldReplicaMap.addAll(dataSet.volumeMap);
    cluster.restartDataNode(0);
    cluster.waitActive();
    dn = cluster.getDataNodes().get(0);
    dataSet = (FsDatasetImpl) dn.getFSDataset();
    testEqualityOfReplicaMap(oldReplicaMap, dataSet.volumeMap, bpList);
  } finally {
    cluster.shutdown();
  }
}
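The final assertion relies on a testEqualityOfReplicaMap helper that is not shown in this listing. A minimal sketch of what such a per-block-pool comparison could look like follows; the helper name comes from the call above, but the loop body and assertion messages are illustrative assumptions (ReplicaMap#replicas(bpid) and ReplicaMap#get(bpid, blockId) are the FsDatasetImpl-internal accessors in this generation of Hadoop):

private void testEqualityOfReplicaMap(ReplicaMap oldMap, ReplicaMap newMap,
    List<String> bpidList) {
  for (String bpid : bpidList) {
    for (ReplicaInfo oldReplica : oldMap.replicas(bpid)) {
      // every replica known before the quick restart must reappear,
      // unchanged, in the post-restart map (sketch; assertion style assumed)
      ReplicaInfo newReplica = newMap.get(bpid, oldReplica.getBlockId());
      assertNotNull("replica lost across restart: " + oldReplica, newReplica);
      assertEquals(oldReplica, newReplica);
    }
  }
}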
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From the class DFSTestUtil, the method createKey.
/**
 * Helper function to create a key in the Key Provider.
 *
 * @param keyName The name of the key to create
 * @param cluster The cluster to create it in
 * @param idx     The NameNode index
 * @param conf    Configuration to use
 */
public static void createKey(String keyName, MiniDFSCluster cluster, int idx,
    Configuration conf) throws NoSuchAlgorithmException, IOException {
  NameNode nn = cluster.getNameNode(idx);
  KeyProvider provider = nn.getNamesystem().getProvider();
  final KeyProvider.Options options = KeyProvider.options(conf);
  options.setDescription(keyName);
  options.setBitLength(128);
  provider.createKey(keyName, options);
  provider.flush();
}
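For context, a typical caller creates the key and then uses it to back an encryption zone. The sketch below is a hypothetical usage, not code from this listing: the zone path and key name are illustrative, and HdfsAdmin#createEncryptionZone(Path, String) is the client API of this Hadoop generation:

// hypothetical usage of createKey(); path and key name are illustrative
DFSTestUtil.createKey("test_key", cluster, 0, conf);
HdfsAdmin dfsAdmin = new HdfsAdmin(cluster.getFileSystem().getUri(), conf);
Path zone = new Path("/zone");
cluster.getFileSystem().mkdirs(zone);
dfsAdmin.createEncryptionZone(zone, "test_key");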
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From the class TestBlockTokenWithDFS, the method testWrite.
/**
 * Tests that a WRITE operation can handle token expiration when the
 * pipeline needs to be re-established.
 */
@Test
public void testWrite() throws Exception {
  MiniDFSCluster cluster = null;
  int numDataNodes = 2;
  Configuration conf = getConf(numDataNodes);
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(numDataNodes).build();
    cluster.waitActive();
    assertEquals(numDataNodes, cluster.getDataNodes().size());
    final NameNode nn = cluster.getNameNode();
    final BlockManager bm = nn.getNamesystem().getBlockManager();
    final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
    // set a short token lifetime (1 second)
    SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
    Path fileToWrite = new Path(FILE_TO_WRITE);
    FileSystem fs = cluster.getFileSystem();
    byte[] expected = generateBytes(FILE_SIZE);
    FSDataOutputStream stm = writeFile(fs, fileToWrite, (short) numDataNodes,
        BLOCK_SIZE);
    // write a partial block
    int mid = expected.length - 1;
    stm.write(expected, 0, mid);
    stm.hflush();
    /*
     * wait till the token used in stm expires
     */
    Token<BlockTokenIdentifier> token = DFSTestUtil.getBlockToken(stm);
    while (!SecurityTestUtil.isBlockTokenExpired(token)) {
      try {
        Thread.sleep(10);
      } catch (InterruptedException ignored) {
      }
    }
    // remove a datanode to force re-establishing the pipeline
    cluster.stopDataNode(0);
    // write the rest of the file
    stm.write(expected, mid, expected.length - mid);
    stm.close();
    // check that the write was successful
    FSDataInputStream in4 = fs.open(fileToWrite);
    assertTrue(checkFile1(in4, expected));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
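The polling loop above never times out if the token fails to expire. A bounded alternative is sketched below, assuming org.apache.hadoop.test.GenericTestUtils#waitFor(Supplier<Boolean>, int, int) is available in this tree; the timeout values are illustrative:

// poll every 10 ms, but give up after 30 s instead of spinning forever
GenericTestUtils.waitFor(() -> {
  try {
    return SecurityTestUtil.isBlockTokenExpired(token);
  } catch (IOException e) {
    // assumption: isBlockTokenExpired declares IOException in this version
    throw new RuntimeException(e);
  }
}, 10, 30000);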
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From the class TestBlockTokenWithDFS, the method doTestRead.
protected void doTestRead(Configuration conf, MiniDFSCluster cluster,
    boolean isStriped) throws Exception {
  final int numDataNodes = cluster.getDataNodes().size();
  final NameNode nn = cluster.getNameNode();
  final NamenodeProtocols nnProto = nn.getRpcServer();
  final BlockManager bm = nn.getNamesystem().getBlockManager();
  final BlockTokenSecretManager sm = bm.getBlockTokenSecretManager();
  // set a short token lifetime (1 second) initially
  SecurityTestUtil.setBlockTokenLifetime(sm, 1000L);
  Path fileToRead = new Path(FILE_TO_READ);
  FileSystem fs = cluster.getFileSystem();
  byte[] expected = generateBytes(FILE_SIZE);
  createFile(fs, fileToRead, expected);
  /*
   * setup for testing expiration handling of cached tokens
   */
  // read using blockSeekTo(); acquired tokens are cached in in1
  FSDataInputStream in1 = fs.open(fileToRead);
  assertTrue(checkFile1(in1, expected));
  // read using blockSeekTo(); acquired tokens are cached in in2
  FSDataInputStream in2 = fs.open(fileToRead);
  assertTrue(checkFile1(in2, expected));
  // read using fetchBlockByteRange(); acquired tokens are cached in in3
  FSDataInputStream in3 = fs.open(fileToRead);
  assertTrue(checkFile2(in3, expected));
  /*
   * testing READ interface on DN using a BlockReader
   */
  // sanity-check that a DFSClient can be constructed against the cluster;
  // it is closed immediately since the block-level reads below do not use it
  DFSClient client = null;
  try {
    client = new DFSClient(new InetSocketAddress("localhost",
        cluster.getNameNodePort()), conf);
  } finally {
    if (client != null) {
      client.close();
    }
  }
  List<LocatedBlock> locatedBlocks = nnProto.getBlockLocations(
      FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
  // first block
  LocatedBlock lblock = locatedBlocks.get(0);
  // verify the token is not expired
  assertFalse(isBlockTokenExpired(lblock));
  // read with a valid token; should succeed
  tryRead(conf, lblock, true);
  // wait until the block token expires
  while (!isBlockTokenExpired(lblock)) {
    try {
      Thread.sleep(10);
    } catch (InterruptedException ignored) {
    }
  }
  /*
   * continue testing READ interface on DN using a BlockReader
   */
  // verify the token is expired
  assertTrue(isBlockTokenExpired(lblock));
  // read should fail
  tryRead(conf, lblock, false);
  // use a valid new token
  bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
  // read should succeed
  tryRead(conf, lblock, true);
  // use a token with a wrong block ID
  long rightId = lblock.getBlock().getBlockId();
  long wrongId = rightId + 1;
  lblock.getBlock().setBlockId(wrongId);
  bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.READ);
  lblock.getBlock().setBlockId(rightId);
  // read should fail
  tryRead(conf, lblock, false);
  // use a token with the wrong access mode
  bm.setBlockToken(lblock, BlockTokenIdentifier.AccessMode.WRITE);
  // read should fail
  tryRead(conf, lblock, false);
  // set a long token lifetime for future tokens
  SecurityTestUtil.setBlockTokenLifetime(sm, 600 * 1000L);
  /*
   * testing that when cached tokens are expired, DFSClient will re-fetch
   * tokens transparently for READ.
   */
  // confirm all tokens cached in in1 are expired by now
  List<LocatedBlock> lblocks = DFSTestUtil.getAllBlocks(in1);
  for (LocatedBlock blk : lblocks) {
    assertTrue(isBlockTokenExpired(blk));
  }
  // verify blockSeekTo() is able to re-fetch token transparently
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));
  // confirm all tokens cached in in2 are expired by now
  List<LocatedBlock> lblocks2 = DFSTestUtil.getAllBlocks(in2);
  for (LocatedBlock blk : lblocks2) {
    assertTrue(isBlockTokenExpired(blk));
  }
  // verify blockSeekTo() is able to re-fetch token transparently (testing
  // via another interface method)
  if (isStriped) {
    // striped blocks don't support seekToNewSource
    in2.seek(0);
  } else {
    assertTrue(in2.seekToNewSource(0));
  }
  assertTrue(checkFile1(in2, expected));
  // confirm all tokens cached in in3 are expired by now
  List<LocatedBlock> lblocks3 = DFSTestUtil.getAllBlocks(in3);
  for (LocatedBlock blk : lblocks3) {
    assertTrue(isBlockTokenExpired(blk));
  }
  // verify fetchBlockByteRange() is able to re-fetch token transparently
  assertTrue(checkFile2(in3, expected));
  /*
   * testing that after datanodes are restarted on the same ports, cached
   * tokens should still work and there is no need to fetch new tokens from
   * the namenode. This test should run while the namenode is down (to make
   * sure no new tokens can be fetched from the namenode).
   */
  // restart datanodes on the same ports that they currently use
  assertTrue(cluster.restartDataNodes(true));
  cluster.waitActive();
  assertEquals(numDataNodes, cluster.getDataNodes().size());
  cluster.shutdownNameNode(0);
  // confirm tokens cached in in1 are still valid
  lblocks = DFSTestUtil.getAllBlocks(in1);
  for (LocatedBlock blk : lblocks) {
    assertFalse(isBlockTokenExpired(blk));
  }
  // verify blockSeekTo() still works (forced to use cached tokens)
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));
  // confirm tokens cached in in2 are still valid
  lblocks2 = DFSTestUtil.getAllBlocks(in2);
  for (LocatedBlock blk : lblocks2) {
    assertFalse(isBlockTokenExpired(blk));
  }
  // verify blockSeekTo() still works (forced to use cached tokens)
  if (isStriped) {
    in2.seek(0);
  } else {
    in2.seekToNewSource(0);
  }
  assertTrue(checkFile1(in2, expected));
  // confirm tokens cached in in3 are still valid
  lblocks3 = DFSTestUtil.getAllBlocks(in3);
  for (LocatedBlock blk : lblocks3) {
    assertFalse(isBlockTokenExpired(blk));
  }
  // verify fetchBlockByteRange() still works (forced to use cached tokens)
  assertTrue(checkFile2(in3, expected));
  /*
   * testing that when the namenode is restarted, cached tokens should still
   * work and there is no need to fetch new tokens from the namenode. Like
   * the previous test, this test should also run while the namenode is
   * down. The setup for this test depends on the previous test.
   */
  // restart the namenode and then shut it down for the test
  cluster.restartNameNode(0);
  cluster.shutdownNameNode(0);
  // verify blockSeekTo() still works (forced to use cached tokens)
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));
  // verify again that blockSeekTo() still works (forced to use cached tokens)
  if (isStriped) {
    in2.seek(0);
  } else {
    in2.seekToNewSource(0);
  }
  assertTrue(checkFile1(in2, expected));
  // verify fetchBlockByteRange() still works (forced to use cached tokens)
  assertTrue(checkFile2(in3, expected));
  /*
   * testing that after both the namenode and the datanodes are restarted
   * (namenode first, followed by datanodes), DFSClient can't access a DN
   * without re-fetching tokens and is able to re-fetch tokens
   * transparently. The setup of this test depends on the previous test.
   */
  // restore the cluster and restart the datanodes for the test
  cluster.restartNameNode(0);
  assertTrue(cluster.restartDataNodes(true));
  cluster.waitActive();
  assertEquals(numDataNodes, cluster.getDataNodes().size());
  // shut down the namenode so that DFSClient can't get new tokens from it
  cluster.shutdownNameNode(0);
  // verify blockSeekTo() fails (cached tokens become invalid)
  in1.seek(0);
  assertFalse(checkFile1(in1, expected));
  // verify fetchBlockByteRange() fails (cached tokens become invalid)
  assertFalse(checkFile2(in3, expected));
  // restart the namenode to allow DFSClient to re-fetch tokens
  cluster.restartNameNode(0);
  // verify blockSeekTo() works again (by transparently re-fetching
  // tokens from the namenode)
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));
  if (isStriped) {
    in2.seek(0);
  } else {
    in2.seekToNewSource(0);
  }
  assertTrue(checkFile1(in2, expected));
  // verify fetchBlockByteRange() works again (by transparently
  // re-fetching tokens from the namenode)
  assertTrue(checkFile2(in3, expected));
  /*
   * testing that when datanodes are restarted on different ports, DFSClient
   * is able to re-fetch tokens transparently to connect to them
   */
  // restart datanodes on newly assigned ports
  assertTrue(cluster.restartDataNodes(false));
  cluster.waitActive();
  assertEquals(numDataNodes, cluster.getDataNodes().size());
  // verify blockSeekTo() is able to re-fetch token transparently
  in1.seek(0);
  assertTrue(checkFile1(in1, expected));
  // verify blockSeekTo() is able to re-fetch token transparently
  if (isStriped) {
    in2.seek(0);
  } else {
    in2.seekToNewSource(0);
  }
  assertTrue(checkFile1(in2, expected));
  // verify fetchBlockByteRange() is able to re-fetch token transparently
  assertTrue(checkFile2(in3, expected));
}
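The isBlockTokenExpired(lblock) helper used throughout this method is not part of the listing. It is most plausibly a thin wrapper that hands the block's own token to SecurityTestUtil, roughly as sketched here:

// sketch of the expiry check used above; a striped-file subclass could
// override this to inspect the tokens of all internal blocks instead
protected boolean isBlockTokenExpired(LocatedBlock lb) throws Exception {
  return SecurityTestUtil.isBlockTokenExpired(lb.getBlockToken());
}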
Use of org.apache.hadoop.hdfs.server.namenode.NameNode in project hadoop by apache.
From the class BaseReplicationPolicyTest, the method setupCluster.
@Before
public void setupCluster() throws Exception {
  Configuration conf = new HdfsConfiguration();
  dataNodes = getDatanodeDescriptors(conf);
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  conf.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
      blockPlacementPolicy);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);
  nameNodeRpc = namenode.getRpcServer();
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  dnManager = bm.getDatanodeManager();
  // construct the network topology and register each datanode
  for (int i = 0; i < dataNodes.length; i++) {
    cluster.add(dataNodes[i]);
    bm.getDatanodeManager().getHeartbeatManager().addDatanode(dataNodes[i]);
    bm.getDatanodeManager().getHeartbeatManager().updateDnStat(dataNodes[i]);
  }
  updateHeartbeatWithUsage();
}
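Because setupCluster() boots a real NameNode rather than a MiniDFSCluster, the matching @After hook has to stop it explicitly. A minimal sketch of that teardown, assuming only the namenode field shown above:

@After
public void tearDown() throws Exception {
  // stop the NameNode started in setupCluster() so each test starts clean
  namenode.stop();
}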