Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestDataNodeVolumeFailure, method triggerFailure.
/**
 * Go to each block on the 2nd DataNode until one of the accesses fails...
 * @param path the file whose blocks are accessed
 * @param size the length of the file in bytes
 * @throws IOException
 */
private void triggerFailure(String path, long size) throws IOException {
  NamenodeProtocols nn = cluster.getNameNodeRpc();
  List<LocatedBlock> locatedBlocks =
      nn.getBlockLocations(path, 0, size).getLocatedBlocks();
  for (LocatedBlock lb : locatedBlocks) {
    // Always access the replica on the second DataNode in the location list.
    DatanodeInfo dinfo = lb.getLocations()[1];
    ExtendedBlock b = lb.getBlock();
    try {
      accessBlock(dinfo, lb);
    } catch (IOException e) {
      System.out.println("Failure triggered, on block: " + b.getBlockId() +
          "; corresponding volume should be removed by now");
      break;
    }
  }
}
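The accessBlock(...) helper used above is defined elsewhere in TestDataNodeVolumeFailure and is not shown here. A minimal hypothetical driver for triggerFailure, assuming the test's fs and cluster fields plus org.apache.hadoop.fs.Path and DFSTestUtil, might look like this (the path and size values are illustrative only):

// Hypothetical usage sketch (not part of the original test body):
// write a 2-way replicated file, wait for replication, then walk its
// blocks until the access against the failed volume throws.
Path testFile = new Path("/test.txt");
long filesize = 512L * 1024;
DFSTestUtil.createFile(fs, testFile, filesize, (short) 2, 1L);
DFSTestUtil.waitReplication(fs, testFile, (short) 2);
triggerFailure(testFile.toString(), filesize);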
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestAddBlockRetry, method testAddBlockRetryShouldReturnBlockWithLocations.
/*
 * Since the NameNode does not persist any locations for the block, an
 * addBlock() retry issued after a NameNode restart should re-select the
 * locations and return them to the client. See HDFS-5257.
 */
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations() throws Exception {
  final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
  NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
  // create file
  nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
      new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
      (short) 3, 1024, null);
  // start first addBlock()
  LOG.info("Starting first addBlock for " + src);
  LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
      HdfsConstants.GRANDFATHER_INODE_ID, null, null);
  assertTrue("Block locations should be present", lb1.getLocations().length > 0);
  // restart the NameNode and issue the same addBlock() call again
  cluster.restartNameNode();
  nameNodeRpc = cluster.getNameNodeRpc();
  LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
      HdfsConstants.GRANDFATHER_INODE_ID, null, null);
  assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
  assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
}
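The cluster and LOG fields are initialized elsewhere in TestAddBlockRetry. A minimal hypothetical setup sketch is shown below; the DataNode count (matching the replication factor 3 used above) and the commons-logging logger are assumptions, not the original test's exact configuration:

// Hypothetical setup sketch for the fields the test relies on.
private static final Log LOG = LogFactory.getLog(TestAddBlockRetry.class);
private Configuration conf;
private MiniDFSCluster cluster;

@Before
public void setUp() throws Exception {
  conf = new Configuration();
  // Enough DataNodes to satisfy the replication factor used in the test.
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
}

@After
public void tearDown() throws Exception {
  if (cluster != null) {
    cluster.shutdown();
  }
}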
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestNamenodeRetryCache, method testUpdatePipelineWithFailOver.
/**
 * Make sure a retried call does not hang because of the exception thrown in
 * the first call.
 */
@Test(timeout = 60000)
public void testUpdatePipelineWithFailOver() throws Exception {
  // Rebuild the cluster with an HA topology; both NameNodes start in standby.
  cluster.shutdown();
  nnRpc = null;
  filesystem = null;
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1).build();
  cluster.waitActive();
  NamenodeProtocols ns0 = cluster.getNameNodeRpc(0);
  ExtendedBlock oldBlock = new ExtendedBlock();
  ExtendedBlock newBlock = new ExtendedBlock();
  DatanodeID[] newNodes = new DatanodeID[2];
  String[] newStorages = new String[2];
  newCall();
  try {
    ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
    fail("Expect StandbyException from the updatePipeline call");
  } catch (StandbyException e) {
    // expected, since both NameNodes are initially in standby state
    GenericTestUtils.assertExceptionContains(HAServiceState.STANDBY.toString(), e);
  }
  cluster.transitionToActive(0);
  try {
    ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
  } catch (IOException e) {
    // ignore the exception; the retried call just must not hang
  }
}
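newCall() is a helper defined elsewhere in TestNamenodeRetryCache; it gives the subsequent RPCs a fresh call id so the two updatePipeline invocations above are treated as retries of the same call. A hypothetical extension of the same pattern that also fails over to the second NameNode (not part of the original test) might look like this:

// Hypothetical extension sketch: fail over to nn1 and retry the same call.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
NamenodeProtocols ns1 = cluster.getNameNodeRpc(1);
try {
  ns1.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
} catch (IOException e) {
  // ignore; again the point is only that the retried call returns.
}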
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestStartup, method checkNameSpace.
private void checkNameSpace(Configuration conf) throws IOException {
  NameNode namenode = new NameNode(conf);
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  // The directory created before the restart must still be present.
  assertTrue(nnRpc.getFileInfo("/test").isDir());
  // Enter safe mode, save the namespace, then shut the NameNode down.
  nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  nnRpc.saveNamespace(0, 0);
  namenode.stop();
  namenode.join();
  namenode.joinHttpServer();
}
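checkNameSpace() only verifies and re-saves an existing namespace, so something must have created /test beforehand. A minimal hypothetical caller, assuming the directory is created through the same RPC interface, could be:

// Hypothetical caller sketch: create /test, stop the NameNode, then verify
// the directory survives a fresh start (repeating once to also cover the
// image written by saveNamespace).
NameNode namenode = new NameNode(conf);
namenode.getRpcServer().mkdirs("/test", FsPermission.getDefault(), true);
namenode.stop();
namenode.join();
checkNameSpace(conf);
checkNameSpace(conf);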
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.
The class TestStartup, method testNNRestart.
/**
 * Test that the hosts include list may contain host names. After the
 * NameNode restarts, the DataNodes that are still alive should have no
 * trouble registering again.
 */
@Test
public void testNNRestart() throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  // heartbeat interval in seconds
  int HEARTBEAT_INTERVAL = 1;
  HostsFileWriter hostsFileWriter = new HostsFileWriter();
  hostsFileWriter.initialize(config, "work-dir/restartnn");
  // Put the local host's name (not its IP address) into the include file.
  byte[] b = { 127, 0, 0, 1 };
  InetAddress inetAddress = InetAddress.getByAddress(b);
  hostsFileWriter.initIncludeHosts(new String[] { inetAddress.getHostName() });
  int numDatanodes = 1;
  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes)
        .setupHostsFile(true).build();
    cluster.waitActive();
    cluster.restartNameNode();
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    assertNotNull(nn);
    assertTrue(cluster.isDataNodeUp());
    // Wait up to five heartbeat intervals for the DataNode to re-register.
    DatanodeInfo[] info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    for (int i = 0; i < 5 && info.length != numDatanodes; i++) {
      Thread.sleep(HEARTBEAT_INTERVAL * 1000);
      info = nn.getDatanodeReport(DatanodeReportType.LIVE);
    }
    assertEquals("Number of live nodes should be " + numDatanodes,
        numDatanodes, info.length);
  } catch (IOException e) {
    fail(StringUtils.stringifyException(e));
    throw e;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    hostsFileWriter.cleanup();
  }
}
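The bounded sleep loop above polls getDatanodeReport() for at most five heartbeat intervals. A hypothetical alternative, assuming GenericTestUtils.waitFor and its Supplier type are available (and that the test method is extended to declare the TimeoutException it can throw), would poll with an explicit timeout instead:

// Hypothetical alternative to the fixed sleep loop: poll the live DataNode
// count until it matches, or give up after 30 seconds.
final NamenodeProtocols nnRef = nn;
final int expected = numDatanodes;
GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    try {
      return nnRef.getDatanodeReport(DatanodeReportType.LIVE).length == expected;
    } catch (IOException e) {
      return false;
    }
  }
}, HEARTBEAT_INTERVAL * 1000, 30 * 1000);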