Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
The class TestDecommission, method testDecommissionWithNamenodeRestart.
/**
 * Tests restart of namenode while datanode hosts are added to exclude file
 **/
@Test(timeout = 360000)
public void testDecommissionWithNamenodeRestart()
    throws IOException, InterruptedException {
  LOG.info("Starting test testDecommissionWithNamenodeRestart");
  int numNamenodes = 1;
  int numDatanodes = 1;
  int replicas = 1;
  getConf().setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
      DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
  getConf().setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 5);
  startCluster(numNamenodes, numDatanodes);
  Path file1 = new Path("testDecommissionWithNamenodeRestart.dat");
  FileSystem fileSys = getCluster().getFileSystem();
  writeFile(fileSys, file1, replicas);
  DFSClient client = getDfsClient(0);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  DatanodeID excludedDatanodeID = info[0];
  String excludedDatanodeName = info[0].getXferAddr();
  initExcludeHost(excludedDatanodeName);
  // Add a new datanode to cluster
  getCluster().startDataNodes(getConf(), 1, true, null, null, null, null);
  numDatanodes += 1;
  assertEquals("Number of datanodes should be 2 ", 2,
      getCluster().getDataNodes().size());
  // Restart the namenode
  getCluster().restartNameNode();
  DatanodeInfo datanodeInfo = NameNodeAdapter.getDatanode(
      getCluster().getNamesystem(), excludedDatanodeID);
  waitNodeState(datanodeInfo, AdminStates.DECOMMISSIONED);
  // Ensure decommissioned datanode is not automatically shutdown
  assertEquals("All datanodes must be alive", numDatanodes,
      client.datanodeReport(DatanodeReportType.LIVE).length);
  assertTrue("Checked if block was replicated after decommission.",
      checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(),
          numDatanodes) == null);
  cleanupFile(fileSys, file1);
  // Restart the cluster and ensure recommissioned datanodes
  // are allowed to register with the namenode
  shutdownCluster();
  startCluster(numNamenodes, numDatanodes);
}
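The excludedDatanodeID captured before the restart still resolves to the right node afterwards because DatanodeID equality is based solely on the transfer address and datanode UUID, not on object identity. A minimal sketch of that identity contract, assuming the seven-argument constructor order (IP address, hostname, datanode UUID, xfer port, info port, secure info port, IPC port) and made-up values:

import org.apache.hadoop.hdfs.protocol.DatanodeID;

public class DatanodeIdIdentitySketch {
  public static void main(String[] args) {
    // Hypothetical address and UUID, for illustration only.
    DatanodeID a = new DatanodeID("127.0.0.1", "localhost", "uuid-1234",
        9866, 9864, 9865, 9867);
    // Same transfer address and UUID, different web/IPC ports.
    DatanodeID b = new DatanodeID("127.0.0.1", "localhost", "uuid-1234",
        9866, 0, 0, 0);
    System.out.println(a.equals(b));      // true: identity is xferAddr + UUID
    System.out.println(a.getXferAddr());  // "127.0.0.1:9866"
  }
}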
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
The class TestBlockRecovery, method initBlockRecords.
private List<BlockRecord> initBlockRecords(DataNode spyDN) throws IOException {
  List<BlockRecord> blocks = new ArrayList<BlockRecord>(1);
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(block.getBlockPoolId());
  BlockRecord blockRecord = new BlockRecord(new DatanodeID(dnR), spyDN,
      new ReplicaRecoveryInfo(block.getBlockId(), block.getNumBytes(),
          block.getGenerationStamp(), ReplicaState.FINALIZED));
  blocks.add(blockRecord);
  return blocks;
}
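DatanodeRegistration extends DatanodeID, so new DatanodeID(dnR) keeps just the identity fields of the registering node, while the ReplicaRecoveryInfo describes the replica being recovered. A standalone sketch of that payload, using made-up block id, length, and generation stamp:

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;

public class ReplicaRecoveryInfoSketch {
  public static void main(String[] args) {
    // Illustrative block id, on-disk length, and generation stamp.
    ReplicaRecoveryInfo rri =
        new ReplicaRecoveryInfo(1001L, 4096L, 1L, ReplicaState.FINALIZED);
    System.out.println(rri.getBlockId() + " bytes=" + rri.getNumBytes()
        + " state=" + rri.getOriginalReplicaState());
  }
}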
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
The class TestNamenodeRetryCache, method testUpdatePipelineWithFailOver.
/**
 * Make sure a retry call does not hang because of the exception thrown in the
 * first call.
 */
@Test(timeout = 60000)
public void testUpdatePipelineWithFailOver() throws Exception {
  cluster.shutdown();
  nnRpc = null;
  filesystem = null;
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1).build();
  cluster.waitActive();
  NamenodeProtocols ns0 = cluster.getNameNodeRpc(0);
  ExtendedBlock oldBlock = new ExtendedBlock();
  ExtendedBlock newBlock = new ExtendedBlock();
  DatanodeID[] newNodes = new DatanodeID[2];
  String[] newStorages = new String[2];
  newCall();
  try {
    ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
    fail("Expect StandbyException from the updatePipeline call");
  } catch (StandbyException e) {
    // expected, since in the beginning both nn are in standby state
    GenericTestUtils.assertExceptionContains(
        HAServiceState.STANDBY.toString(), e);
  }
  cluster.transitionToActive(0);
  try {
    ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
  } catch (IOException e) {
    // ignore call should not hang.
  }
}
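The DatanodeID[] and storage arrays are left empty here because only the retry and standby behaviour is under test; in a real updatePipeline call they would describe the new pipeline. A hedged helper sketch (the method name toDatanodeIDs is hypothetical) showing how such an array could be derived from DatanodeInfo objects, which extend DatanodeID:

// Hypothetical helper: copy only the identity fields from each DatanodeInfo.
static DatanodeID[] toDatanodeIDs(DatanodeInfo[] newPipeline) {
  DatanodeID[] ids = new DatanodeID[newPipeline.length];
  for (int i = 0; i < newPipeline.length; i++) {
    ids[i] = new DatanodeID(newPipeline[i]);  // DatanodeID copy constructor
  }
  return ids;
}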
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
The class TestUpgradeDomainBlockPlacementPolicy, method testPlacementAfterDecommission.
@Test(timeout = 300000)
public void testPlacementAfterDecommission() throws Exception {
  final long fileSize = DEFAULT_BLOCK_SIZE * 5;
  final String testFile = new String("/testfile");
  final Path path = new Path(testFile);
  DFSTestUtil.createFile(cluster.getFileSystem(), path, fileSize,
      REPLICATION_FACTOR, 1000L);
  // Decommission some nodes and wait until decommissions have finished.
  refreshDatanodeAdminProperties2();
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      boolean successful = true;
      LocatedBlocks locatedBlocks;
      try {
        locatedBlocks = cluster.getFileSystem().getClient()
            .getLocatedBlocks(path.toString(), 0, fileSize);
      } catch (IOException ioe) {
        return false;
      }
      for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
        Set<DatanodeInfo> locs = new HashSet<>();
        for (DatanodeInfo datanodeInfo : block.getLocations()) {
          if (datanodeInfo.getAdminState() ==
              DatanodeInfo.AdminStates.NORMAL) {
            locs.add(datanodeInfo);
          }
        }
        for (DatanodeID datanodeID : expectedDatanodeIDs) {
          successful = successful && locs.contains(datanodeID);
        }
      }
      return successful;
    }
  }, 1000, 60000);
  // Verify block placement policy of each block.
  LocatedBlocks locatedBlocks;
  locatedBlocks = cluster.getFileSystem().getClient()
      .getLocatedBlocks(path.toString(), 0, fileSize);
  for (LocatedBlock block : locatedBlocks.getLocatedBlocks()) {
    BlockPlacementStatus status = cluster.getNamesystem().getBlockManager()
        .getBlockPlacementPolicy()
        .verifyBlockPlacement(block.getLocations(), REPLICATION_FACTOR);
    assertTrue(status.isPlacementPolicySatisfied());
  }
}
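The locs.contains(datanodeID) check inside the Supplier works across types because DatanodeInfo keeps DatanodeID's identity semantics (equality by transfer address and datanode UUID). A short fragment restating that check in isolation, reusing block and expectedDatanodeIDs from the test above:

// Probe a set of DatanodeInfo locations with bare DatanodeID values;
// the match is by xferAddr + datanode UUID, not by object reference.
Set<DatanodeInfo> locs = new HashSet<>(Arrays.asList(block.getLocations()));
for (DatanodeID expected : expectedDatanodeIDs) {
  assertTrue("replica expected on " + expected.getXferAddr(),
      locs.contains(expected));
}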
Use of org.apache.hadoop.hdfs.protocol.DatanodeID in project hadoop by apache.
The class TestUpgradeDomainBlockPlacementPolicy, method refreshDatanodeAdminProperties.
/**
 * Define admin properties for these datanodes as follows.
 * dn0's upgrade domain is ud5.
 * dn1's upgrade domain is ud2.
 * dn2's upgrade domain is ud3.
 * dn3's upgrade domain is ud1.
 * dn4's upgrade domain is ud2.
 * dn5's upgrade domain is ud4.
 * dn0 and dn5 are decommissioned.
 * Given dn0, dn1 and dn2 are on rack1 and dn3, dn4 and dn5 are on
 * rack2. Then any block's replicas should be on either
 * {dn1, dn2, dn3} or {dn2, dn3, dn4}.
 */
private void refreshDatanodeAdminProperties() throws IOException {
  DatanodeAdminProperties[] datanodes =
      new DatanodeAdminProperties[hosts.length];
  for (int i = 0; i < hosts.length; i++) {
    datanodes[i] = new DatanodeAdminProperties();
    DatanodeID datanodeID = cluster.getDataNodes().get(i).getDatanodeId();
    datanodes[i].setHostName(datanodeID.getHostName());
    datanodes[i].setPort(datanodeID.getXferPort());
    datanodes[i].setUpgradeDomain(upgradeDomains[i]);
  }
  datanodes[0].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
  datanodes[5].setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
  hostsFileWriter.initIncludeHosts(datanodes);
  cluster.getFileSystem().refreshNodes();
  expectedDatanodeIDs.clear();
  expectedDatanodeIDs.add(cluster.getDataNodes().get(2).getDatanodeId());
  expectedDatanodeIDs.add(cluster.getDataNodes().get(3).getDatanodeId());
}
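Each DatanodeAdminProperties entry is keyed by the hostname and transfer port taken from the node's DatanodeID. A standalone sketch of a single entry, with a hypothetical hostname and port:

import org.apache.hadoop.hdfs.protocol.DatanodeAdminProperties;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class DatanodeAdminPropertiesSketch {
  public static void main(String[] args) {
    DatanodeAdminProperties entry = new DatanodeAdminProperties();
    entry.setHostName("dn0.example.com");  // hypothetical hostname
    entry.setPort(9866);                   // hypothetical transfer port
    entry.setUpgradeDomain("ud5");
    entry.setAdminState(DatanodeInfo.AdminStates.DECOMMISSIONED);
    System.out.println(entry.getHostName() + ":" + entry.getPort()
        + " ud=" + entry.getUpgradeDomain()
        + " state=" + entry.getAdminState());
  }
}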