Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestEncryptedTransfer, method testEncryptedAppendRequiringBlockTransfer. The test appends to a file after shutting down one replica holder, which forces the write pipeline to recruit a new DataNode and trigger an encrypted block transfer.
@Test
public void testEncryptedAppendRequiringBlockTransfer() throws IOException {
  setEncryptionConfigKeys();
  // start up 4 DNs
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  fs = getFileSystem(conf);
  // Create a file with replication 3, so its block is on 3 / 4 DNs.
  writeTestDataToFile(fs);
  assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
  // Shut down one of the DNs holding a block replica.
  FSDataInputStream in = fs.open(TEST_PATH);
  List<LocatedBlock> locatedBlocks = DFSTestUtil.getAllBlocks(in);
  in.close();
  assertEquals(1, locatedBlocks.size());
  assertEquals(3, locatedBlocks.get(0).getLocations().length);
  DataNode dn = cluster.getDataNode(
      locatedBlocks.get(0).getLocations()[0].getIpcPort());
  dn.shutdown();
  // Reopen the file for append, which will need to add another DN to the
  // pipeline and in doing so trigger a block transfer.
  writeTestDataToFile(fs);
  assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
}
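The snippet relies on a setEncryptionConfigKeys() helper that is not reproduced here. A minimal sketch of what such a helper plausibly does, assuming it only needs to enable encrypted data transfer (which in turn requires block access tokens):

// Hypothetical sketch; the real TestEncryptedTransfer helper may set
// additional keys (e.g. a trust-channel resolver class).
private void setEncryptionConfigKeys() {
  // Encrypt the DataNode data-transfer protocol.
  conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
  // Encrypted transfer relies on block access tokens being enabled.
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
}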
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestDatanodeReport, method assertReports. This helper cross-checks the client's datanode report against the matching per-DataNode storage reports, storage by storage.
static void assertReports(int numDatanodes, DatanodeReportType type,
    DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
  final DatanodeInfo[] infos = client.datanodeReport(type);
  assertEquals(numDatanodes, infos.length);
  final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
  assertEquals(numDatanodes, reports.length);
  for (int i = 0; i < infos.length; i++) {
    assertEquals(infos[i], reports[i].getDatanodeInfo());
    final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
    if (bpid != null) {
      // check storage
      final StorageReport[] computed = reports[i].getStorageReports();
      Arrays.sort(computed, CMP);
      final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
      Arrays.sort(expected, CMP);
      assertEquals(expected.length, computed.length);
      for (int j = 0; j < expected.length; j++) {
        assertEquals(expected[j].getStorage().getStorageID(),
            computed[j].getStorage().getStorageID());
      }
    }
  }
}
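assertReports depends on two helpers defined elsewhere in TestDatanodeReport: the CMP comparator used to sort both report arrays, and findDatanode. A plausible sketch of both (assuming java.util.Comparator and that they key off the storage ID and datanode UUID, exactly the fields the assertions above compare):

// Hypothetical sketches of the helpers used by assertReports.
// Orders storage reports by storage ID so the computed and expected
// arrays line up index by index.
static final Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
  @Override
  public int compare(StorageReport left, StorageReport right) {
    return left.getStorage().getStorageID()
        .compareTo(right.getStorage().getStorageID());
  }
};

// Resolves a datanode UUID back to the live DataNode instance.
static DataNode findDatanode(String id, List<DataNode> datanodes) {
  for (DataNode d : datanodes) {
    if (d.getDatanodeUuid().equals(id)) {
      return d;
    }
  }
  throw new IllegalStateException("Datanode " + id + " not found in list");
}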
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestDatanodeReport, method testDatanodeReport.
/**
 * Verifies the different types of datanode report (ALL, LIVE and DEAD),
 * both before and after a datanode is shut down.
 */
@Test
public void testDatanodeReport() throws Exception {
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      500); // 0.5s
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
  try {
    // wait until the cluster is up
    cluster.waitActive();
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    final List<DataNode> datanodes = cluster.getDataNodes();
    final DFSClient client = cluster.getFileSystem().dfs;
    assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, bpid);
    assertReports(NUM_OF_DATANODES, DatanodeReportType.LIVE, client, datanodes, bpid);
    assertReports(0, DatanodeReportType.DEAD, client, datanodes, bpid);
    // bring down one datanode
    final DataNode last = datanodes.get(datanodes.size() - 1);
    LOG.info("XXX shutdown datanode " + last.getDatanodeUuid());
    last.shutdown();
    DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
    while (nodeInfo.length != 1) {
      try {
        Thread.sleep(500);
      } catch (Exception e) {
        // Interrupted while waiting; fall through and poll again.
      }
      nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
    }
    assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, null);
    assertReports(NUM_OF_DATANODES - 1, DatanodeReportType.LIVE, client, datanodes, null);
    assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
    Thread.sleep(5000);
    assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
  } finally {
    cluster.shutdown();
  }
}
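The hand-rolled sleep loop above works, but it swallows interruptions and has no timeout. As a sketch of a tidier alternative, the same wait can be expressed with org.apache.hadoop.test.GenericTestUtils.waitFor (assuming the Guava com.google.common.base.Supplier variant of that method, and that the enclosing test declares throws Exception so the TimeoutException can propagate):

// Sketch only: poll every 500 ms, fail after 60 s if no DEAD node appears.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    try {
      return client.datanodeReport(DatanodeReportType.DEAD).length == 1;
    } catch (IOException e) {
      // Treat a transient report failure as "not yet"; retry on next poll.
      return false;
    }
  }
}, 500, 60000);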
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestDecommission, method doDecomCheck. This helper decommissions every DataNode in the cluster, runs one decommission scan, asserts how many nodes that scan checked, and then recommissions the nodes.
private void doDecomCheck(DatanodeManager datanodeManager,
    DecommissionManager decomManager, int expectedNumCheckedNodes)
    throws IOException, ExecutionException, InterruptedException {
  // Decom all nodes
  ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
  for (DataNode d : getCluster().getDataNodes()) {
    DatanodeInfo dn = takeNodeOutofService(0, d.getDatanodeUuid(), 0,
        decommissionedNodes, AdminStates.DECOMMISSION_INPROGRESS);
    decommissionedNodes.add(dn);
  }
  // Run decom scan and check
  BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
  assertEquals("Unexpected # of nodes checked", expectedNumCheckedNodes,
      decomManager.getNumNodesChecked());
  // Recommission all nodes
  for (DatanodeInfo dn : decommissionedNodes) {
    putNodeInService(0, dn);
  }
}
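For context, a hedged sketch of how a caller might drive doDecomCheck. The surrounding tests limit how much decommissioning work a single scan may do (for example via dfs.namenode.decommission.blocks.per.interval) and then assert how many nodes one pass covers; the manager handles, node counts, and getConf() accessor below are illustrative assumptions, not the exact test:

// Illustrative only: cap the per-interval decommission workload, then
// verify how many nodes a single recheck pass manages to check.
getConf().setInt(
    DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY, 3);
startCluster(1, 3);
final DatanodeManager datanodeManager =
    getCluster().getNamesystem().getBlockManager().getDatanodeManager();
final DecommissionManager decomManager = datanodeManager.getDecomManager();
// The expected count depends on block placement; 1 is just a placeholder.
doDecomCheck(datanodeManager, decomManager, 1);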
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
From the class TestDecommission, method testClusterStats. The test decommissions a node in each namespace and verifies the NameNode's cluster statistics during and after decommissioning.
public void testClusterStats(int numNameNodes)
    throws IOException, InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes);
  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = getCluster().getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);
    FSNamesystem fsn = getCluster().getNamesystem(i);
    NameNode namenode = getCluster().getNameNode(i);
    DatanodeInfo decomInfo = takeNodeOutofService(i, null, 0, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    DataNode decomNode = getDataNode(decomInfo);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, decomInfo, decomNode, true);
    // Stop decommissioning and verify stats
    DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
    putNodeInService(i, retInfo);
    DataNode retNode = getDataNode(decomInfo);
    verifyStats(namenode, fsn, retInfo, retNode, false);
  }
}
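getDataNode(decomInfo) is another helper not reproduced here. A minimal sketch, assuming it resolves the DatanodeInfo back to the live DataNode instance by matching UUIDs:

// Hypothetical sketch of the lookup helper used above.
private DataNode getDataNode(DatanodeInfo decomInfo) {
  for (DataNode dn : getCluster().getDataNodes()) {
    if (dn.getDatanodeUuid().equals(decomInfo.getDatanodeUuid())) {
      return dn;
    }
  }
  throw new IllegalStateException(
      "Could not find datanode " + decomInfo + " in cluster");
}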