use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
the class TestBlocksScheduledCounter method testBlocksScheduledCounter.
@Test
public void testBlocksScheduledCounter() throws IOException {
    cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    // Open a file and write a few bytes.
    FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
    for (int i = 0; i < 1024; i++) {
        out.write(i);
    }
    // Flush to make sure a block is allocated.
    out.hflush();
    ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
    final DatanodeManager dm = cluster.getNamesystem().getBlockManager().getDatanodeManager();
    dm.fetchDatanodes(dnList, dnList, false);
    DatanodeDescriptor dn = dnList.get(0);
    assertEquals(1, dn.getBlocksScheduled());
    // Close the file; the counter should go back to zero.
    out.close();
    assertEquals(0, dn.getBlocksScheduled());
}
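getBlocksScheduled() rises when the NameNode allocates a block on a datanode for a write pipeline and falls once that datanode reports the received block; closing the stream completes the last block, which is why the test expects the counter to return to zero. Below is a minimal hedged sketch of a helper built only from the calls shown above; the method name and its placement inside a test class are assumptions, not part of the Hadoop source.

// Hypothetical helper (not part of the test above): sum the blocks-scheduled
// counters over all live datanodes, using only calls that appear in the snippet.
private static int totalBlocksScheduled(MiniDFSCluster cluster) {
    ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
    DatanodeManager dm = cluster.getNamesystem().getBlockManager().getDatanodeManager();
    // Live nodes go into the first list; an unused list is passed for dead nodes.
    dm.fetchDatanodes(live, new ArrayList<DatanodeDescriptor>(), false);
    int total = 0;
    for (DatanodeDescriptor dn : live) {
        total += dn.getBlocksScheduled();
    }
    return total;
}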
use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
the class TestDFSNetworkTopology method testAddAndRemoveTopology.
/**
* Test the correctness of storage type info when nodes are added and removed.
* @throws Exception
*/
@Test
public void testAddAndRemoveTopology() throws Exception {
    String[] newRack = { "/l1/d1/r1", "/l1/d1/r3", "/l1/d3/r3", "/l1/d3/r3" };
    String[] newHost = { "nhost1", "nhost2", "nhost3", "nhost4" };
    String[] newips = { "30.30.30.30", "31.31.31.31", "32.32.32.32", "33.33.33.33" };
    StorageType[] newTypes = { StorageType.DISK, StorageType.SSD, StorageType.SSD, StorageType.SSD };
    DatanodeDescriptor[] newDD = new DatanodeDescriptor[4];
    for (int i = 0; i < 4; i++) {
        DatanodeStorageInfo dsi = DFSTestUtil.createDatanodeStorageInfo(
            "s" + newHost[i], newips[i], newRack[i], newHost[i], newTypes[i], null);
        newDD[i] = dsi.getDatanodeDescriptor();
        CLUSTER.add(newDD[i]);
    }
    DFSTopologyNodeImpl d1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d1");
    HashMap<String, EnumMap<StorageType, Integer>> d1info = d1.getChildrenStorageInfo();
    assertEquals(3, d1info.keySet().size());
    assertTrue(d1info.get("r1").size() == 2 && d1info.get("r2").size() == 2
        && d1info.get("r3").size() == 1);
    assertEquals(2, (int) d1info.get("r1").get(StorageType.DISK));
    assertEquals(1, (int) d1info.get("r1").get(StorageType.ARCHIVE));
    assertEquals(2, (int) d1info.get("r2").get(StorageType.DISK));
    assertEquals(1, (int) d1info.get("r2").get(StorageType.ARCHIVE));
    assertEquals(1, (int) d1info.get("r3").get(StorageType.SSD));
    DFSTopologyNodeImpl d3 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d3");
    HashMap<String, EnumMap<StorageType, Integer>> d3info = d3.getChildrenStorageInfo();
    assertEquals(1, d3info.keySet().size());
    assertTrue(d3info.get("r3").size() == 1);
    assertEquals(2, (int) d3info.get("r3").get(StorageType.SSD));
    DFSTopologyNodeImpl l1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1");
    HashMap<String, EnumMap<StorageType, Integer>> l1info = l1.getChildrenStorageInfo();
    assertEquals(3, l1info.keySet().size());
    assertTrue(l1info.get("d1").size() == 3 && l1info.get("d2").size() == 3
        && l1info.get("d3").size() == 1);
    assertEquals(4, (int) l1info.get("d1").get(StorageType.DISK));
    assertEquals(2, (int) l1info.get("d1").get(StorageType.ARCHIVE));
    assertEquals(1, (int) l1info.get("d1").get(StorageType.SSD));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.SSD));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.RAM_DISK));
    assertEquals(1, (int) l1info.get("d2").get(StorageType.DISK));
    assertEquals(2, (int) l1info.get("d3").get(StorageType.SSD));
    for (int i = 0; i < 4; i++) {
        CLUSTER.remove(newDD[i]);
    }
    // /l1/d1/r3 should be gone and /l1/d1/r1 should be back to its original counts.
    DFSTopologyNodeImpl nd1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d1");
    HashMap<String, EnumMap<StorageType, Integer>> nd1info = nd1.getChildrenStorageInfo();
    assertEquals(2, nd1info.keySet().size());
    assertTrue(nd1info.get("r1").size() == 2 && nd1info.get("r2").size() == 2);
    assertEquals(1, (int) nd1info.get("r1").get(StorageType.DISK));
    assertEquals(1, (int) nd1info.get("r1").get(StorageType.ARCHIVE));
    assertEquals(2, (int) nd1info.get("r2").get(StorageType.DISK));
    assertEquals(1, (int) nd1info.get("r2").get(StorageType.ARCHIVE));
    // /l1/d3 should be gone and /l1/d1 should be back to its original counts.
    DFSTopologyNodeImpl nl1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1");
    HashMap<String, EnumMap<StorageType, Integer>> nl1info = nl1.getChildrenStorageInfo();
    assertEquals(2, nl1info.keySet().size());
    // Check against the freshly fetched nl1info, not the l1info reference obtained
    // before the removals.
    assertTrue(nl1info.get("d1").size() == 2 && nl1info.get("d2").size() == 3);
    assertEquals(2, (int) nl1info.get("d1").get(StorageType.ARCHIVE));
    assertEquals(3, (int) nl1info.get("d1").get(StorageType.DISK));
    assertEquals(1, (int) nl1info.get("d2").get(StorageType.DISK));
    assertEquals(1, (int) nl1info.get("d2").get(StorageType.RAM_DISK));
    assertEquals(1, (int) nl1info.get("d2").get(StorageType.SSD));
    assertNull(CLUSTER.getNode("/l1/d3"));
}
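getChildrenStorageInfo() returns, for each child of a topology node (each rack under a datacenter, each datacenter under /l1), an EnumMap keyed by StorageType with a count per storage type, which is what every assertion above is reading. Below is a short hedged sketch of the same add/inspect/remove round trip on the shared CLUSTER topology; the host name and IP are made up, and the expectation that rack r1 starts with no SSD node is inferred from the post-removal assertions above.

// Hedged sketch (not from the test above): add one SSD node, check that it shows
// up in the per-rack storage map, then remove it again.
DatanodeStorageInfo extra = DFSTestUtil.createDatanodeStorageInfo(
    "s-nhost9", "40.40.40.40", "/l1/d1/r1", "nhost9", StorageType.SSD, null);
DatanodeDescriptor extraDD = extra.getDatanodeDescriptor();
CLUSTER.add(extraDD);
DFSTopologyNodeImpl dc1 = (DFSTopologyNodeImpl) CLUSTER.getNode("/l1/d1");
EnumMap<StorageType, Integer> r1 = dc1.getChildrenStorageInfo().get("r1");
// r1 had no SSD host before (see the post-removal assertions above), so the SSD
// count should now be exactly 1.
assertEquals(1, (int) r1.get(StorageType.SSD));
CLUSTER.remove(extraDD);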
use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
the class TestPipelinesFailover method testFailoverRightBeforeCommitSynchronization.
/**
* Test the scenario where the NN fails over after issuing a block
* synchronization request, but before it is committed. The
* DN running the recovery should then fail to commit the synchronization
* and a later retry will succeed.
*/
@Test(timeout = 30000)
public void testFailoverRightBeforeCommitSynchronization() throws Exception {
    final Configuration conf = new Configuration();
    // Disable permissions so that another user can recover the lease.
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    FSDataOutputStream stm = null;
    final MiniDFSCluster cluster = newMiniCluster(conf, 3);
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        Thread.sleep(500);
        LOG.info("Starting with NN 0 active");
        FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
        stm = fs.create(TEST_PATH);
        // Write a half block.
        AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
        stm.hflush();
        // Look into the block manager on the active node for the block
        // under construction.
        NameNode nn0 = cluster.getNameNode(0);
        ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
        DatanodeDescriptor expectedPrimary = DFSTestUtil.getExpectedPrimaryNode(nn0, blk);
        LOG.info("Expecting block recovery to be triggered on DN " + expectedPrimary);
        // Find the corresponding DN daemon, and spy on its connection to the
        // active NN.
        DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
        DatanodeProtocolClientSideTranslatorPB nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(primaryDN, nn0);
        // Delay the commitBlockSynchronization call.
        DelayAnswer delayer = new DelayAnswer(LOG);
        Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
            Mockito.eq(blk),
            Mockito.anyInt(),                    // new genstamp
            Mockito.anyLong(),                   // new length
            Mockito.eq(true),                    // close file
            Mockito.eq(false),                   // delete block
            (DatanodeID[]) Mockito.anyObject(),  // new targets
            (String[]) Mockito.anyObject());     // new target storages
        DistributedFileSystem fsOtherUser = createFsAsOtherUser(cluster, conf);
        assertFalse(fsOtherUser.recoverLease(TEST_PATH));
        LOG.info("Waiting for commitBlockSynchronization call from primary");
        delayer.waitForCall();
        LOG.info("Failing over to NN 1");
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        // Let the commitBlockSynchronization call go through, and check that
        // it failed with the correct exception.
        delayer.proceed();
        delayer.waitForResult();
        Throwable t = delayer.getThrown();
        if (t == null) {
            fail("commitBlockSynchronization call did not fail on standby");
        }
        GenericTestUtils.assertExceptionContains("Operation category WRITE is not supported", t);
        // Now, if we try again to recover the block, it should succeed on the new
        // active NN.
        loopRecoverLease(fsOtherUser, TEST_PATH);
        AppendTestUtil.check(fs, TEST_PATH, BLOCK_SIZE / 2);
    } finally {
        IOUtils.closeStream(stm);
        cluster.shutdown();
    }
}
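The heart of this test is the DelayAnswer: the spy lets the datanode issue its commitBlockSynchronization RPC, waitForCall() parks it, the test fails over, and only then does proceed() let the RPC run, so it lands on a NameNode that is now standby. Below is a self-contained hedged sketch of that pattern against a plain Mockito spy rather than the DN-to-NN translator; the SlowService class and test method are made up, and the usual test imports (Mockito, GenericTestUtils.DelayAnswer, JUnit asserts) plus the surrounding class's LOG field are assumed.

// Hedged sketch (not from the test above) of the "park a call, change state,
// then release it" pattern used by testFailoverRightBeforeCommitSynchronization.
public static class SlowService {
    public String call(String arg) {
        return "ok:" + arg;
    }
}

@Test(timeout = 30000)
public void testDelayAnswerSketch() throws Exception {
    SlowService spy = Mockito.spy(new SlowService());
    DelayAnswer delayer = new DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(spy).call(Mockito.anyString());
    // Issue the call from another thread so this thread can control its timing.
    Thread caller = new Thread(() -> spy.call("x"));
    caller.start();
    // The call has been entered but is now parked inside the DelayAnswer.
    delayer.waitForCall();
    // ... this is where the real test flips the active NameNode ...
    delayer.proceed();       // let the parked call run against the new state
    delayer.waitForResult(); // wait for it to finish
    assertNull(delayer.getThrown());
    caller.join();
}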
use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project SSM by Intel-bigdata.
the class CompatibilityHelper31 method newDatanodeInfo.
@Override
public DatanodeInfo newDatanodeInfo(String ipAddress, int xferPort) {
    // DatanodeID(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort, ipcPort):
    // only the IP address and transfer port are known here, so the rest is left null/zero.
    DatanodeID datanodeID = new DatanodeID(ipAddress, null, null, xferPort, 0, 0, 0);
    DatanodeDescriptor datanodeDescriptor = new DatanodeDescriptor(datanodeID);
    return datanodeDescriptor;
}
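A hedged usage sketch follows; the no-argument construction of CompatibilityHelper31, the IP address, and the port value (9866, the default Hadoop 3 data transfer port) are assumptions for illustration, not taken from the SSM source.

// Hypothetical usage: wrap a bare ip/port pair in a DatanodeInfo so it can be
// handed to APIs that expect a datanode reference.
CompatibilityHelper31 helper = new CompatibilityHelper31();
DatanodeInfo target = helper.newDatanodeInfo("10.0.0.5", 9866);
assertEquals("10.0.0.5", target.getIpAddr());
assertEquals(9866, target.getXferPort());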
use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor in project hadoop by apache.
the class TestDecommissioningStatus method testDecommissionStatus.
/**
* Tests Decommissioning Status in DFS.
*/
@Test
public void testDecommissionStatus() throws Exception {
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster.getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", 2, info.length);
    DistributedFileSystem fileSys = cluster.getFileSystem();
    DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));
    short replicas = numDatanodes;
    //
    // Decommission one node. Verify the decommission status.
    //
    Path file1 = new Path("decommission.dat");
    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize, replicas, seed);
    Path file2 = new Path("decommission1.dat");
    FSDataOutputStream st1 = AdminStatesBaseTest.writeIncompleteFile(fileSys, file2, replicas,
        (short) (fileSize / blockSize));
    for (DataNode d : cluster.getDataNodes()) {
        DataNodeTestUtils.triggerBlockReport(d);
    }
    FSNamesystem fsn = cluster.getNamesystem();
    final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
    for (int iteration = 0; iteration < numDatanodes; iteration++) {
        String downnode = decommissionNode(client, iteration);
        dm.refreshNodes(conf);
        decommissionedNodes.add(downnode);
        BlockManagerTestUtil.recheckDecommissionState(dm);
        final List<DatanodeDescriptor> decommissioningNodes = dm.getDecommissioningNodes();
        if (iteration == 0) {
            assertEquals(decommissioningNodes.size(), 1);
            DatanodeDescriptor decommNode = decommissioningNodes.get(0);
            checkDecommissionStatus(decommNode, 3, 0, 1);
            checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1), fileSys, admin);
        } else {
            assertEquals(decommissioningNodes.size(), 2);
            DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
            DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
            // This one is still 3,3,1 since it passed over the UC block
            // earlier, before node 2 was decommissioned.
            checkDecommissionStatus(decommNode1, 3, 3, 1);
            // This one is 4,4,2 since it has the full state.
            checkDecommissionStatus(decommNode2, 4, 4, 2);
            checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 2), fileSys, admin);
        }
    }
    // Call refreshNodes on FSNamesystem with an empty exclude file.
    // This will remove the datanodes from the decommissioning list and
    // make them available again.
    hostsFileWriter.initExcludeHost("");
    dm.refreshNodes(conf);
    st1.close();
    AdminStatesBaseTest.cleanupFile(fileSys, file1);
    AdminStatesBaseTest.cleanupFile(fileSys, file2);
}
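Decommissioning here is driven entirely through the exclude file: decommissionNode() adds a datanode to it, refreshNodes() makes the NameNode re-read it, and BlockManagerTestUtil.recheckDecommissionState() forces the decommission monitor to run so the assertions do not depend on its timer. Below is a condensed, hedged sketch of that round trip using the same test-class fields that appear in the snippet (hostsFileWriter, conf, dm); the host:port string is made up for illustration.

// Hedged sketch of the decommission round trip driven above.
hostsFileWriter.initExcludeHost("127.0.0.1:9866"); // mark one datanode as excluded
dm.refreshNodes(conf);                             // NameNode re-reads the exclude file
BlockManagerTestUtil.recheckDecommissionState(dm); // run the decommission monitor now
// Nodes stay in this list until all of their blocks are sufficiently replicated.
List<DatanodeDescriptor> inProgress = dm.getDecommissioningNodes();
hostsFileWriter.initExcludeHost("");               // clear the exclude file again
dm.refreshNodes(conf);                             // the node returns to normal service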