
Example 31 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in the Apache Hadoop project.

From the class TestDiskBalancer, method testDiskBalancerNameNodeConnectivity.

@Test
public void testDiskBalancerNameNodeConnectivity() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    final int numDatanodes = 2;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    try {
        cluster.waitActive();
        // Connect to the NameNode and build the disk-balancer view of the cluster.
        ClusterConnector nameNodeConnector = ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
        DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(nameNodeConnector);
        diskBalancerCluster.readClusterInfo();
        assertEquals(numDatanodes, diskBalancerCluster.getNodes().size());
        // Verify that the disk-balancer model agrees with the live DataNode.
        DataNode dnNode = cluster.getDataNodes().get(0);
        DiskBalancerDataNode dbDnNode = diskBalancerCluster.getNodeByUUID(dnNode.getDatanodeUuid());
        assertEquals(dnNode.getDatanodeUuid(), dbDnNode.getDataNodeUUID());
        assertEquals(dnNode.getDatanodeId().getIpAddr(), dbDnNode.getDataNodeIP());
        assertEquals(dnNode.getDatanodeId().getHostName(), dbDnNode.getDataNodeName());
        try (FsDatasetSpi.FsVolumeReferences ref = dnNode.getFSDataset().getFsVolumeReferences()) {
            assertEquals(ref.size(), dbDnNode.getVolumeCount());
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) ClusterConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) Test(org.junit.Test)
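
Outside of a MiniDFSCluster test, the same connect-and-read flow applies against a running NameNode. A minimal sketch using only the calls demonstrated above; the NameNode URI is a placeholder, not taken from the example:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;

public class DiskBalancerClusterReader {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Placeholder NameNode URI; substitute your cluster's address.
        URI nameNodeUri = new URI("hdfs://namenode.example.com:8020");
        // Same calls as in the test above: connect, then pull cluster topology.
        ClusterConnector connector = ConnectorFactory.getCluster(nameNodeUri, conf);
        DiskBalancerCluster cluster = new DiskBalancerCluster(connector);
        cluster.readClusterInfo();
        for (DiskBalancerDataNode node : cluster.getNodes()) {
            System.out.println(node.getDataNodeName() + " has " + node.getVolumeCount() + " volumes");
        }
    }
}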

Example 32 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in the Apache Hadoop project.

From the class TestDiskBalancerRPC, method testgetDiskBalancerBandwidth.

@Test
public void testgetDiskBalancerBandwidth() throws Exception {
    RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
    DataNode dataNode = rpcTestHelper.getDataNode();
    String planHash = rpcTestHelper.getPlanHash();
    int planVersion = rpcTestHelper.getPlanVersion();
    NodePlan plan = rpcTestHelper.getPlan();
    // Submit a plan, then read the bandwidth setting back over the DataNode RPC.
    dataNode.submitDiskBalancerPlan(planHash, planVersion, PLAN_FILE, plan.toJson(), false);
    String bandwidthString = dataNode.getDiskBalancerSetting(DiskBalancerConstants.DISKBALANCER_BANDWIDTH);
    long value = Long.decode(bandwidthString);
    Assert.assertEquals(10L, value);
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
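
The RpcTestHelper used in Examples 32, 34, and 35 is a private helper inside TestDiskBalancerRPC and is not reproduced on this page. The sketch below is a hypothetical reconstruction inferred from its call sites, assuming the test class exposes shared cluster and conf fields as in Example 31; the actual Hadoop source may differ:

// Hypothetical reconstruction; relies on the test's shared `cluster` and `conf`.
class RpcTestHelper {
    private DataNode dataNode;
    private String planHash;
    private int planVersion;
    private NodePlan plan;

    RpcTestHelper invoke() throws Exception {
        // Build the disk-balancer model of the cluster, as in Example 31.
        ClusterConnector connector = ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
        DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(connector);
        diskBalancerCluster.readClusterInfo();
        // Pick a DataNode and compute a plan for it. GreedyPlanner with a 10%
        // threshold is an assumption, not confirmed by the snippets above.
        dataNode = cluster.getDataNodes().get(0);
        DiskBalancerDataNode node = diskBalancerCluster.getNodeByUUID(dataNode.getDatanodeUuid());
        GreedyPlanner planner = new GreedyPlanner(10.0f, node);
        plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort());
        planner.balanceVolumeSet(node, node.getVolumeSets().get("DISK"), plan);
        planVersion = 1;
        planHash = DigestUtils.shaHex(plan.toJson());
        return this;
    }

    DataNode getDataNode() { return dataNode; }
    String getPlanHash() { return planHash; }
    int getPlanVersion() { return planVersion; }
    NodePlan getPlan() { return plan; }
}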

Example 33 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in the Apache Hadoop project.

From the class TestDiskBalancerRPC, method testGetDiskBalancerVolumeMapping.

@Test
public void testGetDiskBalancerVolumeMapping() throws Exception {
    final int dnIndex = 0;
    DataNode dataNode = cluster.getDataNodes().get(dnIndex);
    // The setting is returned as a JSON map of volume UUIDs to base paths.
    String volumeNameJson = dataNode.getDiskBalancerSetting(DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
    Assert.assertNotNull(volumeNameJson);
    ObjectMapper mapper = new ObjectMapper();
    @SuppressWarnings("unchecked")
    Map<String, String> volumeMap = mapper.readValue(volumeNameJson, HashMap.class);
    // The test cluster is configured with two volumes per DataNode.
    Assert.assertEquals(2, volumeMap.size());
}
Also used : DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test)
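
The unchecked cast above can be avoided with Jackson's TypeReference, which preserves the map's generic type. A minimal sketch; the JSON string is a stand-in for the value returned by getDiskBalancerSetting:

import java.util.Map;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;

public class VolumeMapParser {
    public static void main(String[] args) throws Exception {
        // Stand-in payload; in the test this comes from
        // dataNode.getDiskBalancerSetting(DiskBalancerConstants.DISKBALANCER_VOLUME_NAME).
        String volumeNameJson = "{\"uuid-1\":\"/data/disk1\",\"uuid-2\":\"/data/disk2\"}";
        ObjectMapper mapper = new ObjectMapper();
        // TypeReference keeps the generic type, so no unchecked cast is needed.
        Map<String, String> volumeMap =
            mapper.readValue(volumeNameJson, new TypeReference<Map<String, String>>() { });
        volumeMap.forEach((uuid, path) -> System.out.println(uuid + " -> " + path));
    }
}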

Example 34 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in the Apache Hadoop project.

From the class TestDiskBalancerRPC, method testQueryPlanWithoutSubmit.

@Test
public void testQueryPlanWithoutSubmit() throws Exception {
    RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
    DataNode dataNode = rpcTestHelper.getDataNode();
    // No plan has been submitted, so the query must report NO_PLAN.
    DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
    Assert.assertEquals(NO_PLAN, status.getResult());
}
Also used : DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) DiskBalancerWorkStatus(org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus) Test(org.junit.Test)

Example 35 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in the Apache Hadoop project.

From the class TestDiskBalancerRPC, method testQueryPlan.

@Test
public void testQueryPlan() throws Exception {
    RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
    DataNode dataNode = rpcTestHelper.getDataNode();
    String planHash = rpcTestHelper.getPlanHash();
    int planVersion = rpcTestHelper.getPlanVersion();
    NodePlan plan = rpcTestHelper.getPlan();
    dataNode.submitDiskBalancerPlan(planHash, planVersion, PLAN_FILE, plan.toJson(), false);
    // Depending on timing, the submitted plan may still be running or already done.
    DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
    Assert.assertTrue(status.getResult() == PLAN_UNDER_PROGRESS || status.getResult() == PLAN_DONE);
}
Also used : NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) DiskBalancerWorkStatus(org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus) Test(org.junit.Test)
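
Because getResult() can legitimately return either PLAN_UNDER_PROGRESS or PLAN_DONE right after submission, code that needs completion has to poll. A minimal sketch using only the calls shown above; the poll interval is arbitrary, and qualifying the constants as DiskBalancerWorkStatus.Result is an assumption based on the static imports these tests use:

// Polls until the DataNode reports PLAN_DONE or the deadline passes.
static boolean waitForPlanDone(DataNode dataNode, long timeoutMillis) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (System.currentTimeMillis() < deadline) {
        DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
        if (status.getResult() == DiskBalancerWorkStatus.Result.PLAN_DONE) {
            return true; // The DataNode finished executing the plan.
        }
        Thread.sleep(500); // Arbitrary poll interval.
    }
    return false; // Timed out while the plan was still in progress.
}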

Aggregations

Usage counts across the collected examples:

DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 165
Test (org.junit.Test): 110
Path (org.apache.hadoop.fs.Path): 78
Configuration (org.apache.hadoop.conf.Configuration): 60
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 47
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 37
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 37
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 35
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 29
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 28
FileSystem (org.apache.hadoop.fs.FileSystem): 27
IOException (java.io.IOException): 24
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20
ArrayList (java.util.ArrayList): 17
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 17
File (java.io.File): 15
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 14
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 13
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 12