
Example 46 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

From the class TestDiskBalancer, method testBalanceDataBetweenMultiplePairsOfVolumes.

@Test
public void testBalanceDataBetweenMultiplePairsOfVolumes() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    final int blockCount = 1000;
    final int blockSize = 1024;
    // Create 3 disks; data will be moved from disk0->disk1 and disk0->disk2,
    // so the resulting plan should contain 2 steps.
    final int diskCount = 3;
    final int dataNodeCount = 1;
    final int dataNodeIndex = 0;
    final int sourceDiskIndex = 0;
    final long cap = blockSize * 2L * blockCount;
    MiniDFSCluster cluster = new ClusterBuilder()
        .setBlockCount(blockCount)
        .setBlockSize(blockSize)
        .setDiskCount(diskCount)
        .setNumDatanodes(dataNodeCount)
        .setConf(conf)
        .setCapacities(new long[] { cap, cap, cap })
        .build();
    try {
        DataMover dataMover = new DataMover(cluster, dataNodeIndex, sourceDiskIndex, conf, blockSize, blockCount);
        dataMover.moveDataToSourceDisk();
        NodePlan plan = dataMover.generatePlan();
        // With 3 disks, the plan should move data to both destination disks,
        // so we must have 2 plan steps.
        assertEquals(2, plan.getVolumeSetPlans().size());
        dataMover.executePlan(plan);
        dataMover.verifyPlanExectionDone();
        dataMover.verifyAllVolumesHaveData();
        dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
    } finally {
        cluster.shutdown();
    }
}
Also used: NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Test(org.junit.Test)
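
ClusterBuilder and DataMover above are helpers local to TestDiskBalancer and are not shown in this listing. For orientation, here is a minimal sketch of a roughly equivalent cluster setup using the stock MiniDFSCluster.Builder API; the storagesPerDatanode and storageCapacities calls are an assumption about what the helper wraps, not the helper's actual implementation.

Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
final long cap = 1024 * 2L * 1000; // blockSize * 2 * blockCount, as above
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(1)
    // three storage directories per datanode stand in for three disks
    .storagesPerDatanode(3)
    .storageCapacities(new long[] { cap, cap, cap })
    .build();
cluster.waitActive();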

Example 47 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

From the class TestDiskBalancerRPC, method setUp.

@Before
public void setUp() throws Exception {
    conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    cluster.waitActive();
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Before(org.junit.Before)
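
The matching teardown is not part of this snippet. A minimal sketch of the usual JUnit 4 counterpart (using org.junit.After; the actual tearDown in TestDiskBalancerRPC may differ):

@After
public void tearDown() throws Exception {
    // Shut the mini cluster down even if setUp only partially succeeded.
    if (cluster != null) {
        cluster.shutdown();
    }
}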

Example 48 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

From the class TestDiskBalancerRPC, method testMoveBlockAcrossVolume.

@Test
public void testMoveBlockAcrossVolume() throws Exception {
    Configuration conf = new HdfsConfiguration();
    final int defaultBlockSize = 100;
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultBlockSize);
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, defaultBlockSize);
    String fileName = "/tmp.txt";
    Path filePath = new Path(fileName);
    final int numDatanodes = 1;
    final int dnIndex = 0;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    FsVolumeImpl source = null;
    FsVolumeImpl dest = null;
    try {
        cluster.waitActive();
        Random r = new Random();
        FileSystem fs = cluster.getFileSystem(dnIndex);
        DFSTestUtil.createFile(fs, filePath, 10 * 1024, (short) 1, r.nextLong());
        DataNode dnNode = cluster.getDataNodes().get(dnIndex);
        FsDatasetSpi.FsVolumeReferences refs = dnNode.getFSDataset().getFsVolumeReferences();
        try {
            source = (FsVolumeImpl) refs.get(0);
            dest = (FsVolumeImpl) refs.get(1);
            DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(), source, dest);
            assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
        } finally {
            refs.close();
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) Random(java.util.Random) FsVolumeImpl(org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
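
The inner try/finally exists because FsVolumeReferences pins reference counts on the volumes and must be closed. Assuming FsVolumeReferences implements AutoCloseable in your Hadoop version, the same block can be written more compactly with try-with-resources:

// Sketch: try-with-resources variant of the inner block above.
try (FsDatasetSpi.FsVolumeReferences refs =
        dnNode.getFSDataset().getFsVolumeReferences()) {
    FsVolumeImpl source = (FsVolumeImpl) refs.get(0);
    FsVolumeImpl dest = (FsVolumeImpl) refs.get(1);
    DiskBalancerTestUtil.moveAllDataToDestVolume(dnNode.getFSDataset(), source, dest);
    assertTrue(DiskBalancerTestUtil.getBlockCount(source) == 0);
}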

Example 49 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

From the class TestDiskBalancerWithMockMover, method testDiskBalancerEnabled.

/**
   * Checks that the enable flag works correctly.
   *
   * @throws DiskBalancerException
   */
@Test
public void testDiskBalancerEnabled() throws DiskBalancerException {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    TestMover blockMover = new TestMover(cluster.getDataNodes().get(0).getFSDataset());
    DiskBalancer balancer = new DiskBalancerBuilder(conf).setMover(blockMover).build();
    DiskBalancerWorkStatus status = balancer.queryWorkStatus();
    assertEquals(NO_PLAN, status.getResult());
}
Also used: Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DiskBalancer(org.apache.hadoop.hdfs.server.datanode.DiskBalancer) DiskBalancerWorkStatus(org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus) Test(org.junit.Test)
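
NO_PLAN in the assertion is presumably a static import of DiskBalancerWorkStatus.Result.NO_PLAN. A short sketch of how a caller might branch on the query result; the constants other than NO_PLAN are assumptions based on the Result enum in DiskBalancerWorkStatus:

import static org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result.NO_PLAN;

DiskBalancerWorkStatus status = balancer.queryWorkStatus();
switch (status.getResult()) {
    case NO_PLAN:             // nothing has been submitted yet
        break;
    case PLAN_UNDER_PROGRESS: // a submitted plan is still executing
        break;
    case PLAN_DONE:           // the plan completed
        break;
    case PLAN_CANCELLED:      // the plan was cancelled
        break;
    default:
        break;
}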

Example 50 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

From the class TestDiskBalancerCommand, method testDiskBalancerQueryWithoutSubmit.

/**
   * Makes sure that we can query the node without having submitted a plan.
   * @throws Exception
   */
@Test
public void testDiskBalancerQueryWithoutSubmit() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    final int numDatanodes = 2;
    MiniDFSCluster miniDFSCluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    try {
        miniDFSCluster.waitActive();
        DataNode dataNode = miniDFSCluster.getDataNodes().get(0);
        final String queryArg = String.format("-query localhost:%d", dataNode.getIpcPort());
        final String cmdLine = String.format("hdfs diskbalancer %s", queryArg);
        runCommand(cmdLine);
    } finally {
        miniDFSCluster.shutdown();
    }
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) Test(org.junit.Test)
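
runCommand is a helper inside TestDiskBalancerCommand and is not shown in this listing. A hypothetical sketch of such a helper, assuming it delegates to org.apache.hadoop.hdfs.tools.DiskBalancerCLI (the Tool behind the hdfs diskbalancer command in recent Hadoop versions):

// Hypothetical helper, not the actual runCommand in TestDiskBalancerCommand.
private void runCommand(String cmdLine, Configuration conf) throws Exception {
    String[] tokens = cmdLine.trim().split("\\s+");
    // Drop the leading "hdfs diskbalancer" tokens; pass the rest to the CLI.
    String[] args = java.util.Arrays.copyOfRange(tokens, 2, tokens.length);
    org.apache.hadoop.util.ToolRunner.run(conf, new DiskBalancerCLI(conf), args);
}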

Aggregations

HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 454
Configuration (org.apache.hadoop.conf.Configuration): 311
Test (org.junit.Test): 311
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 267
Path (org.apache.hadoop.fs.Path): 152
FileSystem (org.apache.hadoop.fs.FileSystem): 94
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 92
File (java.io.File): 72
IOException (java.io.IOException): 69
Before (org.junit.Before): 56
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 40
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 35
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 30
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 27
RandomAccessFile (java.io.RandomAccessFile): 22
ArrayList (java.util.ArrayList): 20
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 20
URI (java.net.URI): 19
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 19