Example 41 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

The class TestDiskBalancerCommand, method testRunMultipleCommandsUnderOneSetup.

/**
   * Tests running multiple commands under one setup. This mainly covers
   * {@link org.apache.hadoop.hdfs.server.diskbalancer.command.Command#close}
   */
@Test(timeout = 60000)
public void testRunMultipleCommandsUnderOneSetup() throws Exception {
    final int numDatanodes = 1;
    MiniDFSCluster miniCluster = null;
    final Configuration hdfsConf = new HdfsConfiguration();
    try {
        /* new cluster with imbalanced capacity */
        miniCluster = DiskBalancerTestUtil.newImbalancedCluster(hdfsConf, numDatanodes, CAPACITIES, DEFAULT_BLOCK_SIZE, FILE_LEN);
        /* get full path of plan */
        final String planFileFullName = runAndVerifyPlan(miniCluster, hdfsConf);
        /* run execute command */
        final String cmdLine = String.format("hdfs diskbalancer -%s %s", EXECUTE, planFileFullName);
        runCommand(cmdLine, hdfsConf, miniCluster);
    } finally {
        if (miniCluster != null) {
            miniCluster.shutdown();
        }
    }
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) Test(org.junit.Test)
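The helpers runAndVerifyPlan and runCommand are private methods of TestDiskBalancerCommand and do not appear on this page. As a sketch of how a further command could be chained under the same setup (the QUERY constant and the host:port formatting below are assumptions modeled on the CLI's -query form, not code from this excerpt):

    /* Hypothetical continuation inside the same try block: query the
     * datanode after the execute step, reusing the single setup that
     * this test is exercising. */
    final DataNode dn = miniCluster.getDataNodes().get(0);
    final String queryLine = String.format("hdfs diskbalancer -%s %s:%d",
            QUERY, dn.getDatanodeId().getIpAddr(),
            dn.getDatanodeId().getIpcPort());
    runCommand(queryLine, hdfsConf, miniCluster);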

Example 42 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

The class TestDiskBalancerCommand, method testSubmitPlanInNonRegularStatus.

/**
   * Tests whether a plan may be submitted and executed while the Datanode is
   * in a status other than REGULAR.
   */
@Test(timeout = 60000)
public void testSubmitPlanInNonRegularStatus() throws Exception {
    final int numDatanodes = 1;
    MiniDFSCluster miniCluster = null;
    final Configuration hdfsConf = new HdfsConfiguration();
    try {
        /* new cluster with imbalanced capacity */
        miniCluster = DiskBalancerTestUtil.newImbalancedCluster(hdfsConf, numDatanodes, CAPACITIES, DEFAULT_BLOCK_SIZE, FILE_LEN, StartupOption.ROLLBACK);
        /* get full path of plan */
        final String planFileFullName = runAndVerifyPlan(miniCluster, hdfsConf);
        try {
            /* run execute command */
            final String cmdLine = String.format("hdfs diskbalancer -%s %s", EXECUTE, planFileFullName);
            runCommand(cmdLine, hdfsConf, miniCluster);
        } catch (RemoteException e) {
            assertThat(e.getClassName(), containsString("DiskBalancerException"));
            assertThat(e.toString(), is(allOf(containsString("Datanode is in special state"), containsString("Disk balancing not permitted."))));
        }
    } finally {
        if (miniCluster != null) {
            miniCluster.shutdown();
        }
    }
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) CoreMatchers.containsString(org.hamcrest.CoreMatchers.containsString) RemoteException(org.apache.hadoop.ipc.RemoteException) Test(org.junit.Test)
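RemoteException carries the server-side exception's class name across the IPC boundary, which is why the catch block above matches on getClassName(). A slightly stricter variant could unwrap the exception first; a minimal sketch using RemoteException#unwrapRemoteException, assuming DiskBalancerException here is org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException:

    } catch (RemoteException e) {
        // If the wrapped class matches, unwrapRemoteException returns the
        // reconstructed server-side exception; otherwise it returns e itself.
        IOException unwrapped =
                e.unwrapRemoteException(DiskBalancerException.class);
        assertTrue(unwrapped instanceof DiskBalancerException);
        assertThat(unwrapped.getMessage(),
                containsString("Disk balancing not permitted."));
    }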

Example 43 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

The class TestMover, method testMoverFailedRetryWithPinnedBlocks.

/**
   * Tests that the mover handles pinned blocks as well as failed blocks,
   * and that it keeps retrying only the failed blocks.
   */
@Test(timeout = 90000)
public void testMoverFailedRetryWithPinnedBlocks() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    initConf(conf);
    conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).storageTypes(new StorageType[][] { { StorageType.DISK, StorageType.ARCHIVE }, { StorageType.DISK, StorageType.ARCHIVE } }).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String parentDir = "/parent";
        dfs.mkdirs(new Path(parentDir));
        final String file1 = "/parent/testMoverFailedRetryWithPinnedBlocks1";
        // write to DISK
        final FSDataOutputStream out = dfs.create(new Path(file1), (short) 2);
        byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
        out.write(fileData);
        out.close();
        // Adding pinned blocks.
        createFileWithFavoredDatanodes(conf, cluster, dfs);
        // Delete the block file so the block move will fail with FileNotFoundException.
        LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file1, 0);
        Assert.assertEquals("Wrong block count", 2, locatedBlocks.locatedBlockCount());
        LocatedBlock lb = locatedBlocks.get(0);
        cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
        // move to ARCHIVE
        dfs.setStoragePolicy(new Path(parentDir), "COLD");
        int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] { "-p", parentDir });
        Assert.assertEquals("Movement should fail after some retry", ExitStatus.NO_MOVE_PROGRESS.getExitCode(), rc);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
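createFileWithFavoredDatanodes is a private helper of TestMover that is not shown here. Block pinning in HDFS is driven by writing with favored nodes while dfs.datanode.block-pinning.enabled is true on the datanodes; a minimal sketch of that idea, using the DistributedFileSystem#create overload that accepts favored nodes (the helper name and file path are illustrative, and the pinning key must already be true in conf before the MiniDFSCluster is built, since datanodes read it at startup):

    // Illustrative sketch, not the real helper body.
    private void writePinnedFile(MiniDFSCluster cluster,
            DistributedFileSystem dfs) throws IOException {
        // Favor the first datanode so the replica written there gets pinned.
        final InetSocketAddress[] favoredNodes = new InetSocketAddress[] {
            cluster.getDataNodes().get(0).getXferAddress() };
        final Path pinned = new Path("/parent/pinnedFile"); // illustrative path
        try (FSDataOutputStream out = dfs.create(pinned,
                FsPermission.getDefault(), true, 4096, (short) 1,
                DEFAULT_BLOCK_SIZE, null, favoredNodes)) {
            out.write(StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE));
        }
    }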

Example 44 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

The class TestConnectors, method setup.

@Before
public void setup() throws IOException {
    conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Before(org.junit.Before)
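The excerpt shows only the @Before half of the fixture; the matching teardown is not on this page. A minimal sketch of the usual MiniDFSCluster counterpart (assuming TestConnectors has a teardown along these lines, plus the org.junit.After import):

    @After
    public void tearDown() {
        // Shut the mini cluster down after each test to release its resources.
        if (cluster != null) {
            cluster.shutdown();
        }
    }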

Example 45 with HdfsConfiguration

Use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

The class TestDiskBalancer, method testDiskBalancerNameNodeConnectivity.

@Test
public void testDiskBalancerNameNodeConnectivity() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    final int numDatanodes = 2;
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    try {
        cluster.waitActive();
        ClusterConnector nameNodeConnector = ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf);
        DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(nameNodeConnector);
        diskBalancerCluster.readClusterInfo();
        assertEquals(diskBalancerCluster.getNodes().size(), numDatanodes);
        DataNode dnNode = cluster.getDataNodes().get(0);
        DiskBalancerDataNode dbDnNode = diskBalancerCluster.getNodeByUUID(dnNode.getDatanodeUuid());
        assertEquals(dnNode.getDatanodeUuid(), dbDnNode.getDataNodeUUID());
        assertEquals(dnNode.getDatanodeId().getIpAddr(), dbDnNode.getDataNodeIP());
        assertEquals(dnNode.getDatanodeId().getHostName(), dbDnNode.getDataNodeName());
        try (FsDatasetSpi.FsVolumeReferences ref = dnNode.getFSDataset().getFsVolumeReferences()) {
            assertEquals(ref.size(), dbDnNode.getVolumeCount());
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) ClusterConnector(org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector) DiskBalancerCluster(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) Test(org.junit.Test)
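Since DiskBalancerCluster#getNodes() already appears in the test above, the data model can also be walked directly; an illustrative loop using only accessors shown in this example:

    // Illustrative: enumerate the disk balancer's view of the cluster,
    // printing each datanode's name, IP, and volume count.
    for (DiskBalancerDataNode node : diskBalancerCluster.getNodes()) {
        System.out.printf("%s (%s): %d volume(s)%n",
                node.getDataNodeName(), node.getDataNodeIP(),
                node.getVolumeCount());
    }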

Aggregations

HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 454
Configuration (org.apache.hadoop.conf.Configuration): 311
Test (org.junit.Test): 311
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 267
Path (org.apache.hadoop.fs.Path): 152
FileSystem (org.apache.hadoop.fs.FileSystem): 94
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 92
File (java.io.File): 72
IOException (java.io.IOException): 69
Before (org.junit.Before): 56
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 40
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 35
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 30
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 27
RandomAccessFile (java.io.RandomAccessFile): 22
ArrayList (java.util.ArrayList): 20
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 20
URI (java.net.URI): 19
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 19