
Example 16 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestDatasetVolumeChecker method testCheckOneVolume.

/**
   * Test {@link DatasetVolumeChecker#checkVolume} propagates the
   * check to the delegate checker.
   *
   * @throws Exception
   */
@Test(timeout = 10000)
public void testCheckOneVolume() throws Exception {
    LOG.info("Executing {}", testName.getMethodName());
    final FsVolumeSpi volume = makeVolumes(1, expectedVolumeHealth).get(0);
    final DatasetVolumeChecker checker = new DatasetVolumeChecker(new HdfsConfiguration(), new FakeTimer());
    checker.setDelegateChecker(new DummyChecker());
    final AtomicLong numCallbackInvocations = new AtomicLong(0);
    /**
     * Request a check and ensure it triggered {@link FsVolumeSpi#check}.
     */
    boolean result = checker.checkVolume(volume, new DatasetVolumeChecker.Callback() {

        @Override
        public void call(Set<FsVolumeSpi> healthyVolumes, Set<FsVolumeSpi> failedVolumes) {
            numCallbackInvocations.incrementAndGet();
            if (expectedVolumeHealth != null && expectedVolumeHealth != FAILED) {
                assertThat(healthyVolumes.size(), is(1));
                assertThat(failedVolumes.size(), is(0));
            } else {
                assertThat(healthyVolumes.size(), is(0));
                assertThat(failedVolumes.size(), is(1));
            }
        }
    });
    // Ensure that the check was invoked exactly once.
    verify(volume, times(1)).check(anyObject());
    if (result) {
        assertThat(numCallbackInvocations.get(), is(1L));
    }
}
Also used : AtomicLong(java.util.concurrent.atomic.AtomicLong) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FakeTimer(org.apache.hadoop.util.FakeTimer) Test(org.junit.Test)
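The test above uses a few members of the enclosing TestDatasetVolumeChecker class that are not part of this excerpt: makeVolumes and expectedVolumeHealth come from the class's parameterized setup, and testName is a JUnit rule. As a rough sketch (the field name matches the usage above, but the declaration itself is assumed rather than shown here), such a rule is declared like this:

import org.junit.Rule;
import org.junit.rules.TestName;

// JUnit 4 rule that exposes the name of the currently running test method,
// which the test above logs via testName.getMethodName().
@Rule
public TestName testName = new TestName();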

Example 17 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestFsDatasetImpl method testDeletingBlocks.

@Test
public void testDeletingBlocks() throws IOException {
    HdfsConfiguration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(dn);
        ds.addBlockPool(BLOCKPOOL, conf);
        FsVolumeImpl vol;
        try (FsDatasetSpi.FsVolumeReferences volumes = ds.getFsVolumeReferences()) {
            vol = (FsVolumeImpl) volumes.get(0);
        }
        ExtendedBlock eb;
        ReplicaInfo info;
        List<Block> blockList = new ArrayList<>();
        for (int i = 1; i <= 63; i++) {
            eb = new ExtendedBlock(BLOCKPOOL, i, 1, 1000 + i);
            cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
            blockList.add(eb.getLocalBlock());
        }
        ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        // Nothing to do
        }
        assertTrue(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
        blockList.clear();
        eb = new ExtendedBlock(BLOCKPOOL, 64, 1, 1064);
        cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
        blockList.add(eb.getLocalBlock());
        ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        // Nothing to do
        }
        assertFalse(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
    } finally {
        cluster.shutdown();
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Test(org.junit.Test)

Example 18 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestFsDatasetImpl method testReportBadBlocks.

@Test(timeout = 30000)
public void testReportBadBlocks() throws Exception {
    boolean threwException = false;
    MiniDFSCluster cluster = null;
    try {
        Configuration config = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
        DataNode dataNode = cluster.getDataNodes().get(0);
        ExtendedBlock block = new ExtendedBlock(cluster.getNamesystem().getBlockPoolId(), 0);
        try {
            // Test the reportBadBlocks when the volume is null
            dataNode.reportBadBlocks(block);
        } catch (NullPointerException npe) {
            threwException = true;
        }
        Thread.sleep(3000);
        Assert.assertFalse(threwException);
        Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
        FileSystem fs = cluster.getFileSystem();
        Path filePath = new Path("testData");
        DFSTestUtil.createFile(fs, filePath, 1, (short) 1, 0);
        block = DFSTestUtil.getFirstBlock(fs, filePath);
        // Test for the overloaded method reportBadBlocks
        dataNode.reportBadBlocks(block, dataNode.getFSDataset().getFsVolumeReferences().get(0));
        Thread.sleep(3000);
        BlockManagerTestUtil.updateState(cluster.getNamesystem().getBlockManager());
        // Verify the bad block has been reported to namenode
        Assert.assertEquals(1, cluster.getNamesystem().getCorruptReplicaBlocks());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Test(org.junit.Test)

Example 19 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestStorageLocationChecker method testFailedLocationsBelowThreshold.

/**
   * Test handling when the number of failed locations is below the
   * max volume failure threshold.
   *
   * @throws Exception
   */
@Test(timeout = 30000)
public void testFailedLocationsBelowThreshold() throws Exception {
    // 2 healthy, 1 failed.
    final List<StorageLocation> locations =
        makeMockLocations(HEALTHY, HEALTHY, FAILED);
    final Configuration conf = new HdfsConfiguration();
    conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
    StorageLocationChecker checker = new StorageLocationChecker(conf, new FakeTimer());
    List<StorageLocation> filteredLocations = checker.check(conf, locations);
    assertThat(filteredLocations.size(), is(2));
}
Also used : HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) FakeTimer(org.apache.hadoop.util.FakeTimer) Test(org.junit.Test)

Example 20 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestStorageLocationChecker method testFailedLocationsAboveThreshold.

/**
   * Test handling when the number of volume failures tolerated is the
   * same as the number of volumes.
   *
   * @throws Exception
   */
@Test(timeout = 30000)
public void testFailedLocationsAboveThreshold() throws Exception {
    // 1 healthy, 2 failed.
    final List<StorageLocation> locations =
        makeMockLocations(HEALTHY, FAILED, FAILED);
    final Configuration conf = new HdfsConfiguration();
    conf.setInt(DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
    thrown.expect(IOException.class);
    thrown.expectMessage("Too many failed volumes - current valid volumes: 1," + " volumes configured: 3, volumes failed: 2, volume failures" + " tolerated: 1");
    StorageLocationChecker checker = new StorageLocationChecker(conf, new FakeTimer());
    checker.check(conf, locations);
}
Also used : HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) StorageLocation(org.apache.hadoop.hdfs.server.datanode.StorageLocation) FakeTimer(org.apache.hadoop.util.FakeTimer) Test(org.junit.Test)
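The thrown object used in testFailedLocationsAboveThreshold is not shown in this excerpt; it is presumably the usual JUnit 4 ExpectedException rule declared on the test class, along the lines of this sketch:

import org.junit.Rule;
import org.junit.rules.ExpectedException;

// JUnit 4 rule used above to assert that StorageLocationChecker#check throws an
// IOException carrying the "Too many failed volumes" message.
@Rule
public ExpectedException thrown = ExpectedException.none();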

Aggregations

HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 454 usages
Configuration (org.apache.hadoop.conf.Configuration): 311 usages
Test (org.junit.Test): 311 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 267 usages
Path (org.apache.hadoop.fs.Path): 152 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 94 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 92 usages
File (java.io.File): 72 usages
IOException (java.io.IOException): 69 usages
Before (org.junit.Before): 56 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 40 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 35 usages
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 30 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 27 usages
RandomAccessFile (java.io.RandomAccessFile): 22 usages
ArrayList (java.util.ArrayList): 20 usages
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile): 20 usages
URI (java.net.URI): 19 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 19 usages
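As the aggregation counts suggest, HdfsConfiguration most often appears together with Test, MiniDFSCluster, Path, and FileSystem. The recurring pattern in the examples above boils down to the following minimal test skeleton (a sketch only; the test class name and the file path used here are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;

public class ExampleHdfsConfigurationTest {

    @Test(timeout = 30000)
    public void testWithMiniCluster() throws Exception {
        // HdfsConfiguration layers hdfs-default.xml and hdfs-site.xml on top of the core defaults.
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = null;
        try {
            // Start a single-DataNode in-process cluster backed by the configuration.
            cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
            cluster.waitActive();
            FileSystem fs = cluster.getFileSystem();
            // Exercise the file system under test; the path is arbitrary.
            fs.mkdirs(new Path("/example"));
        } finally {
            if (cluster != null) {
                cluster.shutdown();
            }
        }
    }
}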