Example 16 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

From the class TestFsDatasetImpl, method testDeletingBlocks.

@Test
public void testDeletingBlocks() throws IOException {
    HdfsConfiguration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(dn);
        ds.addBlockPool(BLOCKPOOL, conf);
        FsVolumeImpl vol;
        try (FsDatasetSpi.FsVolumeReferences volumes = ds.getFsVolumeReferences()) {
            vol = (FsVolumeImpl) volumes.get(0);
        }
        ExtendedBlock eb;
        List<Block> blockList = new ArrayList<>();
        // Create 63 finalized replicas and invalidate them below; they stay
        // tracked as "deleting" while the deletions are still pending.
        for (int i = 1; i <= 63; i++) {
            eb = new ExtendedBlock(BLOCKPOOL, i, 1, 1000 + i);
            cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
            blockList.add(eb.getLocalBlock());
        }
        ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        // Nothing to do
        }
        assertTrue(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
        blockList.clear();
        // The 64th invalidation completes a deletion batch, after which the
        // blocks are no longer tracked as deleting.
        eb = new ExtendedBlock(BLOCKPOOL, 64, 1, 1064);
        cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
        blockList.add(eb.getLocalBlock());
        ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
        // Nothing to do
        }
        assertFalse(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) ReplicaInfo(org.apache.hadoop.hdfs.server.datanode.ReplicaInfo) FsDatasetSpi(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) FsVolumeReferences(org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Test(org.junit.Test)
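
The two fixed Thread.sleep(1000) calls above make the assertions timing-dependent. A more robust sketch, assuming the same test scope (ds, BLOCKPOOL, blockList) and the waitFor helper from org.apache.hadoop.test.GenericTestUtils:

// Poll the deletion state instead of sleeping for a fixed interval;
// waitFor throws a TimeoutException if the condition never holds.
GenericTestUtils.waitFor(
        () -> ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()),
        100,    // re-check every 100 ms
        10000); // give up after 10 s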

Example 17 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

From the class TestFsDatasetImpl, method testReportBadBlocks.

@Test(timeout = 30000)
public void testReportBadBlocks() throws Exception {
    boolean threwException = false;
    MiniDFSCluster cluster = null;
    try {
        Configuration config = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
        DataNode dataNode = cluster.getDataNodes().get(0);
        ExtendedBlock block = new ExtendedBlock(cluster.getNamesystem().getBlockPoolId(), 0);
        try {
            // Test the reportBadBlocks when the volume is null
            dataNode.reportBadBlocks(block);
        } catch (NullPointerException npe) {
            threwException = true;
        }
        Thread.sleep(3000);
        Assert.assertFalse(threwException);
        Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
        FileSystem fs = cluster.getFileSystem();
        Path filePath = new Path("testData");
        DFSTestUtil.createFile(fs, filePath, 1, (short) 1, 0);
        block = DFSTestUtil.getFirstBlock(fs, filePath);
        // Test for the overloaded method reportBadBlocks
        dataNode.reportBadBlocks(block, dataNode.getFSDataset().getFsVolumeReferences().get(0));
        Thread.sleep(3000);
        BlockManagerTestUtil.updateState(cluster.getNamesystem().getBlockManager());
        // Verify the bad block has been reported to namenode
        Assert.assertEquals(1, cluster.getNamesystem().getCorruptReplicaBlocks());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Test(org.junit.Test)
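
One detail worth noting: the overloaded reportBadBlocks call above takes a volume from getFsVolumeReferences().get(0) without ever closing the references, whereas Example 16 releases them with try-with-resources. A leak-free sketch, assuming the same scope and FsVolumeSpi from org.apache.hadoop.hdfs.server.datanode.fsdataset:

// Obtain the volume inside try-with-resources so the reference count
// on the volume set is released when the block exits.
FsVolumeSpi volume;
try (FsDatasetSpi.FsVolumeReferences refs =
        dataNode.getFSDataset().getFsVolumeReferences()) {
    volume = refs.get(0);
}
dataNode.reportBadBlocks(block, volume);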

Example 18 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

From the class TestPipelinesFailover, method doTestWriteOverFailoverWithDnFail.

private void doTestWriteOverFailoverWithDnFail(TestScenario scenario) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    FSDataOutputStream stm = null;
    MiniDFSCluster cluster = newMiniCluster(conf, 5);
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        Thread.sleep(500);
        LOG.info("Starting with NN 0 active");
        FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
        stm = fs.create(TEST_PATH);
        // write a block and a half
        AppendTestUtil.write(stm, 0, BLOCK_AND_A_HALF);
        // Make sure all the blocks are written before failover
        stm.hflush();
        int nextActive = failover(cluster, scenario);
        assertTrue(fs.exists(TEST_PATH));
        cluster.stopDataNode(0);
        // write another block and a half
        AppendTestUtil.write(stm, BLOCK_AND_A_HALF, BLOCK_AND_A_HALF);
        stm.hflush();
        LOG.info("Failing back from NN " + nextActive + " to NN 0");
        cluster.transitionToStandby(nextActive);
        cluster.transitionToActive(0);
        cluster.stopDataNode(1);
        // write a third block and a half with a second DN stopped
        AppendTestUtil.write(stm, BLOCK_AND_A_HALF * 2, BLOCK_AND_A_HALF);
        stm.hflush();
        stm.close();
        stm = null;
        AppendTestUtil.check(fs, TEST_PATH, BLOCK_AND_A_HALF * 3);
    } finally {
        IOUtils.closeStream(stm);
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
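
newMiniCluster is a private helper of TestPipelinesFailover and its body is not shown on this page. A hypothetical sketch of what an HA-enabled construction with five DataNodes could look like, using the public MiniDFSNNTopology API (an assumption, not the helper's actual code):

// Two NameNodes (nn0/nn1), so transitionToActive(0) and the later
// fail-back in the test have valid targets.
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology())
        .numDataNodes(5)
        .build();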

Example 19 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

From the class TestPipelinesFailover, method doWriteOverFailoverTest.

private void doWriteOverFailoverTest(TestScenario scenario, MethodToTestIdempotence methodToTest) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    // Effectively disable the periodic low-redundancy check by setting a
    // very long interval (1000 s), so it cannot interfere with the test.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1000);
    FSDataOutputStream stm = null;
    MiniDFSCluster cluster = newMiniCluster(conf, 3);
    try {
        int sizeWritten = 0;
        cluster.waitActive();
        cluster.transitionToActive(0);
        Thread.sleep(500);
        LOG.info("Starting with NN 0 active");
        FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
        stm = fs.create(TEST_PATH);
        // write a block and a half
        AppendTestUtil.write(stm, 0, BLOCK_AND_A_HALF);
        sizeWritten += BLOCK_AND_A_HALF;
        // Make sure all of the blocks are written out before failover.
        stm.hflush();
        LOG.info("Failing over to another NN");
        int activeIndex = failover(cluster, scenario);
        // NOTE: explicitly do *not* make any further metadata calls
        // to the NN here. The next IPC call should be to allocate the next
        // block. Any other call would notice the failover and not test
        // idempotence of the operation (HDFS-3031)
        FSNamesystem ns1 = cluster.getNameNode(activeIndex).getNamesystem();
        BlockManagerTestUtil.updateState(ns1.getBlockManager());
        assertEquals(0, ns1.getPendingReplicationBlocks());
        assertEquals(0, ns1.getCorruptReplicaBlocks());
        assertEquals(0, ns1.getMissingBlocksCount());
        // If testing allocateBlock() idempotence, write another block and a
        // half so the next NN RPC allocates a new block; otherwise write
        // nothing, so the next RPC will be completeFile().
        if (methodToTest == MethodToTestIdempotence.ALLOCATE_BLOCK) {
            // write another block and a half
            AppendTestUtil.write(stm, sizeWritten, BLOCK_AND_A_HALF);
            sizeWritten += BLOCK_AND_A_HALF;
        }
        stm.close();
        stm = null;
        AppendTestUtil.check(fs, TEST_PATH, sizeWritten);
    } finally {
        IOUtils.closeStream(stm);
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem)
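
failover(cluster, scenario) is likewise a private helper whose scenario handling is not shown here. For a graceful scenario it presumably reduces to the explicit transitions that Example 20 performs by hand; a sketch under that assumption:

// Gracefully fail over from nn0 to nn1 and record the new active
// index so the caller can fail back later.
cluster.transitionToStandby(0);
cluster.transitionToActive(1);
int activeIndex = 1;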

Example 20 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

From the class TestPipelinesFailover, method testFailoverRightBeforeCommitSynchronization.

/**
   * Test the scenario where the NN fails over after issuing a block
   * synchronization request, but before it is committed. The
   * DN running the recovery should then fail to commit the synchronization
   * and a later retry will succeed.
   */
@Test(timeout = 30000)
public void testFailoverRightBeforeCommitSynchronization() throws Exception {
    final Configuration conf = new Configuration();
    // Disable permissions so that another user can recover the lease.
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    FSDataOutputStream stm = null;
    final MiniDFSCluster cluster = newMiniCluster(conf, 3);
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        Thread.sleep(500);
        LOG.info("Starting with NN 0 active");
        FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
        stm = fs.create(TEST_PATH);
        // write a half block
        AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
        stm.hflush();
        // Look into the block manager on the active node for the block
        // under construction.
        NameNode nn0 = cluster.getNameNode(0);
        ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
        DatanodeDescriptor expectedPrimary = DFSTestUtil.getExpectedPrimaryNode(nn0, blk);
        LOG.info("Expecting block recovery to be triggered on DN " + expectedPrimary);
        // Find the corresponding DN daemon, and spy on its connection to the
        // active.
        DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
        DatanodeProtocolClientSideTranslatorPB nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(primaryDN, nn0);
        // Delay the commitBlockSynchronization call
        DelayAnswer delayer = new DelayAnswer(LOG);
        Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
            Mockito.eq(blk),
            Mockito.anyInt(),   // new genstamp
            Mockito.anyLong(),  // new length
            Mockito.eq(true),   // close file
            Mockito.eq(false),  // delete block
            (DatanodeID[]) Mockito.anyObject(),  // new targets
            (String[]) Mockito.anyObject());     // new target storages
        DistributedFileSystem fsOtherUser = createFsAsOtherUser(cluster, conf);
        assertFalse(fsOtherUser.recoverLease(TEST_PATH));
        LOG.info("Waiting for commitBlockSynchronization call from primary");
        delayer.waitForCall();
        LOG.info("Failing over to NN 1");
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        // Let the commitBlockSynchronization call go through, and check that
        // it failed with the correct exception.
        delayer.proceed();
        delayer.waitForResult();
        Throwable t = delayer.getThrown();
        if (t == null) {
            fail("commitBlockSynchronization call did not fail on standby");
        }
        GenericTestUtils.assertExceptionContains("Operation category WRITE is not supported", t);
        // Now, if we try again to recover the block, it should succeed on the new
        // active.
        loopRecoverLease(fsOtherUser, TEST_PATH);
        AppendTestUtil.check(fs, TEST_PATH, BLOCK_SIZE / 2);
    } finally {
        IOUtils.closeStream(stm);
        cluster.shutdown();
    }
}
Also used: NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) DelayAnswer(org.apache.hadoop.test.GenericTestUtils.DelayAnswer) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeProtocolClientSideTranslatorPB(org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB) DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) Test(org.junit.Test)
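
loopRecoverLease is another private helper that is not shown here. A plausible sketch (the retry count and delay are assumptions) built on DistributedFileSystem.recoverLease, which returns true once the lease has been released:

// Lease recovery is asynchronous on the NameNode side, so poll
// recoverLease until the new active grants it.
boolean recovered = false;
for (int i = 0; i < 30 && !recovered; i++) {
    recovered = fsOtherUser.recoverLease(TEST_PATH);
    if (!recovered) {
        Thread.sleep(1000);
    }
}
assertTrue("Lease was not recovered after retries", recovered);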

Aggregations

MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 507
Test (org.junit.Test): 429
Configuration (org.apache.hadoop.conf.Configuration): 403
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 312
Path (org.apache.hadoop.fs.Path): 290
FileSystem (org.apache.hadoop.fs.FileSystem): 211
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 183
IOException (java.io.IOException): 107
File (java.io.File): 83
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 64
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 53
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 35
RandomAccessFile (java.io.RandomAccessFile): 33
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33
URI (java.net.URI): 31
ArrayList (java.util.ArrayList): 29
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 28
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 26
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 25
HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest): 24