Example 16 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestFsDatasetImpl, the method testReportBadBlocks:

@Test(timeout = 30000)
public void testReportBadBlocks() throws Exception {
    boolean threwException = false;
    MiniDFSCluster cluster = null;
    try {
        Configuration config = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
        cluster.waitActive();
        Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
        DataNode dataNode = cluster.getDataNodes().get(0);
        ExtendedBlock block = new ExtendedBlock(cluster.getNamesystem().getBlockPoolId(), 0);
        try {
            // Test the reportBadBlocks when the volume is null
            dataNode.reportBadBlocks(block);
        } catch (NullPointerException npe) {
            threwException = true;
        }
        Thread.sleep(3000);
        Assert.assertFalse(threwException);
        Assert.assertEquals(0, cluster.getNamesystem().getCorruptReplicaBlocks());
        FileSystem fs = cluster.getFileSystem();
        Path filePath = new Path("testData");
        DFSTestUtil.createFile(fs, filePath, 1, (short) 1, 0);
        block = DFSTestUtil.getFirstBlock(fs, filePath);
        // Test for the overloaded method reportBadBlocks
        dataNode.reportBadBlocks(block, dataNode.getFSDataset().getFsVolumeReferences().get(0));
        Thread.sleep(3000);
        BlockManagerTestUtil.updateState(cluster.getNamesystem().getBlockManager());
        // Verify the bad block has been reported to namenode
        Assert.assertEquals(1, cluster.getNamesystem().getCorruptReplicaBlocks());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileSystem (org.apache.hadoop.fs.FileSystem), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Test (org.junit.Test)
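
Note that the volume reference obtained via getFsVolumeReferences() in the test above is never released, even though FsVolumeReferences is Closeable. A minimal sketch of the same overloaded reportBadBlocks call with explicit cleanup, assuming an import of org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi:

// Sketch only: same reportBadBlocks overload as above, but try-with-resources
// releases the volume references once the call returns.
try (FsDatasetSpi.FsVolumeReferences volumes =
        dataNode.getFSDataset().getFsVolumeReferences()) {
    dataNode.reportBadBlocks(block, volumes.get(0));
}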

Example 17 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestPipelinesFailover, the method testFailoverRightBeforeCommitSynchronization:

/**
   * Test the scenario where the NN fails over after issuing a block
   * synchronization request, but before it is committed. The
   * DN running the recovery should then fail to commit the synchronization
   * and a later retry will succeed.
   */
@Test(timeout = 30000)
public void testFailoverRightBeforeCommitSynchronization() throws Exception {
    final Configuration conf = new Configuration();
    // Disable permissions so that another user can recover the lease.
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    FSDataOutputStream stm = null;
    final MiniDFSCluster cluster = newMiniCluster(conf, 3);
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        Thread.sleep(500);
        LOG.info("Starting with NN 0 active");
        FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
        stm = fs.create(TEST_PATH);
        // write a half block
        AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
        stm.hflush();
        // Look into the block manager on the active node for the block
        // under construction.
        NameNode nn0 = cluster.getNameNode(0);
        ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
        DatanodeDescriptor expectedPrimary = DFSTestUtil.getExpectedPrimaryNode(nn0, blk);
        LOG.info("Expecting block recovery to be triggered on DN " + expectedPrimary);
        // Find the corresponding DN daemon, and spy on its connection to the
        // active.
        DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
        DatanodeProtocolClientSideTranslatorPB nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(primaryDN, nn0);
        // Delay the commitBlockSynchronization call
        DelayAnswer delayer = new DelayAnswer(LOG);
        Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
                Mockito.eq(blk),
                Mockito.anyInt(),   // new genstamp
                Mockito.anyLong(),  // new length
                Mockito.eq(true),   // close file
                Mockito.eq(false),  // delete block
                (DatanodeID[]) Mockito.anyObject(),  // new targets
                (String[]) Mockito.anyObject());     // new target storages
        DistributedFileSystem fsOtherUser = createFsAsOtherUser(cluster, conf);
        assertFalse(fsOtherUser.recoverLease(TEST_PATH));
        LOG.info("Waiting for commitBlockSynchronization call from primary");
        delayer.waitForCall();
        LOG.info("Failing over to NN 1");
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        // Let the commitBlockSynchronization call go through, and check that
        // it failed with the correct exception.
        delayer.proceed();
        delayer.waitForResult();
        Throwable t = delayer.getThrown();
        if (t == null) {
            fail("commitBlockSynchronization call did not fail on standby");
        }
        GenericTestUtils.assertExceptionContains("Operation category WRITE is not supported", t);
        // Now, if we try again to recover the block, it should succeed on the new
        // active.
        loopRecoverLease(fsOtherUser, TEST_PATH);
        AppendTestUtil.check(fs, TEST_PATH, BLOCK_SIZE / 2);
    } finally {
        IOUtils.closeStream(stm);
        cluster.shutdown();
    }
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), DelayAnswer (org.apache.hadoop.test.GenericTestUtils.DelayAnswer), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
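
For readers unfamiliar with DelayAnswer (org.apache.hadoop.test.GenericTestUtils.DelayAnswer), the handshake used above follows a fixed pattern. A sketch, with someRpc standing in as a hypothetical mocked method:

// The DN-side thread calls someRpc() on the spy; the test thread coordinates.
DelayAnswer delayer = new DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(spy).someRpc();  // someRpc is hypothetical

delayer.waitForCall();    // block until the spied method is entered
// ... change cluster state while the call is parked ...
delayer.proceed();        // let the parked call continue for real
delayer.waitForResult();  // wait for the call to finish
Throwable thrown = delayer.getThrown();  // non-null if the call threw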

Example 18 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestDNFencing, the method testRBWReportArrivesAfterEdits:

/**
   * Another regression test for HDFS-2742. This tests the following sequence:
   * - DN does a block report while file is open. This BR contains
   *   the block in RBW state.
   * - The block report is delayed in reaching the standby.
   * - The file is closed.
   * - The standby processes the OP_ADD and OP_CLOSE operations before
   *   the RBW block report arrives.
   * - The standby should not mark the block as corrupt.
   */
@Test
public void testRBWReportArrivesAfterEdits() throws Exception {
    final CountDownLatch brFinished = new CountDownLatch(1);
    DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG) {

        @Override
        protected Object passThrough(InvocationOnMock invocation) throws Throwable {
            try {
                return super.passThrough(invocation);
            } finally {
                // inform the test that our block report went through.
                brFinished.countDown();
            }
        }
    };
    FSDataOutputStream out = fs.create(TEST_FILE_PATH);
    try {
        AppendTestUtil.write(out, 0, 10);
        out.hflush();
        DataNode dn = cluster.getDataNodes().get(0);
        DatanodeProtocolClientSideTranslatorPB spy = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn2);
        Mockito.doAnswer(delayer).when(spy).blockReport(
                Mockito.<DatanodeRegistration>anyObject(),
                Mockito.anyString(),
                Mockito.<StorageBlockReport[]>anyObject(),
                Mockito.<BlockReportContext>anyObject());
        dn.scheduleAllBlockReport(0);
        delayer.waitForCall();
    } finally {
        IOUtils.closeStream(out);
    }
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    delayer.proceed();
    brFinished.await();
    // Verify that no replicas are marked corrupt, and that the
    // file is readable from the failed-over standby.
    BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
    BlockManagerTestUtil.updateState(nn2.getNamesystem().getBlockManager());
    assertEquals(0, nn1.getNamesystem().getCorruptReplicaBlocks());
    assertEquals(0, nn2.getNamesystem().getCorruptReplicaBlocks());
    DFSTestUtil.readFile(fs, TEST_FILE_PATH);
}
Also used: DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), InvocationOnMock (org.mockito.invocation.InvocationOnMock), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), DelayAnswer (org.apache.hadoop.test.GenericTestUtils.DelayAnswer), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), CountDownLatch (java.util.concurrent.CountDownLatch), Test (org.junit.Test)
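
One hardening note: brFinished.await() above blocks indefinitely if the delayed block report never completes. A bounded wait is a small variant worth considering (a sketch, assuming java.util.concurrent.TimeUnit is imported):

// Sketch: fail fast instead of hanging the test run if the block report
// never arrives.
assertTrue("block report did not complete within 30 seconds",
        brFinished.await(30, TimeUnit.SECONDS));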

Example 19 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestFileAppend4, the method testAppendInsufficientLocations:

/**
   * Test that an append with no locations fails with an exception
   * showing insufficient locations.
   */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
    Configuration conf = new Configuration();
    // lower heartbeat interval for fast recognition of DN failure
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    DistributedFileSystem fileSystem = null;
    try {
        // create a file with replication 2
        fileSystem = cluster.getFileSystem();
        Path f = new Path("/testAppend");
        FSDataOutputStream create = fileSystem.create(f, (short) 2);
        create.write("/testAppend".getBytes());
        create.close();
        // Wait until the expected replication is reached
        DFSTestUtil.waitReplication(fileSystem, f, (short) 2);
        // Shut down all DNs that have the last block location for the file
        LocatedBlocks lbs = fileSystem.dfs.getNamenode().getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
        List<DataNode> dnsOfCluster = cluster.getDataNodes();
        DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().getLocations();
        for (DataNode dn : dnsOfCluster) {
            for (DatanodeInfo loc : dnsWithLocations) {
                if (dn.getDatanodeId().equals(loc)) {
                    dn.shutdown();
                    DFSTestUtil.waitForDatanodeDeath(dn);
                }
            }
        }
        // Wait till 0 replication is recognized
        DFSTestUtil.waitReplication(fileSystem, f, (short) 0);
        // Try to append; the DNs that held the block are down, so none of the
        // remaining live DNs have it.
        try {
            fileSystem.append(f);
            fail("Append should fail because insufficient locations");
        } catch (IOException e) {
            LOG.info("Expected exception: ", e);
        }
        FSDirectory dir = cluster.getNamesystem().getFSDirectory();
        final INodeFile inode = INodeFile.valueOf(dir.getINode("/testAppend"), "/testAppend");
        assertTrue("File should remain closed", !inode.isUnderConstruction());
    } finally {
        if (null != fileSystem) {
            fileSystem.close();
        }
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), Configuration (org.apache.hadoop.conf.Configuration), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), FSDirectory (org.apache.hadoop.hdfs.server.namenode.FSDirectory), IOException (java.io.IOException), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
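
The manual try/catch around the failing append could also be expressed with Hadoop's LambdaTestUtils, which asserts the expected exception type directly. A sketch of the equivalent assertion (not what the test above uses; assumes fileSystem is effectively final at that point):

// Sketch: org.apache.hadoop.test.LambdaTestUtils.intercept asserts that the
// callable throws the given exception type.
LambdaTestUtils.intercept(IOException.class,
        () -> fileSystem.append(f));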

Example 20 with DataNode

Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.

From the class TestFileChecksum, the method getFileChecksum:

/**
 * Computes the checksum of the given file. If killDn is set, one of the
 * DataNodes holding the file is shut down first and restarted afterwards.
 * A negative range means the whole file is checksummed.
 */
private FileChecksum getFileChecksum(String filePath, int range, boolean killDn) throws Exception {
    int dnIdxToDie = -1;
    if (killDn) {
        dnIdxToDie = getDataNodeToKill(filePath);
        DataNode dnToDie = cluster.getDataNodes().get(dnIdxToDie);
        shutdownDataNode(dnToDie);
    }
    Path testPath = new Path(filePath);
    FileChecksum fc;
    if (range >= 0) {
        fc = fs.getFileChecksum(testPath, range);
    } else {
        fc = fs.getFileChecksum(testPath);
    }
    if (dnIdxToDie != -1) {
        cluster.restartDataNode(dnIdxToDie);
    }
    return fc;
}
Also used: Path (org.apache.hadoop.fs.Path), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileChecksum (org.apache.hadoop.fs.FileChecksum)
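
A typical use of this helper is to compare checksums computed with and without a DataNode dying mid-computation; the checksum should not change. A usage sketch (the file path and assertion message are hypothetical):

// Sketch: a range of -1 checksums the whole file, per the method above.
FileChecksum healthy = getFileChecksum("/testFile", -1, false);
FileChecksum withDeadDn = getFileChecksum("/testFile", -1, true);
Assert.assertEquals("checksum changed after DataNode failure",
        healthy, withDeadDn);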

Aggregations

DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode)165 Test (org.junit.Test)110 Path (org.apache.hadoop.fs.Path)78 Configuration (org.apache.hadoop.conf.Configuration)60 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)47 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)37 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)37 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)35 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)29 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)28 FileSystem (org.apache.hadoop.fs.FileSystem)27 IOException (java.io.IOException)24 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)20 LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)20 ArrayList (java.util.ArrayList)17 DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode)17 File (java.io.File)15 FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem)14 DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor)13 FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi)12