Example 36 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestWriteToReplica method testClose.

// test close
@Test
public void testClose() throws Exception {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
    try {
        cluster.waitActive();
        DataNode dn = cluster.getDataNodes().get(0);
        FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
        // set up replicasMap
        String bpid = cluster.getNamesystem().getBlockPoolId();
        ExtendedBlock[] blocks = setup(bpid, cluster.getFsDatasetTestUtils(dn));
        // test close
        testClose(dataSet, blocks);
    } finally {
        cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Test (org.junit.Test)
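
The setup helper used above is defined elsewhere in TestWriteToReplica and is elided here. For orientation only, a hypothetical sketch of building such an ExtendedBlock array; the four-argument constructor is the same one used in Example 38, and every ID, length, and generation stamp below is invented:

// Hypothetical sketch: all numbers are invented, not taken from the test.
private static ExtendedBlock[] sampleBlocks(String bpid) {
    return new ExtendedBlock[] {
        // ExtendedBlock(poolId, blockId, numBytes, generationStamp)
        new ExtendedBlock(bpid, 1, 1024, 1001),
        new ExtendedBlock(bpid, 2, 2048, 1002)
    };
}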

Example 37 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestInterDatanodeProtocol method checkMetaInfo.

public static void checkMetaInfo(ExtendedBlock b, DataNode dn) throws IOException {
    Block metainfo = DataNodeTestUtils.getFSDataset(dn).getStoredBlock(b.getBlockPoolId(), b.getBlockId());
    Assert.assertEquals(b.getBlockId(), metainfo.getBlockId());
    Assert.assertEquals(b.getNumBytes(), metainfo.getNumBytes());
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Block (org.apache.hadoop.hdfs.protocol.Block)
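
A possible call site for checkMetaInfo, assuming a running MiniDFSCluster and a file already written at an illustrative path; DFSTestUtil.getFirstBlock and cluster.getDataNodes() both appear in the other examples on this page:

// Sketch: `cluster` is a started MiniDFSCluster; /foo is an illustrative path.
FileSystem fs = cluster.getFileSystem();
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, new Path("/foo"));
DataNode dn = cluster.getDataNodes().get(0);
// asserts that the stored block's id and length match the client's view
checkMetaInfo(b, dn);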

Example 38 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestInterDatanodeProtocol method testUpdateReplicaUnderRecovery.

/**
   * Test for
   * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long, long)}
   */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        cluster.waitActive();
        // create a file
        DistributedFileSystem dfs = cluster.getFileSystem();
        String filestr = "/foo";
        Path filepath = new Path(filestr);
        DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
        // get block info
        final LocatedBlock locatedblock = getLastLocatedBlock(DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
        final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
        Assert.assertTrue(datanodeinfo.length > 0);
        // get DataNode and FSDataset objects
        final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
        Assert.assertNotNull(datanode);
        // initReplicaRecovery
        final ExtendedBlock b = locatedblock.getBlock();
        final long recoveryid = b.getGenerationStamp() + 1;
        final long newlength = b.getNumBytes() - 1;
        final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
        final ReplicaRecoveryInfo rri = fsdataset.initReplicaRecovery(new RecoveringBlock(b, null, recoveryid));
        // check replica
        final Replica replica = cluster.getFsDatasetTestUtils(datanode).fetchReplica(b);
        Assert.assertEquals(ReplicaState.RUR, replica.getState());
        // check meta data before update
        cluster.getFsDatasetTestUtils(datanode).checkStoredReplica(replica);
        // case "THIS IS NOT SUPPOSED TO HAPPEN":
        // (block length) != (stored replica's on-disk length)
        {
            // create a block with the same id and gs but a different length
            final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(), rri.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
            try {
                // the update should fail
                fsdataset.updateReplicaUnderRecovery(tmp, recoveryid, tmp.getBlockId(), newlength);
                Assert.fail();
            } catch (IOException ioe) {
                System.out.println("GOOD: getting " + ioe);
            }
        }
        // update
        final Replica r = fsdataset.updateReplicaUnderRecovery(new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid, rri.getBlockId(), newlength);
        Assert.assertNotNull(r);
        Assert.assertNotNull(r.getStorageUuid());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FinalizedReplica (org.apache.hadoop.hdfs.server.datanode.FinalizedReplica), Replica (org.apache.hadoop.hdfs.server.datanode.Replica), ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), Test (org.junit.Test)
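
Two ExtendedBlock constructors carry this test: the four-argument form builds the deliberately mismatched block, and the (poolId, Block) form wraps the ReplicaRecoveryInfo, which extends Block. In isolation, with invented values:

// Illustrative values only.
String bpid = "BP-example";
// (poolId, blockId, numBytes, generationStamp): one byte short on purpose
ExtendedBlock mismatched = new ExtendedBlock(bpid, 100L, 1023L, 2001L);
// (poolId, Block): accepts any Block subclass, e.g. a ReplicaRecoveryInfo
ExtendedBlock wrapped = new ExtendedBlock(bpid, new Block(100L, 1024L, 2001L));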

Example 39 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestNameNodeMetadataConsistency method testGenerationStampInFuture.

/**
   * This test creates a file and modifies the block's generation stamp to a
   * number that the name node has not seen yet. It then asserts that the name
   * node moves into safe mode while it is in startup mode.
   */
@Test
public void testGenerationStampInFuture() throws Exception {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    OutputStream ostream = fs.create(filePath1);
    ostream.write(TEST_DATA_IN_FUTURE.getBytes());
    ostream.close();
    // Rewrite the generation stamp to one in the future.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath1);
    final long genStamp = block.getGenerationStamp();
    final int datanodeIndex = 0;
    cluster.changeGenStampOfBlock(datanodeIndex, block, genStamp + 1);
    // stop the data node so that it won't remove the block
    final DataNodeProperties dnProps = cluster.stopDataNode(datanodeIndex);
    // Simulate Namenode forgetting a Block
    cluster.restartNameNode(true);
    cluster.getNameNode().getNamesystem().writeLock();
    BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager().getStoredBlock(block.getLocalBlock());
    bInfo.delete();
    cluster.getNameNode().getNamesystem().getBlockManager().removeBlock(bInfo);
    cluster.getNameNode().getNamesystem().writeUnlock();
    // we also need to tell the block manager that we are in the startup path
    BlockManagerTestUtil.setStartupSafeModeForTest(cluster.getNameNode().getNamesystem().getBlockManager());
    cluster.restartDataNode(dnProps);
    waitForNumBytes(TEST_DATA_IN_FUTURE.length());
    // Make sure that we find all written bytes in the future block
    assertEquals(TEST_DATA_IN_FUTURE.length(), cluster.getNameNode().getBytesWithFutureGenerationStamps());
    // Assert safemode reason
    assertTrue(cluster.getNameNode().getNamesystem().getSafeModeTip().contains("Name node detected blocks with generation stamps in future"));
}
Also used: DataNodeProperties (org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FileSystem (org.apache.hadoop.fs.FileSystem), OutputStream (java.io.OutputStream), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Test (org.junit.Test)
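
For context on the gen-stamp manipulation: changeGenStampOfBlock alters the replica's generation stamp on the chosen datanode, while the ExtendedBlock held by the test can be bumped in memory through its setter. A hedged sketch mirroring the values above:

// Sketch: fs and filePath1 are the same objects used in the test above.
ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, filePath1);
long gs = blk.getGenerationStamp();
blk.setGenerationStamp(gs + 1);  // now newer than anything the NN has recorded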

Example 40 with ExtendedBlock

use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

the class TestNameNodeMetadataConsistency method testEnsureGenStampsIsStartupOnly.

/**
   * Pretty much the same test as above, but it does not set up startup safe
   * mode, hence we should not see a positive count of blocks with future
   * generation stamps.
   */
@Test
public void testEnsureGenStampsIsStartupOnly() throws Exception {
    String testData = " This is test data";
    cluster.restartDataNodes();
    cluster.restartNameNodes();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    OutputStream ostream = fs.create(filePath2);
    ostream.write(testData.getBytes());
    ostream.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath2);
    long genStamp = block.getGenerationStamp();
    // Rewrite the generation stamp to one in the future.
    cluster.changeGenStampOfBlock(0, block, genStamp + 1);
    MiniDFSCluster.DataNodeProperties dnProps = cluster.stopDataNode(0);
    // Simulate the Namenode forgetting a block
    cluster.restartNameNode(true);
    BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager().getStoredBlock(block.getLocalBlock());
    cluster.getNameNode().getNamesystem().writeLock();
    bInfo.delete();
    cluster.getNameNode().getNamesystem().getBlockManager().removeBlock(bInfo);
    cluster.getNameNode().getNamesystem().writeUnlock();
    cluster.restartDataNode(dnProps);
    waitForNumBytes(0);
    // Make sure that there are no future bytes, since startup safe mode
    // is not active.
    assertEquals(0, cluster.getNameNode().getBytesWithFutureGenerationStamps());
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), DataNodeProperties (org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FileSystem (org.apache.hadoop.fs.FileSystem), OutputStream (java.io.OutputStream), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Test (org.junit.Test)
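
Both metadata-consistency tests call getLocalBlock() when querying the BlockManager: an ExtendedBlock is just a Block plus a block-pool ID, and stripping the pool ID yields the key the block map expects. A minimal illustration with invented values:

// Invented values; only the ExtendedBlock/Block relationship matters here.
ExtendedBlock eb = new ExtendedBlock("BP-example", 42L, 1024L, 1001L);
Block local = eb.getLocalBlock();  // drops the pool ID
assert local.getBlockId() == eb.getBlockId();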

Aggregations

ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 208
Test (org.junit.Test): 124
Path (org.apache.hadoop.fs.Path): 91
Configuration (org.apache.hadoop.conf.Configuration): 71
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 63
FileSystem (org.apache.hadoop.fs.FileSystem): 62
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 55
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 53
IOException (java.io.IOException): 41
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 41
Block (org.apache.hadoop.hdfs.protocol.Block): 38
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 34
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 32
File (java.io.File): 22
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 18
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 18
InetSocketAddress (java.net.InetSocketAddress): 17
ArrayList (java.util.ArrayList): 17