Example 36 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

From the class FSNamesystem, method listCorruptFileBlocks.

/**
   * @param path Restrict corrupt files to this portion of namespace.
   * @param cookieTab Support for continuation; cookieTab tells where
   *                  to start from
   * @return a list in which each entry describes a corrupt file/block
   * @throws IOException
   */
Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path, String[] cookieTab) throws IOException {
    checkSuperuserPrivilege();
    checkOperation(OperationCategory.READ);
    int count = 0;
    ArrayList<CorruptFileBlockInfo> corruptFiles = new ArrayList<CorruptFileBlockInfo>();
    if (cookieTab == null) {
        cookieTab = new String[] { null };
    }
    // Do a quick check if there are any corrupt files without taking the lock
    if (blockManager.getMissingBlocksCount() == 0) {
        if (cookieTab[0] == null) {
            cookieTab[0] = String.valueOf(getIntCookie(cookieTab[0]));
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("there are no corrupt file blocks.");
        }
        return corruptFiles;
    }
    readLock();
    try {
        checkOperation(OperationCategory.READ);
        if (!blockManager.isPopulatingReplQueues()) {
            throw new IOException("Cannot run listCorruptFileBlocks because " + "replication queues have not been initialized.");
        }
        // print a limited # of corrupt files per call
        final Iterator<BlockInfo> blkIterator = blockManager.getCorruptReplicaBlockIterator();
        int skip = getIntCookie(cookieTab[0]);
        for (int i = 0; i < skip && blkIterator.hasNext(); i++) {
            blkIterator.next();
        }
        while (blkIterator.hasNext()) {
            BlockInfo blk = blkIterator.next();
            final INodeFile inode = getBlockCollection(blk);
            skip++;
            if (inode != null) {
                String src = inode.getFullPathName();
                if (src.startsWith(path)) {
                    corruptFiles.add(new CorruptFileBlockInfo(src, blk));
                    count++;
                    if (count >= DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED)
                        break;
                }
            }
        }
        cookieTab[0] = String.valueOf(skip);
        if (LOG.isDebugEnabled()) {
            LOG.debug("list corrupt file blocks returned: " + count);
        }
        return corruptFiles;
    } finally {
        readUnlock("listCorruptFileBlocks");
    }
}
Also used: BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ArrayList (java.util.ArrayList), IOException (java.io.IOException)
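
The cookieTab array is the continuation handle: each call skips the number of blocks recorded in cookieTab[0], returns at most DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED entries, and writes the new offset back into cookieTab[0]. A minimal caller-side sketch of paging through every corrupt block follows; the fsn handle and same-package access are assumptions of this sketch, not part of the example above.

// Hedged sketch: page through all corrupt file blocks under "/" using the
// cookieTab continuation contract. Assumes a same-package caller holding
// an FSNamesystem reference named fsn.
String[] cookieTab = new String[] { null };
Collection<FSNamesystem.CorruptFileBlockInfo> batch;
do {
    // Each call resumes from the offset stored in cookieTab[0].
    batch = fsn.listCorruptFileBlocks("/", cookieTab);
    for (FSNamesystem.CorruptFileBlockInfo info : batch) {
        System.out.println(info);
    }
} while (!batch.isEmpty());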

Example 37 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

From the class TestNameNodeMetadataConsistency, method testGenerationStampInFuture.

/**
   * This test creates a file and modifies the block generation stamp to a
   * number that the name node has not seen yet. It then asserts that the
   * name node moves into safe mode while it is in startup mode.
   */
@Test
public void testGenerationStampInFuture() throws Exception {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    OutputStream ostream = fs.create(filePath1);
    ostream.write(TEST_DATA_IN_FUTURE.getBytes());
    ostream.close();
    // Rewrite the generation stamp to one in the future.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath1);
    final long genStamp = block.getGenerationStamp();
    final int datanodeIndex = 0;
    cluster.changeGenStampOfBlock(datanodeIndex, block, genStamp + 1);
    // Stop the data node so that it won't remove the block.
    final DataNodeProperties dnProps = cluster.stopDataNode(datanodeIndex);
    // Simulate the NameNode forgetting a block.
    cluster.restartNameNode(true);
    cluster.getNameNode().getNamesystem().writeLock();
    BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager().getStoredBlock(block.getLocalBlock());
    bInfo.delete();
    cluster.getNameNode().getNamesystem().getBlockManager().removeBlock(bInfo);
    cluster.getNameNode().getNamesystem().writeUnlock();
    // We also need to tell the block manager that we are on the startup path.
    BlockManagerTestUtil.setStartupSafeModeForTest(cluster.getNameNode().getNamesystem().getBlockManager());
    cluster.restartDataNode(dnProps);
    waitForNumBytes(TEST_DATA_IN_FUTURE.length());
    // Make sure that we find all written bytes in future block
    assertEquals(TEST_DATA_IN_FUTURE.length(), cluster.getNameNode().getBytesWithFutureGenerationStamps());
    // Assert safemode reason
    assertTrue(cluster.getNameNode().getNamesystem().getSafeModeTip().contains("Name node detected blocks with generation stamps in future"));
}
Also used: DataNodeProperties (org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FileSystem (org.apache.hadoop.fs.FileSystem), OutputStream (java.io.OutputStream), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Test (org.junit.Test)
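
The waitForNumBytes helper is not shown in this excerpt. A plausible implementation, polling until the NameNode reports the expected number of bytes carried by blocks with future generation stamps, might look like the following; the polling interval and timeout are assumptions (GenericTestUtils is org.apache.hadoop.test.GenericTestUtils, Supplier is com.google.common.base.Supplier).

// Hypothetical sketch of the waitForNumBytes(...) helper referenced above.
private void waitForNumBytes(final int numBytes) throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
            // Done once the NameNode accounts for exactly numBytes of
            // data in blocks whose generation stamps lie in the future.
            return cluster.getNameNode()
                .getBytesWithFutureGenerationStamps() == numBytes;
        }
    }, 100, 60000);
}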

Example 38 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

From the class TestNameNodeMetadataConsistency, method testEnsureGenStampsIsStartupOnly.

/**
   * Pretty much the same test as above, but does not set up safeMode == true;
   * hence we should not see a positive count of blocks in the future.
   */
@Test
public void testEnsureGenStampsIsStartupOnly() throws Exception {
    String testData = " This is test data";
    cluster.restartDataNodes();
    cluster.restartNameNodes();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    OutputStream ostream = fs.create(filePath2);
    ostream.write(testData.getBytes());
    ostream.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath2);
    long genStamp = block.getGenerationStamp();
    // Rewrite the generation stamp to one in the future.
    cluster.changeGenStampOfBlock(0, block, genStamp + 1);
    MiniDFSCluster.DataNodeProperties dnProps = cluster.stopDataNode(0);
    // Simulate the NameNode forgetting a block.
    cluster.restartNameNode(true);
    BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager().getStoredBlock(block.getLocalBlock());
    cluster.getNameNode().getNamesystem().writeLock();
    bInfo.delete();
    cluster.getNameNode().getNamesystem().getBlockManager().removeBlock(bInfo);
    cluster.getNameNode().getNamesystem().writeUnlock();
    cluster.restartDataNode(dnProps);
    waitForNumBytes(0);
    // Make sure that there are no bytes in the future, since startup safe
    // mode is not active.
    assertEquals(0, cluster.getNameNode().getBytesWithFutureGenerationStamps());
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), DataNodeProperties (org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FileSystem (org.apache.hadoop.fs.FileSystem), OutputStream (java.io.OutputStream), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Test (org.junit.Test)
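
Both tests repeat the same delete-a-block-under-the-write-lock sequence. A small refactoring sketch follows; this helper is not part of the original tests, and the try/finally additionally guarantees the lock is released if delete() or removeBlock() throws.

// Hedged helper: make the NameNode forget a block, holding the namesystem
// write lock for the whole mutation.
private static void forgetBlock(MiniDFSCluster cluster, ExtendedBlock block) {
    FSNamesystem fsn = cluster.getNameNode().getNamesystem();
    fsn.writeLock();
    try {
        BlockInfo bInfo = fsn.getBlockManager()
            .getStoredBlock(block.getLocalBlock());
        bInfo.delete();
        fsn.getBlockManager().removeBlock(bInfo);
    } finally {
        fsn.writeUnlock();
    }
}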

Example 39 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

From the class TestFsck, method testFsckError.

/** Test that fsck returns -1 in case of failure.
   * 
   * @throws Exception
   */
@Test
public void testFsckError() throws Exception {
    // bring up a one-node cluster
    cluster = new MiniDFSCluster.Builder(conf).build();
    String fileName = "/test.txt";
    Path filePath = new Path(fileName);
    FileSystem fs = cluster.getFileSystem();
    // create a one-block file
    DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
    DFSTestUtil.waitReplication(fs, filePath, (short) 1);
    // intentionally corrupt NN data structure
    INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode(fileName, DirOp.READ);
    final BlockInfo[] blocks = node.getBlocks();
    assertEquals(1, blocks.length);
    // set the block length to be negative
    blocks[0].setNumBytes(-1L);
    // run fsck and expect a failure with -1 as the error code
    String outStr = runFsck(conf, -1, true, fileName);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
    // clean up file system
    fs.delete(filePath, true);
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Matchers.anyString (org.mockito.Matchers.anyString), Test (org.junit.Test)
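
The runFsck helper is not shown in this excerpt. Based on how it is called above, a plausible shape shells into the fsck tool, captures its output, and optionally checks the exit code; the exact body in TestFsck may differ (DFSck is org.apache.hadoop.hdfs.tools.DFSck, ToolRunner is org.apache.hadoop.util.ToolRunner).

// Hedged sketch of runFsck(conf, expectedErrCode, checkErrorCode, paths...).
static String runFsck(Configuration conf, int expectedErrCode,
        boolean checkErrorCode, String... path) throws Exception {
    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bStream, true);
    // Run the fsck command line against the given paths.
    int errCode = ToolRunner.run(new DFSck(conf, out), path);
    if (checkErrorCode) {
        assertEquals(expectedErrCode, errCode);
    }
    return bStream.toString();
}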

Example 40 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

From the class TestFSEditLogLoader, method testUpdateStripedBlocks.

@Test
public void testUpdateStripedBlocks() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, testECPolicy.getName());
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/ec";
        String testFile = "testfile_002";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser2";
        String clientMachine = "testMachine2";
        long blkId = 1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        short blockNum = (short) testECPolicy.getNumDataUnits();
        short parityNum = (short) testECPolicy.getNumParityUnits();
        // Set the erasure coding policy on the directory.
        fs.mkdir(new Path(testDir), new FsPermission("755"));
        fs.getClient().getNamenode().setErasureCodingPolicy(testDir, testECPolicy.getName());
        //create a file with striped blocks
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        BlockInfoStriped stripedBlk = new BlockInfoStriped(new Block(blkId, blkNumBytes, timestamp), testECPolicy);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(stripedBlk);
        fns.getEditLog().logAddBlock(testFilePath, file);
        TestINodeFile.toCompleteFile(file);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        fns.leaveSafeMode(false);
        //update the last block
        long newBlkNumBytes = 1024 * 8;
        long newTimestamp = 1426222918 + 3600;
        file.toUnderConstruction(clientName, clientMachine);
        file.getLastBlock().setNumBytes(newBlkNumBytes);
        file.getLastBlock().setGenerationStamp(newTimestamp);
        fns.getEditLog().logUpdateBlocks(testFilePath, file, true);
        TestINodeFile.toCompleteFile(file);
        // After the namenode restarts, if the loaded block matches the values
        // set above (new block size and timestamp), the edit log was
        // successfully applied to the fsimage.
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        INodeFile inodeLoaded = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        assertTrue(inodeLoaded.isStriped());
        BlockInfo[] blks = inodeLoaded.getBlocks();
        assertEquals(1, blks.length);
        assertTrue(blks[0].isStriped());
        assertEquals(blkId, blks[0].getBlockId());
        assertEquals(newBlkNumBytes, blks[0].getNumBytes());
        assertEquals(newTimestamp, blks[0].getGenerationStamp());
        assertEquals(blockNum, ((BlockInfoStriped) blks[0]).getDataBlockNum());
        assertEquals(parityNum, ((BlockInfoStriped) blks[0]).getParityBlockNum());
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Block (org.apache.hadoop.hdfs.protocol.Block), FsPermission (org.apache.hadoop.fs.permission.FsPermission), Test (org.junit.Test)
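
The data/parity unit counts asserted at the end are fixed by the erasure coding policy a BlockInfoStriped is constructed with, so they survive the edit-log round trip unchanged. A tiny hedged sketch using the test's testECPolicy field:

// Hedged sketch: a striped block's geometry comes from its policy.
BlockInfoStriped sb = new BlockInfoStriped(
    new Block(1, 1024, 1001), testECPolicy);
assertEquals(testECPolicy.getNumDataUnits(), sb.getDataBlockNum());
assertEquals(testECPolicy.getNumParityUnits(), sb.getParityBlockNum());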

Aggregations

BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 84 uses
Test (org.junit.Test): 28 uses
Path (org.apache.hadoop.fs.Path): 27 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 22 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 16 uses
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 14 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 13 uses
IOException (java.io.IOException): 11 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11 uses
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 11 uses
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 11 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10 uses
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7 uses
Configuration (org.apache.hadoop.conf.Configuration): 6 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 uses
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 5 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5 uses
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 5 uses