Example 11 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

The class FSNamesystem, method recoverLeaseInternal.

boolean recoverLeaseInternal(RecoverLeaseOp op, INodesInPath iip, String src, String holder, String clientMachine, boolean force) throws IOException {
    assert hasWriteLock();
    INodeFile file = iip.getLastINode().asFile();
    if (file.isUnderConstruction()) {
        //
        // If the file is under construction, then it must be in our
        // leases. Find the appropriate lease record.
        //
        Lease lease = leaseManager.getLease(holder);
        if (!force && lease != null) {
            Lease leaseFile = leaseManager.getLease(file);
            if (leaseFile != null && leaseFile.equals(lease)) {
                // holder is trying to obtain it again.
                throw new AlreadyBeingCreatedException(op.getExceptionMessage(src, holder, clientMachine, holder + " is already the current lease holder."));
            }
        }
        //
        // Find the original holder.
        //
        FileUnderConstructionFeature uc = file.getFileUnderConstructionFeature();
        String clientName = uc.getClientName();
        lease = leaseManager.getLease(clientName);
        if (lease == null) {
            throw new AlreadyBeingCreatedException(op.getExceptionMessage(src, holder, clientMachine, "the file is under construction but no leases found."));
        }
        if (force) {
            // close now: no need to wait for soft lease expiration and 
            // close only the file src
            LOG.info("recoverLease: " + lease + ", src=" + src + " from client " + clientName);
            return internalReleaseLease(lease, src, iip, holder);
        } else {
            assert lease.getHolder().equals(clientName) : "Current lease holder " + lease.getHolder() + " does not match file creator " + clientName;
            //
            // If the original holder has not renewed in the last SOFTLIMIT
            // period, then start lease recovery.
            //
            if (lease.expiredSoftLimit()) {
                LOG.info("startFile: recover " + lease + ", src=" + src + " client " + clientName);
                if (internalReleaseLease(lease, src, iip, null)) {
                    return true;
                } else {
                    throw new RecoveryInProgressException(op.getExceptionMessage(src, holder, clientMachine, "lease recovery is in progress. Try again later."));
                }
            } else {
                final BlockInfo lastBlock = file.getLastBlock();
                if (lastBlock != null && lastBlock.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
                    throw new RecoveryInProgressException(op.getExceptionMessage(src, holder, clientMachine, "another recovery is in progress by " + clientName + " on " + uc.getClientMachine()));
                } else {
                    throw new AlreadyBeingCreatedException(op.getExceptionMessage(src, holder, clientMachine, "this file lease is currently owned by " + clientName + " on " + uc.getClientMachine()));
                }
            }
        }
    } else {
        return true;
    }
}
Also used: AlreadyBeingCreatedException (org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException), Lease (org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), RecoveryInProgressException (org.apache.hadoop.hdfs.protocol.RecoveryInProgressException)
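
For context, a short client-side sketch of how this code path is typically reached, assuming an open DistributedFileSystem handle (the helper name waitForLeaseRecovery is illustrative, not from the Hadoop source): DistributedFileSystem#recoverLease asks the NameNode to release another client's lease, and since recovery completes asynchronously, callers usually poll until it returns true.

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoveryClient {

    // Repeatedly asks the NameNode to recover the lease on 'file' until it
    // reports the file as closed, or until the timeout elapses.
    public static boolean waitForLeaseRecovery(DistributedFileSystem dfs,
            Path file, long timeoutMs)
            throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (System.currentTimeMillis() < deadline) {
            // true means the lease was released and the file was closed
            if (dfs.recoverLease(file)) {
                return true;
            }
            // recovery is asynchronous; back off and try again
            Thread.sleep(1000L);
        }
        return false;
    }
}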

Example 12 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

The class FSNamesystem, method listCorruptFileBlocks.

/**
   * @param path Restrict corrupt files to this portion of the namespace.
   * @param cookieTab Support for continuation; cookieTab tells where
   *                  to start from.
   * @return a list in which each entry describes a corrupt file/block
   * @throws IOException
   */
Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path, String[] cookieTab) throws IOException {
    checkSuperuserPrivilege();
    checkOperation(OperationCategory.READ);
    int count = 0;
    ArrayList<CorruptFileBlockInfo> corruptFiles = new ArrayList<CorruptFileBlockInfo>();
    if (cookieTab == null) {
        cookieTab = new String[] { null };
    }
    // Do a quick check if there are any corrupt files without taking the lock
    if (blockManager.getMissingBlocksCount() == 0) {
        if (cookieTab[0] == null) {
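            // getIntCookie treats a null cookie as 0, so this writes back "0"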
            cookieTab[0] = String.valueOf(getIntCookie(cookieTab[0]));
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("there are no corrupt file blocks.");
        }
        return corruptFiles;
    }
    readLock();
    try {
        checkOperation(OperationCategory.READ);
        if (!blockManager.isPopulatingReplQueues()) {
            throw new IOException("Cannot run listCorruptFileBlocks because " + "replication queues have not been initialized.");
        }
        // print a limited # of corrupt files per call
        final Iterator<BlockInfo> blkIterator = blockManager.getCorruptReplicaBlockIterator();
        int skip = getIntCookie(cookieTab[0]);
        for (int i = 0; i < skip && blkIterator.hasNext(); i++) {
            blkIterator.next();
        }
        while (blkIterator.hasNext()) {
            BlockInfo blk = blkIterator.next();
            final INodeFile inode = getBlockCollection(blk);
            skip++;
            if (inode != null) {
                String src = inode.getFullPathName();
                if (src.startsWith(path)) {
                    corruptFiles.add(new CorruptFileBlockInfo(src, blk));
                    count++;
                    if (count >= DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED)
                        break;
                }
            }
        }
        cookieTab[0] = String.valueOf(skip);
        if (LOG.isDebugEnabled()) {
            LOG.debug("list corrupt file blocks returned: " + count);
        }
        return corruptFiles;
    } finally {
        readUnlock("listCorruptFileBlocks");
    }
}
Also used: BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ArrayList (java.util.ArrayList), IOException (java.io.IOException)
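
The cookie array above is what makes this call resumable: each invocation returns at most DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED entries and writes the new offset back into cookieTab[0]. From the client side this paging is hidden behind an iterator; a minimal sketch, assuming an initialized FileSystem handle (the class and method names here are illustrative):

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class CorruptFileLister {

    // Prints every corrupt file under 'root'. The RemoteIterator re-sends
    // the updated cookie on each underlying RPC, so the caller never sees
    // the cookieTab bookkeeping handled on the server side above.
    public static void printCorruptFiles(FileSystem fs, Path root)
            throws IOException {
        RemoteIterator<Path> corrupt = fs.listCorruptFileBlocks(root);
        while (corrupt.hasNext()) {
            System.out.println("corrupt file: " + corrupt.next());
        }
    }
}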

Example 13 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

The class TestNameNodeMetadataConsistency, method testGenerationStampInFuture.

/**
   * This test creates a file and modifies the block generation stamp to a
   * number that the name node has not seen yet. It then asserts that the
   * name node moves into safe mode while it is in startup mode.
   */
@Test
public void testGenerationStampInFuture() throws Exception {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    OutputStream ostream = fs.create(filePath1);
    ostream.write(TEST_DATA_IN_FUTURE.getBytes());
    ostream.close();
    // Re-write the generation stamp to a generation stamp in the future.
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath1);
    final long genStamp = block.getGenerationStamp();
    final int datanodeIndex = 0;
    cluster.changeGenStampOfBlock(datanodeIndex, block, genStamp + 1);
    // Stop the data node so that it won't remove the block
    final DataNodeProperties dnProps = cluster.stopDataNode(datanodeIndex);
    // Simulate the NameNode forgetting a block
    cluster.restartNameNode(true);
    cluster.getNameNode().getNamesystem().writeLock();
    BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager().getStoredBlock(block.getLocalBlock());
    bInfo.delete();
    cluster.getNameNode().getNamesystem().getBlockManager().removeBlock(bInfo);
    cluster.getNameNode().getNamesystem().writeUnlock();
    // We also need to tell the block manager that we are in the startup path
    BlockManagerTestUtil.setStartupSafeModeForTest(cluster.getNameNode().getNamesystem().getBlockManager());
    cluster.restartDataNode(dnProps);
    waitForNumBytes(TEST_DATA_IN_FUTURE.length());
    // Make sure that we find all written bytes in the future block
    assertEquals(TEST_DATA_IN_FUTURE.length(), cluster.getNameNode().getBytesWithFutureGenerationStamps());
    // Assert safemode reason
    assertTrue(cluster.getNameNode().getNamesystem().getSafeModeTip().contains("Name node detected blocks with generation stamps in future"));
}
Also used: DataNodeProperties (org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FileSystem (org.apache.hadoop.fs.FileSystem), OutputStream (java.io.OutputStream), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Test (org.junit.Test)
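
The waitForNumBytes helper is not included in this excerpt; a plausible stand-in under the test's assumptions (it uses the test class's cluster field, and the 60-second timeout and 100 ms poll interval are illustrative):

// Hypothetical stand-in for the waitForNumBytes(...) helper called above:
// polls the NameNode until it reports the expected number of bytes on
// blocks with future generation stamps, or gives up after one minute.
private void waitForNumBytes(long expectedBytes) throws InterruptedException {
    long deadline = System.currentTimeMillis() + 60_000L;
    while (cluster.getNameNode().getBytesWithFutureGenerationStamps()
            != expectedBytes && System.currentTimeMillis() < deadline) {
        Thread.sleep(100L);
    }
}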

Example 14 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

The class TestNameNodeMetadataConsistency, method testEnsureGenStampsIsStartupOnly.

/**
   * Pretty much the same test as above, but does not enable startup safe
   * mode, hence we should not see a positive count of blocks with future
   * generation stamps.
   */
@Test
public void testEnsureGenStampsIsStartupOnly() throws Exception {
    String testData = " This is test data";
    cluster.restartDataNodes();
    cluster.restartNameNodes();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    OutputStream ostream = fs.create(filePath2);
    ostream.write(testData.getBytes());
    ostream.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath2);
    long genStamp = block.getGenerationStamp();
    // Re-write the generation stamp to a generation stamp in the future.
    cluster.changeGenStampOfBlock(0, block, genStamp + 1);
    MiniDFSCluster.DataNodeProperties dnProps = cluster.stopDataNode(0);
    // Simulate the NameNode forgetting a block
    cluster.restartNameNode(true);
    BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager().getStoredBlock(block.getLocalBlock());
    cluster.getNameNode().getNamesystem().writeLock();
    bInfo.delete();
    cluster.getNameNode().getNamesystem().getBlockManager().removeBlock(bInfo);
    cluster.getNameNode().getNamesystem().writeUnlock();
    cluster.restartDataNode(dnProps);
    waitForNumBytes(0);
    // Make sure that there are no bytes in the future, since
    // isInStartupSafeMode is not true.
    assertEquals(0, cluster.getNameNode().getBytesWithFutureGenerationStamps());
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), DataNodeProperties (org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FileSystem (org.apache.hadoop.fs.FileSystem), OutputStream (java.io.OutputStream), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Test (org.junit.Test)

Example 15 with BlockInfo

Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.

The class TestFsck, method testFsckError.

/** Test if fsck can return -1 in case of failure.
   * 
   * @throws Exception
   */
@Test
public void testFsckError() throws Exception {
    // bring up a one-node cluster
    cluster = new MiniDFSCluster.Builder(conf).build();
    String fileName = "/test.txt";
    Path filePath = new Path(fileName);
    FileSystem fs = cluster.getFileSystem();
    // create a one-block file
    DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
    DFSTestUtil.waitReplication(fs, filePath, (short) 1);
    // intentionally corrupt NN data structure
    INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode(fileName, DirOp.READ);
    final BlockInfo[] blocks = node.getBlocks();
    assertEquals(1, blocks.length);
    // set the block length to be negative
    blocks[0].setNumBytes(-1L);
    // run fsck and expect a failure with -1 as the error code
    String outStr = runFsck(conf, -1, true, fileName);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
    // clean up file system
    fs.delete(filePath, true);
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Matchers.anyString (org.mockito.Matchers.anyString), Test (org.junit.Test)
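
The runFsck helper is another TestFsck method not shown here; a minimal sketch of the pattern it follows, assuming the standard DFSck tool driven through ToolRunner (the actual helper in TestFsck may differ in detail):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

import static org.junit.Assert.assertEquals;

public class FsckRunner {

    // Runs fsck over the given paths, optionally asserting on the exit
    // code (-1 in the test above, signalling failure), and returns the
    // captured console output for further assertions.
    static String runFsck(Configuration conf, int expectedErrCode,
            boolean checkErrorCode, String... path) throws Exception {
        ByteArrayOutputStream bStream = new ByteArrayOutputStream();
        PrintStream out = new PrintStream(bStream, true);
        int errCode = ToolRunner.run(new DFSck(conf, out), path);
        if (checkErrorCode) {
            assertEquals(expectedErrCode, errCode);
        }
        return bStream.toString();
    }
}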

Aggregations

BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 84
Test (org.junit.Test): 28
Path (org.apache.hadoop.fs.Path): 27
Block (org.apache.hadoop.hdfs.protocol.Block): 22
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 16
INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile): 14
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 13
IOException (java.io.IOException): 11
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 11
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 11
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 11
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 10
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 7
Configuration (org.apache.hadoop.conf.Configuration): 6
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 6
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy): 5
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 5
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 5