Example 61 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestCommitBlockSynchronization method makeNameSystemSpy.

private FSNamesystem makeNameSystemSpy(Block block, INodeFile file) throws IOException {
    Configuration conf = new Configuration();
    FSEditLog editlog = mock(FSEditLog.class);
    FSImage image = new FSImage(conf);
    Whitebox.setInternalState(image, "editLog", editlog);
    final DatanodeStorageInfo[] targets = {};
    FSNamesystem namesystem = new FSNamesystem(conf, image);
    namesystem.setImageLoaded(true);
    // Give the file a parent so FSNamesystem's isFileDeleted() returns false for it
    if (file.getParent() == null) {
        INodeDirectory mparent = mock(INodeDirectory.class);
        INodeDirectory parent = new INodeDirectory(mparent.getId(), new byte[0], mparent.getPermissionStatus(), mparent.getAccessTime());
        parent.setLocalName(new byte[0]);
        parent.addChild(file);
        file.setParent(parent);
    }
    namesystem.dir.getINodeMap().put(file);
    FSNamesystem namesystemSpy = spy(namesystem);
    BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 1);
    blockInfo.convertToBlockUnderConstruction(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
    blockInfo.setBlockCollectionId(file.getId());
    blockInfo.setGenerationStamp(genStamp);
    blockInfo.getUnderConstructionFeature().initializeBlockRecovery(blockInfo, genStamp);
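    // Stub the mocked file and the spied namesystem so commitBlockSynchronization()
    // sees an under-construction file whose last block is the one built above.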
    doReturn(blockInfo).when(file).removeLastBlock(any(Block.class));
    doReturn(true).when(file).isUnderConstruction();
    doReturn(new BlockInfoContiguous[1]).when(file).getBlocks();
    doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
    doReturn(blockInfo).when(file).getLastBlock();
    doNothing().when(namesystemSpy).closeFileCommitBlocks(any(String.class), any(INodeFile.class), any(BlockInfo.class));
    doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();
    return namesystemSpy;
}
Also used: BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), Configuration (org.apache.hadoop.conf.Configuration), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block)

Example 62 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestCommitBlockSynchronization method testCommitBlockSynchronizationWithDelete.

@Test
public void testCommitBlockSynchronizationWithDelete() throws IOException {
    INodeFile file = mockFileUnderConstruction();
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[0];
    ExtendedBlock lastBlock = new ExtendedBlock();
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, false, true, newTargets, null);
    // Simulate removing the last block from the file.
    doReturn(null).when(file).removeLastBlock(any(Block.class));
    // Repeat the call to make sure it does not throw
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, false, true, newTargets, null);
}
Also used: DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)
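
TestCommitBlockSynchronization also exercises the closeFile path. A minimal sketch in the same style, assuming the helpers above (the real test additionally asserts that the stored block ends up in the complete state):

@Test
public void testCommitBlockSynchronizationWithClose() throws IOException {
    INodeFile file = mockFileUnderConstruction();
    Block block = new Block(blockId, length, genStamp);
    FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
    DatanodeID[] newTargets = new DatanodeID[0];
    ExtendedBlock lastBlock = new ExtendedBlock();
    // closeFile=true, deleteblock=false: the recovered block is committed and
    // the (stubbed) closeFileCommitBlocks path is taken instead of deletion.
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
    // A second call must be idempotent once the block is completed.
    namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length, true, false, newTargets, null);
}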

Example 63 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestDeadDatanode method testDeadDatanode.

/**
   * Test to ensure the namenode rejects requests from a dead datanode
   * - Start a cluster
   * - Shut down the datanode and wait for it to be marked dead at the namenode
   * - Send datanode requests to the namenode and make sure they are rejected
   *   appropriately.
   */
@Test
public void testDeadDatanode() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    // wait for datanode to be marked live
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeRegistration reg = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);
    // Shutdown and wait for datanode to be marked dead
    dn.shutdown();
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);
    DatanodeProtocol dnp = cluster.getNameNodeRpc();
    ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo(new Block(0), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
    StorageReceivedDeletedBlocks[] storageBlocks = { new StorageReceivedDeletedBlocks(new DatanodeStorage(reg.getDatanodeUuid()), blocks) };
    // Ensure blockReceived call from dead datanode is not rejected with
    // IOException, since it's async, but the node remains unregistered.
    dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    // IBRs are async, make sure the NN processes all of them.
    bm.flushBlockOps();
    assertFalse(bm.getDatanodeManager().getDatanode(reg).isRegistered());
    // Ensure blockReport from dead datanode is rejected with IOException
    StorageBlockReport[] report = { new StorageBlockReport(new DatanodeStorage(reg.getDatanodeUuid()), BlockListAsLongs.EMPTY) };
    try {
        dnp.blockReport(reg, poolId, report, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
        fail("Expected IOException is not thrown");
    } catch (IOException ex) {
    // Expected
    }
    // Ensure heartbeat from dead datanode is rejected with a command
    // that asks datanode to register again
    StorageReport[] rep = { new StorageReport(new DatanodeStorage(reg.getDatanodeUuid()), false, 0, 0, 0, 0, 0) };
    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true, SlowPeerReports.EMPTY_REPORT).getCommands();
    assertEquals(1, cmd.length);
    assertEquals(RegisterCommand.REGISTER.getAction(), cmd[0].getAction());
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport), IOException (java.io.IOException), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), Block (org.apache.hadoop.hdfs.protocol.Block), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), DatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), Test (org.junit.Test)
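
Both tests above construct Block values directly. The protocol class carries only the (blockId, numBytes, generationStamp) triple; a minimal sketch of the constructor family (the numeric values are hypothetical):

// id only: length defaults to 0 and the generation stamp to the
// pre-upgrade "grandfather" value
Block byId = new Block(0);
// full triple: id, length in bytes, generation stamp
Block full = new Block(1073741825L, 134217728L, 1001L);
// getBlockName() yields the on-disk prefix form, here "blk_1073741825"
String name = full.getBlockName();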

Example 64 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class NamenodeFsck method blockIdCK.

/**
   * Check block information given a blockId number.
   *
   * @param blockId the block ID to look up, in "blk_<id>" form
   */
public void blockIdCK(String blockId) {
    if (blockId == null) {
        out.println("Please provide valid blockId!");
        return;
    }
    try {
        //get blockInfo
        Block block = new Block(Block.getBlockId(blockId));
        //find which file this block belongs to
        BlockInfo blockInfo = blockManager.getStoredBlock(block);
        if (blockInfo == null) {
            out.println("Block " + blockId + " " + NONEXISTENT_STATUS);
            LOG.warn("Block " + blockId + " " + NONEXISTENT_STATUS);
            return;
        }
        final INodeFile iNode = namenode.getNamesystem().getBlockCollection(blockInfo);
        NumberReplicas numberReplicas = blockManager.countNodes(blockInfo);
        out.println("Block Id: " + blockId);
        out.println("Block belongs to: " + iNode.getFullPathName());
        out.println("No. of Expected Replica: " + blockManager.getExpectedRedundancyNum(blockInfo));
        out.println("No. of live Replica: " + numberReplicas.liveReplicas());
        out.println("No. of excess Replica: " + numberReplicas.excessReplicas());
        out.println("No. of stale Replica: " + numberReplicas.replicasOnStaleNodes());
        out.println("No. of decommissioned Replica: " + numberReplicas.decommissioned());
        out.println("No. of decommissioning Replica: " + numberReplicas.decommissioning());
        if (this.showMaintenanceState) {
            out.println("No. of entering maintenance Replica: " + numberReplicas.liveEnteringMaintenanceReplicas());
            out.println("No. of in maintenance Replica: " + numberReplicas.maintenanceNotForReadReplicas());
        }
        out.println("No. of corrupted Replica: " + numberReplicas.corruptReplicas());
        //record datanodes that have corrupted block replica
        Collection<DatanodeDescriptor> corruptionRecord = null;
        if (blockManager.getCorruptReplicas(block) != null) {
            corruptionRecord = blockManager.getCorruptReplicas(block);
        }
        //report block replicas status on datanodes
        for (int idx = (blockInfo.numNodes() - 1); idx >= 0; idx--) {
            DatanodeDescriptor dn = blockInfo.getDatanode(idx);
            out.print("Block replica on datanode/rack: " + dn.getHostName() + dn.getNetworkLocation() + " ");
            if (corruptionRecord != null && corruptionRecord.contains(dn)) {
                out.print(CORRUPT_STATUS + "\t ReasonCode: " + blockManager.getCorruptReason(block, dn));
            } else if (dn.isDecommissioned()) {
                out.print(DECOMMISSIONED_STATUS);
            } else if (dn.isDecommissionInProgress()) {
                out.print(DECOMMISSIONING_STATUS);
            } else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
                out.print(ENTERING_MAINTENANCE_STATUS);
            } else if (this.showMaintenanceState && dn.isInMaintenance()) {
                out.print(IN_MAINTENANCE_STATUS);
            } else {
                out.print(HEALTHY_STATUS);
            }
            out.print("\n");
        }
    } catch (Exception e) {
        String errMsg = "Fsck on blockId '" + blockId + "'";
        LOG.warn(errMsg, e);
        out.println(e.getMessage());
        out.print("\n\n" + errMsg);
    }
}
Also used: DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), Block (org.apache.hadoop.hdfs.protocol.Block), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), NumberReplicas (org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas), UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException), FileNotFoundException (java.io.FileNotFoundException), IOException (java.io.IOException), AccessControlException (org.apache.hadoop.security.AccessControlException)
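
blockIdCK backs fsck's -blockId option; assuming a block named like the hypothetical ID below, the per-replica report printed by the method above is what the command shows:

hdfs fsck -blockId blk_1073741825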

Example 65 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class FsDatasetImpl method checkAndUpdate.

/**
   * Reconcile the difference between blocks on the disk and blocks in
   * volumeMap.
   *
   * Check the given block for inconsistencies. Look at the
   * current state of the block and reconcile the differences as follows:
   * <ul>
   * <li>If the block file is missing, delete the block from volumeMap</li>
   * <li>If the block file exists and the block is missing in volumeMap,
   * add the block to volumeMap</li>
   * <li>If the generation stamp does not match, update the block with the
   * right generation stamp</li>
   * <li>If the block length in memory does not match the actual block file
   * length, mark the block as corrupt and update the block length in
   * memory</li>
   * <li>If the file in {@link ReplicaInfo} does not match the file on
   * the disk, update {@link ReplicaInfo} with the correct file</li>
   * </ul>
   *
   * @param bpid block pool ID of the block
   * @param blockId ID of the block that differs
   * @param diskFile block file on the disk
   * @param diskMetaFile metadata file on the disk
   * @param vol volume of the block file
   */
@Override
public void checkAndUpdate(String bpid, long blockId, File diskFile, File diskMetaFile, FsVolumeSpi vol) throws IOException {
    Block corruptBlock = null;
    ReplicaInfo memBlockInfo;
    try (AutoCloseableLock lock = datasetLock.acquire()) {
        memBlockInfo = volumeMap.get(bpid, blockId);
        if (memBlockInfo != null && memBlockInfo.getState() != ReplicaState.FINALIZED) {
            // Block is not finalized - ignore the difference
            return;
        }
        final FileIoProvider fileIoProvider = datanode.getFileIoProvider();
        final boolean diskMetaFileExists = diskMetaFile != null && fileIoProvider.exists(vol, diskMetaFile);
        final boolean diskFileExists = diskFile != null && fileIoProvider.exists(vol, diskFile);
        final long diskGS = diskMetaFileExists ? Block.getGenerationStamp(diskMetaFile.getName()) : HdfsConstants.GRANDFATHER_GENERATION_STAMP;
        if (!diskFileExists) {
            if (memBlockInfo == null) {
                // If metadata file exists then delete it
                if (diskMetaFileExists && fileIoProvider.delete(vol, diskMetaFile)) {
                    LOG.warn("Deleted a metadata file without a block " + diskMetaFile.getAbsolutePath());
                }
                return;
            }
            if (!memBlockInfo.blockDataExists()) {
                // Block is in memory and not on the disk
                // Remove the block from volumeMap
                volumeMap.remove(bpid, blockId);
                if (vol.isTransientStorage()) {
                    ramDiskReplicaTracker.discardReplica(bpid, blockId, true);
                }
                LOG.warn("Removed block " + blockId + " from memory with missing block file on the disk");
                // Finally remove the metadata file
                if (diskMetaFileExists && fileIoProvider.delete(vol, diskMetaFile)) {
                    LOG.warn("Deleted a metadata file for the deleted block " + diskMetaFile.getAbsolutePath());
                }
            }
            return;
        }
        /*
       * Block file exists on the disk
       */
        if (memBlockInfo == null) {
            // Block is missing in memory - add the block to volumeMap
            ReplicaInfo diskBlockInfo = new ReplicaBuilder(ReplicaState.FINALIZED).setBlockId(blockId).setLength(diskFile.length()).setGenerationStamp(diskGS).setFsVolume(vol).setDirectoryToUse(diskFile.getParentFile()).build();
            volumeMap.add(bpid, diskBlockInfo);
            if (vol.isTransientStorage()) {
                long lockedBytesReserved = cacheManager.reserve(diskBlockInfo.getNumBytes()) > 0 ? diskBlockInfo.getNumBytes() : 0;
                ramDiskReplicaTracker.addReplica(bpid, blockId, (FsVolumeImpl) vol, lockedBytesReserved);
            }
            LOG.warn("Added missing block to memory " + diskBlockInfo);
            return;
        }
        // Compare block files
        if (memBlockInfo.blockDataExists()) {
            if (memBlockInfo.getBlockURI().compareTo(diskFile.toURI()) != 0) {
                if (diskMetaFileExists) {
                    if (memBlockInfo.metadataExists()) {
                        // We have two sets of block+meta files. Decide which one to
                        // keep.
                        ReplicaInfo diskBlockInfo = new ReplicaBuilder(ReplicaState.FINALIZED).setBlockId(blockId).setLength(diskFile.length()).setGenerationStamp(diskGS).setFsVolume(vol).setDirectoryToUse(diskFile.getParentFile()).build();
                        ((FsVolumeImpl) vol).resolveDuplicateReplicas(bpid, memBlockInfo, diskBlockInfo, volumeMap);
                    }
                } else {
                    if (!fileIoProvider.delete(vol, diskFile)) {
                        LOG.warn("Failed to delete " + diskFile);
                    }
                }
            }
        } else {
            // Block refers to a block file that does not exist.
            // Update the block with the file found on the disk. Since the block
            // file and metadata file are found as a pair on the disk, update
            // the block based on the metadata file found on the disk
            LOG.warn("Block file in replica " + memBlockInfo.getBlockURI() + " does not exist. Updating it to the file found during scan " + diskFile.getAbsolutePath());
            memBlockInfo.updateWithReplica(StorageLocation.parse(diskFile.toString()));
            LOG.warn("Updating generation stamp for block " + blockId + " from " + memBlockInfo.getGenerationStamp() + " to " + diskGS);
            memBlockInfo.setGenerationStamp(diskGS);
        }
        // Compare generation stamp
        if (memBlockInfo.getGenerationStamp() != diskGS) {
            File memMetaFile = FsDatasetUtil.getMetaFile(diskFile, memBlockInfo.getGenerationStamp());
            if (fileIoProvider.exists(vol, memMetaFile)) {
                String warningPrefix = "Metadata file in memory " + memMetaFile.getAbsolutePath() + " does not match file found by scan ";
                if (!diskMetaFileExists) {
                    LOG.warn(warningPrefix + "null");
                } else if (memMetaFile.compareTo(diskMetaFile) != 0) {
                    LOG.warn(warningPrefix + diskMetaFile.getAbsolutePath());
                }
            } else {
                // The metadata file recorded in memory is missing. If the meta
                // file found by the scan is in the same directory as the block
                // file, use the generation stamp from its name; otherwise fall
                // back to the grandfather generation stamp.
                try {
                    File memFile = new File(memBlockInfo.getBlockURI());
                    long gs = diskMetaFileExists && diskMetaFile.getParent().equals(memFile.getParent()) ? diskGS : HdfsConstants.GRANDFATHER_GENERATION_STAMP;
                    LOG.warn("Updating generation stamp for block " + blockId + " from " + memBlockInfo.getGenerationStamp() + " to " + gs);
                    memBlockInfo.setGenerationStamp(gs);
                } catch (IllegalArgumentException e) {
                    //exception arises because the URI cannot be converted to a file
                    LOG.warn("Block URI could not be resolved to a file", e);
                }
            }
        }
        // Compare block size
        if (memBlockInfo.getNumBytes() != memBlockInfo.getBlockDataLength()) {
            // Update the length based on the block file
            corruptBlock = new Block(memBlockInfo);
            LOG.warn("Updating size of block " + blockId + " from " + memBlockInfo.getNumBytes() + " to " + memBlockInfo.getBlockDataLength());
            memBlockInfo.setNumBytes(memBlockInfo.getBlockDataLength());
        }
    }
    // Send corrupt block report outside the lock
    if (corruptBlock != null) {
        LOG.warn("Reporting the block " + corruptBlock + " as corrupt due to length mismatch");
        try {
            datanode.reportBadBlocks(new ExtendedBlock(bpid, corruptBlock), memBlockInfo.getVolume());
        } catch (IOException e) {
            LOG.warn("Failed to repot bad block " + corruptBlock, e);
        }
    }
}
Also used: ReplicaInfo (org.apache.hadoop.hdfs.server.datanode.ReplicaInfo), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), IOException (java.io.IOException), MultipleIOException (org.apache.hadoop.io.MultipleIOException), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), ReplicaBuilder (org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder), Block (org.apache.hadoop.hdfs.protocol.Block), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), FileIoProvider (org.apache.hadoop.hdfs.server.datanode.FileIoProvider), File (java.io.File)
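
checkAndUpdate leans on the on-disk naming convention: a block file is named blk_<id> and its metadata file blk_<id>_<genStamp>.meta. A minimal sketch of the static helpers used above (the file names are hypothetical):

// Parse the generation stamp out of a metadata file name:
// blk_<blockId>_<genStamp>.meta -> 1001
long gs = Block.getGenerationStamp("blk_1073741825_1001.meta");
// Parse the block ID out of a block file name: blk_<blockId> -> 1073741825
long id = Block.getBlockId("blk_1073741825");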

Aggregations

Block (org.apache.hadoop.hdfs.protocol.Block): 155 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 79
Test (org.junit.Test): 77
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 74
Path (org.apache.hadoop.fs.Path): 28
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 26
IOException (java.io.IOException): 24
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 22
Configuration (org.apache.hadoop.conf.Configuration): 20
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 17
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 17
CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock): 17
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 15
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 14
ArrayList (java.util.ArrayList): 12
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 11
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 10