
Example 6 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

The class TestCommitBlockSynchronization, method makeNameSystemSpy.

private FSNamesystem makeNameSystemSpy(Block block, INodeFile file) throws IOException {
    Configuration conf = new Configuration();
    FSEditLog editlog = mock(FSEditLog.class);
    FSImage image = new FSImage(conf);
    Whitebox.setInternalState(image, "editLog", editlog);
    final DatanodeStorageInfo[] targets = {};
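    // No replica locations yet; the block is converted below to an
    // under-construction state with this empty expected-targets list.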
    FSNamesystem namesystem = new FSNamesystem(conf, image);
    namesystem.setImageLoaded(true);
    // Give the file a parent directory so FSNamesystem's isFileDeleted() will return false for it
    if (file.getParent() == null) {
        INodeDirectory mparent = mock(INodeDirectory.class);
        INodeDirectory parent = new INodeDirectory(mparent.getId(), new byte[0], mparent.getPermissionStatus(), mparent.getAccessTime());
        parent.setLocalName(new byte[0]);
        parent.addChild(file);
        file.setParent(parent);
    }
    namesystem.dir.getINodeMap().put(file);
    FSNamesystem namesystemSpy = spy(namesystem);
    BlockInfo blockInfo = new BlockInfoContiguous(block, (short) 1);
    blockInfo.convertToBlockUnderConstruction(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
    blockInfo.setBlockCollectionId(file.getId());
    blockInfo.setGenerationStamp(genStamp);
    blockInfo.getUnderConstructionFeature().initializeBlockRecovery(blockInfo, genStamp);
    doReturn(blockInfo).when(file).removeLastBlock(any(Block.class));
    doReturn(true).when(file).isUnderConstruction();
    doReturn(new BlockInfoContiguous[1]).when(file).getBlocks();
    doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
    doReturn(blockInfo).when(file).getLastBlock();
    doNothing().when(namesystemSpy).closeFileCommitBlocks(any(String.class), any(INodeFile.class), any(BlockInfo.class));
    doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();
    return namesystemSpy;
}
Also used : BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) Configuration(org.apache.hadoop.conf.Configuration) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block)
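
A minimal sketch of how a test could exercise the spy built above (hypothetical; block and file are the arguments passed to makeNameSystemSpy, while poolId, genStamp, and length stand in for test fixture values):

FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
ExtendedBlock lastBlock = new ExtendedBlock(poolId, block);
namesystemSpy.commitBlockSynchronization(lastBlock, genStamp, length,
        false /* closeFile */, false /* deleteBlock */, new DatanodeID[0], null);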

Example 7 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

The class TestDeadDatanode, method testDeadNodeAsBlockTarget.

@Test
public void testDeadNodeAsBlockTarget() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    // wait for datanode to be marked live
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeRegistration reg = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    // Get the updated datanode descriptor
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    DatanodeManager dm = bm.getDatanodeManager();
    Node clientNode = dm.getDatanode(reg);
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);
    // Shutdown and wait for datanode to be marked dead
    dn.shutdown();
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);
    // Choose targets for a new block; the dead node must not be selected,
    // since it is no longer part of the cluster.
    DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock("/hello", 3, clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7, BlockType.CONTIGUOUS, null);
    for (DatanodeStorageInfo datanodeStorageInfo : results) {
        assertFalse("Dead node should not be choosen", datanodeStorageInfo.getDatanodeDescriptor().equals(clientNode));
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) Node(org.apache.hadoop.net.Node) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DatanodeRegistration(org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) Test(org.junit.Test)
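
For readability, the chooseTarget4NewBlock call above maps to the following argument roles (the inline names are informal annotations, not the method's declared parameter names):

DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock(
        "/hello",              // source path, used for placement policy and logging
        3,                     // number of replicas to place
        clientNode,            // writer hint (the now-dead node in this test)
        new HashSet<Node>(),   // nodes to exclude
        256 * 1024 * 1024L,    // block size
        null,                  // favored nodes
        (byte) 7,              // storage policy id
        BlockType.CONTIGUOUS,  // contiguous (replicated) rather than striped
        null);                 // erasure coding policy, unused for contiguous blocks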

Example 8 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

The class NamenodeFsck, method getReplicaInfo.

/**
   * Display info of each replica for a replicated block.
   * For a striped block group, display info of each internal block.
   */
private String getReplicaInfo(BlockInfo storedBlock) {
    if (!(showLocations || showRacks || showReplicaDetails || showUpgradeDomains)) {
        return "";
    }
    final boolean isComplete = storedBlock.isComplete();
    Iterator<DatanodeStorageInfo> storagesItr;
    StringBuilder sb = new StringBuilder(" [");
    final boolean isStriped = storedBlock.isStriped();
    Map<DatanodeStorageInfo, Long> storage2Id = new HashMap<>();
    if (isComplete) {
        if (isStriped) {
            long blockId = storedBlock.getBlockId();
            Iterable<StorageAndBlockIndex> sis = ((BlockInfoStriped) storedBlock).getStorageAndIndexInfos();
            for (StorageAndBlockIndex si : sis) {
                storage2Id.put(si.getStorage(), blockId + si.getBlockIndex());
            }
        }
        storagesItr = storedBlock.getStorageInfos();
    } else {
        storagesItr = storedBlock.getUnderConstructionFeature().getExpectedStorageLocationsIterator();
    }
    while (storagesItr.hasNext()) {
        DatanodeStorageInfo storage = storagesItr.next();
        if (isStriped && isComplete) {
            long index = storage2Id.get(storage);
            sb.append("blk_" + index + ":");
        }
        DatanodeDescriptor dnDesc = storage.getDatanodeDescriptor();
        if (showRacks) {
            sb.append(NodeBase.getPath(dnDesc));
        } else {
            sb.append(new DatanodeInfoWithStorage(dnDesc, storage.getStorageID(), storage.getStorageType()));
        }
        if (showUpgradeDomains) {
            String upgradeDomain = (dnDesc.getUpgradeDomain() != null) ? dnDesc.getUpgradeDomain() : UNDEFINED;
            sb.append("(ud=" + upgradeDomain + ")");
        }
        if (showReplicaDetails) {
            Collection<DatanodeDescriptor> corruptReplicas = blockManager.getCorruptReplicas(storedBlock);
            sb.append("(");
            if (dnDesc.isDecommissioned()) {
                sb.append("DECOMMISSIONED)");
            } else if (dnDesc.isDecommissionInProgress()) {
                sb.append("DECOMMISSIONING)");
            } else if (this.showMaintenanceState && dnDesc.isEnteringMaintenance()) {
                sb.append("ENTERING MAINTENANCE)");
            } else if (this.showMaintenanceState && dnDesc.isInMaintenance()) {
                sb.append("IN MAINTENANCE)");
            } else if (corruptReplicas != null && corruptReplicas.contains(dnDesc)) {
                sb.append("CORRUPT)");
            } else if (blockManager.isExcess(dnDesc, storedBlock)) {
                sb.append("EXCESS)");
            } else if (dnDesc.isStale(this.staleInterval)) {
                sb.append("STALE_NODE)");
            } else if (storage.areBlockContentsStale()) {
                sb.append("STALE_BLOCK_CONTENT)");
            } else {
                sb.append("LIVE)");
            }
        }
        if (storagesItr.hasNext()) {
            sb.append(", ");
        }
    }
    sb.append(']');
    return sb.toString();
}
Also used : BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) HashMap(java.util.HashMap) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) DatanodeInfoWithStorage(org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage) StorageAndBlockIndex(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped.StorageAndBlockIndex)
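
For orientation, a healthy replicated block renders roughly as follows when showLocations and showReplicaDetails are set (a hedged illustration; the addresses and storage IDs are invented):

 [DatanodeInfoWithStorage[127.0.0.1:9866,DS-8f2b6a31,DISK](LIVE), DatanodeInfoWithStorage[127.0.0.1:9867,DS-44c1d0e9,DISK](DECOMMISSIONING)]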

Example 9 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

The class FSNamesystem, method commitBlockSynchronization.

void commitBlockSynchronization(ExtendedBlock oldBlock, long newgenerationstamp, long newlength, boolean closeFile, boolean deleteblock, DatanodeID[] newtargets, String[] newtargetstorages) throws IOException {
    LOG.info("commitBlockSynchronization(oldBlock=" + oldBlock + ", newgenerationstamp=" + newgenerationstamp + ", newlength=" + newlength + ", newtargets=" + Arrays.asList(newtargets) + ", closeFile=" + closeFile + ", deleteBlock=" + deleteblock + ")");
    checkOperation(OperationCategory.WRITE);
    final String src;
    writeLock();
    boolean copyTruncate = false;
    BlockInfo truncatedBlock = null;
    try {
        checkOperation(OperationCategory.WRITE);
        // If a DN tries to commit to the standby, the recovery will
        // fail, and the next retry will succeed on the new NN.
        checkNameNodeSafeMode("Cannot commitBlockSynchronization while in safe mode");
        final BlockInfo storedBlock = getStoredBlock(ExtendedBlock.getLocalBlock(oldBlock));
        if (storedBlock == null) {
            if (deleteblock) {
                // This may be a retry attempt, so ignore the failure
                // to locate the block.
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Block (=" + oldBlock + ") not found");
                }
                return;
            } else {
                throw new IOException("Block (=" + oldBlock + ") not found");
            }
        }
        final long oldGenerationStamp = storedBlock.getGenerationStamp();
        final long oldNumBytes = storedBlock.getNumBytes();
        // A delete first removes the file from the namespace and delays block
        // removal for performance; if that happened in between, fail here
        // rather than hit an NPE on a null block collection later.
        if (storedBlock.isDeleted()) {
            throw new IOException("The blockCollection of " + storedBlock + " is null, likely because the file owning this block was" + " deleted and the block removal is delayed");
        }
        final INodeFile iFile = getBlockCollection(storedBlock);
        src = iFile.getFullPathName();
        if (isFileDeleted(iFile)) {
            throw new FileNotFoundException("File not found: " + src + ", likely due to delayed block removal");
        }
        if ((!iFile.isUnderConstruction() || storedBlock.isComplete()) && iFile.getLastBlock().isComplete()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Unexpected block (=" + oldBlock + ") since the file (=" + iFile.getLocalName() + ") is not under construction");
            }
            return;
        }
        truncatedBlock = iFile.getLastBlock();
        final long recoveryId = truncatedBlock.getUnderConstructionFeature().getBlockRecoveryId();
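        // Truncate with a snapshot ("copy on truncate") allocates a new block id
        // for the truncated block, so the ids differ in that case.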
        copyTruncate = truncatedBlock.getBlockId() != storedBlock.getBlockId();
        if (recoveryId != newgenerationstamp) {
            throw new IOException("The recovery id " + newgenerationstamp + " does not match current recovery id " + recoveryId + " for block " + oldBlock);
        }
        if (deleteblock) {
            Block blockToDel = ExtendedBlock.getLocalBlock(oldBlock);
            boolean remove = iFile.removeLastBlock(blockToDel) != null;
            if (remove) {
                blockManager.removeBlock(storedBlock);
            }
        } else {
            // update last block
            if (!copyTruncate) {
                storedBlock.setGenerationStamp(newgenerationstamp);
                storedBlock.setNumBytes(newlength);
            }
            // Find the target DatanodeStorageInfos. If a target cannot be found
            // because of an invalid or empty DatanodeID/StorageID, the slot at
            // the same offset in dsInfos is null.
            final DatanodeStorageInfo[] dsInfos = blockManager.getDatanodeManager().getDatanodeStorageInfos(newtargets, newtargetstorages, "src=%s, oldBlock=%s, newgenerationstamp=%d, newlength=%d", src, oldBlock, newgenerationstamp, newlength);
            if (closeFile && dsInfos != null) {
                // The file is being closed. Insert block locations into the
                // blockManager now; otherwise fsck would report these blocks as
                // MISSING while blocksReceived reports from datanodes are still
                // in flight.
                for (int i = 0; i < dsInfos.length; i++) {
                    if (dsInfos[i] != null) {
                        if (copyTruncate) {
                            dsInfos[i].addBlock(truncatedBlock, truncatedBlock);
                        } else {
                            Block bi = new Block(storedBlock);
                            if (storedBlock.isStriped()) {
                                bi.setBlockId(bi.getBlockId() + i);
                            }
                            dsInfos[i].addBlock(storedBlock, bi);
                        }
                    }
                }
            }
            // add pipeline locations into the INodeUnderConstruction
            if (copyTruncate) {
                iFile.convertLastBlockToUC(truncatedBlock, dsInfos);
            } else {
                iFile.convertLastBlockToUC(storedBlock, dsInfos);
                if (closeFile) {
                    blockManager.markBlockReplicasAsCorrupt(oldBlock.getLocalBlock(), storedBlock, oldGenerationStamp, oldNumBytes, dsInfos);
                }
            }
        }
        if (closeFile) {
            if (copyTruncate) {
                closeFileCommitBlocks(src, iFile, truncatedBlock);
                if (!iFile.isBlockInLatestSnapshot(storedBlock)) {
                    blockManager.removeBlock(storedBlock);
                }
            } else {
                closeFileCommitBlocks(src, iFile, storedBlock);
            }
        } else {
            // If this commit does not want to close the file, persist blocks
            FSDirWriteFileOp.persistBlocks(dir, src, iFile, false);
        }
    } finally {
        writeUnlock("commitBlockSynchronization");
    }
    getEditLog().logSync();
    if (closeFile) {
        LOG.info("commitBlockSynchronization(oldBlock=" + oldBlock + ", file=" + src + (copyTruncate ? ", newBlock=" + truncatedBlock : ", newgenerationstamp=" + newgenerationstamp) + ", newlength=" + newlength + ", newtargets=" + Arrays.asList(newtargets) + ") successful");
    } else {
        LOG.info("commitBlockSynchronization(" + oldBlock + ") successful");
    }
}
Also used : DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) FileNotFoundException(java.io.FileNotFoundException) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) IOException(java.io.IOException)
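
For context, this method is invoked over DatanodeProtocol by the primary datanode once replica recovery finishes; a hedged sketch of that call site (the nnProxy variable and argument values are illustrative, not copied from Hadoop's recovery worker):

// After syncing all replicas to newLength under recoveryId:
nnProxy.commitBlockSynchronization(oldBlock, recoveryId, newLength,
        true /* closeFile */, false /* deleteBlock */,
        newTargets, newTargetStorages);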

Example 10 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

The class FSNamesystem, method getAdditionalDatanode.

/** @see ClientProtocol#getAdditionalDatanode */
LocatedBlock getAdditionalDatanode(String src, long fileId, final ExtendedBlock blk, final DatanodeInfo[] existings, final String[] storageIDs, final Set<Node> excludes, final int numAdditionalNodes, final String clientName) throws IOException {
    //check if the feature is enabled
    dtpReplaceDatanodeOnFailure.checkEnabled();
    Node clientnode = null;
    String clientMachine;
    final long preferredblocksize;
    final byte storagePolicyID;
    final List<DatanodeStorageInfo> chosen;
    final BlockType blockType;
    checkOperation(OperationCategory.READ);
    FSPermissionChecker pc = getPermissionChecker();
    readLock();
    try {
        checkOperation(OperationCategory.READ);
        //check safe mode
        checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk);
        final INodesInPath iip = dir.resolvePath(pc, src, fileId);
        src = iip.getPath();
        //check lease
        final INodeFile file = checkLease(iip, clientName, fileId);
        clientMachine = file.getFileUnderConstructionFeature().getClientMachine();
        clientnode = blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
        preferredblocksize = file.getPreferredBlockSize();
        storagePolicyID = file.getStoragePolicyID();
        blockType = file.getBlockType();
        //find datanode storages
        final DatanodeManager dm = blockManager.getDatanodeManager();
        chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs, "src=%s, fileId=%d, blk=%s, clientName=%s, clientMachine=%s", src, fileId, blk, clientName, clientMachine));
    } finally {
        readUnlock("getAdditionalDatanode");
    }
    if (clientnode == null) {
        clientnode = FSDirWriteFileOp.getClientNode(blockManager, clientMachine);
    }
    // choose new datanodes.
    final DatanodeStorageInfo[] targets = blockManager.chooseTarget4AdditionalDatanode(src, numAdditionalNodes, clientnode, chosen, excludes, preferredblocksize, storagePolicyID, blockType);
    final LocatedBlock lb = BlockManager.newLocatedBlock(blk, targets, -1, false);
    blockManager.setBlockToken(lb, BlockTokenIdentifier.AccessMode.COPY);
    return lb;
}
Also used : Node(org.apache.hadoop.net.Node) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockType(org.apache.hadoop.hdfs.protocol.BlockType)
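
The dtpReplaceDatanodeOnFailure.checkEnabled() guard above is driven by client-side configuration; a minimal sketch of enabling the feature (the property names are the standard HDFS client keys, shown here as an assumption-labeled example rather than the authoritative setup):

Configuration conf = new HdfsConfiguration();
conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");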

Aggregations

DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 25
Test (org.junit.Test): 10
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 8
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 7
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 6
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6
ArrayList (java.util.ArrayList): 5
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
Block (org.apache.hadoop.hdfs.protocol.Block): 4
Node (org.apache.hadoop.net.Node): 4
IOException (java.io.IOException): 3
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 3
BlockECReconstructionInfo (org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo): 3
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 3
FileNotFoundException (java.io.FileNotFoundException): 2
HashSet (java.util.HashSet): 2
StorageType (org.apache.hadoop.fs.StorageType): 2