
Example 11 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

the class FSNamesystem method getAdditionalBlock.

/**
   * The client would like to obtain an additional block for the indicated
   * filename (which is being written-to).  Return an array that consists
   * of the block, plus a set of machines.  The first on this list should
   * be where the client writes data.  Subsequent items in the list must
   * be provided in the connection to the first datanode.
   *
   * Make sure the previous blocks have been reported by datanodes and
   * are replicated.  Will return an empty 2-elt array if we want the
   * client to "try again later".
   */
LocatedBlock getAdditionalBlock(String src, long fileId, String clientName, ExtendedBlock previous, DatanodeInfo[] excludedNodes, String[] favoredNodes, EnumSet<AddBlockFlag> flags) throws IOException {
    final String operationName = "getAdditionalBlock";
    NameNode.stateChangeLog.debug("BLOCK* getAdditionalBlock: {}  inodeId {}" + " for {}", src, fileId, clientName);
    LocatedBlock[] onRetryBlock = new LocatedBlock[1];
    FSDirWriteFileOp.ValidateAddBlockResult r;
    FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.READ);
    readLock();
    try {
        checkOperation(OperationCategory.READ);
        r = FSDirWriteFileOp.validateAddBlock(this, pc, src, fileId, clientName, previous, onRetryBlock);
    } finally {
        readUnlock(operationName);
    }
    if (r == null) {
        assert onRetryBlock[0] != null : "Retry block is null";
        // This is a retry. Just return the last block.
        return onRetryBlock[0];
    }
    DatanodeStorageInfo[] targets = FSDirWriteFileOp.chooseTargetForNewBlock(blockManager, src, excludedNodes, favoredNodes, flags, r);
    checkOperation(OperationCategory.WRITE);
    writeLock();
    LocatedBlock lb;
    try {
        checkOperation(OperationCategory.WRITE);
        lb = FSDirWriteFileOp.storeAllocatedBlock(this, src, fileId, clientName, previous, targets);
    } finally {
        writeUnlock(operationName);
    }
    getEditLog().logSync();
    return lb;
}
Also used : DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
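
A point worth calling out in this method is its lock discipline: validation runs under the namesystem read lock, the potentially expensive target selection runs with no lock held, and the allocation is committed under the write lock. Here is a minimal, self-contained sketch of that pattern; the names are hypothetical, and this illustrates only the read-validate / compute-unlocked / write-commit structure, not Hadoop code.

import java.util.concurrent.locks.ReentrantReadWriteLock;

class AllocationSketch {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private volatile boolean open = true;

    Object allocate(String key) {
        lock.readLock().lock();
        try {
            // cheap validation against current state
            if (!open) throw new IllegalStateException("closed");
        } finally {
            lock.readLock().unlock();
        }
        // expensive work happens with no lock held
        Object targets = chooseTargets(key);
        lock.writeLock().lock();
        try {
            // state may have changed while unlocked, so re-check before committing
            if (!open) throw new IllegalStateException("closed");
            return commit(key, targets);
        } finally {
            lock.writeLock().unlock();
        }
    }

    private Object chooseTargets(String key) { return key; }
    private Object commit(String key, Object targets) { return targets; }
}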

Example 12 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

the class TestPBHelper method testBlockECRecoveryCommand.

@Test
public void testBlockECRecoveryCommand() {
    DatanodeInfo[] dnInfos0 = new DatanodeInfo[] { DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
    DatanodeStorageInfo targetDnInfos_0 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s00"));
    DatanodeStorageInfo targetDnInfos_1 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s01"));
    DatanodeStorageInfo[] targetDnInfos0 = new DatanodeStorageInfo[] { targetDnInfos_0, targetDnInfos_1 };
    byte[] liveBlkIndices0 = new byte[2];
    BlockECReconstructionInfo blkECRecoveryInfo0 = new BlockECReconstructionInfo(new ExtendedBlock("bp1", 1234), dnInfos0, targetDnInfos0, liveBlkIndices0, StripedFileTestUtil.getDefaultECPolicy());
    DatanodeInfo[] dnInfos1 = new DatanodeInfo[] { DFSTestUtil.getLocalDatanodeInfo(), DFSTestUtil.getLocalDatanodeInfo() };
    DatanodeStorageInfo targetDnInfos_2 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s02"));
    DatanodeStorageInfo targetDnInfos_3 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s03"));
    DatanodeStorageInfo[] targetDnInfos1 = new DatanodeStorageInfo[] { targetDnInfos_2, targetDnInfos_3 };
    byte[] liveBlkIndices1 = new byte[2];
    BlockECReconstructionInfo blkECRecoveryInfo1 = new BlockECReconstructionInfo(new ExtendedBlock("bp2", 3256), dnInfos1, targetDnInfos1, liveBlkIndices1, StripedFileTestUtil.getDefaultECPolicy());
    List<BlockECReconstructionInfo> blkRecoveryInfosList = new ArrayList<BlockECReconstructionInfo>();
    blkRecoveryInfosList.add(blkECRecoveryInfo0);
    blkRecoveryInfosList.add(blkECRecoveryInfo1);
    BlockECReconstructionCommand blkECReconstructionCmd = new BlockECReconstructionCommand(DatanodeProtocol.DNA_ERASURE_CODING_RECONSTRUCTION, blkRecoveryInfosList);
    BlockECReconstructionCommandProto blkECRecoveryCmdProto = PBHelper.convert(blkECReconstructionCmd);
    blkECReconstructionCmd = PBHelper.convert(blkECRecoveryCmdProto);
    Iterator<BlockECReconstructionInfo> iterator = blkECReconstructionCmd.getECTasks().iterator();
    assertBlockECRecoveryInfoEquals(blkECRecoveryInfo0, iterator.next());
    assertBlockECRecoveryInfoEquals(blkECRecoveryInfo1, iterator.next());
}
Also used : BlockECReconstructionInfo(org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) BlockECReconstructionCommand(org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand) BlockECReconstructionCommandProto(org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECReconstructionCommandProto) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) Test(org.junit.Test)
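
The test follows a common protobuf round-trip idiom: build the command, convert it to its protobuf message via PBHelper.convert, convert back, and assert field-level equality on the result. A minimal sketch of the same idiom, using a hypothetical value type and a toy string codec in place of the generated protobuf classes:

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class RoundTripSketchTest {
    // hypothetical value type standing in for BlockECReconstructionCommand
    static class Cmd {
        final int action;
        final String pool;
        Cmd(int action, String pool) { this.action = action; this.pool = pool; }
    }

    // toy codec standing in for PBHelper.convert in both directions
    static String toWire(Cmd c) { return c.action + "|" + c.pool; }
    static Cmd fromWire(String s) {
        String[] parts = s.split("\\|", 2);
        return new Cmd(Integer.parseInt(parts[0]), parts[1]);
    }

    @Test
    public void roundTripPreservesFields() {
        Cmd original = new Cmd(11, "bp1");
        Cmd decoded = fromWire(toWire(original));
        assertEquals(original.action, decoded.action);
        assertEquals(original.pool, decoded.pool);
    }
}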

Example 13 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

the class NamenodeWebHdfsMethods method chooseDatanode.

@VisibleForTesting
static DatanodeInfo chooseDatanode(final NameNode namenode, final String path, final HttpOpParam.Op op, final long openOffset, final long blocksize, final String excludeDatanodes, final String remoteAddr) throws IOException {
    FSNamesystem fsn = namenode.getNamesystem();
    if (fsn == null) {
        throw new IOException("Namesystem has not been intialized yet.");
    }
    final BlockManager bm = fsn.getBlockManager();
    HashSet<Node> excludes = new HashSet<Node>();
    if (excludeDatanodes != null) {
        for (String host : StringUtils.getTrimmedStringCollection(excludeDatanodes)) {
            int idx = host.indexOf(":");
            if (idx != -1) {
                excludes.add(bm.getDatanodeManager().getDatanodeByXferAddr(host.substring(0, idx), Integer.parseInt(host.substring(idx + 1))));
            } else {
                excludes.add(bm.getDatanodeManager().getDatanodeByHost(host));
            }
        }
    }
    if (op == PutOpParam.Op.CREATE) {
        // choose a datanode near the client
        final DatanodeDescriptor clientNode = bm.getDatanodeManager().getDatanodeByHost(remoteAddr);
        if (clientNode != null) {
            final DatanodeStorageInfo[] storages = bm.chooseTarget4WebHDFS(path, clientNode, excludes, blocksize);
            if (storages.length > 0) {
                return storages[0].getDatanodeDescriptor();
            }
        }
    } else if (op == GetOpParam.Op.OPEN || op == GetOpParam.Op.GETFILECHECKSUM || op == PostOpParam.Op.APPEND) {
        // choose a datanode containing a replica
        final NamenodeProtocols np = getRPCServer(namenode);
        final HdfsFileStatus status = np.getFileInfo(path);
        if (status == null) {
            throw new FileNotFoundException("File " + path + " not found.");
        }
        final long len = status.getLen();
        if (op == GetOpParam.Op.OPEN) {
            if (openOffset < 0L || (openOffset >= len && len > 0)) {
                throw new IOException("Offset=" + openOffset + " out of the range [0, " + len + "); " + op + ", path=" + path);
            }
        }
        if (len > 0) {
            final long offset = op == GetOpParam.Op.OPEN ? openOffset : len - 1;
            final LocatedBlocks locations = np.getBlockLocations(path, offset, 1);
            final int count = locations.locatedBlockCount();
            if (count > 0) {
                return bestNode(locations.get(0).getLocations(), excludes);
            }
        }
    }
    return (DatanodeDescriptor) bm.getDatanodeManager().getNetworkTopology().chooseRandom(NodeBase.ROOT, excludes);
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) NameNode(org.apache.hadoop.hdfs.server.namenode.NameNode) Node(org.apache.hadoop.net.Node) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FileNotFoundException(java.io.FileNotFoundException) IOException(java.io.IOException) DatanodeDescriptor(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) HashSet(java.util.HashSet) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
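
The exclude-list handling accepts a comma-separated mix of bare hostnames and host:port pairs, resolving each entry through the DatanodeManager. A small, self-contained sketch of that parsing step, assuming a hypothetical Resolver interface in place of the DatanodeManager lookups:

import java.util.HashSet;
import java.util.Set;

class ExcludeListSketch {
    // hypothetical stand-in for the DatanodeManager lookups
    interface Resolver {
        Object byXferAddr(String host, int port);
        Object byHost(String host);
    }

    static Set<Object> parse(String excludeDatanodes, Resolver r) {
        Set<Object> excludes = new HashSet<>();
        if (excludeDatanodes == null) {
            return excludes;
        }
        for (String host : excludeDatanodes.split(",")) {
            host = host.trim();
            if (host.isEmpty()) {
                continue;
            }
            int idx = host.indexOf(':');
            if (idx != -1) {
                // "host:port" entries match on the transfer address
                excludes.add(r.byXferAddr(host.substring(0, idx),
                        Integer.parseInt(host.substring(idx + 1))));
            } else {
                // bare hostnames match on the host alone
                excludes.add(r.byHost(host));
            }
        }
        return excludes;
    }
}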

Example 14 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

the class FSNamesystem method updatePipelineInternal.

private void updatePipelineInternal(String clientName, ExtendedBlock oldBlock, ExtendedBlock newBlock, DatanodeID[] newNodes, String[] newStorageIDs, boolean logRetryCache) throws IOException {
    assert hasWriteLock();
    // check the validity of the block and lease holder name
    final INodeFile pendingFile = checkUCBlock(oldBlock, clientName);
    final String src = pendingFile.getFullPathName();
    final BlockInfo lastBlock = pendingFile.getLastBlock();
    assert !lastBlock.isComplete();
    // check new GS & length: this is not expected
    if (newBlock.getGenerationStamp() <= lastBlock.getGenerationStamp()) {
        final String msg = "Update " + oldBlock + " but the new block " + newBlock + " does not have a larger generation stamp than the last block " + lastBlock;
        LOG.warn(msg);
        throw new IOException(msg);
    }
    if (newBlock.getNumBytes() < lastBlock.getNumBytes()) {
        final String msg = "Update " + oldBlock + " (size=" + oldBlock.getNumBytes() + ") to a smaller size block " + newBlock + " (size=" + newBlock.getNumBytes() + ")";
        LOG.warn(msg);
        throw new IOException(msg);
    }
    // Update old block with the new generation stamp and new length
    lastBlock.setNumBytes(newBlock.getNumBytes());
    lastBlock.setGenerationStampAndVerifyReplicas(newBlock.getGenerationStamp());
    // find the DatanodeDescriptor objects
    final DatanodeStorageInfo[] storages = blockManager.getDatanodeManager().getDatanodeStorageInfos(newNodes, newStorageIDs, "src=%s, oldBlock=%s, newBlock=%s, clientName=%s", src, oldBlock, newBlock, clientName);
    lastBlock.getUnderConstructionFeature().setExpectedLocations(lastBlock, storages, lastBlock.getBlockType());
    FSDirWriteFileOp.persistBlocks(dir, src, pendingFile, logRetryCache);
}
Also used : DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) IOException(java.io.IOException)
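
The two guard clauses above enforce the pipeline-update invariants: the replacement block must carry a strictly larger generation stamp and must not be shorter than the block it replaces. A standalone sketch of the same checks, with illustrative names rather than a Hadoop API:

import java.io.IOException;

class PipelineUpdateSketch {
    static void checkPipelineUpdate(long oldGs, long newGs, long oldLen, long newLen)
            throws IOException {
        // the new generation stamp must strictly increase
        if (newGs <= oldGs) {
            throw new IOException("new generation stamp " + newGs
                    + " is not larger than the current one " + oldGs);
        }
        // the recovered block may grow but must never shrink
        if (newLen < oldLen) {
            throw new IOException("new length " + newLen
                    + " is smaller than the current length " + oldLen);
        }
    }
}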

Example 15 with DatanodeStorageInfo

use of org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo in project hadoop by apache.

the class TestReconstructStripedFile method testProcessErasureCodingTasksSubmitionShouldSucceed.

/*
   * Tests that processErasureCodingTasks does not throw exceptions due to
   * invalid ECTask submissions.
   */
@Test
public void testProcessErasureCodingTasksSubmitionShouldSucceed() throws Exception {
    DataNode dataNode = cluster.dataNodes.get(0).datanode;
    // Pack invalid (dummy) parameters into ecTasks. Irrespective of the
    // parameters, each submission to the task thread pool should succeed,
    // so that an exception in one task does not prevent the remaining tasks
    // in the list from being processed.
    int size = cluster.dataNodes.size();
    byte[] liveIndices = new byte[size];
    DatanodeInfo[] dataDNs = new DatanodeInfo[size + 1];
    DatanodeStorageInfo targetDnInfos_1 = BlockManagerTestUtil.newDatanodeStorageInfo(DFSTestUtil.getLocalDatanodeDescriptor(), new DatanodeStorage("s01"));
    DatanodeStorageInfo[] dnStorageInfo = new DatanodeStorageInfo[] { targetDnInfos_1 };
    BlockECReconstructionInfo invalidECInfo = new BlockECReconstructionInfo(new ExtendedBlock("bp-id", 123456), dataDNs, dnStorageInfo, liveIndices, StripedFileTestUtil.getDefaultECPolicy());
    List<BlockECReconstructionInfo> ecTasks = new ArrayList<>();
    ecTasks.add(invalidECInfo);
    dataNode.getErasureCodingWorker().processErasureCodingTasks(ecTasks);
}
Also used : BlockECReconstructionInfo(org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DatanodeStorageInfo(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DatanodeStorage(org.apache.hadoop.hdfs.server.protocol.DatanodeStorage) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) Test(org.junit.Test)
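
The property this test exercises is that processErasureCodingTasks isolates failures per task: a submission that fails on one malformed task should not stop the remaining tasks from being submitted. A minimal sketch of that submission pattern with hypothetical names; the real worker may differ in detail:

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class TaskSubmissionSketch {
    private final ExecutorService pool = Executors.newFixedThreadPool(4);

    void processTasks(List<Runnable> tasks) {
        for (Runnable task : tasks) {
            try {
                // submit() can throw, e.g. RejectedExecutionException
                pool.submit(task);
            } catch (RuntimeException e) {
                // log and continue: one bad task must not block the rest
                System.err.println("Failed to submit task: " + e);
            }
        }
    }
}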

Aggregations

DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 25
Test (org.junit.Test): 10
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8
DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor): 8
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 7
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 6
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 6
ArrayList (java.util.ArrayList): 5
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
Block (org.apache.hadoop.hdfs.protocol.Block): 4
Node (org.apache.hadoop.net.Node): 4
IOException (java.io.IOException): 3
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 3
BlockECReconstructionInfo (org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo): 3
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 3
FileNotFoundException (java.io.FileNotFoundException): 2
HashSet (java.util.HashSet): 2
StorageType (org.apache.hadoop.fs.StorageType): 2