Search in sources:

Example 1 with BlockType

Use of org.apache.hadoop.hdfs.protocol.BlockType in project hadoop by apache.

From class BlockManager, method isPlacementPolicySatisfied:

boolean isPlacementPolicySatisfied(BlockInfo storedBlock) {
    List<DatanodeDescriptor> liveNodes = new ArrayList<>();
    Collection<DatanodeDescriptor> corruptNodes = corruptReplicas.getNodes(storedBlock);
    for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
        final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
        // From the rack policy point of view, count only replicas on nodes
        // that are not decommissioning or decommissioned and that do not
        // hold a corrupt copy of the block.
        if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()
                && ((corruptNodes == null) || !corruptNodes.contains(cur))) {
            liveNodes.add(cur);
        }
    }
    DatanodeInfo[] locs = liveNodes.toArray(new DatanodeInfo[liveNodes.size()]);
    BlockType blockType = storedBlock.getBlockType();
    BlockPlacementPolicy placementPolicy = placementPolicies.getPolicy(blockType);
    int numReplicas = blockType == STRIPED
            ? ((BlockInfoStriped) storedBlock).getRealTotalBlockNum()
            : storedBlock.getReplication();
    return placementPolicy.verifyBlockPlacement(locs, numReplicas).isPlacementPolicySatisfied();
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), BlockType (org.apache.hadoop.hdfs.protocol.BlockType), ArrayList (java.util.ArrayList)
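
A detail worth calling out in this example: the expected replica count branches on the block type, because a striped (erasure-coded) block is only fully placed once all of its internal blocks (data plus parity) have storages, while a contiguous block just needs its replication factor. A minimal sketch of that dispatch, assuming hadoop-hdfs internals on the classpath; the helper class and method names are hypothetical:

import org.apache.hadoop.hdfs.protocol.BlockType;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;

class ExpectedReplicas {
    // Hypothetical helper: total number of placed storages a block needs.
    static int of(BlockInfo storedBlock) {
        if (storedBlock.getBlockType() == BlockType.STRIPED) {
            // Striped blocks count their real internal blocks (data + parity);
            // for small files this can be fewer than the schema's total.
            return ((BlockInfoStriped) storedBlock).getRealTotalBlockNum();
        }
        // Contiguous blocks are satisfied by their replication factor.
        return storedBlock.getReplication();
    }
}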

Example 2 with BlockType

Use of org.apache.hadoop.hdfs.protocol.BlockType in project hadoop by apache.

From class FSDirWriteFileOp, method addFileForEditLog:

static INodeFile addFileForEditLog(FSDirectory fsd, long id,
        INodesInPath existing, byte[] localName, PermissionStatus permissions,
        List<AclEntry> aclEntries, List<XAttr> xAttrs, short replication,
        long modificationTime, long atime, long preferredBlockSize,
        boolean underConstruction, String clientName, String clientMachine,
        byte storagePolicyId) {
    final INodeFile newNode;
    Preconditions.checkNotNull(existing);
    assert fsd.hasWriteLock();
    try {
        // check if the file has an EC policy; a non-null policy means the
        // file's blocks are striped (erasure-coded)
        ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp
                .unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), existing);
        final boolean isStriped = ecPolicy != null;
        final BlockType blockType = isStriped ? BlockType.STRIPED : BlockType.CONTIGUOUS;
        final Short replicationFactor = (!isStriped ? replication : null);
        final Byte ecPolicyID = (isStriped ? ecPolicy.getId() : null);
        if (underConstruction) {
            newNode = newINodeFile(id, permissions, modificationTime, modificationTime, replicationFactor, ecPolicyID, preferredBlockSize, storagePolicyId, blockType);
            newNode.toUnderConstruction(clientName, clientMachine);
        } else {
            newNode = newINodeFile(id, permissions, modificationTime, atime, replicationFactor, ecPolicyID, preferredBlockSize, storagePolicyId, blockType);
        }
        newNode.setLocalName(localName);
        INodesInPath iip = fsd.addINode(existing, newNode, permissions.getPermission());
        if (iip != null) {
            if (aclEntries != null) {
                AclStorage.updateINodeAcl(newNode, aclEntries, CURRENT_STATE_ID);
            }
            if (xAttrs != null) {
                XAttrStorage.updateINodeXAttrs(newNode, xAttrs, CURRENT_STATE_ID);
            }
            return newNode;
        }
    } catch (IOException e) {
        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedAddFile: exception when add " + existing.getPath() + " to the file system", e);
        if (e instanceof FSLimitException.MaxDirectoryItemsExceededException) {
            NameNode.stateChangeLog.warn("Please increase " + "dfs.namenode.fs-limits.max-directory-items and make it " + "consistent across all NameNodes.");
        }
    }
    return null;
}
Also used: FSLimitException (org.apache.hadoop.hdfs.protocol.FSLimitException), BlockType (org.apache.hadoop.hdfs.protocol.BlockType), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), IOException (java.io.IOException)
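
The pattern to note here is mutual exclusion: an INodeFile records either a replication factor (CONTIGUOUS) or an erasure coding policy ID (STRIPED), never both, which is why exactly one of replicationFactor and ecPolicyID is always null above. A minimal sketch of that invariant as a value object, assuming hadoop-hdfs on the classpath; the class name is hypothetical:

import org.apache.hadoop.hdfs.protocol.BlockType;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

class BlockTypeFields {
    final BlockType blockType;
    final Short replication; // non-null only for CONTIGUOUS files
    final Byte ecPolicyId;   // non-null only for STRIPED files

    BlockTypeFields(ErasureCodingPolicy ecPolicy, short replication) {
        if (ecPolicy != null) {
            // An EC policy implies striped blocks; the policy ID is stored
            // in place of a replication factor.
            this.blockType = BlockType.STRIPED;
            this.replication = null;
            this.ecPolicyId = ecPolicy.getId();
        } else {
            this.blockType = BlockType.CONTIGUOUS;
            this.replication = replication;
            this.ecPolicyId = null;
        }
    }
}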

Example 3 with BlockType

Use of org.apache.hadoop.hdfs.protocol.BlockType in project hadoop by apache.

From class FSDirWriteFileOp, method storeAllocatedBlock:

/**
   * Part II of getAdditionalBlock().
   * Should repeat the same analysis of the file state as in Part I,
   * but under the write lock.
   * If the conditions still hold, then allocate a new block with
   * the new targets, add it to the INode and to the BlocksMap.
   */
static LocatedBlock storeAllocatedBlock(FSNamesystem fsn, String src,
        long fileId, String clientName, ExtendedBlock previous,
        DatanodeStorageInfo[] targets) throws IOException {
    long offset;
    // Run the full analysis again, since things could have changed
    // while chooseTarget() was executing.
    LocatedBlock[] onRetryBlock = new LocatedBlock[1];
    INodesInPath iip = fsn.dir.resolvePath(null, src, fileId);
    FileState fileState = analyzeFileState(fsn, iip, fileId, clientName, previous, onRetryBlock);
    final INodeFile pendingFile = fileState.inode;
    src = fileState.path;
    if (onRetryBlock[0] != null) {
        if (onRetryBlock[0].getLocations().length > 0) {
            // This is a retry. Just return the last block if it already has locations.
            return onRetryBlock[0];
        } else {
            // add new chosen targets to already allocated block and return
            BlockInfo lastBlockInFile = pendingFile.getLastBlock();
            lastBlockInFile.getUnderConstructionFeature()
                    .setExpectedLocations(lastBlockInFile, targets,
                            pendingFile.getBlockType());
            offset = pendingFile.computeFileSize();
            return makeLocatedBlock(fsn, lastBlockInFile, targets, offset);
        }
    }
    // commit the last block and complete it if it has minimum replicas
    fsn.commitOrCompleteLastBlock(pendingFile, fileState.iip, ExtendedBlock.getLocalBlock(previous));
    // allocate new block, record block locations in INode.
    final BlockType blockType = pendingFile.getBlockType();
    Block newBlock = fsn.createNewBlock(blockType);
    INodesInPath inodesInPath = INodesInPath.fromINode(pendingFile);
    saveAllocatedBlock(fsn, src, inodesInPath, newBlock, targets, blockType);
    persistNewBlock(fsn, src, pendingFile);
    offset = pendingFile.computeFileSize();
    // Return located block
    return makeLocatedBlock(fsn, fsn.getStoredBlock(newBlock), targets, offset);
}
Also used: BlockType (org.apache.hadoop.hdfs.protocol.BlockType), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block)
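
The reason createNewBlock takes the block type: contiguous blocks and striped block groups draw their IDs from separate sequential generators, and a striped group reserves a whole range of IDs, one per potential internal block. A toy illustration of that scheme, not the namenode's actual generator; the class name, starting values, and group size of 16 are assumptions:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockType;

class ToyBlockIdGenerator {
    // Hypothetical counters standing in for the namenode's ID generators.
    private long nextContiguousId = 1L << 30;
    private long nextGroupId = Long.MIN_VALUE;
    private static final int MAX_BLOCKS_IN_GROUP = 16; // assumed group size

    Block newBlock(BlockType type, long genStamp) {
        final long id;
        if (type == BlockType.STRIPED) {
            // A block group reserves a contiguous ID range so each internal
            // block can be addressed as groupId + index.
            id = nextGroupId;
            nextGroupId += MAX_BLOCKS_IN_GROUP;
        } else {
            id = nextContiguousId++;
        }
        return new Block(id, 0, genStamp);
    }
}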

Example 4 with BlockType

Use of org.apache.hadoop.hdfs.protocol.BlockType in project hadoop by apache.

From class FSNamesystem, method getAdditionalDatanode:

/** @see ClientProtocol#getAdditionalDatanode */
LocatedBlock getAdditionalDatanode(String src, long fileId,
        final ExtendedBlock blk, final DatanodeInfo[] existings,
        final String[] storageIDs, final Set<Node> excludes,
        final int numAdditionalNodes, final String clientName)
        throws IOException {
    // check if the feature is enabled
    dtpReplaceDatanodeOnFailure.checkEnabled();
    Node clientnode = null;
    String clientMachine;
    final long preferredblocksize;
    final byte storagePolicyID;
    final List<DatanodeStorageInfo> chosen;
    final BlockType blockType;
    checkOperation(OperationCategory.READ);
    FSPermissionChecker pc = getPermissionChecker();
    readLock();
    try {
        checkOperation(OperationCategory.READ);
        // check safe mode
        checkNameNodeSafeMode("Cannot add datanode; src=" + src + ", blk=" + blk);
        final INodesInPath iip = dir.resolvePath(pc, src, fileId);
        src = iip.getPath();
        // check lease
        final INodeFile file = checkLease(iip, clientName, fileId);
        clientMachine = file.getFileUnderConstructionFeature().getClientMachine();
        clientnode = blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
        preferredblocksize = file.getPreferredBlockSize();
        storagePolicyID = file.getStoragePolicyID();
        blockType = file.getBlockType();
        // find datanode storages
        final DatanodeManager dm = blockManager.getDatanodeManager();
        chosen = Arrays.asList(dm.getDatanodeStorageInfos(existings, storageIDs,
                "src=%s, fileId=%d, blk=%s, clientName=%s, clientMachine=%s",
                src, fileId, blk, clientName, clientMachine));
    } finally {
        readUnlock("getAdditionalDatanode");
    }
    if (clientnode == null) {
        clientnode = FSDirWriteFileOp.getClientNode(blockManager, clientMachine);
    }
    // choose new datanodes.
    final DatanodeStorageInfo[] targets = blockManager
            .chooseTarget4AdditionalDatanode(src, numAdditionalNodes, clientnode,
                    chosen, excludes, preferredblocksize, storagePolicyID,
                    blockType);
    final LocatedBlock lb = BlockManager.newLocatedBlock(blk, targets, -1, false);
    blockManager.setBlockToken(lb, BlockTokenIdentifier.AccessMode.COPY);
    return lb;
}
Also used: Node (org.apache.hadoop.net.Node), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockType (org.apache.hadoop.hdfs.protocol.BlockType)
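
Reading blockType under the lock lets chooseTarget4AdditionalDatanode resolve the right placement policy, since contiguous and striped blocks may be governed by different BlockPlacementPolicy implementations (compare placementPolicies.getPolicy(blockType) in Example 1). A minimal sketch of such a per-type lookup, with hypothetical names; the generic parameter stands in for the policy type:

import java.util.EnumMap;
import java.util.Map;
import org.apache.hadoop.hdfs.protocol.BlockType;

class PolicyRegistry<P> {
    // One policy per block type; EnumMap gives compact, fast lookups.
    private final Map<BlockType, P> policies = new EnumMap<>(BlockType.class);

    void register(BlockType type, P policy) {
        policies.put(type, policy);
    }

    P getPolicy(BlockType type) {
        return policies.get(type);
    }
}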

Example 5 with BlockType

Use of org.apache.hadoop.hdfs.protocol.BlockType in project hadoop by apache.

From class TestFSImage, method testBlockTypeProtoDefaultsToContiguous:

@Test
public void testBlockTypeProtoDefaultsToContiguous() throws Exception {
    INodeSection.INodeFile.Builder builder = INodeSection.INodeFile.newBuilder();
    INodeSection.INodeFile inodeFile = builder.build();
    BlockType defaultBlockType = PBHelperClient.convert(inodeFile.getBlockType());
    assertEquals(BlockType.CONTIGUOUS, defaultBlockType);
}
Also used: INodeSection (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection), BlockType (org.apache.hadoop.hdfs.protocol.BlockType), Test (org.junit.Test)
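
The test leans on the protobuf default: an INodeFile message built without an explicit blockType deserializes as CONTIGUOUS, so fsimages written before erasure coding existed still load correctly. A complementary sketch that exercises the non-default value, assuming PBHelperClient.convert is overloaded in both directions as the Aggregations list below suggests:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hdfs.protocol.BlockType;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.junit.Test;

public class TestBlockTypeRoundTrip {
    @Test
    public void testStripedSurvivesProtoRoundTrip() {
        // enum -> proto -> enum should be lossless for the STRIPED value.
        BlockTypeProto proto = PBHelperClient.convert(BlockType.STRIPED);
        assertEquals(BlockType.STRIPED, PBHelperClient.convert(proto));
    }
}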

Aggregations

BlockType (org.apache.hadoop.hdfs.protocol.BlockType): 8 usages
ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy): 3 usages
IOException (java.io.IOException): 2 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 2 usages
Test (org.junit.Test): 2 usages
ArrayList (java.util.ArrayList): 1 usage
Block (org.apache.hadoop.hdfs.protocol.Block): 1 usage
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 1 usage
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 1 usage
FSLimitException (org.apache.hadoop.hdfs.protocol.FSLimitException): 1 usage
BlockTypeProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockTypeProto): 1 usage
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 1 usage
DatanodeManager (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager): 1 usage
DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo): 1 usage
INodeSection (org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection): 1 usage
Node (org.apache.hadoop.net.Node): 1 usage