Example 31 with ErasureCodingPolicy

Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

Class FSDirWriteFileOp, method addFile.

/**
   * Add the given filename to the fs.
   * @return the new INodesInPath instance that contains the new INode
   */
private static INodesInPath addFile(FSDirectory fsd, INodesInPath existing, byte[] localName, PermissionStatus permissions, short replication, long preferredBlockSize, String clientName, String clientMachine) throws IOException {
    Preconditions.checkNotNull(existing);
    long modTime = now();
    INodesInPath newiip;
    fsd.writeLock();
    try {
        // A file is striped exactly when an erasure coding policy is in
        // effect somewhere on its ancestor path; otherwise it uses
        // replicated, contiguous blocks.
        ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), existing);
        boolean isStriped = (ecPolicy != null);
        final BlockType blockType = isStriped ? BlockType.STRIPED : BlockType.CONTIGUOUS;
        // Striped files record an EC policy ID instead of a replication factor.
        final Short replicationFactor = (!isStriped ? replication : null);
        final Byte ecPolicyID = (isStriped ? ecPolicy.getId() : null);
        INodeFile newNode = newINodeFile(fsd.allocateNewInodeId(), permissions, modTime, modTime, replicationFactor, ecPolicyID, preferredBlockSize, blockType);
        newNode.setLocalName(localName);
        newNode.toUnderConstruction(clientName, clientMachine);
        newiip = fsd.addINode(existing, newNode, permissions.getPermission());
    } finally {
        fsd.writeUnlock();
    }
    if (newiip == null) {
        NameNode.stateChangeLog.info("DIR* addFile: failed to add " + existing.getPath() + "/" + DFSUtil.bytes2String(localName));
        return null;
    }
    if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("DIR* addFile: " + DFSUtil.bytes2String(localName) + " is added");
    }
    return newiip;
}
Also used: BlockType (org.apache.hadoop.hdfs.protocol.BlockType), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)
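
The pivot in this example is the nullable policy lookup: a file is striped exactly when some ancestor directory carries an erasure coding policy. A minimal sketch of that decision as a free-standing helper (the class and method names below are invented for illustration, not Hadoop code):

import org.apache.hadoop.hdfs.protocol.BlockType;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Illustrative helper, not part of Hadoop: maps the presence or absence
// of an EC policy to the block type chosen in addFile() above.
final class BlockTypeChooser {
    static BlockType choose(ErasureCodingPolicy ecPolicy) {
        // A null policy means no EC policy applies anywhere on the ancestor
        // path, so the file gets ordinary replicated (contiguous) blocks.
        return ecPolicy != null ? BlockType.STRIPED : BlockType.CONTIGUOUS;
    }
}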

Example 32 with ErasureCodingPolicy

Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

Class FSDirWriteFileOp, method addBlock.

/**
   * Add a block to the file. Returns a reference to the added block.
   */
private static BlockInfo addBlock(FSDirectory fsd, String path, INodesInPath inodesInPath, Block block, DatanodeStorageInfo[] targets, BlockType blockType) throws IOException {
    fsd.writeLock();
    try {
        final INodeFile fileINode = inodesInPath.getLastINode().asFile();
        Preconditions.checkState(fileINode.isUnderConstruction());
        // associate new last block for the file
        final BlockInfo blockInfo;
        if (blockType == BlockType.STRIPED) {
            ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), inodesInPath);
            short numDataUnits = (short) ecPolicy.getNumDataUnits();
            short numParityUnits = (short) ecPolicy.getNumParityUnits();
            short numLocations = (short) (numDataUnits + numParityUnits);
            // check quota limits and update space consumed
            fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(), numLocations, true);
            blockInfo = new BlockInfoStriped(block, ecPolicy);
            blockInfo.convertToBlockUnderConstruction(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
        } else {
            // check quota limits and update space consumed
            fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(), fileINode.getFileReplication(), true);
            short numLocations = fileINode.getFileReplication();
            blockInfo = new BlockInfoContiguous(block, numLocations);
            blockInfo.convertToBlockUnderConstruction(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
        }
        fsd.getBlockManager().addBlockCollection(blockInfo, fileINode);
        fileINode.addBlock(blockInfo);
        if (NameNode.stateChangeLog.isDebugEnabled()) {
            NameNode.stateChangeLog.debug("DIR* FSDirectory.addBlock: " + path + " with " + block + " block is added to the in-memory " + "file system");
        }
        return blockInfo;
    } finally {
        fsd.writeUnlock();
    }
}
Also used: BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped), BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy)
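
For a striped block group, updateCount() charges quota per storage location rather than per replica: with the built-in RS-6-3 policy one group spans nine locations (six data cells plus three parity cells). A worked sketch of that arithmetic, where the 128 MB block size and replication factor 3 are assumptions (the common Hadoop defaults):

// Worked sketch of the striped quota math in addBlock().
public final class StripedQuotaSketch {
    public static void main(String[] args) {
        short numDataUnits = 6;    // ecPolicy.getNumDataUnits() for RS-6-3
        short numParityUnits = 3;  // ecPolicy.getNumParityUnits() for RS-6-3
        short numLocations = (short) (numDataUnits + numParityUnits); // 9
        long preferredBlockSize = 128L * 1024 * 1024;                 // 128 MB
        // Striped: blockSize * (data + parity) = 1152 MB per block group.
        long stripedCharge = preferredBlockSize * numLocations;
        // Contiguous: blockSize * replication = 384 MB per block.
        long contiguousCharge = preferredBlockSize * 3;
        System.out.println("striped=" + stripedCharge + " contiguous=" + contiguousCharge);
    }
}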

Example 33 with ErasureCodingPolicy

Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

Class FSDirWriteFileOp, method validateAddBlock.

/**
   * Part I of getAdditionalBlock().
   * Analyze the state of the file under the read lock to determine whether
   * the client can add a new block, detect potential retries and lease
   * mismatches, and check minimal replication of the penultimate block.
   *
   * Generate target DataNode locations for the new block,
   * but do not create the new block yet.
   */
static ValidateAddBlockResult validateAddBlock(FSNamesystem fsn, FSPermissionChecker pc, String src, long fileId, String clientName, ExtendedBlock previous, LocatedBlock[] onRetryBlock) throws IOException {
    final long blockSize;
    final short numTargets;
    final byte storagePolicyID;
    String clientMachine;
    final BlockType blockType;
    INodesInPath iip = fsn.dir.resolvePath(pc, src, fileId);
    FileState fileState = analyzeFileState(fsn, iip, fileId, clientName, previous, onRetryBlock);
    if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
        // This is a retry, so there is no need to generate new locations.
        // Use the last block if it has locations.
        return null;
    }
    final INodeFile pendingFile = fileState.inode;
    if (!fsn.checkFileProgress(src, pendingFile, false)) {
        throw new NotReplicatedYetException("Not replicated yet: " + src);
    }
    if (pendingFile.getBlocks().length >= fsn.maxBlocksPerFile) {
        throw new IOException("File has reached the limit on maximum number of" + " blocks (" + DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY + "): " + pendingFile.getBlocks().length + " >= " + fsn.maxBlocksPerFile);
    }
    blockSize = pendingFile.getPreferredBlockSize();
    clientMachine = pendingFile.getFileUnderConstructionFeature().getClientMachine();
    blockType = pendingFile.getBlockType();
    ErasureCodingPolicy ecPolicy = null;
    if (blockType == BlockType.STRIPED) {
        ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsn, iip);
        numTargets = (short) (ecPolicy.getSchema().getNumDataUnits() + ecPolicy.getSchema().getNumParityUnits());
    } else {
        numTargets = pendingFile.getFileReplication();
    }
    storagePolicyID = pendingFile.getStoragePolicyID();
    return new ValidateAddBlockResult(blockSize, numTargets, storagePolicyID, clientMachine, blockType);
}
Also used: BlockType (org.apache.hadoop.hdfs.protocol.BlockType), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), IOException (java.io.IOException)
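
The target count falls directly out of the block type: one DataNode per schema cell for a striped group, one per replica otherwise. A hedged sketch of that rule as a standalone helper (the class and method names are invented; the getters are the ones used in the snippet above):

import org.apache.hadoop.hdfs.protocol.BlockType;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

// Illustrative helper mirroring validateAddBlock(): for RS-6-3 this
// returns 9 targets; for a replicated file, the replication factor.
final class TargetCount {
    static short numTargets(BlockType blockType, ErasureCodingPolicy ecPolicy, short replication) {
        if (blockType == BlockType.STRIPED) {
            return (short) (ecPolicy.getSchema().getNumDataUnits()
                + ecPolicy.getSchema().getNumParityUnits());
        }
        return replication;
    }
}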

Example 34 with ErasureCodingPolicy

Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

Class FSDirStatAndListingOp, method createFileStatus.

/**
   * Create an HdfsFileStatus from an iip.
   *
   * @param fsd the FSDirectory
   * @param iip the INodesInPath containing the INodeFile and its ancestors
   * @param child the child INode when building one entry of a directory
   *              listing of the iip, else null
   * @param storagePolicy the storage policy for the path or closest ancestor
   * @param needLocation whether block locations need to be included
   * @return a file status
   * @throws java.io.IOException if any error occurs
   */
private static HdfsFileStatus createFileStatus(FSDirectory fsd, INodesInPath iip, INode child, byte storagePolicy, boolean needLocation) throws IOException {
    assert fsd.hasReadLock();
    // only directory listing sets the status name.
    byte[] name = HdfsFileStatus.EMPTY_NAME;
    if (child != null) {
        name = child.getLocalNameBytes();
        // append the child so EC (erasure coding) and EZ (encryption zone)
        // lookups resolve against the full path
        iip = INodesInPath.append(iip, child, name);
    }
    // length is zero for directories
    long size = 0;
    short replication = 0;
    long blocksize = 0;
    final INode node = iip.getLastINode();
    final int snapshot = iip.getPathSnapshotId();
    LocatedBlocks loc = null;
    final boolean isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip);
    FileEncryptionInfo feInfo = null;
    final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), iip);
    if (node.isFile()) {
        final INodeFile fileNode = node.asFile();
        size = fileNode.computeFileSize(snapshot);
        replication = fileNode.getFileReplication(snapshot);
        blocksize = fileNode.getPreferredBlockSize();
        if (isEncrypted) {
            feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, iip);
        }
        if (needLocation) {
            final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
            final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
            final long fileSize = !inSnapshot && isUc ? fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
            loc = fsd.getBlockManager().createLocatedBlocks(fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size, false, inSnapshot, feInfo, ecPolicy);
            if (loc == null) {
                loc = new LocatedBlocks();
            }
        }
    }
    int childrenNum = node.isDirectory() ? node.asDirectory().getChildrenNum(snapshot) : 0;
    INodeAttributes nodeAttrs = fsd.getAttributes(iip);
    return createFileStatus(size, node.isDirectory(), replication, blocksize, node.getModificationTime(snapshot), node.getAccessTime(snapshot), getPermissionForFileStatus(nodeAttrs, isEncrypted), nodeAttrs.getUserName(), nodeAttrs.getGroupName(), node.isSymlink() ? node.asSymlink().getSymlink() : null, name, node.getId(), childrenNum, feInfo, storagePolicy, ecPolicy, loc);
}
Also used: LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), FileEncryptionInfo (org.apache.hadoop.fs.FileEncryptionInfo)
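
The policy resolved here travels back to clients on the HdfsFileStatus, and clients can also query it directly. A sketch of that client-side view, assuming a running HDFS cluster on the default FS URI and a hypothetical path /ec/dir; DistributedFileSystem#getErasureCodingPolicy is the public accessor in Hadoop 3.x:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public final class EcPolicyProbe {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at an HDFS NameNode.
        try (FileSystem fs = FileSystem.get(conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            // /ec/dir is a hypothetical directory with an EC policy set.
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(new Path("/ec/dir"));
            System.out.println(policy == null
                ? "replicated (no EC policy)"
                : policy.getName() + ", id=" + policy.getId());
        }
    }
}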

Example 35 with ErasureCodingPolicy

Use of org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy in project hadoop by apache.

Class FSDirStatAndListingOp, method getBlockLocations.

/**
   * Get block locations within the specified range.
   * @see ClientProtocol#getBlockLocations(String, long, long)
   * @throws IOException
   */
static GetBlockLocationsResult getBlockLocations(FSDirectory fsd, FSPermissionChecker pc, String src, long offset, long length, boolean needBlockToken) throws IOException {
    Preconditions.checkArgument(offset >= 0, "Negative offset is not supported. File: " + src);
    Preconditions.checkArgument(length >= 0, "Negative length is not supported. File: " + src);
    BlockManager bm = fsd.getBlockManager();
    fsd.readLock();
    try {
        final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
        src = iip.getPath();
        final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.READ);
            fsd.checkUnreadableBySuperuser(pc, iip);
        }
        final long fileSize = iip.isSnapshot() ? inode.computeFileSize(iip.getPathSnapshotId()) : inode.computeFileSizeNotIncludingLastUcBlock();
        boolean isUc = inode.isUnderConstruction();
        if (iip.isSnapshot()) {
            // if src indicates a snapshot file, we need to make sure the returned
            // blocks do not exceed the size of the snapshot file.
            length = Math.min(length, fileSize - offset);
            isUc = false;
        }
        final FileEncryptionInfo feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, iip);
        final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), iip);
        final LocatedBlocks blocks = bm.createLocatedBlocks(inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset, length, needBlockToken, iip.isSnapshot(), feInfo, ecPolicy);
        final long now = now();
        boolean updateAccessTime = fsd.isAccessTimeSupported() && !iip.isSnapshot() && now > inode.getAccessTime() + fsd.getAccessTimePrecision();
        return new GetBlockLocationsResult(updateAccessTime, blocks);
    } finally {
        fsd.readUnlock();
    }
}
Also used: BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), FileEncryptionInfo (org.apache.hadoop.fs.FileEncryptionInfo)
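
This server-side method ultimately backs the public FileSystem#getFileBlockLocations call. A sketch of the corresponding client request, assuming a reachable cluster and a hypothetical file path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class BlockLocationProbe {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            Path file = new Path("/data/example.txt"); // hypothetical file
            // offset/length here map onto the offset/length parameters of
            // getBlockLocations() above.
            BlockLocation[] locs = fs.getFileBlockLocations(
                fs.getFileStatus(file), 0L, Long.MAX_VALUE);
            for (BlockLocation loc : locs) {
                System.out.println("offset=" + loc.getOffset()
                    + " length=" + loc.getLength()
                    + " hosts=" + String.join(",", loc.getHosts()));
            }
        }
    }
}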

Aggregations

Types most often used together with ErasureCodingPolicy, with usage counts:

ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy): 46
Path (org.apache.hadoop.fs.Path): 18
Test (org.junit.Test): 16
IOException (java.io.IOException): 9
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 5
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 5
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 4
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 4
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 4
ServiceException (com.google.protobuf.ServiceException): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
BlockType (org.apache.hadoop.hdfs.protocol.BlockType): 3
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 3
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 3
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 3
ActionException (org.smartdata.action.ActionException): 3
ByteString (com.google.protobuf.ByteString): 2
HashSet (java.util.HashSet): 2
List (java.util.List): 2
Random (java.util.Random): 2