Example 36 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From the class TestINodeFile, method checkEquals.

/** Assert that two block listings contain the same blocks with the same sizes. */
private static void checkEquals(LocatedBlocks l1, LocatedBlocks l2) {
    List<LocatedBlock> list1 = l1.getLocatedBlocks();
    List<LocatedBlock> list2 = l2.getLocatedBlocks();
    assertEquals(list1.size(), list2.size());
    for (int i = 0; i < list1.size(); i++) {
        LocatedBlock b1 = list1.get(i);
        LocatedBlock b2 = list2.get(i);
        assertEquals(b1.getBlock(), b2.getBlock());
        assertEquals(b1.getBlockSize(), b2.getBlockSize());
    }
}
Also used: LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)
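
A minimal usage sketch for a helper like this, assuming a MiniDFSCluster and an invented test file at /test/file (neither taken from TestINodeFile): fetch the same file's block listing twice and assert the listings agree.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

static void verifySameListing() throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        // Invented fixture: a 1 KB file with replication factor 1.
        DFSTestUtil.createFile(fs, new Path("/test/file"), 1024L, (short) 1, 0L);
        DFSClient client = fs.getClient();
        // Two independent lookups of the same file should agree
        // block for block in both identity and size.
        LocatedBlocks l1 = client.getLocatedBlocks("/test/file", 0L);
        LocatedBlocks l2 = client.getLocatedBlocks("/test/file", 0L);
        checkEquals(l1, l2);
    } finally {
        cluster.shutdown();
    }
}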

Example 37 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From the class JsonUtilClient, method toLocatedBlock.

/** Convert a JSON map to a LocatedBlock. */
static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
    if (m == null) {
        return null;
    }
    final ExtendedBlock b = toExtendedBlock((Map<?, ?>) m.get("block"));
    final DatanodeInfo[] locations = toDatanodeInfoArray(getList(m, "locations"));
    final long startOffset = ((Number) m.get("startOffset")).longValue();
    final boolean isCorrupt = (Boolean) m.get("isCorrupt");
    final DatanodeInfo[] cachedLocations = toDatanodeInfoArray(getList(m, "cachedLocations"));
    final StorageType[] storageTypes = toStorageTypeArray(getList(m, "storageTypes"));
    final LocatedBlock locatedblock = new LocatedBlock(b, locations, null, storageTypes, startOffset, isCorrupt, cachedLocations);
    locatedblock.setBlockToken(toBlockToken((Map<?, ?>) m.get("blockToken")));
    return locatedblock;
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Map (java.util.Map)
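
A hedged sketch of the JSON shape this parser reads, built with Jackson; every literal value is invented, and since toLocatedBlock is package-private the call only compiles from within org.apache.hadoop.hdfs.web:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

static LocatedBlock parseExample() throws IOException {
    // Keys mirror the reads in toLocatedBlock; the values are made up.
    String json =
        "{\"block\":{\"blockPoolId\":\"BP-1\",\"blockId\":1073741825,"
            + "\"generationStamp\":1001,\"numBytes\":1024},"
            + "\"locations\":[],\"startOffset\":0,\"isCorrupt\":false,"
            + "\"cachedLocations\":[],\"storageTypes\":[\"DISK\"],"
            + "\"blockToken\":null}";
    Map<?, ?> m = new ObjectMapper().readValue(json, Map.class);
    return JsonUtilClient.toLocatedBlock(m);
}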

Example 38 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From the class PBHelperClient, method convertLocatedBlockProto.

public static LocatedBlock convertLocatedBlockProto(LocatedBlockProto proto) {
    if (proto == null)
        return null;
    List<DatanodeInfoProto> locs = proto.getLocsList();
    DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
    for (int i = 0; i < locs.size(); i++) {
        targets[i] = convert(locs.get(i));
    }
    final StorageType[] storageTypes = convertStorageTypes(proto.getStorageTypesList(), locs.size());
    final int storageIDsCount = proto.getStorageIDsCount();
    final String[] storageIDs;
    if (storageIDsCount == 0) {
        storageIDs = null;
    } else {
        Preconditions.checkState(storageIDsCount == locs.size());
        storageIDs = proto.getStorageIDsList().toArray(new String[storageIDsCount]);
    }
    byte[] indices = null;
    if (proto.hasBlockIndices()) {
        indices = proto.getBlockIndices().toByteArray();
    }
    // Set values from the isCached list, re-using references from locs
    List<DatanodeInfo> cachedLocs = new ArrayList<>(locs.size());
    List<Boolean> isCachedList = proto.getIsCachedList();
    for (int i = 0; i < isCachedList.size(); i++) {
        if (isCachedList.get(i)) {
            cachedLocs.add(targets[i]);
        }
    }
    final LocatedBlock lb;
    if (indices == null) {
        lb = new LocatedBlock(PBHelperClient.convert(proto.getB()), targets, storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(), cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
    } else {
        lb = new LocatedStripedBlock(PBHelperClient.convert(proto.getB()), targets, storageIDs, storageTypes, indices, proto.getOffset(), proto.getCorrupt(), cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
        List<TokenProto> tokenProtos = proto.getBlockTokensList();
        Token<BlockTokenIdentifier>[] blockTokens = convertTokens(tokenProtos);
        ((LocatedStripedBlock) lb).setBlockTokens(blockTokens);
    }
    lb.setBlockToken(convert(proto.getBlockToken()));
    return lb;
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), TokenProto (org.apache.hadoop.security.proto.SecurityProtos.TokenProto), ArrayList (java.util.ArrayList), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Token (org.apache.hadoop.security.token.Token), ByteString (com.google.protobuf.ByteString), DatanodeInfoProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto), LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock)
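
A hedged round-trip sketch pairing this converter with its protobuf-producing counterpart convertLocatedBlock(LocatedBlock); the block pool id, block id, length, and generation stamp are invented:

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;

static void roundTrip() {
    ExtendedBlock blk = new ExtendedBlock("BP-1", 1L, 1024L, 1001L);
    LocatedBlock original = new LocatedBlock(blk, new DatanodeInfo[0]);
    // Serialize to the wire form, then convert back.
    LocatedBlockProto proto = PBHelperClient.convertLocatedBlock(original);
    LocatedBlock copy = PBHelperClient.convertLocatedBlockProto(proto);
    // The copy carries the same block identity and offset.
    assertEquals(original.getBlock(), copy.getBlock());
    assertEquals(original.getStartOffset(), copy.getStartOffset());
}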

Example 39 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From the class PBHelperClient, method convertLocatedBlock.

public static List<LocatedBlock> convertLocatedBlock(List<LocatedBlockProto> lb) {
    if (lb == null)
        return null;
    final int len = lb.size();
    List<LocatedBlock> result = new ArrayList<>(len);
    for (LocatedBlockProto aLb : lb) {
        result.add(convertLocatedBlockProto(aLb));
    }
    return result;
}
Also used: LocatedBlockProto (org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto), ArrayList (java.util.ArrayList), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)
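
A hedged sketch of the list form, reusing the invented block from the round-trip above; note the helper mirrors its input's null-ness, returning null rather than an empty list for null input:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;

static List<LocatedBlock> convertList() {
    List<LocatedBlockProto> protos = new ArrayList<>();
    // A single invented entry, serialized and then batch-converted.
    protos.add(PBHelperClient.convertLocatedBlock(
        new LocatedBlock(new ExtendedBlock("BP-1", 1L, 1024L, 1001L),
            new DatanodeInfo[0])));
    return PBHelperClient.convertLocatedBlock(protos);
}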

Example 40 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From the class FSDirAppendOp, method appendFile.

/**
   * Append to an existing file.
   * <p>
   *
   * The method returns the last block of the file if this is a partial block,
   * which can still be used for writing more data. The client uses the
   * returned block locations to form the data pipeline for this block.<br>
   * The {@link LocatedBlock} will be null if the last block is full.
   * The client then allocates a new block with the next call using
   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#addBlock}.
   * <p>
   *
   * For description of parameters and exceptions thrown see
   * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#append}
   *
   * @param fsn namespace
   * @param srcArg path name
   * @param pc permission checker to check fs permission
   * @param holder client name
   * @param clientMachine client machine info
   * @param newBlock if the data is appended to a new block
   * @param logRetryCache whether to record RPC ids in editlog for retry cache
   *                      rebuilding
   * @return the last block with status
   */
static LastBlockWithStatus appendFile(final FSNamesystem fsn, final String srcArg, final FSPermissionChecker pc, final String holder, final String clientMachine, final boolean newBlock, final boolean logRetryCache) throws IOException {
    assert fsn.hasWriteLock();
    final LocatedBlock lb;
    final FSDirectory fsd = fsn.getFSDirectory();
    final INodesInPath iip;
    fsd.writeLock();
    try {
        iip = fsd.resolvePath(pc, srcArg, DirOp.WRITE);
        // Verify that the destination does not exist as a directory already
        final INode inode = iip.getLastINode();
        final String path = iip.getPath();
        if (inode != null && inode.isDirectory()) {
            throw new FileAlreadyExistsException("Cannot append to directory " + path + "; already exists as a directory.");
        }
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.WRITE);
        }
        if (inode == null) {
            throw new FileNotFoundException("Failed to append to non-existent file " + path + " for client " + clientMachine);
        }
        final INodeFile file = INodeFile.valueOf(inode, path, true);
        // appending to files with striped blocks is not supported
        if (file.isStriped()) {
            throw new UnsupportedOperationException("Cannot append to files with striped block " + path);
        }
        BlockManager blockManager = fsd.getBlockManager();
        final BlockStoragePolicy lpPolicy = blockManager.getStoragePolicy("LAZY_PERSIST");
        if (lpPolicy != null && lpPolicy.getId() == file.getStoragePolicyID()) {
            throw new UnsupportedOperationException("Cannot append to lazy persist file " + path);
        }
        // Opening an existing file for append - may need to recover lease.
        fsn.recoverLeaseInternal(RecoverLeaseOp.APPEND_FILE, iip, path, holder, clientMachine, false);
        final BlockInfo lastBlock = file.getLastBlock();
        // Check that the block has at least minimum replication.
        if (lastBlock != null) {
            if (lastBlock.getBlockUCState() == BlockUCState.COMMITTED) {
                throw new RetriableException(new NotReplicatedYetException("append: lastBlock=" + lastBlock + " of src=" + path + " is COMMITTED but not yet COMPLETE."));
            } else if (lastBlock.isComplete() && !blockManager.isSufficientlyReplicated(lastBlock)) {
                throw new IOException("append: lastBlock=" + lastBlock + " of src=" + path + " is not sufficiently replicated yet.");
            }
        }
        lb = prepareFileForAppend(fsn, iip, holder, clientMachine, newBlock, true, logRetryCache);
    } catch (IOException ie) {
        NameNode.stateChangeLog.warn("DIR* NameSystem.append: " + ie.getMessage());
        throw ie;
    } finally {
        fsd.writeUnlock();
    }
    HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip);
    if (lb != null) {
        NameNode.stateChangeLog.debug("DIR* NameSystem.appendFile: file {} for {} at {} block {} block" + " size {}", srcArg, holder, clientMachine, lb.getBlock(), lb.getBlock().getNumBytes());
    }
    return new LastBlockWithStatus(lb, stat);
}
Also used: FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException), LastBlockWithStatus (org.apache.hadoop.hdfs.protocol.LastBlockWithStatus), FileNotFoundException (java.io.FileNotFoundException), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), IOException (java.io.IOException), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), RetriableException (org.apache.hadoop.ipc.RetriableException)
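
On the client side this NameNode operation backs an ordinary append call; a hedged sketch, assuming a configured FileSystem and an existing /test/file (both invented here):

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

static void appendExample(Configuration conf) throws Exception {
    FileSystem fs = FileSystem.get(conf);
    // If the file's last block is partial, appendFile returns it inside
    // LastBlockWithStatus and the client resumes that block's pipeline;
    // if the last block is full, the LocatedBlock is null and the client
    // allocates a fresh block via ClientProtocol#addBlock on first write.
    try (FSDataOutputStream out = fs.append(new Path("/test/file"))) {
        out.write("more data".getBytes(StandardCharsets.UTF_8));
    }
}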

Aggregations

LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 196
Test (org.junit.Test): 92
Path (org.apache.hadoop.fs.Path): 86
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 72
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 49
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 49
Configuration (org.apache.hadoop.conf.Configuration): 40
IOException (java.io.IOException): 34
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 34
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 33
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 33
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 25
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 25
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 24
StorageType (org.apache.hadoop.fs.StorageType): 23
ArrayList (java.util.ArrayList): 22
Block (org.apache.hadoop.hdfs.protocol.Block): 16
FileSystem (org.apache.hadoop.fs.FileSystem): 15
InetSocketAddress (java.net.InetSocketAddress): 11
File (java.io.File): 9