
Example 61 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From the class JsonUtilClient, method toLocatedBlocks.

/** Convert a JSON map to LocatedBlocks. */
static LocatedBlocks toLocatedBlocks(final Map<?, ?> json) throws IOException {
    if (json == null) {
        return null;
    }
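    // The JSON payload is nested under the class's simple name, i.e. the "LocatedBlocks" key.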
    final Map<?, ?> m = (Map<?, ?>) json.get(LocatedBlocks.class.getSimpleName());
    final long fileLength = ((Number) m.get("fileLength")).longValue();
    final boolean isUnderConstruction = (Boolean) m.get("isUnderConstruction");
    final List<LocatedBlock> locatedBlocks = toLocatedBlockList(getList(m, "locatedBlocks"));
    final LocatedBlock lastLocatedBlock = toLocatedBlock((Map<?, ?>) m.get("lastLocatedBlock"));
    final boolean isLastBlockComplete = (Boolean) m.get("isLastBlockComplete");
    return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks, lastLocatedBlock, isLastBlockComplete, null, null);
}
Also used: LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Map (java.util.Map)
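
To make the expected input concrete, here is a minimal, hand-built sketch of the map shape toLocatedBlocks consumes. JsonUtilClient parses WebHDFS JSON responses, so in practice the map comes from a JSON parser rather than being built by hand; the class name and the literal values below are illustrative placeholders, not output from a live cluster.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class LocatedBlocksJsonShape {
    public static Map<String, Object> sampleJson() {
        Map<String, Object> inner = new HashMap<>();
        inner.put("fileLength", 134217728L);                  // read via ((Number) ...).longValue()
        inner.put("isUnderConstruction", false);              // cast to Boolean
        inner.put("locatedBlocks", Collections.emptyList());  // a list of Map<?, ?>, one per block
        inner.put("lastLocatedBlock", null);                  // a Map<?, ?>, or null when absent
        inner.put("isLastBlockComplete", true);               // cast to Boolean
        // toLocatedBlocks unwraps the payload by LocatedBlocks.class.getSimpleName(),
        // so the wrapper key must be exactly "LocatedBlocks".
        Map<String, Object> json = new HashMap<>();
        json.put("LocatedBlocks", inner);
        return json;
    }
}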

Example 62 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From the class FSDirStatAndListingOp, method createFileStatus.

/**
   * Create an HDFS file status from an iip.
   *
   * @param fsd the FSDirectory
   * @param iip the INodesInPath containing the INodeFile and its ancestors
   * @param child the child INode for a directory listing of the iip, else null
   * @param storagePolicy the storage policy for the path or its closest ancestor
   * @param needLocation whether block locations need to be included or not
   * @return a file status
   * @throws java.io.IOException if any error occurs
   */
private static HdfsFileStatus createFileStatus(FSDirectory fsd, INodesInPath iip, INode child, byte storagePolicy, boolean needLocation) throws IOException {
    assert fsd.hasReadLock();
    // only directory listing sets the status name.
    byte[] name = HdfsFileStatus.EMPTY_NAME;
    if (child != null) {
        name = child.getLocalNameBytes();
        // have to do this for EC and EZ lookups...
        iip = INodesInPath.append(iip, child, name);
    }
    // length is zero for directories
    long size = 0;
    short replication = 0;
    long blocksize = 0;
    final INode node = iip.getLastINode();
    final int snapshot = iip.getPathSnapshotId();
    LocatedBlocks loc = null;
    final boolean isEncrypted = FSDirEncryptionZoneOp.isInAnEZ(fsd, iip);
    FileEncryptionInfo feInfo = null;
    final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), iip);
    if (node.isFile()) {
        final INodeFile fileNode = node.asFile();
        size = fileNode.computeFileSize(snapshot);
        replication = fileNode.getFileReplication(snapshot);
        blocksize = fileNode.getPreferredBlockSize();
        if (isEncrypted) {
            feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, iip);
        }
        if (needLocation) {
            final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID;
            final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
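            // For a file still being written, report the length without its last, still-mutable block.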
            final long fileSize = !inSnapshot && isUc ? fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
            loc = fsd.getBlockManager().createLocatedBlocks(fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size, false, inSnapshot, feInfo, ecPolicy);
            if (loc == null) {
                loc = new LocatedBlocks();
            }
        }
    }
    int childrenNum = node.isDirectory() ? node.asDirectory().getChildrenNum(snapshot) : 0;
    INodeAttributes nodeAttrs = fsd.getAttributes(iip);
    return createFileStatus(size, node.isDirectory(), replication, blocksize, node.getModificationTime(snapshot), node.getAccessTime(snapshot), getPermissionForFileStatus(nodeAttrs, isEncrypted), nodeAttrs.getUserName(), nodeAttrs.getGroupName(), node.isSymlink() ? node.asSymlink().getSymlink() : null, name, node.getId(), childrenNum, feInfo, storagePolicy, ecPolicy, loc);
}
Also used: LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), FileEncryptionInfo (org.apache.hadoop.fs.FileEncryptionInfo)
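
createFileStatus only attaches a LocatedBlocks when needLocation is true, which is the path taken for location-aware directory listings. As a client-side illustration, the sketch below uses the public FileSystem.listLocatedStatus API to request such a listing; the class name and the "/data" path are placeholders, and the default Configuration is assumed to resolve to an HDFS cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListWithLocations {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            // A located listing asks the NameNode for block locations up front,
            // i.e. the needLocation == true branch of createFileStatus.
            RemoteIterator<LocatedFileStatus> it = fs.listLocatedStatus(new Path("/data"));
            while (it.hasNext()) {
                LocatedFileStatus st = it.next();
                System.out.println(st.getPath() + " len=" + st.getLen()
                    + " blocks=" + st.getBlockLocations().length);
            }
        }
    }
}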

Example 63 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From the class FSDirStatAndListingOp, method getBlockLocations.

/**
   * Get block locations within the specified range.
   * @see ClientProtocol#getBlockLocations(String, long, long)
   * @throws IOException
   */
static GetBlockLocationsResult getBlockLocations(FSDirectory fsd, FSPermissionChecker pc, String src, long offset, long length, boolean needBlockToken) throws IOException {
    Preconditions.checkArgument(offset >= 0, "Negative offset is not supported. File: " + src);
    Preconditions.checkArgument(length >= 0, "Negative length is not supported. File: " + src);
    BlockManager bm = fsd.getBlockManager();
    fsd.readLock();
    try {
        final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
        src = iip.getPath();
        final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.READ);
            fsd.checkUnreadableBySuperuser(pc, iip);
        }
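        // A snapshot freezes the file length at snapshot time; a live file excludes its last under-construction block.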
        final long fileSize = iip.isSnapshot() ? inode.computeFileSize(iip.getPathSnapshotId()) : inode.computeFileSizeNotIncludingLastUcBlock();
        boolean isUc = inode.isUnderConstruction();
        if (iip.isSnapshot()) {
            // if src indicates a snapshot file, we need to make sure the returned
            // blocks do not exceed the size of the snapshot file.
            length = Math.min(length, fileSize - offset);
            isUc = false;
        }
        final FileEncryptionInfo feInfo = FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, iip);
        final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), iip);
        final LocatedBlocks blocks = bm.createLocatedBlocks(inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset, length, needBlockToken, iip.isSnapshot(), feInfo, ecPolicy);
        final long now = now();
        boolean updateAccessTime = fsd.isAccessTimeSupported() && !iip.isSnapshot() && now > inode.getAccessTime() + fsd.getAccessTimePrecision();
        return new GetBlockLocationsResult(updateAccessTime, blocks);
    } finally {
        fsd.readUnlock();
    }
}
Also used: BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), ErasureCodingPolicy (org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), FileEncryptionInfo (org.apache.hadoop.fs.FileEncryptionInfo)
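
On the wire, this method serves ClientProtocol#getBlockLocations, as its Javadoc notes. A client-side counterpart is the public FileSystem.getFileBlockLocations call sketched below; the class name and the example path are placeholders, and the default Configuration is assumed to resolve to an HDFS cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationsExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path file = new Path("/data/example.bin");
        try (FileSystem fs = FileSystem.get(conf)) {
            FileStatus status = fs.getFileStatus(file);
            // The (offset, length) pair selects the block range, mirroring the
            // non-negative offset and length checked by the Preconditions above.
            BlockLocation[] locs = fs.getFileBlockLocations(status, 0, status.getLen());
            for (BlockLocation loc : locs) {
                System.out.println(loc.getOffset() + "+" + loc.getLength()
                    + " on " + String.join(",", loc.getHosts()));
            }
        }
    }
}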

Example 64 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From the class NamenodeFsck, method check.

@VisibleForTesting
void check(String parent, HdfsFileStatus file, Result replRes, Result ecRes) throws IOException {
    String path = file.getFullName(parent);
    if (file.isDir()) {
        checkDir(path, replRes, ecRes);
        return;
    }
    if (file.isSymlink()) {
        if (showFiles) {
            out.println(path + " <symlink>");
        }
        totalSymlinks++;
        return;
    }
    LocatedBlocks blocks = getBlockLocations(path, file);
    if (blocks == null) {
        // the file is deleted
        return;
    }
    final Result r = file.getErasureCodingPolicy() != null ? ecRes : replRes;
    collectFileSummary(path, file, r, blocks);
    if (showprogress && (replRes.totalFiles + ecRes.totalFiles) % 100 == 0) {
        out.println();
        out.flush();
    }
    collectBlocksSummary(parent, file, r, blocks);
}
Also used: LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), VisibleForTesting (com.google.common.annotations.VisibleForTesting)
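
NamenodeFsck.check runs inside the NameNode whenever fsck is invoked. As a usage sketch, the snippet below drives fsck from Java through the DFSck tool, the same entry point the hdfs fsck shell command uses; the class name is a placeholder and the flags are just examples (-files prints per-file detail, which corresponds to the showFiles output above).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class RunFsck {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Check the whole namespace and print per-file and per-block detail.
        int rc = ToolRunner.run(new DFSck(conf), new String[] { "/", "-files", "-blocks" });
        System.exit(rc);
    }
}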

Example 65 with LocatedBlocks

Use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

From the class DFSTestUtil, method runOperations.

/**
   * Run a set of operations so that an edit-log entry is generated for each opcode.
   */
public static void runOperations(MiniDFSCluster cluster, DistributedFileSystem filesystem, Configuration conf, long blockSize, int nnIndex) throws IOException {
    // create FileContext for rename2
    FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
    // OP_ADD 0
    final Path pathFileCreate = new Path("/file_create");
    FSDataOutputStream s = filesystem.create(pathFileCreate);
    // OP_CLOSE 9
    s.close();
    // OP_APPEND 47
    FSDataOutputStream s2 = filesystem.append(pathFileCreate, 4096, null);
    s2.close();
    // OP_UPDATE_BLOCKS 25
    final String updateBlockFile = "/update_blocks";
    FSDataOutputStream fout = filesystem.create(new Path(updateBlockFile), true, 4096, (short) 1, 4096L);
    fout.write(1);
    fout.hflush();
    long fileId = ((DFSOutputStream) fout.getWrappedStream()).getFileId();
    DFSClient dfsclient = DFSClientAdapter.getDFSClient(filesystem);
    LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(updateBlockFile, 0, Integer.MAX_VALUE);
    dfsclient.getNamenode().abandonBlock(blocks.get(0).getBlock(), fileId, updateBlockFile, dfsclient.clientName);
    fout.close();
    // OP_SET_STORAGE_POLICY 45
    filesystem.setStoragePolicy(pathFileCreate, HdfsConstants.HOT_STORAGE_POLICY_NAME);
    // OP_RENAME_OLD 1
    final Path pathFileMoved = new Path("/file_moved");
    filesystem.rename(pathFileCreate, pathFileMoved);
    // OP_DELETE 2
    filesystem.delete(pathFileMoved, false);
    // OP_MKDIR 3
    Path pathDirectoryMkdir = new Path("/directory_mkdir");
    filesystem.mkdirs(pathDirectoryMkdir);
    // OP_ALLOW_SNAPSHOT 29
    filesystem.allowSnapshot(pathDirectoryMkdir);
    // OP_DISALLOW_SNAPSHOT 30
    filesystem.disallowSnapshot(pathDirectoryMkdir);
    // OP_CREATE_SNAPSHOT 26
    String ssName = "snapshot1";
    filesystem.allowSnapshot(pathDirectoryMkdir);
    filesystem.createSnapshot(pathDirectoryMkdir, ssName);
    // OP_RENAME_SNAPSHOT 28
    String ssNewName = "snapshot2";
    filesystem.renameSnapshot(pathDirectoryMkdir, ssName, ssNewName);
    // OP_DELETE_SNAPSHOT 27
    filesystem.deleteSnapshot(pathDirectoryMkdir, ssNewName);
    // OP_SET_REPLICATION 4
    s = filesystem.create(pathFileCreate);
    s.close();
    filesystem.setReplication(pathFileCreate, (short) 1);
    // OP_SET_PERMISSIONS 7
    Short permission = 0777;
    filesystem.setPermission(pathFileCreate, new FsPermission(permission));
    // OP_SET_OWNER 8
    filesystem.setOwner(pathFileCreate, "newOwner", null);
    // OP_CLOSE 9 see above
    // OP_SET_GENSTAMP 10 see above
    // OP_SET_NS_QUOTA 11 obsolete
    // OP_CLEAR_NS_QUOTA 12 obsolete
    // OP_TIMES 13
    // Wed, 22 Sep 2010 22:45:27 GMT
    long mtime = 1285195527000L;
    long atime = mtime;
    filesystem.setTimes(pathFileCreate, mtime, atime);
    // OP_SET_QUOTA 14
    filesystem.setQuota(pathDirectoryMkdir, 1000L, HdfsConstants.QUOTA_DONT_SET);
    // OP_SET_QUOTA_BY_STORAGETYPE
    filesystem.setQuotaByStorageType(pathDirectoryMkdir, StorageType.SSD, 888L);
    // OP_RENAME 15
    fc.rename(pathFileCreate, pathFileMoved, Rename.NONE);
    // OP_CONCAT_DELETE 16
    Path pathConcatTarget = new Path("/file_concat_target");
    Path[] pathConcatFiles = new Path[2];
    pathConcatFiles[0] = new Path("/file_concat_0");
    pathConcatFiles[1] = new Path("/file_concat_1");
    // multiple of blocksize for concat
    long length = blockSize * 3;
    short replication = 1;
    long seed = 1;
    DFSTestUtil.createFile(filesystem, pathConcatTarget, length, replication, seed);
    DFSTestUtil.createFile(filesystem, pathConcatFiles[0], length, replication, seed);
    DFSTestUtil.createFile(filesystem, pathConcatFiles[1], length, replication, seed);
    filesystem.concat(pathConcatTarget, pathConcatFiles);
    // OP_TRUNCATE 46
    length = blockSize * 2;
    DFSTestUtil.createFile(filesystem, pathFileCreate, length, replication, seed);
    filesystem.truncate(pathFileCreate, blockSize);
    // OP_SYMLINK 17
    Path pathSymlink = new Path("/file_symlink");
    fc.createSymlink(pathConcatTarget, pathSymlink, false);
    // OP_REASSIGN_LEASE 22
    String filePath = "/hard-lease-recovery-test";
    byte[] bytes = "foo-bar-baz".getBytes();
    DFSClientAdapter.stopLeaseRenewer(filesystem);
    FSDataOutputStream leaseRecoveryPath = filesystem.create(new Path(filePath));
    leaseRecoveryPath.write(bytes);
    leaseRecoveryPath.hflush();
    // Set the hard lease timeout to 1 second.
    cluster.setLeasePeriod(60 * 1000, 1000, nnIndex);
    // wait for lease recovery to complete
    LocatedBlocks locatedBlocks;
    do {
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
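            // Ignore the interrupt and keep polling; the loop exits once lease recovery closes the last block.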
        }
        locatedBlocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(nnIndex), filePath, 0L, bytes.length);
    } while (locatedBlocks.isUnderConstruction());
    // OP_ADD_CACHE_POOL
    filesystem.addCachePool(new CachePoolInfo("pool1"));
    // OP_MODIFY_CACHE_POOL
    filesystem.modifyCachePool(new CachePoolInfo("pool1").setLimit(99L));
    // OP_ADD_PATH_BASED_CACHE_DIRECTIVE
    long id = filesystem.addCacheDirective(new CacheDirectiveInfo.Builder().setPath(new Path("/path")).setReplication((short) 1).setPool("pool1").build(), EnumSet.of(CacheFlag.FORCE));
    // OP_MODIFY_PATH_BASED_CACHE_DIRECTIVE
    filesystem.modifyCacheDirective(new CacheDirectiveInfo.Builder().setId(id).setReplication((short) 2).build(), EnumSet.of(CacheFlag.FORCE));
    // OP_REMOVE_PATH_BASED_CACHE_DIRECTIVE
    filesystem.removeCacheDirective(id);
    // OP_REMOVE_CACHE_POOL
    filesystem.removeCachePool("pool1");
    // OP_SET_ACL
    List<AclEntry> aclEntryList = Lists.newArrayList();
    aclEntryList.add(new AclEntry.Builder().setPermission(FsAction.READ_WRITE).setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER).build());
    aclEntryList.add(new AclEntry.Builder().setName("user").setPermission(FsAction.READ_WRITE).setScope(AclEntryScope.ACCESS).setType(AclEntryType.USER).build());
    aclEntryList.add(new AclEntry.Builder().setPermission(FsAction.WRITE).setScope(AclEntryScope.ACCESS).setType(AclEntryType.GROUP).build());
    aclEntryList.add(new AclEntry.Builder().setPermission(FsAction.NONE).setScope(AclEntryScope.ACCESS).setType(AclEntryType.OTHER).build());
    filesystem.setAcl(pathConcatTarget, aclEntryList);
    // OP_SET_XATTR
    filesystem.setXAttr(pathConcatTarget, "user.a1", new byte[] { 0x31, 0x32, 0x33 });
    filesystem.setXAttr(pathConcatTarget, "user.a2", new byte[] { 0x37, 0x38, 0x39 });
    // OP_REMOVE_XATTR
    filesystem.removeXAttr(pathConcatTarget, "user.a2");
}
Also used: Path (org.apache.hadoop.fs.Path), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), AclEntry (org.apache.hadoop.fs.permission.AclEntry), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), FsPermission (org.apache.hadoop.fs.permission.FsPermission), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), FileContext (org.apache.hadoop.fs.FileContext)
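
As a usage sketch, runOperations is meant to be driven against a MiniDFSCluster in a test. The harness below is hypothetical (the class name, 1 MB block size, and single-DataNode topology are assumptions, and the real callers configure the cluster more carefully), but Builder, waitActive, getFileSystem, and shutdown are the standard MiniDFSCluster test lifecycle.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class GenerateEditLogs {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Keep the cluster's block size in step with the blockSize argument;
        // 1 MB stays above the NameNode's default minimum block size.
        long blockSize = 1024 * 1024;
        conf.setLong("dfs.blocksize", blockSize);
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();
            DistributedFileSystem fs = cluster.getFileSystem();
            // nnIndex 0: the only NameNode in this single-NN mini cluster.
            DFSTestUtil.runOperations(cluster, fs, conf, blockSize, 0);
        } finally {
            cluster.shutdown();
        }
    }
}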

Aggregations

LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks) 118
Test (org.junit.Test) 67
Path (org.apache.hadoop.fs.Path) 65
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 52
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 33
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo) 32
Configuration (org.apache.hadoop.conf.Configuration) 29
IOException (java.io.IOException) 20
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 20
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 20
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 18
FileSystem (org.apache.hadoop.fs.FileSystem) 17
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) 17
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 13
Block (org.apache.hadoop.hdfs.protocol.Block) 11
InetSocketAddress (java.net.InetSocketAddress) 10
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 10
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream) 9
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus) 7
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) 7