Example 91 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project Hadoop by Apache.

From class FSDirectory, method createReservedStatuses.

/**
   * Create HdfsFileStatuses of the reserved paths: .inodes and raw.
   * These statuses are solely for listing purposes. All other operations
   * on the reserved dirs are disallowed.
   * Operations on subdirectories are resolved by
   * {@link FSDirectory#resolvePath(String, byte[][], FSDirectory)}
   * and conducted directly, without the need to check the reserved dirs.
   *
   * This method should only be invoked once during namenode initialization;
   * the resulting statuses are stored in the reservedStatuses field.
   *
   * @param cTime CTime of the file system
   */
void createReservedStatuses(long cTime) {
    HdfsFileStatus inodes = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
        new FsPermission((short) 0770), null, supergroup, null, DOT_INODES,
        -1L, 0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
    HdfsFileStatus raw = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
        new FsPermission((short) 0770), null, supergroup, null, RAW,
        -1L, 0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
    reservedStatuses = new HdfsFileStatus[] { inodes, raw };
}
Also used: HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), FsPermission (org.apache.hadoop.fs.permission.FsPermission)
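
To see what these reserved statuses look like from the client side, here is a minimal hedged sketch; it assumes fs.defaultFS points at a running cluster whose NameNode is recent enough to support listing /.reserved, and ReservedListingDemo is an illustrative name, not Hadoop code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReservedListingDemo {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // listing is the only operation supported on /.reserved itself
        for (FileStatus s : fs.listStatus(new Path("/.reserved"))) {
            System.out.println(s.getPermission() + " " + s.getGroup() + " " + s.getPath());
        }
        // expected entries: /.reserved/.inodes and /.reserved/raw, both rwxrwx---
    }
}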

Example 92 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project Hadoop by Apache.

From class FSDirRenameOp, method createRenameResult.

private static RenameResult createRenameResult(FSDirectory fsd, INodesInPath dst,
        boolean filesDeleted, BlocksMapUpdateInfo collectedBlocks) throws IOException {
    boolean success = (dst != null);
    HdfsFileStatus auditStat = success ? fsd.getAuditFileInfo(dst) : null;
    return new RenameResult(success, auditStat, filesDeleted, collectedBlocks);
}
Also used: HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)
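
A hedged sketch of how a namesystem-level caller might consume this result. The field names success, auditStat, filesDeleted, and collectedBlocks mirror the constructor arguments above, and the renameToInt and removeBlocks calls are assumptions about the surrounding FSNamesystem/FSDirRenameOp code paths, not verified signatures:

// hypothetical caller, sketched after FSNamesystem's rename path
RenameResult res = FSDirRenameOp.renameToInt(dir, pc, src, dst, logRetryCache);
if (res.filesDeleted) {
    // blocks of any overwritten file are removed outside the write lock
    removeBlocks(res.collectedBlocks);
}
logAuditEvent(res.success, "rename", src, dst, res.auditStat);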

Example 93 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project Hadoop by Apache.

From class FSDirStatAndListingOp, method getSnapshotsListing.

/**
   * Get a listing of all the snapshots of a snapshottable directory.
   */
private static DirectoryListing getSnapshotsListing(FSDirectory fsd,
        INodesInPath iip, byte[] startAfter) throws IOException {
    Preconditions.checkState(fsd.hasReadLock());
    Preconditions.checkArgument(iip.isDotSnapshotDir(), "%s does not end with %s",
        iip.getPath(), HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
    // drop off the null .snapshot component
    iip = iip.getParentINodesInPath();
    final String dirPath = iip.getPath();
    final INode node = iip.getLastINode();
    final INodeDirectory dirNode = INodeDirectory.valueOf(node, dirPath);
    final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
    if (sf == null) {
        throw new SnapshotException("Directory is not a snapshottable directory: " + dirPath);
    }
    final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
    int skipSize = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
    skipSize = skipSize < 0 ? -skipSize - 1 : skipSize + 1;
    int numOfListing = Math.min(snapshots.size() - skipSize, fsd.getLsLimit());
    final HdfsFileStatus[] listing = new HdfsFileStatus[numOfListing];
    for (int i = 0; i < numOfListing; i++) {
        Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
        listing[i] = createFileStatus(fsd, iip, sRoot, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
    }
    return new DirectoryListing(listing, snapshots.size() - skipSize - numOfListing);
}
Also used: DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing), Snapshot (org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), DirectorySnapshottableFeature (org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature), SnapshotException (org.apache.hadoop.hdfs.protocol.SnapshotException)
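
The skipSize arithmetic above converts a binary-search result into the number of snapshots to skip past startAfter: an exact hit skips the match itself, while a miss decodes the insertion point from the negative return value. A self-contained illustration, with java.util.Arrays standing in for ReadOnlyList.Util and SnapshotSkipDemo as an illustrative name:

import java.util.Arrays;

public class SnapshotSkipDemo {
    public static void main(String[] args) {
        String[] snapshots = { "s1", "s3", "s5" };
        for (String startAfter : new String[] { "s3", "s2" }) {
            int i = Arrays.binarySearch(snapshots, startAfter);
            // exact hit: skip past the match; miss: -i - 1 is the insertion point
            int skipSize = i < 0 ? -i - 1 : i + 1;
            System.out.println(startAfter + " -> skip " + skipSize);
        }
        // prints "s3 -> skip 2" and "s2 -> skip 1"
    }
}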

Example 94 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project Hadoop by Apache.

From class FSDirEncryptionZoneOp, method getEZForPath.

/**
   * Get the encryption zone for the specified path.
   *
   * @param fsd the FSDirectory
   * @param srcArg the path of a file or directory to get the EZ for
   * @param pc permission checker to check fs permission
   * @return the EZ with file status.
   */
static Map.Entry<EncryptionZone, HdfsFileStatus> getEZForPath(final FSDirectory fsd,
        final String srcArg, final FSPermissionChecker pc) throws IOException {
    final INodesInPath iip;
    final EncryptionZone ret;
    fsd.readLock();
    try {
        iip = fsd.resolvePath(pc, srcArg, DirOp.READ);
        if (fsd.isPermissionEnabled()) {
            fsd.checkPathAccess(pc, iip, FsAction.READ);
        }
        ret = fsd.ezManager.getEZINodeForPath(iip);
    } finally {
        fsd.readUnlock();
    }
    HdfsFileStatus auditStat = fsd.getAuditFileInfo(iip);
    return new AbstractMap.SimpleImmutableEntry<>(ret, auditStat);
}
Also used: EncryptionZone (org.apache.hadoop.hdfs.protocol.EncryptionZone), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)
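
This server-side lookup backs the public encryption-zone query API. A hedged client-side sketch: the NameNode URI and path are placeholders, it assumes a cluster with at least one encryption zone configured, and EzLookupDemo is an illustrative name:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;

public class EzLookupDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
        EncryptionZone ez = admin.getEncryptionZoneForPath(new Path("/secure/data"));
        // null when the path is not inside an encryption zone
        System.out.println(ez == null ? "no EZ" : ez.getPath() + " key=" + ez.getKeyName());
    }
}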

Example 95 with HdfsFileStatus

Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project Hadoop by Apache.

From class FSNamesystem, method logAuditEvent.

private void logAuditEvent(boolean succeeded, UserGroupInformation ugi, InetAddress addr,
        String cmd, String src, String dst, HdfsFileStatus stat) {
    FileStatus status = null;
    if (stat != null) {
        Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
        Path path = dst != null ? new Path(dst) : new Path(src);
        status = new FileStatus(stat.getLen(), stat.isDir(), stat.getReplication(),
            stat.getBlockSize(), stat.getModificationTime(), stat.getAccessTime(),
            stat.getPermission(), stat.getOwner(), stat.getGroup(), symlink, path);
    }
    final String ugiStr = ugi.toString();
    for (AuditLogger logger : auditLoggers) {
        if (logger instanceof HdfsAuditLogger) {
            HdfsAuditLogger hdfsLogger = (HdfsAuditLogger) logger;
            hdfsLogger.logAuditEvent(succeeded, ugiStr, addr, cmd, src, dst,
                status, CallerContext.getCurrent(), ugi, dtSecretManager);
        } else {
            logger.logAuditEvent(succeeded, ugiStr, addr, cmd, src, dst, status);
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FileStatus (org.apache.hadoop.fs.FileStatus), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), TopAuditLogger (org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger)
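
Because the loop above dispatches on the AuditLogger interface, custom loggers can be plugged in via the dfs.namenode.audit.loggers property. A minimal hedged sketch of such a logger; StdoutAuditLogger is an illustrative name, while the interface methods match those invoked in the loop:

import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.server.namenode.AuditLogger;

public class StdoutAuditLogger implements AuditLogger {
    @Override
    public void initialize(Configuration conf) {
        // no-op; a real logger would set up its sink here
    }

    @Override
    public void logAuditEvent(boolean succeeded, String userName, InetAddress addr,
            String cmd, String src, String dst, FileStatus stat) {
        System.out.printf("allowed=%b ugi=%s ip=%s cmd=%s src=%s dst=%s%n",
                succeeded, userName, addr, cmd, src, dst);
    }
}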

Aggregations

HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 124
Test (org.junit.Test): 51
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 34
IOException (java.io.IOException): 28
InetSocketAddress (java.net.InetSocketAddress): 28
XDR (org.apache.hadoop.oncrpc.XDR): 28
AccessControlException (org.apache.hadoop.security.AccessControlException): 26
Path (org.apache.hadoop.fs.Path): 23
SnapshotAccessControlException (org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException): 23
FileNotFoundException (java.io.FileNotFoundException): 16
DFSClient (org.apache.hadoop.hdfs.DFSClient): 11
DirectoryListing (org.apache.hadoop.hdfs.protocol.DirectoryListing): 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 9
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 9
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 8
SetAttr3 (org.apache.hadoop.nfs.nfs3.request.SetAttr3): 8
FileStatus (org.apache.hadoop.fs.FileStatus): 7
Matchers.anyString (org.mockito.Matchers.anyString): 7
Configuration (org.apache.hadoop.conf.Configuration): 6