Usage of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in the Apache Hadoop project:
class FSDirectory, method createReservedStatuses.
/**
 * Create HdfsFileStatuses of the reserved paths: .inodes and raw.
 * These statuses are solely for listing purpose. All other operations
 * on the reserved dirs are disallowed.
 * Operations on sub directories are resolved by
 * {@link FSDirectory#resolvePath(String, byte[][], FSDirectory)}
 * and conducted directly, without the need to check the reserved dirs.
 *
 * This method should only be invoked once during namenode initialization.
 * The statuses are stored in {@link #reservedStatuses}; nothing is
 * returned (the original {@code @return} tag was stale — the method is
 * {@code void}).
 *
 * @param cTime CTime of the file system, used as both the modification
 *              and access time of the reserved directories
 */
void createReservedStatuses(long cTime) {
  // Both reserved dirs share the same 0770 permission; FsPermission is
  // immutable, so a single instance can safely back both statuses.
  final FsPermission perm = new FsPermission((short) 0770);
  HdfsFileStatus inodes = new HdfsFileStatus(0, true, 0, 0, cTime, cTime, perm, null, supergroup, null, DOT_INODES, -1L, 0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
  HdfsFileStatus raw = new HdfsFileStatus(0, true, 0, 0, cTime, cTime, perm, null, supergroup, null, RAW, -1L, 0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
  reservedStatuses = new HdfsFileStatus[] { inodes, raw };
}
Usage of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in the Apache Hadoop project:
class FSDirRenameOp, method createRenameResult.
/**
 * Build the {@link RenameResult} for a completed rename attempt.
 * A non-null destination {@code dst} indicates success; the audit file
 * status is resolved only in that case.
 */
private static RenameResult createRenameResult(FSDirectory fsd, INodesInPath dst, boolean filesDeleted, BlocksMapUpdateInfo collectedBlocks) throws IOException {
  final boolean renamed = dst != null;
  HdfsFileStatus auditStat = null;
  if (renamed) {
    auditStat = fsd.getAuditFileInfo(dst);
  }
  return new RenameResult(renamed, auditStat, filesDeleted, collectedBlocks);
}
Usage of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in the Apache Hadoop project:
class FSDirStatAndListingOp, method getSnapshotsListing.
/**
 * Get a listing of all the snapshots of a snapshottable directory,
 * starting strictly after {@code startAfter} and capped at the
 * directory-listing limit configured on the FSDirectory.
 */
private static DirectoryListing getSnapshotsListing(FSDirectory fsd, INodesInPath iip, byte[] startAfter) throws IOException {
  Preconditions.checkState(fsd.hasReadLock());
  Preconditions.checkArgument(iip.isDotSnapshotDir(), "%s does not end with %s", iip.getPath(), HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
  // drop off the null .snapshot component
  iip = iip.getParentINodesInPath();
  final String dirPath = iip.getPath();
  final INodeDirectory dirNode = INodeDirectory.valueOf(iip.getLastINode(), dirPath);
  final DirectorySnapshottableFeature sf = dirNode.getDirectorySnapshottableFeature();
  if (sf == null) {
    throw new SnapshotException("Directory is not a snapshottable directory: " + dirPath);
  }
  final ReadOnlyList<Snapshot> snapshots = sf.getSnapshotList();
  final int pos = ReadOnlyList.Util.binarySearch(snapshots, startAfter);
  // Resume strictly after startAfter: an exact hit skips that entry,
  // a miss resumes at the insertion point encoded as -(pos) - 1.
  final int skip = (pos < 0) ? (-pos - 1) : (pos + 1);
  final int remaining = snapshots.size() - skip;
  final int count = Math.min(remaining, fsd.getLsLimit());
  final HdfsFileStatus[] listing = new HdfsFileStatus[count];
  for (int i = 0; i < count; i++) {
    final Snapshot.Root sRoot = snapshots.get(skip + i).getRoot();
    listing[i] = createFileStatus(fsd, iip, sRoot, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
  }
  // Second arg tells the client how many entries are still unlisted.
  return new DirectoryListing(listing, remaining - count);
}
Usage of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in the Apache Hadoop project:
class FSDirEncryptionZoneOp, method getEZForPath.
/**
 * Get the encryption zone for the specified path.
 *
 * @param fsd fsdirectory
 * @param srcArg the path of a file or directory to get the EZ for
 * @param pc permission checker to check fs permission
 * @return the EZ paired with the file status of the resolved path
 */
static Map.Entry<EncryptionZone, HdfsFileStatus> getEZForPath(final FSDirectory fsd, final String srcArg, final FSPermissionChecker pc) throws IOException {
  final EncryptionZone zone;
  final INodesInPath resolved;
  fsd.readLock();
  try {
    resolved = fsd.resolvePath(pc, srcArg, DirOp.READ);
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, resolved, FsAction.READ);
    }
    zone = fsd.ezManager.getEZINodeForPath(resolved);
  } finally {
    fsd.readUnlock();
  }
  // Audit status is resolved after the read lock is released,
  // as in the original ordering.
  final HdfsFileStatus auditStat = fsd.getAuditFileInfo(resolved);
  return new AbstractMap.SimpleImmutableEntry<>(zone, auditStat);
}
Usage of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in the Apache Hadoop project:
class FSNamesystem, method logAuditEvent.
/**
 * Dispatch one audit event to every configured {@code AuditLogger},
 * first converting the HDFS-specific status into a generic
 * {@code FileStatus}. Extended {@code HdfsAuditLogger}s additionally
 * receive the caller context, the UGI, and the delegation-token
 * secret manager.
 */
private void logAuditEvent(boolean succeeded, UserGroupInformation ugi, InetAddress addr, String cmd, String src, String dst, HdfsFileStatus stat) {
  FileStatus status = null;
  if (stat != null) {
    final Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
    // When a destination path is present it identifies the affected file.
    final Path path = (dst == null) ? new Path(src) : new Path(dst);
    status = new FileStatus(stat.getLen(), stat.isDir(), stat.getReplication(), stat.getBlockSize(), stat.getModificationTime(), stat.getAccessTime(), stat.getPermission(), stat.getOwner(), stat.getGroup(), symlink, path);
  }
  final String ugiStr = ugi.toString();
  for (AuditLogger logger : auditLoggers) {
    if (logger instanceof HdfsAuditLogger) {
      ((HdfsAuditLogger) logger).logAuditEvent(succeeded, ugiStr, addr, cmd, src, dst, status, CallerContext.getCurrent(), ugi, dtSecretManager);
    } else {
      logger.logAuditEvent(succeeded, ugiStr, addr, cmd, src, dst, status);
    }
  }
}
Aggregations