Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by Apache.
The class JsonUtilClient, method toFileStatus.
/** Convert a Json map to a HdfsFileStatus object. */
static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
  if (json == null) {
    return null;
  }
  final Map<?, ?> m = includesType
      ? (Map<?, ?>) json.get(FileStatus.class.getSimpleName()) : json;
  final String localName = (String) m.get("pathSuffix");
  final WebHdfsConstants.PathType type =
      WebHdfsConstants.PathType.valueOf((String) m.get("type"));
  final byte[] symlink = type != WebHdfsConstants.PathType.SYMLINK ? null
      : DFSUtilClient.string2Bytes((String) m.get("symlink"));
  final long len = ((Number) m.get("length")).longValue();
  final String owner = (String) m.get("owner");
  final String group = (String) m.get("group");
  final FsPermission permission = toFsPermission((String) m.get("permission"),
      (Boolean) m.get("aclBit"), (Boolean) m.get("encBit"));
  final long aTime = ((Number) m.get("accessTime")).longValue();
  final long mTime = ((Number) m.get("modificationTime")).longValue();
  final long blockSize = ((Number) m.get("blockSize")).longValue();
  final short replication = ((Number) m.get("replication")).shortValue();
  final long fileId = m.containsKey("fileId")
      ? ((Number) m.get("fileId")).longValue()
      : HdfsConstants.GRANDFATHER_INODE_ID;
  final int childrenNum = getInt(m, "childrenNum", -1);
  final byte storagePolicy = m.containsKey("storagePolicy")
      ? (byte) ((Number) m.get("storagePolicy")).longValue()
      : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
  return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY,
      replication, blockSize, mTime, aTime, permission, owner, group, symlink,
      DFSUtilClient.string2Bytes(localName), fileId, childrenNum, null,
      storagePolicy, null);
}
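For orientation, here is a minimal sketch of the kind of WebHDFS JSON map this parser consumes. The key names are taken directly from the reads above; the values are illustrative, and the call site is an assumption (toFileStatus is package-private, so this would only compile inside org.apache.hadoop.hdfs.web, with java.util.Map/HashMap imported).

// Illustrative only: hand-built map mirroring a WebHDFS FileStatus JSON object.
Map<String, Object> m = new HashMap<>();
m.put("pathSuffix", "file1");
m.put("type", "FILE");                 // FILE, DIRECTORY or SYMLINK
m.put("length", 1024L);
m.put("owner", "hdfs");
m.put("group", "supergroup");
m.put("permission", "644");
m.put("accessTime", 1400000000000L);
m.put("modificationTime", 1400000000000L);
m.put("blockSize", 134217728L);
m.put("replication", 3);
m.put("fileId", 16389L);
m.put("childrenNum", 0);
// includesType=false because the map above is not wrapped in a "FileStatus" entry.
HdfsFileStatus parsed = JsonUtilClient.toFileStatus(m, false);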
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by Apache.
The class RpcProgramNfs3, method remove.
@VisibleForTesting
REMOVE3Response remove(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK);
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  REMOVE3Request request;
  try {
    request = REMOVE3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid REMOVE request");
    return new REMOVE3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle dirHandle = request.getHandle();
  String fileName = request.getName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS REMOVE dir fileId: " + dirHandle.getFileId()
        + " fileName: " + fileName + " client: " + remoteAddress);
  }
  String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
  Nfs3FileAttributes preOpDirAttr = null;
  Nfs3FileAttributes postOpDirAttr = null;
  try {
    preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
    if (preOpDirAttr == null) {
      LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
      return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE);
    }
    WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
        preOpDirAttr);
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
      return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
    }
    String fileIdPath = dirFileIdPath + "/" + fileName;
    HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath);
    if (fstat == null) {
      return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
    }
    if (fstat.isDir()) {
      return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, errWcc);
    }
    boolean result = dfsClient.delete(fileIdPath, false);
    WccData dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
        dfsClient, dirFileIdPath, iug);
    if (!result) {
      return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc);
    }
    return new REMOVE3Response(Nfs3Status.NFS3_OK, dirWcc);
  } catch (IOException e) {
    LOG.warn("Exception ", e);
    // Try to return correct WccData
    if (postOpDirAttr == null) {
      try {
        postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
      } catch (IOException e1) {
        LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
      }
    }
    WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
        postOpDirAttr);
    int status = mapErrorStatus(e);
    return new REMOVE3Response(status, dirWcc);
  }
}
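The access-privilege test later in this section shows how such a call is assembled end to end; as a condensed, hedged sketch, the wire format REMOVE3Request.deserialize expects is simply the parent directory handle followed by the entry name, where nfsd, securityHandler and dirId are assumed to come from a surrounding test fixture:

// Serialize a REMOVE3 request: directory file handle, then the entry name.
XDR xdrReq = new XDR();
new FileHandle(dirId).serialize(xdrReq);
xdrReq.writeString("f1");
REMOVE3Response resp = nfsd.remove(xdrReq.asReadOnlyWrap(), securityHandler,
    new InetSocketAddress("localhost", 1234));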
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by Apache.
The class RpcProgramNfs3, method rmdir.
@VisibleForTesting
RMDIR3Response rmdir(XDR xdr, SecurityHandler securityHandler,
    SocketAddress remoteAddress) {
  RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK);
  DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
  if (dfsClient == null) {
    response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
    return response;
  }
  RMDIR3Request request;
  try {
    request = RMDIR3Request.deserialize(xdr);
  } catch (IOException e) {
    LOG.error("Invalid RMDIR request");
    return new RMDIR3Response(Nfs3Status.NFS3ERR_INVAL);
  }
  FileHandle dirHandle = request.getHandle();
  String fileName = request.getName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("NFS RMDIR dir fileId: " + dirHandle.getFileId()
        + " fileName: " + fileName + " client: " + remoteAddress);
  }
  String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle);
  Nfs3FileAttributes preOpDirAttr = null;
  Nfs3FileAttributes postOpDirAttr = null;
  try {
    preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
    if (preOpDirAttr == null) {
      LOG.info("Can't get path for dir fileId: " + dirHandle.getFileId());
      return new RMDIR3Response(Nfs3Status.NFS3ERR_STALE);
    }
    WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
        preOpDirAttr);
    if (!checkAccessPrivilege(remoteAddress, AccessPrivilege.READ_WRITE)) {
      return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, errWcc);
    }
    String fileIdPath = dirFileIdPath + "/" + fileName;
    HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath);
    if (fstat == null) {
      return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
    }
    if (!fstat.isDir()) {
      return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, errWcc);
    }
    if (fstat.getChildrenNum() > 0) {
      return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTEMPTY, errWcc);
    }
    boolean result = dfsClient.delete(fileIdPath, false);
    WccData dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr),
        dfsClient, dirFileIdPath, iug);
    if (!result) {
      return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc);
    }
    return new RMDIR3Response(Nfs3Status.NFS3_OK, dirWcc);
  } catch (IOException e) {
    LOG.warn("Exception ", e);
    // Try to return correct WccData
    if (postOpDirAttr == null) {
      try {
        postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug);
      } catch (IOException e1) {
        LOG.info("Can't get postOpDirAttr for " + dirFileIdPath, e1);
      }
    }
    WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr),
        postOpDirAttr);
    int status = mapErrorStatus(e);
    return new RMDIR3Response(status, dirWcc);
  }
}
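rmdir accepts the same wire shape as remove (parent directory handle plus entry name); only the handler and the extra NOTDIR/NOTEMPTY checks differ. A hedged sketch, again assuming nfsd, securityHandler, dirId and the child name "subdir1" from a hypothetical test fixture:

XDR xdrReq = new XDR();
new FileHandle(dirId).serialize(xdrReq);   // handle of the parent directory
xdrReq.writeString("subdir1");             // hypothetical child directory name
RMDIR3Response resp = nfsd.rmdir(xdrReq.asReadOnlyWrap(), securityHandler,
    new InetSocketAddress("localhost", 1234));
// Per the checks above: NFS3ERR_NOTDIR if "subdir1" is a regular file,
// NFS3ERR_NOTEMPTY if it still has children, NFS3_OK once it is empty.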
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by Apache.
The class TestClientAccessPrivilege, method testClientAccessPrivilegeForRemove.
@Test(timeout = 60000)
public void testClientAccessPrivilegeForRemove() throws Exception {
  // Configure ro access for nfs1 service
  config.set("dfs.nfs.exports.allowed.hosts", "* ro");
  // Start nfs
  Nfs3 nfs = new Nfs3(config);
  nfs.startServiceInternal(false);
  RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
  // Create a remove request
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  handle.serialize(xdr_req);
  xdr_req.writeString("f1");
  // Remove operation
  REMOVE3Response response = nfsd.remove(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  // Assert on return code
  assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
      response.getStatus());
}
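A natural counterpart (hypothetical, not part of the original test class) flips the export to the default read-write setting so the NFS3ERR_ACCES branch is not taken; whether the final status is NFS3_OK then depends on whether "f1" actually exists under testdir in the fixture:

@Test(timeout = 60000)
public void testClientAccessPrivilegeForRemoveReadWrite() throws Exception {
  // Hypothetical variant: default read-write export, so the access check passes.
  config.set("dfs.nfs.exports.allowed.hosts", "* rw");
  Nfs3 nfs = new Nfs3(config);
  nfs.startServiceInternal(false);
  RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  XDR xdrReq = new XDR();
  new FileHandle(status.getFileId()).serialize(xdrReq);
  xdrReq.writeString("f1");
  REMOVE3Response response = nfsd.remove(xdrReq.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  // The request must not be rejected for lack of write privilege.
  assertNotEquals("Access should not be denied", Nfs3Status.NFS3ERR_ACCES,
      response.getStatus());
}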
Use of org.apache.hadoop.hdfs.protocol.HdfsFileStatus in project hadoop by Apache.
The class RpcProgramMountd, method mnt.
@Override
public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
  if (hostsMatcher == null) {
    return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
        null);
  }
  AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
  if (accessPrivilege == AccessPrivilege.NONE) {
    return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
        null);
  }
  String path = xdr.readString();
  if (LOG.isDebugEnabled()) {
    LOG.debug("MOUNT MNT path: " + path + " client: " + client);
  }
  String host = client.getHostName();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got host: " + host + " path: " + path);
  }
  if (!exports.contains(path)) {
    LOG.info("Path " + path + " is not shared.");
    MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
    return out;
  }
  FileHandle handle = null;
  try {
    HdfsFileStatus exFileStatus = dfsClient.getFileInfo(path);
    handle = new FileHandle(exFileStatus.getFileId());
  } catch (IOException e) {
    LOG.error("Can't get handle for export:" + path, e);
    MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_NOENT, out, xid, null);
    return out;
  }
  assert (handle != null);
  LOG.info("Giving handle (fileId:" + handle.getFileId()
      + ") to client for export " + path);
  mounts.add(new MountEntry(host, path));
  MountResponse.writeMNTResponse(Nfs3Status.NFS3_OK, out, xid,
      handle.getContent());
  return out;
}
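To round out the picture, a rough sketch of driving this MNT handler directly; the mountd instance, export path and client address are assumptions for illustration, and InetAddress.getByName can throw, so the surrounding method would need to declare or handle the exception:

XDR request = new XDR();
request.writeString("/");                  // export path to mount
XDR reply = mountd.mnt(request, new XDR(), 0 /* xid */,
    InetAddress.getByName("localhost"));
// On success the reply carries NFS3_OK followed by the file handle bytes for
// the export root, as written by MountResponse.writeMNTResponse above.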