Use of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project:
the addImplicitUwx method of the FSDirMkdirOp class.
/**
 * Derives the permission to use for implicitly created ancestor directories.
 *
 * The result carries the owner user/group taken from {@code perm}, while the
 * mode bits come from {@code parentPerm} with the owner's WRITE and EXECUTE
 * bits forced on (so the creator can populate the new directory).
 *
 * @param parentPerm permission of the existing parent directory (mode source)
 * @param perm       requested permission (user/group name source)
 * @return a PermissionStatus combining the two as described above
 */
private static PermissionStatus addImplicitUwx(PermissionStatus parentPerm, PermissionStatus perm) {
    final FsPermission base = parentPerm.getPermission();
    final FsAction ownerBits = base.getUserAction().or(FsAction.WRITE_EXECUTE);
    final FsPermission ancestorMode =
        new FsPermission(ownerBits, base.getGroupAction(), base.getOtherAction());
    return new PermissionStatus(perm.getUserName(), perm.getGroupName(), ancestorMode);
}
Use of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project:
the addSymlink method of the FSDirSymlinkOp class.
/**
* Add the given symbolic link to the fs. Record it in the edits log.
*/
/**
 * Add the given symbolic link to the fs and record it in the edits log.
 *
 * @param fsd           the directory tree to mutate
 * @param path          full path of the new symlink (used for logging)
 * @param iip           resolved inodes for {@code path}
 * @param target        the path the symlink points at
 * @param dirPerms      permission used for any created ancestors; its user
 *                      name also becomes the symlink's owner
 * @param createParent  whether missing ancestor directories should be created
 * @param logRetryCache whether to record this op in the retry cache
 * @return the new symlink inode, or null if the add (or ancestor creation) failed
 * @throws IOException if ancestor creation or the edit-log write fails
 */
private static INodeSymlink addSymlink(FSDirectory fsd, String path, INodesInPath iip, String target, PermissionStatus dirPerms, boolean createParent, boolean logRetryCache) throws IOException {
    final long mtime = now();
    final INodesInPath parentPath;
    if (!createParent) {
        parentPath = iip.getParentINodesInPath();
    } else {
        parentPath = FSDirMkdirOp.createAncestorDirectories(fsd, iip, dirPerms);
        if (parentPath == null) {
            return null;
        }
    }
    final long inodeId = fsd.allocateNewInodeId();
    // Group is passed as null here — presumably filled in downstream
    // (e.g. inherited from the parent); owner comes from dirPerms.
    final PermissionStatus symlinkPerm =
        new PermissionStatus(dirPerms.getUserName(), null, FsPermission.getDefault());
    final INodeSymlink newNode = unprotectedAddSymlink(
        fsd, parentPath, iip.getLastLocalName(), inodeId, target, mtime, mtime, symlinkPerm);
    if (newNode == null) {
        NameNode.stateChangeLog.info("addSymlink: failed to add " + path);
        return null;
    }
    fsd.getEditLog().logSymlink(path, target, mtime, mtime, newNode, logRetryCache);
    if (NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("addSymlink: " + path + " is added");
    }
    return newNode;
}
Use of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project:
the setup method of the TestDefaultBlockPlacementPolicy class.
/**
 * Test fixture: starts a 5-datanode MiniDFSCluster spread over three racks
 * (RACK0 x2, RACK2 x2, RACK3 x1), captures the NameNode RPC handle and the
 * namesystem, and prepares a default PermissionStatus for test files.
 */
@Before
public void setup() throws IOException {
    StaticMapping.resetMap();
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
    // Checksum chunk is half a block so files span checksum boundaries.
    conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
    final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
    final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(5)
        .racks(racks)
        .hosts(hosts)
        .build();
    cluster.waitActive();
    nameNodeRpc = cluster.getNameNodeRpc();
    namesystem = cluster.getNamesystem();
    perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null, FsPermission.getDefault());
}
Use of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project:
the permissionStatusFromXml method of the FSEditLogOp class.
/**
 * Reconstructs a PermissionStatus from the first PERMISSION_STATUS child
 * stanza of {@code st}, reading USERNAME, GROUPNAME and the mode bits.
 *
 * @param st the parent stanza containing a PERMISSION_STATUS child
 * @return the deserialized PermissionStatus
 * @throws InvalidXmlException if the expected children/values are missing
 */
public static PermissionStatus permissionStatusFromXml(Stanza st) throws InvalidXmlException {
    final Stanza statusStanza = st.getChildren("PERMISSION_STATUS").get(0);
    return new PermissionStatus(
        statusStanza.getValue("USERNAME"),
        statusStanza.getValue("GROUPNAME"),
        fsPermissionFromXml(statusStanza));
}
Use of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project:
the readINodeUnderConstruction method of the FSImageSerialization class.
// Helper that deserializes an under-construction INodeFile from a legacy
// (pre-protobuf) fsimage stream. The read order below mirrors the on-disk
// field layout exactly and must not be changed.
//
static INodeFile readINodeUnderConstruction(DataInput in, FSNamesystem fsNamesys, int imgVersion) throws IOException {
byte[] name = readBytes(in);
// Inode ids are only present in images at/above ADD_INODE_ID; for older
// images a fresh id is allocated instead of being read from the stream.
long inodeId = NameNodeLayoutVersion.supports(LayoutVersion.Feature.ADD_INODE_ID, imgVersion) ? in.readLong() : fsNamesys.dir.allocateNewInodeId();
short blockReplication = in.readShort();
long modificationTime = in.readLong();
long preferredBlockSize = in.readLong();
int numBlocks = in.readInt();
final BlockInfoContiguous[] blocksContiguous = new BlockInfoContiguous[numBlocks];
// A single scratch Block is reused for each readFields; BlockInfoContiguous
// copies what it needs from it.
Block blk = new Block();
int i = 0;
// All blocks except the last are complete.
for (; i < numBlocks - 1; i++) {
blk.readFields(in);
blocksContiguous[i] = new BlockInfoContiguous(blk, blockReplication);
}
// last block is UNDER_CONSTRUCTION
if (numBlocks > 0) {
blk.readFields(in);
blocksContiguous[i] = new BlockInfoContiguous(blk, blockReplication);
blocksContiguous[i].convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, null);
}
PermissionStatus perm = PermissionStatus.read(in);
// clientName/clientMachine identify the writer of the open file; they are
// re-attached via toUnderConstruction below.
String clientName = readString(in);
String clientMachine = readString(in);
// We previously stored locations for the last block, now we
// just record that there are none
int numLocs = in.readInt();
assert numLocs == 0 : "Unexpected block locations";
// Images in the pre-protobuf format will not have the lazyPersist flag,
// so it is safe to pass false always.
INodeFile file = new INodeFile(inodeId, name, perm, modificationTime, modificationTime, blocksContiguous, blockReplication, preferredBlockSize);
file.toUnderConstruction(clientName, clientMachine);
return file;
}
Aggregations