Example usage of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project: the setup method of class TestEncryptionZoneManager.
@Before
public void setup() {
  // Mock the directory and resolved-path collaborators; the tests only need
  // lock-state queries and inode lookups by id, so just those calls are stubbed.
  this.mockedDir = mock(FSDirectory.class);
  this.mockedINodesInPath = mock(INodesInPath.class);
  // BUG FIX: the original passed (short) 755, a decimal literal. Permission
  // modes are octal; decimal 755 is octal 01363, not the intended rwxr-xr-x.
  // Use the octal literal 0755.
  this.defaultPermission = new PermissionStatus("test", "test",
      new FsPermission((short) 0755));
  this.rootINode = new INodeDirectory(0L, "".getBytes(),
      defaultPermission, System.currentTimeMillis());
  this.firstINode = new INodeDirectory(1L, "first".getBytes(),
      defaultPermission, System.currentTimeMillis());
  this.secondINode = new INodeDirectory(2L, "second".getBytes(),
      defaultPermission, System.currentTimeMillis());
  // Pretend the caller already holds both FSDirectory locks, and let the
  // mocked directory resolve the three inodes created above by their ids.
  when(this.mockedDir.hasReadLock()).thenReturn(true);
  when(this.mockedDir.hasWriteLock()).thenReturn(true);
  when(this.mockedDir.getInode(0L)).thenReturn(rootINode);
  when(this.mockedDir.getInode(1L)).thenReturn(firstINode);
  when(this.mockedDir.getInode(2L)).thenReturn(secondINode);
}
Example usage of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project: the createINodeDirectory helper of class TestFSPermissionChecker.
/**
 * Creates an {@link INodeDirectory} with the given name, owner, group and
 * permission bits, and attaches it as a child of {@code parent}.
 *
 * @param parent directory the new child is added to
 * @param name   child name, stored as UTF-8 bytes
 * @param owner  owner user name
 * @param group  owning group name
 * @param perm   permission bits (octal short, e.g. {@code (short) 0755})
 * @return the newly created and attached directory inode
 * @throws IOException retained for signature compatibility with existing callers
 */
private static INodeDirectory createINodeDirectory(INodeDirectory parent,
    String name, String owner, String group, short perm) throws IOException {
  PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group,
      FsPermission.createImmutable(perm));
  // Use the StandardCharsets constant rather than the "UTF-8" string-name
  // overload: same bytes, but no checked UnsupportedEncodingException path.
  INodeDirectory inodeDirectory = new INodeDirectory(
      HdfsConstants.GRANDFATHER_INODE_ID,
      name.getBytes(java.nio.charset.StandardCharsets.UTF_8), permStatus, 0L);
  parent.addChild(inodeDirectory);
  return inodeDirectory;
}
Example usage of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project: the getAclStatus method of class FSImageLoader.
/**
 * Builds the JSON-serialized ACL status of the file at the given path.
 *
 * @param path path identifying the file
 * @return the file's AclStatus rendered as a JSON string
 * @throws IOException if the status cannot be serialized to JSON
 */
String getAclStatus(String path) throws IOException {
  PermissionStatus status = getPermissionStatus(path);
  FsPermission perm = status.getPermission();
  // Assemble owner/group, ACL entries, mode bits and sticky bit in one chain.
  AclStatus.Builder builder = new AclStatus.Builder()
      .owner(status.getUserName())
      .group(status.getGroupName())
      .addEntries(getAclEntryList(path))
      .setPermission(perm)
      .stickyBit(perm.getStickyBit());
  return JsonUtil.toJsonString(builder.build());
}
Example usage of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project: the createMockFile helper of class TestTruncateQuotaUpdate.
/**
 * Builds an INodeFile of the requested logical size out of BLOCKSIZE-sized
 * blocks (the last block may be shorter), with a fixed foo/bar 0777 owner.
 */
private INodeFile createMockFile(long size, short replication) {
  ArrayList<BlockInfo> blockList = new ArrayList<>();
  // Cover the requested size one block at a time; only the final block is
  // allowed to be smaller than BLOCKSIZE.
  for (long covered = 0; covered < size; covered += BLOCKSIZE) {
    blockList.add(newBlock(Math.min(BLOCKSIZE, size - covered), replication));
  }
  // 0777 is the same bit pattern as the original 0x1ff, written as a mode.
  PermissionStatus perm = new PermissionStatus("foo", "bar",
      FsPermission.createImmutable((short) 0777));
  return new INodeFile(++nextMockINodeId, new byte[0], perm, 0, 0,
      blockList.toArray(new BlockInfo[0]), replication, BLOCKSIZE);
}
Example usage of org.apache.hadoop.fs.permission.PermissionStatus in the Apache Hadoop project: the setup method of class TestUpgradeDomainBlockPlacementPolicy.
@Before
// Spins up a 6-datanode MiniDFSCluster configured with the upgrade-domain
// block placement policy, then loads the datanode admin properties.
public void setup() throws IOException {
// Clear the static host-to-rack mapping so state from earlier tests cannot leak.
StaticMapping.resetMap();
Configuration conf = new HdfsConfiguration();
// Small block size (and half-size checksum chunk) keeps test files cheap.
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
// Install the upgrade-domain-aware placement policy under test.
conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, BlockPlacementPolicyWithUpgradeDomain.class, BlockPlacementPolicy.class);
// CombinedHostFileManager reads the hosts file written by hostsFileWriter below.
conf.setClass(DFSConfigKeys.DFS_NAMENODE_HOSTS_PROVIDER_CLASSNAME_KEY, CombinedHostFileManager.class, HostConfigManager.class);
hostsFileWriter.initialize(conf, "temp/upgradedomainpolicy");
// Six datanodes spread over the preconfigured racks/hosts arrays.
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(6).racks(racks).hosts(hosts).build();
cluster.waitActive();
nameNodeRpc = cluster.getNameNodeRpc();
namesystem = cluster.getNamesystem();
// NOTE(review): group is deliberately null here — presumably PermissionStatus
// tolerates a null group for test purposes; confirm nothing reads it.
perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null, FsPermission.getDefault());
refreshDatanodeAdminProperties();
}
Aggregations