Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.
From the class FSEditLog, method logMkDir.
/**
 * Add create directory record to edit log
 */
public void logMkDir(String path, INode newNode) {
  PermissionStatus permissions = newNode.getPermissionStatus();
  MkdirOp op = MkdirOp.getInstance(cache.get())
      .setInodeId(newNode.getId())
      .setPath(path)
      .setTimestamp(newNode.getModificationTime())
      .setPermissionStatus(permissions);
  AclFeature f = newNode.getAclFeature();
  if (f != null) {
    op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
  }
  XAttrFeature x = newNode.getXAttrFeature();
  if (x != null) {
    op.setXAttrs(x.getXAttrs());
  }
  logEdit(op);
}
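For orientation, PermissionStatus is just a holder for an owner, a group, and an FsPermission. A minimal, self-contained sketch of constructing and reading one back (the user and group names here are illustrative, not taken from the snippet above):

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;

public class PermissionStatusSketch {
  public static void main(String[] args) {
    // Owner, group, and mode bits; octal 0755 is rwxr-xr-x.
    PermissionStatus perm = new PermissionStatus(
        "hdfs", "supergroup", new FsPermission((short) 0755));
    System.out.println(perm.getUserName());   // hdfs
    System.out.println(perm.getGroupName());  // supergroup
    System.out.println(perm.getPermission()); // rwxr-xr-x
  }
}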
Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.
From the class TestGetBlockLocations, method setupFileSystem.
private static FSNamesystem setupFileSystem() throws IOException {
  Configuration conf = new Configuration();
  conf.setLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1L);
  FSEditLog editlog = mock(FSEditLog.class);
  FSImage image = mock(FSImage.class);
  when(image.getEditLog()).thenReturn(editlog);
  final FSNamesystem fsn = new FSNamesystem(conf, image, true);
  final FSDirectory fsd = fsn.getFSDirectory();
  INodesInPath iip = fsd.getINodesInPath("/", DirOp.READ);
  PermissionStatus perm = new PermissionStatus(
      "hdfs", "supergroup", FsPermission.createImmutable((short) 0x1ff));
  final INodeFile file = new INodeFile(
      MOCK_INODE_ID, FILE_NAME.getBytes(StandardCharsets.UTF_8), perm,
      1, 1, new BlockInfo[] {}, (short) 1, DFS_BLOCK_SIZE_DEFAULT);
  fsn.getFSDirectory().addINode(iip, file, null);
  return fsn;
}
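Note that (short) 0x1ff is 511 decimal, i.e. octal 0777, so the test file is created with full rwxrwxrwx permissions; FsPermission.createImmutable returns a permission object whose mode bits are not meant to be modified afterwards.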
Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.
From the class NameNodeRpcServer, method createSymlink.
// ClientProtocol
@Override
public void createSymlink(String target, String link, FsPermission dirPerms,
    boolean createParent) throws IOException {
  checkNNStartup();
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    // Return previous response
    return;
  }
  /* We enforce the MAX_PATH_LENGTH limit even though a symlink target
   * URI may refer to a non-HDFS file system.
   */
  if (!checkPathLength(link)) {
    throw new IOException("Symlink path exceeds " + MAX_PATH_LENGTH
        + " character limit");
  }
  final UserGroupInformation ugi = getRemoteUser();
  boolean success = false;
  try {
    PermissionStatus perm = new PermissionStatus(
        ugi.getShortUserName(), null, dirPerms);
    namesystem.createSymlink(target, link, perm, createParent,
        cacheEntry != null);
    success = true;
  } finally {
    RetryCache.setState(cacheEntry, success);
  }
}
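On the client side this RPC is typically reached through FileContext.createSymlink. A hedged sketch, with illustrative paths, assuming fs.defaultFS points at a running HDFS cluster; note that symlink creation has been administratively disabled in Hadoop releases for some time, so this call may be rejected:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class CreateSymlinkSketch {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    // Create /data/link pointing at /data/target, creating parent
    // directories of the link if necessary.
    fc.createSymlink(new Path("/data/target"), new Path("/data/link"), true);
  }
}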
Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.
From the class NameNodeRpcServer, method create.
// ClientProtocol
@Override
public HdfsFileStatus create(String src, FsPermission masked,
    String clientName, EnumSetWritable<CreateFlag> flag, boolean createParent,
    short replication, long blockSize,
    CryptoProtocolVersion[] supportedVersions) throws IOException {
  checkNNStartup();
  String clientMachine = getClientMachine();
  if (stateChangeLog.isDebugEnabled()) {
    stateChangeLog.debug("*DIR* NameNode.create: file " + src + " for "
        + clientName + " at " + clientMachine);
  }
  if (!checkPathLength(src)) {
    throw new IOException("create: Pathname too long. Limit "
        + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
  }
  namesystem.checkOperation(OperationCategory.WRITE);
  CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(
      retryCache, null);
  if (cacheEntry != null && cacheEntry.isSuccess()) {
    return (HdfsFileStatus) cacheEntry.getPayload();
  }
  HdfsFileStatus status = null;
  try {
    PermissionStatus perm = new PermissionStatus(
        getRemoteUser().getShortUserName(), null, masked);
    status = namesystem.startFile(src, perm, clientName, clientMachine,
        flag.get(), createParent, replication, blockSize, supportedVersions,
        cacheEntry != null);
  } finally {
    RetryCache.setState(cacheEntry, status != null, status);
  }
  metrics.incrFilesCreated();
  metrics.incrCreateFileOps();
  return status;
}
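In both NameNodeRpcServer handlers above, the PermissionStatus is built with a null group. This matches the BSD rule described in the HDFS permissions guide: a newly created file or directory inherits its group from the parent directory, so the RPC layer leaves the group unset and lets the namesystem fill it in.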
Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.
From the class TestFSImage, method testSaveAndLoadStripedINodeFile.
private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn,
    Configuration conf, boolean isUC) throws IOException {
  // Construct an INode with StripedBlock for saving and loading
  fsn.setErasureCodingPolicy("/", testECPolicy.getName(), false);
  long id = 123456789;
  byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
  PermissionStatus permissionStatus = new PermissionStatus("testuser_a",
      "testuser_groups", new FsPermission((short) 0x755));
  long mtime = 1426222916 - 3600;
  long atime = 1426222916;
  BlockInfoContiguous[] blocks = new BlockInfoContiguous[0];
  byte erasureCodingPolicyID = testECPolicy.getId();
  long preferredBlockSize = 128 * 1024 * 1024;
  INodeFile file = new INodeFile(id, name, permissionStatus, mtime, atime,
      blocks, null, erasureCodingPolicyID, preferredBlockSize,
      (byte) 0, BlockType.STRIPED);
  ByteArrayOutputStream bs = new ByteArrayOutputStream();
  // Construct StripedBlocks for the INode
  BlockInfoStriped[] stripedBlocks = new BlockInfoStriped[3];
  long stripedBlkId = 10000001;
  long timestamp = mtime + 3600;
  for (int i = 0; i < stripedBlocks.length; i++) {
    stripedBlocks[i] = new BlockInfoStriped(
        new Block(stripedBlkId + i, preferredBlockSize, timestamp),
        testECPolicy);
    file.addBlock(stripedBlocks[i]);
  }
  final String client = "testClient";
  final String clientMachine = "testClientMachine";
  final String path = "testUnderConstructionPath";
  // Save the INode to byte array
  DataOutput out = new DataOutputStream(bs);
  if (isUC) {
    file.toUnderConstruction(client, clientMachine);
    FSImageSerialization.writeINodeUnderConstruction(
        (DataOutputStream) out, file, path);
  } else {
    FSImageSerialization.writeINodeFile(file, out, false);
  }
  DataInput in = new DataInputStream(
      new ByteArrayInputStream(bs.toByteArray()));
  // Load the INode from the byte array
  INodeFile fileByLoaded;
  if (isUC) {
    fileByLoaded = FSImageSerialization.readINodeUnderConstruction(
        in, fsn, fsn.getFSImage().getLayoutVersion());
  } else {
    fileByLoaded = (INodeFile) new FSImageFormat.Loader(conf, fsn)
        .loadINodeWithLocalName(false, in, false);
  }
  assertEquals(id, fileByLoaded.getId());
  assertArrayEquals(isUC ? path.getBytes() : name,
      fileByLoaded.getLocalName().getBytes());
  assertEquals(permissionStatus.getUserName(),
      fileByLoaded.getPermissionStatus().getUserName());
  assertEquals(permissionStatus.getGroupName(),
      fileByLoaded.getPermissionStatus().getGroupName());
  assertEquals(permissionStatus.getPermission(),
      fileByLoaded.getPermissionStatus().getPermission());
  assertEquals(mtime, fileByLoaded.getModificationTime());
  assertEquals(isUC ? mtime : atime, fileByLoaded.getAccessTime());
  // TODO for striped blocks, we currently save and load them as contiguous
  // blocks to/from legacy fsimage
  assertEquals(3, fileByLoaded.getBlocks().length);
  assertEquals(preferredBlockSize, fileByLoaded.getPreferredBlockSize());
  assertEquals(file.getFileReplication(), fileByLoaded.getFileReplication());
  if (isUC) {
    assertEquals(client,
        fileByLoaded.getFileUnderConstructionFeature().getClientName());
    assertEquals(clientMachine,
        fileByLoaded.getFileUnderConstructionFeature().getClientMachine());
  }
}
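Taken together, the assertions verify that a striped INodeFile survives a serialize/deserialize round trip through the legacy fsimage code paths: the inode id, name, all three PermissionStatus fields, timestamps, block count, preferred block size, and replication are preserved, with the striped blocks written and read back as contiguous blocks for now (per the TODO above).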