Example 16 with PermissionStatus

Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.

The class FSEditLog, method logMkDir.

/** 
   * Add create directory record to edit log
   */
public void logMkDir(String path, INode newNode) {
    PermissionStatus permissions = newNode.getPermissionStatus();
    MkdirOp op = MkdirOp.getInstance(cache.get())
        .setInodeId(newNode.getId())
        .setPath(path)
        .setTimestamp(newNode.getModificationTime())
        .setPermissionStatus(permissions);
    AclFeature f = newNode.getAclFeature();
    if (f != null) {
        op.setAclEntries(AclStorage.readINodeLogicalAcl(newNode));
    }
    XAttrFeature x = newNode.getXAttrFeature();
    if (x != null) {
        op.setXAttrs(x.getXAttrs());
    }
    logEdit(op);
}
Also used : MkdirOp(org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus)
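
For context, PermissionStatus is the immutable (owner, group, mode) triple that logMkDir copies from the new inode into the edit-log record. A minimal standalone sketch of constructing and reading one (the user and group names here are illustrative, not taken from the Hadoop source):

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;

public class PermissionStatusDemo {
    public static void main(String[] args) {
        // 0755 octal: rwxr-xr-x
        PermissionStatus perm = new PermissionStatus("hdfs", "supergroup",
            FsPermission.createImmutable((short) 0755));
        System.out.println(perm.getUserName());   // hdfs
        System.out.println(perm.getGroupName());  // supergroup
        System.out.println(perm.getPermission()); // rwxr-xr-x
    }
}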

Example 17 with PermissionStatus

Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.

The class TestGetBlockLocations, method setupFileSystem.

private static FSNamesystem setupFileSystem() throws IOException {
    Configuration conf = new Configuration();
    conf.setLong(DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, 1L);
    FSEditLog editlog = mock(FSEditLog.class);
    FSImage image = mock(FSImage.class);
    when(image.getEditLog()).thenReturn(editlog);
    final FSNamesystem fsn = new FSNamesystem(conf, image, true);
    final FSDirectory fsd = fsn.getFSDirectory();
    INodesInPath iip = fsd.getINodesInPath("/", DirOp.READ);
    PermissionStatus perm = new PermissionStatus("hdfs", "supergroup",
        FsPermission.createImmutable((short) 0x1ff));
    final INodeFile file = new INodeFile(MOCK_INODE_ID,
        FILE_NAME.getBytes(StandardCharsets.UTF_8), perm, 1, 1,
        new BlockInfo[] {}, (short) 1, DFS_BLOCK_SIZE_DEFAULT);
    fsn.getFSDirectory().addINode(iip, file, null);
    return fsn;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus)
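
One detail worth noting: the mode here is written in hexadecimal. 0x1ff is 511 decimal, which equals octal 0777, so the test file is created rwxrwxrwx. A quick check of the equivalence:

import org.apache.hadoop.fs.permission.FsPermission;

public class ModeCheck {
    public static void main(String[] args) {
        // 0x1ff == 0777 == 511: all nine permission bits set
        System.out.println((short) 0x1ff == (short) 0777);                // true
        System.out.println(FsPermission.createImmutable((short) 0x1ff));  // rwxrwxrwx
    }
}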

Example 18 with PermissionStatus

Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.

The class NameNodeRpcServer, method createSymlink.

// ClientProtocol
@Override
public void createSymlink(String target, String link, FsPermission dirPerms, boolean createParent) throws IOException {
    checkNNStartup();
    namesystem.checkOperation(OperationCategory.WRITE);
    CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
    if (cacheEntry != null && cacheEntry.isSuccess()) {
        // Return previous response
        return;
    }
    /* We enforce the MAX_PATH_LENGTH limit even though a symlink target
     * URI may refer to a non-HDFS file system. 
     */
    if (!checkPathLength(link)) {
        throw new IOException("Symlink path exceeds " + MAX_PATH_LENGTH + " character limit");
    }
    final UserGroupInformation ugi = getRemoteUser();
    boolean success = false;
    try {
        PermissionStatus perm = new PermissionStatus(ugi.getShortUserName(), null, dirPerms);
        namesystem.createSymlink(target, link, perm, createParent, cacheEntry != null);
        success = true;
    } finally {
        RetryCache.setState(cacheEntry, success);
    }
}
Also used : IOException(java.io.IOException) CacheEntry(org.apache.hadoop.ipc.RetryCache.CacheEntry) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus)
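
The group is deliberately passed as null: when HDFS materializes the inode it assigns the group of the parent directory (the BSD rule), so the RPC server only needs the caller's short user name. A hedged sketch of the same construction outside the server, using UserGroupInformation.getCurrentUser() in place of the server-internal getRemoteUser():

import java.io.IOException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.security.UserGroupInformation;

public class SymlinkPermSketch {
    public static void main(String[] args) throws IOException {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // null group: HDFS substitutes the parent directory's group on creation
        PermissionStatus perm = new PermissionStatus(ugi.getShortUserName(),
            null, FsPermission.getDefault());
        System.out.println(perm.getUserName() + ":" + perm.getPermission());
    }
}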

Example 19 with PermissionStatus

Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.

The class NameNodeRpcServer, method create.

// ClientProtocol
@Override
public HdfsFileStatus create(String src, FsPermission masked, String clientName,
        EnumSetWritable<CreateFlag> flag, boolean createParent, short replication,
        long blockSize, CryptoProtocolVersion[] supportedVersions) throws IOException {
    checkNNStartup();
    String clientMachine = getClientMachine();
    if (stateChangeLog.isDebugEnabled()) {
        stateChangeLog.debug("*DIR* NameNode.create: file " + src + " for " + clientName + " at " + clientMachine);
    }
    if (!checkPathLength(src)) {
        throw new IOException("create: Pathname too long.  Limit " + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
    }
    namesystem.checkOperation(OperationCategory.WRITE);
    CacheEntryWithPayload cacheEntry = RetryCache.waitForCompletion(retryCache, null);
    if (cacheEntry != null && cacheEntry.isSuccess()) {
        return (HdfsFileStatus) cacheEntry.getPayload();
    }
    HdfsFileStatus status = null;
    try {
        PermissionStatus perm = new PermissionStatus(getRemoteUser().getShortUserName(), null, masked);
        status = namesystem.startFile(src, perm, clientName, clientMachine,
            flag.get(), createParent, replication, blockSize, supportedVersions,
            cacheEntry != null);
    } finally {
        RetryCache.setState(cacheEntry, status != null, status);
    }
    metrics.incrFilesCreated();
    metrics.incrCreateFileOps();
    return status;
}
Also used : HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) IOException(java.io.IOException) CacheEntryWithPayload(org.apache.hadoop.ipc.RetryCache.CacheEntryWithPayload) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus)
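
The masked argument arrives from the client with the umask already applied; as in createSymlink above, the server supplies only the user name and leaves the group null. A sketch of how such a masked permission is typically derived on the client side (the 0666 base mode for files is an assumption for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;

public class MaskSketch {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // umask comes from fs.permissions.umask-mode (022 unless overridden)
        FsPermission umask = FsPermission.getUMask(conf);
        FsPermission masked = new FsPermission((short) 0666).applyUMask(umask);
        System.out.println(masked); // rw-r--r-- under the default 022 umask
    }
}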

Example 20 with PermissionStatus

Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.

The class TestFSImage, method testSaveAndLoadStripedINodeFile.

private void testSaveAndLoadStripedINodeFile(FSNamesystem fsn, Configuration conf, boolean isUC) throws IOException {
    // Construct an INode with StripedBlock for saving and loading
    fsn.setErasureCodingPolicy("/", testECPolicy.getName(), false);
    long id = 123456789;
    byte[] name = "testSaveAndLoadInodeFile_testfile".getBytes();
    PermissionStatus permissionStatus = new PermissionStatus("testuser_a", "testuser_groups", new FsPermission((short) 0x755));
    long mtime = 1426222916 - 3600;
    long atime = 1426222916;
    BlockInfoContiguous[] blocks = new BlockInfoContiguous[0];
    byte erasureCodingPolicyID = testECPolicy.getId();
    long preferredBlockSize = 128 * 1024 * 1024;
    INodeFile file = new INodeFile(id, name, permissionStatus, mtime, atime,
        blocks, null, erasureCodingPolicyID, preferredBlockSize, (byte) 0,
        BlockType.STRIPED);
    ByteArrayOutputStream bs = new ByteArrayOutputStream();
    // Construct StripedBlocks for the INode
    BlockInfoStriped[] stripedBlocks = new BlockInfoStriped[3];
    long stripedBlkId = 10000001;
    long timestamp = mtime + 3600;
    for (int i = 0; i < stripedBlocks.length; i++) {
        stripedBlocks[i] = new BlockInfoStriped(new Block(stripedBlkId + i, preferredBlockSize, timestamp), testECPolicy);
        file.addBlock(stripedBlocks[i]);
    }
    final String client = "testClient";
    final String clientMachine = "testClientMachine";
    final String path = "testUnderConstructionPath";
    // Save the INode to byte array
    DataOutput out = new DataOutputStream(bs);
    if (isUC) {
        file.toUnderConstruction(client, clientMachine);
        FSImageSerialization.writeINodeUnderConstruction((DataOutputStream) out, file, path);
    } else {
        FSImageSerialization.writeINodeFile(file, out, false);
    }
    DataInput in = new DataInputStream(new ByteArrayInputStream(bs.toByteArray()));
    // load the INode from the byte array
    INodeFile fileByLoaded;
    if (isUC) {
        fileByLoaded = FSImageSerialization.readINodeUnderConstruction(in, fsn, fsn.getFSImage().getLayoutVersion());
    } else {
        fileByLoaded = (INodeFile) new FSImageFormat.Loader(conf, fsn).loadINodeWithLocalName(false, in, false);
    }
    assertEquals(id, fileByLoaded.getId());
    assertArrayEquals(isUC ? path.getBytes() : name, fileByLoaded.getLocalName().getBytes());
    assertEquals(permissionStatus.getUserName(), fileByLoaded.getPermissionStatus().getUserName());
    assertEquals(permissionStatus.getGroupName(), fileByLoaded.getPermissionStatus().getGroupName());
    assertEquals(permissionStatus.getPermission(), fileByLoaded.getPermissionStatus().getPermission());
    assertEquals(mtime, fileByLoaded.getModificationTime());
    assertEquals(isUC ? mtime : atime, fileByLoaded.getAccessTime());
    // TODO for striped blocks, we currently save and load them as contiguous
    // blocks to/from legacy fsimage
    assertEquals(3, fileByLoaded.getBlocks().length);
    assertEquals(preferredBlockSize, fileByLoaded.getPreferredBlockSize());
    assertEquals(file.getFileReplication(), fileByLoaded.getFileReplication());
    if (isUC) {
        assertEquals(client, fileByLoaded.getFileUnderConstructionFeature().getClientName());
        assertEquals(clientMachine, fileByLoaded.getFileUnderConstructionFeature().getClientMachine());
    }
}
Also used : BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) DataOutput(java.io.DataOutput) DataOutputStream(java.io.DataOutputStream) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) DataInputStream(java.io.DataInputStream) DataInput(java.io.DataInput) ByteArrayInputStream(java.io.ByteArrayInputStream) Block(org.apache.hadoop.hdfs.protocol.Block) FsPermission(org.apache.hadoop.fs.permission.FsPermission) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus)
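
Note that the mode in this test is also hexadecimal: new FsPermission((short) 0x755) is not the octal 0755 (rwxr-xr-x) a reader might expect. Since the test only round-trips the inode through serialization, the exact bits are harmless, but the difference is easy to demonstrate:

import org.apache.hadoop.fs.permission.FsPermission;

public class HexVsOctal {
    public static void main(String[] args) {
        System.out.println(new FsPermission((short) 0x755)); // not rwxr-xr-x
        System.out.println(new FsPermission((short) 0755));  // rwxr-xr-x
    }
}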

Aggregations

PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus): 33 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 11 uses
Configuration (org.apache.hadoop.conf.Configuration): 9 uses
IOException (java.io.IOException): 7 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6 uses
Test (org.junit.Test): 6 uses
Path (org.apache.hadoop.fs.Path): 5 uses
Before (org.junit.Before): 5 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 4 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 3 uses
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 3 uses
StorageException (com.microsoft.azure.storage.StorageException): 2 uses
DataOutputStream (java.io.DataOutputStream): 2 uses
File (java.io.File): 2 uses
FileNotFoundException (java.io.FileNotFoundException): 2 uses
ArrayList (java.util.ArrayList): 2 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2 uses
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 2 uses
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 2 uses
Mockito.doAnswer (org.mockito.Mockito.doAnswer): 2 uses