
Example 21 with PermissionStatus

Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.

From class TestINodeFile, method createTreeOfInodes.

/**
   * For a given path, build a tree of INodes and return the leaf node.
   */
private INode createTreeOfInodes(String path) throws QuotaExceededException {
    byte[][] components = INode.getPathComponents(path);
    FsPermission perm = FsPermission.createImmutable((short) 0755);
    PermissionStatus permstatus = PermissionStatus.createImmutable("", "", perm);
    long id = 0;
    INodeDirectory prev = new INodeDirectory(++id, new byte[0], permstatus, 0);
    INodeDirectory dir = null;
    for (byte[] component : components) {
        if (component.length == 0) {
            continue;
        }
        System.out.println("Adding component " + DFSUtil.bytes2String(component));
        dir = new INodeDirectory(++id, component, permstatus, 0);
        prev.addChild(dir, false, Snapshot.CURRENT_STATE_ID);
        prev = dir;
    }
    // Last Inode in the chain
    return dir;
}
Also used: FsPermission (org.apache.hadoop.fs.permission.FsPermission), PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus)
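
A caller of this helper typically builds the chain for a path and then inspects the leaf it returns. The sketch below is illustrative only and assumes it lives inside TestINodeFile next to createTreeOfInodes, with the usual static import of org.junit.Assert.assertEquals; the path string, test name, and assertion are not taken from the original.

// Hypothetical usage: build INodes for "/level1/level2" and check that the
// returned leaf carries the last path component.
@Test
public void testCreateTreeOfInodesLeaf() throws QuotaExceededException {
    INode leaf = createTreeOfInodes("/level1/level2");
    assertEquals("level2", leaf.getLocalName());
}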

Example 22 with PermissionStatus

Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.

From class TestFSPermissionChecker, method createINodeFile.

private static INodeFile createINodeFile(INodeDirectory parent, String name,
        String owner, String group, short perm) throws IOException {
    PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group,
            FsPermission.createImmutable(perm));
    INodeFile inodeFile = new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
            name.getBytes("UTF-8"), permStatus, 0L, 0L, null, REPLICATION,
            PREFERRED_BLOCK_SIZE);
    parent.addChild(inodeFile);
    return inodeFile;
}
Also used: PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus)
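
This helper relies on the class-level constants REPLICATION and PREFERRED_BLOCK_SIZE and on a parent INodeDirectory created elsewhere in TestFSPermissionChecker. A minimal sketch of that surrounding setup, with assumed values and names that may differ from the real test class:

// Assumed constants and setup; the actual values in TestFSPermissionChecker may differ.
private static final short REPLICATION = 3;
private static final long PREFERRED_BLOCK_SIZE = 128 * 1024 * 1024;

private INodeDirectory parent;

@Before
public void setUp() {
    // Hypothetical parent directory; the inode id 1L is arbitrary for this sketch.
    parent = new INodeDirectory(1L, DFSUtil.string2Bytes("dir"),
            PermissionStatus.createImmutable("owner", "group",
                    FsPermission.createImmutable((short) 0755)), 0L);
}

@Test
public void testCreateINodeFile() throws IOException {
    INodeFile file = createINodeFile(parent, "file1", "owner", "group", (short) 0644);
    assertEquals("file1", file.getLocalName());
}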

Example 23 with PermissionStatus

Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.

From class TestEditLogTailer, method testTailer.

@Test
public void testTailer() throws IOException, InterruptedException, ServiceFailedException {
    Configuration conf = getConf();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_ALL_NAMESNODES_RETRY_KEY, 100);
    HAUtil.setAllowStandbyReads(conf, true);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .nnTopology(MiniDFSNNTopology.simpleHATopology())
            .numDataNodes(0)
            .build();
    cluster.waitActive();
    cluster.transitionToActive(0);
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    try {
        for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
            NameNodeAdapter.mkdirs(nn1, getDirPath(i), new PermissionStatus("test", "test", new FsPermission((short) 00755)), true);
        }
        HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
        for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
            assertTrue(NameNodeAdapter.getFileInfo(nn2, getDirPath(i), false).isDir());
        }
        for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
            NameNodeAdapter.mkdirs(nn1, getDirPath(i), new PermissionStatus("test", "test", new FsPermission((short) 00755)), true);
        }
        HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
        for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
            assertTrue(NameNodeAdapter.getFileInfo(nn2, getDirPath(i), false).isDir());
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FsPermission (org.apache.hadoop.fs.permission.FsPermission), PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus), Test (org.junit.Test)
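
Note that this test builds PermissionStatus with its public constructor, while the other examples use the createImmutable factory. Both carry owner, group and mode; as far as I can tell the factory returns an instance that rejects later mutation (for example via applyUMask), which makes it safer to share across fixtures. A small illustrative fragment, not taken from the original test:

// Mutable form, as used in testTailer above.
PermissionStatus mutable =
        new PermissionStatus("test", "test", new FsPermission((short) 00755));
// Immutable form, as used in the other examples; mutating calls such as
// applyUMask are expected to throw UnsupportedOperationException.
PermissionStatus immutable = PermissionStatus.createImmutable("test", "test",
        FsPermission.createImmutable((short) 00755));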

Example 24 with PermissionStatus

Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.

From class TestStartup, method testCompression.

@Test
public void testCompression() throws IOException {
    LOG.info("Test compressing image.");
    Configuration conf = new Configuration();
    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
    File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(base_dir, "name").getPath());
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
    DFSTestUtil.formatNameNode(conf);
    // create an uncompressed image
    LOG.info("Create an uncompressed fsimage");
    NameNode namenode = new NameNode(conf);
    namenode.getNamesystem().mkdirs("/test", new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    assertTrue(nnRpc.getFileInfo("/test").isDir());
    nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    nnRpc.saveNamespace(0, 0);
    namenode.stop();
    namenode.join();
    namenode.joinHttpServer();
    // compress image using default codec
    LOG.info("Read an uncomressed image and store it compressed using default codec.");
    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
    checkNameSpace(conf);
    // read image compressed using the default and compress it using Gzip codec
    LOG.info("Read a compressed image and store it using a different codec.");
    conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY, "org.apache.hadoop.io.compress.GzipCodec");
    checkNameSpace(conf);
    // read an image compressed in Gzip and store it uncompressed
    LOG.info("Read a compressed image and store it as uncompressed.");
    conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
    checkNameSpace(conf);
    // read an uncompressed image and store it uncompressed
    LOG.info("Read an uncompressed image and store it as uncompressed.");
    checkNameSpace(conf);
}
Also used: NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile), File (java.io.File), PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus), Test (org.junit.Test)
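
The checkNameSpace helper is defined elsewhere in TestStartup and is not shown in this excerpt. Purely as an assumption about what the test needs (restart a NameNode from the given conf, verify /test survived the reload, and write the image back out), a minimal version might look like the following; it simply mirrors the calls already made in testCompression and is not the actual implementation:

private void checkNameSpace(Configuration conf) throws IOException {
    // Reload the namespace from the (possibly compressed) fsimage.
    NameNode namenode = new NameNode(conf);
    NamenodeProtocols nnRpc = namenode.getRpcServer();
    // The directory created before the first save must still be there.
    assertTrue(nnRpc.getFileInfo("/test").isDir());
    // Persist the namespace again so the next round reads the newly written image.
    nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    nnRpc.saveNamespace(0, 0);
    namenode.stop();
    namenode.join();
}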

Example 25 with PermissionStatus

Use of org.apache.hadoop.fs.permission.PermissionStatus in project hadoop by apache.

From class TestNestedSnapshots, method testIdCmp.

/**
   * Test {@link Snapshot#ID_COMPARATOR}.
   */
@Test(timeout = 300000)
public void testIdCmp() {
    final PermissionStatus perm = PermissionStatus.createImmutable("user", "group", FsPermission.createImmutable((short) 0));
    final INodeDirectory snapshottable = new INodeDirectory(0, DFSUtil.string2Bytes("foo"), perm, 0L);
    snapshottable.addSnapshottableFeature();
    final Snapshot[] snapshots = {
            new Snapshot(1, "s1", snapshottable),
            new Snapshot(1, "s1", snapshottable),
            new Snapshot(2, "s2", snapshottable),
            new Snapshot(2, "s2", snapshottable) };
    Assert.assertEquals(0, Snapshot.ID_COMPARATOR.compare(null, null));
    for (Snapshot s : snapshots) {
        Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null, s) > 0);
        Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(s, null) < 0);
        for (Snapshot t : snapshots) {
            final int expected = s.getRoot().getLocalName().compareTo(t.getRoot().getLocalName());
            final int computed = Snapshot.ID_COMPARATOR.compare(s, t);
            Assert.assertEquals(expected > 0, computed > 0);
            Assert.assertEquals(expected == 0, computed == 0);
            Assert.assertEquals(expected < 0, computed < 0);
        }
    }
}
Also used: INodeDirectory (org.apache.hadoop.hdfs.server.namenode.INodeDirectory), PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus), Test (org.junit.Test)
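
Because the comparator treats null as the largest value (the compare(null, s) > 0 assertions above), it can be used directly for sorting, with nulls ending up last. An illustrative fragment, assuming java.util.Arrays is imported and reusing the snapshots array from the test:

// Nulls sort to the end because ID_COMPARATOR considers null greater than any snapshot.
Snapshot[] mixed = { snapshots[2], null, snapshots[0] };
Arrays.sort(mixed, Snapshot.ID_COMPARATOR);
// Resulting order: id 1 ("s1"), id 2 ("s2"), null.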

Aggregations

PermissionStatus (org.apache.hadoop.fs.permission.PermissionStatus): 33
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 11
Configuration (org.apache.hadoop.conf.Configuration): 9
IOException (java.io.IOException): 7
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 6
Test (org.junit.Test): 6
Path (org.apache.hadoop.fs.Path): 5
Before (org.junit.Before): 5
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 4
Block (org.apache.hadoop.hdfs.protocol.Block): 3
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 3
StorageException (com.microsoft.azure.storage.StorageException): 2
DataOutputStream (java.io.DataOutputStream): 2
File (java.io.File): 2
FileNotFoundException (java.io.FileNotFoundException): 2
ArrayList (java.util.ArrayList): 2
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 2
FileAlreadyExistsException (org.apache.hadoop.fs.FileAlreadyExistsException): 2
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 2
Mockito.doAnswer (org.mockito.Mockito.doAnswer): 2