Search in sources :

Example 66 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class TestEncryptionZoneManager, method setup():

/**
 * Initializes the test fixture: mocks FSDirectory and INodesInPath, builds
 * three real INodeDirectory instances (root=0, first=1, second=2) sharing a
 * default rwxr-xr-x permission, and wires the mocked directory so that lock
 * checks succeed and getInode(id) resolves to the corresponding inode.
 */
@Before
public void setup() {
    this.mockedDir = mock(FSDirectory.class);
    this.mockedINodesInPath = mock(INodesInPath.class);
    // Fix: FsPermission(short) interprets the argument as raw permission
    // bits, so the previous decimal literal (short) 755 produced bogus mode
    // bits rather than rwxr-xr-x. The octal literal 0755 matches the
    // intent (and the new FsPermission("755") form used in sibling tests).
    this.defaultPermission = new PermissionStatus("test", "test", new FsPermission((short) 0755));
    this.rootINode = new INodeDirectory(0L, "".getBytes(), defaultPermission, System.currentTimeMillis());
    this.firstINode = new INodeDirectory(1L, "first".getBytes(), defaultPermission, System.currentTimeMillis());
    this.secondINode = new INodeDirectory(2L, "second".getBytes(), defaultPermission, System.currentTimeMillis());
    // Pretend both locks are held so FSDirectory assertions pass under test.
    when(this.mockedDir.hasReadLock()).thenReturn(true);
    when(this.mockedDir.hasWriteLock()).thenReturn(true);
    when(this.mockedDir.getInode(0L)).thenReturn(rootINode);
    when(this.mockedDir.getInode(1L)).thenReturn(firstINode);
    when(this.mockedDir.getInode(2L)).thenReturn(secondINode);
}
Also used : FsPermission(org.apache.hadoop.fs.permission.FsPermission) PermissionStatus(org.apache.hadoop.fs.permission.PermissionStatus) Before(org.junit.Before)

Example 67 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class TestFSEditLogLoader, method testAddNewStripedBlock():

/**
 * Verifies that an OP_ADD_BLOCK edit-log entry for a striped (erasure-coded)
 * block is correctly replayed on top of a saved fsimage: after a NameNode
 * restart, the reloaded inode must still be striped and carry the same block
 * id, length, generation stamp, and data/parity unit counts.
 */
@Test
public void testAddNewStripedBlock() throws IOException {
    // start a cluster with enough datanodes for the EC policy's stripe width
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, testECPolicy.getName());
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/ec";
        String testFile = "testfile_001";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser1";
        String clientMachine = "testMachine1";
        long blkId = 1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        short blockNum = (short) testECPolicy.getNumDataUnits();
        short parityNum = (short) testECPolicy.getNumParityUnits();
        // set the erasure-coding policy on the test directory
        fs.mkdir(new Path(testDir), new FsPermission("755"));
        fs.getClient().getNamenode().setErasureCodingPolicy(testDir, testECPolicy.getName());
        // Create a file with striped block
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        // Checkpoint the namespace BEFORE adding the block, so the new block
        // exists only in the edit log and must be recovered by replay.
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        fns.leaveSafeMode(false);
        // Add a striped block to the file
        BlockInfoStriped stripedBlk = new BlockInfoStriped(new Block(blkId, blkNumBytes, timestamp), testECPolicy);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(stripedBlk);
        fns.getEditLog().logAddBlock(testFilePath, file);
        TestINodeFile.toCompleteFile(file);
        //If the block by loaded is the same as above it means that
        //we have successfully applied the edit log to the fsimage.
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        INodeFile inodeLoaded = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        assertTrue(inodeLoaded.isStriped());
        BlockInfo[] blks = inodeLoaded.getBlocks();
        assertEquals(1, blks.length);
        assertEquals(blkId, blks[0].getBlockId());
        assertEquals(blkNumBytes, blks[0].getNumBytes());
        assertEquals(timestamp, blks[0].getGenerationStamp());
        assertEquals(blockNum, ((BlockInfoStriped) blks[0]).getDataBlockNum());
        assertEquals(parityNum, ((BlockInfoStriped) blks[0]).getParityBlockNum());
        // Shut down here and null the reference so the finally block does not
        // shut the cluster down a second time.
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) BlockInfoStriped(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) BlockInfo(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo) Block(org.apache.hadoop.hdfs.protocol.Block) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)

Example 68 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class TestFSImage, method testHasNonEcBlockUsingStripedIDForLoadUCFile():

/**
 * Verifies that loading an fsimage containing an under-construction file
 * whose contiguous block carries a negative (striped-range) block id causes
 * the BlockManager to report hasNonEcBlockUsingStripedID() == true after a
 * NameNode restart.
 */
@Test
public void testHasNonEcBlockUsingStripedIDForLoadUCFile() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/test_block_manager";
        String testFile = "testfile_loaducfile";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser_loaducfile";
        String clientMachine = "testMachine_loaducfile";
        // A negative block id falls in the range reserved for striped blocks.
        long blkId = -1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        fs.mkdir(new Path(testDir), new FsPermission("755"));
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        // Attach a CONTIGUOUS block that nevertheless uses a striped-range id.
        BlockInfoContiguous cBlk = new BlockInfoContiguous(new Block(blkId, blkNumBytes, timestamp), (short) 3);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        // Leave the file under construction (no toCompleteFile call) so the
        // fsimage loader exercises the UC-file code path.
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(cBlk);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        // Shut down and null the reference so finally does not double-shutdown.
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Block(org.apache.hadoop.hdfs.protocol.Block) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)

Example 69 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class TestFSImage, method testHasNonEcBlockUsingStripedIDForLoadFile():

/**
 * Verifies that a completed file whose contiguous block uses a negative
 * (striped-range) block id makes hasNonEcBlockUsingStripedID() true after an
 * fsimage reload, and that deleting the file and reloading again resets the
 * flag to false.
 */
@Test
public void testHasNonEcBlockUsingStripedIDForLoadFile() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/test_block_manager";
        String testFile = "testfile_loadfile";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser_loadfile";
        String clientMachine = "testMachine_loadfile";
        // A negative block id falls in the range reserved for striped blocks.
        long blkId = -1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        fs.mkdir(new Path(testDir), new FsPermission("755"));
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        // Attach a CONTIGUOUS block that nevertheless uses a striped-range id,
        // then complete the file so the loader takes the regular-file path.
        BlockInfoContiguous cBlk = new BlockInfoContiguous(new Block(blkId, blkNumBytes, timestamp), (short) 3);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(cBlk);
        TestINodeFile.toCompleteFile(file);
        // Checkpoint and restart: the reloaded image must set the flag.
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        //after nonEcBlockUsingStripedID is deleted
        //the hasNonEcBlockUsingStripedID is set to false
        fs = cluster.getFileSystem();
        fs.delete(p, false);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertFalse(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        // Shut down and null the reference so finally does not double-shutdown.
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Block(org.apache.hadoop.hdfs.protocol.Block) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)

Example 70 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

From the class TestFSImage, method testHasNonEcBlockUsingStripedIDForLoadSnapshot():

/**
 * Verifies that a contiguous block with a negative (striped-range) id that
 * survives only inside a snapshot (the live file is truncated to 0) still
 * causes hasNonEcBlockUsingStripedID() to be true after an fsimage reload —
 * i.e. the snapshot-loading path also detects such blocks.
 */
@Test
public void testHasNonEcBlockUsingStripedIDForLoadSnapshot() throws IOException {
    // start a cluster
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        FSNamesystem fns = cluster.getNamesystem();
        String testDir = "/test_block_manager";
        String testFile = "testfile_loadSnapshot";
        String testFilePath = testDir + "/" + testFile;
        String clientName = "testUser_loadSnapshot";
        String clientMachine = "testMachine_loadSnapshot";
        // A negative block id falls in the range reserved for striped blocks.
        long blkId = -1;
        long blkNumBytes = 1024;
        long timestamp = 1426222918;
        Path d = new Path(testDir);
        fs.mkdir(d, new FsPermission("755"));
        fs.allowSnapshot(d);
        Path p = new Path(testFilePath);
        DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
        // Attach a CONTIGUOUS block that nevertheless uses a striped-range id.
        BlockInfoContiguous cBlk = new BlockInfoContiguous(new Block(blkId, blkNumBytes, timestamp), (short) 3);
        INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
        file.toUnderConstruction(clientName, clientMachine);
        file.addBlock(cBlk);
        TestINodeFile.toCompleteFile(file);
        // Snapshot the directory, then truncate the live file so the odd block
        // is referenced only from the snapshot copy.
        fs.createSnapshot(d, "testHasNonEcBlockUsingStripeID");
        fs.truncate(p, 0);
        fns.enterSafeMode(false);
        fns.saveNamespace(0, 0);
        cluster.restartNameNodes();
        cluster.waitActive();
        fns = cluster.getNamesystem();
        assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
        // Shut down and null the reference so finally does not double-shutdown.
        cluster.shutdown();
        cluster = null;
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) BlockInfoContiguous(org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Block(org.apache.hadoop.hdfs.protocol.Block) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)

Aggregations

FsPermission (org.apache.hadoop.fs.permission.FsPermission)427 Path (org.apache.hadoop.fs.Path)267 Test (org.junit.Test)180 IOException (java.io.IOException)120 FileSystem (org.apache.hadoop.fs.FileSystem)93 Configuration (org.apache.hadoop.conf.Configuration)89 FileStatus (org.apache.hadoop.fs.FileStatus)87 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)52 AccessControlException (org.apache.hadoop.security.AccessControlException)43 UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)36 FileNotFoundException (java.io.FileNotFoundException)33 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)29 File (java.io.File)26 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)26 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)26 AclEntry (org.apache.hadoop.fs.permission.AclEntry)25 ArrayList (java.util.ArrayList)22 HashMap (java.util.HashMap)19 YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)16 URI (java.net.URI)15