
Example 51 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestCheckpoint method testSaveNamespace.

/**
   * Tests save namespace.
   */
@Test
public void testSaveNamespace() throws IOException {
    MiniDFSCluster cluster = null;
    DistributedFileSystem fs = null;
    FileContext fc;
    try {
        Configuration conf = new HdfsConfiguration();
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        fc = FileContext.getFileContext(cluster.getURI(0));
        // Saving image without safe mode should fail
        DFSAdmin admin = new DFSAdmin(conf);
        String[] args = new String[] { "-saveNamespace" };
        try {
            admin.run(args);
        } catch (IOException eIO) {
            assertTrue(eIO.getLocalizedMessage().contains("Safe mode should be turned ON"));
        } catch (Exception e) {
            throw new IOException(e);
        }
        // create new file
        Path file = new Path("namespace.dat");
        DFSTestUtil.createFile(fs, file, fileSize, fileSize, blockSize, replication, seed);
        checkFile(fs, file, replication);
        // create new link
        Path symlink = new Path("file.link");
        fc.createSymlink(file, symlink, false);
        assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
        // verify that the edits file is NOT empty
        Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
        for (URI uri : editsDirs) {
            File ed = new File(uri.getPath());
            assertTrue(new File(ed, "current/" + NNStorage.getInProgressEditsFileName(1)).length() > Integer.SIZE / Byte.SIZE);
        }
        // Saving image in safe mode should succeed
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        try {
            admin.run(args);
        } catch (Exception e) {
            throw new IOException(e);
        }
        // TODO: Fix the test to not require a hard-coded transaction count.
        final int EXPECTED_TXNS_FIRST_SEG = 13;
        for (URI uri : editsDirs) {
            File ed = new File(uri.getPath());
            File curDir = new File(ed, "current");
            LOG.info("Files in " + curDir + ":\n  " + Joiner.on("\n  ").join(curDir.list()));
            // Verify that the first edits file got finalized
            File originalEdits = new File(curDir, NNStorage.getInProgressEditsFileName(1));
            assertFalse(originalEdits.exists());
            File finalizedEdits = new File(curDir, NNStorage.getFinalizedEditsFileName(1, EXPECTED_TXNS_FIRST_SEG));
            GenericTestUtils.assertExists(finalizedEdits);
            assertTrue(finalizedEdits.length() > Integer.SIZE / Byte.SIZE);
            GenericTestUtils.assertExists(new File(ed, "current/" + NNStorage.getInProgressEditsFileName(EXPECTED_TXNS_FIRST_SEG + 1)));
        }
        Collection<URI> imageDirs = cluster.getNameDirs(0);
        for (URI uri : imageDirs) {
            File imageDir = new File(uri.getPath());
            File savedImage = new File(imageDir, "current/" + NNStorage.getImageFileName(EXPECTED_TXNS_FIRST_SEG));
            assertTrue("Should have saved image at " + savedImage, savedImage.exists());
        }
        // restart cluster and verify file exists
        cluster.shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        checkFile(fs, file, replication);
        fc = FileContext.getFileContext(cluster.getURI(0));
        assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
    } finally {
        if (fs != null)
            fs.close();
        cleanup(cluster);
        cluster = null;
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), URI (java.net.URI), Util.fileAsURI (org.apache.hadoop.hdfs.server.common.Util.fileAsURI), ExitException (org.apache.hadoop.util.ExitUtil.ExitException), ParseException (org.apache.commons.cli.ParseException), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), RandomAccessFile (java.io.RandomAccessFile), EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile), NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile), File (java.io.File), FileContext (org.apache.hadoop.fs.FileContext), Test (org.junit.Test)
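
In production code the sequence this test drives through DFSAdmin reduces to three DistributedFileSystem calls. A minimal sketch, assuming a running NameNode at a hypothetical hdfs://namenode:8020 address (the URI, class name, and port are illustrative, not taken from the test):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

public class SaveNamespaceSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical NameNode URI; substitute your cluster's address.
        DistributedFileSystem dfs = (DistributedFileSystem)
                FileSystem.get(URI.create("hdfs://namenode:8020"), conf);
        // saveNamespace() is rejected unless the NameNode is in safe mode,
        // which is exactly what the first admin.run(args) call above checks.
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        try {
            dfs.saveNamespace();
        } finally {
            dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
            dfs.close();
        }
    }
}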

Example 52 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestAuditLoggerWithCommands method testSetQuota.

@Test
public void testSetQuota() throws Exception {
    Path path = new Path("/testdir/testdir1");
    fs.mkdirs(path);
    fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
    try {
        ((DistributedFileSystem) fileSys).setQuota(path, 10L, 10L);
        fail("The operation should have failed with AccessControlException");
    } catch (AccessControlException ace) {
        // expected: user1 lacks superuser privilege
    }
    String acePattern = ".*allowed=false.*ugi=theDoctor.*cmd=setQuota.*";
    int length = verifyAuditLogs(acePattern);
    fileSys.close();
    try {
        ((DistributedFileSystem) fileSys).setQuota(path, 10L, 10L);
        fail("The operation should have failed with IOException");
    } catch (IOException e) {
        // expected: fileSys was closed above
    }
    assertTrue("Unexpected log from getContentSummary", length == auditlog.getOutput().split("\n").length);
}
Also used: Path (org.apache.hadoop.fs.Path), AccessControlException (org.apache.hadoop.security.AccessControlException), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
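
setQuota takes the namespace quota (maximum number of file and directory names) first and the storage-space quota (bytes) second. A minimal sketch, assuming dfs is an already-open DistributedFileSystem handle with superuser credentials:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

Path dir = new Path("/testdir/testdir1");
// Limit the directory to 10 names and 10 bytes of storage space.
dfs.setQuota(dir, 10L, 10L);
// Quotas can be cleared or left untouched independently:
// QUOTA_RESET removes a quota, QUOTA_DONT_SET leaves it as-is.
dfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);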

Example 53 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestAuditLoggerWithCommands method testRemoveCachePool.

@Test
public void testRemoveCachePool() throws Exception {
    removeExistingCachePools(null);
    CachePoolInfo cacheInfo = new CachePoolInfo("pool1").setMode(new FsPermission((short) 0));
    ((DistributedFileSystem) fs).addCachePool(cacheInfo);
    fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
    try {
        ((DistributedFileSystem) fileSys).removeCachePool("pool1");
        fail("The operation should have failed with AccessControlException");
    } catch (AccessControlException ace) {
        // expected: user1 lacks permission on the pool
    }
    String aceRemoveCachePoolPattern = ".*allowed=false.*ugi=theDoctor.*cmd=removeCachePool.*";
    int length = verifyAuditLogs(aceRemoveCachePoolPattern);
    assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
    try {
        fileSys.close();
        ((DistributedFileSystem) fileSys).removeCachePool("pool1");
        fail("The operation should have failed with IOException");
    } catch (IOException e) {
        // expected: fileSys was closed inside the try block
    }
    assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
}
Also used: AccessControlException (org.apache.hadoop.security.AccessControlException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
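
For contrast with the denied calls above, the privileged path is a simple add-then-remove pair. A minimal sketch, assuming dfs is an open DistributedFileSystem handle owned by the HDFS superuser:

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

// Mode (short) 0 grants no access bits, so only the superuser can operate on
// the pool; that is why user1's removeCachePool call in the test fails with
// AccessControlException.
dfs.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0)));
dfs.removeCachePool("pool1");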

Example 54 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestAuditLoggerWithCommands method testAddCachePool.

@Test
public void testAddCachePool() throws Exception {
    removeExistingCachePools(null);
    CachePoolInfo cacheInfo = new CachePoolInfo("pool1").setMode(new FsPermission((short) 0));
    fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
    try {
        ((DistributedFileSystem) fileSys).addCachePool(cacheInfo);
        fail("The operation should have failed with AccessControlException");
    } catch (AccessControlException ace) {
        // expected: user1 is not allowed to add cache pools
    }
    String aceAddCachePoolPattern = ".*allowed=false.*ugi=theDoctor.*cmd=addCachePool.*";
    int length = verifyAuditLogs(aceAddCachePoolPattern);
    try {
        fileSys.close();
        ((DistributedFileSystem) fileSys).addCachePool(cacheInfo);
        fail("The operation should have failed with IOException");
    } catch (IOException e) {
        // expected: fileSys was closed above
    }
    assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
}
Also used: AccessControlException (org.apache.hadoop.security.AccessControlException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
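
A successful addCachePool can be verified by enumerating the pools. A minimal sketch, again assuming an open superuser dfs handle:

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;

RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
while (pools.hasNext()) {
    CachePoolEntry entry = pools.next();
    // Each entry carries the pool's info (name, owner, mode) and usage stats.
    System.out.println(entry.getInfo().getPoolName());
}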

Example 55 with DistributedFileSystem

use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache.

the class TestAuditLoggerWithCommands method testRemoveCacheDirective.

@Test
public void testRemoveCacheDirective() throws Exception {
    removeExistingCachePools(null);
    proto.addCachePool(new CachePoolInfo("pool1").setMode(new FsPermission((short) 0)));
    CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder().setPath(new Path("/alpha")).setPool("pool1").build();
    String aceRemoveCachePattern = ".*allowed=false.*ugi=theDoctor.*cmd=removeCache.*";
    int length = -1;
    Long id = ((DistributedFileSystem) fs).addCacheDirective(alpha);
    fileSys = DFSTestUtil.getFileSystemAs(user1, conf);
    try {
        ((DistributedFileSystem) fileSys).removeCacheDirective(id);
        fail("It should have failed with an AccessControlException");
    } catch (AccessControlException ace) {
        length = verifyAuditLogs(aceRemoveCachePattern);
    }
    try {
        fileSys.close();
        ((DistributedFileSystem) fileSys).removeCacheDirective(id);
        fail("The operation should have failed with IOException");
    } catch (IOException e) {
        // expected: fileSys was closed above
    }
    assertTrue("Unexpected log!", length == auditlog.getOutput().split("\n").length);
}
Also used: Path (org.apache.hadoop.fs.Path), CacheDirectiveInfo (org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo), AccessControlException (org.apache.hadoop.security.AccessControlException), FsPermission (org.apache.hadoop.fs.permission.FsPermission), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo), Test (org.junit.Test)
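
The id returned by addCacheDirective is the handle used for removal. A minimal sketch of the round trip, assuming dfs is an open DistributedFileSystem handle and pool1 already exists:

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

// Pin /alpha into pool1; addCacheDirective returns the directive's id.
CacheDirectiveInfo alpha = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/alpha"))
        .setPool("pool1")
        .build();
long id = dfs.addCacheDirective(alpha);
// Removal needs only the id.
dfs.removeCacheDirective(id);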

Aggregations

Types most often used together with DistributedFileSystem, with usage counts across the indexed examples:

DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 252
Test (org.junit.Test): 175
Path (org.apache.hadoop.fs.Path): 169
Configuration (org.apache.hadoop.conf.Configuration): 126
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 126
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 86
IOException (java.io.IOException): 63
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 36
FileSystem (org.apache.hadoop.fs.FileSystem): 31
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 31
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 26
URI (java.net.URI): 24
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 22
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 19
AccessControlException (org.apache.hadoop.security.AccessControlException): 19
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 18
Matchers.anyString (org.mockito.Matchers.anyString): 18
FileStatus (org.apache.hadoop.fs.FileStatus): 16
ArrayList (java.util.ArrayList): 14
CachePoolInfo (org.apache.hadoop.hdfs.protocol.CachePoolInfo): 14