
Example 66 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

From the class TestNameNodeMXBean, method testTopUsersDisabled.

@Test(timeout = 120000)
public void testTopUsersDisabled() throws Exception {
    final Configuration conf = new Configuration();
    // Disable nntop
    conf.setBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY, false);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanNameFsns = new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
        FileSystem fs = cluster.getFileSystem();
        final Path path = new Path("/");
        final int NUM_OPS = 10;
        for (int i = 0; i < NUM_OPS; i++) {
            fs.listStatus(path);
            fs.setTimes(path, 0, 1);
        }
        String topUsers = (String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
        assertNull("Did not expect to find TopUserOpCounts bean!", topUsers);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), MBeanServer (javax.management.MBeanServer), ObjectName (javax.management.ObjectName), Test (org.junit.Test)
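For contrast, a minimal sketch of the enabled case, assuming nntop is left at its default (enabled) and that the same MBean attribute then returns a non-null JSON string; the test name and assertion message below are ours, not from the Hadoop source:

@Test(timeout = 120000)
public void testTopUsersEnabled() throws Exception {
    final Configuration conf = new Configuration();
    // Leave DFSConfigKeys.NNTOP_ENABLED_KEY at its default (true).
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        // Generate some per-user operations for nntop to count.
        for (int i = 0; i < 10; i++) {
            fs.listStatus(new Path("/"));
        }
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanNameFsns =
            new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState");
        String topUsers = (String) mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts");
        assertNotNull("Expected TopUserOpCounts when nntop is enabled", topUsers);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}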

Example 67 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

From the class TestINodeFile, method testWriteToDeletedFile.

@Test(timeout = 120000)
public void testWriteToDeletedFile() throws IOException {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/test1");
    assertTrue(fs.mkdirs(path));
    int size = conf.getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
    byte[] data = new byte[size];
    // Create one file
    Path filePath = new Path("/test1/file");
    FSDataOutputStream fos = fs.create(filePath);
    // Delete the file
    fs.delete(filePath, false);
    // Adding a new block should fail since /test1/file has been deleted.
    try {
        fos.write(data, 0, data.length);
        // make sure addBlock() request gets to NN immediately
        fos.hflush();
        fail("Write should fail after delete");
    } catch (Exception e) {
    /* Ignore */
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), InvalidPathException (org.apache.hadoop.fs.InvalidPathException), FileNotFoundException (java.io.FileNotFoundException), PathIsNotDirectoryException (org.apache.hadoop.fs.PathIsNotDirectoryException), DirectoryListingStartAfterNotFoundException (org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException), IOException (java.io.IOException), QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException), Test (org.junit.Test)
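Every example on this page repeats the same try/finally shutdown boilerplate. In recent Hadoop versions MiniDFSCluster implements AutoCloseable, with close() delegating to shutdown(), so the same test can use try-with-resources; a sketch, assuming a Hadoop version where that holds (the test name is ours):

@Test(timeout = 120000)
public void testWriteToDeletedFileAutoClose() throws IOException {
    Configuration conf = new Configuration();
    // try-with-resources: cluster.close() calls shutdown() on exit.
    try (MiniDFSCluster cluster =
             new MiniDFSCluster.Builder(conf).numDataNodes(1).build()) {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        assertTrue(fs.mkdirs(new Path("/test1")));
        Path filePath = new Path("/test1/file");
        FSDataOutputStream fos = fs.create(filePath);
        fs.delete(filePath, false);
        try {
            fos.write(new byte[512]);
            // hflush() forces the addBlock() request to reach the NameNode.
            fos.hflush();
            fail("Write should fail after delete");
        } catch (Exception e) {
            /* expected */
        }
    }
}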

Example 68 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

From the class TestINodeFile, method testInodeIdBasedPaths.

/**
   * Tests for addressing files using /.reserved/.inodes/<inodeID> in file system
   * operations.
   */
@Test
public void testInodeIdBasedPaths() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        cluster.waitActive();
        DistributedFileSystem fs = cluster.getFileSystem();
        NamenodeProtocols nnRpc = cluster.getNameNodeRpc();
        // FileSystem#mkdirs "/testInodeIdBasedPaths"
        Path baseDir = getInodePath(INodeId.ROOT_INODE_ID, "testInodeIdBasedPaths");
        Path baseDirRegPath = new Path("/testInodeIdBasedPaths");
        fs.mkdirs(baseDir);
        assertTrue(fs.exists(baseDir));
        long baseDirFileId = nnRpc.getFileInfo(baseDir.toString()).getFileId();
        // FileSystem#create file and FileSystem#close
        Path testFileInodePath = getInodePath(baseDirFileId, "test1");
        Path testFileRegularPath = new Path(baseDir, "test1");
        final int testFileBlockSize = 1024;
        FileSystemTestHelper.createFile(fs, testFileInodePath, 1, testFileBlockSize);
        assertTrue(fs.exists(testFileInodePath));
        // FileSystem#setPermission
        FsPermission perm = new FsPermission((short) 0666);
        fs.setPermission(testFileInodePath, perm);
        // FileSystem#getFileStatus and FileSystem#getPermission
        FileStatus fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(perm, fileStatus.getPermission());
        // FileSystem#setOwner
        fs.setOwner(testFileInodePath, fileStatus.getOwner(), fileStatus.getGroup());
        // FileSystem#setTimes
        fs.setTimes(testFileInodePath, 0, 0);
        fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(0, fileStatus.getModificationTime());
        assertEquals(0, fileStatus.getAccessTime());
        // FileSystem#setReplication
        fs.setReplication(testFileInodePath, (short) 3);
        fileStatus = fs.getFileStatus(testFileInodePath);
        assertEquals(3, fileStatus.getReplication());
        fs.setReplication(testFileInodePath, (short) 1);
        // ClientProtocol#getPreferredBlockSize
        assertEquals(testFileBlockSize, nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
        /*
       * HDFS-6749 added missing calls to FSDirectory.resolvePath in the
       * following four methods. The calls below ensure that
       * /.reserved/.inodes paths work properly. No need to check return
       * values as these methods are tested elsewhere.
       */
        {
            fs.isFileClosed(testFileInodePath);
            fs.getAclStatus(testFileInodePath);
            fs.getXAttrs(testFileInodePath);
            fs.listXAttrs(testFileInodePath);
            fs.access(testFileInodePath, FsAction.READ_WRITE);
        }
        // symbolic link related tests
        // Reserved path is not allowed as a target
        String invalidTarget = new Path(baseDir, "invalidTarget").toString();
        String link = new Path(baseDir, "link").toString();
        testInvalidSymlinkTarget(nnRpc, invalidTarget, link);
        // Test creating a link using reserved inode path
        String validTarget = "/validtarget";
        testValidSymlinkTarget(nnRpc, validTarget, link);
        // FileSystem#append
        fs.append(testFileInodePath);
        // DistributedFileSystem#recoverLease
        fs.recoverLease(testFileInodePath);
        // Namenode#getBlockLocations
        LocatedBlocks l1 = nnRpc.getBlockLocations(testFileInodePath.toString(), 0, Long.MAX_VALUE);
        LocatedBlocks l2 = nnRpc.getBlockLocations(testFileRegularPath.toString(), 0, Long.MAX_VALUE);
        checkEquals(l1, l2);
        // FileSystem#rename - both the variants
        Path renameDst = getInodePath(baseDirFileId, "test2");
        fileStatus = fs.getFileStatus(testFileInodePath);
        // Rename variant 1: rename and rename back
        fs.rename(testFileInodePath, renameDst);
        fs.rename(renameDst, testFileInodePath);
        assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
        // Rename variant 2: rename and rename back
        fs.rename(testFileInodePath, renameDst, Rename.OVERWRITE);
        fs.rename(renameDst, testFileInodePath, Rename.OVERWRITE);
        assertEquals(fileStatus, fs.getFileStatus(testFileInodePath));
        // FileSystem#getContentSummary
        assertEquals(fs.getContentSummary(testFileRegularPath).toString(), fs.getContentSummary(testFileInodePath).toString());
        // FileSystem#listFiles
        checkEquals(fs.listFiles(baseDirRegPath, false), fs.listFiles(baseDir, false));
        // FileSystem#delete
        fs.delete(testFileInodePath, true);
        assertFalse(fs.exists(testFileInodePath));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileStatus (org.apache.hadoop.fs.FileStatus), LocatedFileStatus (org.apache.hadoop.fs.LocatedFileStatus), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), Configuration (org.apache.hadoop.conf.Configuration), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), FsPermission (org.apache.hadoop.fs.permission.FsPermission), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
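The getInodePath helper called throughout this test is private to TestINodeFile. A plausible reconstruction, assuming nothing beyond the /.reserved/.inodes/&lt;inodeID&gt; layout described in the javadoc above:

// Hypothetical reconstruction of TestINodeFile's private helper: builds a
// path of the form /.reserved/.inodes/<inodeId>/<remainingPath>.
private static Path getInodePath(long inodeId, String remainingPath) {
    return new Path("/.reserved/.inodes/" + inodeId + "/" + remainingPath);
}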

Example 69 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

From the class TestLeaseManager, method testLeaseRestorationOnRestart.

/**
   * Make sure the lease is restored even if only the inode has the record.
   */
@Test
public void testLeaseRestorationOnRestart() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(1).build();
        DistributedFileSystem dfs = cluster.getFileSystem();
        // Create an empty file
        String path = "/testLeaseRestorationOnRestart";
        FSDataOutputStream out = dfs.create(new Path(path));
        // Remove the lease from the lease manager, but leave it in the inode.
        FSDirectory dir = cluster.getNamesystem().getFSDirectory();
        INodeFile file = dir.getINode(path).asFile();
        cluster.getNamesystem().leaseManager.removeLease(file.getFileUnderConstructionFeature().getClientName(), file);
        // Save a fsimage.
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        cluster.getNameNodeRpc().saveNamespace(0, 0);
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // Restart the namenode.
        cluster.restartNameNode(true);
        // Check whether the lease manager has the lease
        dir = cluster.getNamesystem().getFSDirectory();
        file = dir.getINode(path).asFile();
        assertTrue("Lease should exist.", cluster.getNamesystem().leaseManager.getLease(file) != null);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
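The enter-safe-mode / saveNamespace / leave-safe-mode sequence in the middle of this test is a reusable idiom: saveNamespace is only permitted while the NameNode is in safe mode. A small helper sketch extracted from the calls above (the helper name is ours):

// Checkpoint the namespace; saveNamespace requires safe mode.
private static void saveNamespace(MiniDFSCluster cluster,
        DistributedFileSystem dfs) throws IOException {
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace(0, 0);
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}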

Example 70 with MiniDFSCluster

Use of org.apache.hadoop.hdfs.MiniDFSCluster in project hadoop by apache.

From the class TestListCorruptFileBlocks, method testlistCorruptFileBlocks.

// Deliberately remove blocks from a file and validate the listCorruptFileBlocks API.
@Test(timeout = 300000)
public void testlistCorruptFileBlocks() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    // datanode scans directories
    conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
    FileSystem fs = null;
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        DFSTestUtil util = new DFSTestUtil.Builder().setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).setMaxSize(1024).build();
        util.createFiles(fs, "/corruptData");
        final NameNode namenode = cluster.getNameNode();
        Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks = namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
        int numCorrupt = corruptFileBlocks.size();
        assertTrue(numCorrupt == 0);
        // delete the blocks
        String bpid = cluster.getNamesystem().getBlockPoolId();
        for (int i = 0; i < 4; i++) {
            for (int j = 0; j <= 1; j++) {
                File storageDir = cluster.getInstanceStorageDir(i, j);
                File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
                List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
                if (metadataFiles == null)
                    continue;
                for (File metadataFile : metadataFiles) {
                    File blockFile = Block.metaToBlockFile(metadataFile);
                    LOG.info("Deliberately removing file " + blockFile.getName());
                    assertTrue("Cannot remove file.", blockFile.delete());
                    LOG.info("Deliberately removing file " + metadataFile.getName());
                    assertTrue("Cannot remove file.", metadataFile.delete());
                // break;
                }
            }
        }
        int count = 0;
        corruptFileBlocks = namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
        numCorrupt = corruptFileBlocks.size();
        while (numCorrupt < 3) {
            Thread.sleep(1000);
            corruptFileBlocks = namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
            numCorrupt = corruptFileBlocks.size();
            count++;
            if (count > 30)
                break;
        }
        // Validate we get all the corrupt files
        LOG.info("Namenode has bad files. " + numCorrupt);
        assertTrue(numCorrupt == 3);
        // test the paging here
        FSNamesystem.CorruptFileBlockInfo[] cfb = corruptFileBlocks.toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
        // now get the 2nd and 3rd corrupt files
        String[] cookie = new String[] { "1" };
        Collection<FSNamesystem.CorruptFileBlockInfo> nextCorruptFileBlocks = namenode.getNamesystem().listCorruptFileBlocks("/corruptData", cookie);
        FSNamesystem.CorruptFileBlockInfo[] ncfb = nextCorruptFileBlocks.toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
        numCorrupt = nextCorruptFileBlocks.size();
        assertTrue(numCorrupt == 2);
        assertTrue(ncfb[0].block.getBlockName().equalsIgnoreCase(cfb[1].block.getBlockName()));
        corruptFileBlocks = namenode.getNamesystem().listCorruptFileBlocks("/corruptData", cookie);
        numCorrupt = corruptFileBlocks.size();
        assertTrue(numCorrupt == 0);
        // Do a listing on a dir which doesn't have any corrupt blocks and
        // validate
        util.createFiles(fs, "/goodData");
        corruptFileBlocks = namenode.getNamesystem().listCorruptFileBlocks("/goodData", null);
        numCorrupt = corruptFileBlocks.size();
        assertTrue(numCorrupt == 0);
        util.cleanup(fs, "/corruptData");
        util.cleanup(fs, "/goodData");
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: DFSTestUtil (org.apache.hadoop.hdfs.DFSTestUtil), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), RandomAccessFile (java.io.RandomAccessFile), File (java.io.File), Test (org.junit.Test)
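The manual sleep-and-count polling loop in the middle of this test can be written more directly with Hadoop's own test utility, GenericTestUtils.waitFor; a sketch, assuming org.apache.hadoop.test.GenericTestUtils is on the test classpath (depending on the Hadoop version its predicate parameter is Guava's or java.util.function's Supplier&lt;Boolean&gt;, and a lambda satisfies either):

// Wait until at least 3 corrupt files are reported, polling every second
// and timing out after 30 seconds; uses 'namenode' from the test above.
GenericTestUtils.waitFor(() -> {
    try {
        return namenode.getNamesystem()
            .listCorruptFileBlocks("/corruptData", null).size() >= 3;
    } catch (IOException e) {
        return false;
    }
}, 1000, 30000);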

Aggregations

MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 507 usages
Test (org.junit.Test): 429 usages
Configuration (org.apache.hadoop.conf.Configuration): 403 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 312 usages
Path (org.apache.hadoop.fs.Path): 290 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 211 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 183 usages
IOException (java.io.IOException): 107 usages
File (java.io.File): 83 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 64 usages
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 53 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 35 usages
RandomAccessFile (java.io.RandomAccessFile): 33 usages
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder): 33 usages
URI (java.net.URI): 31 usages
ArrayList (java.util.ArrayList): 29 usages
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 28 usages
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 26 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 25 usages
HttpServerFunctionalTest (org.apache.hadoop.http.HttpServerFunctionalTest): 24 usages