
Example 51 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestLeaseManager method testLeaseRestorationOnRestart.

/**
   * Make sure the lease is restored even if only the inode has the record.
   */
@Test
public void testLeaseRestorationOnRestart() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(1).build();
        DistributedFileSystem dfs = cluster.getFileSystem();
        // Create an empty file
        String path = "/testLeaseRestorationOnRestart";
        FSDataOutputStream out = dfs.create(new Path(path));
        // Remove the lease from the lease manager, but leave it in the inode.
        FSDirectory dir = cluster.getNamesystem().getFSDirectory();
        INodeFile file = dir.getINode(path).asFile();
        cluster.getNamesystem().leaseManager.removeLease(file.getFileUnderConstructionFeature().getClientName(), file);
        // Save a fsimage.
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        cluster.getNameNodeRpc().saveNamespace(0, 0);
        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        // Restart the namenode.
        cluster.restartNameNode(true);
        // Check whether the lease manager has the lease
        dir = cluster.getNamesystem().getFSDirectory();
        file = dir.getINode(path).asFile();
        assertTrue("Lease should exist.", cluster.getNamesystem().leaseManager.getLease(file) != null);
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
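The fsimage-saving step above follows the usual sequence: the NameNode only accepts saveNamespace while it is in safe mode. The test goes through the NameNode RPC interface, but the same thing can be done through DistributedFileSystem; a minimal sketch (not part of the test):

// Sketch only: equivalent fsimage save through the client API.
// saveNamespace() is rejected unless the NameNode is in safe mode.
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
dfs.saveNamespace();
dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);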

Example 52 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestListCorruptFileBlocks method testMaxCorruptFiles.

/**
   * Test if NN.listCorruptFileBlocks() returns the right number of results.
   * The corrupt blocks are detected by the BlockPoolSliceScanner.
   * Also, test that DFS.listCorruptFileBlocks can make multiple successive
   * calls.
   */
@Test(timeout = 300000)
public void testMaxCorruptFiles() throws Exception {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new HdfsConfiguration();
        // datanode sends block reports
        conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000);
        cluster = new MiniDFSCluster.Builder(conf).build();
        FileSystem fs = cluster.getFileSystem();
        final int maxCorruptFileBlocks = FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;
        // create 110 files with one block each
        DFSTestUtil util = new DFSTestUtil.Builder().setName("testMaxCorruptFiles").setNumFiles(maxCorruptFileBlocks * 3).setMaxLevels(1).setMaxSize(512).build();
        util.createFiles(fs, "/srcdat2", (short) 1);
        util.waitReplication(fs, "/srcdat2", (short) 1);
        // verify that there are no bad blocks.
        final NameNode namenode = cluster.getNameNode();
        Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
        assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting none.", badFiles.size() == 0);
        // Now deliberately remove blocks from all files
        final String bpid = cluster.getNamesystem().getBlockPoolId();
        for (int i = 0; i < 4; i++) {
            for (int j = 0; j <= 1; j++) {
                File storageDir = cluster.getInstanceStorageDir(i, j);
                File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
                LOG.info("Removing files from " + data_dir);
                List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(data_dir);
                if (metadataFiles == null)
                    continue;
                for (File metadataFile : metadataFiles) {
                    File blockFile = Block.metaToBlockFile(metadataFile);
                    assertTrue("Cannot remove file.", blockFile.delete());
                    assertTrue("Cannot remove file.", metadataFile.delete());
                }
            }
        }
        // Run the DirectoryScanner to update the DataNode's volumeMap
        DataNode dn = cluster.getDataNodes().get(0);
        DataNodeTestUtils.runDirectoryScanner(dn);
        // Occasionally the BlockPoolSliceScanner can run before we have removed
        // the blocks. Restart the Datanode to trigger the scanner into running
        // once more.
        LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
        cluster.restartDataNodes();
        cluster.waitActive();
        badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
        while (badFiles.size() < maxCorruptFileBlocks) {
            LOG.info("# of corrupt files is: " + badFiles.size());
            Thread.sleep(10000);
            badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
        }
        badFiles = namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
        LOG.info("Namenode has bad files. " + badFiles.size());
        assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting " + maxCorruptFileBlocks + ".", badFiles.size() == maxCorruptFileBlocks);
        CorruptFileBlockIterator iter = (CorruptFileBlockIterator) fs.listCorruptFileBlocks(new Path("/srcdat2"));
        int corruptPaths = countPaths(iter);
        assertTrue("Expected more than " + maxCorruptFileBlocks + " corrupt file blocks but got " + corruptPaths, corruptPaths > maxCorruptFileBlocks);
        assertTrue("Iterator should have made more than 1 call but made " + iter.getCallsMade(), iter.getCallsMade() > 1);
        util.cleanup(fs, "/srcdat2");
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DFSTestUtil(org.apache.hadoop.hdfs.DFSTestUtil) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) CorruptFileBlockIterator(org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) Test(org.junit.Test)
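On the client side, the paging that the test verifies through CorruptFileBlockIterator.getCallsMade() is hidden behind a RemoteIterator: FileSystem.listCorruptFileBlocks issues follow-up NameNode calls as the iterator is consumed. A minimal sketch of plain iteration (the helper name and surrounding setup are hypothetical):

// Hypothetical helper: collect every path that currently has a corrupt block
// under the given directory. The RemoteIterator transparently makes further
// NameNode calls when a batch of results is exhausted.
// Requires: java.io.IOException, java.util.ArrayList, java.util.List,
// org.apache.hadoop.fs.FileSystem, org.apache.hadoop.fs.Path, org.apache.hadoop.fs.RemoteIterator
static List<Path> collectCorruptPaths(FileSystem fs, Path dir) throws IOException {
    List<Path> corrupt = new ArrayList<>();
    RemoteIterator<Path> it = fs.listCorruptFileBlocks(dir);
    while (it.hasNext()) {
        corrupt.add(it.next());
    }
    return corrupt;
}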

Example 53 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestNameEditsConfigs method testNameEditsConfigs.

/**
   * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir.
   * The test creates files and restarts the cluster with different configs.
   * 1. Starts the cluster with shared name and edits dirs.
   * 2. Restarts the cluster after adding additional (different) name and edits dirs.
   * 3. Restarts the cluster after removing the shared name and edits dirs, so that it
   *    starts with separate name and edits dirs.
   * 4. Restarts the cluster after adding the shared directory again, making sure
   *    no stale image or edits are read from it.
   * Throughout the test, files are created and deleted at each restart to make
   * sure the proper edits and image are being read.
   * @throws Exception 
   */
@Test
public void testNameEditsConfigs() throws Exception {
    Path file1 = new Path("TestNameEditsConfigs1");
    Path file2 = new Path("TestNameEditsConfigs2");
    Path file3 = new Path("TestNameEditsConfigs3");
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary = null;
    Configuration conf = null;
    FileSystem fileSys = null;
    final File newNameDir = new File(base_dir, "name");
    final File newEditsDir = new File(base_dir, "edits");
    final File nameAndEdits = new File(base_dir, "name_and_edits");
    final File checkpointNameDir = new File(base_dir, "secondname");
    final File checkpointEditsDir = new File(base_dir, "secondedits");
    final File checkpointNameAndEdits = new File(base_dir, "second_name_and_edits");
    ImmutableList<File> allCurrentDirs = ImmutableList.of(new File(nameAndEdits, "current"), new File(newNameDir, "current"), new File(newEditsDir, "current"), new File(checkpointNameAndEdits, "current"), new File(checkpointNameDir, "current"), new File(checkpointEditsDir, "current"));
    ImmutableList<File> imageCurrentDirs = ImmutableList.of(new File(nameAndEdits, "current"), new File(newNameDir, "current"), new File(checkpointNameAndEdits, "current"), new File(checkpointNameDir, "current"));
    // Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointNameAndEdits.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    // Manage our own dfs directories
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    fileSys = cluster.getFileSystem();
    try {
        assertTrue(!fileSys.exists(file1));
        DFSTestUtil.createFile(fileSys, file1, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, replication, SEED);
        checkFile(fileSys, file1, replication);
        secondary.doCheckpoint();
    } finally {
        fileSys.close();
        cluster.shutdown();
        secondary.shutdown();
    }
    // Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
    conf = new HdfsConfiguration();
    assertTrue(newNameDir.mkdir());
    assertTrue(newEditsDir.mkdir());
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath() + "," + newNameDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath() + "," + newEditsDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameDir.getPath() + "," + checkpointNameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath() + "," + checkpointNameAndEdits.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    // Manage our own dfs directories. Do not format.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    fileSys = cluster.getFileSystem();
    try {
        assertTrue(fileSys.exists(file1));
        checkFile(fileSys, file1, replication);
        cleanupFile(fileSys, file1);
        DFSTestUtil.createFile(fileSys, file2, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, replication, SEED);
        checkFile(fileSys, file2, replication);
        secondary.doCheckpoint();
    } finally {
        fileSys.close();
        cluster.shutdown();
        secondary.shutdown();
    }
    FSImageTestUtil.assertParallelFilesAreIdentical(allCurrentDirs, ImmutableSet.of("VERSION"));
    FSImageTestUtil.assertSameNewestImage(imageCurrentDirs);
    // Now remove common directory both have and start namenode with 
    // separate name and edits dirs
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, newNameDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    fileSys = cluster.getFileSystem();
    try {
        assertTrue(!fileSys.exists(file1));
        assertTrue(fileSys.exists(file2));
        checkFile(fileSys, file2, replication);
        cleanupFile(fileSys, file2);
        DFSTestUtil.createFile(fileSys, file3, FILE_SIZE, FILE_SIZE, BLOCK_SIZE, replication, SEED);
        checkFile(fileSys, file3, replication);
        secondary.doCheckpoint();
    } finally {
        fileSys.close();
        cluster.shutdown();
        secondary.shutdown();
    }
    // No edit logs in new name dir
    checkImageAndEditsFilesExistence(newNameDir, true, false);
    checkImageAndEditsFilesExistence(newEditsDir, false, true);
    checkImageAndEditsFilesExistence(checkpointNameDir, true, false);
    checkImageAndEditsFilesExistence(checkpointEditsDir, false, true);
    // Add old name_and_edits dir. File system should not read image or edits
    // from old dir
    assertTrue(FileUtil.fullyDelete(new File(nameAndEdits, "current")));
    assertTrue(FileUtil.fullyDelete(new File(checkpointNameAndEdits, "current")));
    conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath() + "," + newNameDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits + "," + newEditsDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameDir.getPath() + "," + checkpointNameAndEdits.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath() + "," + checkpointNameAndEdits.getPath());
    replication = (short) conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).manageNameDfsDirs(false).build();
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    fileSys = cluster.getFileSystem();
    try {
        assertTrue(!fileSys.exists(file1));
        assertTrue(!fileSys.exists(file2));
        assertTrue(fileSys.exists(file3));
        checkFile(fileSys, file3, replication);
        secondary.doCheckpoint();
    } finally {
        fileSys.close();
        cluster.shutdown();
        secondary.shutdown();
    }
    checkImageAndEditsFilesExistence(nameAndEdits, true, true);
    checkImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) File(java.io.File) Test(org.junit.Test)
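The configuration mechanism the test exercises is simply a comma-separated list of directories per key; a minimal sketch outside the test, with hypothetical local paths:

// Sketch only: comma-separated name/edits directory lists (paths are hypothetical).
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, "/data/nn/name1,/data/nn/name2");
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, "/data/nn/edits1,/data/nn/edits2");
// Each key is parsed back into its individual directories.
for (String dir : conf.getTrimmedStringCollection(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY)) {
    System.out.println("name dir: " + dir);
}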

Example 54 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestNameNodeHttpServerXFrame method createServerwithXFrame.

private HttpURLConnection createServerwithXFrame(boolean enabled, String value) throws IOException {
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
    conf.setBoolean(DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED, enabled);
    if (value != null) {
        conf.set(DFSConfigKeys.DFS_XFRAME_OPTION_VALUE, value);
    }
    InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
    NameNodeHttpServer server = new NameNodeHttpServer(conf, null, addr);
    server.start();
    URL url = getServerURL(server.getHttpServer());
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.connect();
    return conn;
}
Also used : HttpURLConnection(java.net.HttpURLConnection) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) URL(java.net.URL)
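A test would typically call this helper and then inspect the X-Frame-Options response header; a minimal sketch, assuming the header is exposed under the name "X-FRAME-OPTIONS" (requires org.junit.Assert):

// Sketch only: assert on the X-Frame-Options header returned by the helper above.
HttpURLConnection conn = createServerwithXFrame(true, "SAMEORIGIN");
String xfo = conn.getHeaderField("X-FRAME-OPTIONS");
Assert.assertNotNull("Expected an X-Frame-Options header", xfo);
Assert.assertTrue("Unexpected X-Frame-Options value: " + xfo, xfo.contains("SAMEORIGIN"));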

Example 55 with HdfsConfiguration

use of org.apache.hadoop.hdfs.HdfsConfiguration in project hadoop by apache.

the class TestNNStorageRetentionFunctional method testPurgingWithNameEditsDirAfterFailure.

/**
  * Test case where two directories are configured as NAME_AND_EDITS
  * and one of them fails to save storage. Since the edits and image
  * failure states are decoupled, the failure of image saving should
  * not prevent the purging of logs from that dir.
  */
@Test
public void testPurgingWithNameEditsDirAfterFailure() throws Exception {
    MiniDFSCluster cluster = null;
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
    File sd0 = new File(TEST_ROOT_DIR, "nn0");
    File sd1 = new File(TEST_ROOT_DIR, "nn1");
    File cd0 = new File(sd0, "current");
    File cd1 = new File(sd1, "current");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, Joiner.on(",").join(sd0, sd1));
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).manageNameDfsDirs(false).format(true).build();
        NameNode nn = cluster.getNameNode();
        doSaveNamespace(nn);
        LOG.info("After first save, images 0 and 2 should exist in both dirs");
        assertGlobEquals(cd0, "fsimage_\\d*", getImageFileName(0), getImageFileName(2));
        assertGlobEquals(cd1, "fsimage_\\d*", getImageFileName(0), getImageFileName(2));
        assertGlobEquals(cd0, "edits_.*", getFinalizedEditsFileName(1, 2), getInProgressEditsFileName(3));
        assertGlobEquals(cd1, "edits_.*", getFinalizedEditsFileName(1, 2), getInProgressEditsFileName(3));
        doSaveNamespace(nn);
        LOG.info("After second save, image 0 should be purged, " + "and image 4 should exist in both.");
        assertGlobEquals(cd0, "fsimage_\\d*", getImageFileName(2), getImageFileName(4));
        assertGlobEquals(cd1, "fsimage_\\d*", getImageFileName(2), getImageFileName(4));
        assertGlobEquals(cd0, "edits_.*", getFinalizedEditsFileName(3, 4), getInProgressEditsFileName(5));
        assertGlobEquals(cd1, "edits_.*", getFinalizedEditsFileName(3, 4), getInProgressEditsFileName(5));
        LOG.info("Failing first storage dir by chmodding it");
        assertEquals(0, FileUtil.chmod(cd0.getAbsolutePath(), "000"));
        doSaveNamespace(nn);
        LOG.info("Restoring accessibility of first storage dir");
        assertEquals(0, FileUtil.chmod(cd0.getAbsolutePath(), "755"));
        LOG.info("nothing should have been purged in first storage dir");
        assertGlobEquals(cd0, "fsimage_\\d*", getImageFileName(2), getImageFileName(4));
        assertGlobEquals(cd0, "edits_.*", getFinalizedEditsFileName(3, 4), getInProgressEditsFileName(5));
        LOG.info("fsimage_2 should be purged in second storage dir");
        assertGlobEquals(cd1, "fsimage_\\d*", getImageFileName(4), getImageFileName(6));
        assertGlobEquals(cd1, "edits_.*", getFinalizedEditsFileName(5, 6), getInProgressEditsFileName(7));
        LOG.info("On next save, we should purge logs from the failed dir," + " but not images, since the image directory is in failed state.");
        doSaveNamespace(nn);
        assertGlobEquals(cd1, "fsimage_\\d*", getImageFileName(6), getImageFileName(8));
        assertGlobEquals(cd1, "edits_.*", getFinalizedEditsFileName(7, 8), getInProgressEditsFileName(9));
        assertGlobEquals(cd0, "fsimage_\\d*", getImageFileName(2), getImageFileName(4));
        assertGlobEquals(cd0, "edits_.*", getInProgressEditsFileName(9));
    } finally {
        FileUtil.chmod(cd0.getAbsolutePath(), "755");
        LOG.info("Shutting down...");
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) File(java.io.File) Test(org.junit.Test)
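The purging behavior exercised here is governed by the NameNode's retention settings; a small sketch of the related keys (the values below are illustrative, not the test's, and the checkpoint-retention key is assumed to be DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY):

// Sketch only: retention knobs that govern fsimage/edits purging.
Configuration conf = new HdfsConfiguration();
// Number of fsimage checkpoints to keep in each storage directory.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 2);
// Extra transactions' worth of edit logs to keep beyond what the
// retained checkpoints require (the test sets this to 0).
conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 1000000);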

Aggregations

HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration) 454
Configuration (org.apache.hadoop.conf.Configuration) 311
Test (org.junit.Test) 311
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster) 267
Path (org.apache.hadoop.fs.Path) 152
FileSystem (org.apache.hadoop.fs.FileSystem) 94
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem) 92
File (java.io.File) 72
IOException (java.io.IOException) 69
Before (org.junit.Before) 56
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock) 40
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream) 35
MetricsRecordBuilder (org.apache.hadoop.metrics2.MetricsRecordBuilder) 33
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode) 30
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock) 27
RandomAccessFile (java.io.RandomAccessFile) 22
ArrayList (java.util.ArrayList) 20
NameNodeFile (org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile) 20
URI (java.net.URI) 19
FsPermission (org.apache.hadoop.fs.permission.FsPermission) 19
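Nearly every example above follows the same skeleton; a minimal sketch of that shared pattern:

// Sketch only: the recurring HdfsConfiguration + MiniDFSCluster test pattern.
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    // ... exercise the NameNode / DataNodes through fs here ...
} finally {
    if (cluster != null) {
        cluster.shutdown();
    }
}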