
Example 11 with HdfsAdmin

Use of org.apache.hadoop.hdfs.client.HdfsAdmin in the Apache hadoop project.

From the class TestCheckpointsWithSnapshots, method testCheckpoint.

/**
   * Regression test for HDFS-5433 - "When reloading fsimage during
   * checkpointing, we should clear existing snapshottable directories"
   */
@Test
public void testCheckpoint() throws IOException {
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).build();
        cluster.waitActive();
        secondary = new SecondaryNameNode(conf);
        SnapshotManager nnSnapshotManager = cluster.getNamesystem().getSnapshotManager();
        SnapshotManager secondarySnapshotManager = secondary.getFSNamesystem().getSnapshotManager();
        FileSystem fs = cluster.getFileSystem();
        HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
        assertEquals(0, nnSnapshotManager.getNumSnapshots());
        assertEquals(0, nnSnapshotManager.getNumSnapshottableDirs());
        assertEquals(0, secondarySnapshotManager.getNumSnapshots());
        assertEquals(0, secondarySnapshotManager.getNumSnapshottableDirs());
        // 1. Create a snapshottable directory foo on the NN.
        fs.mkdirs(TEST_PATH);
        admin.allowSnapshot(TEST_PATH);
        assertEquals(0, nnSnapshotManager.getNumSnapshots());
        assertEquals(1, nnSnapshotManager.getNumSnapshottableDirs());
        // 2. Create a snapshot of the dir foo. This will be referenced both in
        // the SnapshotManager as well as in the file system tree. The snapshot
        // count will go up to 1.
        Path snapshotPath = fs.createSnapshot(TEST_PATH);
        assertEquals(1, nnSnapshotManager.getNumSnapshots());
        assertEquals(1, nnSnapshotManager.getNumSnapshottableDirs());
        // 3. Start up a 2NN and have it do a checkpoint. It will have foo and its
        // snapshot in its list of snapshottable dirs referenced from the
        // SnapshotManager, as well as in the file system tree.
        secondary.doCheckpoint();
        assertEquals(1, secondarySnapshotManager.getNumSnapshots());
        assertEquals(1, secondarySnapshotManager.getNumSnapshottableDirs());
        // 4. Delete foo's snapshot and disallow further snapshots on it on the
        // NN. The snapshot count will go down to 0 and the snapshottable dir
        // will be removed from the fs tree.
        fs.deleteSnapshot(TEST_PATH, snapshotPath.getName());
        admin.disallowSnapshot(TEST_PATH);
        assertEquals(0, nnSnapshotManager.getNumSnapshots());
        assertEquals(0, nnSnapshotManager.getNumSnapshottableDirs());
        // 5. Have the NN do a saveNamespace, writing out a new fsimage with
        // snapshot count 0.
        NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
        NameNodeAdapter.saveNamespace(cluster.getNameNode());
        NameNodeAdapter.leaveSafeMode(cluster.getNameNode());
        // 6. Have the still-running 2NN do a checkpoint. It will notice that the
        // fsimage has changed on the NN and redownload/reload from that image.
        // This will replace all INodes in the file system tree as well as reset
        // the snapshot counter to 0 in the SnapshotManager. However, it will not
        // clear the list of snapshottable dirs referenced from the
        // SnapshotManager. When it writes out an fsimage, the 2NN will write out
        // 0 for the snapshot count, but still serialize the snapshottable dir
        // referenced in the SnapshotManager even though it no longer appears in
        // the file system tree. The NN will not be able to start up with this.
        secondary.doCheckpoint();
        assertEquals(0, secondarySnapshotManager.getNumSnapshots());
        assertEquals(0, secondarySnapshotManager.getNumSnapshottableDirs());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        if (secondary != null) {
            secondary.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin), SecondaryNameNode (org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode), FileSystem (org.apache.hadoop.fs.FileSystem), Test (org.junit.Test)
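
Stripped of the MiniDFSCluster and checkpointing machinery, the snapshot administration calls exercised above reduce to a few lines. A minimal sketch against a running cluster, assuming fs.defaultFS points at HDFS (the /backup path and class name are illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class SnapshotAdminSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
        // Illustrative path; any directory the caller owns works.
        Path dir = new Path("/backup");
        fs.mkdirs(dir);
        // Mark the directory snapshottable, take an auto-named snapshot,
        // then undo both steps in reverse order.
        admin.allowSnapshot(dir);
        Path snapshot = fs.createSnapshot(dir);
        fs.deleteSnapshot(dir, snapshot.getName());
        // disallowSnapshot fails while any snapshot of dir still exists.
        admin.disallowSnapshot(dir);
    }
}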

Example 12 with HdfsAdmin

Use of org.apache.hadoop.hdfs.client.HdfsAdmin in the Apache hadoop project.

From the class TestViewFileSystemHdfs, method testTrashRootsAfterEncryptionZoneDeletion.

@Test
public void testTrashRootsAfterEncryptionZoneDeletion() throws Exception {
    final Path zone = new Path("/EZ");
    fsTarget.mkdirs(zone);
    final Path zone1 = new Path("/EZ/zone1");
    fsTarget.mkdirs(zone1);
    DFSTestUtil.createKey("test_key", cluster, CONF);
    HdfsAdmin hdfsAdmin = new HdfsAdmin(cluster.getURI(0), CONF);
    final EnumSet<CreateEncryptionZoneFlag> provisionTrash = EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH);
    hdfsAdmin.createEncryptionZone(zone1, "test_key", provisionTrash);
    final Path encFile = new Path(zone1, "encFile");
    DFSTestUtil.createFile(fsTarget, encFile, 10240, (short) 1, 0xFEED);
    Configuration clientConf = new Configuration(CONF);
    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
    clientConf.set("fs.default.name", fsTarget.getUri().toString());
    FsShell shell = new FsShell(clientConf);
    // Verify file deletion within EZ
    DFSTestUtil.verifyDelete(shell, fsTarget, encFile, true);
    Assert.assertTrue("ViewFileSystem trash roots should include EZ file trash", (fsView.getTrashRoots(true).size() == 1));
    // Verify deletion of EZ
    DFSTestUtil.verifyDelete(shell, fsTarget, zone, true);
    Assert.assertTrue("ViewFileSystem trash roots should include EZ zone trash", (fsView.getTrashRoots(true).size() == 2));
}
Also used: Path (org.apache.hadoop.fs.Path), CreateEncryptionZoneFlag (org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag), FsShell (org.apache.hadoop.fs.FsShell), Configuration (org.apache.hadoop.conf.Configuration), HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin), Test (org.junit.Test)
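
The core HdfsAdmin call in this test is createEncryptionZone with the PROVISION_TRASH flag. A minimal standalone sketch, assuming a key named test_key already exists in the cluster's configured KMS and that the target directory is empty, as createEncryptionZone requires (the class name is illustrative):

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class EncryptionZoneSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
        // The zone directory must exist and be empty before the call.
        Path zone = new Path("/EZ/zone1");
        fs.mkdirs(zone);
        // PROVISION_TRASH pre-creates a .Trash directory inside the zone so
        // deletes within the zone never have to cross the EZ boundary.
        admin.createEncryptionZone(zone, "test_key",
                EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
    }
}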

Example 13 with HdfsAdmin

Use of org.apache.hadoop.hdfs.client.HdfsAdmin in the Apache hadoop project.

From the class TestHdfsAdmin, method testHdfsAdminStoragePolicies.

/**
   * Test that we can set, get, unset storage policies via {@link HdfsAdmin}.
   */
@Test
public void testHdfsAdminStoragePolicies() throws Exception {
    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    FileSystem fs = FileSystem.get(conf);
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    final Path wow = new Path(bar, "wow");
    DFSTestUtil.createFile(fs, wow, SIZE, REPL, 0);
    final BlockStoragePolicySuite suite = BlockStoragePolicySuite.createDefaultSuite();
    final BlockStoragePolicy warm = suite.getPolicy("WARM");
    final BlockStoragePolicy cold = suite.getPolicy("COLD");
    final BlockStoragePolicy hot = suite.getPolicy("HOT");
    /*
     * test: set storage policy
     */
    hdfsAdmin.setStoragePolicy(foo, warm.getName());
    hdfsAdmin.setStoragePolicy(bar, cold.getName());
    hdfsAdmin.setStoragePolicy(wow, hot.getName());
    /*
     * test: get storage policy after set
     */
    assertEquals(hdfsAdmin.getStoragePolicy(foo), warm);
    assertEquals(hdfsAdmin.getStoragePolicy(bar), cold);
    assertEquals(hdfsAdmin.getStoragePolicy(wow), hot);
    /*
     * test: unset storage policy
     */
    hdfsAdmin.unsetStoragePolicy(foo);
    hdfsAdmin.unsetStoragePolicy(bar);
    hdfsAdmin.unsetStoragePolicy(wow);
    /*
     * test: get storage policy after unset. HOT by default.
     */
    assertEquals(hdfsAdmin.getStoragePolicy(foo), hot);
    assertEquals(hdfsAdmin.getStoragePolicy(bar), hot);
    assertEquals(hdfsAdmin.getStoragePolicy(wow), hot);
    /*
     * test: get all storage policies
     */
    // Get policies via HdfsAdmin
    Set<String> policyNamesSet1 = new HashSet<>();
    for (BlockStoragePolicySpi policy : hdfsAdmin.getAllStoragePolicies()) {
        policyNamesSet1.add(policy.getName());
    }
    // Get policies via BlockStoragePolicySuite
    Set<String> policyNamesSet2 = new HashSet<>();
    for (BlockStoragePolicy policy : suite.getAllPolicies()) {
        policyNamesSet2.add(policy.getName());
    }
    // Ensure that we got the same set of policies in both cases.
    Assert.assertTrue(Sets.difference(policyNamesSet1, policyNamesSet2).isEmpty());
    Assert.assertTrue(Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
}
Also used: Path (org.apache.hadoop.fs.Path), BlockStoragePolicySuite (org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite), HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin), FileSystem (org.apache.hadoop.fs.FileSystem), BlockStoragePolicy (org.apache.hadoop.hdfs.protocol.BlockStoragePolicy), BlockStoragePolicySpi (org.apache.hadoop.fs.BlockStoragePolicySpi), HashSet (java.util.HashSet), Test (org.junit.Test)
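
Condensed out of the test harness, the storage policy round trip looks like the sketch below. Assumptions: the /archive path and class name are illustrative, and COLD is one of the policies the cluster actually offers (the loop prints the available names first):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class StoragePolicySketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
        Path dir = new Path("/archive");
        fs.mkdirs(dir);
        // See what the cluster offers before picking a policy by name.
        for (BlockStoragePolicySpi policy : admin.getAllStoragePolicies()) {
            System.out.println(policy.getName());
        }
        admin.setStoragePolicy(dir, "COLD");
        System.out.println(admin.getStoragePolicy(dir).getName());
        // Unsetting makes dir inherit its nearest ancestor's policy,
        // which is HOT at the root by default.
        admin.unsetStoragePolicy(dir);
    }
}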

Example 14 with HdfsAdmin

Use of org.apache.hadoop.hdfs.client.HdfsAdmin in the Apache hadoop project.

From the class TestSecureEncryptionZoneWithKMS, method setup.

@Before
public void setup() throws Exception {
    // Start MiniDFS Cluster
    baseConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, getKeyProviderURI());
    baseConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf = new HdfsConfiguration(baseConf);
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    fsWrapper = new FileSystemTestWrapper(fs);
    dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
    // Create a test key
    DFSTestUtil.createKey(testKey, cluster, conf);
}
Also used: HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin), FileSystemTestWrapper (org.apache.hadoop.fs.FileSystemTestWrapper), Before (org.junit.Before)
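
The construction pattern above only works against HDFS: the HdfsAdmin constructor rejects any URI that does not resolve to a DistributedFileSystem. A minimal sketch of that behavior, assuming fs.defaultFS points at a running HDFS cluster (the class name is illustrative):

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class HdfsAdminUriSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Works when fs.defaultFS resolves to a DistributedFileSystem.
        HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
        // A non-HDFS URI is rejected at construction time.
        try {
            new HdfsAdmin(URI.create("file:///"), conf);
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }
    }
}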

Example 15 with HdfsAdmin

Use of org.apache.hadoop.hdfs.client.HdfsAdmin in the Apache hbase project.

From the class TestHBaseWalOnEC, method setUpBeforeClass.

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    // Need 3 DNs for RS-3-2 policy
    MiniDFSCluster cluster = UTIL.startMiniDFSCluster(3);
    DistributedFileSystem fs = cluster.getFileSystem();
    DFSTestUtil.enableAllECPolicies(fs);
    HdfsAdmin hdfsAdmin = new HdfsAdmin(fs.getUri(), UTIL.getConfiguration());
    hdfsAdmin.setErasureCodingPolicy(new Path("/"), "RS-3-2-1024k");
    try (FSDataOutputStream out = fs.create(new Path("/canary"))) {
        // If this comes back as having hflush then some test setup assumption is wrong.
        // Fail the test so that a developer has to look and triage
        assertFalse("Did not enable EC!", out.hasCapability(StreamCapabilities.HFLUSH));
    }
    UTIL.getConfiguration().setBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true);
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), BeforeClass (org.junit.BeforeClass)
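
Beyond the test, the same two calls opt a directory subtree into erasure coding. A minimal sketch, assuming a Hadoop 3 cluster on which the RS-3-2-1024k policy has been enabled and which has enough DataNodes for RS(3,2) (the /ec-data path and class name are illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class ErasureCodingSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
        Path dir = new Path("/ec-data");
        fs.mkdirs(dir);
        // New files under dir are written as RS(3,2) stripes from now on;
        // files that already existed keep their current layout.
        admin.setErasureCodingPolicy(dir, "RS-3-2-1024k");
        System.out.println(admin.getErasureCodingPolicy(dir));
    }
}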

Aggregations

HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin): 27
Path (org.apache.hadoop.fs.Path): 21
Test (org.junit.Test): 16
Configuration (org.apache.hadoop.conf.Configuration): 8
File (java.io.File): 7
Mockito.anyString (org.mockito.Mockito.anyString): 7
FileSystemTestHelper (org.apache.hadoop.fs.FileSystemTestHelper): 6
FsShell (org.apache.hadoop.fs.FsShell): 6
Before (org.junit.Before): 6
FileSystem (org.apache.hadoop.fs.FileSystem): 5
FileSystemTestWrapper (org.apache.hadoop.fs.FileSystemTestWrapper): 5
IOException (java.io.IOException): 4
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 4
AccessControlException (org.apache.hadoop.security.AccessControlException): 4
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 4
ExecutionException (java.util.concurrent.ExecutionException): 3
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 3
EncryptionZoneManager (org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager): 3
WebHdfsFileSystem (org.apache.hadoop.hdfs.web.WebHdfsFileSystem): 3
BeforeClass (org.junit.BeforeClass): 3