
Example 16 with HdfsAdmin

Use of org.apache.hadoop.hdfs.client.HdfsAdmin in project hadoop by apache.

From class TestRpcProgramNfs3, method setup().

@BeforeClass
public static void setup() throws Exception {
    String currentUser = System.getProperty("user.name");
    config.set("fs.permissions.umask-mode", "u=rwx,g=,o=");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(currentUser), "*");
    config.set(DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(currentUser), "*");
    fsHelper = new FileSystemTestHelper();
    // Set up java key store
    String testRoot = fsHelper.getTestRootDir();
    testRootDir = new File(testRoot).getAbsoluteFile();
    final Path jksPath = new Path(testRootDir.toString(), "test.jks");
    config.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();
    hdfs = cluster.getFileSystem();
    nn = cluster.getNameNode();
    dfsAdmin = new HdfsAdmin(cluster.getURI(), config);
    // Use ephemeral ports in case tests are running in parallel
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);
    // Start NFS with allowed.hosts set to "* rw"
    config.set("dfs.nfs.exports.allowed.hosts", "* rw");
    nfs = new Nfs3(config);
    nfs.startServiceInternal(false);
    nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
    hdfs.getClient().setKeyProvider(nn.getNamesystem().getProvider());
    DFSTestUtil.createKey(TEST_KEY, cluster, config);
    // Mock SecurityHandler which returns system user.name
    securityHandler = Mockito.mock(SecurityHandler.class);
    Mockito.when(securityHandler.getUser()).thenReturn(currentUser);
    // Mock SecurityHandler which returns a dummy username "harry"
    securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class);
    Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry");
}
Also used: FileSystemTestHelper (org.apache.hadoop.fs.FileSystemTestHelper), Path (org.apache.hadoop.fs.Path), SecurityHandler (org.apache.hadoop.oncrpc.security.SecurityHandler), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin), File (java.io.File), BeforeClass (org.junit.BeforeClass)
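
For context, a minimal sketch (not from the test above) of the pieces this setup wires together: a file-backed JavaKeyStoreProvider, a single-node MiniDFSCluster, and an HdfsAdmin handle built from the cluster URI. The key-store location under java.io.tmpdir is illustrative.

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class MiniClusterWithKeyProvider {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Back the cluster's KeyProvider with a local JCEKS file
        // (jceks://file/<abs-path>), as the test does under its test root.
        File jks = new File(System.getProperty("java.io.tmpdir"), "test.jks");
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
            JavaKeyStoreProvider.SCHEME_NAME + "://file"
                + new Path(jks.getAbsolutePath()).toUri());
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
            cluster.waitActive();
            // HdfsAdmin only needs the filesystem URI and the configuration.
            HdfsAdmin dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
            System.out.println("Admin handle ready for " + cluster.getURI());
        } finally {
            cluster.shutdown();
        }
    }
}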

Example 17 with HdfsAdmin

Use of org.apache.hadoop.hdfs.client.HdfsAdmin in project hadoop by apache.

From class TestEncryptionZones, method testVersionAndSuiteNegotiation().

@Test
public void testVersionAndSuiteNegotiation() throws Exception {
    final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    final Path zone = new Path("/zone");
    fs.mkdirs(zone);
    dfsAdmin.createEncryptionZone(zone, TEST_KEY, NO_TRASH);
    // Create a file in an EZ, which should succeed
    DFSTestUtil.createFile(fs, new Path(zone, "success1"), 0, (short) 1, 0xFEED);
    // Pass no supported versions, fail
    DFSOutputStream.SUPPORTED_CRYPTO_VERSIONS = new CryptoProtocolVersion[] {};
    try {
        DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, 0xFEED);
        fail("Created a file without specifying a crypto protocol version");
    } catch (UnknownCryptoProtocolVersionException e) {
        assertExceptionContains("No crypto protocol versions", e);
    }
    // Pass some unknown versions, fail
    DFSOutputStream.SUPPORTED_CRYPTO_VERSIONS = new CryptoProtocolVersion[] { CryptoProtocolVersion.UNKNOWN, CryptoProtocolVersion.UNKNOWN };
    try {
        DFSTestUtil.createFile(fs, new Path(zone, "fail"), 0, (short) 1, 0xFEED);
        fail("Created a file without specifying a known crypto protocol version");
    } catch (UnknownCryptoProtocolVersionException e) {
        assertExceptionContains("No crypto protocol versions", e);
    }
    // Pass some unknown and a good cipherSuites, success
    DFSOutputStream.SUPPORTED_CRYPTO_VERSIONS = new CryptoProtocolVersion[] { CryptoProtocolVersion.UNKNOWN, CryptoProtocolVersion.UNKNOWN, CryptoProtocolVersion.ENCRYPTION_ZONES };
    DFSTestUtil.createFile(fs, new Path(zone, "success2"), 0, (short) 1, 0xFEED);
    DFSOutputStream.SUPPORTED_CRYPTO_VERSIONS = new CryptoProtocolVersion[] { CryptoProtocolVersion.ENCRYPTION_ZONES, CryptoProtocolVersion.UNKNOWN, CryptoProtocolVersion.UNKNOWN };
    DFSTestUtil.createFile(fs, new Path(zone, "success3"), 4096, (short) 1, 0xFEED);
    // Check KeyProvider state
    // Flushing the KP on the NN, since it caches, and init a test one
    cluster.getNamesystem().getProvider().flush();
    KeyProvider provider = KeyProviderFactory.get(new URI(conf.getTrimmed(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH)), conf);
    List<String> keys = provider.getKeys();
    assertEquals("Expected NN to have created one key per zone", 1, keys.size());
    List<KeyProvider.KeyVersion> allVersions = Lists.newArrayList();
    for (String key : keys) {
        List<KeyProvider.KeyVersion> versions = provider.getKeyVersions(key);
        assertEquals("Should only have one key version per key", 1, versions.size());
        allVersions.addAll(versions);
    }
    // Check that the specified CipherSuite was correctly saved on the NN
    for (int i = 2; i <= 3; i++) {
        FileEncryptionInfo feInfo = getFileEncryptionInfo(new Path(zone.toString() + "/success" + i));
        assertEquals(CipherSuite.AES_CTR_NOPADDING, feInfo.getCipherSuite());
    }
    DFSClient old = fs.dfs;
    try {
        testCipherSuiteNegotiation(fs, conf);
    } finally {
        fs.dfs = old;
    }
}
Also used : Path(org.apache.hadoop.fs.Path) KeyProvider(org.apache.hadoop.crypto.key.KeyProvider) HdfsAdmin(org.apache.hadoop.hdfs.client.HdfsAdmin) Mockito.anyString(org.mockito.Mockito.anyString) FileEncryptionInfo(org.apache.hadoop.fs.FileEncryptionInfo) URI(java.net.URI) Test(org.junit.Test)

Example 18 with HdfsAdmin

Use of org.apache.hadoop.hdfs.client.HdfsAdmin in project hadoop by apache.

From class TestEncryptionZones, method testGetTrashRoots().

@Test
public void testGetTrashRoots() throws Exception {
    final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    Path ezRoot1 = new Path("/ez1");
    fs.mkdirs(ezRoot1);
    dfsAdmin.createEncryptionZone(ezRoot1, TEST_KEY, NO_TRASH);
    Path ezRoot2 = new Path("/ez2");
    fs.mkdirs(ezRoot2);
    dfsAdmin.createEncryptionZone(ezRoot2, TEST_KEY, NO_TRASH);
    Path ezRoot3 = new Path("/ez3");
    fs.mkdirs(ezRoot3);
    dfsAdmin.createEncryptionZone(ezRoot3, TEST_KEY, NO_TRASH);
    Collection<FileStatus> trashRootsBegin = fs.getTrashRoots(true);
    assertEquals("Unexpected getTrashRoots result", 0, trashRootsBegin.size());
    final Path encFile = new Path(ezRoot2, "encFile");
    final int len = 8192;
    DFSTestUtil.createFile(fs, encFile, len, (short) 1, 0xFEED);
    Configuration clientConf = new Configuration(conf);
    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
    FsShell shell = new FsShell(clientConf);
    verifyShellDeleteWithTrash(shell, encFile);
    Collection<FileStatus> trashRootsDelete1 = fs.getTrashRoots(true);
    assertEquals("Unexpected getTrashRoots result", 1, trashRootsDelete1.size());
    final Path nonEncFile = new Path("/nonEncFile");
    DFSTestUtil.createFile(fs, nonEncFile, len, (short) 1, 0xFEED);
    verifyShellDeleteWithTrash(shell, nonEncFile);
    Collection<FileStatus> trashRootsDelete2 = fs.getTrashRoots(true);
    assertEquals("Unexpected getTrashRoots result", 2, trashRootsDelete2.size());
}
Also used : Path(org.apache.hadoop.fs.Path) FsShell(org.apache.hadoop.fs.FsShell) FileStatus(org.apache.hadoop.fs.FileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Configuration(org.apache.hadoop.conf.Configuration) HdfsAdmin(org.apache.hadoop.hdfs.client.HdfsAdmin) Test(org.junit.Test)

Example 19 with HdfsAdmin

Use of org.apache.hadoop.hdfs.client.HdfsAdmin in project hadoop by apache.

From class TestEncryptionZones, method testEncryptionZoneWithTrash().

@Test
public void testEncryptionZoneWithTrash() throws Exception {
    // Create the encryption zone1
    final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    final Path zone1 = new Path("/zone1");
    fs.mkdirs(zone1);
    dfsAdmin.createEncryptionZone(zone1, TEST_KEY, NO_TRASH);
    // Create the encrypted file in zone1
    final Path encFile1 = new Path(zone1, "encFile1");
    final int len = 8192;
    DFSTestUtil.createFile(fs, encFile1, len, (short) 1, 0xFEED);
    Configuration clientConf = new Configuration(conf);
    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
    FsShell shell = new FsShell(clientConf);
    // Delete encrypted file from the shell with trash enabled
    // Verify the file is moved to appropriate trash within the zone
    verifyShellDeleteWithTrash(shell, encFile1);
    // Delete encryption zone from the shell with trash enabled
    // Verify the zone is moved to appropriate trash location in user's home dir
    verifyShellDeleteWithTrash(shell, zone1);
    final Path topEZ = new Path("/topEZ");
    fs.mkdirs(topEZ);
    dfsAdmin.createEncryptionZone(topEZ, TEST_KEY, NO_TRASH);
    final String NESTED_EZ_TEST_KEY = "nested_ez_test_key";
    DFSTestUtil.createKey(NESTED_EZ_TEST_KEY, cluster, conf);
    final Path nestedEZ = new Path(topEZ, "nestedEZ");
    fs.mkdirs(nestedEZ);
    dfsAdmin.createEncryptionZone(nestedEZ, NESTED_EZ_TEST_KEY, NO_TRASH);
    final Path topEZFile = new Path(topEZ, "file");
    final Path nestedEZFile = new Path(nestedEZ, "file");
    DFSTestUtil.createFile(fs, topEZFile, len, (short) 1, 0xFEED);
    DFSTestUtil.createFile(fs, nestedEZFile, len, (short) 1, 0xFEED);
    verifyShellDeleteWithTrash(shell, topEZFile);
    verifyShellDeleteWithTrash(shell, nestedEZFile);
    // Test nested EZ with webHDFS
    final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
    final String currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
    final Path expectedTopTrash = new Path(topEZ, new Path(FileSystem.TRASH_PREFIX, currentUser));
    final Path expectedNestedTrash = new Path(nestedEZ, new Path(FileSystem.TRASH_PREFIX, currentUser));
    final Path topTrash = webFS.getTrashRoot(topEZFile);
    final Path nestedTrash = webFS.getTrashRoot(nestedEZFile);
    assertEquals(expectedTopTrash.toUri().getPath(), topTrash.toUri().getPath());
    assertEquals(expectedNestedTrash.toUri().getPath(), nestedTrash.toUri().getPath());
    verifyShellDeleteWithTrash(shell, nestedEZ);
    verifyShellDeleteWithTrash(shell, topEZ);
}
Also used : Path(org.apache.hadoop.fs.Path) FsShell(org.apache.hadoop.fs.FsShell) Configuration(org.apache.hadoop.conf.Configuration) HdfsAdmin(org.apache.hadoop.hdfs.client.HdfsAdmin) Mockito.anyString(org.mockito.Mockito.anyString) WebHdfsFileSystem(org.apache.hadoop.hdfs.web.WebHdfsFileSystem) Test(org.junit.Test)

Example 20 with HdfsAdmin

Use of org.apache.hadoop.hdfs.client.HdfsAdmin in project hadoop by apache.

From class TestErasureCodingPolicies, method testPermissions().

@Test
public void testPermissions() throws Exception {
    UserGroupInformation user = UserGroupInformation.createUserForTesting("ecuser", new String[] { "ecgroup" });
    FileSystem userfs = user.doAs(new PrivilegedExceptionAction<FileSystem>() {

        @Override
        public FileSystem run() throws Exception {
            return FileSystem.get(conf);
        }
    });
    HdfsAdmin useradmin = user.doAs(new PrivilegedExceptionAction<HdfsAdmin>() {

        @Override
        public HdfsAdmin run() throws Exception {
            return new HdfsAdmin(userfs.getUri(), conf);
        }
    });
    // Create dir and set an EC policy, create an EC file
    Path ecdir = new Path("/ecdir");
    Path ecfile = new Path(ecdir, "ecfile");
    fs.setPermission(new Path("/"), new FsPermission((short) 0777));
    userfs.mkdirs(ecdir);
    final String ecPolicyName = ErasureCodingPolicyManager.getSystemPolicies()[0].getName();
    useradmin.setErasureCodingPolicy(ecdir, ecPolicyName);
    assertEquals("Policy not present on dir", ecPolicyName, useradmin.getErasureCodingPolicy(ecdir).getName());
    userfs.create(ecfile).close();
    assertEquals("Policy not present on file", ecPolicyName, useradmin.getErasureCodingPolicy(ecfile).getName());
    // Unset and re-set
    useradmin.unsetErasureCodingPolicy(ecdir);
    useradmin.setErasureCodingPolicy(ecdir, ecPolicyName);
    // Change write permissions and make sure set and unset are denied
    userfs.setPermission(ecdir, new FsPermission((short) 0555));
    try {
        useradmin.setErasureCodingPolicy(ecdir, ecPolicyName);
        fail("Should not be able to setECPolicy without write permissions");
    } catch (AccessControlException e) {
        // pass
    }
    try {
        useradmin.unsetErasureCodingPolicy(ecdir);
        fail("Should not be able to unsetECPolicy without write permissions");
    } catch (AccessControlException e) {
        // pass
    }
    // Change the permissions again, check that set and unset work
    userfs.setPermission(ecdir, new FsPermission((short) 0640));
    useradmin.unsetErasureCodingPolicy(ecdir);
    useradmin.setErasureCodingPolicy(ecdir, ecPolicyName);
    // Set, unset, and get with another user should be unauthorized
    UserGroupInformation nobody = UserGroupInformation.createUserForTesting("nobody", new String[] { "nogroup" });
    HdfsAdmin noadmin = nobody.doAs(new PrivilegedExceptionAction<HdfsAdmin>() {

        @Override
        public HdfsAdmin run() throws Exception {
            return new HdfsAdmin(userfs.getUri(), conf);
        }
    });
    try {
        noadmin.setErasureCodingPolicy(ecdir, ecPolicyName);
        fail("Should not be able to setECPolicy without write permissions");
    } catch (AccessControlException e) {
        // pass
    }
    try {
        noadmin.unsetErasureCodingPolicy(ecdir);
        fail("Should not be able to unsetECPolicy without write permissions");
    } catch (AccessControlException e) {
        // pass
    }
    try {
        noadmin.getErasureCodingPolicy(ecdir);
        fail("Should not be able to getECPolicy without write permissions");
    } catch (AccessControlException e) {
        // pass
    }
    // superuser can do whatever it wants
    userfs.setPermission(ecdir, new FsPermission((short) 0000));
    HdfsAdmin superadmin = new HdfsAdmin(fs.getUri(), conf);
    superadmin.unsetErasureCodingPolicy(ecdir);
    superadmin.setErasureCodingPolicy(ecdir, ecPolicyName);
    superadmin.getErasureCodingPolicy(ecdir);
    // Normal user no longer has access
    try {
        useradmin.getErasureCodingPolicy(ecdir);
        fail("Normal user should not have access");
    } catch (AccessControlException e) {
        // pass
    }
    try {
        useradmin.setErasureCodingPolicy(ecfile, ecPolicyName);
        fail("Normal user should not have access");
    } catch (AccessControlException e) {
        // pass
    }
    try {
        useradmin.unsetErasureCodingPolicy(ecfile);
        fail("Normal user should not have access");
    } catch (AccessControlException e) {
        // pass
    }
    // Everyone has access to getting the list of EC policies
    useradmin.getErasureCodingPolicies();
    noadmin.getErasureCodingPolicies();
    superadmin.getErasureCodingPolicies();
}
Also used : Path(org.apache.hadoop.fs.Path) HdfsAdmin(org.apache.hadoop.hdfs.client.HdfsAdmin) FileSystem(org.apache.hadoop.fs.FileSystem) AccessControlException(org.apache.hadoop.security.AccessControlException) FsPermission(org.apache.hadoop.fs.permission.FsPermission) IOException(java.io.IOException) FileNotFoundException(java.io.FileNotFoundException) AccessControlException(org.apache.hadoop.security.AccessControlException) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)

Aggregations

HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin): 27
Path (org.apache.hadoop.fs.Path): 21
Test (org.junit.Test): 16
Configuration (org.apache.hadoop.conf.Configuration): 8
File (java.io.File): 7
Mockito.anyString (org.mockito.Mockito.anyString): 7
FileSystemTestHelper (org.apache.hadoop.fs.FileSystemTestHelper): 6
FsShell (org.apache.hadoop.fs.FsShell): 6
Before (org.junit.Before): 6
FileSystem (org.apache.hadoop.fs.FileSystem): 5
FileSystemTestWrapper (org.apache.hadoop.fs.FileSystemTestWrapper): 5
IOException (java.io.IOException): 4
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 4
AccessControlException (org.apache.hadoop.security.AccessControlException): 4
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 4
ExecutionException (java.util.concurrent.ExecutionException): 3
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 3
EncryptionZoneManager (org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager): 3
WebHdfsFileSystem (org.apache.hadoop.hdfs.web.WebHdfsFileSystem): 3
BeforeClass (org.junit.BeforeClass): 3