
Example 26 with FsShell

Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

The class TestEncryptionZones, method testTrashStickyBit.

/**
   * Make sure the "hdfs crypto -createZone" command creates a trash
   * directory with the sticky bit set.
   * @throws Exception
   */
@Test
public void testTrashStickyBit() throws Exception {
    // create an EZ /zones/zone1, make it world writable.
    final Path zoneParent = new Path("/zones");
    final Path zone1 = new Path(zoneParent, "zone1");
    CryptoAdmin cryptoAdmin = new CryptoAdmin(conf);
    fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
    fsWrapper.setPermission(zone1, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    String[] cryptoArgv = new String[] { "-createZone", "-keyName", TEST_KEY, "-path", zone1.toUri().getPath() };
    cryptoAdmin.run(cryptoArgv);
    // create a file in EZ
    final Path ezfile1 = new Path(zone1, "file1");
    // Create the encrypted file in zone1
    final int len = 8192;
    DFSTestUtil.createFile(fs, ezfile1, len, (short) 1, 0xFEED);
    // enable trash, delete /zones/zone1/file1,
    // which moves the file to
    // /zones/zone1/.Trash/$SUPERUSER/Current/zones/zone1/file1
    Configuration clientConf = new Configuration(conf);
    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
    final FsShell shell = new FsShell(clientConf);
    String[] argv = new String[] { "-rm", ezfile1.toString() };
    int res = ToolRunner.run(shell, argv);
    assertEquals("Can't remove a file in EZ as superuser", 0, res);
    final Path trashDir = new Path(zone1, FileSystem.TRASH_PREFIX);
    assertTrue(fsWrapper.exists(trashDir));
    FileStatus trashFileStatus = fsWrapper.getFileStatus(trashDir);
    assertTrue(trashFileStatus.getPermission().getStickyBit());
    // create a non-privileged user
    final UserGroupInformation user = UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
    user.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            final Path ezfile2 = new Path(zone1, "file2");
            final int len = 8192;
            // create a file /zones/zone1/file2 in EZ
            // this file is owned by user:mygroup
            FileSystem fs2 = FileSystem.get(cluster.getConfiguration(0));
            DFSTestUtil.createFile(fs2, ezfile2, len, (short) 1, 0xFEED);
            // delete /zones/zone1/file2,
            // which moves the file to
            // /zones/zone1/.Trash/user/Current/zones/zone1/file2
            String[] argv = new String[] { "-rm", ezfile2.toString() };
            int res = ToolRunner.run(shell, argv);
            assertEquals("Can't remove a file in EZ as user:mygroup", 0, res);
            return null;
        }
    });
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) Configuration(org.apache.hadoop.conf.Configuration) CryptoAdmin(org.apache.hadoop.hdfs.tools.CryptoAdmin) Mockito.anyString(org.mockito.Mockito.anyString) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) AccessControlException(org.apache.hadoop.security.AccessControlException) FsShell(org.apache.hadoop.fs.FsShell) FileSystem(org.apache.hadoop.fs.FileSystem) WebHdfsFileSystem(org.apache.hadoop.hdfs.web.WebHdfsFileSystem) Matchers.anyObject(org.mockito.Matchers.anyObject) FsPermission(org.apache.hadoop.fs.permission.FsPermission) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
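
A minimal standalone sketch of the same pattern (a client Configuration with fs.trash.interval set, driving FsShell through ToolRunner). It assumes fs.defaultFS already points at a running HDFS cluster; the class name and file path below are illustrative, not taken from the test suite.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class TrashEnabledDelete {
    public static void main(String[] args) throws Exception {
        // Client-side configuration; a non-zero fs.trash.interval turns "-rm" into a
        // move into the .Trash directory instead of a permanent delete.
        Configuration clientConf = new Configuration();
        clientConf.setLong("fs.trash.interval", 1);
        // FsShell behaves like the "hadoop fs" command line; ToolRunner parses generic
        // options and then invokes shell.run() with the remaining arguments.
        FsShell shell = new FsShell(clientConf);
        int res = ToolRunner.run(shell, new String[] { "-rm", "/zones/zone1/file1" });
        System.out.println("-rm exit code: " + res); // 0 on success
    }
}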

Example 27 with FsShell

Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

The class TestTrashWithEncryptionZones, method testDeleteEZWithMultipleUsers.

@Test
public void testDeleteEZWithMultipleUsers() throws Exception {
    final Path zone = new Path("/zones");
    fs.mkdirs(zone);
    final Path zone1 = new Path("/zones/zone" + zoneCounter.getAndIncrement());
    fs.mkdirs(zone1);
    dfsAdmin.createEncryptionZone(zone1, TEST_KEY, NO_TRASH);
    fsWrapper.setPermission(zone1, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    final Path encFile1 = new Path(zone1, "encFile" + fileCounter.getAndIncrement());
    DFSTestUtil.createFile(fs, encFile1, LEN, (short) 1, 0xFEED);
    // create a non-privileged user
    final UserGroupInformation user = UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
    final Path encFile2 = new Path(zone1, "encFile" + fileCounter.getAndIncrement());
    user.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws Exception {
            // create a file /zones/zone1/encFile2 in EZ
            // this file is owned by user:mygroup
            FileSystem fs2 = FileSystem.get(cluster.getConfiguration(0));
            DFSTestUtil.createFile(fs2, encFile2, LEN, (short) 1, 0xFEED);
            // Delete /zones/zone1/encFile2, which moves the file to
            // /zones/zone1/.Trash/user/Current/zones/zone1/encFile2
            DFSTestUtil.verifyDelete(shell, fs, encFile2, true);
            // Deleting /zones/zone1 should not succeed, as the current user is not the admin
            String[] argv = new String[] { "-rm", "-r", zone1.toString() };
            int res = ToolRunner.run(shell, argv);
            assertEquals("Non-admin could delete an encryption zone with multiple" + " users : " + zone1, 1, res);
            return null;
        }
    });
    shell = new FsShell(clientConf);
    DFSTestUtil.verifyDelete(shell, fs, zone1, true);
}
Also used : Path(org.apache.hadoop.fs.Path) FsShell(org.apache.hadoop.fs.FsShell) FileSystem(org.apache.hadoop.fs.FileSystem) FsPermission(org.apache.hadoop.fs.permission.FsPermission) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
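
The doAs block above is how these tests impersonate an unprivileged caller. Below is a hedged sketch of just that piece, running an FsShell command under a fabricated test user; the user name, group, and path are illustrative. FsShell obtains its FileSystem clients lazily at run() time, which is why a shell built during setup can still act as the impersonated user inside doAs.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ToolRunner;

public class RunShellAsTestUser {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        final FsShell shell = new FsShell(conf);
        // createUserForTesting fabricates a UGI without Kerberos or OS accounts, so the
        // command below is authorized on the NameNode as "user" in group "mygroup".
        UserGroupInformation user =
                UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
        int res = user.doAs((PrivilegedExceptionAction<Integer>) () ->
                ToolRunner.run(shell, new String[] { "-rm", "-r", "/zones/zone1" }));
        // A non-zero exit code is expected when "user" is not allowed to delete the zone root.
        System.out.println("exit code as non-privileged user: " + res);
    }
}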

Example 28 with FsShell

Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

The class TestTrashWithEncryptionZones, method setup.

@Before
public void setup() throws Exception {
    conf = new HdfsConfiguration();
    fsHelper = new FileSystemTestHelper();
    // Set up java key store
    String testRoot = fsHelper.getTestRootDir();
    testRootDir = new File(testRoot).getAbsoluteFile();
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, getKeyProviderURI());
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    // Lower the batch size for testing
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, 2);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    org.apache.log4j.Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE);
    fs = cluster.getFileSystem();
    fsWrapper = new FileSystemTestWrapper(fs);
    dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
    setProvider();
    // Create a test key
    DFSTestUtil.createKey(TEST_KEY, cluster, conf);
    clientConf = new Configuration(conf);
    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
    shell = new FsShell(clientConf);
}
Also used : FileSystemTestHelper(org.apache.hadoop.fs.FileSystemTestHelper) FsShell(org.apache.hadoop.fs.FsShell) Configuration(org.apache.hadoop.conf.Configuration) HdfsAdmin(org.apache.hadoop.hdfs.client.HdfsAdmin) FileSystemTestWrapper(org.apache.hadoop.fs.FileSystemTestWrapper) File(java.io.File) EncryptionZoneManager(org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager) Before(org.junit.Before)
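
Most of this setup exists to satisfy one requirement: the NameNode and client must agree on a key provider before TEST_KEY can be created and an encryption zone can reference it. Here is a hedged sketch of just the key-provider wiring; the keystore path is illustrative (the real test derives it from FileSystemTestHelper's test root), and the jceks://file@ URI shape mirrors the kms.keystore entry in Example 29.

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class KeyProviderWiring {
    public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Point HDFS at a file-based JCEKS keystore; the location below is illustrative.
        File keystore = new File("/tmp/test-root", "test.jks");
        String providerUri = "jceks://file@" + keystore.toURI().getPath();
        conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, providerUri);
        System.out.println("key provider: "
                + conf.get(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH));
    }
}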

Example 29 with FsShell

Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

The class TestTrashWithSecureEncryptionZones, method init.

@BeforeClass
public static void init() throws Exception {
    baseDir = getTestDir();
    FileUtil.fullyDelete(baseDir);
    assertTrue(baseDir.mkdirs());
    Properties kdcConf = MiniKdc.createConf();
    kdc = new MiniKdc(kdcConf, baseDir);
    kdc.start();
    baseConf = new HdfsConfiguration();
    SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, baseConf);
    UserGroupInformation.setConfiguration(baseConf);
    assertTrue("Expected configuration to enable security", UserGroupInformation.isSecurityEnabled());
    File keytabFile = new File(baseDir, "test.keytab");
    keytab = keytabFile.getAbsolutePath();
    // Windows will not reverse-resolve "127.0.0.1" to "localhost".
    String krbInstance = Path.WINDOWS ? "127.0.0.1" : "localhost";
    kdc.createPrincipal(keytabFile, HDFS_USER_NAME + "/" + krbInstance, SPNEGO_USER_NAME + "/" + krbInstance, OOZIE_USER_NAME + "/" + krbInstance, OOZIE_PROXIED_USER_NAME + "/" + krbInstance);
    hdfsPrincipal = HDFS_USER_NAME + "/" + krbInstance + "@" + kdc.getRealm();
    spnegoPrincipal = SPNEGO_USER_NAME + "/" + krbInstance + "@" + kdc.getRealm();
    baseConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
    baseConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
    baseConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
    baseConf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
    baseConf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
    baseConf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
    baseConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
    baseConf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
    baseConf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
    baseConf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
    baseConf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, "localhost:0");
    baseConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);
    // Set a small KMSClient EDEK cache size (4 * 0.5 = 2) to trigger an
    // on-demand refill upon the 3rd file creation
    baseConf.set(KMS_CLIENT_ENC_KEY_CACHE_SIZE, "4");
    baseConf.set(KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK, "0.5");
    String keystoresDir = baseDir.getAbsolutePath();
    String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSecureEncryptionZoneWithKMS.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, baseConf, false);
    baseConf.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY, KeyStoreTestUtil.getClientSSLConfigFileName());
    baseConf.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, KeyStoreTestUtil.getServerSSLConfigFileName());
    File kmsFile = new File(baseDir, "kms-site.xml");
    if (kmsFile.exists()) {
        FileUtil.fullyDelete(kmsFile);
    }
    Configuration kmsConf = new Configuration(true);
    kmsConf.set(KMSConfiguration.KEY_PROVIDER_URI, "jceks://file@" + new Path(baseDir.toString(), "kms.keystore").toUri());
    kmsConf.set("hadoop.kms.authentication.type", "kerberos");
    kmsConf.set("hadoop.kms.authentication.kerberos.keytab", keytab);
    kmsConf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
    kmsConf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
    kmsConf.set("hadoop.kms.acl.GENERATE_EEK", "hdfs");
    Writer writer = new FileWriter(kmsFile);
    kmsConf.writeXml(writer);
    writer.close();
    // Start MiniKMS
    MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder();
    miniKMS = miniKMSBuilder.setKmsConfDir(baseDir).build();
    miniKMS.start();
    baseConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, getKeyProviderURI());
    baseConf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    conf = new HdfsConfiguration(baseConf);
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
    // Wait for the cluster to become active
    cluster.waitActive();
    // Create a test key
    DFSTestUtil.createKey(TEST_KEY, cluster, conf);
    clientConf = new Configuration(conf);
    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
    shell = new FsShell(clientConf);
    System.setProperty("user.name", HDFS_USER_NAME);
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) KMSConfiguration(org.apache.hadoop.crypto.key.kms.server.KMSConfiguration) FileWriter(java.io.FileWriter) Properties(java.util.Properties) FsShell(org.apache.hadoop.fs.FsShell) MiniKMS(org.apache.hadoop.crypto.key.kms.server.MiniKMS) HdfsAdmin(org.apache.hadoop.hdfs.client.HdfsAdmin) MiniKdc(org.apache.hadoop.minikdc.MiniKdc) File(java.io.File) FileWriter(java.io.FileWriter) Writer(java.io.Writer) BeforeClass(org.junit.BeforeClass)
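
A reusable piece of the init above is writing a Configuration out as an XML site file that MiniKMS then loads from its conf directory. Below is a hedged sketch of that step on its own; the directory is illustrative and the authentication type is simplified to "simple" (the secure test uses "kerberos" plus keytab and principal settings).

import java.io.File;
import java.io.FileWriter;
import java.io.Writer;
import org.apache.hadoop.conf.Configuration;

public class WriteKmsSite {
    public static void main(String[] args) throws Exception {
        // Start from a fresh Configuration (true = load default resources) and set only
        // the KMS properties; the property names mirror the ones used in init() above.
        Configuration kmsConf = new Configuration(true);
        kmsConf.set("hadoop.kms.authentication.type", "simple");
        kmsConf.set("hadoop.kms.acl.GENERATE_EEK", "hdfs");
        // MiniKMS reads kms-site.xml from the directory passed to setKmsConfDir().
        File confDir = new File("/tmp/kms-conf"); // illustrative location
        confDir.mkdirs();
        File kmsFile = new File(confDir, "kms-site.xml");
        try (Writer writer = new FileWriter(kmsFile)) {
            kmsConf.writeXml(writer);
        }
        System.out.println("wrote " + kmsFile.getAbsolutePath());
    }
}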

Example 30 with FsShell

Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

The class TestSnapshotFileLength, method testSnapshotFileLengthWithCatCommand.

/**
   * Added as part of HDFS-5343.
   * Tests that the cat command on a snapshot path cannot read a file
   * beyond the snapshot file length.
   * @throws Exception
   */
@Test(timeout = 600000)
public void testSnapshotFileLengthWithCatCommand() throws Exception {
    FSDataInputStream fis = null;
    FileStatus fileStatus = null;
    int bytesRead;
    byte[] buffer = new byte[BLOCKSIZE * 8];
    hdfs.mkdirs(sub);
    Path file1 = new Path(sub, file1Name);
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
    hdfs.allowSnapshot(sub);
    hdfs.createSnapshot(sub, snapshot1);
    DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);
    // Make sure we can read the entire file via its non-snapshot path.
    fileStatus = hdfs.getFileStatus(file1);
    assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
    fis = hdfs.open(file1);
    bytesRead = fis.read(buffer, 0, buffer.length);
    assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
    fis.close();
    Path file1snap1 = SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
    fis = hdfs.open(file1snap1);
    fileStatus = hdfs.getFileStatus(file1snap1);
    assertEquals(fileStatus.getLen(), BLOCKSIZE);
    // Make sure we can only read up to the snapshot length.
    bytesRead = fis.read(buffer, 0, buffer.length);
    assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
    fis.close();
    PrintStream outBackup = System.out;
    PrintStream errBackup = System.err;
    ByteArrayOutputStream bao = new ByteArrayOutputStream();
    System.setOut(new PrintStream(bao));
    System.setErr(new PrintStream(bao));
    // Make sure we can cat the file only up to the snapshot length
    FsShell shell = new FsShell();
    try {
        ToolRunner.run(conf, shell, new String[] { "-cat", "/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
        assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
    } finally {
        System.setOut(outBackup);
        System.setErr(errBackup);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FsShell(org.apache.hadoop.fs.FsShell) PrintStream(java.io.PrintStream) FileStatus(org.apache.hadoop.fs.FileStatus) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) ByteArrayOutputStream(java.io.ByteArrayOutputStream) Test(org.junit.Test)
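
The stream redirection above is the usual way to assert on FsShell output, because commands such as -cat write directly to System.out rather than returning data. Here is a hedged sketch of the capture pattern on its own; it assumes fs.defaultFS points at a live cluster, and the snapshot path is illustrative.

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class CaptureCatOutput {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        PrintStream outBackup = System.out;
        PrintStream errBackup = System.err;
        ByteArrayOutputStream bao = new ByteArrayOutputStream();
        try {
            // Redirect stdout and stderr so the bytes emitted by "-cat" can be measured.
            System.setOut(new PrintStream(bao));
            System.setErr(new PrintStream(bao));
            ToolRunner.run(conf, new FsShell(), new String[] { "-cat", "/dir/.snapshot/s1/file1" });
        } finally {
            // Always restore the original streams, just as the test does in its finally block.
            System.setOut(outBackup);
            System.setErr(errBackup);
        }
        outBackup.println("bytes written by -cat: " + bao.size());
    }
}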

Aggregations

FsShell (org.apache.hadoop.fs.FsShell): 37
Path (org.apache.hadoop.fs.Path): 27
Test (org.junit.Test): 26
Configuration (org.apache.hadoop.conf.Configuration): 18
FileSystem (org.apache.hadoop.fs.FileSystem): 10
FileStatus (org.apache.hadoop.fs.FileStatus): 9
HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin): 6
IOException (java.io.IOException): 5
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 4
Mockito.anyString (org.mockito.Mockito.anyString): 4
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 3
PrintStream (java.io.PrintStream): 3
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 3
SnapshotDiffReport (org.apache.hadoop.hdfs.protocol.SnapshotDiffReport): 3
WebHdfsFileSystem (org.apache.hadoop.hdfs.web.WebHdfsFileSystem): 3
File (java.io.File): 2
FileNotFoundException (java.io.FileNotFoundException): 2
HashMap (java.util.HashMap): 2
Map (java.util.Map): 2
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2