
Example 6 with FsShell

Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

From the class TestAclCommands, method testLsNoRpcForGetAclStatus.

@Test
public void testLsNoRpcForGetAclStatus() throws Exception {
    Configuration conf = new Configuration();
    // Point fs.defaultFS at the stub filesystem registered on the next line
    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
    conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
    // Simulate a filesystem whose getAclStatus RPC is not implemented (see the assertion below)
    conf.setBoolean("stubfs.noRpcForGetAclStatus", true);
    // -ls should still exit 0: listing must not depend on ACL information
    assertEquals("ls must succeed even if getAclStatus RPC does not exist.", 0,
        ToolRunner.run(conf, new FsShell(), new String[] { "-ls", "/" }));
}
Also used: FsShell (org.apache.hadoop.fs.FsShell), Configuration (org.apache.hadoop.conf.Configuration), Test (org.junit.Test)
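
The snippet above shows the standard pattern for driving shell commands from Java: build a Configuration, hand a fresh FsShell to ToolRunner, and check the exit code. The following standalone sketch is not taken from the Hadoop test suite; it simply restates that pattern against whatever filesystem fs.defaultFS points to, with the listed path chosen for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class FsShellLsExample {
    public static void main(String[] args) throws Exception {
        // Picks up core-site.xml / hdfs-site.xml from the classpath
        Configuration conf = new Configuration();
        // FsShell implements Tool, so ToolRunner parses generic options and invokes run()
        int exitCode = ToolRunner.run(conf, new FsShell(), new String[] { "-ls", "/" });
        System.exit(exitCode); // 0 on success, non-zero on error
    }
}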

Example 7 with FsShell

Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

From the class TestEncryptionZones, method testRootDirEZTrash.

@Test
public void testRootDirEZTrash() throws Exception {
    final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    final String currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
    final Path rootDir = new Path("/");
    dfsAdmin.createEncryptionZone(rootDir, TEST_KEY, NO_TRASH);
    final Path encFile = new Path("/encFile");
    final int len = 8192;
    DFSTestUtil.createFile(fs, encFile, len, (short) 1, 0xFEED);
    Configuration clientConf = new Configuration(conf);
    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
    FsShell shell = new FsShell(clientConf);
    verifyShellDeleteWithTrash(shell, encFile);
    // The trash path should be consistent if the root path is an encryption zone
    Path encFileCurrentTrash = shell.getCurrentTrashDir(encFile);
    Path rootDirCurrentTrash = shell.getCurrentTrashDir(rootDir);
    assertEquals("Root trash should be equal with ezFile trash", encFileCurrentTrash, rootDirCurrentTrash);
    // Use webHDFS client to test trash root path
    final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
    final Path expectedTrash = new Path(rootDir, new Path(FileSystem.TRASH_PREFIX, currentUser));
    Path webHDFSTrash = webFS.getTrashRoot(encFile);
    assertEquals(expectedTrash.toUri().getPath(), webHDFSTrash.toUri().getPath());
    assertEquals(encFileCurrentTrash.getParent().toUri().getPath(), webHDFSTrash.toUri().getPath());
}
Also used: Path (org.apache.hadoop.fs.Path), FsShell (org.apache.hadoop.fs.FsShell), Configuration (org.apache.hadoop.conf.Configuration), HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin), Mockito.anyString (org.mockito.Mockito.anyString), WebHdfsFileSystem (org.apache.hadoop.hdfs.web.WebHdfsFileSystem), Test (org.junit.Test)
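
verifyShellDeleteWithTrash is a helper defined elsewhere in the test class. As a rough sketch of the same idea, deleting through the shell with trash enabled and then resolving where the file ended up might look like the following; the method name, path, and one-minute trash interval are illustrative, not taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ToolRunner;

public class TrashDeleteSketch {
    // With fs.trash.interval > 0, "-rm" moves the file under a trash root
    // instead of removing it outright.
    static void deleteToTrash(Configuration conf, Path file) throws Exception {
        Configuration clientConf = new Configuration(conf);
        clientConf.setLong("fs.trash.interval", 1); // enable trash (value is in minutes)
        FsShell shell = new FsShell(clientConf);
        int rc = ToolRunner.run(shell, new String[] { "-rm", file.toUri().getPath() });
        // For a file inside an encryption zone the trash root stays inside the zone;
        // getCurrentTrashDir resolves the correct location either way.
        Path trashDir = shell.getCurrentTrashDir(file);
        System.out.println("exit=" + rc + ", trash dir=" + trashDir);
    }
}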

Example 8 with FsShell

Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

From the class TestEncryptionZones, method testProvisionTrash.

/**
   * Make sure the "hdfs crypto -provisionTrash" command creates a trash
   * directory with the sticky bit set.
   * @throws Exception
   */
@Test
public void testProvisionTrash() throws Exception {
    // create an EZ /zones/zone1
    final Path zoneParent = new Path("/zones");
    final Path zone1 = new Path(zoneParent, "zone1");
    CryptoAdmin cryptoAdmin = new CryptoAdmin(conf);
    fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
    String[] cryptoArgv = new String[] { "-createZone", "-keyName", TEST_KEY, "-path", zone1.toUri().getPath() };
    cryptoAdmin.run(cryptoArgv);
    // remove the trash directory
    Configuration clientConf = new Configuration(conf);
    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
    final FsShell shell = new FsShell(clientConf);
    final Path trashDir = new Path(zone1, FileSystem.TRASH_PREFIX);
    String[] argv = new String[] { "-rmdir", trashDir.toUri().getPath() };
    int res = ToolRunner.run(shell, argv);
    assertEquals("Unable to delete trash directory.", 0, res);
    assertFalse(fsWrapper.exists(trashDir));
    // execute -provisionTrash command option and make sure the trash
    // directory has sticky bit.
    String[] provisionTrashArgv = new String[] { "-provisionTrash", "-path", zone1.toUri().getPath() };
    cryptoAdmin.run(provisionTrashArgv);
    assertTrue(fsWrapper.exists(trashDir));
    FileStatus trashFileStatus = fsWrapper.getFileStatus(trashDir);
    assertTrue(trashFileStatus.getPermission().getStickyBit());
}
Also used: Path (org.apache.hadoop.fs.Path), FsShell (org.apache.hadoop.fs.FsShell), FileStatus (org.apache.hadoop.fs.FileStatus), HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus), Configuration (org.apache.hadoop.conf.Configuration), CryptoAdmin (org.apache.hadoop.hdfs.tools.CryptoAdmin), Mockito.anyString (org.mockito.Mockito.anyString), Test (org.junit.Test)
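
CryptoAdmin is the class behind the "hdfs crypto" CLI, so the test above exercises the same code path as the command line. As a sketch of the programmatic counterpart via HdfsAdmin, assuming the key already exists in the configured KMS and that provisionEncryptionZoneTrash is available in the Hadoop version in use (the key name and zone path below are illustrative):

import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class ProvisionTrashSketch {
    static void createZoneWithTrash(Configuration conf) throws Exception {
        HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
        Path zone = new Path("/zones/zone1"); // must already exist and be empty
        // Create the zone without provisioning trash, mirroring the NO_TRASH flag used earlier
        dfsAdmin.createEncryptionZone(zone, "mykey",
            EnumSet.of(CreateEncryptionZoneFlag.NO_TRASH));
        // Then create <zone>/.Trash with the sticky bit set, as -provisionTrash does
        dfsAdmin.provisionEncryptionZoneTrash(zone);
    }
}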

Example 9 with FsShell

Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

From the class TestErasureCodingPolicyWithSnapshot, method testCopySnapshotWillNotPreserveErasureCodingPolicy.

/**
   * Test that copying a snapshot does not preserve its erasure coding policy info.
   */
@Test(timeout = 120000)
public void testCopySnapshotWillNotPreserveErasureCodingPolicy() throws Exception {
    final int len = 1024;
    final Path ecDir = new Path("/ecdir");
    final Path ecFile = new Path(ecDir, "ecFile");
    fs.mkdirs(ecDir);
    fs.allowSnapshot(ecDir);
    // set erasure coding policy
    fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
    DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
    final Path snap1 = fs.createSnapshot(ecDir, "snap1");
    Path snap1Copy = new Path(ecDir.toString() + "-copy");
    final Path snap1CopyECDir = new Path("/ecdir-copy");
    String[] argv = new String[] { "-cp", "-px", snap1.toUri().toString(), snap1Copy.toUri().toString() };
    int ret = ToolRunner.run(new FsShell(conf), argv);
    assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);
    assertNull("Got unexpected erasure coding policy", fs.getErasureCodingPolicy(snap1CopyECDir));
    assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy, fs.getErasureCodingPolicy(snap1));
}
Also used: Path (org.apache.hadoop.fs.Path), FsShell (org.apache.hadoop.fs.FsShell), Test (org.junit.Test)
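
For context, tagging a directory with an erasure coding policy and reading it back (outside of the snapshot scenario) looks roughly like the sketch below. It assumes fs is a DistributedFileSystem on a cluster where the named policy is enabled; the directory and the policy name "RS-6-3-1024k" are illustrative, whereas the test above uses the system default policy.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicySketch {
    static void tagDirectory(DistributedFileSystem fs) throws Exception {
        Path dir = new Path("/ecdir");
        fs.mkdirs(dir);
        // New files created under this directory inherit the policy
        fs.setErasureCodingPolicy(dir, "RS-6-3-1024k");
        ErasureCodingPolicy p = fs.getErasureCodingPolicy(dir);
        // A plain copy of data out of this directory does not carry the policy along;
        // the destination follows whatever policy (or replication) applies there.
        System.out.println("policy on " + dir + ": " + (p == null ? "none" : p.getName()));
    }
}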

Example 10 with FsShell

Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

From the class TestUnderReplicatedBlocks, method testSetrepIncWithUnderReplicatedBlocks.

// 1 min timeout
@Test(timeout = 60000)
public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
    Configuration conf = new HdfsConfiguration();
    final short REPLICATION_FACTOR = 2;
    final String FILE_NAME = "/testFile";
    final Path FILE_PATH = new Path(FILE_NAME);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR + 1).build();
    try {
        // create a file with one block with a replication factor of 2
        final FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
        DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
        // remove one replica from the blocksMap so block becomes under-replicated
        // but the block does not get put into the under-replicated blocks queue
        final BlockManager bm = cluster.getNamesystem().getBlockManager();
        ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
        DatanodeDescriptor dn = bm.blocksMap.getStorages(b.getLocalBlock()).iterator().next().getDatanodeDescriptor();
        bm.addToInvalidates(b.getLocalBlock(), dn);
        // Compute the invalidate work in NN, and trigger the heartbeat from DN
        BlockManagerTestUtil.computeAllPendingWork(bm);
        DataNodeTestUtils.triggerHeartbeat(cluster.getDataNode(dn.getIpcPort()));
        // Wait to make sure the DataNode receives the deletion request 
        Thread.sleep(5000);
        // Remove the record from blocksMap
        bm.blocksMap.removeNode(b.getLocalBlock(), dn);
        // increment this file's replication factor
        FsShell shell = new FsShell(conf);
        assertEquals(0, shell.run(new String[] { "-setrep", "-w", Integer.toString(1 + REPLICATION_FACTOR), FILE_NAME }));
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), FsShell (org.apache.hadoop.fs.FsShell), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Configuration (org.apache.hadoop.conf.Configuration), FileSystem (org.apache.hadoop.fs.FileSystem), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Test (org.junit.Test)
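
The "-setrep" command is essentially a shell front end for changing a file's target replication factor. A sketch of the programmatic equivalent, without the "-w" wait (path and factor are illustrative):

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetReplicationSketch {
    static void bumpReplication(FileSystem fs) throws Exception {
        Path file = new Path("/testFile");
        // Request a new target replication factor; on HDFS the extra block copies
        // are created asynchronously, which is what "-setrep -w" waits for.
        boolean accepted = fs.setReplication(file, (short) 3);
        System.out.println("setReplication accepted: " + accepted);
    }
}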

Aggregations

FsShell (org.apache.hadoop.fs.FsShell): 37
Path (org.apache.hadoop.fs.Path): 27
Test (org.junit.Test): 26
Configuration (org.apache.hadoop.conf.Configuration): 18
FileSystem (org.apache.hadoop.fs.FileSystem): 10
FileStatus (org.apache.hadoop.fs.FileStatus): 9
HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin): 6
IOException (java.io.IOException): 5
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 4
Mockito.anyString (org.mockito.Mockito.anyString): 4
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 3
PrintStream (java.io.PrintStream): 3
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 3
SnapshotDiffReport (org.apache.hadoop.hdfs.protocol.SnapshotDiffReport): 3
WebHdfsFileSystem (org.apache.hadoop.hdfs.web.WebHdfsFileSystem): 3
File (java.io.File): 2
FileNotFoundException (java.io.FileNotFoundException): 2
HashMap (java.util.HashMap): 2
Map (java.util.Map): 2
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2