Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.
The class TestAclCommands, method testLsNoRpcForGetAclStatus.
@Test
public void testLsNoRpcForGetAclStatus() throws Exception {
  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "stubfs:///");
  conf.setClass("fs.stubfs.impl", StubFileSystem.class, FileSystem.class);
  conf.setBoolean("stubfs.noRpcForGetAclStatus", true);
  assertEquals("ls must succeed even if getAclStatus RPC does not exist.",
      0, ToolRunner.run(conf, new FsShell(), new String[] { "-ls", "/" }));
}
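The test runs the shell entirely in-process: it points the default file system at a stub implementation and asserts on the exit code returned by ToolRunner. The same ToolRunner + FsShell pattern works outside a test; below is a minimal standalone sketch (the class name and the listed path are illustrative, not taken from the Hadoop sources).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical helper class; not part of Hadoop.
public class FsShellLsExample {
  public static void main(String[] args) throws Exception {
    // Picks up fs.defaultFS and the rest of the client configuration
    // from core-site.xml on the classpath.
    Configuration conf = new Configuration();
    // ToolRunner parses the generic options (-D, -fs, ...) and hands the
    // remaining arguments to FsShell, exactly as the test above does.
    int exitCode = ToolRunner.run(conf, new FsShell(),
        new String[] { "-ls", "/" });
    // FsShell returns 0 on success and a non-zero code on failure.
    System.exit(exitCode);
  }
}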
Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.
The class TestEncryptionZones, method testRootDirEZTrash.
@Test
public void testRootDirEZTrash() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  final String currentUser =
      UserGroupInformation.getCurrentUser().getShortUserName();
  final Path rootDir = new Path("/");
  dfsAdmin.createEncryptionZone(rootDir, TEST_KEY, NO_TRASH);
  final Path encFile = new Path("/encFile");
  final int len = 8192;
  DFSTestUtil.createFile(fs, encFile, len, (short) 1, 0xFEED);
  Configuration clientConf = new Configuration(conf);
  clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
  FsShell shell = new FsShell(clientConf);
  verifyShellDeleteWithTrash(shell, encFile);
  // Trash path should be consistent
  // if root path is an encryption zone
  Path encFileCurrentTrash = shell.getCurrentTrashDir(encFile);
  Path rootDirCurrentTrash = shell.getCurrentTrashDir(rootDir);
  assertEquals("Root trash should be equal with ezFile trash",
      encFileCurrentTrash, rootDirCurrentTrash);
  // Use webHDFS client to test trash root path
  final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(
      conf, WebHdfsConstants.WEBHDFS_SCHEME);
  final Path expectedTrash =
      new Path(rootDir, new Path(FileSystem.TRASH_PREFIX, currentUser));
  Path webHDFSTrash = webFS.getTrashRoot(encFile);
  assertEquals(expectedTrash.toUri().getPath(), webHDFSTrash.toUri().getPath());
  assertEquals(encFileCurrentTrash.getParent().toUri().getPath(),
      webHDFSTrash.toUri().getPath());
}
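The two trash lookups that the test compares can also be used directly from client code. The sketch below is an assumed standalone example (the class name, the file path, and the literal "fs.trash.interval" key are illustrative stand-ins for the constants used in the test).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;

// Hypothetical helper class; not part of Hadoop.
public class TrashRootExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Enable client-side trash; the value is the deletion interval in minutes.
    conf.setLong("fs.trash.interval", 1);
    FileSystem fs = FileSystem.get(conf);
    FsShell shell = new FsShell(conf);
    // Path whose trash location we want to resolve (illustrative).
    Path file = new Path("/encFile");
    // FsShell resolves the per-user "Current" trash directory for the path...
    Path currentTrash = shell.getCurrentTrashDir(file);
    // ...while FileSystem returns the trash root itself (the parent of the
    // "Current" directory). Inside an encryption zone both stay under the
    // zone root rather than under /user/<name>.
    Path trashRoot = fs.getTrashRoot(file);
    System.out.println("current trash dir: " + currentTrash);
    System.out.println("trash root:        " + trashRoot);
  }
}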
Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.
The class TestEncryptionZones, method testProvisionTrash.
/**
 * Make sure hdfs crypto -provisionTrash command creates a trash directory
 * with sticky bits.
 * @throws Exception
 */
@Test
public void testProvisionTrash() throws Exception {
  // create an EZ /zones/zone1
  final Path zoneParent = new Path("/zones");
  final Path zone1 = new Path(zoneParent, "zone1");
  CryptoAdmin cryptoAdmin = new CryptoAdmin(conf);
  fsWrapper.mkdir(zone1, FsPermission.getDirDefault(), true);
  String[] cryptoArgv = new String[] { "-createZone", "-keyName", TEST_KEY,
      "-path", zone1.toUri().getPath() };
  cryptoAdmin.run(cryptoArgv);
  // remove the trash directory
  Configuration clientConf = new Configuration(conf);
  clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
  final FsShell shell = new FsShell(clientConf);
  final Path trashDir = new Path(zone1, FileSystem.TRASH_PREFIX);
  String[] argv = new String[] { "-rmdir", trashDir.toUri().getPath() };
  int res = ToolRunner.run(shell, argv);
  assertEquals("Unable to delete trash directory.", 0, res);
  assertFalse(fsWrapper.exists(trashDir));
  // execute -provisionTrash command option and make sure the trash
  // directory has sticky bit.
  String[] provisionTrashArgv = new String[] { "-provisionTrash", "-path",
      zone1.toUri().getPath() };
  cryptoAdmin.run(provisionTrashArgv);
  assertTrue(fsWrapper.exists(trashDir));
  FileStatus trashFileStatus = fsWrapper.getFileStatus(trashDir);
  assertTrue(trashFileStatus.getPermission().getStickyBit());
}
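A rough standalone sketch of the same sticky-bit check outside the test harness, assuming a zone at /zones/zone1 that has already been provisioned with `hdfs crypto -provisionTrash` (the class and path names are illustrative).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper class; not part of Hadoop.
public class TrashStickyBitCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // The encryption zone root is illustrative; FileSystem.TRASH_PREFIX
    // is the ".Trash" directory name used inside the zone.
    Path zoneTrash = new Path("/zones/zone1", FileSystem.TRASH_PREFIX);
    FileStatus status = fs.getFileStatus(zoneTrash);
    // -provisionTrash is expected to have created this directory with the
    // sticky bit set, so every user can create a personal subdirectory but
    // cannot remove other users' trash.
    System.out.println(zoneTrash + " permission: " + status.getPermission()
        + ", sticky bit: " + status.getPermission().getStickyBit());
  }
}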
Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.
The class TestErasureCodingPolicyWithSnapshot, method testCopySnapshotWillNotPreserveErasureCodingPolicy.
/**
 * Test that copying a snapshot will not preserve its erasure coding policy
 * info.
 */
@Test(timeout = 120000)
public void testCopySnapshotWillNotPreserveErasureCodingPolicy()
    throws Exception {
  final int len = 1024;
  final Path ecDir = new Path("/ecdir");
  final Path ecFile = new Path(ecDir, "ecFile");
  fs.mkdirs(ecDir);
  fs.allowSnapshot(ecDir);
  // set erasure coding policy
  fs.setErasureCodingPolicy(ecDir, sysDefaultPolicy.getName());
  DFSTestUtil.createFile(fs, ecFile, len, (short) 1, 0xFEED);
  final Path snap1 = fs.createSnapshot(ecDir, "snap1");
  Path snap1Copy = new Path(ecDir.toString() + "-copy");
  final Path snap1CopyECDir = new Path("/ecdir-copy");
  String[] argv = new String[] { "-cp", "-px", snap1.toUri().toString(),
      snap1Copy.toUri().toString() };
  int ret = ToolRunner.run(new FsShell(conf), argv);
  assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);
  assertNull("Got unexpected erasure coding policy",
      fs.getErasureCodingPolicy(snap1CopyECDir));
  assertEquals("Got unexpected erasure coding policy", sysDefaultPolicy,
      fs.getErasureCodingPolicy(snap1));
}
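A hedged standalone sketch of the same check: copy a snapshot with `-cp -px` and then ask the DistributedFileSystem for the erasure coding policy of the copy. The class name and paths are illustrative, and the snapshot path assumes the usual <dir>/.snapshot/<name> layout.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical helper class; not part of Hadoop.
public class CopySnapshotEcCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    // Source snapshot and destination directory (illustrative paths).
    Path snapshot = new Path("/ecdir/.snapshot/snap1");
    Path copy = new Path("/ecdir-copy");
    // "-p" with the "x" flag asks cp to preserve xattrs, yet the erasure
    // coding policy of the source directory is still not carried over to
    // the copy, which is what the test above asserts.
    int ret = ToolRunner.run(new FsShell(conf), new String[] {
        "-cp", "-px", snapshot.toString(), copy.toString() });
    System.out.println("cp exit code: " + ret);
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(copy);
    // Expected to print null: the copy is a plain replicated directory.
    System.out.println("EC policy on copy: " + policy);
  }
}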
Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.
The class TestUnderReplicatedBlocks, method testSetrepIncWithUnderReplicatedBlocks.
// 1 min timeout
@Test(timeout = 60000)
public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final short REPLICATION_FACTOR = 2;
  final String FILE_NAME = "/testFile";
  final Path FILE_PATH = new Path(FILE_NAME);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(REPLICATION_FACTOR + 1).build();
  try {
    // create a file with one block with a replication factor of 2
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
    DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
    // remove one replica from the blocksMap so block becomes under-replicated
    // but the block does not get put into the under-replicated blocks queue
    final BlockManager bm = cluster.getNamesystem().getBlockManager();
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
    DatanodeDescriptor dn = bm.blocksMap.getStorages(b.getLocalBlock())
        .iterator().next().getDatanodeDescriptor();
    bm.addToInvalidates(b.getLocalBlock(), dn);
    // Compute the invalidate work in NN, and trigger the heartbeat from DN
    BlockManagerTestUtil.computeAllPendingWork(bm);
    DataNodeTestUtils.triggerHeartbeat(cluster.getDataNode(dn.getIpcPort()));
    // Wait to make sure the DataNode receives the deletion request
    Thread.sleep(5000);
    // Remove the record from blocksMap
    bm.blocksMap.removeNode(b.getLocalBlock(), dn);
    // increment this file's replication factor
    FsShell shell = new FsShell(conf);
    assertEquals(0, shell.run(new String[] { "-setrep", "-w",
        Integer.toString(1 + REPLICATION_FACTOR), FILE_NAME }));
  } finally {
    cluster.shutdown();
  }
}
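Outside the MiniDFSCluster harness, the same `-setrep -w` call can be issued against a running cluster. The sketch below is an assumed standalone example (class name, target replication, and file path are illustrative).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical helper class; not part of Hadoop.
public class SetrepExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // "-setrep -w" changes the replication factor and then blocks until
    // every block of the file actually reaches the new replica count,
    // which is why the test above can use it to exercise re-replication
    // of an under-replicated block.
    int exitCode = ToolRunner.run(conf, new FsShell(),
        new String[] { "-setrep", "-w", "3", "/testFile" });
    System.exit(exitCode);
  }
}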