Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.
Class Gridmix, method writeInputData.
/**
 * Write random bytes at the path <inputDir> if needed.
 * @see org.apache.hadoop.mapred.gridmix.GenerateData
 * @return exit status
 */
protected int writeInputData(long genbytes, Path inputDir)
    throws IOException, InterruptedException {
  if (genbytes > 0) {
    final Configuration conf = getConf();
    if (inputDir.getFileSystem(conf).exists(inputDir)) {
      LOG.error("Gridmix input data directory " + inputDir
          + " already exists when -generate option is used.\n");
      return STARTUP_FAILED_ERROR;
    }
    // configure the compression ratio if needed
    CompressionEmulationUtil.setupDataGeneratorConfig(conf);
    final GenerateData genData = new GenerateData(conf, inputDir, genbytes);
    LOG.info("Generating " + StringUtils.humanReadableInt(genbytes)
        + " of test data...");
    launchGridmixJob(genData);
    FsShell shell = new FsShell(conf);
    try {
      LOG.info("Changing the permissions for inputPath " + inputDir.toString());
      shell.run(new String[] { "-chmod", "-R", "777", inputDir.toString() });
    } catch (Exception e) {
      LOG.error("Couldnt change the file permissions ", e);
      throw new IOException(e);
    }
    LOG.info("Input data generation successful.");
  }
  return 0;
}
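Outside of Gridmix, the same pattern works on its own: construct FsShell with a Configuration and hand it the same argument array the hadoop fs CLI would receive. A minimal sketch, with a hypothetical input path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;

public class FsShellChmodExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical directory; substitute a path that exists on the target FileSystem.
    String inputDir = "/tmp/gridmix-input";
    Configuration conf = new Configuration();
    FsShell shell = new FsShell(conf);
    // FsShell.run accepts the same arguments as the `hadoop fs` command line;
    // a non-zero return value means the command failed.
    int exitCode = shell.run(new String[] { "-chmod", "-R", "777", inputDir });
    if (exitCode != 0) {
      throw new RuntimeException("chmod failed with exit code " + exitCode);
    }
  }
}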
Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.
Class TestHadoopArchives, method testSingleFile.
@Test
public void testSingleFile() throws Exception {
  final Path sub1 = new Path(inputPath, "dir1");
  fs.mkdirs(sub1);
  String singleFileName = "a";
  createFile(inputPath, fs, sub1.getName(), singleFileName);
  final FsShell shell = new FsShell(conf);
  final List<String> originalPaths = lsr(shell, sub1.toString());
  System.out.println("originalPaths: " + originalPaths);
  // make the archive:
  final String fullHarPathStr = makeArchive(sub1, singleFileName);
  // compare results:
  final List<String> harPaths = lsr(shell, fullHarPathStr);
  Assert.assertEquals(originalPaths, harPaths);
}
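The lsr(shell, dir) call above is a utility of the test class rather than part of FsShell. A plausible sketch of such a helper, assuming it captures the listing FsShell writes to System.out and keeps only the path column (the exact parsing in the real test may differ):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.fs.FsShell;

public class LsrHelper {
  /** Run a recursive listing through FsShell and return the sorted path column. */
  public static List<String> lsr(FsShell shell, String dir) throws Exception {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    PrintStream originalOut = System.out;
    // FsShell prints to System.out, so capture it for the duration of the command.
    System.setOut(new PrintStream(bytes, true, "UTF-8"));
    try {
      shell.run(new String[] { "-ls", "-R", dir });
    } finally {
      System.setOut(originalOut);
    }
    List<String> paths = new ArrayList<>();
    for (String line : bytes.toString("UTF-8").split("\n")) {
      String trimmed = line.trim();
      if (trimmed.isEmpty() || trimmed.startsWith("Found ")) {
        continue; // skip blank lines and any "Found N items" summary
      }
      // Listing lines end with the path, so keep the last whitespace-separated column.
      paths.add(trimmed.substring(trimmed.lastIndexOf(' ') + 1));
    }
    Collections.sort(paths);
    return paths;
  }
}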
Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.
Class TestHadoopArchives, method testRelativePathWitRepl.
@Test
public void testRelativePathWitRepl() throws Exception {
  final Path sub1 = new Path(inputPath, "dir1");
  fs.mkdirs(sub1);
  createFile(inputPath, fs, sub1.getName(), "a");
  final FsShell shell = new FsShell(conf);
  final List<String> originalPaths = lsr(shell, "input");
  System.out.println("originalPaths: " + originalPaths);
  // make the archive:
  final String fullHarPathStr = makeArchiveWithRepl();
  // compare results:
  final List<String> harPaths = lsr(shell, fullHarPathStr);
  Assert.assertEquals(originalPaths, harPaths);
}
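The makeArchive and makeArchiveWithRepl helpers used by these tests wrap the HadoopArchives tool. A rough sketch of creating an archive programmatically; the paths, archive name, and the -r replication flag are illustrative assumptions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.HadoopArchives;
import org.apache.hadoop.util.ToolRunner;

public class MakeArchiveExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical layout: archive /user/test/input/dir1 into
    // /user/test/archives/test.har with replication factor 2.
    String[] harArgs = {
        "-archiveName", "test.har",
        "-p", "/user/test/input",
        "-r", "2",
        "dir1",
        "/user/test/archives"
    };
    int exitCode = ToolRunner.run(conf, new HadoopArchives(conf), harArgs);
    if (exitCode != 0) {
      throw new RuntimeException("archive creation failed with exit code " + exitCode);
    }
    // The result can then be listed through the har:// scheme, e.g.
    // har:///user/test/archives/test.har/dir1, which is what the tests
    // compare against the original directory listing.
  }
}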
Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.
Class TestDistCpSyncReverseBase, method testSync.
/**
 * Test the basic functionality.
 */
@Test
public void testSync() throws Exception {
  if (isSrcNotSameAsTgt) {
    initData(source);
  }
  initData(target);
  enableAndCreateFirstSnapshot();
  final FsShell shell = new FsShell(conf);
  lsrSource("Before source: ", shell, source);
  lsr("Before target: ", shell, target);
  // make changes under target
  int numDeletedModified = changeData(target);
  createSecondSnapshotAtTarget();
  SnapshotDiffReport report = dfs.getSnapshotDiffReport(target, "s2", "s1");
  System.out.println(report);
  DistCpSync distCpSync = new DistCpSync(options, conf);
  lsr("Before sync target: ", shell, target);
  // do the sync
  Assert.assertTrue(distCpSync.sync());
  lsr("After sync target: ", shell, target);
  // make sure the source path has been updated to the snapshot path
  final Path spath = new Path(source,
      HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s1");
  Assert.assertEquals(spath, options.getSourcePaths().get(0));
  // build copy listing
  final Path listingPath = new Path("/tmp/META/fileList.seq");
  CopyListing listing = new SimpleCopyListing(conf, new Credentials(), distCpSync);
  listing.buildListing(listingPath, options);
  Map<Text, CopyListingFileStatus> copyListing = getListing(listingPath);
  CopyMapper copyMapper = new CopyMapper();
  StubContext stubContext = new StubContext(conf, null, 0);
  Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
      stubContext.getContext();
  // Enable append
  context.getConfiguration().setBoolean(
      DistCpOptionSwitch.APPEND.getConfigLabel(), true);
  copyMapper.setup(context);
  for (Map.Entry<Text, CopyListingFileStatus> entry : copyListing.entrySet()) {
    copyMapper.map(entry.getKey(), entry.getValue(), context);
  }
  lsrSource("After mapper source: ", shell, source);
  lsr("After mapper target: ", shell, target);
  // verify that we only list modified and created files/directories
  Assert.assertEquals(numDeletedModified, copyListing.size());
  // verify that we only copied new appended data of f2 and the new file f1
  Assert.assertEquals(blockSize * 3,
      stubContext.getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED).getValue());
  // verify the source and target now has the same structure
  verifyCopy(dfs.getFileStatus(spath), dfs.getFileStatus(target), false);
}
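The lsr and lsrSource calls in this test are debugging helpers of the test class, not FsShell methods. A minimal sketch of such a helper, assuming it only prints a labeled recursive listing:

import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;

public class ListingDebug {
  // Hypothetical helper mirroring the lsr(prefix, shell, path) calls above:
  // print a label, then let FsShell write the recursive listing to System.out.
  static void lsr(String prefix, FsShell shell, Path dir) throws Exception {
    System.out.println(prefix + dir);
    shell.run(new String[] { "-ls", "-R", dir.toString() });
  }
}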
Use of org.apache.hadoop.fs.FsShell in project hadoop by apache.
Class TestEncryptionZones, method testRootDirEZTrash.
@Test
public void testRootDirEZTrash() throws Exception {
  final HdfsAdmin dfsAdmin =
      new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
  final String currentUser =
      UserGroupInformation.getCurrentUser().getShortUserName();
  final Path rootDir = new Path("/");
  dfsAdmin.createEncryptionZone(rootDir, TEST_KEY, NO_TRASH);
  final Path encFile = new Path("/encFile");
  final int len = 8192;
  DFSTestUtil.createFile(fs, encFile, len, (short) 1, 0xFEED);
  Configuration clientConf = new Configuration(conf);
  clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
  FsShell shell = new FsShell(clientConf);
  verifyShellDeleteWithTrash(shell, encFile);
  // Trash path should be consistent
  // if root path is an encryption zone
  Path encFileCurrentTrash = shell.getCurrentTrashDir(encFile);
  Path rootDirCurrentTrash = shell.getCurrentTrashDir(rootDir);
  assertEquals("Root trash should be equal with ezFile trash",
      encFileCurrentTrash, rootDirCurrentTrash);
  // Use webHDFS client to test trash root path
  final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(
      conf, WebHdfsConstants.WEBHDFS_SCHEME);
  final Path expectedTrash =
      new Path(rootDir, new Path(FileSystem.TRASH_PREFIX, currentUser));
  Path webHDFSTrash = webFS.getTrashRoot(encFile);
  assertEquals(expectedTrash.toUri().getPath(), webHDFSTrash.toUri().getPath());
  assertEquals(encFileCurrentTrash.getParent().toUri().getPath(),
      webHDFSTrash.toUri().getPath());
}
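verifyShellDeleteWithTrash is another helper of the test class. A rough sketch of what such a check might do; the extra FileSystem parameter and the exact assertions are assumptions, but it relies only on FsShell calls shown above plus Path.mergePaths:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;

public class TrashDeleteCheck {
  /**
   * Delete a path through FsShell and assert it was moved into the current
   * trash directory instead of being removed outright. Requires the shell's
   * Configuration to have fs.trash.interval > 0, as in the test above.
   */
  static void verifyShellDeleteWithTrash(FsShell shell, Path path, FileSystem fs)
      throws Exception {
    Path trashDir = shell.getCurrentTrashDir(path);
    assertEquals("delete should succeed", 0,
        shell.run(new String[] { "-rm", "-r", path.toString() }));
    assertFalse("path should be gone from its original location", fs.exists(path));
    // Trash recreates the deleted path underneath the current trash directory.
    Path trashedPath = Path.mergePaths(trashDir, path);
    assertTrue("path should have been moved to trash", fs.exists(trashedPath));
  }
}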