Example 1 with FsShell

use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

From the class Gridmix, method writeInputData:

/**
   * Write random bytes at the path <inputDir> if needed.
   * @see org.apache.hadoop.mapred.gridmix.GenerateData
   * @return exit status
   */
protected int writeInputData(long genbytes, Path inputDir) throws IOException, InterruptedException {
    if (genbytes > 0) {
        final Configuration conf = getConf();
        if (inputDir.getFileSystem(conf).exists(inputDir)) {
            LOG.error("Gridmix input data directory " + inputDir + " already exists when -generate option is used.\n");
            return STARTUP_FAILED_ERROR;
        }
        // configure the compression ratio if needed
        CompressionEmulationUtil.setupDataGeneratorConfig(conf);
        final GenerateData genData = new GenerateData(conf, inputDir, genbytes);
        LOG.info("Generating " + StringUtils.humanReadableInt(genbytes) + " of test data...");
        launchGridmixJob(genData);
        FsShell shell = new FsShell(conf);
        try {
            LOG.info("Changing the permissions for inputPath " + inputDir.toString());
            shell.run(new String[] { "-chmod", "-R", "777", inputDir.toString() });
        } catch (Exception e) {
            LOG.error("Couldnt change the file permissions ", e);
            throw new IOException(e);
        }
        LOG.info("Input data generation successful.");
    }
    return 0;
}
Also used : FsShell(org.apache.hadoop.fs.FsShell) Configuration(org.apache.hadoop.conf.Configuration) IOException(java.io.IOException)
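
Note that the call above ignores the exit status returned by shell.run. Below is a minimal sketch of the same chmod invocation with the exit status checked, using ToolRunner since FsShell implements Tool; the helper name chmodRecursive is illustrative and not part of Gridmix.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ToolRunner;

public class FsShellChmodExample {

    // Hypothetical helper: recursively chmod a directory and fail fast on a non-zero exit code.
    static void chmodRecursive(Configuration conf, Path dir, String mode) throws IOException {
        FsShell shell = new FsShell(conf);
        try {
            // ToolRunner.run invokes shell.run(args) and returns its exit status; 0 means success.
            int exitCode = ToolRunner.run(shell, new String[] { "-chmod", "-R", mode, dir.toString() });
            if (exitCode != 0) {
                throw new IOException("chmod -R " + mode + " " + dir + " exited with code " + exitCode);
            }
        } catch (IOException e) {
            throw e;
        } catch (Exception e) {
            throw new IOException("Could not change permissions on " + dir, e);
        }
    }
}

In the Gridmix snippet this pattern would replace the bare shell.run call inside the try block.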

Example 2 with FsShell

use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

From the class TestHadoopArchives, method testSingleFile:

@Test
public void testSingleFile() throws Exception {
    final Path sub1 = new Path(inputPath, "dir1");
    fs.mkdirs(sub1);
    String singleFileName = "a";
    createFile(inputPath, fs, sub1.getName(), singleFileName);
    final FsShell shell = new FsShell(conf);
    final List<String> originalPaths = lsr(shell, sub1.toString());
    System.out.println("originalPaths: " + originalPaths);
    // make the archive:
    final String fullHarPathStr = makeArchive(sub1, singleFileName);
    // compare results:
    final List<String> harPaths = lsr(shell, fullHarPathStr);
    Assert.assertEquals(originalPaths, harPaths);
}
Also used : Path(org.apache.hadoop.fs.Path) FsShell(org.apache.hadoop.fs.FsShell) Test(org.junit.Test)
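
The lsr helper used above is defined elsewhere in TestHadoopArchives. A plausible sketch is shown below, assuming it captures the output of a recursive listing by temporarily redirecting System.out while FsShell runs; the real helper may differ in details, for example by using the deprecated -lsr switch.

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.fs.FsShell;

public class LsrExample {

    // Assumed sketch of an lsr-style helper: run a recursive listing and return the printed lines.
    static List<String> lsr(FsShell shell, String dir) throws Exception {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        PrintStream capture = new PrintStream(bytes, true, "UTF-8");
        PrintStream oldOut = System.out;
        System.setOut(capture);
        try {
            // FsShell prints listing results to System.out; a non-zero exit code means the listing failed.
            int exitCode = shell.run(new String[] { "-ls", "-R", dir });
            if (exitCode != 0) {
                throw new IllegalStateException("ls -R " + dir + " failed with exit code " + exitCode);
            }
        } finally {
            System.setOut(oldOut);
            capture.close();
        }
        List<String> lines = new ArrayList<>(Arrays.asList(bytes.toString("UTF-8").split("\n")));
        lines.removeIf(String::isEmpty);
        return lines;
    }
}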

Example 3 with FsShell

use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

From the class TestHadoopArchives, method testRelativePathWitRepl:

@Test
public void testRelativePathWitRepl() throws Exception {
    final Path sub1 = new Path(inputPath, "dir1");
    fs.mkdirs(sub1);
    createFile(inputPath, fs, sub1.getName(), "a");
    final FsShell shell = new FsShell(conf);
    final List<String> originalPaths = lsr(shell, "input");
    System.out.println("originalPaths: " + originalPaths);
    // make the archive:
    final String fullHarPathStr = makeArchiveWithRepl();
    // compare results:
    final List<String> harPaths = lsr(shell, fullHarPathStr);
    Assert.assertEquals(originalPaths, harPaths);
}
Also used : Path(org.apache.hadoop.fs.Path) FsShell(org.apache.hadoop.fs.FsShell) Test(org.junit.Test)
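
The makeArchive and makeArchiveWithRepl helpers are also test utilities not shown here. Below is a rough sketch of archive creation with the standard org.apache.hadoop.tools.HadoopArchives tool, under the assumption that the helper drives it through ToolRunner; the archive name, paths, and returned URI layout are illustrative.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.tools.HadoopArchives;
import org.apache.hadoop.util.ToolRunner;

public class MakeArchiveExample {

    // Assumed sketch: archive <parent>/<relGlob> into <dest>/<harName> and return the har path.
    static String makeArchive(FileSystem fs, Configuration conf, Path parent,
            String relGlob, Path dest, String harName) throws Exception {
        String[] args = {
            "-archiveName", harName,          // name of the archive, e.g. "foo.har"
            "-p", parent.toUri().getPath(),   // parent directory of the paths being archived
            relGlob,                          // relative path or glob under the parent
            dest.toString()                   // directory where the archive is written
        };
        int exitCode = ToolRunner.run(conf, new HadoopArchives(conf), args);
        if (exitCode != 0) {
            throw new IllegalStateException("archive creation failed with exit code " + exitCode);
        }
        // The archive contents are read back through the layered har filesystem, whose URI
        // embeds the underlying scheme and authority: har://<scheme>-<host>:<port>/<dest>/<harName>.
        URI uri = fs.getUri();
        return "har://" + uri.getScheme() + "-" + uri.getHost() + ":" + uri.getPort()
            + dest.toUri().getPath() + Path.SEPARATOR + harName;
    }
}

The returned string plays the role of fullHarPathStr in the tests above, so the archive listing can be compared against the original directory listing.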

Example 4 with FsShell

use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

From the class TestDistCpSyncReverseBase, method testSync:

/**
   * Test the basic functionality.
   */
@Test
public void testSync() throws Exception {
    if (isSrcNotSameAsTgt) {
        initData(source);
    }
    initData(target);
    enableAndCreateFirstSnapshot();
    final FsShell shell = new FsShell(conf);
    lsrSource("Before source: ", shell, source);
    lsr("Before target: ", shell, target);
    // make changes under target
    int numDeletedModified = changeData(target);
    createSecondSnapshotAtTarget();
    SnapshotDiffReport report = dfs.getSnapshotDiffReport(target, "s2", "s1");
    System.out.println(report);
    DistCpSync distCpSync = new DistCpSync(options, conf);
    lsr("Before sync target: ", shell, target);
    // do the sync
    Assert.assertTrue(distCpSync.sync());
    lsr("After sync target: ", shell, target);
    // make sure the source path has been updated to the snapshot path
    final Path spath = new Path(source, HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + "s1");
    Assert.assertEquals(spath, options.getSourcePaths().get(0));
    // build copy listing
    final Path listingPath = new Path("/tmp/META/fileList.seq");
    CopyListing listing = new SimpleCopyListing(conf, new Credentials(), distCpSync);
    listing.buildListing(listingPath, options);
    Map<Text, CopyListingFileStatus> copyListing = getListing(listingPath);
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(conf, null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context<Text, CopyListingFileStatus, Text, Text> context = stubContext.getContext();
    // Enable append
    context.getConfiguration().setBoolean(DistCpOptionSwitch.APPEND.getConfigLabel(), true);
    copyMapper.setup(context);
    for (Map.Entry<Text, CopyListingFileStatus> entry : copyListing.entrySet()) {
        copyMapper.map(entry.getKey(), entry.getValue(), context);
    }
    lsrSource("After mapper source: ", shell, source);
    lsr("After mapper target: ", shell, target);
    // verify that we only list modified and created files/directories
    Assert.assertEquals(numDeletedModified, copyListing.size());
    // verify that we only copied new appended data of f2 and the new file f1
    Assert.assertEquals(blockSize * 3, stubContext.getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED).getValue());
    // verify the source and target now has the same structure
    verifyCopy(dfs.getFileStatus(spath), dfs.getFileStatus(target), false);
}
Also used : Path(org.apache.hadoop.fs.Path) Text(org.apache.hadoop.io.Text) FsShell(org.apache.hadoop.fs.FsShell) CopyMapper(org.apache.hadoop.tools.mapred.CopyMapper) Mapper(org.apache.hadoop.mapreduce.Mapper) SnapshotDiffReport(org.apache.hadoop.hdfs.protocol.SnapshotDiffReport) HashMap(java.util.HashMap) Map(java.util.Map) Credentials(org.apache.hadoop.security.Credentials) Test(org.junit.Test)
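
The snapshot helpers enableAndCreateFirstSnapshot and createSecondSnapshotAtTarget come from the test base class and are not shown. Below is a minimal sketch of the DistributedFileSystem calls they presumably wrap, with snapshot names matching the diff request in the test above; treat the helper mapping as an assumption.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;

public class SnapshotSetupExample {

    // Assumed sketch: allow snapshots on a directory and take the "s1" baseline snapshot.
    static void enableAndCreateFirstSnapshot(DistributedFileSystem dfs, Path dir) throws Exception {
        dfs.allowSnapshot(dir);          // the directory must be snapshottable before createSnapshot
        dfs.createSnapshot(dir, "s1");   // baseline state that the sync will restore toward
    }

    // Assumed sketch: take the "s2" snapshot after the test has modified data under the directory.
    static SnapshotDiffReport createSecondSnapshotAndDiff(DistributedFileSystem dfs, Path dir) throws Exception {
        dfs.createSnapshot(dir, "s2");
        // For the reverse-sync test the diff is computed from s2 back to s1.
        return dfs.getSnapshotDiffReport(dir, "s2", "s1");
    }
}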

Example 5 with FsShell

use of org.apache.hadoop.fs.FsShell in project hadoop by apache.

From the class TestEncryptionZones, method testRootDirEZTrash:

@Test
public void testRootDirEZTrash() throws Exception {
    final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    final String currentUser = UserGroupInformation.getCurrentUser().getShortUserName();
    final Path rootDir = new Path("/");
    dfsAdmin.createEncryptionZone(rootDir, TEST_KEY, NO_TRASH);
    final Path encFile = new Path("/encFile");
    final int len = 8192;
    DFSTestUtil.createFile(fs, encFile, len, (short) 1, 0xFEED);
    Configuration clientConf = new Configuration(conf);
    clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
    FsShell shell = new FsShell(clientConf);
    verifyShellDeleteWithTrash(shell, encFile);
    // Trash path should be consistent
    // if root path is an encryption zone
    Path encFileCurrentTrash = shell.getCurrentTrashDir(encFile);
    Path rootDirCurrentTrash = shell.getCurrentTrashDir(rootDir);
    assertEquals("Root trash should be equal with ezFile trash", encFileCurrentTrash, rootDirCurrentTrash);
    // Use webHDFS client to test trash root path
    final WebHdfsFileSystem webFS = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
    final Path expectedTrash = new Path(rootDir, new Path(FileSystem.TRASH_PREFIX, currentUser));
    Path webHDFSTrash = webFS.getTrashRoot(encFile);
    assertEquals(expectedTrash.toUri().getPath(), webHDFSTrash.toUri().getPath());
    assertEquals(encFileCurrentTrash.getParent().toUri().getPath(), webHDFSTrash.toUri().getPath());
}
Also used : Path(org.apache.hadoop.fs.Path) FsShell(org.apache.hadoop.fs.FsShell) Configuration(org.apache.hadoop.conf.Configuration) HdfsAdmin(org.apache.hadoop.hdfs.client.HdfsAdmin) Mockito.anyString(org.mockito.Mockito.anyString) WebHdfsFileSystem(org.apache.hadoop.hdfs.web.WebHdfsFileSystem) Test(org.junit.Test)
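
verifyShellDeleteWithTrash is another helper defined elsewhere in TestEncryptionZones. Below is a hedged sketch of what such a check typically does, assuming that with fs.trash.interval set to a non-zero value -rm moves the file into the current trash directory instead of deleting it outright; the fs parameter stands in for the test's FileSystem field.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;

public class TrashVerifyExample {

    // Assumed sketch: delete a path through FsShell and check that it landed in the trash.
    static void verifyShellDeleteWithTrash(FileSystem fs, FsShell shell, Path path) throws Exception {
        // Expected location after deletion: <current trash dir>/<original absolute path>.
        Path trashedFile = new Path(shell.getCurrentTrashDir(path) + "/" + path);
        // -rm moves to trash rather than deleting, because fs.trash.interval > 0 in the client conf.
        int exitCode = shell.run(new String[] { "-rm", "-r", path.toString() });
        assertEquals("rm should succeed", 0, exitCode);
        assertFalse("Original path should be gone", fs.exists(path));
        assertTrue("Deleted file should be in trash: " + trashedFile, fs.exists(trashedFile));
    }
}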

Aggregations

FsShell (org.apache.hadoop.fs.FsShell): 44 usages
Path (org.apache.hadoop.fs.Path): 33 usages
Test (org.junit.Test): 30 usages
Configuration (org.apache.hadoop.conf.Configuration): 23 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 17 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 12 usages
IOException (java.io.IOException): 8 usages
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 6 usages
HdfsAdmin (org.apache.hadoop.hdfs.client.HdfsAdmin): 6 usages
PrintStream (java.io.PrintStream): 4 usages
Mockito.anyString (org.mockito.Mockito.anyString): 4 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 3 usages
MetastoreUnitTest (org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest): 3 usages
Text (org.apache.hadoop.io.Text): 3 usages
File (java.io.File): 2 usages
FileNotFoundException (java.io.FileNotFoundException): 2 usages
URISyntaxException (java.net.URISyntaxException): 2 usages
HashMap (java.util.HashMap): 2 usages
Map (java.util.Map): 2 usages
ExecutionException (java.util.concurrent.ExecutionException): 2 usages