
Example 21 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

The class JobHistoryCopyService, method getPreviousJobHistoryFileStream:

public static FSDataInputStream getPreviousJobHistoryFileStream(Configuration conf, ApplicationAttemptId applicationAttemptId) throws IOException {
    FSDataInputStream in = null;
    Path historyFile = null;
    String jobId = TypeConverter.fromYarn(applicationAttemptId.getApplicationId()).toString();
    String jobhistoryDir = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId);
    Path histDirPath = FileContext.getFileContext(conf).makeQualified(new Path(jobhistoryDir));
    FileContext fc = FileContext.getFileContext(histDirPath.toUri(), conf);
    // read the previous history file
    historyFile = fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(histDirPath, jobId, (applicationAttemptId.getAttemptId() - 1)));
    LOG.info("History file is at " + historyFile);
    in = fc.open(historyFile);
    return in;
}
Also used : Path(org.apache.hadoop.fs.Path) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream) FileContext(org.apache.hadoop.fs.FileContext)
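The method above qualifies the staging directory against the default file system and then binds a second FileContext to that path's scheme and authority before opening the file. The standalone sketch below isolates that pattern; the class name FileContextOpenSketch and the openQualified helper are illustrative, not part of Hadoop.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class FileContextOpenSketch {

    // Qualify a raw path string against the default file system, obtain a
    // FileContext bound to the resulting scheme/authority, and open the file.
    public static FSDataInputStream openQualified(Configuration conf, String pathStr)
            throws IOException {
        Path qualified = FileContext.getFileContext(conf).makeQualified(new Path(pathStr));
        FileContext fc = FileContext.getFileContext(qualified.toUri(), conf);
        return fc.open(qualified);
    }
}

Binding the second FileContext to the qualified URI matters when the staging directory lives on a different file system (for example HDFS) than the process's default one.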

Example 22 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

The class TestPermissionSymlinks, method doRenameSrcNotWritableFC:

private void doRenameSrcNotWritableFC() throws Exception {
    // Rename the link when the target and parent are not writable
    try {
        user.doAs(new PrivilegedExceptionAction<Object>() {

            @Override
            public Object run() throws IOException {
                FileContext myfc = FileContext.getFileContext(conf);
                Path newlink = new Path(targetParent, "newlink");
                myfc.rename(link, newlink, Rename.NONE);
                return null;
            }
        });
        fail("Renamed link even though link's parent is not writable!");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Permission denied", e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) IOException(java.io.IOException) FileContext(org.apache.hadoop.fs.FileContext)
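The test wraps the rename in UserGroupInformation.doAs so that the permission check runs against the test user rather than the JVM's login user, and then asserts that the rename fails with "Permission denied". A minimal sketch of the same doAs-plus-rename pattern follows; the class name RenameAsUserSketch and its parameters are illustrative.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class RenameAsUserSketch {

    // Perform a rename through FileContext while running as another user;
    // permission checks apply to that user, not to the JVM's login user.
    public static void renameAs(final Configuration conf, String user,
            final Path src, final Path dst) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
        ugi.doAs(new PrivilegedExceptionAction<Void>() {

            @Override
            public Void run() throws IOException {
                FileContext fc = FileContext.getFileContext(conf);
                fc.rename(src, dst, Rename.NONE);
                return null;
            }
        });
    }
}

In the test itself the rename is expected to fail, so the caught IOException's message is checked for "Permission denied" via GenericTestUtils.assertExceptionContains.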

Example 23 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

The class TestLocalContainerLauncher, method testRenameMapOutputForReduce:

@Test
public void testRenameMapOutputForReduce() throws Exception {
    final JobConf conf = new JobConf();
    final MROutputFiles mrOutputFiles = new MROutputFiles();
    mrOutputFiles.setConf(conf);
    // make sure both dirs are distinct
    conf.set(MRConfig.LOCAL_DIR, localDirs[0].toString());
    final Path mapOut = mrOutputFiles.getOutputFileForWrite(1);
    conf.set(MRConfig.LOCAL_DIR, localDirs[1].toString());
    final Path mapOutIdx = mrOutputFiles.getOutputIndexFileForWrite(1);
    Assert.assertNotEquals("Paths must be different!", mapOut.getParent(), mapOutIdx.getParent());
    // make both dirs part of LOCAL_DIR
    conf.setStrings(MRConfig.LOCAL_DIR, localDirs);
    final FileContext lfc = FileContext.getLocalFSFileContext(conf);
    lfc.create(mapOut, EnumSet.of(CREATE)).close();
    lfc.create(mapOutIdx, EnumSet.of(CREATE)).close();
    final JobId jobId = MRBuilderUtils.newJobId(12345L, 1, 2);
    final TaskId tid = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
    final TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 0);
    LocalContainerLauncher.renameMapOutputForReduce(conf, taid, mrOutputFiles);
}
Also used : Path(org.apache.hadoop.fs.Path) TaskId(org.apache.hadoop.mapreduce.v2.api.records.TaskId) TaskAttemptId(org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId) FileContext(org.apache.hadoop.fs.FileContext) JobId(org.apache.hadoop.mapreduce.v2.api.records.JobId) Test(org.junit.Test)
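The test uses the local FileContext to create empty map output and index files before exercising renameMapOutputForReduce. That file-creation step on its own looks like the sketch below; the class and method names are illustrative, and the note about parent directories reflects FileContext's default behavior as I understand it.

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class LocalCreateSketch {

    // Create an empty file on the local file system through FileContext.
    // Unlike FileSystem.create, FileContext does not create missing parent
    // directories by default; pass Options.CreateOpts.createParent() if needed.
    public static void touch(Path file) throws IOException {
        FileContext lfc = FileContext.getLocalFSFileContext();
        lfc.create(file, EnumSet.of(CreateFlag.CREATE)).close();
    }
}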

Example 24 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

The class ViewFsBaseTest, method testOwnerForInternalDir:

@Test
public void testOwnerForInternalDir() throws IOException, InterruptedException, URISyntaxException {
    final UserGroupInformation userUgi = UserGroupInformation.createUserForTesting("user@HADOOP.COM", new String[] { "hadoop" });
    userUgi.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws IOException, URISyntaxException {
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            String doAsUserName = ugi.getUserName();
            assertEquals(doAsUserName, "user@HADOOP.COM");
            FileContext viewFS = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
            FileStatus stat = viewFS.getFileStatus(new Path("/internalDir"));
            assertEquals(userUgi.getShortUserName(), stat.getOwner());
            return null;
        }
    });
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) FileContextTestHelper.checkFileStatus(org.apache.hadoop.fs.FileContextTestHelper.checkFileStatus) LocatedFileStatus(org.apache.hadoop.fs.LocatedFileStatus) IOException(java.io.IOException) URISyntaxException(java.net.URISyntaxException) Matchers.anyString(org.mockito.Matchers.anyString) FileContext(org.apache.hadoop.fs.FileContext) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) Test(org.junit.Test)
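The test acquires a FileContext for the ViewFs client-side mount table via FsConstants.VIEWFS_URI and reads the owner from a FileStatus. Stripped of the doAs scaffolding, the lookup reduces to the sketch below; the class and method names are illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;

public class ViewFsOwnerSketch {

    // Look up the owner of a path through the viewfs:// client-side mount table.
    public static String ownerOf(Configuration conf, String path) throws IOException {
        FileContext viewFs = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
        FileStatus stat = viewFs.getFileStatus(new Path(path));
        return stat.getOwner();
    }
}

The assertion in the test checks that an internal (mount point) directory is reported as owned by the user who accessed it, which is why stat.getOwner() is compared against userUgi.getShortUserName().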

Example 25 with FileContext

Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.

The class TestCreateEditsLog, method testCanLoadCreatedEditsLog:

/**
 * Tests that an edits log created using CreateEditsLog is valid and can be
 * loaded successfully by a namenode.
 */
@Test(timeout = 60000)
public void testCanLoadCreatedEditsLog() throws Exception {
    // Format namenode.
    HdfsConfiguration conf = new HdfsConfiguration();
    File nameDir = new File(HDFS_DIR, "name");
    conf.set(DFS_NAMENODE_NAME_DIR_KEY, Util.fileAsURI(nameDir).toString());
    DFSTestUtil.formatNameNode(conf);
    // Call CreateEditsLog and move the resulting edits to the name dir.
    CreateEditsLog.main(new String[] { "-f", "1000", "0", "1", "-d", TEST_DIR.getAbsolutePath() });
    Path editsWildcard = new Path(TEST_DIR.getAbsolutePath(), "*");
    FileContext localFc = FileContext.getLocalFSFileContext();
    for (FileStatus edits : localFc.util().globStatus(editsWildcard)) {
        Path src = edits.getPath();
        Path dst = new Path(new File(nameDir, "current").getAbsolutePath(), src.getName());
        localFc.rename(src, dst);
    }
    // Start a namenode to try to load the edits.
    cluster = new MiniDFSCluster.Builder(conf).format(false).manageNameDfsDirs(false).waitSafeMode(false).build();
    cluster.waitClusterUp();
    // Test successful, because no exception was thrown.
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) File(java.io.File) FileContext(org.apache.hadoop.fs.FileContext) Test(org.junit.Test)
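The loop that moves the generated edits files uses FileContext.util().globStatus to expand a wildcard and FileContext.rename to move each match into the name directory. That glob-and-move step in isolation looks like the sketch below; the class and method names are illustrative.

import java.io.IOException;

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;

public class GlobMoveSketch {

    // Move every local file matching a glob pattern into a destination directory,
    // keeping each file's base name.
    public static void moveMatching(String glob, Path destDir) throws IOException {
        FileContext lfc = FileContext.getLocalFSFileContext();
        for (FileStatus status : lfc.util().globStatus(new Path(glob))) {
            Path src = status.getPath();
            lfc.rename(src, new Path(destDir, src.getName()));
        }
    }
}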

Aggregations

FileContext (org.apache.hadoop.fs.FileContext): 84
Path (org.apache.hadoop.fs.Path): 71
Test (org.junit.Test): 34
Configuration (org.apache.hadoop.conf.Configuration): 33
IOException (java.io.IOException): 29
File (java.io.File): 16
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 14
FileStatus (org.apache.hadoop.fs.FileStatus): 13
HashMap (java.util.HashMap): 12
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
ArrayList (java.util.ArrayList): 9
FileSystem (org.apache.hadoop.fs.FileSystem): 8
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource): 8
ExecutorService (java.util.concurrent.ExecutorService): 7
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 7
URISyntaxException (java.net.URISyntaxException): 6
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 6
ExecutionException (java.util.concurrent.ExecutionException): 6
Future (java.util.concurrent.Future): 6
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 6