Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
From class JobHistoryCopyService, method getPreviousJobHistoryFileStream.
public static FSDataInputStream getPreviousJobHistoryFileStream(Configuration conf, ApplicationAttemptId applicationAttemptId) throws IOException {
    FSDataInputStream in = null;
    Path historyFile = null;
    String jobId = TypeConverter.fromYarn(applicationAttemptId.getApplicationId()).toString();
    String jobhistoryDir = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId);
    Path histDirPath = FileContext.getFileContext(conf).makeQualified(new Path(jobhistoryDir));
    FileContext fc = FileContext.getFileContext(histDirPath.toUri(), conf);
    // read the previous history file
    historyFile = fc.makeQualified(JobHistoryUtils.getStagingJobHistoryFile(histDirPath, jobId, (applicationAttemptId.getAttemptId() - 1)));
    LOG.info("History file is at " + historyFile);
    in = fc.open(historyFile);
    return in;
}
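The method above resolves a qualified staging path and opens the previous attempt's history file through FileContext. A minimal standalone sketch of the same open-by-qualified-path pattern is shown below; the OpenExample class, the /tmp/history directory, and the job_1_attempt_0.jhist file name are placeholders introduced here, not part of Hadoop.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class OpenExample {

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Qualify the directory against the default file system, as the history service does.
        Path dir = FileContext.getFileContext(conf).makeQualified(new Path("/tmp/history"));
        // Bind a FileContext to the file system that owns the qualified path.
        FileContext fc = FileContext.getFileContext(dir.toUri(), conf);
        Path file = fc.makeQualified(new Path(dir, "job_1_attempt_0.jhist"));
        try (FSDataInputStream in = fc.open(file)) {
            System.out.println("first byte: " + in.read());
        }
    }
}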
Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
From class TestPermissionSymlinks, method doRenameSrcNotWritableFC.
private void doRenameSrcNotWritableFC() throws Exception {
    // Rename the link when the target and parent are not writable
    try {
        user.doAs(new PrivilegedExceptionAction<Object>() {

            @Override
            public Object run() throws IOException {
                FileContext myfc = FileContext.getFileContext(conf);
                Path newlink = new Path(targetParent, "newlink");
                myfc.rename(link, newlink, Rename.NONE);
                return null;
            }
        });
        fail("Renamed link even though link's parent is not writable!");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Permission denied", e);
    }
}
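The test above expects FileContext.rename to be rejected when the caller lacks write permission on the link's parent. A simplified sketch of the rename call outside the test harness might look like the following; the RenameExample class and the /tmp/rename-demo paths are placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;

public class RenameExample {

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileContext fc = FileContext.getFileContext(conf);
        Path src = new Path("/tmp/rename-demo/link");
        Path dst = new Path("/tmp/rename-demo/newlink");
        try {
            // Rename.NONE fails if the destination already exists; Rename.OVERWRITE would replace it.
            fc.rename(src, dst, Rename.NONE);
        } catch (IOException e) {
            // Without write permission on the source or destination parent,
            // the rename is rejected with a "Permission denied" error, as the test asserts.
            System.err.println("rename failed: " + e.getMessage());
        }
    }
}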
Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
From class TestLocalContainerLauncher, method testRenameMapOutputForReduce.
@Test
public void testRenameMapOutputForReduce() throws Exception {
    final JobConf conf = new JobConf();
    final MROutputFiles mrOutputFiles = new MROutputFiles();
    mrOutputFiles.setConf(conf);
    // make sure both dirs are distinct
    conf.set(MRConfig.LOCAL_DIR, localDirs[0].toString());
    final Path mapOut = mrOutputFiles.getOutputFileForWrite(1);
    conf.set(MRConfig.LOCAL_DIR, localDirs[1].toString());
    final Path mapOutIdx = mrOutputFiles.getOutputIndexFileForWrite(1);
    Assert.assertNotEquals("Paths must be different!", mapOut.getParent(), mapOutIdx.getParent());
    // make both dirs part of LOCAL_DIR
    conf.setStrings(MRConfig.LOCAL_DIR, localDirs);
    final FileContext lfc = FileContext.getLocalFSFileContext(conf);
    lfc.create(mapOut, EnumSet.of(CREATE)).close();
    lfc.create(mapOutIdx, EnumSet.of(CREATE)).close();
    final JobId jobId = MRBuilderUtils.newJobId(12345L, 1, 2);
    final TaskId tid = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
    final TaskAttemptId taid = MRBuilderUtils.newTaskAttemptId(tid, 0);
    LocalContainerLauncher.renameMapOutputForReduce(conf, taid, mrOutputFiles);
}
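The test above uses a local-filesystem FileContext to create the map output and index files before exercising renameMapOutputForReduce. A minimal sketch of creating a file through FileContext.getLocalFSFileContext is shown below; the LocalCreateExample class and the /tmp/local-create-demo path are placeholders, and CreateOpts.createParent() is added here so the parent directory is created if it is missing.

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;

public class LocalCreateExample {

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // A FileContext bound to the local file system, as in the test above.
        FileContext lfc = FileContext.getLocalFSFileContext(conf);
        Path out = new Path("/tmp/local-create-demo/file.out");
        // CREATE alone fails if the file already exists; add CreateFlag.OVERWRITE to replace it.
        try (FSDataOutputStream os = lfc.create(out, EnumSet.of(CreateFlag.CREATE), CreateOpts.createParent())) {
            os.writeUTF("hello");
        }
    }
}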
Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
From class ViewFsBaseTest, method testOwnerForInternalDir.
@Test
public void testOwnerForInternalDir() throws IOException, InterruptedException, URISyntaxException {
    final UserGroupInformation userUgi = UserGroupInformation.createUserForTesting("user@HADOOP.COM", new String[] { "hadoop" });
    userUgi.doAs(new PrivilegedExceptionAction<Object>() {

        @Override
        public Object run() throws IOException, URISyntaxException {
            UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            String doAsUserName = ugi.getUserName();
            assertEquals(doAsUserName, "user@HADOOP.COM");
            FileContext viewFS = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
            FileStatus stat = viewFS.getFileStatus(new Path("/internalDir"));
            assertEquals(userUgi.getShortUserName(), stat.getOwner());
            return null;
        }
    });
}
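The test above checks that the owner reported for a ViewFs internal (mount-point) directory is the current user. A standalone sketch of querying ViewFs through FileContext might look like the following; it assumes a single mount link added via ConfigUtil.addLink, and the ViewFsStatusExample class and the file:///tmp/viewfs-demo target are placeholders.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.viewfs.ConfigUtil;

public class ViewFsStatusExample {

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Mount /data in the view onto a local directory (placeholder target).
        ConfigUtil.addLink(conf, "/data", URI.create("file:///tmp/viewfs-demo"));
        FileContext viewFs = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
        // "/" is an internal (mount-point) directory served by ViewFs itself.
        FileStatus stat = viewFs.getFileStatus(new Path("/"));
        System.out.println("owner of internal dir: " + stat.getOwner());
    }
}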
Use of org.apache.hadoop.fs.FileContext in project hadoop by apache.
From class TestCreateEditsLog, method testCanLoadCreatedEditsLog.
/**
 * Tests that an edits log created using CreateEditsLog is valid and can be
 * loaded successfully by a namenode.
 */
@Test(timeout = 60000)
public void testCanLoadCreatedEditsLog() throws Exception {
    // Format namenode.
    HdfsConfiguration conf = new HdfsConfiguration();
    File nameDir = new File(HDFS_DIR, "name");
    conf.set(DFS_NAMENODE_NAME_DIR_KEY, Util.fileAsURI(nameDir).toString());
    DFSTestUtil.formatNameNode(conf);
    // Call CreateEditsLog and move the resulting edits to the name dir.
    CreateEditsLog.main(new String[] { "-f", "1000", "0", "1", "-d", TEST_DIR.getAbsolutePath() });
    Path editsWildcard = new Path(TEST_DIR.getAbsolutePath(), "*");
    FileContext localFc = FileContext.getLocalFSFileContext();
    for (FileStatus edits : localFc.util().globStatus(editsWildcard)) {
        Path src = edits.getPath();
        Path dst = new Path(new File(nameDir, "current").getAbsolutePath(), src.getName());
        localFc.rename(src, dst);
    }
    // Start a namenode to try to load the edits.
    cluster = new MiniDFSCluster.Builder(conf).format(false).manageNameDfsDirs(false).waitSafeMode(false).build();
    cluster.waitClusterUp();
    // Test successful, because no exception thrown.
}
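The test above globs the generated edits files on the local file system and moves them into the name directory with FileContext.rename. A minimal sketch of the same glob-and-rename pattern is shown below; the GlobMoveExample class and the /tmp/glob-demo directories are placeholders.

import java.io.IOException;

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class GlobMoveExample {

    public static void main(String[] args) throws IOException {
        FileContext localFc = FileContext.getLocalFSFileContext();
        // Expand a wildcard on the local file system, as the test does for the generated edits files.
        Path pattern = new Path("/tmp/glob-demo", "*");
        Path destDir = new Path("/tmp/glob-demo-moved");
        if (!localFc.util().exists(destDir)) {
            localFc.mkdir(destDir, FsPermission.getDirDefault(), true);
        }
        FileStatus[] matches = localFc.util().globStatus(pattern);
        if (matches != null) {
            for (FileStatus match : matches) {
                Path src = match.getPath();
                // Move each matched file under the destination directory, keeping its name.
                localFc.rename(src, new Path(destDir, src.getName()));
            }
        }
    }
}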