Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestFSImageWithSnapshot, method testSaveLoadImage.
/**
 * Testing steps:
 * <pre>
 * 1. Create/modify directories and files while snapshots are being taken.
 * 2. Dump the FSDirectory tree of the namesystem.
 * 3. Save the namesystem to a temp file (FSImage saving).
 * 4. Restart the cluster and format the namesystem.
 * 5. Load the namesystem from the temp file (FSImage loading).
 * 6. Dump the FSDirectory again and compare the two dumped strings.
 * </pre>
 */
@Test
public void testSaveLoadImage() throws Exception {
  int s = 0;
  // make changes to the namesystem
  hdfs.mkdirs(dir);
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s" + ++s);
  Path sub1 = new Path(dir, "sub1");
  hdfs.mkdirs(sub1);
  hdfs.setPermission(sub1, new FsPermission((short) 0777));
  Path sub11 = new Path(sub1, "sub11");
  hdfs.mkdirs(sub11);
  checkImage(s);
  hdfs.createSnapshot(dir, "s" + ++s);
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, (short) 1, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, (short) 1, seed);
  checkImage(s);
  hdfs.createSnapshot(dir, "s" + ++s);
  Path sub2 = new Path(dir, "sub2");
  Path sub2file1 = new Path(sub2, "sub2file1");
  Path sub2file2 = new Path(sub2, "sub2file2");
  DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, (short) 1, seed);
  DFSTestUtil.createFile(hdfs, sub2file2, BLOCKSIZE, (short) 1, seed);
  checkImage(s);
  hdfs.createSnapshot(dir, "s" + ++s);
  hdfs.setReplication(sub1file1, (short) 1);
  hdfs.delete(sub1file2, true);
  hdfs.setOwner(sub2, "dr.who", "unknown");
  hdfs.delete(sub2file1, true);
  checkImage(s);
  hdfs.createSnapshot(dir, "s" + ++s);
  Path sub1_sub2file2 = new Path(sub1, "sub2file2");
  hdfs.rename(sub2file2, sub1_sub2file2);
  hdfs.rename(sub1file1, sub2file1);
  checkImage(s);
  hdfs.rename(sub2file1, sub2file2);
  checkImage(s);
}
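The test above relies on the octal short constructor, new FsPermission((short) 0777), passed to hdfs.setPermission. As a minimal standalone sketch (the class name FsPermissionSketch is hypothetical, not part of the test), the same mode can be built in several equivalent ways from the public FsPermission API:

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class FsPermissionSketch {
  public static void main(String[] args) {
    // Three equivalent ways to express rwxrwxrwx (0777).
    FsPermission fromOctal = new FsPermission((short) 0777);
    FsPermission fromActions = new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL);
    FsPermission fromSymbolic = FsPermission.valueOf("-rwxrwxrwx");

    System.out.println(fromOctal.equals(fromActions));              // true
    System.out.println(fromOctal.equals(fromSymbolic));             // true
    System.out.println(Integer.toOctalString(fromOctal.toShort())); // 777
  }
}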
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestRenameWithSnapshots, method testRenameFromSDir2NonSDir.
@Test(timeout = 300000)
public void testRenameFromSDir2NonSDir() throws Exception {
  final String dirStr = "/testRenameWithSnapshot";
  final String abcStr = dirStr + "/abc";
  final Path abc = new Path(abcStr);
  hdfs.mkdirs(abc, new FsPermission((short) 0777));
  hdfs.allowSnapshot(abc);
  final Path foo = new Path(abc, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  hdfs.createSnapshot(abc, "s0");
  try {
    hdfs.rename(abc, new Path(dirStr, "tmp"));
    fail("Expect exception since " + abc + " is snapshottable and already has snapshots");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains(
        abcStr + " is snapshottable and already has snapshots", e);
  }
  final String xyzStr = dirStr + "/xyz";
  final Path xyz = new Path(xyzStr);
  hdfs.mkdirs(xyz, new FsPermission((short) 0777));
  final Path bar = new Path(xyz, "bar");
  hdfs.rename(foo, bar);
  final INode fooRef = fsdir.getINode(
      SnapshotTestHelper.getSnapshotPath(abc, "s0", "foo").toString());
  Assert.assertTrue(fooRef.isReference());
  Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);
  final INodeReference.WithCount withCount =
      (INodeReference.WithCount) fooRef.asReference().getReferredINode();
  Assert.assertEquals(2, withCount.getReferenceCount());
  final INode barRef = fsdir.getINode(bar.toString());
  Assert.assertTrue(barRef.isReference());
  Assert.assertSame(withCount, barRef.asReference().getReferredINode());
  hdfs.delete(bar, false);
  Assert.assertEquals(1, withCount.getReferenceCount());
}
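The assertions above inspect NameNode internals (INodeReference.WithName/WithCount), but the snapshot copy of foo is also visible to ordinary clients through the ".snapshot" path convention that SnapshotTestHelper.getSnapshotPath builds. A hedged standalone sketch follows; the class name and the fs.defaultFS assumption are mine, not from the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SnapshotReadSketch {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS points at the HDFS cluster used by the test.
    FileSystem fs = FileSystem.get(new Configuration());
    Path abc = new Path("/testRenameWithSnapshot/abc");
    // Equivalent to SnapshotTestHelper.getSnapshotPath(abc, "s0", "foo"):
    // <snapshottable-dir>/.snapshot/<snapshot-name>/<child>
    Path fooInS0 = new Path(abc, HdfsConstants.DOT_SNAPSHOT_DIR + "/s0/foo");
    // The snapshot copy stays readable even though the live file was renamed
    // to /testRenameWithSnapshot/xyz/bar and the rename target later deleted.
    System.out.println(fs.getFileStatus(fooInS0));
  }
}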
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestNestedSnapshots, method testSnapshotName.
@Test(timeout = 300000)
public void testSnapshotName() throws Exception {
  final String dirStr = "/testSnapshotWithQuota/dir";
  final Path dir = new Path(dirStr);
  hdfs.mkdirs(dir, new FsPermission((short) 0777));
  hdfs.allowSnapshot(dir);
  // set namespace quota
  final int NS_QUOTA = 6;
  hdfs.setQuota(dir, NS_QUOTA, HdfsConstants.QUOTA_DONT_SET);
  // create object to use up the quota.
  final Path foo = new Path(dir, "foo");
  final Path f1 = new Path(foo, "f1");
  DFSTestUtil.createFile(hdfs, f1, BLOCKSIZE, REPLICATION, SEED);
  {
    // create a snapshot with default snapshot name
    final Path snapshotPath = hdfs.createSnapshot(dir);
    // check snapshot path and the default snapshot name
    final String snapshotName = snapshotPath.getName();
    Assert.assertTrue("snapshotName=" + snapshotName,
        Pattern.matches("s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",
            snapshotName));
    final Path parent = snapshotPath.getParent();
    Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR, parent.getName());
    Assert.assertEquals(dir, parent.getParent());
  }
}
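The regex asserted above implies a default snapshot name of the form "s" followed by a yyyyMMdd-HHmmss.SSS timestamp. The exact generator is internal to HDFS, so the format in this small JDK-only sketch is inferred from the test's pattern rather than quoted from the implementation:

import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.regex.Pattern;

public class DefaultSnapshotNameSketch {
  public static void main(String[] args) {
    // e.g. "s20230115-101530.123" (hypothetical sample value)
    String name = "s" + new SimpleDateFormat("yyyyMMdd-HHmmss.SSS").format(new Date());
    // Compact form of the pattern the test asserts against.
    Pattern p = Pattern.compile("s\\d{8}-\\d{6}\\.\\d{3}");
    System.out.println(name + " matches: " + p.matcher(name).matches());
  }
}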
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class TestNestedSnapshots, method testSnapshotLimit.
/**
* Test the snapshot limit of a single snapshottable directory.
* @throws Exception
*/
@Test(timeout = 300000)
public void testSnapshotLimit() throws Exception {
  final int step = 1000;
  final String dirStr = "/testSnapshotLimit/dir";
  final Path dir = new Path(dirStr);
  hdfs.mkdirs(dir, new FsPermission((short) 0777));
  hdfs.allowSnapshot(dir);
  int s = 0;
  for (; s < SNAPSHOT_LIMIT; s++) {
    final String snapshotName = "s" + s;
    hdfs.createSnapshot(dir, snapshotName);
    // create a file occasionally
    if (s % step == 0) {
      final Path file = new Path(dirStr, "f" + s);
      DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
    }
  }
  try {
    hdfs.createSnapshot(dir, "s" + s);
    Assert.fail("Expected to fail to create snapshot, but didn't.");
  } catch (IOException ioe) {
    SnapshotTestHelper.LOG.info("The exception is expected.", ioe);
  }
  for (int f = 0; f < SNAPSHOT_LIMIT; f += step) {
    final String file = "f" + f;
    s = RANDOM.nextInt(step);
    for (; s < SNAPSHOT_LIMIT; s += RANDOM.nextInt(step)) {
      final Path p = SnapshotTestHelper.getSnapshotPath(dir, "s" + s, file);
      // the file #f exists in snapshot #s iff s > f
      Assert.assertEquals(s > f, hdfs.exists(p));
    }
  }
}
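The invariant checked in the final loop follows from ordering: snapshot "s" + N is taken at the top of each iteration, before file "f" + N is created, so a file only appears in snapshots with a strictly larger index. A hypothetical standalone spot check of that relation; the paths assume the layout created by this test and an fs.defaultFS pointing at the same cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SnapshotOrderSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/testSnapshotLimit/dir");
    // f0 was created immediately after s0 was taken, so:
    System.out.println(fs.exists(new Path(dir, ".snapshot/s0/f0"))); // false
    System.out.println(fs.exists(new Path(dir, ".snapshot/s1/f0"))); // true
  }
}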
Use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.
The class JobHistoryEventHandler, method serviceInit.
/* (non-Javadoc)
 * @see org.apache.hadoop.yarn.service.AbstractService#init(
 *     org.apache.hadoop.conf.Configuration)
 *
 * Initializes the FileSystem and Path objects for the log and done
 * directories. Creates these directories if they do not already exist.
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {
  String jobId = TypeConverter.fromYarn(context.getApplicationID()).toString();
  String stagingDirStr = null;
  String doneDirStr = null;
  String userDoneDirStr = null;
  try {
    stagingDirStr = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId);
    doneDirStr = JobHistoryUtils.getConfiguredHistoryIntermediateDoneDirPrefix(conf);
    userDoneDirStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
  } catch (IOException e) {
    LOG.error("Failed while getting the configured log directories", e);
    throw new YarnRuntimeException(e);
  }
  // Check for the existence of the history staging dir. Maybe create it.
  try {
    stagingDirPath =
        FileContext.getFileContext(conf).makeQualified(new Path(stagingDirStr));
    stagingDirFS = FileSystem.get(stagingDirPath.toUri(), conf);
    mkdir(stagingDirFS, stagingDirPath,
        new FsPermission(JobHistoryUtils.HISTORY_STAGING_DIR_PERMISSIONS));
  } catch (IOException e) {
    LOG.error("Failed while checking for/creating history staging path: ["
        + stagingDirPath + "]", e);
    throw new YarnRuntimeException(e);
  }
  // Check for the existence of the intermediate done dir.
  Path doneDirPath = null;
  try {
    doneDirPath = FileContext.getFileContext(conf).makeQualified(new Path(doneDirStr));
    doneDirFS = FileSystem.get(doneDirPath.toUri(), conf);
    // This directory should ideally be created by the JobHistoryServer or as
    // part of deployment.
    if (!doneDirFS.exists(doneDirPath)) {
      if (JobHistoryUtils.shouldCreateNonUserDirectory(conf)) {
        LOG.info("Creating intermediate history logDir: [" + doneDirPath
            + "] based on conf. Should ideally be created by the JobHistoryServer: "
            + MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR);
        mkdir(doneDirFS, doneDirPath, new FsPermission(
            JobHistoryUtils.HISTORY_INTERMEDIATE_DONE_DIR_PERMISSIONS.toShort()));
        // TODO Temporary toShort till new FsPermission(FsPermission) respects sticky
      } else {
        String message = "Not creating intermediate history logDir: ["
            + doneDirPath + "] based on conf: "
            + MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR
            + ". Either set to true or pre-create this directory with"
            + " appropriate permissions";
        LOG.error(message);
        throw new YarnRuntimeException(message);
      }
    }
  } catch (IOException e) {
    LOG.error("Failed checking for the existence of history intermediate "
        + "done directory: [" + doneDirPath + "]");
    throw new YarnRuntimeException(e);
  }
  // Check/create the user directory under the intermediate done dir.
  try {
    doneDirPrefixPath =
        FileContext.getFileContext(conf).makeQualified(new Path(userDoneDirStr));
    mkdir(doneDirFS, doneDirPrefixPath,
        new FsPermission(JobHistoryUtils.HISTORY_INTERMEDIATE_USER_DIR_PERMISSIONS));
  } catch (IOException e) {
    LOG.error("Error creating user intermediate history done directory: [ "
        + doneDirPrefixPath + "]", e);
    throw new YarnRuntimeException(e);
  }
  // Maximum number of unflushed completion-events that can stay in the queue
  // before flush kicks in.
  maxUnflushedCompletionEvents =
      conf.getInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS,
          MRJobConfig.DEFAULT_MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS);
  // We want to cut down flushes after the job completes so as to write quicker,
  // so we increase maxUnflushedEvents post job completion by using the
  // following multiplier.
  postJobCompletionMultiplier =
      conf.getInt(MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER,
          MRJobConfig.DEFAULT_MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER);
  // Max time until which flush doesn't take place.
  flushTimeout =
      conf.getLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS,
          MRJobConfig.DEFAULT_MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS);
  minQueueSizeForBatchingFlushes =
      conf.getInt(MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD,
          MRJobConfig.DEFAULT_MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD);
  // Timeline service configuration status: off, on_with_v1 or on_with_v2.
  if (conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA,
      MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)) {
    LOG.info("Emitting job history data to the timeline service is enabled");
    if (YarnConfiguration.timelineServiceEnabled(conf)) {
      boolean timelineServiceV2Enabled =
          ((int) YarnConfiguration.getTimelineServiceVersion(conf) == 2);
      if (timelineServiceV2Enabled) {
        timelineV2Client =
            ((MRAppMaster.RunningAppContext) context).getTimelineV2Client();
        timelineV2Client.init(conf);
      } else {
        timelineClient =
            ((MRAppMaster.RunningAppContext) context).getTimelineClient();
        timelineClient.init(conf);
      }
      LOG.info("Timeline service is enabled; version: "
          + YarnConfiguration.getTimelineServiceVersion(conf));
    } else {
      LOG.info("Timeline service is not enabled");
    }
  } else {
    LOG.info("Emitting job history data to the timeline server is not enabled");
  }
  // Choose the .jhist file format: json or binary.
  String jhistFormat = conf.get(JHAdminConfig.MR_HS_JHIST_FORMAT,
      JHAdminConfig.DEFAULT_MR_HS_JHIST_FORMAT);
  if (jhistFormat.equals("json")) {
    jhistMode = EventWriter.WriteMode.JSON;
  } else if (jhistFormat.equals("binary")) {
    jhistMode = EventWriter.WriteMode.BINARY;
  } else {
    LOG.warn("Unrecognized value '" + jhistFormat + "' for property "
        + JHAdminConfig.MR_HS_JHIST_FORMAT + ". Valid values are "
        + "'json' or 'binary'. Falling back to default value '"
        + JHAdminConfig.DEFAULT_MR_HS_JHIST_FORMAT + "'.");
  }
  super.serviceInit(conf);
}
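serviceInit delegates directory creation to a private mkdir(fs, path, perm) helper whose body is not shown here. The sketch below is a hedged illustration of the usual pattern such a helper has to follow, not the actual JobHistoryEventHandler code: FileSystem.mkdirs(Path, FsPermission) still applies the process umask, so code that needs an exact mode on the history staging and done directories typically re-asserts it with setPermission afterwards. The class and method names are hypothetical.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

final class HistoryDirUtil {
  private HistoryDirUtil() {
  }

  // Hypothetical helper: create a directory and force the requested mode.
  static void mkdirWithPermission(FileSystem fs, Path path, FsPermission perm)
      throws IOException {
    if (!fs.exists(path)) {
      fs.mkdirs(path, perm);        // the umask may strip bits here
      fs.setPermission(path, perm); // so re-apply the intended mode explicitly
    }
  }
}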