
Example 71 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestFSImageWithSnapshot method testSaveLoadImage.

/**
   * Testing steps:
   * <pre>
   * 1. Create/modify directories/files while snapshots are being taken.
   * 2. Dump the FSDirectory tree of the namesystem.
   * 3. Save the namesystem to a temp file (FSImage saving).
   * 4. Restart the cluster and format the namesystem.
   * 5. Load the namesystem from the temp file (FSImage loading).
   * 6. Dump the FSDirectory again and compare the two dumped strings.
   * </pre>
   */
@Test
public void testSaveLoadImage() throws Exception {
    int s = 0;
    // make changes to the namesystem
    hdfs.mkdirs(dir);
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s" + ++s);
    Path sub1 = new Path(dir, "sub1");
    hdfs.mkdirs(sub1);
    hdfs.setPermission(sub1, new FsPermission((short) 0777));
    Path sub11 = new Path(sub1, "sub11");
    hdfs.mkdirs(sub11);
    checkImage(s);
    hdfs.createSnapshot(dir, "s" + ++s);
    Path sub1file1 = new Path(sub1, "sub1file1");
    Path sub1file2 = new Path(sub1, "sub1file2");
    DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, (short) 1, seed);
    DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, (short) 1, seed);
    checkImage(s);
    hdfs.createSnapshot(dir, "s" + ++s);
    Path sub2 = new Path(dir, "sub2");
    Path sub2file1 = new Path(sub2, "sub2file1");
    Path sub2file2 = new Path(sub2, "sub2file2");
    DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, (short) 1, seed);
    DFSTestUtil.createFile(hdfs, sub2file2, BLOCKSIZE, (short) 1, seed);
    checkImage(s);
    hdfs.createSnapshot(dir, "s" + ++s);
    hdfs.setReplication(sub1file1, (short) 1);
    hdfs.delete(sub1file2, true);
    hdfs.setOwner(sub2, "dr.who", "unknown");
    hdfs.delete(sub2file1, true);
    checkImage(s);
    hdfs.createSnapshot(dir, "s" + ++s);
    Path sub1_sub2file2 = new Path(sub1, "sub2file2");
    hdfs.rename(sub2file2, sub1_sub2file2);
    hdfs.rename(sub1file1, sub2file1);
    checkImage(s);
    hdfs.rename(sub2file1, sub2file2);
    checkImage(s);
}
Also used : Path(org.apache.hadoop.fs.Path) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
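
The (short) 0777 arguments in these tests are octal literals. For quick reference, the following is a minimal sketch (not taken from the Hadoop tests) of the usual ways to build an FsPermission and apply it; the path name and the FileSystem handle are illustrative only, and mkdirs still applies the configured umask to the requested mode.

// Minimal FsPermission usage sketch; the path is illustrative only.
static void permissionSketch(FileSystem fs) throws IOException {
    FsPermission open = new FsPermission((short) 0777);             // octal literal: rwxrwxrwx
    FsPermission fromSymbolic = FsPermission.valueOf("-rwxr-xr-x"); // built from a symbolic string
    Path dir = new Path("/tmp/perm-demo");
    // mkdirs masks the requested mode with fs.permissions.umask-mode ...
    fs.mkdirs(dir, open);
    // ... while setPermission applies the exact mode afterwards.
    fs.setPermission(dir, fromSymbolic);
}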

Example 72 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestRenameWithSnapshots method testRenameFromSDir2NonSDir.

@Test(timeout = 300000)
public void testRenameFromSDir2NonSDir() throws Exception {
    final String dirStr = "/testRenameWithSnapshot";
    final String abcStr = dirStr + "/abc";
    final Path abc = new Path(abcStr);
    hdfs.mkdirs(abc, new FsPermission((short) 0777));
    hdfs.allowSnapshot(abc);
    final Path foo = new Path(abc, "foo");
    DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
    hdfs.createSnapshot(abc, "s0");
    try {
        hdfs.rename(abc, new Path(dirStr, "tmp"));
        fail("Expect exception since " + abc + " is snapshottable and already has snapshots");
    } catch (IOException e) {
        GenericTestUtils.assertExceptionContains(abcStr + " is snapshottable and already has snapshots", e);
    }
    final String xyzStr = dirStr + "/xyz";
    final Path xyz = new Path(xyzStr);
    hdfs.mkdirs(xyz, new FsPermission((short) 0777));
    final Path bar = new Path(xyz, "bar");
    hdfs.rename(foo, bar);
    final INode fooRef = fsdir.getINode(SnapshotTestHelper.getSnapshotPath(abc, "s0", "foo").toString());
    Assert.assertTrue(fooRef.isReference());
    Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);
    final INodeReference.WithCount withCount = (INodeReference.WithCount) fooRef.asReference().getReferredINode();
    Assert.assertEquals(2, withCount.getReferenceCount());
    final INode barRef = fsdir.getINode(bar.toString());
    Assert.assertTrue(barRef.isReference());
    Assert.assertSame(withCount, barRef.asReference().getReferredINode());
    hdfs.delete(bar, false);
    Assert.assertEquals(1, withCount.getReferenceCount());
}
Also used : Path(org.apache.hadoop.fs.Path) INodesInPath(org.apache.hadoop.hdfs.server.namenode.INodesInPath) INode(org.apache.hadoop.hdfs.server.namenode.INode) WithCount(org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount) FsPermission(org.apache.hadoop.fs.permission.FsPermission) IOException(java.io.IOException) INodeReference(org.apache.hadoop.hdfs.server.namenode.INodeReference) Test(org.junit.Test)
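
The rename failure asserted above follows from the rule that a snapshottable directory which still has snapshots cannot be renamed (or deleted). A hedged sketch of the usual workaround, assuming the same DistributedFileSystem handle hdfs and the same paths as in the test: remove every existing snapshot, optionally disallow snapshotting, and only then rename.

// Sketch only: release snapshots so the snapshottable directory can be renamed.
static void renameSnapshottableDir(DistributedFileSystem hdfs) throws IOException {
    final Path abc = new Path("/testRenameWithSnapshot/abc");
    hdfs.deleteSnapshot(abc, "s0");   // repeat for every snapshot the directory holds
    hdfs.disallowSnapshot(abc);       // optional; only possible once no snapshots remain
    hdfs.rename(abc, new Path("/testRenameWithSnapshot/tmp"));
}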

Example 73 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestNestedSnapshots method testSnapshotName.

@Test(timeout = 300000)
public void testSnapshotName() throws Exception {
    final String dirStr = "/testSnapshotWithQuota/dir";
    final Path dir = new Path(dirStr);
    hdfs.mkdirs(dir, new FsPermission((short) 0777));
    hdfs.allowSnapshot(dir);
    // set namespace quota
    final int NS_QUOTA = 6;
    hdfs.setQuota(dir, NS_QUOTA, HdfsConstants.QUOTA_DONT_SET);
    // create object to use up the quota.
    final Path foo = new Path(dir, "foo");
    final Path f1 = new Path(foo, "f1");
    DFSTestUtil.createFile(hdfs, f1, BLOCKSIZE, REPLICATION, SEED);
    {
        //create a snapshot with default snapshot name
        final Path snapshotPath = hdfs.createSnapshot(dir);
        //check snapshot path and the default snapshot name
        final String snapshotName = snapshotPath.getName();
        Assert.assertTrue("snapshotName=" + snapshotName, Pattern.matches("s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d", snapshotName));
        final Path parent = snapshotPath.getParent();
        Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR, parent.getName());
        Assert.assertEquals(dir, parent.getParent());
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FsPermission(org.apache.hadoop.fs.permission.FsPermission) Test(org.junit.Test)
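
A snapshot is exposed read-only under the directory's .snapshot subtree, which is what the parent-path assertions above check. A small hedged sketch of how such a snapshot path is composed and read, assuming the file foo/f1 from the test exists and that the snapshot was created with an explicit name "s1" rather than the default name:

// Sketch: reading a file through its snapshot path <dir>/.snapshot/<snapshotName>/<file>.
static void readFromSnapshot(DistributedFileSystem hdfs) throws IOException {
    Path dir = new Path("/testSnapshotWithQuota/dir");
    Path inSnapshot = new Path(dir, HdfsConstants.DOT_SNAPSHOT_DIR + "/s1/foo/f1");
    try (FSDataInputStream in = hdfs.open(inSnapshot)) {
        in.read();  // the .snapshot view is read-only; writes under it are rejected
    }
}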

Example 74 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class TestNestedSnapshots method testSnapshotLimit.

/**
   * Test the snapshot limit of a single snapshottable directory.
   * @throws Exception
   */
@Test(timeout = 300000)
public void testSnapshotLimit() throws Exception {
    final int step = 1000;
    final String dirStr = "/testSnapshotLimit/dir";
    final Path dir = new Path(dirStr);
    hdfs.mkdirs(dir, new FsPermission((short) 0777));
    hdfs.allowSnapshot(dir);
    int s = 0;
    for (; s < SNAPSHOT_LIMIT; s++) {
        final String snapshotName = "s" + s;
        hdfs.createSnapshot(dir, snapshotName);
        //create a file occasionally 
        if (s % step == 0) {
            final Path file = new Path(dirStr, "f" + s);
            DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
        }
    }
    try {
        hdfs.createSnapshot(dir, "s" + s);
        Assert.fail("Expected to fail to create snapshot, but didn't.");
    } catch (IOException ioe) {
        SnapshotTestHelper.LOG.info("The exception is expected.", ioe);
    }
    for (int f = 0; f < SNAPSHOT_LIMIT; f += step) {
        final String file = "f" + f;
        s = RANDOM.nextInt(step);
        for (; s < SNAPSHOT_LIMIT; s += RANDOM.nextInt(step)) {
            final Path p = SnapshotTestHelper.getSnapshotPath(dir, "s" + s, file);
            //the file #f exists in snapshot #s iff s > f.
            Assert.assertEquals(s > f, hdfs.exists(p));
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FsPermission(org.apache.hadoop.fs.permission.FsPermission) IOException(java.io.IOException) Test(org.junit.Test)

Example 75 with FsPermission

use of org.apache.hadoop.fs.permission.FsPermission in project hadoop by apache.

the class JobHistoryEventHandler method serviceInit.

/* (non-Javadoc)
   * @see org.apache.hadoop.yarn.service.AbstractService#init(org.
   * apache.hadoop.conf.Configuration)
   * Initializes the FileSystem and Path objects for the log and done directories.
   * Creates these directories if they do not already exist.
   */
@Override
protected void serviceInit(Configuration conf) throws Exception {
    String jobId = TypeConverter.fromYarn(context.getApplicationID()).toString();
    String stagingDirStr = null;
    String doneDirStr = null;
    String userDoneDirStr = null;
    try {
        stagingDirStr = JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId);
        doneDirStr = JobHistoryUtils.getConfiguredHistoryIntermediateDoneDirPrefix(conf);
        userDoneDirStr = JobHistoryUtils.getHistoryIntermediateDoneDirForUser(conf);
    } catch (IOException e) {
        LOG.error("Failed while getting the configured log directories", e);
        throw new YarnRuntimeException(e);
    }
    //Check for the existence of the history staging dir. Maybe create it. 
    try {
        stagingDirPath = FileContext.getFileContext(conf).makeQualified(new Path(stagingDirStr));
        stagingDirFS = FileSystem.get(stagingDirPath.toUri(), conf);
        mkdir(stagingDirFS, stagingDirPath, new FsPermission(JobHistoryUtils.HISTORY_STAGING_DIR_PERMISSIONS));
    } catch (IOException e) {
        LOG.error("Failed while checking for/creating  history staging path: [" + stagingDirPath + "]", e);
        throw new YarnRuntimeException(e);
    }
    //Check for the existence of intermediate done dir.
    Path doneDirPath = null;
    try {
        doneDirPath = FileContext.getFileContext(conf).makeQualified(new Path(doneDirStr));
        doneDirFS = FileSystem.get(doneDirPath.toUri(), conf);
        // The intermediate done dir should ideally be created by the JobHistoryServer or as part of deployment.
        if (!doneDirFS.exists(doneDirPath)) {
            if (JobHistoryUtils.shouldCreateNonUserDirectory(conf)) {
                LOG.info("Creating intermediate history logDir: [" + doneDirPath + "] + based on conf. Should ideally be created by the JobHistoryServer: " + MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR);
                mkdir(doneDirFS, doneDirPath, new FsPermission(JobHistoryUtils.HISTORY_INTERMEDIATE_DONE_DIR_PERMISSIONS.toShort()));
            // TODO Temporary toShort till new FsPermission(FsPermissions)
            // respects
            // sticky
            } else {
                String message = "Not creating intermediate history logDir: [" + doneDirPath + "] based on conf: " + MRJobConfig.MR_AM_CREATE_JH_INTERMEDIATE_BASE_DIR + ". Either set to true or pre-create this directory with" + " appropriate permissions";
                LOG.error(message);
                throw new YarnRuntimeException(message);
            }
        }
    } catch (IOException e) {
        LOG.error("Failed checking for the existance of history intermediate " + "done directory: [" + doneDirPath + "]");
        throw new YarnRuntimeException(e);
    }
    //Check/create user directory under intermediate done dir.
    try {
        doneDirPrefixPath = FileContext.getFileContext(conf).makeQualified(new Path(userDoneDirStr));
        mkdir(doneDirFS, doneDirPrefixPath, new FsPermission(JobHistoryUtils.HISTORY_INTERMEDIATE_USER_DIR_PERMISSIONS));
    } catch (IOException e) {
        LOG.error("Error creating user intermediate history done directory: [ " + doneDirPrefixPath + "]", e);
        throw new YarnRuntimeException(e);
    }
    // Maximum number of unflushed completion-events that can stay in the queue
    // before flush kicks in.
    maxUnflushedCompletionEvents = conf.getInt(MRJobConfig.MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS, MRJobConfig.DEFAULT_MR_AM_HISTORY_MAX_UNFLUSHED_COMPLETE_EVENTS);
    // We want to cut down flushes after job completes so as to write quicker,
    // so we increase maxUnflushedEvents post Job completion by using the
    // following multiplier.
    postJobCompletionMultiplier = conf.getInt(MRJobConfig.MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER, MRJobConfig.DEFAULT_MR_AM_HISTORY_JOB_COMPLETE_UNFLUSHED_MULTIPLIER);
    // Max time until which flush doesn't take place.
    flushTimeout = conf.getLong(MRJobConfig.MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS, MRJobConfig.DEFAULT_MR_AM_HISTORY_COMPLETE_EVENT_FLUSH_TIMEOUT_MS);
    minQueueSizeForBatchingFlushes = conf.getInt(MRJobConfig.MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD, MRJobConfig.DEFAULT_MR_AM_HISTORY_USE_BATCHED_FLUSH_QUEUE_SIZE_THRESHOLD);
    // configuration status: off, on_with_v1 or on_with_v2.
    if (conf.getBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, MRJobConfig.DEFAULT_MAPREDUCE_JOB_EMIT_TIMELINE_DATA)) {
        LOG.info("Emitting job history data to the timeline service is enabled");
        if (YarnConfiguration.timelineServiceEnabled(conf)) {
            boolean timelineServiceV2Enabled = ((int) YarnConfiguration.getTimelineServiceVersion(conf) == 2);
            if (timelineServiceV2Enabled) {
                timelineV2Client = ((MRAppMaster.RunningAppContext) context).getTimelineV2Client();
                timelineV2Client.init(conf);
            } else {
                timelineClient = ((MRAppMaster.RunningAppContext) context).getTimelineClient();
                timelineClient.init(conf);
            }
            LOG.info("Timeline service is enabled; version: " + YarnConfiguration.getTimelineServiceVersion(conf));
        } else {
            LOG.info("Timeline service is not enabled");
        }
    } else {
        LOG.info("Emitting job history data to the timeline server is not " + "enabled");
    }
    // Select the serialization format for job history (.jhist) files: json or binary.
    String jhistFormat = conf.get(JHAdminConfig.MR_HS_JHIST_FORMAT, JHAdminConfig.DEFAULT_MR_HS_JHIST_FORMAT);
    if (jhistFormat.equals("json")) {
        jhistMode = EventWriter.WriteMode.JSON;
    } else if (jhistFormat.equals("binary")) {
        jhistMode = EventWriter.WriteMode.BINARY;
    } else {
        LOG.warn("Unrecognized value '" + jhistFormat + "' for property " + JHAdminConfig.MR_HS_JHIST_FORMAT + ".  Valid values are " + "'json' or 'binary'.  Falling back to default value '" + JHAdminConfig.DEFAULT_MR_HS_JHIST_FORMAT + "'.");
    }
    super.serviceInit(conf);
}
Also used : YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) Path(org.apache.hadoop.fs.Path) MRAppMaster(org.apache.hadoop.mapreduce.v2.app.MRAppMaster) IOException(java.io.IOException) FsPermission(org.apache.hadoop.fs.permission.FsPermission)
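
The mkdir(...) calls above go through a private helper of JobHistoryEventHandler. A hedged sketch of the pattern such a helper typically implements (create the directory if it is missing, then reassert the permission so the cluster umask cannot narrow it); the method name below is illustrative, not the actual helper.

// Sketch of a create-with-explicit-permission helper; the name is illustrative.
private static void mkdirWithPermission(FileSystem fs, Path path, FsPermission perm)
        throws IOException {
    if (!fs.exists(path)) {
        fs.mkdirs(path, perm);        // mkdirs masks 'perm' with the configured umask
        fs.setPermission(path, perm); // so reapply the exact mode afterwards
    }
}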

Aggregations

FsPermission (org.apache.hadoop.fs.permission.FsPermission): 427 usages
Path (org.apache.hadoop.fs.Path): 267 usages
Test (org.junit.Test): 180 usages
IOException (java.io.IOException): 120 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 93 usages
Configuration (org.apache.hadoop.conf.Configuration): 89 usages
FileStatus (org.apache.hadoop.fs.FileStatus): 87 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 52 usages
AccessControlException (org.apache.hadoop.security.AccessControlException): 43 usages
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 36 usages
FileNotFoundException (java.io.FileNotFoundException): 33 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 29 usages
File (java.io.File): 26 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 26 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 26 usages
AclEntry (org.apache.hadoop.fs.permission.AclEntry): 25 usages
ArrayList (java.util.ArrayList): 22 usages
HashMap (java.util.HashMap): 19 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 16 usages
URI (java.net.URI): 15 usages