Example 41 with ContentSummary

use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

the class FSDirStatAndListingOp method getContentSummaryInt.

private static ContentSummary getContentSummaryInt(FSDirectory fsd, INodesInPath iip) throws IOException {
    fsd.readLock();
    try {
        INode targetNode = iip.getLastINode();
        if (targetNode == null) {
            throw new FileNotFoundException("File does not exist: " + iip.getPath());
        } else {
            // Make it relinquish locks every time contentCountLimit entries are
            // processed. 0 means disabled, i.e. blocking for the entire duration.
            ContentSummaryComputationContext cscc = new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(), fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
            ContentSummary cs = targetNode.computeAndConvertContentSummary(iip.getPathSnapshotId(), cscc);
            fsd.addYieldCount(cscc.getYieldCount());
            return cs;
        }
    } finally {
        fsd.readUnlock();
    }
}
Also used: ContentSummary(org.apache.hadoop.fs.ContentSummary), FileNotFoundException(java.io.FileNotFoundException)
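
For orientation, the method above is the namenode-side handler that ultimately serves an ordinary client call. Below is a minimal client-side sketch, assuming a reachable HDFS cluster resolved from the default Configuration; the /data path and the ContentSummaryClient class name are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ContentSummaryClient {
    public static void main(String[] args) throws Exception {
        // Connects to whatever fs.defaultFS the loaded Configuration resolves to.
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
            // /data is a placeholder; against HDFS this call is served by
            // FSDirStatAndListingOp.getContentSummaryInt shown above.
            ContentSummary cs = fs.getContentSummary(new Path("/data"));
            System.out.println("files=" + cs.getFileCount()
                + " dirs=" + cs.getDirectoryCount()
                + " length=" + cs.getLength()
                + " spaceConsumed=" + cs.getSpaceConsumed());
        }
    }
}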

Example 42 with ContentSummary

use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

the class TestViewFsDefaultValue method testGetContentSummary.

/**
   * Test that a content summary can be retrieved on the client side.
   */
@Test
public void testGetContentSummary() throws IOException {
    FileSystem hFs = cluster.getFileSystem(0);
    final DistributedFileSystem dfs = (DistributedFileSystem) hFs;
    dfs.setQuota(testFileDirPath, 100, 500);
    ContentSummary cs = vfs.getContentSummary(testFileDirPath);
    assertEquals(100, cs.getQuota());
    assertEquals(500, cs.getSpaceQuota());
}
Also used: FileSystem(org.apache.hadoop.fs.FileSystem), DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem), ContentSummary(org.apache.hadoop.fs.ContentSummary), Test(org.junit.Test)
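
A note on reading the result: HDFS reports -1 for a quota that has not been set, so callers typically guard on the sign before printing or enforcing anything. A small sketch; QuotaReport and printQuotas are hypothetical names.

import java.io.IOException;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class QuotaReport {
    // Hypothetical helper: print both quotas, treating -1 as "unset".
    static void printQuotas(FileSystem fs, Path dir) throws IOException {
        ContentSummary cs = fs.getContentSummary(dir);
        long nsQuota = cs.getQuota();         // namespace (file/dir count) quota
        long spaceQuota = cs.getSpaceQuota(); // raw disk space quota in bytes
        System.out.println(dir
            + ": namespace quota=" + (nsQuota < 0 ? "unset" : String.valueOf(nsQuota))
            + ", space quota=" + (spaceQuota < 0 ? "unset" : String.valueOf(spaceQuota)));
    }
}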

Example 43 with ContentSummary

use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

the class TestQuota method testMaxSpaceQuotas.

/**
   * Test limit cases for setting space quotas.
   */
@Test
public void testMaxSpaceQuotas() throws Exception {
    final Path parent = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
    assertTrue(dfs.mkdirs(parent));
    final FileSystem fs = cluster.getFileSystem();
    assertTrue("Not a HDFS: " + fs.getUri(), fs instanceof DistributedFileSystem);
    final DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // create test directory
    final Path testFolder = new Path(parent, "testFolder");
    assertTrue(dfs.mkdirs(testFolder));
    // setting namespace quota to Long.MAX_VALUE - 1 should work
    dfs.setQuota(testFolder, Long.MAX_VALUE - 1, 10);
    ContentSummary c = dfs.getContentSummary(testFolder);
    compareQuotaUsage(c, dfs, testFolder);
    assertTrue("Quota not set properly", c.getQuota() == Long.MAX_VALUE - 1);
    // setting diskspace quota to Long.MAX_VALUE - 1 should work
    dfs.setQuota(testFolder, 10, Long.MAX_VALUE - 1);
    c = dfs.getContentSummary(testFolder);
    compareQuotaUsage(c, dfs, testFolder);
    assertTrue("Quota not set properly", c.getSpaceQuota() == Long.MAX_VALUE - 1);
    // setting namespace quota to Long.MAX_VALUE should not work + no error
    dfs.setQuota(testFolder, Long.MAX_VALUE, 10);
    c = dfs.getContentSummary(testFolder);
    compareQuotaUsage(c, dfs, testFolder);
    assertTrue("Quota should not have changed", c.getQuota() == 10);
    // setting diskspace quota to Long.MAX_VALUE should not work + no error
    dfs.setQuota(testFolder, 10, Long.MAX_VALUE);
    c = dfs.getContentSummary(testFolder);
    compareQuotaUsage(c, dfs, testFolder);
    assertTrue("Quota should not have changed", c.getSpaceQuota() == 10);
    // setting namespace quota to Long.MAX_VALUE + 1 should not work + error
    try {
        dfs.setQuota(testFolder, Long.MAX_VALUE + 1, 10);
        fail("Exception not thrown");
    } catch (IllegalArgumentException e) {
    // Expected
    }
    // setting diskspace quota to Long.MAX_VALUE + 1 should not work + error
    try {
        dfs.setQuota(testFolder, 10, Long.MAX_VALUE + 1);
        fail("Exception not thrown");
    } catch (IllegalArgumentException e) {
    // Expected
    }
}
Also used: Path(org.apache.hadoop.fs.Path), FileSystem(org.apache.hadoop.fs.FileSystem), ContentSummary(org.apache.hadoop.fs.ContentSummary), Test(org.junit.Test)
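
The Long.MAX_VALUE behaviour exercised above is by design: Long.MAX_VALUE is the HdfsConstants.QUOTA_DONT_SET sentinel and -1 is HdfsConstants.QUOTA_RESET, so each acts as a directive rather than a settable value, while Long.MAX_VALUE + 1 wraps to a negative long and is rejected with IllegalArgumentException. A minimal sketch of the two sentinels; the QuotaSentinels class is illustrative, and dfs/dir are assumed to exist.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public final class QuotaSentinels {
    static void demo(DistributedFileSystem dfs, Path dir) throws IOException {
        // Long.MAX_VALUE == QUOTA_DONT_SET: leave the namespace quota alone,
        // change only the space quota (here to a hypothetical 4096 bytes).
        dfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, 4096L);
        // -1 == QUOTA_RESET: clear both quotas from the directory.
        dfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
    }
}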

Example 44 with ContentSummary

use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

the class TestQuota method testHugeFileCount.

/**
   * A file count on the root should return the total number of files in the
   * filesystem, even when one folder contains more files than
   * "dfs.content-summary.limit".
   */
@Test
public void testHugeFileCount() throws IOException {
    final Path parent = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
    assertTrue(dfs.mkdirs(parent));
    for (int i = 1; i <= 5; i++) {
        FSDataOutputStream out = dfs.create(new Path(parent, "Folder1/" + "file" + i), (short) 1);
        out.close();
    }
    FSDataOutputStream out = dfs.create(new Path(parent, "Folder2/file6"), (short) 1);
    out.close();
    ContentSummary contentSummary = dfs.getContentSummary(parent);
    compareQuotaUsage(contentSummary, dfs, parent);
    assertEquals(6, contentSummary.getFileCount());
}
Also used: Path(org.apache.hadoop.fs.Path), ContentSummary(org.apache.hadoop.fs.ContentSummary), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), Test(org.junit.Test)
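
The limit named in the Javadoc is a namenode-side setting: after that many entries have been processed, the content summary computation yields the namesystem lock, and 0 disables yielding entirely. A sketch of how a test cluster might lower it, using the stock key name; wiring the conf into a running cluster is assumed, not shown.

import org.apache.hadoop.conf.Configuration;

public final class SummaryLimitConfig {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Yield the lock after every 2 inodes while computing a summary;
        // 0 would hold the lock for the entire traversal.
        conf.setInt("dfs.content-summary.limit", 2);
        // This conf would be handed to the namenode (e.g. a MiniDFSCluster)
        // at startup; it is not a per-request client option.
        System.out.println(conf.getInt("dfs.content-summary.limit", 0));
    }
}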

Example 45 with ContentSummary

use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.

the class TestQuota method testSpaceCommands.

/**
   * Test HDFS operations that change the disk space consumed by a directory
   * tree, namely create, rename, delete, append, and setReplication.
   * 
   * This is based on testNamespaceCommands() above.
   */
@Test
public void testSpaceCommands() throws Exception {
    final Path parent = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
    assertTrue(dfs.mkdirs(parent));
    int fileLen = 1024;
    short replication = 3;
    int fileSpace = fileLen * replication;
    // create directory nqdir0/qdir1/qdir20/nqdir30
    assertTrue(dfs.mkdirs(new Path(parent, "nqdir0/qdir1/qdir20/nqdir30")));
    // set the quota of nqdir0/qdir1 to 4 * fileSpace
    final Path quotaDir1 = new Path(parent, "nqdir0/qdir1");
    dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 4 * fileSpace);
    ContentSummary c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(c.getSpaceQuota(), 4 * fileSpace);
    // set the quota of nqdir0/qdir1/qdir20 to 6 * fileSpace
    final Path quotaDir20 = new Path(parent, "nqdir0/qdir1/qdir20");
    dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 6 * fileSpace);
    c = dfs.getContentSummary(quotaDir20);
    compareQuotaUsage(c, dfs, quotaDir20);
    assertEquals(c.getSpaceQuota(), 6 * fileSpace);
    // Create nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace
    final Path quotaDir21 = new Path(parent, "nqdir0/qdir1/qdir21");
    assertTrue(dfs.mkdirs(quotaDir21));
    dfs.setQuota(quotaDir21, HdfsConstants.QUOTA_DONT_SET, 2 * fileSpace);
    c = dfs.getContentSummary(quotaDir21);
    compareQuotaUsage(c, dfs, quotaDir21);
    assertEquals(c.getSpaceQuota(), 2 * fileSpace);
    // Create directory nqdir0/qdir1/qdir21/nqdir32
    Path tempPath = new Path(quotaDir21, "nqdir32");
    assertTrue(dfs.mkdirs(tempPath));
    // create a file under nqdir32/fileDir
    DFSTestUtil.createFile(dfs, new Path(tempPath, "fileDir/file1"), fileLen, replication, 0);
    c = dfs.getContentSummary(quotaDir21);
    compareQuotaUsage(c, dfs, quotaDir21);
    assertEquals(c.getSpaceConsumed(), fileSpace);
    // Creating a larger file under nqdir0/qdir1/qdir21/nqdir33 should exceed the space quota
    boolean hasException = false;
    try {
        DFSTestUtil.createFile(dfs, new Path(quotaDir21, "nqdir33/file2"), 2 * fileLen, replication, 0);
    } catch (DSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    // delete nqdir33
    assertTrue(dfs.delete(new Path(quotaDir21, "nqdir33"), true));
    c = dfs.getContentSummary(quotaDir21);
    compareQuotaUsage(c, dfs, quotaDir21);
    assertEquals(c.getSpaceConsumed(), fileSpace);
    assertEquals(c.getSpaceQuota(), 2 * fileSpace);
    // Verify space before the move:
    c = dfs.getContentSummary(quotaDir20);
    compareQuotaUsage(c, dfs, quotaDir20);
    assertEquals(c.getSpaceConsumed(), 0);
    // Move nqdir0/qdir1/qdir21/nqdir32 nqdir0/qdir1/qdir20/nqdir30
    Path dstPath = new Path(quotaDir20, "nqdir30");
    Path srcPath = new Path(quotaDir21, "nqdir32");
    assertTrue(dfs.rename(srcPath, dstPath));
    // verify space after the move
    c = dfs.getContentSummary(quotaDir20);
    assertEquals(c.getSpaceConsumed(), fileSpace);
    // verify space for its parent
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(c.getSpaceConsumed(), fileSpace);
    // verify space for source for the move
    c = dfs.getContentSummary(quotaDir21);
    compareQuotaUsage(c, dfs, quotaDir21);
    assertEquals(c.getSpaceConsumed(), 0);
    final Path file2 = new Path(dstPath, "fileDir/file2");
    int file2Len = 2 * fileLen;
    // create a larger file under nqdir0/qdir1/qdir20/nqdir30
    DFSTestUtil.createFile(dfs, file2, file2Len, replication, 0);
    c = dfs.getContentSummary(quotaDir20);
    assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
    c = dfs.getContentSummary(quotaDir21);
    compareQuotaUsage(c, dfs, quotaDir21);
    assertEquals(c.getSpaceConsumed(), 0);
    // Reverse: Move nqdir0/qdir1/qdir20/nqdir30 to nqdir0/qdir1/qdir21/
    hasException = false;
    try {
        assertFalse(dfs.rename(dstPath, srcPath));
    } catch (DSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    // make sure no intermediate directories left by failed rename
    assertFalse(dfs.exists(srcPath));
    // directory should exist
    assertTrue(dfs.exists(dstPath));
    // verify space after the failed move
    c = dfs.getContentSummary(quotaDir20);
    assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
    c = dfs.getContentSummary(quotaDir21);
    compareQuotaUsage(c, dfs, quotaDir21);
    assertEquals(c.getSpaceConsumed(), 0);
    // Test Append :
    // verify space quota
    c = dfs.getContentSummary(quotaDir1);
    compareQuotaUsage(c, dfs, quotaDir1);
    assertEquals(c.getSpaceQuota(), 4 * fileSpace);
    // verify space before append;
    c = dfs.getContentSummary(dstPath);
    compareQuotaUsage(c, dfs, dstPath);
    assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
    OutputStream out = dfs.append(file2);
    // appending 1 fileLen should succeed
    out.write(new byte[fileLen]);
    out.close();
    // after append
    file2Len += fileLen;
    // verify space after append;
    c = dfs.getContentSummary(dstPath);
    compareQuotaUsage(c, dfs, dstPath);
    assertEquals(c.getSpaceConsumed(), 4 * fileSpace);
    // now increase the quota for quotaDir1
    dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 5 * fileSpace);
    // Now, appending more than 1 fileLen should result in an error
    out = dfs.append(file2);
    hasException = false;
    try {
        out.write(new byte[fileLen + 1024]);
        out.flush();
        out.close();
    } catch (DSQuotaExceededException e) {
        hasException = true;
        IOUtils.closeStream(out);
    }
    assertTrue(hasException);
    // after partial append
    file2Len += fileLen;
    // verify space after partial append
    c = dfs.getContentSummary(dstPath);
    compareQuotaUsage(c, dfs, dstPath);
    assertEquals(c.getSpaceConsumed(), 5 * fileSpace);
    // Test set replication :
    // first reduce the replication
    dfs.setReplication(file2, (short) (replication - 1));
    // verify that space is reduced by file2Len
    c = dfs.getContentSummary(dstPath);
    compareQuotaUsage(c, dfs, dstPath);
    assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
    // now try to increase the replication and expect an error.
    hasException = false;
    try {
        dfs.setReplication(file2, (short) (replication + 1));
    } catch (DSQuotaExceededException e) {
        hasException = true;
    }
    assertTrue(hasException);
    // verify space consumed remains unchanged.
    c = dfs.getContentSummary(dstPath);
    compareQuotaUsage(c, dfs, dstPath);
    assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
    // now increase the quota for quotaDir1 and quotaDir20
    dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace);
    dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace);
    // then increasing replication should be ok.
    dfs.setReplication(file2, (short) (replication + 1));
    // verify increase in space
    c = dfs.getContentSummary(dstPath);
    compareQuotaUsage(c, dfs, dstPath);
    assertEquals(c.getSpaceConsumed(), 5 * fileSpace + file2Len);
    // Test HDFS-2053 :
    // Create directory hdfs-2053
    final Path quotaDir2053 = new Path(parent, "hdfs-2053");
    assertTrue(dfs.mkdirs(quotaDir2053));
    // Create subdirectories /hdfs-2053/{A,B,C}
    final Path quotaDir2053_A = new Path(quotaDir2053, "A");
    assertTrue(dfs.mkdirs(quotaDir2053_A));
    final Path quotaDir2053_B = new Path(quotaDir2053, "B");
    assertTrue(dfs.mkdirs(quotaDir2053_B));
    final Path quotaDir2053_C = new Path(quotaDir2053, "C");
    assertTrue(dfs.mkdirs(quotaDir2053_C));
    // Factors to vary the sizes of test files created in each subdir.
    // The actual factors are not really important but they allow us to create
    // identifiable file sizes per subdir, which helps during debugging.
    int sizeFactorA = 1;
    int sizeFactorB = 2;
    int sizeFactorC = 4;
    // Set space quota for subdirectory C
    dfs.setQuota(quotaDir2053_C, HdfsConstants.QUOTA_DONT_SET, (sizeFactorC + 1) * fileSpace);
    c = dfs.getContentSummary(quotaDir2053_C);
    compareQuotaUsage(c, dfs, quotaDir2053_C);
    assertEquals(c.getSpaceQuota(), (sizeFactorC + 1) * fileSpace);
    // Create a file under subdirectory A
    DFSTestUtil.createFile(dfs, new Path(quotaDir2053_A, "fileA"), sizeFactorA * fileLen, replication, 0);
    c = dfs.getContentSummary(quotaDir2053_A);
    compareQuotaUsage(c, dfs, quotaDir2053_A);
    assertEquals(c.getSpaceConsumed(), sizeFactorA * fileSpace);
    // Create a file under subdirectory B
    DFSTestUtil.createFile(dfs, new Path(quotaDir2053_B, "fileB"), sizeFactorB * fileLen, replication, 0);
    c = dfs.getContentSummary(quotaDir2053_B);
    compareQuotaUsage(c, dfs, quotaDir2053_B);
    assertEquals(c.getSpaceConsumed(), sizeFactorB * fileSpace);
    // Create a file under subdirectory C (which has a space quota)
    DFSTestUtil.createFile(dfs, new Path(quotaDir2053_C, "fileC"), sizeFactorC * fileLen, replication, 0);
    c = dfs.getContentSummary(quotaDir2053_C);
    compareQuotaUsage(c, dfs, quotaDir2053_C);
    assertEquals(c.getSpaceConsumed(), sizeFactorC * fileSpace);
    // Check space consumed for /hdfs-2053
    c = dfs.getContentSummary(quotaDir2053);
    compareQuotaUsage(c, dfs, quotaDir2053);
    assertEquals(c.getSpaceConsumed(), (sizeFactorA + sizeFactorB + sizeFactorC) * fileSpace);
}
Also used: Path(org.apache.hadoop.fs.Path), ContentSummary(org.apache.hadoop.fs.ContentSummary), DSQuotaExceededException(org.apache.hadoop.hdfs.protocol.DSQuotaExceededException), ByteArrayOutputStream(java.io.ByteArrayOutputStream), FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream), OutputStream(java.io.OutputStream), Test(org.junit.Test)
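
The arithmetic behind all of the assertions above is that raw consumed space is file length times replication factor, so a 1024-byte file at replication 3 charges 3072 bytes against a space quota. Below is a compact sketch of the enforcement pattern the test exercises repeatedly; SpaceQuotaProbe and the "probe" file name are hypothetical.

import java.io.IOException;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public final class SpaceQuotaProbe {
    /** Returns true if writing payload under dir trips the space quota. */
    static boolean tripsQuota(DistributedFileSystem dfs, Path dir,
                              long quotaBytes, byte[] payload) throws IOException {
        dfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, quotaBytes);
        try (FSDataOutputStream out = dfs.create(new Path(dir, "probe"))) {
            out.write(payload); // charged at payload.length * replication
            return false;
        } catch (DSQuotaExceededException e) {
            return true; // the same exception the test above catches
        }
    }
}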

Aggregations

ContentSummary (org.apache.hadoop.fs.ContentSummary): 61
Path (org.apache.hadoop.fs.Path): 42
Test (org.junit.Test): 38
FileSystem (org.apache.hadoop.fs.FileSystem): 10
IOException (java.io.IOException): 9
Configuration (org.apache.hadoop.conf.Configuration): 8
ArrayList (java.util.ArrayList): 6
OutputStream (java.io.OutputStream): 5
URI (java.net.URI): 5
DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException): 5
QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException): 5
WebHdfsFileSystem (org.apache.hadoop.hdfs.web.WebHdfsFileSystem): 5
JobConf (org.apache.hadoop.mapred.JobConf): 5
HttpURLConnection (java.net.HttpURLConnection): 4
HashMap (java.util.HashMap): 4
Properties (java.util.Properties): 4
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 4
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 4
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException): 4
FileNotFoundException (java.io.FileNotFoundException): 3