Search in sources:

Example 1 with QuotaByStorageTypeExceededException

Use of org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException in the Apache Hadoop project.

From class TestDiskspaceQuotaUpdate, method testAppendOverTypeQuota:

/**
   * Verifies that an append rejected by a storage-type quota leaves the file
   * closed (not under construction) and without a dangling lease, and that
   * quota accounting and the edit log remain intact.
   */
@Test(timeout = 60000)
public void testAppendOverTypeQuota() throws Exception {
    final Path parentDir = getParent(GenericTestUtils.getMethodName());
    final Path targetFile = new Path(parentDir, "file");
    // Create the directory, pin its storage policy, then write a half-block
    // file so the append below needs fresh SSD space.
    getDFS().mkdirs(parentDir);
    getDFS().setStoragePolicy(parentDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    DFSTestUtil.createFile(getDFS(), targetFile, BLOCKSIZE / 2, REPLICATION, seed);
    // A 1-byte SSD quota guarantees any further SSD allocation is rejected.
    getDFS().setQuotaByStorageType(parentDir, StorageType.SSD, 1L);
    final INodeDirectory quotaRoot =
        getFSDirectory().getINode4Write(parentDir.toString()).asDirectory();
    final long usageBefore =
        quotaRoot.getDirectoryWithQuotaFeature().getSpaceConsumed().getStorageSpace();
    try {
        DFSTestUtil.appendFile(getDFS(), targetFile, BLOCKSIZE);
        Assert.fail("append didn't fail");
    } catch (QuotaByStorageTypeExceededException ignored) {
        // Expected: the SSD quota must reject the append.
    }
    // The failed append must not leave the file open or holding a lease.
    final LeaseManager leaseManager = cluster.getNamesystem().getLeaseManager();
    final INodeFile fileNode = getFSDirectory().getINode(targetFile.toString()).asFile();
    Assert.assertNotNull(fileNode);
    Assert.assertFalse("should not be UC", fileNode.isUnderConstruction());
    Assert.assertNull("should not have a lease", leaseManager.getLease(fileNode));
    // Quota usage must be unchanged by the rejected append.
    final long usageAfter =
        quotaRoot.getDirectoryWithQuotaFeature().getSpaceConsumed().getStorageSpace();
    assertEquals(usageBefore, usageAfter);
    // Recover the lease and restart the NameNode to prove the edits replay cleanly.
    getDFS().recoverLease(targetFile);
    cluster.restartNameNode(true);
}
Also used: Path (org.apache.hadoop.fs.Path), QuotaByStorageTypeExceededException (org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException), Test (org.junit.Test)

Example 2 with QuotaByStorageTypeExceededException

Use of org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException in the Apache Hadoop project.

From class TestQuota, method testQuotaByStorageType:

/**
   * Tests quota accounting by storage type: a create/delete cycle must not
   * change type usage, and a type quota smaller than a new file must cause
   * its creation to be rejected.
   */
@Test
public void testQuotaByStorageType() throws Exception {
    final Path testRoot = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
    assertTrue(dfs.mkdirs(testRoot));
    final int fileLen = 1024;
    final short replication = 3;
    // Space consumed by one file across all replicas.
    final int fileSpace = fileLen * replication;
    final Path quotaDir = new Path(testRoot, "nqdir0/qdir1/qdir20");
    assertTrue(dfs.mkdirs(quotaDir));
    dfs.setQuota(quotaDir, HdfsConstants.QUOTA_DONT_SET, 6 * fileSpace);
    // Create then delete a file: storage-type usage tracked by
    // DirectoryWithQuotaFeature must end up unchanged, with or without a
    // storage policy on the directory.
    final Path filePath = new Path(quotaDir, "fileDir/file1");
    DFSTestUtil.createFile(dfs, filePath, fileLen * 3, replication, 0);
    dfs.delete(filePath, false);
    dfs.setStoragePolicy(quotaDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
    // Type quota (2 files' worth) is below the 3-file-sized creation below.
    dfs.setQuotaByStorageType(quotaDir, StorageType.DEFAULT, 2 * fileSpace);
    boolean quotaExceeded = false;
    try {
        DFSTestUtil.createFile(dfs, filePath, fileLen * 3, replication, 0);
    } catch (QuotaByStorageTypeExceededException e) {
        // Expected: creation exceeds the DEFAULT storage-type quota.
        quotaExceeded = true;
    }
    assertTrue(quotaExceeded);
    // Clean up and relax the quota so later tests are unaffected.
    dfs.delete(filePath, false);
    dfs.setQuotaByStorageType(quotaDir, StorageType.DEFAULT, 6 * fileSpace);
}
Also used: Path (org.apache.hadoop.fs.Path), QuotaByStorageTypeExceededException (org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException), Test (org.junit.Test)

Example 3 with QuotaByStorageTypeExceededException

Use of org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException in the Apache Hadoop project.

From class TestQuotaByStorageType, method testStorageSpaceQuotaWithWarmPolicy:

/**
   * Tests the per-storage-type space quota when the directory's policy is
   * switched to WARM after DISK space has already been consumed under HOT.
   */
@Test
public void testStorageSpaceQuotaWithWarmPolicy() throws IOException {
    final Path testDir = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
    assertTrue(dfs.mkdirs(testDir));
    // Start with HOT so the first file's replicas all land on DISK.
    dfs.setStoragePolicy(testDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
    // Overall space quota is generous; the DISK type quota (one block) is the
    // constraint under test.
    final long storageSpaceQuota = BLOCKSIZE * 6;
    final long storageTypeSpaceQuota = BLOCKSIZE * 1;
    dfs.setQuota(testDir, HdfsConstants.QUOTA_DONT_SET, storageSpaceQuota);
    final long fileLen = BLOCKSIZE;
    // First file: REPLICATION * BLOCKSIZE is consumed on DISK due to HOT.
    final Path firstFile = new Path(testDir, "file1.data");
    DFSTestUtil.createFile(dfs, firstFile, BLOCKSIZE / 16, fileLen, BLOCKSIZE, REPLICATION, seed);
    assertTrue(dfs.exists(firstFile));
    assertTrue(dfs.isFile(firstFile));
    // Now cap DISK at one block and switch the directory to WARM.
    dfs.setQuotaByStorageType(testDir, StorageType.DISK, storageTypeSpaceQuota);
    dfs.setStoragePolicy(testDir, HdfsConstants.WARM_STORAGE_POLICY_NAME);
    try {
        final Path secondFile = new Path(testDir, "file2.data");
        // Must fail: DISK quota is 1 block but DISK already holds 3 blocks
        // from the first file's creation.
        DFSTestUtil.createFile(dfs, secondFile, BLOCKSIZE / 16, fileLen, BLOCKSIZE, REPLICATION, seed);
        fail("should fail on QuotaByStorageTypeExceededException");
    } catch (QuotaByStorageTypeExceededException e) {
        LOG.info("Got expected exception ", e);
        // The message must name the storage type and the quota'd path.
        assertThat(e.toString(), is(allOf(containsString("Quota by storage type"), containsString("DISK on path"), containsString(testDir.toString()))));
    }
}
Also used: Path (org.apache.hadoop.fs.Path), QuotaByStorageTypeExceededException (org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException), Test (org.junit.Test)

Aggregations

Path (org.apache.hadoop.fs.Path): 3 usages; QuotaByStorageTypeExceededException (org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException): 3 usages; Test (org.junit.Test): 3 usages