Use of org.apache.hadoop.hdfs.protocol.DSQuotaExceededException in project hadoop by apache.
Class TestQuota, method testQuotaCommands.
/** Test quota-related commands:
* setQuota, clrQuota, setSpaceQuota, clrSpaceQuota, and count
*/
@Test
public void testQuotaCommands() throws Exception {
DFSAdmin admin = new DFSAdmin(conf);
final Path dir = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
assertTrue(dfs.mkdirs(dir));
final int fileLen = 1024;
final short replication = 5;
final long spaceQuota = fileLen * replication * 15 / 8;
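// 1024 * 5 * 15 / 8 = 9600 bytes: enough room for one 1024-byte file at replication 5 (5120 bytes), but not for a second one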
// 1: create a directory test and set its quota to be 3
final Path parent = new Path(dir, "test");
assertTrue(dfs.mkdirs(parent));
String[] args = new String[] { "-setQuota", "3", parent.toString() };
runCommand(admin, args, false);
// try setting space quota with a 'binary prefix'
runCommand(admin, false, "-setSpaceQuota", "2t", parent.toString());
assertEquals(2L << 40, dfs.getContentSummary(parent).getSpaceQuota());
// set the disk space quota to spaceQuota
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota), parent.toString());
// 2: create directory /test/data0
final Path childDir0 = new Path(parent, "data0");
assertTrue(dfs.mkdirs(childDir0));
// 3: create a file /test/datafile0
final Path childFile0 = new Path(parent, "datafile0");
DFSTestUtil.createFile(dfs, childFile0, fileLen, replication, 0);
// 4: count -q /test
ContentSummary c = dfs.getContentSummary(parent);
compareQuotaUsage(c, dfs, parent);
assertEquals(c.getFileCount() + c.getDirectoryCount(), 3);
assertEquals(c.getQuota(), 3);
assertEquals(c.getSpaceConsumed(), fileLen * replication);
assertEquals(c.getSpaceQuota(), spaceQuota);
// 5: count -q /test/data0
c = dfs.getContentSummary(childDir0);
compareQuotaUsage(c, dfs, childDir0);
assertEquals(c.getFileCount() + c.getDirectoryCount(), 1);
assertEquals(c.getQuota(), -1);
// check disk space consumed
c = dfs.getContentSummary(parent);
compareQuotaUsage(c, dfs, parent);
assertEquals(c.getSpaceConsumed(), fileLen * replication);
// 6: create a directory /test/data1
final Path childDir1 = new Path(parent, "data1");
boolean hasException = false;
try {
assertFalse(dfs.mkdirs(childDir1));
} catch (QuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
OutputStream fout;
// 7: create a file /test/datafile1
final Path childFile1 = new Path(parent, "datafile1");
hasException = false;
try {
fout = dfs.create(childFile1);
} catch (QuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// 8: clear quota /test
runCommand(admin, new String[] { "-clrQuota", parent.toString() }, false);
c = dfs.getContentSummary(parent);
compareQuotaUsage(c, dfs, parent);
assertEquals(c.getQuota(), -1);
assertEquals(c.getSpaceQuota(), spaceQuota);
// 9: clear quota /test/data0
runCommand(admin, new String[] { "-clrQuota", childDir0.toString() }, false);
c = dfs.getContentSummary(childDir0);
compareQuotaUsage(c, dfs, childDir0);
assertEquals(c.getQuota(), -1);
// 10: create a file /test/datafile1
fout = dfs.create(childFile1, replication);
// 10.s: but writing fileLen bytes should result in a quota exception
try {
fout.write(new byte[fileLen]);
fout.close();
Assert.fail();
} catch (QuotaExceededException e) {
IOUtils.closeStream(fout);
}
//delete the file
dfs.delete(childFile1, false);
// 9.s: clear diskspace quota
runCommand(admin, false, "-clrSpaceQuota", parent.toString());
c = dfs.getContentSummary(parent);
compareQuotaUsage(c, dfs, parent);
assertEquals(c.getQuota(), -1);
assertEquals(c.getSpaceQuota(), -1);
// now creating childFile1 should succeed
DFSTestUtil.createFile(dfs, childFile1, fileLen, replication, 0);
// 11: set the quota of /test to be 1
// HADOOP-5872 - we can set quota even if it is immediately violated
args = new String[] { "-setQuota", "1", parent.toString() };
runCommand(admin, args, false);
// for space quota
runCommand(admin, false, "-setSpaceQuota", Integer.toString(fileLen), args[2]);
// 12: set the quota of /test/data0 to be 1
args = new String[] { "-setQuota", "1", childDir0.toString() };
runCommand(admin, args, false);
// 13: not able to create a directory under data0
hasException = false;
try {
assertFalse(dfs.mkdirs(new Path(childDir0, "in")));
} catch (QuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
c = dfs.getContentSummary(childDir0);
compareQuotaUsage(c, dfs, childDir0);
assertEquals(c.getDirectoryCount() + c.getFileCount(), 1);
assertEquals(c.getQuota(), 1);
// 14a: set quota on a non-existent directory
Path nonExistentPath = new Path(dir, "test1");
assertFalse(dfs.exists(nonExistentPath));
args = new String[] { "-setQuota", "1", nonExistentPath.toString() };
runCommand(admin, args, true);
// for space quota
runCommand(admin, true, "-setSpaceQuota", "1g", nonExistentPath.toString());
// 14b: set quota on a file
assertTrue(dfs.isFile(childFile0));
args[1] = childFile0.toString();
runCommand(admin, args, true);
// same for space quota
runCommand(admin, true, "-setSpaceQuota", "1t", args[1]);
// 15a: clear quota on a file
args[0] = "-clrQuota";
runCommand(admin, args, true);
runCommand(admin, true, "-clrSpaceQuota", args[1]);
// 15b: clear quota on a non-existent directory
args[1] = nonExistentPath.toString();
runCommand(admin, args, true);
runCommand(admin, true, "-clrSpaceQuota", args[1]);
// 16a: set the quota of /test to be 0
args = new String[] { "-setQuota", "0", parent.toString() };
runCommand(admin, args, true);
runCommand(admin, false, "-setSpaceQuota", "0", args[2]);
// 16b: set the quota of /test to be -1
args[1] = "-1";
runCommand(admin, args, true);
runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
// 16c: set the quota of /test to be Long.MAX_VALUE+1
args[1] = String.valueOf(Long.MAX_VALUE + 1L);
runCommand(admin, args, true);
runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
// 16d: set the quota of /test to be a non integer
args[1] = "33aa1.5";
runCommand(admin, args, true);
runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
// 16e: set space quota with a value larger than Long.MAX_VALUE
runCommand(admin, true, "-setSpaceQuota", (Long.MAX_VALUE / 1024 / 1024 + 1024) + "m", args[2]);
// 17: setQuota by a non-administrator
final String username = "userxx";
UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, new String[] { "groupyy" });
// need final ref for doAs block
final String[] args2 = args.clone();
ugi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
assertEquals("Not running as new user", username, UserGroupInformation.getCurrentUser().getShortUserName());
DFSAdmin userAdmin = new DFSAdmin(conf);
args2[1] = "100";
runCommand(userAdmin, args2, true);
runCommand(userAdmin, true, "-setSpaceQuota", "1g", args2[2]);
// 18: clrQuota by a non-administrator
String[] args3 = new String[] { "-clrQuota", parent.toString() };
runCommand(userAdmin, args3, true);
runCommand(userAdmin, true, "-clrSpaceQuota", args3[1]);
return null;
}
});
// 19: clrQuota on the root directory ("/") should fail
runCommand(admin, true, "-clrQuota", "/");
// 20: setQuota on the root directory ("/") should succeed
runCommand(admin, false, "-setQuota", "1000000", "/");
runCommand(admin, true, "-clrQuota", "/");
runCommand(admin, false, "-clrSpaceQuota", "/");
runCommand(admin, new String[] { "-clrQuota", parent.toString() }, false);
runCommand(admin, false, "-clrSpaceQuota", parent.toString());
// 2: create directory /test/data2
final Path childDir2 = new Path(parent, "data2");
assertTrue(dfs.mkdirs(childDir2));
final Path childFile2 = new Path(childDir2, "datafile2");
final Path childFile3 = new Path(childDir2, "datafile3");
final long spaceQuota2 = DEFAULT_BLOCK_SIZE * replication;
final long fileLen2 = DEFAULT_BLOCK_SIZE;
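// spaceQuota2 equals the raw usage of a single fileLen2-byte file at this replication, so a second identical file would double the usage and exceed it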
// set space quota to a really low value
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());
// clear space quota
runCommand(admin, false, "-clrSpaceQuota", childDir2.toString());
// create a file that is greater than the size of space quota
DFSTestUtil.createFile(dfs, childFile2, fileLen2, replication, 0);
// now set space quota again. This should succeed
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());
hasException = false;
try {
DFSTestUtil.createFile(dfs, childFile3, fileLen2, replication, 0);
} catch (DSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// now test the same for root
final Path childFile4 = new Path(dir, "datafile2");
final Path childFile5 = new Path(dir, "datafile3");
runCommand(admin, true, "-clrQuota", "/");
runCommand(admin, false, "-clrSpaceQuota", "/");
// set space quota to a really low value
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");
runCommand(admin, false, "-clrSpaceQuota", "/");
DFSTestUtil.createFile(dfs, childFile4, fileLen2, replication, 0);
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");
hasException = false;
try {
DFSTestUtil.createFile(dfs, childFile5, fileLen2, replication, 0);
} catch (DSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
assertEquals(5, cluster.getNamesystem().getFSDirectory().getYieldCount());
/*
* clear space quota for root, otherwise other tests may fail due to
* insufficient space quota.
*/
runCommand(admin, false, "-clrSpaceQuota", "/");
}
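The test above drives quotas through DFSAdmin command strings. The same limits can be managed programmatically; the following is a minimal sketch (not taken from the Hadoop test suite; the filesystem URI, paths, and sizes are placeholder assumptions) that uses DistributedFileSystem.setQuota and ContentSummary, and catches the DSQuotaExceededException the NameNode raises when a write would exceed the storage space quota. QUOTA_DONT_SET and QUOTA_RESET mirror what -setSpaceQuota and -clrSpaceQuota send to the NameNode.
import java.io.IOException;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.IOUtils;

public class QuotaSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("quotas are an HDFS feature");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    Path dir = new Path("/quota-demo"); // placeholder path
    dfs.mkdirs(dir);

    // equivalent of "-setSpaceQuota 8192": leave the namespace quota alone,
    // cap raw storage (bytes * replication) at 8 KB
    dfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, 8192L);

    ContentSummary c = dfs.getContentSummary(dir);
    System.out.println("space quota=" + c.getSpaceQuota()
        + " consumed=" + c.getSpaceConsumed());

    OutputStream out = null;
    try {
      out = dfs.create(new Path(dir, "big"), (short) 3);
      out.write(new byte[16 * 1024]); // far more than the quota allows at replication 3
      out.close();
    } catch (DSQuotaExceededException e) {
      // raised when block allocation would push the directory past its space quota
      IOUtils.closeStream(out);
      System.out.println("write rejected: " + e.getMessage());
    }

    // equivalent of "-clrSpaceQuota": QUOTA_RESET removes the limit
    dfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
  }
}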
Use of org.apache.hadoop.hdfs.protocol.DSQuotaExceededException in project hadoop by apache.
Class TestDiskspaceQuotaUpdate, method testAppendOverStorageQuota.
/**
* Test that an append over the storage quota does not mark the file as under construction (UC) or leave a dangling lease
*/
@Test(timeout = 60000)
public void testAppendOverStorageQuota() throws Exception {
final Path dir = getParent(GenericTestUtils.getMethodName());
final Path file = new Path(dir, "file");
// create partial block file
getDFS().mkdirs(dir);
DFSTestUtil.createFile(getDFS(), file, BLOCKSIZE / 2, REPLICATION, seed);
// lower quota to cause exception when appending to partial block
getDFS().setQuota(dir, Long.MAX_VALUE - 1, 1);
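// namespace quota stays effectively unlimited; the 1-byte storage space quota is already exceeded by the existing half block, so any further block allocation must fail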
final INodeDirectory dirNode = getFSDirectory().getINode4Write(dir.toString()).asDirectory();
final long spaceUsed = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed().getStorageSpace();
try {
DFSTestUtil.appendFile(getDFS(), file, BLOCKSIZE);
Assert.fail("append didn't fail");
} catch (DSQuotaExceededException e) {
// ignore
}
LeaseManager lm = cluster.getNamesystem().getLeaseManager();
// check that the file exists, isn't UC, and has no dangling lease
INodeFile inode = getFSDirectory().getINode(file.toString()).asFile();
Assert.assertNotNull(inode);
Assert.assertFalse("should not be UC", inode.isUnderConstruction());
Assert.assertNull("should not have a lease", lm.getLease(inode));
// make sure the quota usage is unchanged
final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed().getStorageSpace();
assertEquals(spaceUsed, newSpaceUsed);
// make sure edits aren't corrupted
getDFS().recoverLease(file);
cluster.restartNameNode(true);
}
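For callers that want to anticipate this failure rather than only catch it, a rough pre-flight check can mirror the quota math: appending bytes grows raw usage by roughly bytes * replication. The sketch below rests on that assumption (the helper name is hypothetical, and the NameNode's own block-based accounting is authoritative, which is why the append above fails even before a full block is written).
import java.io.IOException;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class QuotaHeadroom {
  private QuotaHeadroom() {
  }

  /**
   * Returns true if appending bytesToAppend at the given replication looks
   * like it fits under the directory's storage space quota.
   */
  static boolean fitsSpaceQuota(FileSystem fs, Path dir, long bytesToAppend,
      short replication) throws IOException {
    ContentSummary c = fs.getContentSummary(dir);
    long quota = c.getSpaceQuota();
    if (quota < 0) {
      return true; // -1 means no storage space quota is set
    }
    long needed = bytesToAppend * replication;
    return c.getSpaceConsumed() + needed <= quota;
  }
}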
Use of org.apache.hadoop.hdfs.protocol.DSQuotaExceededException in project hadoop by apache.
Class TestQuotaByStorageType, method testStorageSpaceQuotaWithRepFactor.
/**
* Tests that creating a file with a lower replication factor succeeds when
* it keeps storage space usage within the quota.
*/
@Test(timeout = 30000)
public void testStorageSpaceQuotaWithRepFactor() throws IOException {
final Path testDir = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
assertTrue(dfs.mkdirs(testDir));
final long storageSpaceQuota = BLOCKSIZE * 2;
/* set policy to HOT */
dfs.setStoragePolicy(testDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
/* set space quota */
dfs.setQuota(testDir, HdfsConstants.QUOTA_DONT_SET, storageSpaceQuota);
/* init vars */
Path createdFile = null;
final long fileLen = BLOCKSIZE;
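// at REPLICATION (3) replicas the file charges 3 * BLOCKSIZE of raw storage, over the 2 * BLOCKSIZE quota; at 2 replicas it charges exactly 2 * BLOCKSIZE and fits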
try {
/* create one file with 3 replicas */
createdFile = new Path(testDir, "file1.data");
DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16, fileLen, BLOCKSIZE, REPLICATION, seed);
fail("should fail on DSQuotaExceededException");
} catch (DSQuotaExceededException e) {
LOG.info("Got expected exception ", e);
assertThat(e.toString(), is(allOf(containsString("DiskSpace quota"), containsString(testDir.toString()))));
}
/* try creating file again with 2 replicas */
createdFile = new Path(testDir, "file2.data");
DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16, fileLen, BLOCKSIZE, (short) 2, seed);
assertTrue(dfs.exists(createdFile));
assertTrue(dfs.isFile(createdFile));
}
Use of org.apache.hadoop.hdfs.protocol.DSQuotaExceededException in project hadoop by apache.
Class TestQuota, method testSpaceCommands.
/**
* Test HDFS operations that change the disk space consumed by a directory tree,
* namely create, rename, delete, append, and setReplication.
*
* This is based on testNamespaceCommands() above.
*/
@Test
public void testSpaceCommands() throws Exception {
final Path parent = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
assertTrue(dfs.mkdirs(parent));
int fileLen = 1024;
short replication = 3;
int fileSpace = fileLen * replication;
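// each fileLen-byte file at this replication charges fileSpace = 1024 * 3 = 3072 bytes of raw storage; the quotas below are all multiples of this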
// create directory nqdir0/qdir1/qdir20/nqdir30
assertTrue(dfs.mkdirs(new Path(parent, "nqdir0/qdir1/qdir20/nqdir30")));
// set the quota of nqdir0/qdir1 to 4 * fileSpace
final Path quotaDir1 = new Path(parent, "nqdir0/qdir1");
dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 4 * fileSpace);
ContentSummary c = dfs.getContentSummary(quotaDir1);
compareQuotaUsage(c, dfs, quotaDir1);
assertEquals(c.getSpaceQuota(), 4 * fileSpace);
// set the quota of nqdir0/qdir1/qdir20 to 6 * fileSpace
final Path quotaDir20 = new Path(parent, "nqdir0/qdir1/qdir20");
dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 6 * fileSpace);
c = dfs.getContentSummary(quotaDir20);
compareQuotaUsage(c, dfs, quotaDir20);
assertEquals(c.getSpaceQuota(), 6 * fileSpace);
// Create nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace
final Path quotaDir21 = new Path(parent, "nqdir0/qdir1/qdir21");
assertTrue(dfs.mkdirs(quotaDir21));
dfs.setQuota(quotaDir21, HdfsConstants.QUOTA_DONT_SET, 2 * fileSpace);
c = dfs.getContentSummary(quotaDir21);
compareQuotaUsage(c, dfs, quotaDir21);
assertEquals(c.getSpaceQuota(), 2 * fileSpace);
// 5: Create directory nqdir0/qdir1/qdir21/nqdir32
Path tempPath = new Path(quotaDir21, "nqdir32");
assertTrue(dfs.mkdirs(tempPath));
// create a file under nqdir32/fileDir
DFSTestUtil.createFile(dfs, new Path(tempPath, "fileDir/file1"), fileLen, replication, 0);
c = dfs.getContentSummary(quotaDir21);
compareQuotaUsage(c, dfs, quotaDir21);
assertEquals(c.getSpaceConsumed(), fileSpace);
// Create a larger file under nqdir0/qdir1/qdir21/nqdir33/
boolean hasException = false;
try {
DFSTestUtil.createFile(dfs, new Path(quotaDir21, "nqdir33/file2"), 2 * fileLen, replication, 0);
} catch (DSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// delete nqdir33
assertTrue(dfs.delete(new Path(quotaDir21, "nqdir33"), true));
c = dfs.getContentSummary(quotaDir21);
compareQuotaUsage(c, dfs, quotaDir21);
assertEquals(c.getSpaceConsumed(), fileSpace);
assertEquals(c.getSpaceQuota(), 2 * fileSpace);
// Verify space before the move:
c = dfs.getContentSummary(quotaDir20);
compareQuotaUsage(c, dfs, quotaDir20);
assertEquals(c.getSpaceConsumed(), 0);
// Move nqdir0/qdir1/qdir21/nqdir32 to nqdir0/qdir1/qdir20/nqdir30
Path dstPath = new Path(quotaDir20, "nqdir30");
Path srcPath = new Path(quotaDir21, "nqdir32");
assertTrue(dfs.rename(srcPath, dstPath));
// verify space after the move
c = dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceConsumed(), fileSpace);
// verify space for its parent
c = dfs.getContentSummary(quotaDir1);
compareQuotaUsage(c, dfs, quotaDir1);
assertEquals(c.getSpaceConsumed(), fileSpace);
// verify space for source for the move
c = dfs.getContentSummary(quotaDir21);
compareQuotaUsage(c, dfs, quotaDir21);
assertEquals(c.getSpaceConsumed(), 0);
final Path file2 = new Path(dstPath, "fileDir/file2");
int file2Len = 2 * fileLen;
// create a larger file under nqdir0/qdir1/qdir20/nqdir30
DFSTestUtil.createFile(dfs, file2, file2Len, replication, 0);
c = dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
c = dfs.getContentSummary(quotaDir21);
compareQuotaUsage(c, dfs, quotaDir21);
assertEquals(c.getSpaceConsumed(), 0);
// Reverse: Move nqdir0/qdir1/qdir20/nqdir30 to nqdir0/qdir1/qdir21/
hasException = false;
try {
assertFalse(dfs.rename(dstPath, srcPath));
} catch (DSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// make sure no intermediate directories left by failed rename
assertFalse(dfs.exists(srcPath));
// directory should exist
assertTrue(dfs.exists(dstPath));
// verify space after the failed move
c = dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
c = dfs.getContentSummary(quotaDir21);
compareQuotaUsage(c, dfs, quotaDir21);
assertEquals(c.getSpaceConsumed(), 0);
// Test Append :
// verify space quota
c = dfs.getContentSummary(quotaDir1);
compareQuotaUsage(c, dfs, quotaDir1);
assertEquals(c.getSpaceQuota(), 4 * fileSpace);
// verify space before append;
c = dfs.getContentSummary(dstPath);
compareQuotaUsage(c, dfs, dstPath);
assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
OutputStream out = dfs.append(file2);
// appending 1 fileLen should succeed
out.write(new byte[fileLen]);
out.close();
// after append
file2Len += fileLen;
// verify space after append;
c = dfs.getContentSummary(dstPath);
compareQuotaUsage(c, dfs, dstPath);
assertEquals(c.getSpaceConsumed(), 4 * fileSpace);
// now increase the quota for quotaDir1
dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 5 * fileSpace);
// Now, appending more than 1 fileLen should result in an error
out = dfs.append(file2);
hasException = false;
try {
out.write(new byte[fileLen + 1024]);
out.flush();
out.close();
} catch (DSQuotaExceededException e) {
hasException = true;
IOUtils.closeStream(out);
}
assertTrue(hasException);
// after partial append
file2Len += fileLen;
// verify space after partial append
c = dfs.getContentSummary(dstPath);
compareQuotaUsage(c, dfs, dstPath);
assertEquals(c.getSpaceConsumed(), 5 * fileSpace);
// Test set replication :
// first reduce the replication
dfs.setReplication(file2, (short) (replication - 1));
// verify that space is reduced by file2Len
c = dfs.getContentSummary(dstPath);
compareQuotaUsage(c, dfs, dstPath);
assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
// now try to increase the replication and expect an error.
hasException = false;
try {
dfs.setReplication(file2, (short) (replication + 1));
} catch (DSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// verify space consumed remains unchanged.
c = dfs.getContentSummary(dstPath);
compareQuotaUsage(c, dfs, dstPath);
assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
// now increase the quota for quotaDir1 and quotaDir20
dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace);
dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 10 * fileSpace);
// then increasing replication should be ok.
dfs.setReplication(file2, (short) (replication + 1));
// verify increase in space
c = dfs.getContentSummary(dstPath);
compareQuotaUsage(c, dfs, dstPath);
assertEquals(c.getSpaceConsumed(), 5 * fileSpace + file2Len);
// Test HDFS-2053 :
// Create directory hdfs-2053
final Path quotaDir2053 = new Path(parent, "hdfs-2053");
assertTrue(dfs.mkdirs(quotaDir2053));
// Create subdirectories /hdfs-2053/{A,B,C}
final Path quotaDir2053_A = new Path(quotaDir2053, "A");
assertTrue(dfs.mkdirs(quotaDir2053_A));
final Path quotaDir2053_B = new Path(quotaDir2053, "B");
assertTrue(dfs.mkdirs(quotaDir2053_B));
final Path quotaDir2053_C = new Path(quotaDir2053, "C");
assertTrue(dfs.mkdirs(quotaDir2053_C));
// Factors to vary the sizes of test files created in each subdir.
// The actual factors are not really important but they allow us to create
// identifiable file sizes per subdir, which helps during debugging.
int sizeFactorA = 1;
int sizeFactorB = 2;
int sizeFactorC = 4;
// Set space quota for subdirectory C
dfs.setQuota(quotaDir2053_C, HdfsConstants.QUOTA_DONT_SET, (sizeFactorC + 1) * fileSpace);
c = dfs.getContentSummary(quotaDir2053_C);
compareQuotaUsage(c, dfs, quotaDir2053_C);
assertEquals(c.getSpaceQuota(), (sizeFactorC + 1) * fileSpace);
// Create a file under subdirectory A
DFSTestUtil.createFile(dfs, new Path(quotaDir2053_A, "fileA"), sizeFactorA * fileLen, replication, 0);
c = dfs.getContentSummary(quotaDir2053_A);
compareQuotaUsage(c, dfs, quotaDir2053_A);
assertEquals(c.getSpaceConsumed(), sizeFactorA * fileSpace);
// Create a file under subdirectory B
DFSTestUtil.createFile(dfs, new Path(quotaDir2053_B, "fileB"), sizeFactorB * fileLen, replication, 0);
c = dfs.getContentSummary(quotaDir2053_B);
compareQuotaUsage(c, dfs, quotaDir2053_B);
assertEquals(c.getSpaceConsumed(), sizeFactorB * fileSpace);
// Create a file under subdirectory C (which has a space quota)
DFSTestUtil.createFile(dfs, new Path(quotaDir2053_C, "fileC"), sizeFactorC * fileLen, replication, 0);
c = dfs.getContentSummary(quotaDir2053_C);
compareQuotaUsage(c, dfs, quotaDir2053_C);
assertEquals(c.getSpaceConsumed(), sizeFactorC * fileSpace);
// Check space consumed for /hdfs-2053
c = dfs.getContentSummary(quotaDir2053);
compareQuotaUsage(c, dfs, quotaDir2053);
assertEquals(c.getSpaceConsumed(), (sizeFactorA + sizeFactorB + sizeFactorC) * fileSpace);
}
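The setReplication steps at the end of this test show that raising replication is itself charged against the space quota: each added replica costs roughly another fileLength bytes of raw storage. Below is a minimal sketch of that headroom check (the helper and its parameters are illustrative, not a Hadoop API; the NameNode's own check remains authoritative).
import java.io.IOException;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;

public final class SafeSetReplication {
  private SafeSetReplication() {
  }

  /** Raise a file's replication only if the quota'd ancestor directory has headroom. */
  static boolean tryRaiseReplication(FileSystem fs, Path file, Path quotaDir,
      short newReplication) throws IOException {
    FileStatus st = fs.getFileStatus(file);
    ContentSummary c = fs.getContentSummary(quotaDir);
    long extra = st.getLen() * (newReplication - st.getReplication());
    if (c.getSpaceQuota() >= 0 && extra > 0
        && c.getSpaceConsumed() + extra > c.getSpaceQuota()) {
      return false; // would exceed the space quota, as the test above expects
    }
    try {
      return fs.setReplication(file, newReplication);
    } catch (DSQuotaExceededException e) {
      return false; // the NameNode's block-level accounting is the final word
    }
  }
}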