Use of org.apache.hadoop.fs.ContentSummary in the Apache Hadoop project — example from the class TestQuota, method testBlockAllocationAdjustsUsageConservatively.
/**
 * Violate a space quota using files of size < 1 block. Test that block
 * allocation conservatively assumes that for quota checking the entire
 * space of the block is used.
 */
@Test
public void testBlockAllocationAdjustsUsageConservatively() throws Exception {
final Path parent = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
assertTrue(dfs.mkdirs(parent));
DFSAdmin admin = new DFSAdmin(conf);
Path dir = new Path(parent, "test");
Path file1 = new Path(parent, "test/test1");
Path file2 = new Path(parent, "test/test2");
boolean exceededQuota = false;
// total space usage including repl. (3 replicas * one block)
final int QUOTA_SIZE = 3 * DEFAULT_BLOCK_SIZE;
// each file is half a block, so two files would fit if accounting were exact
final int FILE_SIZE = DEFAULT_BLOCK_SIZE / 2;
ContentSummary c;
// Create the directory and set the quota
assertTrue(dfs.mkdirs(dir));
runCommand(admin, false, "-setSpaceQuota", Integer.toString(QUOTA_SIZE), dir.toString());
// Creating a file should use half the quota
DFSTestUtil.createFile(dfs, file1, FILE_SIZE, (short) 3, 1L);
DFSTestUtil.waitReplication(dfs, file1, (short) 3);
c = dfs.getContentSummary(dir);
compareQuotaUsage(c, dfs, dir);
checkContentSummary(c, webhdfs.getContentSummary(dir));
assertEquals("Quota is half consumed", QUOTA_SIZE / 2, c.getSpaceConsumed());
// The second file cannot be created: although the actual space the two
// files would occupy fits within the quota, block allocation charges a
// full block (times replication) up front, and we have already
// used half the quota for the first file.
try {
DFSTestUtil.createFile(dfs, file2, FILE_SIZE, (short) 3, 1L);
} catch (QuotaExceededException e) {
exceededQuota = true;
}
assertTrue("Quota not exceeded", exceededQuota);
}
Use of org.apache.hadoop.fs.ContentSummary in the Apache Hadoop project — example from the class TestQuota, method testQuotaCommands.
/**
 * Test quota related commands:
 * setQuota, clrQuota, setSpaceQuota, clrSpaceQuota, and count.
 *
 * Note: all assertEquals calls follow the JUnit (expected, actual)
 * convention so failure messages read correctly.
 */
@Test
public void testQuotaCommands() throws Exception {
DFSAdmin admin = new DFSAdmin(conf);
final Path dir = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
assertTrue(dfs.mkdirs(dir));
final int fileLen = 1024;
final short replication = 5;
final long spaceQuota = fileLen * replication * 15 / 8;
// 1: create a directory test and set its quota to be 3
final Path parent = new Path(dir, "test");
assertTrue(dfs.mkdirs(parent));
String[] args = new String[] { "-setQuota", "3", parent.toString() };
runCommand(admin, args, false);
// try setting space quota with a 'binary prefix' (2t == 2 TiB)
runCommand(admin, false, "-setSpaceQuota", "2t", parent.toString());
assertEquals(2L << 40, dfs.getContentSummary(parent).getSpaceQuota());
// set the disk space quota to its real test value
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota), parent.toString());
// 2: create directory /test/data0
final Path childDir0 = new Path(parent, "data0");
assertTrue(dfs.mkdirs(childDir0));
// 3: create a file /test/datafile0
final Path childFile0 = new Path(parent, "datafile0");
DFSTestUtil.createFile(dfs, childFile0, fileLen, replication, 0);
// 4: count -q /test
ContentSummary c = dfs.getContentSummary(parent);
compareQuotaUsage(c, dfs, parent);
assertEquals(3, c.getFileCount() + c.getDirectoryCount());
assertEquals(3, c.getQuota());
assertEquals(fileLen * replication, c.getSpaceConsumed());
assertEquals(spaceQuota, c.getSpaceQuota());
// 5: count -q /test/data0
c = dfs.getContentSummary(childDir0);
compareQuotaUsage(c, dfs, childDir0);
assertEquals(1, c.getFileCount() + c.getDirectoryCount());
assertEquals(-1, c.getQuota());
// check disk space consumed
c = dfs.getContentSummary(parent);
compareQuotaUsage(c, dfs, parent);
assertEquals(fileLen * replication, c.getSpaceConsumed());
// 6: creating /test/data1 must fail -- the name quota (3) is used up
final Path childDir1 = new Path(parent, "data1");
boolean hasException = false;
try {
assertFalse(dfs.mkdirs(childDir1));
} catch (QuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
OutputStream fout;
// 7: creating file /test/datafile1 must fail for the same reason
final Path childFile1 = new Path(parent, "datafile1");
hasException = false;
try {
fout = dfs.create(childFile1);
} catch (QuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// 8: clear quota /test
runCommand(admin, new String[] { "-clrQuota", parent.toString() }, false);
c = dfs.getContentSummary(parent);
compareQuotaUsage(c, dfs, parent);
assertEquals(-1, c.getQuota());
assertEquals(spaceQuota, c.getSpaceQuota());
// 9: clear quota /test/data0
runCommand(admin, new String[] { "-clrQuota", childDir0.toString() }, false);
c = dfs.getContentSummary(childDir0);
compareQuotaUsage(c, dfs, childDir0);
assertEquals(-1, c.getQuota());
// 10: create a file /test/datafile1
fout = dfs.create(childFile1, replication);
// 10.s: but writing fileLen bytes should result in a quota exception
try {
fout.write(new byte[fileLen]);
fout.close();
Assert.fail();
} catch (QuotaExceededException e) {
IOUtils.closeStream(fout);
}
// delete the file
dfs.delete(childFile1, false);
// 9.s: clear disk space quota
runCommand(admin, false, "-clrSpaceQuota", parent.toString());
c = dfs.getContentSummary(parent);
compareQuotaUsage(c, dfs, parent);
assertEquals(-1, c.getQuota());
assertEquals(-1, c.getSpaceQuota());
// now creating childFile1 should succeed
DFSTestUtil.createFile(dfs, childFile1, fileLen, replication, 0);
// 11: set the quota of /test to be 1
// HADOOP-5872 - we can set quota even if it is immediately violated
args = new String[] { "-setQuota", "1", parent.toString() };
runCommand(admin, args, false);
// likewise for the space quota
runCommand(admin, false, "-setSpaceQuota", Integer.toString(fileLen), args[2]);
// 12: set the quota of /test/data0 to be 1
args = new String[] { "-setQuota", "1", childDir0.toString() };
runCommand(admin, args, false);
// 13: not able to create a directory under data0
hasException = false;
try {
assertFalse(dfs.mkdirs(new Path(childDir0, "in")));
} catch (QuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
c = dfs.getContentSummary(childDir0);
compareQuotaUsage(c, dfs, childDir0);
assertEquals(1, c.getDirectoryCount() + c.getFileCount());
assertEquals(1, c.getQuota());
// 14a: set quota on a non-existent directory
Path nonExistentPath = new Path(dir, "test1");
assertFalse(dfs.exists(nonExistentPath));
args = new String[] { "-setQuota", "1", nonExistentPath.toString() };
runCommand(admin, args, true);
// setting a space quota on a non-existent directory must fail too
runCommand(admin, true, "-setSpaceQuota", "1g", nonExistentPath.toString());
// 14b: set quota on a file
assertTrue(dfs.isFile(childFile0));
args[1] = childFile0.toString();
runCommand(admin, args, true);
// same for space quota
runCommand(admin, true, "-setSpaceQuota", "1t", args[1]);
// 15a: clear quota on a file
args[0] = "-clrQuota";
runCommand(admin, args, true);
runCommand(admin, true, "-clrSpaceQuota", args[1]);
// 15b: clear quota on a non-existent directory
args[1] = nonExistentPath.toString();
runCommand(admin, args, true);
runCommand(admin, true, "-clrSpaceQuota", args[1]);
// 16a: set the quota of /test to be 0
args = new String[] { "-setQuota", "0", parent.toString() };
runCommand(admin, args, true);
runCommand(admin, false, "-setSpaceQuota", "0", args[2]);
// 16b: set the quota of /test to be -1
args[1] = "-1";
runCommand(admin, args, true);
runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
// 16c: set the quota of /test to be Long.MAX_VALUE+1
args[1] = String.valueOf(Long.MAX_VALUE + 1L);
runCommand(admin, args, true);
runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
// 16d: set the quota of /test to be a non integer
args[1] = "33aa1.5";
runCommand(admin, args, true);
runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
// 16e: set space quota with a value larger than Long.MAX_VALUE
runCommand(admin, true, "-setSpaceQuota", (Long.MAX_VALUE / 1024 / 1024 + 1024) + "m", args[2]);
// 17: setQuota by a non-administrator
final String username = "userxx";
UserGroupInformation ugi = UserGroupInformation.createUserForTesting(username, new String[] { "groupyy" });
// need final ref for doAs block
final String[] args2 = args.clone();
ugi.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
assertEquals("Not running as new user", username, UserGroupInformation.getCurrentUser().getShortUserName());
DFSAdmin userAdmin = new DFSAdmin(conf);
args2[1] = "100";
runCommand(userAdmin, args2, true);
runCommand(userAdmin, true, "-setSpaceQuota", "1g", args2[2]);
// 18: clrQuota by a non-administrator
String[] args3 = new String[] { "-clrQuota", parent.toString() };
runCommand(userAdmin, args3, true);
runCommand(userAdmin, true, "-clrSpaceQuota", args3[1]);
return null;
}
});
// 19: clrQuota on the root directory ("/") should fail
runCommand(admin, true, "-clrQuota", "/");
// 20: setQuota on the root directory ("/") should succeed
runCommand(admin, false, "-setQuota", "1000000", "/");
runCommand(admin, true, "-clrQuota", "/");
runCommand(admin, false, "-clrSpaceQuota", "/");
runCommand(admin, new String[] { "-clrQuota", parent.toString() }, false);
runCommand(admin, false, "-clrSpaceQuota", parent.toString());
// 21: create directory /test/data2
final Path childDir2 = new Path(parent, "data2");
assertTrue(dfs.mkdirs(childDir2));
final Path childFile2 = new Path(childDir2, "datafile2");
final Path childFile3 = new Path(childDir2, "datafile3");
final long spaceQuota2 = DEFAULT_BLOCK_SIZE * replication;
final long fileLen2 = DEFAULT_BLOCK_SIZE;
// set space quota to a real low value
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());
// clear space quota
runCommand(admin, false, "-clrSpaceQuota", childDir2.toString());
// create a file that is greater than the size of space quota
DFSTestUtil.createFile(dfs, childFile2, fileLen2, replication, 0);
// now set space quota again. This should succeed
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), childDir2.toString());
hasException = false;
try {
DFSTestUtil.createFile(dfs, childFile3, fileLen2, replication, 0);
} catch (DSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
// now test the same for root
final Path childFile4 = new Path(dir, "datafile2");
final Path childFile5 = new Path(dir, "datafile3");
runCommand(admin, true, "-clrQuota", "/");
runCommand(admin, false, "-clrSpaceQuota", "/");
// set space quota to a real low value
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");
runCommand(admin, false, "-clrSpaceQuota", "/");
DFSTestUtil.createFile(dfs, childFile4, fileLen2, replication, 0);
runCommand(admin, false, "-setSpaceQuota", Long.toString(spaceQuota2), "/");
hasException = false;
try {
DFSTestUtil.createFile(dfs, childFile5, fileLen2, replication, 0);
} catch (DSQuotaExceededException e) {
hasException = true;
}
assertTrue(hasException);
assertEquals(5, cluster.getNamesystem().getFSDirectory().getYieldCount());
/*
 * clear space quota for root, otherwise other tests may fail due to
 * insufficient space quota.
 */
runCommand(admin, false, "-clrSpaceQuota", "/");
}
Use of org.apache.hadoop.fs.ContentSummary in the Apache Hadoop project — example from the class TestQuota, method testMultipleFilesSmallerThanOneBlock.
/**
 * Like the previous test but create many files. This covers bugs where
 * the quota adjustment is incorrect but it takes many files to accrue
 * a big enough accounting error to violate the quota.
 */
@Test
public void testMultipleFilesSmallerThanOneBlock() throws Exception {
final Path parent = new Path(PathUtils.getTestPath(getClass()), GenericTestUtils.getMethodName());
assertTrue(dfs.mkdirs(parent));
Configuration dfsConf = new HdfsConfiguration();
final int BLOCK_SIZE = 6 * 1024;
dfsConf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
// Make it relinquish locks. When run serially, the result should
// be identical.
dfsConf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);
MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(3).build();
dfsCluster.waitActive();
FileSystem fs = dfsCluster.getFileSystem();
DFSAdmin admin = new DFSAdmin(dfsConf);
final String nnAddr = dfsConf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
final String webhdfsuri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr;
System.out.println("webhdfsuri=" + webhdfsuri);
final FileSystem webHDFS = new Path(webhdfsuri).getFileSystem(dfsConf);
try {
// Test for default NameSpace Quota
long nsQuota = FSImageTestUtil.getNSQuota(dfsCluster.getNameNode().getNamesystem());
assertTrue("Default namespace quota expected as long max. But the value is :" + nsQuota, nsQuota == Long.MAX_VALUE);
Path dir = new Path(parent, "test");
boolean exceededQuota = false;
ContentSummary c;
// 1kb files, 6kb blocks, 192kb quota (32 blocks)
final int FILE_SIZE = 1024;
final int QUOTA_SIZE = 32 * (int) fs.getDefaultBlockSize(dir);
assertEquals(6 * 1024, fs.getDefaultBlockSize(dir));
assertEquals(192 * 1024, QUOTA_SIZE);
// Create the dir and set the quota. We need to enable the quota before
// writing the files as setting the quota afterwards will over-write
// the cached disk space used for quota verification with the actual
// amount used as calculated by INode#spaceConsumedInTree.
assertTrue(fs.mkdirs(dir));
runCommand(admin, false, "-setSpaceQuota", Integer.toString(QUOTA_SIZE), dir.toString());
// We can create at most 59 files: block allocation conservatively
// charges a full replicated block for the file being written, so the
// last (59th) file needs the space already used by 58 files plus one
// full block: (58 * 3 * 1024) + (3 * 6 * 1024) = 192kb
for (int i = 0; i < 59; i++) {
Path file = new Path(parent, "test/test" + i);
DFSTestUtil.createFile(fs, file, FILE_SIZE, (short) 3, 1L);
DFSTestUtil.waitReplication(fs, file, (short) 3);
}
// Should account for all 59 files (almost QUOTA_SIZE)
c = fs.getContentSummary(dir);
compareQuotaUsage(c, fs, dir);
checkContentSummary(c, webHDFS.getContentSummary(dir));
assertEquals("Invalid space consumed", 59 * FILE_SIZE * 3, c.getSpaceConsumed());
// remaining quota equals the unused tail of the (conservatively
// charged) blocks: 3 replicas * (block size - file size)
assertEquals("Invalid space consumed", QUOTA_SIZE - (59 * FILE_SIZE * 3), 3 * (fs.getDefaultBlockSize(dir) - FILE_SIZE));
// Now check that trying to create another file violates the quota
try {
Path file = new Path(parent, "test/test59");
DFSTestUtil.createFile(fs, file, FILE_SIZE, (short) 3, 1L);
DFSTestUtil.waitReplication(fs, file, (short) 3);
} catch (QuotaExceededException e) {
exceededQuota = true;
}
assertTrue("Quota not exceeded", exceededQuota);
// DFS_CONTENT_SUMMARY_LIMIT_KEY=2 forces lock-yielding during the
// content summary computation; verify it actually yielded.
assertEquals(2, dfsCluster.getNamesystem().getFSDirectory().getYieldCount());
} finally {
dfsCluster.shutdown();
}
}
Use of org.apache.hadoop.fs.ContentSummary in the Apache Hadoop project — example from the class TestDiskspaceQuotaUpdate, method testUpdateQuotaForAppend.
/**
 * Test if the quota can be correctly updated for append, both when the
 * previous file length falls on a block boundary and when it does not.
 */
@Test(timeout = 60000)
public void testUpdateQuotaForAppend() throws Exception {
final Path foo = new Path(getParent(GenericTestUtils.getMethodName()), "foo");
final Path bar = new Path(foo, "bar");
long currentFileLen = BLOCKSIZE;
DFSTestUtil.createFile(getDFS(), bar, currentFileLen, REPLICATION, seed);
getDFS().setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
// append half of the block data; the previous file length is at a
// block boundary
DFSTestUtil.appendFile(getDFS(), bar, BLOCKSIZE / 2);
currentFileLen += (BLOCKSIZE / 2);
INodeDirectory fooNode = getFSDirectory().getINode4Write(foo.toString()).asDirectory();
assertTrue(fooNode.isQuotaSet());
verifySpaceConsumed(fooNode, foo, currentFileLen);
// append another block; the previous file length is not at a block
// boundary
DFSTestUtil.appendFile(getDFS(), bar, BLOCKSIZE);
currentFileLen += BLOCKSIZE;
verifySpaceConsumed(fooNode, foo, currentFileLen);
// append several blocks plus a partial block
DFSTestUtil.appendFile(getDFS(), bar, BLOCKSIZE * 3 + BLOCKSIZE / 8);
currentFileLen += (BLOCKSIZE * 3 + BLOCKSIZE / 8);
verifySpaceConsumed(fooNode, foo, currentFileLen);
}

/**
 * Asserts that the cached quota usage of {@code dirNode} matches the
 * expected file length: namespace count is 2 (the directory and the file
 * in it), storage space is {@code expectedFileLen * REPLICATION}, and
 * getContentSummary agrees with the cached value.
 *
 * @param dirNode quota-enabled directory inode to check
 * @param dirPath path of the same directory, for getContentSummary
 * @param expectedFileLen expected logical length of the single file
 */
private void verifySpaceConsumed(INodeDirectory dirNode, Path dirPath, long expectedFileLen) throws Exception {
QuotaCounts quota = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
// two inodes are counted: the directory and the file
assertEquals(2, quota.getNameSpace());
assertEquals(expectedFileLen * REPLICATION, quota.getStorageSpace());
ContentSummary c = getDFS().getContentSummary(dirPath);
assertEquals(quota.getStorageSpace(), c.getSpaceConsumed());
}
Use of org.apache.hadoop.fs.ContentSummary in the Apache Hadoop project — example from the class DistributedFileSystem, method getContentSummary.
/**
 * Returns the {@link ContentSummary} for the given path, following any
 * symlinks via the link-resolver machinery.
 *
 * @param f path to summarize (may be relative; resolved against the
 *          working directory first)
 * @return the content summary reported by the NameNode
 * @throws IOException if the summary cannot be retrieved
 */
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
// Record this read in both the legacy and the per-op statistics.
statistics.incrementReadOps(1);
storageStatistics.incrementOpCounter(OpType.GET_CONTENT_SUMMARY);
final Path absolutePath = fixRelativePart(f);
FileSystemLinkResolver<ContentSummary> resolver =
    new FileSystemLinkResolver<ContentSummary>() {
      @Override
      public ContentSummary doCall(final Path p) throws IOException {
        // Normal case: ask this DFS client directly.
        return dfs.getContentSummary(getPathName(p));
      }

      @Override
      public ContentSummary next(final FileSystem fs, final Path p)
          throws IOException {
        // Symlink crossed into another file system; delegate to it.
        return fs.getContentSummary(p);
      }
    };
return resolver.resolve(this, absolutePath);
}
Aggregations