Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.
From the class TestQuotaByStorageType, method testQuotaByStorageTypeWithFileCreateAppend:
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateAppend() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  // set storage policy on directory "foo" to ONESSD
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on directory "foo"
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      REPLICATION, seed);

  // Verify space consumed and remaining quota
  long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // append several blocks
  int appendLen = BLOCKSIZE * 2;
  DFSTestUtil.appendFile(dfs, createdFile1, appendLen);
  file1Len += appendLen;

  ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
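The per-storage-type figures that the test reads through the INode internals are also available on the public ContentSummary API, so a client does not need NameNode internals to monitor quota usage. A minimal sketch of that client-side view, assuming an already-initialized FileSystem handle and an existing directory (the helper name and variables below are illustrative, not taken from the Hadoop tests):

import java.io.IOException;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;

// Illustrative helper: report SSD usage for a directory, and the remaining
// SSD quota when one has been set (unset quotas are reported as -1).
static void reportSsdUsage(FileSystem fs, Path dir) throws IOException {
  ContentSummary cs = fs.getContentSummary(dir);
  long ssdConsumed = cs.getTypeConsumed(StorageType.SSD);
  long ssdQuota = cs.getTypeQuota(StorageType.SSD);
  System.out.println("SSD consumed:          " + ssdConsumed);
  if (ssdQuota >= 0) {
    System.out.println("SSD quota remaining:   " + (ssdQuota - ssdConsumed));
  }
  System.out.println("All replicas (bytes):  " + cs.getSpaceConsumed());
}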
Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.
From the class TestQuotaByStorageType, method testContentSummaryWithoutQuotaByStorageType:
@Test(timeout = 60000)
public void testContentSummaryWithoutQuotaByStorageType() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  // set storage policy on directory "foo" to ONESSD
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(!fnode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
      REPLICATION, seed);

  // Verify getContentSummary without any quota set
  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
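When no quota is set, the NameNode still computes the usage figures on demand; only the quota fields change. As a hedged extension of the test above (reusing its dfs handle and foo directory, and assuming the usual convention that unset quotas are reported as -1), the same ContentSummary could also be checked like this:

// Illustrative extra checks, not part of the original test: with no quota
// configured on "foo", the usage fields are populated but every quota
// field is expected to come back as -1 (unset).
assertEquals(-1, cs.getQuota());                    // namespace quota
assertEquals(-1, cs.getSpaceQuota());               // overall space quota
assertEquals(-1, cs.getTypeQuota(StorageType.SSD)); // per-storage-type quota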
Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.
From the class TestOfflineImageViewerForContentSummary, method testGetContentSummaryForDirContainsSymlink:
@Test
public void testGetContentSummaryForDirContainsSymlink() throws Exception {
  try (WebImageViewer viewer =
      new WebImageViewer(NetUtils.createSocketAddr("localhost:0"))) {
    viewer.initServer(originalFsimage.getAbsolutePath());
    int port = viewer.getPort();

    // create a WebHdfsFileSystem instance
    URI uri = new URI("webhdfs://localhost:" + String.valueOf(port));
    Configuration conf = new Configuration();
    WebHdfsFileSystem webfs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

    ContentSummary summary = webfs.getContentSummary(new Path("/dirForLinks/"));
    verifyContentSummary(symLinkSummaryForDirContainsFromDFS, summary);
  }
}
Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.
From the class TestOfflineImageViewerForContentSummary, method testGetContentSummaryForFile:
@Test
public void testGetContentSummaryForFile() throws Exception {
  try (WebImageViewer viewer =
      new WebImageViewer(NetUtils.createSocketAddr("localhost:0"))) {
    viewer.initServer(originalFsimage.getAbsolutePath());
    int port = viewer.getPort();

    URL url = new URL("http://localhost:" + port
        + "/webhdfs/v1/parentDir/file1?op=GETCONTENTSUMMARY");
    HttpURLConnection connection = (HttpURLConnection) url.openConnection();
    connection.setRequestMethod("GET");
    connection.connect();
    assertEquals(HttpURLConnection.HTTP_OK, connection.getResponseCode());

    // create a WebHdfsFileSystem instance
    URI uri = new URI("webhdfs://localhost:" + String.valueOf(port));
    Configuration conf = new Configuration();
    WebHdfsFileSystem webfs = (WebHdfsFileSystem) FileSystem.get(uri, conf);

    ContentSummary summary = webfs.getContentSummary(new Path("/parentDir/file1"));
    verifyContentSummary(fileSummaryFromDFS, summary);
  }
}
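The raw HTTP request in this test only checks the status code before handing verification to WebHdfsFileSystem. For reference, the same endpoint can be read directly; the sketch below fetches the JSON body that backs webfs.getContentSummary(), with the host, port, and path mirroring the test setup (the helper itself is illustrative, not part of the test class):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

// Illustrative: fetch the raw GETCONTENTSUMMARY response from the
// WebImageViewer's WebHDFS endpoint and return its JSON body.
static String fetchContentSummaryJson(int port, String path) throws Exception {
  URL url = new URL("http://localhost:" + port + "/webhdfs/v1" + path
      + "?op=GETCONTENTSUMMARY");
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  conn.setRequestMethod("GET");
  StringBuilder body = new StringBuilder();
  try (BufferedReader reader = new BufferedReader(
      new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
    String line;
    while ((line = reader.readLine()) != null) {
      body.append(line);
    }
  }
  // Typical shape, per the WebHDFS REST documentation:
  // {"ContentSummary":{"directoryCount":...,"fileCount":...,"length":...,
  //  "quota":...,"spaceConsumed":...,"spaceQuota":...}}
  return body.toString();
}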
Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.
From the class TestFSMainOperationsWebHdfs, method testTruncate:
@Test
public void testTruncate() throws Exception {
  final short repl = 3;
  final int blockSize = 1024;
  final int numOfBlocks = 2;
  Path dir = getTestRootPath(fSys, "test/hadoop");
  Path file = getTestRootPath(fSys, "test/hadoop/file");

  final byte[] data = getFileData(numOfBlocks, blockSize);
  createFile(fSys, file, data, blockSize, repl);

  final int newLength = blockSize;
  boolean isReady = fSys.truncate(file, newLength);
  Assert.assertTrue("Recovery is not expected.", isReady);

  FileStatus fileStatus = fSys.getFileStatus(file);
  Assert.assertEquals(fileStatus.getLen(), newLength);
  AppendTestUtil.checkFullFile(fSys, file, newLength, data, file.toString());

  ContentSummary cs = fSys.getContentSummary(dir);
  Assert.assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
      newLength * repl);
  Assert.assertTrue("Deleted", fSys.delete(dir, true));
}
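Outside of test assertions, ContentSummary is also what backs the hadoop fs -count command, and its public formatting helpers reproduce that column layout. A short sketch, assuming a FileSystem handle fs and a Path dir (both names are placeholders here):

// Illustrative: print a summary in the same column layout as "hadoop fs -count -q".
ContentSummary cs = fs.getContentSummary(dir);
System.out.println(ContentSummary.getHeader(true) + dir);
System.out.println(cs.toString(true) + dir);

// Individual accessors are also available for custom reporting:
System.out.println("files: " + cs.getFileCount()
    + ", dirs: " + cs.getDirectoryCount()
    + ", bytes: " + cs.getLength()
    + ", raw space consumed: " + cs.getSpaceConsumed());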