Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.
The class TestReplicationPolicy, method testConvertLastBlockToUnderConstructionDoesNotCauseSkippedReplication.
@Test(timeout = 60000)
public void testConvertLastBlockToUnderConstructionDoesNotCauseSkippedReplication()
    throws IOException {
  Namesystem mockNS = mock(Namesystem.class);
  when(mockNS.hasWriteLock()).thenReturn(true);
  BlockManager bm = new BlockManager(mockNS, false, new HdfsConfiguration());
  LowRedundancyBlocks lowRedundancyBlocks = bm.neededReconstruction;
  long blkID1 = ThreadLocalRandom.current().nextLong();
  if (blkID1 < 0) {
    blkID1 *= -1;
  }
  long blkID2 = ThreadLocalRandom.current().nextLong();
  if (blkID2 < 0) {
    blkID2 *= -1;
  }
  BlockInfo block1 = genBlockInfo(blkID1);
  BlockInfo block2 = genBlockInfo(blkID2);
  // Adding QUEUE_LOW_REDUNDANCY block
  lowRedundancyBlocks.add(block1, 0, 0, 1, 1);
  // Adding QUEUE_LOW_REDUNDANCY block
  lowRedundancyBlocks.add(block2, 0, 0, 1, 1);
  List<List<BlockInfo>> chosenBlocks;
  // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
  // from QUEUE_VERY_LOW_REDUNDANCY.
  chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
  assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
  final BlockInfoContiguous info = new BlockInfoContiguous(block1, (short) 1);
  final BlockCollection mbc = mock(BlockCollection.class);
  when(mbc.getId()).thenReturn(1000L);
  when(mbc.getLastBlock()).thenReturn(info);
  when(mbc.getPreferredBlockSize()).thenReturn(block1.getNumBytes() + 1);
  when(mbc.isUnderConstruction()).thenReturn(true);
  ContentSummary cs = mock(ContentSummary.class);
  when(cs.getLength()).thenReturn((long) 1);
  when(mbc.computeContentSummary(bm.getStoragePolicySuite())).thenReturn(cs);
  info.setBlockCollectionId(1000);
  bm.addBlockCollection(info, mbc);
  DatanodeStorageInfo[] storageAry = {
      new DatanodeStorageInfo(dataNodes[0], new DatanodeStorage("s1")) };
  info.convertToBlockUnderConstruction(BlockUCState.UNDER_CONSTRUCTION, storageAry);
  DatanodeStorageInfo storage = mock(DatanodeStorageInfo.class);
  DatanodeDescriptor dn = mock(DatanodeDescriptor.class);
  when(dn.isDecommissioned()).thenReturn(true);
  when(storage.getState()).thenReturn(DatanodeStorage.State.NORMAL);
  when(storage.getDatanodeDescriptor()).thenReturn(dn);
  when(storage.removeBlock(any(BlockInfo.class))).thenReturn(true);
  when(storage.addBlock(any(BlockInfo.class))).thenReturn(
      DatanodeStorageInfo.AddBlockResult.ADDED);
  info.addStorage(storage, info);
  BlockInfo lastBlk = mbc.getLastBlock();
  when(mbc.getLastBlock()).thenReturn(lastBlk, info);
  bm.convertLastBlockToUnderConstruction(mbc, 0L);
  // Choose 1 block from lowRedundancyBlocks. Then it should pick 1 block
  // from QUEUE_VERY_LOW_REDUNDANCY.
  // This block remains and should not be skipped over.
  chosenBlocks = lowRedundancyBlocks.chooseLowRedundancyBlocks(1);
  assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
}
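In this test ContentSummary appears only as a Mockito mock whose getLength() feeds BlockManager#convertLastBlockToUnderConstruction. For comparison, here is a minimal sketch (not part of the test above) of constructing a summary through the public ContentSummary.Builder; the values are arbitrary, not taken from the test.

// Minimal sketch, assuming the ContentSummary.Builder API: build a summary
// directly instead of mocking it. All values here are arbitrary examples.
ContentSummary realSummary = new ContentSummary.Builder()
    .length(1L)           // logical file length in bytes
    .fileCount(1L)
    .directoryCount(0L)
    .spaceConsumed(1L)    // raw bytes on disk (length * replication)
    .build();
assertEquals(1L, realSummary.getLength());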
Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.
The class TestFileTruncate, method testTruncate4Symlink.
@Test
public void testTruncate4Symlink() throws IOException {
  final int fileLength = 3 * BLOCK_SIZE;
  fs.mkdirs(parent);
  final byte[] contents = AppendTestUtil.initBuffer(fileLength);
  final Path file = new Path(parent, "testTruncate4Symlink");
  writeContents(contents, fileLength, file);
  final Path link = new Path(parent, "link");
  fs.createSymlink(file, link, false);
  final int newLength = fileLength / 3;
  boolean isReady = fs.truncate(link, newLength);
  assertTrue("Recovery is not expected.", isReady);
  FileStatus fileStatus = fs.getFileStatus(file);
  assertThat(fileStatus.getLen(), is((long) newLength));
  ContentSummary cs = fs.getContentSummary(parent);
  assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
      newLength * REPLICATION);
  // validate the file content
  checkFullFile(file, newLength, contents);
  fs.delete(parent, true);
}
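The assertion above relies on the fact that, for a plain replicated file with no snapshots, getSpaceConsumed() reports the logical length multiplied by the replication factor. A small sketch of the same check factored into a helper; the method name is illustrative (it does not exist in the original test class) and the expected value is passed first, as JUnit expects.

// Illustrative helper, not from the original test class: check raw disk
// usage of a directory via ContentSummary. expectedBytes = length * replication.
private static void assertSpaceConsumed(FileSystem fs, Path dir, long expectedBytes)
    throws IOException {
  ContentSummary cs = fs.getContentSummary(dir);
  assertEquals("Bad disk space usage", expectedBytes, cs.getSpaceConsumed());
}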
Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.
The class TestFileTruncate, method testBasicTruncate.
/**
 * Truncate files of different sizes byte by byte.
 */
@Test
public void testBasicTruncate() throws IOException {
  int startingFileSize = 3 * BLOCK_SIZE;
  fs.mkdirs(parent);
  fs.setQuota(parent, 100, 1000);
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  for (int fileLength = startingFileSize; fileLength > 0;
       fileLength -= BLOCK_SIZE - 1) {
    for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
      final Path p = new Path(parent, "testBasicTruncate" + fileLength);
      writeContents(contents, fileLength, p);
      int newLength = fileLength - toTruncate;
      boolean isReady = fs.truncate(p, newLength);
      LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
          + ", toTruncate=" + toTruncate + ", isReady=" + isReady);
      assertEquals("File must be closed for zero truncate"
          + " or truncating at the block boundary",
          isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
      if (!isReady) {
        checkBlockRecovery(p);
      }
      ContentSummary cs = fs.getContentSummary(parent);
      assertEquals("Bad disk space usage", cs.getSpaceConsumed(),
          newLength * REPLICATION);
      // validate the file content
      checkFullFile(p, newLength, contents);
    }
  }
  fs.delete(parent, true);
}
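Besides space consumed, the same ContentSummary reports the quotas that the test sets with fs.setQuota(parent, 100, 1000). A short sketch, assuming the same fs and parent variables as the test above:

// Sketch only: reading back the namespace and space quotas set on the
// parent directory; assumes the fs and parent variables from the test.
ContentSummary quotaSummary = fs.getContentSummary(parent);
assertEquals("Namespace quota", 100, quotaSummary.getQuota());
assertEquals("Space quota", 1000, quotaSummary.getSpaceQuota());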
Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.
The class TestRenameWithSnapshots, method checkSpaceConsumed.
private void checkSpaceConsumed(String message, Path directory, long expectedSpace)
    throws Exception {
  ContentSummary summary = hdfs.getContentSummary(directory);
  assertEquals(message, expectedSpace, summary.getSpaceConsumed());
}
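A hypothetical call site for this helper, for example after a rename that should leave disk usage unchanged; the message, path, and byte count below are illustrative rather than taken from TestRenameWithSnapshots.

// Illustrative usage of the helper above; the path and expected size are made up.
checkSpaceConsumed("space consumed after rename", new Path("/test/dir1"), 3 * 1024);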
Use of org.apache.hadoop.fs.ContentSummary in project hadoop by apache.
The class TestGetContentSummaryWithSnapshot, method testGetContentSummary.
/**
 * Calculate against a snapshot path.
 * 1. create dirs /foo/bar
 * 2. take snapshot s1 on /foo
 * 3. create a 10 byte file /foo/bar/baz
 * Make sure "/foo/bar" and "/foo/.snapshot/s1/bar" return correct results:
 * the 10 byte file is not included in snapshot s1.
 */
@Test
public void testGetContentSummary() throws IOException {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final Path baz = new Path(bar, "baz");
  dfs.mkdirs(bar);
  dfs.allowSnapshot(foo);
  dfs.createSnapshot(foo, "s1");
  DFSTestUtil.createFile(dfs, baz, 10, REPLICATION, 0L);
  ContentSummary summary = cluster.getNameNodeRpc().getContentSummary(bar.toString());
  Assert.assertEquals(1, summary.getDirectoryCount());
  Assert.assertEquals(1, summary.getFileCount());
  Assert.assertEquals(10, summary.getLength());
  final Path barS1 = SnapshotTestHelper.getSnapshotPath(foo, "s1", "bar");
  summary = cluster.getNameNodeRpc().getContentSummary(barS1.toString());
  Assert.assertEquals(1, summary.getDirectoryCount());
  Assert.assertEquals(0, summary.getFileCount());
  Assert.assertEquals(0, summary.getLength());
  // also check /foo and /foo/.snapshot/s1
  summary = cluster.getNameNodeRpc().getContentSummary(foo.toString());
  Assert.assertEquals(2, summary.getDirectoryCount());
  Assert.assertEquals(1, summary.getFileCount());
  Assert.assertEquals(10, summary.getLength());
  final Path fooS1 = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
  summary = cluster.getNameNodeRpc().getContentSummary(fooS1.toString());
  Assert.assertEquals(2, summary.getDirectoryCount());
  Assert.assertEquals(0, summary.getFileCount());
  Assert.assertEquals(0, summary.getLength());
  final Path bazS1 = SnapshotTestHelper.getSnapshotPath(foo, "s1", "bar/baz");
  try {
    cluster.getNameNodeRpc().getContentSummary(bazS1.toString());
    Assert.fail("should get FileNotFoundException");
  } catch (FileNotFoundException ignored) {
  }
}
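This test reads its summaries through the NameNode RPC interface. The same counts would normally be fetched through the client-side FileSystem handle; a brief sketch, assuming the dfs handle used earlier in the test returns equivalent results for the non-snapshot path.

// Sketch: the same directory/file/length checks through the client API
// (dfs.getContentSummary) instead of cluster.getNameNodeRpc(); assumed
// to report the same counts for /foo/bar.
ContentSummary clientSummary = dfs.getContentSummary(bar);
Assert.assertEquals(1, clientSummary.getDirectoryCount());
Assert.assertEquals(1, clientSummary.getFileCount());
Assert.assertEquals(10, clientSummary.getLength());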