Usage example of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped from the Apache Hadoop project, taken from the testBlockStripedLength method of the TestStripedINodeFile class.
@Test
public void testBlockStripedLength() throws IOException, InterruptedException {
  // Adding a single striped block group to a striped INode file should
  // result in exactly one entry in the file's block list.
  INodeFile stripedFile = createStripedINodeFile();
  Block rawBlock = new Block(1);
  BlockInfoStriped stripedBlock = new BlockInfoStriped(rawBlock, testECPolicy);
  stripedFile.addBlock(stripedBlock);
  assertEquals(1, stripedFile.getBlocks().length);
}
Usage example of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped from the Apache Hadoop project, taken from the testBlockStripedConsumedSpace method of the TestStripedINodeFile class.
@Test
public void testBlockStripedConsumedSpace() throws IOException, InterruptedException {
  INodeFile stripedFile = createStripedINodeFile();
  Block rawBlock = new Block(1);
  BlockInfoStriped stripedBlock = new BlockInfoStriped(rawBlock, testECPolicy);
  stripedBlock.setNumBytes(1);
  stripedFile.addBlock(stripedBlock);
  // Consumed space for a striped block group is derived as follows:
  //   0. Compute the total bytes per stripe        <Num Bytes per Stripes>
  //   1. Compute the number of stripes in the group <Num Stripes>
  //   2. Compute the leftover length that does not
  //      fill a whole stripe                        <Last Stripe Length>
  //   3. Total consumed space is the sum of:
  //      a. the full cells of all data and parity blocks,
  //      b. the remainder of the data block that does not fill a stripe,
  //      c. the last parity cells, which are sized like the first cell
  //         of that stripe.
  // For this 1-byte block:
  //   a. <Cell Size> * (<Num Stripes> - 1) * <Total Block Num> = 0
  //   b. <Num Bytes> % <Num Bytes per Stripes>                 = 1
  //   c. <Last Stripe Length> * <Parity Block Num>             = 1 * 3
  assertEquals(4, stripedFile.storagespaceConsumedStriped().getStorageSpace());
  assertEquals(4, stripedFile.storagespaceConsumed(defaultPolicy).getStorageSpace());
}
Usage example of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped from the Apache Hadoop project, taken from the testBlockStripedComputeQuotaUsage method of the TestStripedINodeFile class.
@Test
public void testBlockStripedComputeQuotaUsage() throws IOException, InterruptedException {
  INodeFile stripedFile = createStripedINodeFile();
  Block rawBlock = new Block(1);
  BlockInfoStriped stripedBlock = new BlockInfoStriped(rawBlock, testECPolicy);
  stripedBlock.setNumBytes(100);
  stripedFile.addBlock(stripedBlock);
  QuotaCounts usage = stripedFile.computeQuotaUsageWithStriped(defaultPolicy, new QuotaCounts.Builder().build());
  // One INode => one namespace entry.
  assertEquals(1, usage.getNameSpace());
  // The total consumed space for this 100-byte block group is the sum of:
  //   a. <Cell Size> * (<Num Stripes> - 1) * <Total Block Num> = 0
  //   b. <Num Bytes> % <Num Bytes per Stripes>                 = 100
  //   c. <Last Stripe Length> * <Parity Block Num>             = 100 * 3
  assertEquals(400, usage.getStorageSpace());
}
Usage example of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped from the Apache Hadoop project, taken from the testBlockStripedUCComputeQuotaUsage method of the TestStripedINodeFile class.
@Test
public void testBlockStripedUCComputeQuotaUsage() throws IOException, InterruptedException {
  INodeFile stripedFile = createStripedINodeFile();
  Block rawBlock = new Block(1);
  BlockInfoStriped ucStripedBlock = new BlockInfoStriped(rawBlock, testECPolicy);
  // Put the block group into the under-construction state before adding it.
  ucStripedBlock.convertToBlockUnderConstruction(HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, null);
  ucStripedBlock.setNumBytes(100);
  stripedFile.addBlock(ucStripedBlock);
  QuotaCounts usage = stripedFile.computeQuotaUsageWithStriped(defaultPolicy, new QuotaCounts.Builder().build());
  assertEquals(1024, stripedFile.getPreferredBlockSize());
  assertEquals(1, usage.getNameSpace());
  // While under construction, consumed space is charged at the preferred
  // block size for every block in the group: 9 blocks (= 3 + 6) at 1024
  // bytes each, so 1024 * 9 = 9216.
  assertEquals(9216, usage.getStorageSpace());
}
Usage example of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped from the Apache Hadoop project, taken from the updateSpaceForCompleteBlock method of the FSDirectory class.
/**
 * Update the cached quota space for a block that is being completed.
 * Quota was originally reserved assuming a full preferred-size block
 * (or block group); once the block completes at its actual size, any
 * over-reservation is released. Must only be called once, as the block
 * is being completed.
 * @param completeBlk - Completed block for which to update space
 * @param inodes - INodes in path to file containing completeBlk; if null
 * this will be resolved internally
 */
public void updateSpaceForCompleteBlock(BlockInfo completeBlk, INodesInPath inodes) throws IOException {
  assert namesystem.hasWriteLock();
  // Resolve the path to the owning file if the caller did not supply it.
  final INodesInPath iip;
  if (inodes == null) {
    iip = INodesInPath.fromINode(namesystem.getBlockCollection(completeBlk));
  } else {
    iip = inodes;
  }
  INodeFile file = iip.getLastINode().asFile();
  // Bytes reserved beyond what the completed block actually consumes,
  // and the replication factor to release them at.
  final long overReserved;
  final short replication;
  if (file.isStriped()) {
    // Striped: quota was reserved for a full block group across all
    // data + parity units; compare against the group's actual footprint.
    final ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(namesystem, iip);
    final long totalUnits = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
    final long reservedGroupSize = file.getPreferredBlockSize() * totalUnits;
    final long actualGroupSize = new BlockInfoStriped(completeBlk, ecPolicy).spaceConsumed();
    overReserved = reservedGroupSize - actualGroupSize;
    // Striped space is already totaled over all units; charge it once.
    replication = (short) 1;
  } else {
    // Contiguous: per-replica difference, multiplied by the replication.
    overReserved = file.getPreferredBlockSize() - completeBlk.getNumBytes();
    replication = file.getFileReplication();
  }
  if (overReserved > 0) {
    try {
      updateSpaceConsumed(iip, 0, -overReserved, replication);
    } catch (IOException e) {
      // Best-effort: quota bookkeeping failure must not fail completion.
      LOG.warn("Unexpected exception while updating disk space.", e);
    }
  }
}
Aggregations