Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by Apache.
The class TestAddStripedBlocks, method testGetLocatedStripedBlocks.
@Test
public void testGetLocatedStripedBlocks() throws Exception {
  final Path file = new Path("/file1");
  // create an empty file
  FSDataOutputStream out = null;
  try {
    out = dfs.create(file, (short) 1);
    writeAndFlushStripedOutputStream(
        (DFSStripedOutputStream) out.getWrappedStream(),
        DFS_BYTES_PER_CHECKSUM_DEFAULT);
    FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
    INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
    BlockInfoStriped lastBlk = (BlockInfoStriped) fileNode.getLastBlock();
    DatanodeInfo[] expectedDNs = DatanodeStorageInfo.toDatanodeInfos(
        lastBlk.getUnderConstructionFeature().getExpectedStorageLocations());
    byte[] indices = lastBlk.getUnderConstructionFeature().getBlockIndices();

    LocatedBlocks blks = dfs.getClient().getLocatedBlocks(file.toString(), 0L);
    Assert.assertEquals(1, blks.locatedBlockCount());
    LocatedBlock lblk = blks.get(0);
    Assert.assertTrue(lblk instanceof LocatedStripedBlock);

    DatanodeInfo[] datanodes = lblk.getLocations();
    byte[] blockIndices = ((LocatedStripedBlock) lblk).getBlockIndices();
    Assert.assertEquals(groupSize, datanodes.length);
    Assert.assertEquals(groupSize, blockIndices.length);
    Assert.assertArrayEquals(indices, blockIndices);
    Assert.assertArrayEquals(expectedDNs, datanodes);
  } finally {
    IOUtils.cleanup(null, out);
  }
}
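The assertions above rely on LocatedStripedBlock reporting one entry per internal block, with getBlockIndices() and getLocations() aligned index for index. As a rough client-side illustration (not taken from the Hadoop sources), the fragment below walks those arrays for an assumed DistributedFileSystem handle named dfs and an assumed erasure-coded path; both names are hypothetical.

// Hedged sketch: print which internal-block index lives on which datanode.
// Assumes "dfs" is an initialized DistributedFileSystem and that the path
// below refers to an erasure-coded file; neither comes from the test above.
LocatedBlocks lbs = dfs.getClient().getLocatedBlocks("/striped/file", 0L);
for (LocatedBlock lb : lbs.getLocatedBlocks()) {
  if (lb instanceof LocatedStripedBlock) {
    LocatedStripedBlock slb = (LocatedStripedBlock) lb;
    byte[] indices = slb.getBlockIndices();
    DatanodeInfo[] locations = slb.getLocations();
    for (int i = 0; i < indices.length; i++) {
      // indices[i] is the internal-block index stored on locations[i].
      System.out.println("internal block " + indices[i] + " on " + locations[i]);
    }
  }
}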
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by Apache.
The class FSDirectory, method updateSpaceForCompleteBlock.
/**
 * Update the cached quota space for a block that is being completed.
 * Must only be called once, as the block is being completed.
 * @param completeBlk - Completed block for which to update space
 * @param inodes - INodes in path to file containing completeBlk; if null
 *                 this will be resolved internally
 */
public void updateSpaceForCompleteBlock(BlockInfo completeBlk,
    INodesInPath inodes) throws IOException {
  assert namesystem.hasWriteLock();
  INodesInPath iip = inodes != null ? inodes :
      INodesInPath.fromINode(namesystem.getBlockCollection(completeBlk));
  INodeFile fileINode = iip.getLastINode().asFile();
  // Adjust disk space consumption if required
  final long diff;
  final short replicationFactor;
  if (fileINode.isStriped()) {
    final ErasureCodingPolicy ecPolicy =
        FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(namesystem, iip);
    final short numDataUnits = (short) ecPolicy.getNumDataUnits();
    final short numParityUnits = (short) ecPolicy.getNumParityUnits();
    final long numBlocks = numDataUnits + numParityUnits;
    final long fullBlockGroupSize =
        fileINode.getPreferredBlockSize() * numBlocks;
    final BlockInfoStriped striped = new BlockInfoStriped(completeBlk, ecPolicy);
    final long actualBlockGroupSize = striped.spaceConsumed();
    diff = fullBlockGroupSize - actualBlockGroupSize;
    replicationFactor = (short) 1;
  } else {
    diff = fileINode.getPreferredBlockSize() - completeBlk.getNumBytes();
    replicationFactor = fileINode.getFileReplication();
  }
  if (diff > 0) {
    try {
      updateSpaceConsumed(iip, 0, -diff, replicationFactor);
    } catch (IOException e) {
      LOG.warn("Unexpected exception while updating disk space.", e);
    }
  }
}
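For a rough sense of the adjustment made in the striped branch, the sketch below plugs assumed numbers into the same subtraction: an RS-6-3 layout and a 128 MB preferred block size. The actualBlockGroupSize value merely stands in for whatever BlockInfoStriped.spaceConsumed() would report and is not derived from the real striping math.

// Hypothetical illustration of the striped quota diff computed above;
// every constant here is an assumption, not output from HDFS.
public class StripedQuotaDiffSketch {
  public static void main(String[] args) {
    final long preferredBlockSize = 128L * 1024 * 1024;   // assumed 128 MB
    final long numDataUnits = 6;                          // assumed RS-6-3
    final long numParityUnits = 3;

    // Space that was reserved when the block group was allocated.
    long fullBlockGroupSize = preferredBlockSize * (numDataUnits + numParityUnits);

    // Stand-in for striped.spaceConsumed() on the completed group.
    long actualBlockGroupSize = 64L * 1024 * 1024;        // assumed value

    // A positive diff means the reservation exceeded what was written, so the
    // difference is handed back to the quota at replication factor 1.
    long diff = fullBlockGroupSize - actualBlockGroupSize;
    System.out.println("release " + diff + " bytes, replication factor 1");
  }
}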
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by Apache.
The class FSDirWriteFileOp, method addBlock.
/**
 * Add a block to the file. Returns a reference to the added block.
 */
private static BlockInfo addBlock(FSDirectory fsd, String path,
    INodesInPath inodesInPath, Block block, DatanodeStorageInfo[] targets,
    BlockType blockType) throws IOException {
  fsd.writeLock();
  try {
    final INodeFile fileINode = inodesInPath.getLastINode().asFile();
    Preconditions.checkState(fileINode.isUnderConstruction());
    // associate new last block for the file
    final BlockInfo blockInfo;
    if (blockType == BlockType.STRIPED) {
      ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp
          .unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), inodesInPath);
      short numDataUnits = (short) ecPolicy.getNumDataUnits();
      short numParityUnits = (short) ecPolicy.getNumParityUnits();
      short numLocations = (short) (numDataUnits + numParityUnits);
      // check quota limits and update space consumed
      fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
          numLocations, true);
      blockInfo = new BlockInfoStriped(block, ecPolicy);
      blockInfo.convertToBlockUnderConstruction(
          HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
    } else {
      // check quota limits and update space consumed
      fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
          fileINode.getFileReplication(), true);
      short numLocations = fileINode.getFileReplication();
      blockInfo = new BlockInfoContiguous(block, numLocations);
      blockInfo.convertToBlockUnderConstruction(
          HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
    }
    fsd.getBlockManager().addBlockCollection(blockInfo, fileINode);
    fileINode.addBlock(blockInfo);
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* FSDirectory.addBlock: " + path
          + " with " + block + " block is added to the in-memory "
          + "file system");
    }
    return blockInfo;
  } finally {
    fsd.writeUnlock();
  }
}
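For contrast, here is a small hedged sketch of what the two updateCount calls above reserve against the space quota, under the assumption (not verified here) that the charge is the preferred block size times the replication argument, with an assumed 128 MB block size, an RS-6-3 policy, and a contiguous replication factor of 3.

// Hypothetical comparison of the quota reservation for a new striped block
// group versus a new contiguous block; all values below are assumptions.
public class AddBlockReservationSketch {
  static long reserved(long preferredBlockSize, short replication) {
    // Assumed model of fsd.updateCount(iip, 0, preferredBlockSize,
    // replication, true): charge preferredBlockSize * replication bytes.
    return preferredBlockSize * replication;
  }

  public static void main(String[] args) {
    long preferredBlockSize = 128L * 1024 * 1024;  // assumed 128 MB

    short stripedLocations = 6 + 3;      // data + parity units of RS-6-3
    short contiguousReplication = 3;     // assumed file replication

    System.out.println("striped group reserves "
        + reserved(preferredBlockSize, stripedLocations) + " bytes");
    System.out.println("contiguous block reserves "
        + reserved(preferredBlockSize, contiguousReplication) + " bytes");
  }
}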
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by Apache.
The class INodeFile, method computeFileSize.
/**
 * Compute file size of the current file.
 *
 * @param includesLastUcBlock
 *          If the last block is under construction, should it be included?
 * @param usePreferredBlockSize4LastUcBlock
 *          If the last block is under construction, should we use actual
 *          block size or preferred block size?
 *          Note that usePreferredBlockSize4LastUcBlock is ignored
 *          if includesLastUcBlock == false.
 * @return file size
 */
public final long computeFileSize(boolean includesLastUcBlock,
    boolean usePreferredBlockSize4LastUcBlock) {
  if (blocks.length == 0) {
    return 0;
  }
  final int last = blocks.length - 1;
  // check if the last block is BlockInfoUnderConstruction
  BlockInfo lastBlk = blocks[last];
  long size = lastBlk.getNumBytes();
  if (!lastBlk.isComplete()) {
    if (!includesLastUcBlock) {
      size = 0;
    } else if (usePreferredBlockSize4LastUcBlock) {
      size = isStriped()
          ? getPreferredBlockSize() * ((BlockInfoStriped) lastBlk).getDataBlockNum()
          : getPreferredBlockSize();
    }
  }
  // sum other blocks
  for (int i = 0; i < last; i++) {
    size += blocks[i].getNumBytes();
  }
  return size;
}
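Read concretely, the preferred-size branch above multiplies by the data block count only for striped files. A hedged arithmetic sketch follows, assuming a 128 MB preferred block size and that getDataBlockNum() would return 6 for an RS-6-3 layout (both are assumptions, not values taken from this file).

// Hypothetical numbers for the usePreferredBlockSize4LastUcBlock branch.
public class LastUcBlockSizeSketch {
  public static void main(String[] args) {
    long preferredBlockSize = 128L * 1024 * 1024;  // assumed 128 MB
    int dataBlockNum = 6;                          // assumed RS-6-3 layout

    long stripedLastUcSize = preferredBlockSize * dataBlockNum;  // 768 MB of user data
    long contiguousLastUcSize = preferredBlockSize;              // 128 MB

    System.out.println("striped UC block counted as " + stripedLastUcSize);
    System.out.println("contiguous UC block counted as " + contiguousLastUcSize);
  }
}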
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by Apache.
The class NamenodeFsck, method collectBlocksSummary.
private void collectBlocksSummary(String parent, HdfsFileStatus file,
    Result res, LocatedBlocks blocks) throws IOException {
  String path = file.getFullName(parent);
  boolean isOpen = blocks.isUnderConstruction();
  if (isOpen && !showOpenFiles) {
    return;
  }
  int missing = 0;
  int corrupt = 0;
  long missize = 0;
  long corruptSize = 0;
  int underReplicatedPerFile = 0;
  int misReplicatedPerFile = 0;
  StringBuilder report = new StringBuilder();
  int blockNumber = 0;
  final LocatedBlock lastBlock = blocks.getLastLocatedBlock();
  for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
    ExtendedBlock block = lBlk.getBlock();
    if (!blocks.isLastBlockComplete() && lastBlock != null
        && lastBlock.getBlock().equals(block)) {
      // it is under construction
      continue;
    }
    final BlockInfo storedBlock =
        blockManager.getStoredBlock(block.getLocalBlock());
    final int minReplication = blockManager.getMinStorageNum(storedBlock);
    // count decommissionedReplicas / decommissioningReplicas
    NumberReplicas numberReplicas = blockManager.countNodes(storedBlock);
    int decommissionedReplicas = numberReplicas.decommissioned();
    int decommissioningReplicas = numberReplicas.decommissioning();
    int enteringMaintenanceReplicas =
        numberReplicas.liveEnteringMaintenanceReplicas();
    int inMaintenanceReplicas = numberReplicas.maintenanceNotForReadReplicas();
    res.decommissionedReplicas += decommissionedReplicas;
    res.decommissioningReplicas += decommissioningReplicas;
    res.enteringMaintenanceReplicas += enteringMaintenanceReplicas;
    res.inMaintenanceReplicas += inMaintenanceReplicas;
    // count total replicas
    int liveReplicas = numberReplicas.liveReplicas();
    int totalReplicasPerBlock = liveReplicas + decommissionedReplicas
        + decommissioningReplicas + enteringMaintenanceReplicas
        + inMaintenanceReplicas;
    res.totalReplicas += totalReplicasPerBlock;
    boolean isMissing;
    if (storedBlock.isStriped()) {
      isMissing = totalReplicasPerBlock < minReplication;
    } else {
      isMissing = totalReplicasPerBlock == 0;
    }
    // count expected replicas
    short targetFileReplication;
    if (file.getErasureCodingPolicy() != null) {
      assert storedBlock instanceof BlockInfoStriped;
      targetFileReplication =
          ((BlockInfoStriped) storedBlock).getRealTotalBlockNum();
    } else {
      targetFileReplication = file.getReplication();
    }
    res.numExpectedReplicas += targetFileReplication;
    // count under min repl'd blocks
    if (totalReplicasPerBlock < minReplication) {
      res.numUnderMinReplicatedBlocks++;
    }
    // count excessive replicas / over replicated blocks
    if (liveReplicas > targetFileReplication) {
      res.excessiveReplicas += (liveReplicas - targetFileReplication);
      res.numOverReplicatedBlocks += 1;
    }
    // count corrupt blocks
    boolean isCorrupt = lBlk.isCorrupt();
    if (isCorrupt) {
      res.addCorrupt(block.getNumBytes());
      corrupt++;
      corruptSize += block.getNumBytes();
      out.print("\n" + path + ": CORRUPT blockpool " + block.getBlockPoolId()
          + " block " + block.getBlockName() + "\n");
    }
    // count minimally replicated blocks
    if (totalReplicasPerBlock >= minReplication) {
      res.numMinReplicatedBlocks++;
    }
    // count missing replicas / under replicated blocks
    if (totalReplicasPerBlock < targetFileReplication && !isMissing) {
      res.missingReplicas += (targetFileReplication - totalReplicasPerBlock);
      res.numUnderReplicatedBlocks += 1;
      underReplicatedPerFile++;
      if (!showFiles) {
        out.print("\n" + path + ": ");
      }
      out.println(" Under replicated " + block + ". Target Replicas is "
          + targetFileReplication + " but found " + liveReplicas
          + " live replica(s), " + decommissionedReplicas
          + " decommissioned replica(s), " + decommissioningReplicas
          + " decommissioning replica(s)"
          + (this.showMaintenanceState
              ? (enteringMaintenanceReplicas
                  + ", entering maintenance replica(s) and "
                  + inMaintenanceReplicas + " in maintenance replica(s).")
              : "."));
    }
    // count mis replicated blocks
    BlockPlacementStatus blockPlacementStatus =
        bpPolicies.getPolicy(lBlk.getBlockType())
            .verifyBlockPlacement(lBlk.getLocations(), targetFileReplication);
    if (!blockPlacementStatus.isPlacementPolicySatisfied()) {
      res.numMisReplicatedBlocks++;
      misReplicatedPerFile++;
      if (!showFiles) {
        if (underReplicatedPerFile == 0) {
          out.println();
        }
        out.print(path + ": ");
      }
      out.println(" Replica placement policy is violated for " + block + ". "
          + blockPlacementStatus.getErrorDescription());
    }
    // count storage summary
    if (this.showStoragePolcies && lBlk.getStorageTypes() != null) {
      countStorageTypeSummary(file, lBlk);
    }
    // report
    String blkName = block.toString();
    report.append(blockNumber + ". " + blkName + " len=" + block.getNumBytes());
    if (isMissing && !isCorrupt) {
      // If the block is corrupted, it means all its available replicas are
      // corrupted in the case of replication, and it means the state of the
      // block group is unrecoverable due to some corrupted internal blocks in
      // the case of EC. We don't mark it as missing given these available
      // replicas/internal-blocks might still be accessible as the block might
      // be incorrectly marked as corrupted by client machines.
      report.append(" MISSING!");
      res.addMissing(blkName, block.getNumBytes());
      missing++;
      missize += block.getNumBytes();
      if (storedBlock.isStriped()) {
        report.append(" Live_repl=" + liveReplicas);
        String info = getReplicaInfo(storedBlock);
        if (!info.isEmpty()) {
          report.append(" ").append(info);
        }
      }
    } else {
      report.append(" Live_repl=" + liveReplicas);
      String info = getReplicaInfo(storedBlock);
      if (!info.isEmpty()) {
        report.append(" ").append(info);
      }
    }
    report.append('\n');
    blockNumber++;
  }
  // display under construction block info.
  if (!blocks.isLastBlockComplete() && lastBlock != null) {
    ExtendedBlock block = lastBlock.getBlock();
    String blkName = block.toString();
    BlockInfo storedBlock =
        blockManager.getStoredBlock(block.getLocalBlock());
    DatanodeStorageInfo[] storages = storedBlock
        .getUnderConstructionFeature().getExpectedStorageLocations();
    report.append('\n');
    report.append("Under Construction Block:\n");
    report.append(blockNumber).append(". ").append(blkName);
    report.append(" len=").append(block.getNumBytes());
    report.append(" Expected_repl=" + storages.length);
    String info = getReplicaInfo(storedBlock);
    if (!info.isEmpty()) {
      report.append(" ").append(info);
    }
  }
  // count corrupt file & move or delete if necessary
  if ((missing > 0) || (corrupt > 0)) {
    if (!showFiles) {
      if (missing > 0) {
        out.print("\n" + path + ": MISSING " + missing
            + " blocks of total size " + missize + " B.");
      }
      if (corrupt > 0) {
        out.print("\n" + path + ": CORRUPT " + corrupt
            + " blocks of total size " + corruptSize + " B.");
      }
    }
    res.corruptFiles++;
    if (isOpen) {
      LOG.info("Fsck: ignoring open file " + path);
    } else {
      if (doMove) {
        copyBlocksToLostFound(parent, file, blocks);
      }
      if (doDelete) {
        deleteCorruptedFile(path);
      }
    }
  }
  if (showFiles) {
    if (missing > 0 || corrupt > 0) {
      if (missing > 0) {
        out.print(" MISSING " + missing + " blocks of total size "
            + missize + " B\n");
      }
      if (corrupt > 0) {
        out.print(" CORRUPT " + corrupt + " blocks of total size "
            + corruptSize + " B\n");
      }
    } else if (underReplicatedPerFile == 0 && misReplicatedPerFile == 0) {
      out.print(" OK\n");
    }
    if (showBlocks) {
      out.print(report + "\n");
    }
  }
}
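To make the striped thresholds in the loop above concrete, here is a hedged worked example with invented numbers; the 6+3 layout and the live counts are assumptions, on the reading that getMinStorageNum would correspond to the data units of the group and getRealTotalBlockNum to all of its internal blocks.

// Hypothetical fsck-style accounting for one RS-6-3 block group; the live
// internal-block counts are assumed inputs, not real cluster state.
public class StripedFsckCountsSketch {
  public static void main(String[] args) {
    int minReplication = 6;          // assumed: data units needed to reconstruct
    int targetFileReplication = 9;   // assumed: data + parity internal blocks

    // Case 1: only 5 internal blocks reachable -> below the minimum, so the
    // group is counted as missing (and not also as under replicated).
    int totalReplicasPerBlock = 5;
    System.out.println("missing=" + (totalReplicasPerBlock < minReplication));

    // Case 2: 7 internal blocks reachable -> readable but under replicated,
    // with targetFileReplication - totalReplicasPerBlock = 2 missing replicas.
    totalReplicasPerBlock = 7;
    System.out.println("underReplicatedBy="
        + (targetFileReplication - totalReplicasPerBlock));
  }
}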