use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.
the class INodeFile method storagespaceConsumedStriped.
// TODO: support EC with heterogeneous storage
public final QuotaCounts storagespaceConsumedStriped() {
  QuotaCounts counts = new QuotaCounts.Builder().build();
  for (BlockInfo b : blocks) {
    Preconditions.checkState(b.isStriped());
    long blockSize = b.isComplete() ?
        ((BlockInfoStriped) b).spaceConsumed() :
        getPreferredBlockSize() * ((BlockInfoStriped) b).getTotalBlockNum();
    counts.addStorageSpace(blockSize);
  }
  return counts;
}
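To make the quota math above concrete, here is a small standalone sketch (illustrative only, not Hadoop source; the RS-6-3 layout and 128 MB preferred block size are assumed): an incomplete striped block is charged for the full pre-allocated block group, while a complete one is charged only its actual spaceConsumed().
// Illustrative sketch. Assumes an RS-6-3 policy (6 data + 3 parity
// internal blocks) and a 128 MB preferred block size.
public class StripedQuotaSketch {
  public static void main(String[] args) {
    long preferredBlockSize = 128L * 1024 * 1024; // assumed block size
    int dataUnits = 6;
    int parityUnits = 3;
    int totalBlockNum = dataUnits + parityUnits;
    // An incomplete (under-construction) striped block is charged at the
    // maximum it could occupy: preferredBlockSize * totalBlockNum.
    long pessimisticCharge = preferredBlockSize * totalBlockNum;
    System.out.println("Charge while incomplete: " + pessimisticCharge); // 1207959552 bytes
  }
}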
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.
the class FSEditLogLoader method updateBlocks.
/**
 * Update in-memory data structures with new block information.
 * @throws IOException
 */
private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
    INodesInPath iip, INodeFile file, ErasureCodingPolicy ecPolicy)
    throws IOException {
  // Update its block list
  BlockInfo[] oldBlocks = file.getBlocks();
  Block[] newBlocks = op.getBlocks();
  String path = op.getPath();
  // Are we only updating the last block's gen stamp?
  boolean isGenStampUpdate = oldBlocks.length == newBlocks.length;
  // First, update blocks in common
  for (int i = 0; i < oldBlocks.length && i < newBlocks.length; i++) {
    BlockInfo oldBlock = oldBlocks[i];
    Block newBlock = newBlocks[i];
    boolean isLastBlock = i == newBlocks.length - 1;
    if (oldBlock.getBlockId() != newBlock.getBlockId() ||
        (oldBlock.getGenerationStamp() != newBlock.getGenerationStamp() &&
            !(isGenStampUpdate && isLastBlock))) {
      throw new IOException("Mismatched block IDs or generation stamps, " +
          "attempting to replace block " + oldBlock + " with " + newBlock +
          " as block # " + i + "/" + newBlocks.length + " of " + path);
    }
    oldBlock.setNumBytes(newBlock.getNumBytes());
    boolean changeMade =
        oldBlock.getGenerationStamp() != newBlock.getGenerationStamp();
    oldBlock.setGenerationStamp(newBlock.getGenerationStamp());
    if (!oldBlock.isComplete() &&
        (!isLastBlock || op.shouldCompleteLastBlock())) {
      changeMade = true;
      fsNamesys.getBlockManager().forceCompleteBlock(oldBlock);
    }
    if (changeMade) {
      // The state or gen-stamp of the block has changed. So, we may be
      // able to process some messages from datanodes that we previously
      // were unable to process.
      fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
    }
  }
  if (newBlocks.length < oldBlocks.length) {
    // We're removing a block from the file, e.g. abandonBlock(...)
    if (!file.isUnderConstruction()) {
      throw new IOException("Trying to remove a block from file " + path +
          " which is not under construction.");
    }
    if (newBlocks.length != oldBlocks.length - 1) {
      throw new IOException(
          "Trying to remove more than one block from file " + path);
    }
    Block oldBlock = oldBlocks[oldBlocks.length - 1];
    boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(
        fsDir, path, iip, file, oldBlock);
    if (!removed && !(op instanceof UpdateBlocksOp)) {
      throw new IOException("Trying to delete non-existent block " + oldBlock);
    }
  } else if (newBlocks.length > oldBlocks.length) {
    final boolean isStriped = ecPolicy != null;
    // We're adding blocks
    for (int i = oldBlocks.length; i < newBlocks.length; i++) {
      Block newBlock = newBlocks[i];
      final BlockInfo newBI;
      if (!op.shouldCompleteLastBlock()) {
        // TODO: shouldn't this only be true for the last block?
        // what about an old-version fsync() where fsync isn't called
        // until several blocks in?
        if (isStriped) {
          newBI = new BlockInfoStriped(newBlock, ecPolicy);
        } else {
          newBI = new BlockInfoContiguous(newBlock,
              file.getPreferredBlockReplication());
        }
        newBI.convertToBlockUnderConstruction(
            BlockUCState.UNDER_CONSTRUCTION, null);
      } else {
        // OP_CLOSE should add finalized blocks. This code path
        // is only executed when loading edits written by prior
        // versions of Hadoop. Current versions always log
        // OP_ADD operations as each block is allocated.
        if (isStriped) {
          newBI = new BlockInfoStriped(newBlock, ecPolicy);
        } else {
          newBI = new BlockInfoContiguous(newBlock, file.getFileReplication());
        }
      }
      fsNamesys.getBlockManager().addBlockCollectionWithCheck(newBI, file);
      file.addBlock(newBI);
      fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
    }
  }
}
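A minimal sketch (hypothetical helper, not part of FSEditLogLoader) of the three cases updateBlocks() distinguishes by comparing the old and new block-list lengths:
// Hypothetical classifier mirroring updateBlocks()' length comparison;
// the method name is illustrative, not a Hadoop API.
static String classifyBlockListUpdate(int oldLen, int newLen) {
  if (newLen == oldLen) {
    // only gen stamps / lengths of existing blocks may change
    return "in-place update";
  } else if (newLen == oldLen - 1) {
    // exactly one trailing block removed, e.g. abandonBlock(...)
    return "remove last block";
  } else if (newLen > oldLen) {
    // new blocks appended to the file
    return "add blocks";
  } else {
    // removing more than one block is rejected with an IOException
    return "invalid";
  }
}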
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.
the class FSEditLogLoader method addNewBlock.
/**
 * Add a new block into the given INodeFile.
 */
private void addNewBlock(AddBlockOp op, INodeFile file,
    ErasureCodingPolicy ecPolicy) throws IOException {
  BlockInfo[] oldBlocks = file.getBlocks();
  Block pBlock = op.getPenultimateBlock();
  Block newBlock = op.getLastBlock();
  if (pBlock != null) {
    // the penultimate block is not null
    assert oldBlocks != null && oldBlocks.length > 0;
    // compare pBlock with the last block of oldBlocks
    BlockInfo oldLastBlock = oldBlocks[oldBlocks.length - 1];
    if (oldLastBlock.getBlockId() != pBlock.getBlockId() ||
        oldLastBlock.getGenerationStamp() != pBlock.getGenerationStamp()) {
      throw new IOException(
          "Mismatched block IDs or generation stamps for the old last block of file " +
          op.getPath() + ", the old last block is " + oldLastBlock +
          ", and the block read from editlog is " + pBlock);
    }
    oldLastBlock.setNumBytes(pBlock.getNumBytes());
    if (!oldLastBlock.isComplete()) {
      fsNamesys.getBlockManager().forceCompleteBlock(oldLastBlock);
      fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
    }
  } else {
    // the penultimate block is null
    Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0);
  }
  // add the new block
  final BlockInfo newBlockInfo;
  boolean isStriped = ecPolicy != null;
  if (isStriped) {
    newBlockInfo = new BlockInfoStriped(newBlock, ecPolicy);
  } else {
    newBlockInfo = new BlockInfoContiguous(newBlock,
        file.getPreferredBlockReplication());
  }
  newBlockInfo.convertToBlockUnderConstruction(
      BlockUCState.UNDER_CONSTRUCTION, null);
  fsNamesys.getBlockManager().addBlockCollectionWithCheck(newBlockInfo, file);
  file.addBlock(newBlockInfo);
  fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
}
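A rough sketch (hypothetical helper, not Hadoop source) of the invariant addNewBlock() checks: the penultimate block recorded in the edit must match the file's current last block in both ID and generation stamp, otherwise the edit log and the in-memory file state have diverged.
// Illustrative check mirroring the invariant above; getBlockId() and
// getGenerationStamp() are real Block accessors, the helper name is made up.
static boolean penultimateMatches(Block oldLast, Block pBlock) {
  return oldLast.getBlockId() == pBlock.getBlockId()
      && oldLast.getGenerationStamp() == pBlock.getGenerationStamp();
}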
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.
the class NamenodeFsck method getReplicaInfo.
/**
 * Display info of each replica for a replicated block.
 * For a striped block group, display info of each internal block.
 */
private String getReplicaInfo(BlockInfo storedBlock) {
  if (!(showLocations || showRacks || showReplicaDetails ||
      showUpgradeDomains)) {
    return "";
  }
  final boolean isComplete = storedBlock.isComplete();
  Iterator<DatanodeStorageInfo> storagesItr;
  StringBuilder sb = new StringBuilder(" [");
  final boolean isStriped = storedBlock.isStriped();
  Map<DatanodeStorageInfo, Long> storage2Id = new HashMap<>();
  if (isComplete) {
    if (isStriped) {
      long blockId = storedBlock.getBlockId();
      Iterable<StorageAndBlockIndex> sis =
          ((BlockInfoStriped) storedBlock).getStorageAndIndexInfos();
      for (StorageAndBlockIndex si : sis) {
        storage2Id.put(si.getStorage(), blockId + si.getBlockIndex());
      }
    }
    storagesItr = storedBlock.getStorageInfos();
  } else {
    storagesItr = storedBlock.getUnderConstructionFeature()
        .getExpectedStorageLocationsIterator();
  }
  while (storagesItr.hasNext()) {
    DatanodeStorageInfo storage = storagesItr.next();
    if (isStriped && isComplete) {
      long index = storage2Id.get(storage);
      sb.append("blk_" + index + ":");
    }
    DatanodeDescriptor dnDesc = storage.getDatanodeDescriptor();
    if (showRacks) {
      sb.append(NodeBase.getPath(dnDesc));
    } else {
      sb.append(new DatanodeInfoWithStorage(dnDesc, storage.getStorageID(),
          storage.getStorageType()));
    }
    if (showUpgradeDomains) {
      String upgradeDomain = (dnDesc.getUpgradeDomain() != null) ?
          dnDesc.getUpgradeDomain() : UNDEFINED;
      sb.append("(ud=" + upgradeDomain + ")");
    }
    if (showReplicaDetails) {
      Collection<DatanodeDescriptor> corruptReplicas =
          blockManager.getCorruptReplicas(storedBlock);
      sb.append("(");
      if (dnDesc.isDecommissioned()) {
        sb.append("DECOMMISSIONED)");
      } else if (dnDesc.isDecommissionInProgress()) {
        sb.append("DECOMMISSIONING)");
      } else if (this.showMaintenanceState && dnDesc.isEnteringMaintenance()) {
        sb.append("ENTERING MAINTENANCE)");
      } else if (this.showMaintenanceState && dnDesc.isInMaintenance()) {
        sb.append("IN MAINTENANCE)");
      } else if (corruptReplicas != null && corruptReplicas.contains(dnDesc)) {
        sb.append("CORRUPT)");
      } else if (blockManager.isExcess(dnDesc, storedBlock)) {
        sb.append("EXCESS)");
      } else if (dnDesc.isStale(this.staleInterval)) {
        sb.append("STALE_NODE)");
      } else if (storage.areBlockContentsStale()) {
        sb.append("STALE_BLOCK_CONTENT)");
      } else {
        sb.append("LIVE)");
      }
    }
    if (storagesItr.hasNext()) {
      sb.append(", ");
    }
  }
  sb.append(']');
  return sb.toString();
}
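The "blk_<id>:" prefix printed for striped groups derives each internal block's ID by adding the storage's block index to the group's base ID; this works because striped block group IDs reserve their low bits for the index. A small illustration (the base ID below is made up, not taken from the snippet):
// Illustrative only: striped block group IDs are negative with the low bits
// zeroed, so groupId + blockIndex yields each internal block's ID.
long groupId = -9223372036854775792L; // hypothetical block group base ID
for (int blockIndex = 0; blockIndex < 9; blockIndex++) {
  System.out.println("blk_" + (groupId + blockIndex) + ":");
}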
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped in project hadoop by apache.
the class TestFSEditLogLoader method testAddNewStripedBlock.
@Test
public void testAddNewStripedBlock() throws IOException {
  // start a cluster
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      testECPolicy.getName());
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    FSNamesystem fns = cluster.getNamesystem();
    String testDir = "/ec";
    String testFile = "testfile_001";
    String testFilePath = testDir + "/" + testFile;
    String clientName = "testUser1";
    String clientMachine = "testMachine1";
    long blkId = 1;
    long blkNumBytes = 1024;
    long timestamp = 1426222918;
    short blockNum = (short) testECPolicy.getNumDataUnits();
    short parityNum = (short) testECPolicy.getNumParityUnits();
    // set the erasure coding policy of the directory
    fs.mkdir(new Path(testDir), new FsPermission("755"));
    fs.getClient().getNamenode().setErasureCodingPolicy(testDir,
        testECPolicy.getName());
    // Create a file with a striped block
    Path p = new Path(testFilePath);
    DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
    fns.enterSafeMode(false);
    fns.saveNamespace(0, 0);
    fns.leaveSafeMode(false);
    // Add a striped block to the file
    BlockInfoStriped stripedBlk = new BlockInfoStriped(
        new Block(blkId, blkNumBytes, timestamp), testECPolicy);
    INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
    file.toUnderConstruction(clientName, clientMachine);
    file.addBlock(stripedBlk);
    fns.getEditLog().logAddBlock(testFilePath, file);
    TestINodeFile.toCompleteFile(file);
    // If the block loaded after restart is the same as above, the edit log
    // has been successfully applied to the fsimage.
    cluster.restartNameNodes();
    cluster.waitActive();
    fns = cluster.getNamesystem();
    INodeFile inodeLoaded = (INodeFile) fns.getFSDirectory()
        .getINode(testFilePath);
    assertTrue(inodeLoaded.isStriped());
    BlockInfo[] blks = inodeLoaded.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(blkId, blks[0].getBlockId());
    assertEquals(blkNumBytes, blks[0].getNumBytes());
    assertEquals(timestamp, blks[0].getGenerationStamp());
    assertEquals(blockNum, ((BlockInfoStriped) blks[0]).getDataBlockNum());
    assertEquals(parityNum, ((BlockInfoStriped) blks[0]).getParityBlockNum());
    cluster.shutdown();
    cluster = null;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}