Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.
In the class TestFileWithSnapshotFeature, the method testUpdateQuotaAndCollectBlocks:
@Test
public void testUpdateQuotaAndCollectBlocks() {
  FileDiffList diffs = new FileDiffList();
  FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
  FileDiff diff = mock(FileDiff.class);
  BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
  BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
  BlockInfo[] blocks = new BlockInfo[] {
      new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1)
  };
  BlockManager bm = mock(BlockManager.class);
  // No snapshot
  INodeFile file = mock(INodeFile.class);
  when(file.getFileWithSnapshotFeature()).thenReturn(sf);
  when(file.getBlocks()).thenReturn(blocks);
  when(file.getStoragePolicyID()).thenReturn((byte) 1);
  Whitebox.setInternalState(file, "header", (long) REPL_1 << 48);
  when(file.getPreferredBlockReplication()).thenReturn(REPL_1);
  when(bsps.getPolicy(anyByte())).thenReturn(bsp);
  INode.BlocksMapUpdateInfo collectedBlocks = mock(INode.BlocksMapUpdateInfo.class);
  ArrayList<INode> removedINodes = new ArrayList<>();
  INode.ReclaimContext ctx =
      new INode.ReclaimContext(bsps, collectedBlocks, removedINodes, null);
  sf.updateQuotaAndCollectBlocks(ctx, file, diff);
  QuotaCounts counts = ctx.quotaDelta().getCountsCopy();
  Assert.assertEquals(0, counts.getStorageSpace());
  Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));
  // INode only exists in the snapshot
  INodeFile snapshotINode = mock(INodeFile.class);
  Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
  Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
  when(diff.getSnapshotINode()).thenReturn(snapshotINode);
  when(bsp.chooseStorageTypes(REPL_1)).thenReturn(Lists.newArrayList(SSD));
  when(bsp.chooseStorageTypes(REPL_3)).thenReturn(Lists.newArrayList(DISK));
  blocks[0].setReplication(REPL_3);
  sf.updateQuotaAndCollectBlocks(ctx, file, diff);
  counts = ctx.quotaDelta().getCountsCopy();
  Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE, counts.getStorageSpace());
  Assert.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
  Assert.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
}
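For reference, the BlockInfoContiguous handling that this test relies on reduces to the following minimal sketch. It reuses the test's BLOCK_SIZE, REPL_1 and REPL_3 constants (defined elsewhere in the test class) and only repeats calls already made above; it is an illustration, not additional test code.

// A contiguous block wraps a raw Block (id, length, generation stamp)
// together with a replication factor; the replication can later be changed in place.
Block raw = new Block(1, BLOCK_SIZE, 1);
BlockInfoContiguous blockInfo = new BlockInfoContiguous(raw, REPL_1);
blockInfo.setReplication(REPL_3); // mirrors the second phase of the test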
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.
In the class CreateEditsLog, the method addFiles:
static void addFiles(FSEditLog editLog, int numFiles, short replication,
    int blocksPerFile, long startingBlockId, long blockSize,
    FileNameGenerator nameGenerator) {
  PermissionStatus p =
      new PermissionStatus("joeDoe", "people", new FsPermission((short) 0777));
  INodeId inodeId = new INodeId();
  INodeDirectory dirInode = new INodeDirectory(inodeId.nextValue(), null, p, 0L);
  editLog.logMkDir(BASE_PATH, dirInode);
  BlockInfo[] blocks = new BlockInfo[blocksPerFile];
  for (int iB = 0; iB < blocksPerFile; ++iB) {
    blocks[iB] = new BlockInfoContiguous(
        new Block(0, blockSize, BLOCK_GENERATION_STAMP), replication);
  }
  long currentBlockId = startingBlockId;
  long bidAtSync = startingBlockId;
  for (int iF = 0; iF < numFiles; iF++) {
    for (int iB = 0; iB < blocksPerFile; ++iB) {
      blocks[iB].setBlockId(currentBlockId++);
    }
    final INodeFile inode = new INodeFile(inodeId.nextValue(), null, p, 0L, 0L,
        blocks, replication, blockSize);
    inode.toUnderConstruction("", "");
    // Append path to filename with information about blockIDs
    String path = "_" + iF + "_B" + blocks[0].getBlockId()
        + "_to_B" + blocks[blocksPerFile - 1].getBlockId() + "_";
    String filePath = nameGenerator.getNextFileName("");
    filePath = filePath + path;
    // Log the new sub directory in edits
    if ((iF % nameGenerator.getFilesPerDirectory()) == 0) {
      String currentDir = nameGenerator.getCurrentDir();
      dirInode = new INodeDirectory(inodeId.nextValue(), null, p, 0L);
      editLog.logMkDir(currentDir, dirInode);
    }
    INodeFile fileUc = new INodeFile(inodeId.nextValue(), null, p, 0L, 0L,
        BlockInfo.EMPTY_ARRAY, replication, blockSize);
    fileUc.toUnderConstruction("", "");
    editLog.logOpenFile(filePath, fileUc, false, false);
    editLog.logCloseFile(filePath, inode);
    if (currentBlockId - bidAtSync >= 2000) {
      // sync every 2K blocks
      editLog.logSync();
      bidAtSync = currentBlockId;
    }
  }
  System.out.println("Created edits log in directory " + edits_dir);
  System.out.println(" containing " + numFiles + " File-Creates, each file with "
      + blocksPerFile + " blocks");
  System.out.println(" blocks range: " + startingBlockId + " to " + (currentBlockId - 1));
}
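The detail relevant to BlockInfoContiguous here is that the block objects are allocated once, before the file loop, and only their block IDs are rewritten for each generated file, so every logged INodeFile shares the same block instances. A reduced sketch of that reuse follows, with hypothetical small values standing in for the method's parameters:

// Hypothetical values for illustration only.
long blockSize = 1024L;
short replication = 3;
int blocksPerFile = 2;
long nextBlockId = 1000L;

// Allocate the BlockInfoContiguous objects once (block id 0 is a placeholder).
BlockInfo[] blocks = new BlockInfo[blocksPerFile];
for (int i = 0; i < blocksPerFile; i++) {
  blocks[i] = new BlockInfoContiguous(new Block(0, blockSize, 1), replication);
}
// Per generated file, only the ids change; the same instances are reused.
for (int i = 0; i < blocksPerFile; i++) {
  blocks[i].setBlockId(nextBlockId++);
}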
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.
In the class FSDirWriteFileOp, the method addBlock:
/**
 * Add a block to the file. Returns a reference to the added block.
 */
private static BlockInfo addBlock(FSDirectory fsd, String path,
    INodesInPath inodesInPath, Block block, DatanodeStorageInfo[] targets,
    BlockType blockType) throws IOException {
  fsd.writeLock();
  try {
    final INodeFile fileINode = inodesInPath.getLastINode().asFile();
    Preconditions.checkState(fileINode.isUnderConstruction());
    // associate new last block for the file
    final BlockInfo blockInfo;
    if (blockType == BlockType.STRIPED) {
      ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp
          .unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), inodesInPath);
      short numDataUnits = (short) ecPolicy.getNumDataUnits();
      short numParityUnits = (short) ecPolicy.getNumParityUnits();
      short numLocations = (short) (numDataUnits + numParityUnits);
      // check quota limits and updated space consumed
      fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
          numLocations, true);
      blockInfo = new BlockInfoStriped(block, ecPolicy);
      blockInfo.convertToBlockUnderConstruction(
          HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
    } else {
      // check quota limits and updated space consumed
      fsd.updateCount(inodesInPath, 0, fileINode.getPreferredBlockSize(),
          fileINode.getFileReplication(), true);
      short numLocations = fileINode.getFileReplication();
      blockInfo = new BlockInfoContiguous(block, numLocations);
      blockInfo.convertToBlockUnderConstruction(
          HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
    }
    fsd.getBlockManager().addBlockCollection(blockInfo, fileINode);
    fileINode.addBlock(blockInfo);
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* FSDirectory.addBlock: " + path + " with "
          + block + " block is added to the in-memory " + "file system");
    }
    return blockInfo;
  } finally {
    fsd.writeUnlock();
  }
}
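The two branches differ mainly in how many locations the new block will occupy, which is what drives the quota update: the space charged up front is, roughly, the preferred block size multiplied by the number of locations. The sketch below restates that accounting using the names from the method above; it is an illustration of the reasoning, not a call into FSDirectory:

// Contiguous layout: one full copy of the block per replica.
long contiguousDelta = fileINode.getPreferredBlockSize() * fileINode.getFileReplication();

// Striped (erasure-coded) layout: one internal block per data unit plus parity unit.
long stripedDelta = fileINode.getPreferredBlockSize()
    * (ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits());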
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.
In the class FileDiffList, the method combineAndCollectSnapshotBlocks:
/**
 * Copy blocks from the removed snapshot into the previous snapshot
 * up to the file length of the latter.
 * Collect unused blocks of the removed snapshot.
 */
void combineAndCollectSnapshotBlocks(INode.ReclaimContext reclaimContext,
    INodeFile file, FileDiff removed) {
  BlockInfo[] removedBlocks = removed.getBlocks();
  if (removedBlocks == null) {
    FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
    assert sf != null : "FileWithSnapshotFeature is null";
    if (sf.isCurrentFileDeleted())
      sf.collectBlocksAndClear(reclaimContext, file);
    return;
  }
  int p = getPrior(removed.getSnapshotId(), true);
  FileDiff earlierDiff = p == Snapshot.NO_SNAPSHOT_ID ? null : getDiffById(p);
  // Copy blocks to the previous snapshot if not set already
  if (earlierDiff != null) {
    earlierDiff.setBlocks(removedBlocks);
  }
  BlockInfo[] earlierBlocks =
      (earlierDiff == null ? new BlockInfoContiguous[] {} : earlierDiff.getBlocks());
  // Find later snapshot (or file itself) with blocks
  BlockInfo[] laterBlocks = findLaterSnapshotBlocks(removed.getSnapshotId());
  laterBlocks = (laterBlocks == null) ? file.getBlocks() : laterBlocks;
  // Skip blocks, which belong to either the earlier or the later lists
  int i = 0;
  for (; i < removedBlocks.length; i++) {
    if (i < earlierBlocks.length && removedBlocks[i] == earlierBlocks[i])
      continue;
    if (i < laterBlocks.length && removedBlocks[i] == laterBlocks[i])
      continue;
    break;
  }
  // Check if last block is part of truncate recovery
  BlockInfo lastBlock = file.getLastBlock();
  Block dontRemoveBlock = null;
  if (lastBlock != null && lastBlock.getBlockUCState().equals(
      HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
    dontRemoveBlock = lastBlock.getUnderConstructionFeature().getTruncateBlock();
  }
  // Collect the remaining blocks of the file, ignoring truncate block
  for (; i < removedBlocks.length; i++) {
    if (dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) {
      reclaimContext.collectedBlocks().addDeleteBlock(removedBlocks[i]);
    }
  }
}
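One detail worth noting: the skip loop above compares block objects by reference (==), not by equals(). A block counts as shared with the earlier or later snapshot only when both diffs hold the same BlockInfoContiguous instance; an equal-looking copy would still fall through to addDeleteBlock(). A small sketch of that distinction, with made-up block values:

// Two diffs holding the same instance: the loop treats the block as shared and skips it.
BlockInfoContiguous original = new BlockInfoContiguous(new Block(1, 1024, 1), (short) 3);
BlockInfoContiguous alias = original;
// An equal-looking but distinct instance is not "shared" in the sense of the loop.
BlockInfoContiguous copy = new BlockInfoContiguous(new Block(1, 1024, 1), (short) 3);

assert original == alias;  // shared by reference, skipped
assert original != copy;   // same id/length/stamp, different object, would be collected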