Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.
The class FSEditLogLoader, method updateBlocks.
/**
 * Update in-memory data structures with new block information.
 * @throws IOException if the blocks recorded in the edit-log op do not
 *                     match the file's in-memory block list
 */
private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
    INodesInPath iip, INodeFile file, ErasureCodingPolicy ecPolicy)
    throws IOException {
  // Update its block list
  BlockInfo[] oldBlocks = file.getBlocks();
  Block[] newBlocks = op.getBlocks();
  String path = op.getPath();
  // Are we only updating the last block's gen stamp?
  boolean isGenStampUpdate = oldBlocks.length == newBlocks.length;
  // First, update blocks in common
  for (int i = 0; i < oldBlocks.length && i < newBlocks.length; i++) {
    BlockInfo oldBlock = oldBlocks[i];
    Block newBlock = newBlocks[i];
    boolean isLastBlock = i == newBlocks.length - 1;
    if (oldBlock.getBlockId() != newBlock.getBlockId()
        || (oldBlock.getGenerationStamp() != newBlock.getGenerationStamp()
            && !(isGenStampUpdate && isLastBlock))) {
      throw new IOException("Mismatched block IDs or generation stamps, "
          + "attempting to replace block " + oldBlock + " with " + newBlock
          + " as block # " + i + "/" + newBlocks.length + " of " + path);
    }
    oldBlock.setNumBytes(newBlock.getNumBytes());
    boolean changeMade =
        oldBlock.getGenerationStamp() != newBlock.getGenerationStamp();
    oldBlock.setGenerationStamp(newBlock.getGenerationStamp());
    if (!oldBlock.isComplete()
        && (!isLastBlock || op.shouldCompleteLastBlock())) {
      changeMade = true;
      fsNamesys.getBlockManager().forceCompleteBlock(oldBlock);
    }
    if (changeMade) {
      // The state or gen-stamp of the block has changed. So, we may be
      // able to process some messages from datanodes that we previously
      // were unable to process.
      fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
    }
  }
  if (newBlocks.length < oldBlocks.length) {
    // We're removing a block from the file, e.g. abandonBlock(...)
    if (!file.isUnderConstruction()) {
      throw new IOException("Trying to remove a block from file " + path
          + " which is not under construction.");
    }
    if (newBlocks.length != oldBlocks.length - 1) {
      throw new IOException(
          "Trying to remove more than one block from file " + path);
    }
    Block oldBlock = oldBlocks[oldBlocks.length - 1];
    boolean removed = FSDirWriteFileOp.unprotectedRemoveBlock(
        fsDir, path, iip, file, oldBlock);
    if (!removed && !(op instanceof UpdateBlocksOp)) {
      throw new IOException("Trying to delete non-existent block " + oldBlock);
    }
  } else if (newBlocks.length > oldBlocks.length) {
    final boolean isStriped = ecPolicy != null;
    // We're adding blocks
    for (int i = oldBlocks.length; i < newBlocks.length; i++) {
      Block newBlock = newBlocks[i];
      final BlockInfo newBI;
      if (!op.shouldCompleteLastBlock()) {
        // TODO: shouldn't this only be true for the last block?
        // what about an old-version fsync() where fsync isn't called
        // until several blocks in?
        if (isStriped) {
          newBI = new BlockInfoStriped(newBlock, ecPolicy);
        } else {
          newBI = new BlockInfoContiguous(newBlock,
              file.getPreferredBlockReplication());
        }
        newBI.convertToBlockUnderConstruction(
            BlockUCState.UNDER_CONSTRUCTION, null);
      } else {
        // OP_CLOSE should add finalized blocks. This code path
        // is only executed when loading edits written by prior
        // versions of Hadoop. Current versions always log
        // OP_ADD operations as each block is allocated.
        if (isStriped) {
          newBI = new BlockInfoStriped(newBlock, ecPolicy);
        } else {
          newBI = new BlockInfoContiguous(newBlock,
              file.getFileReplication());
        }
      }
      fsNamesys.getBlockManager().addBlockCollectionWithCheck(newBI, file);
      file.addBlock(newBI);
      fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
    }
  }
}
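The matching rule in the first loop is easy to misread, so here is a minimal, self-contained sketch of the same invariant: block IDs must agree position by position, and a generation-stamp change is only tolerated on the last block when the old and new arrays have equal length (a pure gen-stamp update). The class and helper names are hypothetical; only org.apache.hadoop.hdfs.protocol.Block is assumed.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.Block;

public class BlockReconcileSketch {
  // Hypothetical helper mirroring the ID/gen-stamp check in updateBlocks.
  static void checkBlocksMatch(Block[] oldBlocks, Block[] newBlocks,
      String path) throws IOException {
    boolean isGenStampUpdate = oldBlocks.length == newBlocks.length;
    for (int i = 0; i < oldBlocks.length && i < newBlocks.length; i++) {
      Block oldBlock = oldBlocks[i];
      Block newBlock = newBlocks[i];
      boolean isLastBlock = i == newBlocks.length - 1;
      if (oldBlock.getBlockId() != newBlock.getBlockId()
          || (oldBlock.getGenerationStamp() != newBlock.getGenerationStamp()
              && !(isGenStampUpdate && isLastBlock))) {
        throw new IOException("Mismatched block at index " + i + " of " + path);
      }
    }
  }

  public static void main(String[] args) throws IOException {
    Block[] oldBlocks = { new Block(1, 1024, 1000), new Block(2, 512, 1000) };
    // Same length, new gen stamp only on the last block: accepted.
    Block[] newBlocks = { new Block(1, 1024, 1000), new Block(2, 2048, 1001) };
    checkBlocksMatch(oldBlocks, newBlocks, "/demo/file");
    System.out.println("gen-stamp update on last block accepted");
  }
}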
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.
The class FSEditLogLoader, method addNewBlock.
/**
 * Add a new block into the given INodeFile
 */
private void addNewBlock(AddBlockOp op, INodeFile file,
    ErasureCodingPolicy ecPolicy) throws IOException {
  BlockInfo[] oldBlocks = file.getBlocks();
  Block pBlock = op.getPenultimateBlock();
  Block newBlock = op.getLastBlock();
  if (pBlock != null) {
    // the penultimate block is not null
    assert oldBlocks != null && oldBlocks.length > 0;
    // compare pBlock with the last block of oldBlocks
    BlockInfo oldLastBlock = oldBlocks[oldBlocks.length - 1];
    if (oldLastBlock.getBlockId() != pBlock.getBlockId()
        || oldLastBlock.getGenerationStamp() != pBlock.getGenerationStamp()) {
      throw new IOException("Mismatched block IDs or generation stamps for "
          + "the old last block of file " + op.getPath()
          + ", the old last block is " + oldLastBlock
          + ", and the block read from editlog is " + pBlock);
    }
    oldLastBlock.setNumBytes(pBlock.getNumBytes());
    if (!oldLastBlock.isComplete()) {
      fsNamesys.getBlockManager().forceCompleteBlock(oldLastBlock);
      fsNamesys.getBlockManager().processQueuedMessagesForBlock(pBlock);
    }
  } else {
    // the penultimate block is null
    Preconditions.checkState(oldBlocks == null || oldBlocks.length == 0);
  }
  // add the new block
  final BlockInfo newBlockInfo;
  boolean isStriped = ecPolicy != null;
  if (isStriped) {
    newBlockInfo = new BlockInfoStriped(newBlock, ecPolicy);
  } else {
    newBlockInfo = new BlockInfoContiguous(newBlock,
        file.getPreferredBlockReplication());
  }
  newBlockInfo.convertToBlockUnderConstruction(
      BlockUCState.UNDER_CONSTRUCTION, null);
  fsNamesys.getBlockManager().addBlockCollectionWithCheck(newBlockInfo, file);
  file.addBlock(newBlockInfo);
  fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
}
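The penultimate-block check above reduces to a small predicate: the last block already attached to the file must equal the penultimate block recorded in the AddBlockOp, by both ID and generation stamp, and a null penultimate block implies an empty file. A hedged sketch, again assuming only org.apache.hadoop.hdfs.protocol.Block, with hypothetical class and helper names:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.Block;

public class PenultimateCheckSketch {
  // Hypothetical helper mirroring addNewBlock's consistency check.
  static void verifyPenultimate(Block[] oldBlocks, Block pBlock, String path)
      throws IOException {
    if (pBlock == null) {
      // A null penultimate block means the file must have had no blocks yet.
      if (oldBlocks != null && oldBlocks.length > 0) {
        throw new IllegalStateException("file " + path + " already has blocks");
      }
      return;
    }
    // Like the real code's assert, a non-null pBlock presumes oldBlocks
    // is non-empty.
    Block oldLast = oldBlocks[oldBlocks.length - 1];
    if (oldLast.getBlockId() != pBlock.getBlockId()
        || oldLast.getGenerationStamp() != pBlock.getGenerationStamp()) {
      throw new IOException("Mismatched penultimate block for " + path
          + ": " + oldLast + " vs " + pBlock);
    }
  }

  public static void main(String[] args) throws IOException {
    Block[] oldBlocks = { new Block(7, 1024, 2000) };
    verifyPenultimate(oldBlocks, new Block(7, 1024, 2000), "/demo/file");
    System.out.println("penultimate block matches");
  }
}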
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.
The class FSImageSerialization, method readINodeUnderConstruction.
// Helper function that reads in an INodeUnderConstruction
// from the input stream
static INodeFile readINodeUnderConstruction(DataInput in,
    FSNamesystem fsNamesys, int imgVersion) throws IOException {
  byte[] name = readBytes(in);
  long inodeId = NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.ADD_INODE_ID, imgVersion)
          ? in.readLong() : fsNamesys.dir.allocateNewInodeId();
  short blockReplication = in.readShort();
  long modificationTime = in.readLong();
  long preferredBlockSize = in.readLong();
  int numBlocks = in.readInt();
  final BlockInfoContiguous[] blocksContiguous =
      new BlockInfoContiguous[numBlocks];
  Block blk = new Block();
  int i = 0;
  for (; i < numBlocks - 1; i++) {
    blk.readFields(in);
    blocksContiguous[i] = new BlockInfoContiguous(blk, blockReplication);
  }
  // last block is UNDER_CONSTRUCTION
  if (numBlocks > 0) {
    blk.readFields(in);
    blocksContiguous[i] = new BlockInfoContiguous(blk, blockReplication);
    blocksContiguous[i].convertToBlockUnderConstruction(
        BlockUCState.UNDER_CONSTRUCTION, null);
  }
  PermissionStatus perm = PermissionStatus.read(in);
  String clientName = readString(in);
  String clientMachine = readString(in);
  // We previously stored locations for the last block, now we
  // just record that there are none
  int numLocs = in.readInt();
  assert numLocs == 0 : "Unexpected block locations";
  // Images in the pre-protobuf format will not have the lazyPersist flag,
  // so it is safe to pass false always.
  INodeFile file = new INodeFile(inodeId, name, perm, modificationTime,
      modificationTime, blocksContiguous, blockReplication,
      preferredBlockSize);
  file.toUnderConstruction(clientName, clientMachine);
  return file;
}
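For reference, the field order this reader consumes can be sketched as a writer. This is a simplified illustration only, not the real image encoder: the actual format writes the name, PermissionStatus, and client strings with FSImageSerialization's own helpers rather than writeUTF, and the inode-id field is only present when the layout supports ADD_INODE_ID.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class UcInodeLayoutSketch {
  // Emits fields in the order readINodeUnderConstruction consumes them.
  public static byte[] serialize() throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    out.writeUTF("demo-file");         // name (readBytes in the real format)
    out.writeLong(16385L);             // inode id (if ADD_INODE_ID supported)
    out.writeShort(3);                 // block replication
    out.writeLong(1426222918000L);     // modification time
    out.writeLong(128L * 1024 * 1024); // preferred block size
    out.writeInt(0);                   // number of blocks (none, for brevity)
    // ... PermissionStatus, client name, client machine would follow ...
    out.writeInt(0);                   // numLocs: locations no longer stored
    out.flush();
    return bos.toByteArray();
  }
}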
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.
The class TestFSEditLogLoader, method testHasNonEcBlockUsingStripedIDForUpdateBlocks.
@Test
public void testHasNonEcBlockUsingStripedIDForUpdateBlocks()
    throws IOException {
  // start a cluster
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    FSNamesystem fns = cluster.getNamesystem();

    String testDir = "/test_block_manager";
    String testFile = "testfile_002";
    String testFilePath = testDir + "/" + testFile;
    String clientName = "testUser2";
    String clientMachine = "testMachine1";
    long blkId = 100;
    long blkNumBytes = 1024;
    long timestamp = 1426222918;

    fs.mkdir(new Path(testDir), new FsPermission("755"));
    Path p = new Path(testFilePath);
    DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);

    BlockInfoContiguous cBlk = new BlockInfoContiguous(
        new Block(blkId, blkNumBytes, timestamp), (short) 3);
    INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
    file.toUnderConstruction(clientName, clientMachine);
    file.addBlock(cBlk);
    TestINodeFile.toCompleteFile(file);

    long newBlkNumBytes = 1024 * 8;
    long newTimestamp = 1426222918 + 3600;
    file.toUnderConstruction(clientName, clientMachine);
    file.getLastBlock().setBlockId(-100);
    file.getLastBlock().setNumBytes(newBlkNumBytes);
    file.getLastBlock().setGenerationStamp(newTimestamp);
    fns.getEditLog().logUpdateBlocks(testFilePath, file, true);
    TestINodeFile.toCompleteFile(file);

    cluster.restartNameNodes();
    cluster.waitActive();
    fns = cluster.getNamesystem();
    assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());

    cluster.shutdown();
    cluster = null;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
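The test flips the last block's ID to -100 because striped block IDs are allocated from the negative ID range; replaying an edit log that attaches such an ID to a contiguous block is what sets the hasNonEcBlockUsingStripedID flag. A quick probe of the ID classification, assuming BlockIdManager's public static isStripedBlockID helper:

import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;

public class StripedIdProbe {
  public static void main(String[] args) {
    // Striped block IDs live in the negative range, so the test's -100
    // is indistinguishable from a striped ID.
    System.out.println(BlockIdManager.isStripedBlockID(-100L)); // true
    System.out.println(BlockIdManager.isStripedBlockID(100L));  // false
  }
}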
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous in project hadoop by apache.
The class TestFSEditLogLoader, method testHasNonEcBlockUsingStripedIDForAddBlock.
@Test
public void testHasNonEcBlockUsingStripedIDForAddBlock()
    throws IOException {
  // start a cluster
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    FSNamesystem fns = cluster.getNamesystem();

    String testDir = "/test_block_manager";
    String testFile = "testfile_addblock";
    String testFilePath = testDir + "/" + testFile;
    String clientName = "testUser_addblock";
    String clientMachine = "testMachine_addblock";
    long blkId = -1;
    long blkNumBytes = 1024;
    long timestamp = 1426222918;

    fs.mkdir(new Path(testDir), new FsPermission("755"));
    Path p = new Path(testFilePath);

    // check whether hasNonEcBlockUsingStripedID is set
    // after loading an addBlock edit log
    DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
    BlockInfoContiguous cBlk = new BlockInfoContiguous(
        new Block(blkId, blkNumBytes, timestamp), (short) 3);
    INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
    file.toUnderConstruction(clientName, clientMachine);
    file.addBlock(cBlk);
    fns.getEditLog().logAddBlock(testFilePath, file);
    TestINodeFile.toCompleteFile(file);

    cluster.restartNameNodes();
    cluster.waitActive();
    fns = cluster.getNamesystem();
    assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());

    cluster.shutdown();
    cluster = null;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
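This test differs from the previous one only in the operation logged (logAddBlock instead of logUpdateBlocks); both end with the same restart-and-verify tail. One way to share it, as a sketch meant to be dropped into TestFSEditLogLoader and reusing its existing imports (the helper name is hypothetical):

// Hypothetical helper for the restart-and-verify tail shared by both tests.
private static void restartAndAssertStripedFlag(MiniDFSCluster cluster)
    throws IOException {
  cluster.restartNameNodes();
  cluster.waitActive();
  FSNamesystem fns = cluster.getNamesystem();
  assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
}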