Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class FSDirectory, method updateReplicationFactor.
/**
 * Tell the block manager to update the replication factors when a delete
 * happens. Deleting a file or a snapshot might decrease the replication
 * factor of the blocks, as blocks are always replicated at the highest
 * replication factor among all snapshots.
 */
void updateReplicationFactor(Collection<UpdatedReplicationInfo> blocks) {
  BlockManager bm = getBlockManager();
  for (UpdatedReplicationInfo e : blocks) {
    BlockInfo b = e.block();
    bm.setReplication(b.getReplication(), e.targetReplication(), b);
  }
}
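For context, a minimal client-side sketch of the behavior the javadoc above describes: lowering a file's replication factor through the public FileSystem API. The path is hypothetical; even after this call succeeds, the NameNode may keep blocks at a higher factor while a snapshot still references the file.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReplicationExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      Path file = new Path("/foo/dir/file"); // hypothetical path
      // Request replication factor 2. Blocks shared with a snapshot are
      // kept at the highest factor among all snapshots, per the javadoc.
      boolean scheduled = fs.setReplication(file, (short) 2);
      System.out.println("Replication change scheduled: " + scheduled);
    }
  }
}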
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class FSDirStatAndListingOp, method getBlockLocations.
/**
* Get block locations within the specified range.
* @see ClientProtocol#getBlockLocations(String, long, long)
* @throws IOException
*/
static GetBlockLocationsResult getBlockLocations(FSDirectory fsd,
    FSPermissionChecker pc, String src, long offset, long length,
    boolean needBlockToken) throws IOException {
  Preconditions.checkArgument(offset >= 0,
      "Negative offset is not supported. File: " + src);
  Preconditions.checkArgument(length >= 0,
      "Negative length is not supported. File: " + src);
  BlockManager bm = fsd.getBlockManager();
  fsd.readLock();
  try {
    final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ);
    src = iip.getPath();
    final INodeFile inode = INodeFile.valueOf(iip.getLastINode(), src);
    if (fsd.isPermissionEnabled()) {
      fsd.checkPathAccess(pc, iip, FsAction.READ);
      fsd.checkUnreadableBySuperuser(pc, iip);
    }
    final long fileSize = iip.isSnapshot()
        ? inode.computeFileSize(iip.getPathSnapshotId())
        : inode.computeFileSizeNotIncludingLastUcBlock();
    boolean isUc = inode.isUnderConstruction();
    if (iip.isSnapshot()) {
      // if src indicates a snapshot file, we need to make sure the returned
      // blocks do not exceed the size of the snapshot file.
      length = Math.min(length, fileSize - offset);
      isUc = false;
    }
    final FileEncryptionInfo feInfo =
        FSDirEncryptionZoneOp.getFileEncryptionInfo(fsd, iip);
    final ErasureCodingPolicy ecPolicy =
        FSDirErasureCodingOp.unprotectedGetErasureCodingPolicy(
            fsd.getFSNamesystem(), iip);
    final LocatedBlocks blocks = bm.createLocatedBlocks(
        inode.getBlocks(iip.getPathSnapshotId()), fileSize, isUc, offset,
        length, needBlockToken, iip.isSnapshot(), feInfo, ecPolicy);
    final long now = now();
    boolean updateAccessTime = fsd.isAccessTimeSupported()
        && !iip.isSnapshot()
        && now > inode.getAccessTime() + fsd.getAccessTimePrecision();
    return new GetBlockLocationsResult(updateAccessTime, blocks);
  } finally {
    fsd.readUnlock();
  }
}
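A hedged client-side counterpart, assuming a reachable HDFS instance picked up from the default Configuration: this public API is what ultimately drives the server-side path above. The path is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class BlockLocationsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(conf)) {
      Path file = new Path("/foo/dir/file"); // hypothetical path
      FileStatus status = fs.getFileStatus(file);
      // Ask for the locations of every block in the file; offset and
      // length select the byte range, mirroring the server-side checks.
      BlockLocation[] locations =
          fs.getFileBlockLocations(status, 0, status.getLen());
      for (BlockLocation location : locations) {
        System.out.println(location); // offset, length, hosts
      }
    }
  }
}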
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class NameNodeRpcServer, method blockReport.
// DatanodeProtocol
@Override
public DatanodeCommand blockReport(final DatanodeRegistration nodeReg,
    String poolId, final StorageBlockReport[] reports,
    final BlockReportContext context) throws IOException {
  checkNNStartup();
  verifyRequest(nodeReg);
  if (blockStateChangeLog.isDebugEnabled()) {
    blockStateChangeLog.debug("*BLOCK* NameNode.blockReport: from " + nodeReg
        + ", reports.length=" + reports.length);
  }
  final BlockManager bm = namesystem.getBlockManager();
  boolean noStaleStorages = false;
  for (int r = 0; r < reports.length; r++) {
    final BlockListAsLongs blocks = reports[r].getBlocks();
    //
    // BlockManager.processReport accumulates information of prior calls
    // for the same node and storage, so the value returned by the last
    // call of this loop is the final updated value for noStaleStorages.
    //
    final int index = r;
    noStaleStorages = bm.runBlockOp(new Callable<Boolean>() {
      @Override
      public Boolean call() throws IOException {
        return bm.processReport(nodeReg, reports[index].getStorage(),
            blocks, context);
      }
    });
    metrics.incrStorageBlockReportOps();
  }
  bm.removeBRLeaseIfNeeded(nodeReg, context);
  BlockManagerFaultInjector.getInstance()
      .incomingBlockReportRpc(nodeReg, context);
  if (nn.getFSImage().isUpgradeFinalized() && !namesystem.isRollingUpgrade()
      && !nn.isStandbyState() && noStaleStorages) {
    return new FinalizeCommand(poolId);
  }
  return null;
}
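A minimal test-side sketch, assuming a MiniDFSCluster as in the other snippets on this page: it forces every DataNode to send a block report immediately, exercising the RPC handler above without waiting for the periodic report interval.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class TriggerBlockReportExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      // Ask each DataNode to report all of its replicas now; the
      // NameNode side lands in NameNodeRpcServer.blockReport above.
      cluster.triggerBlockReports();
    } finally {
      cluster.shutdown();
    }
  }
}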
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class DFSTestUtil, method getExpectedPrimaryNode.
/**
* @return the node which is expected to run the recovery of the
* given block, which is known to be under construction inside the
* given NameNode.
*/
public static DatanodeDescriptor getExpectedPrimaryNode(NameNode nn,
    ExtendedBlock blk) {
  BlockManager bm0 = nn.getNamesystem().getBlockManager();
  BlockInfo storedBlock = bm0.getStoredBlock(blk.getLocalBlock());
  assertTrue("Block " + blk + " should be under construction, got: "
      + storedBlock, !storedBlock.isComplete());
  // We expect that the replica with the most recent heartbeat will be
  // the one in charge of the synchronization / recovery protocol.
  final DatanodeStorageInfo[] storages =
      storedBlock.getUnderConstructionFeature().getExpectedStorageLocations();
  DatanodeStorageInfo expectedPrimary = storages[0];
  long mostRecentLastUpdate =
      expectedPrimary.getDatanodeDescriptor().getLastUpdateMonotonic();
  for (int i = 1; i < storages.length; i++) {
    final long lastUpdate =
        storages[i].getDatanodeDescriptor().getLastUpdateMonotonic();
    if (lastUpdate > mostRecentLastUpdate) {
      expectedPrimary = storages[i];
      mostRecentLastUpdate = lastUpdate;
    }
  }
  return expectedPrimary.getDatanodeDescriptor();
}
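A hedged usage sketch for the helper above; the path and write size are hypothetical. It leaves a file under construction via hflush(), then asks which replica would be expected to coordinate recovery.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

public class ExpectedPrimaryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path p = new Path("/uc-file"); // hypothetical path
      FSDataOutputStream out = fs.create(p);
      out.write(new byte[1024]);
      out.hflush(); // block allocated, file still under construction
      ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, p);
      DatanodeDescriptor primary =
          DFSTestUtil.getExpectedPrimaryNode(cluster.getNameNode(), blk);
      System.out.println("Expected recovery primary: " + primary);
      out.close();
    } finally {
      cluster.shutdown();
    }
  }
}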
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockManager in project hadoop by apache.
The class TestFileCreation, method testFileCreationWithOverwrite.
/**
 * 1. Check that the blocks of the old file are cleaned up after creating with overwrite
 * 2. Restart the NN and check the file
 * 3. Save a new checkpoint, restart the NN, and check the file
 */
@Test(timeout = 120000)
public void testFileCreationWithOverwrite() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt("dfs.blocksize", blockSize);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  DistributedFileSystem dfs = cluster.getFileSystem();
  try {
    dfs.mkdirs(new Path("/foo/dir"));
    String file = "/foo/dir/file";
    Path filePath = new Path(file);
    // Case 1: Create file with overwrite, check the blocks of old file
    // are cleaned after creating with overwrite
    NameNode nn = cluster.getNameNode();
    FSNamesystem fsn = NameNodeAdapter.getNamesystem(nn);
    BlockManager bm = fsn.getBlockManager();
    FSDataOutputStream out = dfs.create(filePath);
    byte[] oldData = AppendTestUtil.randomBytes(seed, fileSize);
    try {
      out.write(oldData);
    } finally {
      out.close();
    }
    LocatedBlocks oldBlocks =
        NameNodeAdapter.getBlockLocations(nn, file, 0, fileSize);
    assertBlocks(bm, oldBlocks, true);
    out = dfs.create(filePath, true);
    byte[] newData = AppendTestUtil.randomBytes(seed, fileSize);
    try {
      out.write(newData);
    } finally {
      out.close();
    }
    dfs.deleteOnExit(filePath);
    LocatedBlocks newBlocks =
        NameNodeAdapter.getBlockLocations(nn, file, 0, fileSize);
    assertBlocks(bm, newBlocks, true);
    assertBlocks(bm, oldBlocks, false);
    FSDataInputStream in = dfs.open(filePath);
    byte[] result = null;
    try {
      result = readAll(in);
    } finally {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
    // Case 2: Restart NN, check the file
    cluster.restartNameNode();
    nn = cluster.getNameNode();
    in = dfs.open(filePath);
    try {
      result = readAll(in);
    } finally {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
    // Case 3: Save new checkpoint and restart NN, check the file
    NameNodeAdapter.enterSafeMode(nn, false);
    NameNodeAdapter.saveNamespace(nn);
    cluster.restartNameNode();
    nn = cluster.getNameNode();
    in = dfs.open(filePath);
    try {
      result = readAll(in);
    } finally {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
  } finally {
    if (dfs != null) {
      dfs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
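The assertBlocks helper is not shown on this page. A plausible sketch, assuming it checks whether each block in the LocatedBlocks is still (or no longer) tracked in the BlockManager's blocks map; this is a guess at the private helper, not its actual source:

private static void assertBlocks(BlockManager bm, LocatedBlocks lbs,
    boolean exist) {
  for (LocatedBlock locatedBlock : lbs.getLocatedBlocks()) {
    // A block removed by the overwrite should no longer resolve here.
    BlockInfo storedBlock =
        bm.getStoredBlock(locatedBlock.getBlock().getLocalBlock());
    if (exist) {
      Assert.assertNotNull(storedBlock);
    } else {
      Assert.assertNull(storedBlock);
    }
  }
}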