use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class TestSnapshotReplication method checkFileReplication.
/**
* Check the replication of a given file.
*
* @param file The given file
* @param replication The expected replication number
* @param blockReplication The expected replication number for the block
* @throws Exception
*/
private void checkFileReplication(Path file, short replication, short blockReplication) throws Exception {
// Get the FileStatus of the given file and identify its replication number.
// Note that the replication number in FileStatus is derived from
// INodeFile#getFileReplication().
short fileReplication = hdfs.getFileStatus(file).getReplication();
assertEquals(replication, fileReplication);
// Check the replication recorded on each block of the file
INode inode = fsdir.getINode(file.toString());
assertTrue(inode instanceof INodeFile);
for (BlockInfo b : inode.asFile().getBlocks()) {
assertEquals(blockReplication, b.getReplication());
}
}
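A hedged usage sketch for this helper: a caller in the same test class could change a file's replication factor and then verify that both the INode-level and block-level values follow. The fields hdfs, dir, BLOCKSIZE and seed are assumptions about the surrounding test class, not taken from the snippet above.
@Test
public void testSetReplicationUpdatesBlocks() throws Exception {
  final short initialRepl = 3;
  final short newRepl = 4;
  Path file = new Path(dir, "file1");
  // create the file at the initial replication factor
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, initialRepl, seed);
  checkFileReplication(file, initialRepl, initialRepl);
  // raise the replication factor; both views should reflect the new value
  hdfs.setReplication(file, newRepl);
  checkFileReplication(file, newRepl, newRepl);
}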
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class NamenodeFsck method blockIdCK.
/**
* Check block information given a blockId number
*
*/
public void blockIdCK(String blockId) {
if (blockId == null) {
out.println("Please provide valid blockId!");
return;
}
try {
// parse the block id and build a Block key
Block block = new Block(Block.getBlockId(blockId));
// look up the stored BlockInfo to find which file this block belongs to
BlockInfo blockInfo = blockManager.getStoredBlock(block);
if (blockInfo == null) {
out.println("Block " + blockId + " " + NONEXISTENT_STATUS);
LOG.warn("Block " + blockId + " " + NONEXISTENT_STATUS);
return;
}
final INodeFile iNode = namenode.getNamesystem().getBlockCollection(blockInfo);
NumberReplicas numberReplicas = blockManager.countNodes(blockInfo);
out.println("Block Id: " + blockId);
out.println("Block belongs to: " + iNode.getFullPathName());
out.println("No. of Expected Replica: " + blockManager.getExpectedRedundancyNum(blockInfo));
out.println("No. of live Replica: " + numberReplicas.liveReplicas());
out.println("No. of excess Replica: " + numberReplicas.excessReplicas());
out.println("No. of stale Replica: " + numberReplicas.replicasOnStaleNodes());
out.println("No. of decommissioned Replica: " + numberReplicas.decommissioned());
out.println("No. of decommissioning Replica: " + numberReplicas.decommissioning());
if (this.showMaintenanceState) {
out.println("No. of entering maintenance Replica: " + numberReplicas.liveEnteringMaintenanceReplicas());
out.println("No. of in maintenance Replica: " + numberReplicas.maintenanceNotForReadReplicas());
}
out.println("No. of corrupted Replica: " + numberReplicas.corruptReplicas());
//record datanodes that have corrupted block replica
Collection<DatanodeDescriptor> corruptionRecord = null;
if (blockManager.getCorruptReplicas(block) != null) {
corruptionRecord = blockManager.getCorruptReplicas(block);
}
//report block replicas status on datanodes
for (int idx = (blockInfo.numNodes() - 1); idx >= 0; idx--) {
DatanodeDescriptor dn = blockInfo.getDatanode(idx);
out.print("Block replica on datanode/rack: " + dn.getHostName() + dn.getNetworkLocation() + " ");
if (corruptionRecord != null && corruptionRecord.contains(dn)) {
out.print(CORRUPT_STATUS + "\t ReasonCode: " + blockManager.getCorruptReason(block, dn));
} else if (dn.isDecommissioned()) {
out.print(DECOMMISSIONED_STATUS);
} else if (dn.isDecommissionInProgress()) {
out.print(DECOMMISSIONING_STATUS);
} else if (this.showMaintenanceState && dn.isEnteringMaintenance()) {
out.print(ENTERING_MAINTENANCE_STATUS);
} else if (this.showMaintenanceState && dn.isInMaintenance()) {
out.print(IN_MAINTENANCE_STATUS);
} else {
out.print(HEALTHY_STATUS);
}
out.print("\n");
}
} catch (Exception e) {
String errMsg = "Fsck on blockId '" + blockId + "'";
LOG.warn(errMsg, e);
out.println(e.getMessage());
out.print("\n\n" + errMsg);
}
}
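For orientation, a hedged sketch of how this code path is usually reached from the client side: the hdfs fsck tool (org.apache.hadoop.hdfs.tools.DFSck) forwards a -blockId argument to the NameNode, which ends up invoking blockIdCK for each requested id. The block id literal below is a placeholder, not a real block.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class BlockIdFsckExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // equivalent of running: hdfs fsck -blockId blk_1073741825
    int exitCode = ToolRunner.run(new DFSck(conf), new String[] { "-blockId", "blk_1073741825" });
    System.out.println("fsck exit code: " + exitCode);
  }
}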
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class TestDecommission method testRecommission.
/**
* Test that over-replicated blocks are deleted on recommission.
*/
@Test(timeout = 120000)
public void testRecommission() throws Exception {
final int numDatanodes = 6;
try {
LOG.info("Starting test testRecommission");
startCluster(1, numDatanodes);
final Path file1 = new Path("testDecommission.dat");
final int replicas = numDatanodes - 1;
ArrayList<DatanodeInfo> decommissionedNodes = Lists.newArrayList();
final FileSystem fileSys = getCluster().getFileSystem();
// Write a file to n-1 datanodes
writeFile(fileSys, file1, replicas);
// Decommission one of the datanodes with a replica
BlockLocation loc = fileSys.getFileBlockLocations(file1, 0, 1)[0];
assertEquals("Unexpected number of replicas from getFileBlockLocations", replicas, loc.getHosts().length);
final String toDecomHost = loc.getNames()[0];
String toDecomUuid = null;
for (DataNode d : getCluster().getDataNodes()) {
if (d.getDatanodeId().getXferAddr().equals(toDecomHost)) {
toDecomUuid = d.getDatanodeId().getDatanodeUuid();
break;
}
}
assertNotNull("Could not find a dn with the block!", toDecomUuid);
final DatanodeInfo decomNode = takeNodeOutofService(0, toDecomUuid, 0, decommissionedNodes, AdminStates.DECOMMISSIONED);
decommissionedNodes.add(decomNode);
final BlockManager blockManager = getCluster().getNamesystem().getBlockManager();
final DatanodeManager datanodeManager = blockManager.getDatanodeManager();
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
// Ensure decommissioned datanode is not automatically shutdown
DFSClient client = getDfsClient(0);
assertEquals("All datanodes must be alive", numDatanodes, client.datanodeReport(DatanodeReportType.LIVE).length);
// wait for the block to be replicated
final ExtendedBlock b = DFSTestUtil.getFirstBlock(fileSys, file1);
final String uuid = toDecomUuid;
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
BlockInfo info = blockManager.getStoredBlock(b.getLocalBlock());
int count = 0;
StringBuilder sb = new StringBuilder("Replica locations: ");
for (int i = 0; i < info.numNodes(); i++) {
DatanodeDescriptor dn = info.getDatanode(i);
sb.append(dn + ", ");
if (!dn.getDatanodeUuid().equals(uuid)) {
count++;
}
}
LOG.info(sb.toString());
LOG.info("Count: " + count);
return count == replicas;
}
}, 500, 30000);
// recommission the node and wait for the over-replication to be fixed
putNodeInService(0, decomNode);
BlockManagerTestUtil.recheckDecommissionState(datanodeManager);
DFSTestUtil.waitForReplication(getCluster(), b, 1, replicas, 0);
cleanupFile(fileSys, file1);
} finally {
shutdownCluster();
}
}
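As a side note on the wait loop above: on a Java 8 codebase the anonymous Supplier could be written as a lambda. This is only a sketch and assumes the same blockManager, b, uuid and replicas locals that the test already defines.
GenericTestUtils.waitFor(() -> {
  BlockInfo info = blockManager.getStoredBlock(b.getLocalBlock());
  int replicasOffDecomNode = 0;
  for (int i = 0; i < info.numNodes(); i++) {
    // count replicas that live on nodes other than the decommissioned one
    if (!info.getDatanode(i).getDatanodeUuid().equals(uuid)) {
      replicasOffDecomNode++;
    }
  }
  return replicasOffDecomNode == replicas;
}, 500, 30000);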
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class TestFSEditLogLoader method testAddNewStripedBlock.
@Test
public void testAddNewStripedBlock() throws IOException {
// start a cluster
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY, testECPolicy.getName());
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
FSNamesystem fns = cluster.getNamesystem();
String testDir = "/ec";
String testFile = "testfile_001";
String testFilePath = testDir + "/" + testFile;
String clientName = "testUser1";
String clientMachine = "testMachine1";
long blkId = 1;
long blkNumBytes = 1024;
long timestamp = 1426222918;
short blockNum = (short) testECPolicy.getNumDataUnits();
short parityNum = (short) testECPolicy.getNumParityUnits();
//set the erasure coding policy of the directory
fs.mkdir(new Path(testDir), new FsPermission("755"));
fs.getClient().getNamenode().setErasureCodingPolicy(testDir, testECPolicy.getName());
// Create a file with striped block
Path p = new Path(testFilePath);
DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
fns.enterSafeMode(false);
fns.saveNamespace(0, 0);
fns.leaveSafeMode(false);
// Add a striped block to the file
BlockInfoStriped stripedBlk = new BlockInfoStriped(new Block(blkId, blkNumBytes, timestamp), testECPolicy);
INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
file.toUnderConstruction(clientName, clientMachine);
file.addBlock(stripedBlk);
fns.getEditLog().logAddBlock(testFilePath, file);
TestINodeFile.toCompleteFile(file);
//If the block loaded after the restart is the same as the one added above,
//it means the edit log was applied to the namespace successfully.
cluster.restartNameNodes();
cluster.waitActive();
fns = cluster.getNamesystem();
INodeFile inodeLoaded = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
assertTrue(inodeLoaded.isStriped());
BlockInfo[] blks = inodeLoaded.getBlocks();
assertEquals(1, blks.length);
assertEquals(blkId, blks[0].getBlockId());
assertEquals(blkNumBytes, blks[0].getNumBytes());
assertEquals(timestamp, blks[0].getGenerationStamp());
assertEquals(blockNum, ((BlockInfoStriped) blks[0]).getDataBlockNum());
assertEquals(parityNum, ((BlockInfoStriped) blks[0]).getParityBlockNum());
cluster.shutdown();
cluster = null;
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
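To make the last two assertions above concrete, here is a small hedged sketch (no cluster required): a BlockInfoStriped takes its data and parity unit counts from the ErasureCodingPolicy it is constructed with, so for the built-in RS(6,3) policy the block group spans nine internal blocks. The SystemErasureCodingPolicies helper is assumed to be available (recent Hadoop 3 lines); the block id, length and generation stamp are arbitrary.
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
import org.junit.Test;

public class TestStripedBlockGroupGeometry {
  @Test
  public void testStripedBlockGroupGeometry() {
    ErasureCodingPolicy rs63 = SystemErasureCodingPolicies.getByID(
        SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
    BlockInfoStriped striped = new BlockInfoStriped(new Block(1, 1024, 1001), rs63);
    // the unit counts come straight from the policy
    assertEquals(6, striped.getDataBlockNum());
    assertEquals(3, striped.getParityBlockNum());
    assertEquals(9, striped.getTotalBlockNum());
  }
}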
use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
the class TestFSImage method testPersistHelper.
private void testPersistHelper(Configuration conf) throws IOException {
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNamesystem();
DistributedFileSystem fs = cluster.getFileSystem();
final Path dir = new Path("/abc/def");
final Path file1 = new Path(dir, "f1");
final Path file2 = new Path(dir, "f2");
// create an empty file f1
fs.create(file1).close();
// create an under-construction file f2
FSDataOutputStream out = fs.create(file2);
out.writeBytes("hello");
((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
// checkpoint
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNode();
cluster.waitActive();
fs = cluster.getFileSystem();
assertTrue(fs.isDirectory(dir));
assertTrue(fs.exists(file1));
assertTrue(fs.exists(file2));
// check internals of file2
INodeFile file2Node = fsn.dir.getINode4Write(file2.toString()).asFile();
assertEquals("hello".length(), file2Node.computeFileSize());
assertTrue(file2Node.isUnderConstruction());
BlockInfo[] blks = file2Node.getBlocks();
assertEquals(1, blks.length);
assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
// check lease manager
Lease lease = fsn.leaseManager.getLease(file2Node);
Assert.assertNotNull(lease);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
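A hedged sketch of how a helper like this is commonly driven: once with a default Configuration and once with fsimage compression enabled, so the same create/save/restart/verify cycle covers both serialization paths. The Gzip codec below is just an example value.
@Test
public void testPersist() throws IOException {
  testPersistHelper(new Configuration());
}

@Test
public void testCompression() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      "org.apache.hadoop.io.compress.GzipCodec");
  testPersistHelper(conf);
}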
Aggregations