Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From the class TestFsck, method testCorruptBlock.
@Test
public void testCorruptBlock() throws Exception {
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  // Set short retry timeouts so this test runs faster
  conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
  FileSystem fs = null;
  DFSClient dfsClient = null;
  LocatedBlocks blocks = null;
  int replicaCount = 0;
  Random random = new Random();
  String outStr = null;
  short factor = 1;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  Path file1 = new Path("/testCorruptBlock");
  DFSTestUtil.createFile(fs, file1, 1024, factor, 0);
  // Wait until file replication has completed
  DFSTestUtil.waitReplication(fs, file1, factor);
  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
  // Make sure filesystem is in healthy state
  outStr = runFsck(conf, 0, true, "/");
  System.out.println(outStr);
  assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
  // corrupt replicas
  File blockFile = cluster.getBlockFile(0, block);
  if (blockFile != null && blockFile.exists()) {
    RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
    FileChannel channel = raFile.getChannel();
    String badString = "BADBAD";
    int rand = random.nextInt((int) channel.size() / 2);
    raFile.seek(rand);
    raFile.write(badString.getBytes());
    raFile.close();
  }
  // Read the file to trigger reportBadBlocks
  try {
    IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf,
        true);
  } catch (IOException ie) {
    assertTrue(ie instanceof ChecksumException);
  }
  dfsClient = new DFSClient(
      new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
  blocks = dfsClient.getNamenode()
      .getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
  replicaCount = blocks.get(0).getLocations().length;
  while (replicaCount != factor) {
    try {
      Thread.sleep(100);
    } catch (InterruptedException ignore) {
    }
    blocks = dfsClient.getNamenode()
        .getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
  }
  assertTrue(blocks.get(0).isCorrupt());
  // Check if fsck reports the same
  outStr = runFsck(conf, 1, true, "/");
  System.out.println(outStr);
  assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
  assertTrue(outStr.contains("testCorruptBlock"));
}
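The runFsck helper used above is local to TestFsck. As a rough sketch of what it wraps, fsck can be driven programmatically through the public DFSck tool; the real helper also asserts on the expected exit code and adjusts log levels, which this hypothetical fsck method omits:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

// Hypothetical stand-in for TestFsck.runFsck: run fsck over the given
// paths and return everything it printed.
static String fsck(Configuration conf, String... path) throws Exception {
  ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(bytes, true);
  int errCode = ToolRunner.run(new DFSck(conf, out), path);
  System.out.println("fsck exit code: " + errCode);
  return bytes.toString();
}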
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From the class TestHostsFiles, method testHostsExcludeInUI.
@Test
public void testHostsExcludeInUI() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");
  HostsFileWriter hostsFileWriter = new HostsFileWriter();
  hostsFileWriter.initialize(conf, "temp/decommission");
  // Two blocks and four racks
  String[] racks = { "/rack1", "/rack1", "/rack2", "/rack2" };
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();
  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    // Decommission one of the hosts with the block, this should cause
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation[] locs = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    LOG.info("adding '" + name + "' to decommission");
    hostsFileWriter.initExcludeHost(name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);
    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=NameNodeInfo");
    String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    assertTrue("Live nodes should contain the decommissioned node",
        nodes.contains("Decommissioned"));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
    hostsFileWriter.cleanup();
  }
}
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From the class JsonUtilClient, method toLocatedBlock.
/** Convert a Json map to LocatedBlock. */
static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
  if (m == null) {
    return null;
  }
  final ExtendedBlock b = toExtendedBlock((Map<?, ?>) m.get("block"));
  final DatanodeInfo[] locations =
      toDatanodeInfoArray(getList(m, "locations"));
  final long startOffset = ((Number) m.get("startOffset")).longValue();
  final boolean isCorrupt = (Boolean) m.get("isCorrupt");
  final DatanodeInfo[] cachedLocations =
      toDatanodeInfoArray(getList(m, "cachedLocations"));
  final StorageType[] storageTypes =
      toStorageTypeArray(getList(m, "storageTypes"));
  final LocatedBlock locatedblock = new LocatedBlock(b, locations, null,
      storageTypes, startOffset, isCorrupt, cachedLocations);
  locatedblock.setBlockToken(toBlockToken((Map<?, ?>) m.get("blockToken")));
  return locatedblock;
}
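For orientation, a hand-built example of the map shape this parser consumes. The top-level keys come straight from the code above; the inner "block" field names mirror the sibling toExtendedBlock parser, and every value is illustrative. Note that toLocatedBlock is package-private, so real callers sit inside org.apache.hadoop.hdfs.web:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Hypothetical sample of the JSON map (already parsed into Java
// collections) that toLocatedBlock expects. All values are made up.
static Map<String, Object> sampleLocatedBlockJson() {
  Map<String, Object> block = new HashMap<>();
  block.put("blockPoolId", "BP-7-127.0.0.1-1400000000000");
  block.put("blockId", 1073741825L);
  block.put("numBytes", 1024L);
  block.put("generationStamp", 1001L);

  Map<String, Object> m = new HashMap<>();
  m.put("block", block);
  m.put("locations", Collections.emptyList()); // list of DatanodeInfo maps
  m.put("startOffset", 0L);
  m.put("isCorrupt", false);
  m.put("cachedLocations", Collections.emptyList());
  m.put("storageTypes", Collections.singletonList("DISK"));
  // "blockToken" may be absent: toBlockToken(null) simply returns null.
  return m;
}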
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From the class StripedBlockUtil, method constructInternalBlock.
/**
 * This method creates an internal {@link ExtendedBlock} at the given index
 * of a block group.
 */
public static ExtendedBlock constructInternalBlock(ExtendedBlock blockGroup,
    int cellSize, int dataBlkNum, int idxInBlockGroup) {
  ExtendedBlock block = new ExtendedBlock(blockGroup);
  block.setBlockId(blockGroup.getBlockId() + idxInBlockGroup);
  block.setNumBytes(getInternalBlockLength(blockGroup.getNumBytes(),
      cellSize, dataBlkNum, idxInBlockGroup));
  return block;
}
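A minimal usage sketch, assuming an RS(6,3) striped layout with a 1 MiB cell size; the block pool id, block id, and generation stamp below are illustrative values, not taken from the source:

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.util.StripedBlockUtil;

// A 12 MiB block group striped across 6 data blocks is exactly two
// full stripes, so each data block holds 2 MiB. Internal block ids are
// the group id plus the index within the group.
ExtendedBlock group = new ExtendedBlock("BP-7-127.0.0.1-1400000000000",
    -1073741824L, 12L * 1024 * 1024, 1001L);
ExtendedBlock internal =
    StripedBlockUtil.constructInternalBlock(group, 1024 * 1024, 6, 2);
System.out.println(internal.getBlockId());  // group id + 2
System.out.println(internal.getNumBytes()); // 2 MiB for this index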
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From the class FSDirWriteFileOp, method abandonBlock.
static void abandonBlock(FSDirectory fsd, FSPermissionChecker pc,
    ExtendedBlock b, long fileId, String src, String holder)
    throws IOException {
  final INodesInPath iip = fsd.resolvePath(pc, src, fileId);
  src = iip.getPath();
  FSNamesystem fsn = fsd.getFSNamesystem();
  final INodeFile file = fsn.checkLease(iip, holder, fileId);
  Preconditions.checkState(file.isUnderConstruction());
  if (file.getBlockType() == BlockType.STRIPED) {
    // do not abandon block for striped file
    return;
  }
  Block localBlock = ExtendedBlock.getLocalBlock(b);
  fsd.writeLock();
  try {
    // Remove the block from the pending creates list
    if (!unprotectedRemoveBlock(fsd, src, iip, file, localBlock)) {
      return;
    }
  } finally {
    fsd.writeUnlock();
  }
  persistBlocks(fsd, src, file, false);
}
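abandonBlock reduces the client-supplied ExtendedBlock to a pool-local Block before editing the namespace. A minimal sketch of that distinction, with illustrative values:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

// An ExtendedBlock is a Block qualified by a block pool id, so the same
// numeric block id can recur across namespaces under HDFS federation.
// getLocalBlock() drops the pool qualifier, as abandonBlock does above.
ExtendedBlock eb = new ExtendedBlock("BP-7-127.0.0.1-1400000000000",
    1073741825L, 1024L, 1001L);
Block local = eb.getLocalBlock();
System.out.println(eb.getBlockPoolId());                   // pool id
System.out.println(local.getBlockId() == eb.getBlockId()); // true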