Example 41 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

From class TestFsck, method testCorruptBlock.

@Test
public void testCorruptBlock() throws Exception {
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    FileSystem fs = null;
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
    Random random = new Random();
    String outStr = null;
    short factor = 1;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    Path file1 = new Path("/testCorruptBlock");
    DFSTestUtil.createFile(fs, file1, 1024, factor, 0);
    // Wait until file replication has completed
    DFSTestUtil.waitReplication(fs, file1, factor);
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
    // Make sure filesystem is in healthy state
    outStr = runFsck(conf, 0, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // corrupt replicas
    File blockFile = cluster.getBlockFile(0, block);
    if (blockFile != null && blockFile.exists()) {
        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
        FileChannel channel = raFile.getChannel();
        String badString = "BADBAD";
        int rand = random.nextInt((int) channel.size() / 2);
        raFile.seek(rand);
        raFile.write(badString.getBytes());
        raFile.close();
    }
    // Read the file to trigger reportBadBlocks
    try {
        IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf, true);
    } catch (IOException ie) {
        assertTrue(ie instanceof ChecksumException);
    }
    dfsClient = new DFSClient(new InetSocketAddress("localhost", cluster.getNameNodePort()), conf);
    blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
    while (replicaCount != factor) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException ignore) {
        }
        blocks = dfsClient.getNamenode().getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
        replicaCount = blocks.get(0).getLocations().length;
    }
    assertTrue(blocks.get(0).isCorrupt());
    // Check if fsck reports the same
    outStr = runFsck(conf, 1, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
    assertTrue(outStr.contains("testCorruptBlock"));
}
Also used: DFSClient (org.apache.hadoop.hdfs.DFSClient), Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), FileChannel (java.nio.channels.FileChannel), ChecksumException (org.apache.hadoop.fs.ChecksumException), InetSocketAddress (java.net.InetSocketAddress), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Matchers.anyString (org.mockito.Matchers.anyString), IOException (java.io.IOException), IOUtils (org.apache.hadoop.io.IOUtils), Random (java.util.Random), RandomAccessFile (java.io.RandomAccessFile), FileSystem (org.apache.hadoop.fs.FileSystem), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), File (java.io.File), Test (org.junit.Test)
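The hand-rolled polling loop above spins forever if the replica count never converges. A tighter variant, sketched here on the assumption that this branch's GenericTestUtils.waitFor (org.apache.hadoop.test) still takes a Guava Supplier, fails the test with a TimeoutException instead; the DFSClient local is copied into a final variable so the anonymous class can capture it:

// Hedged sketch, not part of the original test. Polls every 100 ms and
// gives up after 60 s rather than looping unboundedly.
final DFSClient client = dfsClient;
GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
        try {
            LocatedBlocks lbs = client.getNamenode()
                .getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
            return lbs.get(0).getLocations().length == factor;
        } catch (IOException e) {
            return false; // keep polling; waitFor enforces the timeout
        }
    }
}, 100, 60000);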

Example 42 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

From class TestHostsFiles, method testHostsExcludeInUI.

@Test
public void testHostsExcludeInUI() throws Exception {
    Configuration conf = getConf();
    short REPLICATION_FACTOR = 2;
    final Path filePath = new Path("/testFile");
    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/decommission");
    // Four datanodes spread across two racks
    String[] racks = { "/rack1", "/rack1", "/rack2", "/rack2" };
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(racks.length).racks(racks).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    try {
        // Create a file with one block
        final FileSystem fs = cluster.getFileSystem();
        DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
        ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
        // Decommission one of the hosts holding the block; this should cause
        // the block to be re-replicated to another host on the same rack,
        // otherwise the rack placement policy would be violated.
        BlockLocation[] locs = fs.getFileBlockLocations(fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
        String name = locs[0].getNames()[0];
        LOG.info("adding '" + name + "' to decommission");
        hostsFileWriter.initExcludeHost(name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        // Check the block still has sufficient # replicas across racks
        DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName mxbeanName = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
        String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
        assertTrue("Live nodes should contain the decommissioned node", nodes.contains("Decommissioned"));
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
        hostsFileWriter.cleanup();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Configuration (org.apache.hadoop.conf.Configuration), HostsFileWriter (org.apache.hadoop.hdfs.util.HostsFileWriter), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), BlockLocation (org.apache.hadoop.fs.BlockLocation), ObjectName (javax.management.ObjectName), FileSystem (org.apache.hadoop.fs.FileSystem), MBeanServer (javax.management.MBeanServer), Test (org.junit.Test)
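Since LiveNodes is a JSON string keyed by datanode name, the substring assertion above can be tightened to inspect each node's administrative state. A minimal sketch, assuming Jackson (which Hadoop already bundles) and assuming each entry exposes an adminState field, as recent NameNodes report:

// Hedged sketch: parse LiveNodes and look for a decommissioned entry.
ObjectMapper mapper = new ObjectMapper();
Map<String, Map<String, Object>> live = mapper.readValue(nodes,
    new TypeReference<Map<String, Map<String, Object>>>() { });
boolean decommissioned = false;
for (Map<String, Object> node : live.values()) {
    // "adminState" is an assumed field name, e.g. "In Service" or "Decommissioned"
    if ("Decommissioned".equals(node.get("adminState"))) {
        decommissioned = true;
    }
}
assertTrue("no live node reports adminState Decommissioned", decommissioned);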

Example 43 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

From class JsonUtilClient, method toLocatedBlock.

/** Convert a JSON map to a LocatedBlock. */
static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
    if (m == null) {
        return null;
    }
    final ExtendedBlock b = toExtendedBlock((Map<?, ?>) m.get("block"));
    final DatanodeInfo[] locations = toDatanodeInfoArray(getList(m, "locations"));
    final long startOffset = ((Number) m.get("startOffset")).longValue();
    final boolean isCorrupt = (Boolean) m.get("isCorrupt");
    final DatanodeInfo[] cachedLocations = toDatanodeInfoArray(getList(m, "cachedLocations"));
    final StorageType[] storageTypes = toStorageTypeArray(getList(m, "storageTypes"));
    final LocatedBlock locatedblock = new LocatedBlock(b, locations, null, storageTypes, startOffset, isCorrupt, cachedLocations);
    locatedblock.setBlockToken(toBlockToken((Map<?, ?>) m.get("blockToken")));
    return locatedblock;
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), StorageType (org.apache.hadoop.fs.StorageType), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Map (java.util.Map)
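For orientation, the map this converter consumes mirrors the JSON WebHDFS emits for a single located block. The sketch below shows the expected key set only; blockMap, datanodeList, storageTypeList, and tokenMap are hypothetical placeholders for the nested structures consumed by toExtendedBlock, toDatanodeInfoArray, toStorageTypeArray, and toBlockToken, and since toLocatedBlock is package-private the call only compiles inside the same package:

// Hedged sketch of the input shape; values are invented for illustration.
Map<String, Object> m = new HashMap<>();
m.put("block", blockMap);               // nested map for toExtendedBlock
m.put("locations", datanodeList);       // list of datanode-info maps
m.put("startOffset", 0L);               // any Number is accepted
m.put("isCorrupt", Boolean.FALSE);
m.put("storageTypes", storageTypeList); // e.g. ["DISK", "SSD"]
m.put("cachedLocations", Collections.emptyList());
m.put("blockToken", tokenMap);          // nested map for toBlockToken
LocatedBlock lb = JsonUtilClient.toLocatedBlock(m);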

Example 44 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

From class StripedBlockUtil, method constructInternalBlock.

/**
   * This method creates an internal {@link ExtendedBlock} at the given index
   * of a block group.
   */
public static ExtendedBlock constructInternalBlock(ExtendedBlock blockGroup, int cellSize, int dataBlkNum, int idxInBlockGroup) {
    ExtendedBlock block = new ExtendedBlock(blockGroup);
    block.setBlockId(blockGroup.getBlockId() + idxInBlockGroup);
    block.setNumBytes(getInternalBlockLength(blockGroup.getNumBytes(), cellSize, dataBlkNum, idxInBlockGroup));
    return block;
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)
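A quick worked example of the arithmetic: the internal block at index i inherits the group's pool and generation stamp, gets block ID groupId + i, and a length computed by getInternalBlockLength. The concrete numbers below are illustrative only, assuming the 1 MiB cell size of the default RS-6-3-1024k policy (striped block group IDs are negative in HDFS, hence the negative ID):

// Hedged sketch: a block group holding 3 full stripes across 6 data blocks.
int cellSize = 1024 * 1024; // 1 MiB cells
int dataBlkNum = 6;         // RS-6-3 data blocks
ExtendedBlock group = new ExtendedBlock("bp-example", -1024L);
group.setNumBytes(3L * dataBlkNum * cellSize); // 18 MiB of data in the group

// Internal data block 0: id = -1024 + 0, length = 3 cells = 3 MiB.
ExtendedBlock internal0 =
    StripedBlockUtil.constructInternalBlock(group, cellSize, dataBlkNum, 0);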

Example 45 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

From class FSDirWriteFileOp, method abandonBlock.

static void abandonBlock(FSDirectory fsd, FSPermissionChecker pc, ExtendedBlock b, long fileId, String src, String holder) throws IOException {
    final INodesInPath iip = fsd.resolvePath(pc, src, fileId);
    src = iip.getPath();
    FSNamesystem fsn = fsd.getFSNamesystem();
    final INodeFile file = fsn.checkLease(iip, holder, fileId);
    Preconditions.checkState(file.isUnderConstruction());
    if (file.getBlockType() == BlockType.STRIPED) {
        // do not abandon block for striped file
        return;
    }
    Block localBlock = ExtendedBlock.getLocalBlock(b);
    fsd.writeLock();
    try {
        // Remove the block from the pending creates list
        if (!unprotectedRemoveBlock(fsd, src, iip, file, localBlock)) {
            return;
        }
    } finally {
        fsd.writeUnlock();
    }
    persistBlocks(fsd, src, file, false);
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Block (org.apache.hadoop.hdfs.protocol.Block)
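This helper backs the ClientProtocol.abandonBlock RPC: a writer that gives up on a freshly allocated block (for instance, after failing to build a pipeline to the chosen datanodes) tells the NameNode to drop it from the pending-creates list before asking for a new block. A hedged sketch of the client side, where namenode is a ClientProtocol proxy and the remaining variables are placeholders matching the parameters above:

// Hedged sketch of the corresponding client call.
namenode.abandonBlock(
    block,        // the ExtendedBlock being given up
    fileId,       // inode id of the file under construction
    src,          // path of the file
    clientName);  // lease holder; must match the file's current lease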

Aggregations

ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 208 uses
Test (org.junit.Test): 124 uses
Path (org.apache.hadoop.fs.Path): 91 uses
Configuration (org.apache.hadoop.conf.Configuration): 71 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 63 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 62 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 55 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 53 uses
IOException (java.io.IOException): 41 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 41 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 38 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 34 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 32 uses
File (java.io.File): 22 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 20 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20 uses
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 18 uses
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 18 uses
InetSocketAddress (java.net.InetSocketAddress): 17 uses
ArrayList (java.util.ArrayList): 17 uses