
Example 71 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestDFSShell method getMaterializedReplicas.

private static List<MaterializedReplica> getMaterializedReplicas(MiniDFSCluster cluster) throws IOException {
    List<MaterializedReplica> replicas = new ArrayList<>();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    List<Map<DatanodeStorage, BlockListAsLongs>> blocks = cluster.getAllBlockReports(poolId);
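    // For each datanode (index i), walk every storage's block report and
    // materialize the corresponding on-disk replica.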
    for (int i = 0; i < blocks.size(); i++) {
        Map<DatanodeStorage, BlockListAsLongs> map = blocks.get(i);
        for (Map.Entry<DatanodeStorage, BlockListAsLongs> e : map.entrySet()) {
            for (Block b : e.getValue()) {
                replicas.add(cluster.getMaterializedReplica(i, new ExtendedBlock(poolId, b)));
            }
        }
    }
    return replicas;
}
Also used: MaterializedReplica (org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica), ArrayList (java.util.ArrayList), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), StringContains.containsString (org.hamcrest.core.StringContains.containsString), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), Block (org.apache.hadoop.hdfs.protocol.Block), Map (java.util.Map)
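For context, tests typically use the returned list to tamper with replicas on disk. A minimal sketch building on the method above, assuming a running MiniDFSCluster and the FsDatasetTestUtils.MaterializedReplica API; this helper is illustrative, not part of TestDFSShell:

// A hedged sketch: corrupt every on-disk replica so a later read or fsck
// exercises the corruption-handling path. Assumes MaterializedReplica
// exposes corruptData() (throws IOException).
private static void corruptAllReplicas(MiniDFSCluster cluster) throws IOException {
    for (MaterializedReplica replica : getMaterializedReplicas(cluster)) {
        replica.corruptData();
    }
}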

Example 72 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestAddOverReplicatedStripedBlocks method testProcessOverReplicatedAndMissingStripedBlock.

// This test is going to be rewritten in HDFS-10854. Ignoring this test
// temporarily as it fails with the fix for HDFS-10301.
@Ignore
@Test
public void testProcessOverReplicatedAndMissingStripedBlock() throws Exception {
    long fileLen = cellSize * dataBlocks;
    DFSTestUtil.createStripedFile(cluster, filePath, null, 1, stripesPerBlock, false);
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    long gs = bg.getBlock().getGenerationStamp();
    String bpid = bg.getBlock().getBlockPoolId();
    long groupId = bg.getBlock().getBlockId();
    Block blk = new Block(groupId, blockSize, gs);
    // only inject GROUP_SIZE - 1 blocks, so there is one block missing
    for (int i = 0; i < groupSize - 1; i++) {
        blk.setBlockId(groupId + i);
        cluster.injectBlocks(i, Arrays.asList(blk), bpid);
    }
    cluster.triggerBlockReports();
    // let an internal block be over-replicated with 2 redundant blocks.
    // Therefore the number of internal blocks exceeds GROUP_SIZE. (5 data blocks +
    // 3 parity blocks + 2 redundant blocks > GROUP_SIZE)
    blk.setBlockId(groupId + 2);
    cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
    cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
    // update blocksMap
    cluster.triggerBlockReports();
    Thread.sleep(2000);
    // add to invalidates
    cluster.triggerHeartbeats();
    // datanode delete block
    cluster.triggerHeartbeats();
    // update blocksMap
    cluster.triggerBlockReports();
    // Since one block is missing, the over-replicated blocks will not be
    // deleted until reconstruction happens.
    lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    bg = (LocatedStripedBlock) (lbs.get(0));
    assertEquals(groupSize + 1, bg.getBlockIndices().length);
    assertEquals(groupSize + 1, bg.getLocations().length);
    BitSet set = new BitSet(groupSize);
    for (byte index : bg.getBlockIndices()) {
        set.set(index);
    }
    Assert.assertFalse(set.get(groupSize - 1));
    for (int i = 0; i < groupSize - 1; i++) {
        assertTrue(set.get(i));
    }
}
Also used: LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), BitSet (java.util.BitSet), Block (org.apache.hadoop.hdfs.protocol.Block), Ignore (org.junit.Ignore), Test (org.junit.Test)
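The blk.setBlockId(groupId + i) calls above rely on the striped block-ID layout: internal blocks of a group occupy consecutive IDs starting at the group ID. A minimal helper distilled from the injection loop above (the name and signature are illustrative, not from the test):

// Hypothetical helper: place internal blocks 0..count-1 of a striped group
// on datanodes 0..count-1. Internal block i of a group has ID groupId + i.
private static void injectInternalBlocks(MiniDFSCluster cluster, String bpid,
        long groupId, long numBytes, long gs, int count) throws IOException {
    for (int i = 0; i < count; i++) {
        Block blk = new Block(groupId + i, numBytes, gs);
        cluster.injectBlocks(i, Arrays.asList(blk), bpid);
    }
}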

Example 73 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestAddOverReplicatedStripedBlocks method testProcessOverReplicatedSBSmallerThanFullBlocks.

@Test
public void testProcessOverReplicatedSBSmallerThanFullBlocks() throws Exception {
    // Create an EC file that does not fill its internal blocks completely.
    int fileLen = cellSize * (dataBlocks - 1);
    byte[] content = new byte[fileLen];
    DFSTestUtil.writeFile(fs, filePath, new String(content));
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    long gs = bg.getBlock().getGenerationStamp();
    String bpid = bg.getBlock().getBlockPoolId();
    long groupId = bg.getBlock().getBlockId();
    Block blk = new Block(groupId, blockSize, gs);
    cluster.triggerBlockReports();
    List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
    // let an internal block be over-replicated with (numDNs - GROUP_SIZE + 1)
    // redundant blocks. Therefore the number of internal blocks exceeds GROUP_SIZE.
    blk.setBlockId(groupId);
    List<DataNode> dataNodeList = cluster.getDataNodes();
    for (int i = 0; i < numDNs; i++) {
        if (!infos.contains(dataNodeList.get(i).getDatanodeId())) {
            cluster.injectBlocks(i, Arrays.asList(blk), bpid);
            System.out.println("XXX: inject block into datanode " + i);
        }
    }
    // update blocksMap
    cluster.triggerBlockReports();
    // add to invalidates
    cluster.triggerHeartbeats();
    // datanode delete block
    cluster.triggerHeartbeats();
    // update blocksMap
    cluster.triggerBlockReports();
    // verify that all internal blocks exist
    lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, groupSize - 1);
}
Also used: LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)
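StripedFileTestUtil.verifyLocatedStripedBlocks performs the final check here. A rough sketch of the kind of verification involved, following the BitSet pattern from the previous example (an illustration, not the utility's actual code):

// Illustrative only: check that each expected internal-block index appears
// among the reported locations of a block group, mirroring the BitSet check
// in testProcessOverReplicatedAndMissingStripedBlock above.
private static void assertAllInternalBlocksPresent(LocatedStripedBlock bg,
        int expectedBlocks) {
    BitSet seen = new BitSet(expectedBlocks);
    for (byte index : bg.getBlockIndices()) {
        seen.set(index);
    }
    for (int i = 0; i < expectedBlocks; i++) {
        assertTrue("missing internal block " + i, seen.get(i));
    }
}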

Example 74 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestAddStripedBlocks method testAddUCReplica.

/**
   * Test BlockInfoStripedUnderConstruction#addReplicaIfNotPresent in different
   * scenarios.
   */
@Test
public void testAddUCReplica() throws Exception {
    final Path file = new Path("/file1");
    final List<String> storageIDs = new ArrayList<>();
    // create an empty file
    FSDataOutputStream out = null;
    try {
        out = dfs.create(file, (short) 1);
        // 1. create the UC striped block
        FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
        INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
        cluster.getNamesystem().getAdditionalBlock(file.toString(), fileNode.getId(), dfs.getClient().getClientName(), null, null, null, null);
        BlockInfo lastBlock = fileNode.getLastBlock();
        DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
        byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
        Assert.assertEquals(groupSize, locs.length);
        Assert.assertEquals(groupSize, indices.length);
        // 2. mimic incremental block reports and make sure the uc-replica list in
        // the BlockInfoUCStriped is correct
        int i = 0;
        for (DataNode dn : cluster.getDataNodes()) {
            final Block block = new Block(lastBlock.getBlockId() + i++, 0, lastBlock.getGenerationStamp());
            DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
            storageIDs.add(storage.getStorageID());
            StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, BlockStatus.RECEIVING_BLOCK, storage);
            for (StorageReceivedDeletedBlocks report : reports) {
                cluster.getNamesystem().processIncrementalBlockReport(dn.getDatanodeId(), report);
            }
        }
        // make sure lastBlock is correct and the storages have been updated
        locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
        indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
        Assert.assertEquals(groupSize, locs.length);
        Assert.assertEquals(groupSize, indices.length);
        for (DatanodeStorageInfo newstorage : locs) {
            Assert.assertTrue(storageIDs.contains(newstorage.getStorageID()));
        }
    } finally {
        IOUtils.cleanup(null, out);
    }
    // 3. restart the namenode, mimic the full block reports, and check the
    // uc-replica list again
    cluster.restartNameNode(true);
    final String bpId = cluster.getNamesystem().getBlockPoolId();
    INodeFile fileNode = cluster.getNamesystem().getFSDirectory().getINode4Write(file.toString()).asFile();
    BlockInfo lastBlock = fileNode.getLastBlock();
    int i = groupSize - 1;
    for (DataNode dn : cluster.getDataNodes()) {
        String storageID = storageIDs.get(i);
        final Block block = new Block(lastBlock.getBlockId() + i--, 0, lastBlock.getGenerationStamp());
        DatanodeStorage storage = new DatanodeStorage(storageID);
        List<ReplicaBeingWritten> blocks = new ArrayList<>();
        ReplicaBeingWritten replica = new ReplicaBeingWritten(block, null, null, null);
        blocks.add(replica);
        BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
        StorageBlockReport[] reports = { new StorageBlockReport(storage, bll) };
        cluster.getNameNodeRpc().blockReport(dn.getDNRegistrationForBP(bpId), bpId, reports, new BlockReportContext(1, 0, System.nanoTime(), 0, true));
    }
    DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
    byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
    Assert.assertEquals(groupSize, locs.length);
    Assert.assertEquals(groupSize, indices.length);
    for (i = 0; i < groupSize; i++) {
        Assert.assertEquals(storageIDs.get(i), locs[groupSize - 1 - i].getStorageID());
        Assert.assertEquals(groupSize - i - 1, indices[i]);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten), ArrayList (java.util.ArrayList), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
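In step 2, DFSTestUtil.makeReportForReceivedBlock hides the report construction. A hedged sketch of a hand-rolled equivalent; the constructor shapes below are assumptions based on the server protocol classes, not verified against a specific Hadoop release:

// Hedged sketch of building the incremental report that
// DFSTestUtil.makeReportForReceivedBlock produces in the test above.
// Assumes ReceivedDeletedBlockInfo(Block, BlockStatus, String delHints) and
// StorageReceivedDeletedBlocks(DatanodeStorage, ReceivedDeletedBlockInfo[]).
private static StorageReceivedDeletedBlocks[] makeReceivingReport(
        Block block, DatanodeStorage storage) {
    ReceivedDeletedBlockInfo info = new ReceivedDeletedBlockInfo(
        block, ReceivedDeletedBlockInfo.BlockStatus.RECEIVING_BLOCK, null);
    return new StorageReceivedDeletedBlocks[] {
        new StorageReceivedDeletedBlocks(storage,
            new ReceivedDeletedBlockInfo[] { info })
    };
}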

Example 75 with Block

use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.

the class TestQuotaWithStripedBlocks method testUpdatingQuotaCount.

@Test
public void testUpdatingQuotaCount() throws Exception {
    final Path file = new Path(ecDir, "file");
    FSDataOutputStream out = null;
    try {
        out = dfs.create(file, (short) 1);
        INodeFile fileNode = dir.getINode4Write(file.toString()).asFile();
        ExtendedBlock previous = null;
        // Create striped blocks which have a cell in each block.
        Block newBlock = DFSTestUtil.addBlockToFile(true, cluster.getDataNodes(), dfs, cluster.getNamesystem(), file.toString(), fileNode, dfs.getClient().getClientName(), previous, 1, 0);
        previous = new ExtendedBlock(cluster.getNamesystem().getBlockPoolId(), newBlock);
        final INodeDirectory dirNode = dir.getINode4Write(ecDir.toString()).asDirectory();
        final long spaceUsed = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed().getStorageSpace();
        final long diskUsed = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
        // When we add a new block we update the quota using the full block size.
        Assert.assertEquals(BLOCK_SIZE * groupSize, spaceUsed);
        Assert.assertEquals(BLOCK_SIZE * groupSize, diskUsed);
        dfs.getClient().getNamenode().complete(file.toString(), dfs.getClient().getClientName(), previous, fileNode.getId());
        final long actualSpaceUsed = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed().getStorageSpace();
        final long actualDiskUsed = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed().getTypeSpaces().get(StorageType.DISK);
        // In this case the file's real size is cell size * block group size.
        Assert.assertEquals(cellSize * groupSize, actualSpaceUsed);
        Assert.assertEquals(cellSize * groupSize, actualDiskUsed);
    } finally {
        IOUtils.cleanup(null, out);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
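The two assertion pairs encode the striped-quota rule: while the block group is under construction, the namenode charges quota pessimistically at the full block size per internal block; after complete(), the charge drops to the bytes actually allocated. A worked example with hypothetical values (not the test's own constants):

// Hypothetical numbers, for illustration only: cellSize = 1 MiB,
// BLOCK_SIZE = 4 * cellSize, RS(6,3) so groupSize = 9.
long cellSize = 1L << 20;
long blockSize = 4 * cellSize;
int groupSize = 9;
// While under construction: charged at full block size per internal block.
long pessimisticCharge = blockSize * groupSize;  // 36 MiB
// After complete(): charged at one cell per internal block, as in this test.
long actualCharge = cellSize * groupSize;        // 9 MiB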

Aggregations

Block (org.apache.hadoop.hdfs.protocol.Block): 155
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 79
Test (org.junit.Test): 77
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 74
Path (org.apache.hadoop.fs.Path): 28
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 26
IOException (java.io.IOException): 24
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 22
Configuration (org.apache.hadoop.conf.Configuration): 20
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 18
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 17
BlockInfoContiguous (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous): 17
CachedBlock (org.apache.hadoop.hdfs.server.namenode.CachedBlock): 17
BlockInfoStriped (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped): 15
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 14
ArrayList (java.util.ArrayList): 12
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 11
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 11
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 10
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 10