
Example 46 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestDFSRename method testRenameWithOverwrite.

/**
   * Check that the blocks of the dst file are cleaned up after a rename
   * with overwrite. Restart the NN to verify that the rename persisted.
   */
@Test(timeout = 120000)
public void testRenameWithOverwrite() throws Exception {
    final short replFactor = 2;
    final long blockSize = 512;
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replFactor).build();
    DistributedFileSystem dfs = cluster.getFileSystem();
    try {
        long fileLen = blockSize * 3;
        String src = "/foo/src";
        String dst = "/foo/dst";
        Path srcPath = new Path(src);
        Path dstPath = new Path(dst);
        DFSTestUtil.createFile(dfs, srcPath, fileLen, replFactor, 1);
        DFSTestUtil.createFile(dfs, dstPath, fileLen, replFactor, 1);
        LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(cluster.getNameNode(), dst, 0, fileLen);
        BlockManager bm = NameNodeAdapter.getNamesystem(cluster.getNameNode()).getBlockManager();
        assertNotNull(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()));
        dfs.rename(srcPath, dstPath, Rename.OVERWRITE);
        assertNull(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().getLocalBlock()));
        // Restart the NN and verify that the rename persisted.
        cluster.restartNameNodes();
        assertFalse(dfs.exists(srcPath));
        assertTrue(dfs.exists(dstPath));
    } finally {
        if (dfs != null) {
            dfs.close();
        }
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) Configuration(org.apache.hadoop.conf.Configuration) BlockManager(org.apache.hadoop.hdfs.server.blockmanagement.BlockManager) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) Test(org.junit.Test)
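
The core pattern here (resolve a path to its LocatedBlocks, then inspect each underlying block) generalizes beyond rename tests. A minimal sketch that lists a file's blocks and replica locations; the method name, the path, and the open dfs handle are assumptions for illustration, not part of the test above:

import java.util.Arrays;

import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Sketch: print every block of a file and the datanodes holding its replicas.
// `dfs` and the path "/foo/bar" are hypothetical, not from the test above.
static void printBlockLocations(DistributedFileSystem dfs) throws Exception {
    LocatedBlocks lbs = dfs.getClient().getLocatedBlocks("/foo/bar", 0);
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
        System.out.println(lb.getBlock() + " -> " + Arrays.toString(lb.getLocations()));
    }
}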

Example 47 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestAddOverReplicatedStripedBlocks method testProcessOverReplicatedAndMissingStripedBlock.

// This test is going to be rewritten in HDFS-10854. Ignoring this test
// temporarily as it fails with the fix for HDFS-10301.
@Ignore
@Test
public void testProcessOverReplicatedAndMissingStripedBlock() throws Exception {
    long fileLen = cellSize * dataBlocks;
    DFSTestUtil.createStripedFile(cluster, filePath, null, 1, stripesPerBlock, false);
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    long gs = bg.getBlock().getGenerationStamp();
    String bpid = bg.getBlock().getBlockPoolId();
    long groupId = bg.getBlock().getBlockId();
    Block blk = new Block(groupId, blockSize, gs);
    // only inject GROUP_SIZE - 1 blocks, so there is one block missing
    for (int i = 0; i < groupSize - 1; i++) {
        blk.setBlockId(groupId + i);
        cluster.injectBlocks(i, Arrays.asList(blk), bpid);
    }
    cluster.triggerBlockReports();
    // Let an internal block be over-replicated with 2 redundant blocks.
    // Therefore the number of internal blocks exceeds GROUP_SIZE. (5 data
    // blocks + 3 parity blocks + 2 redundant blocks > GROUP_SIZE)
    blk.setBlockId(groupId + 2);
    cluster.injectBlocks(numDNs - 3, Arrays.asList(blk), bpid);
    cluster.injectBlocks(numDNs - 2, Arrays.asList(blk), bpid);
    // update blocksMap
    cluster.triggerBlockReports();
    Thread.sleep(2000);
    // add to invalidates
    cluster.triggerHeartbeats();
    // datanode delete block
    cluster.triggerHeartbeats();
    // update blocksMap
    cluster.triggerBlockReports();
    // Since one block is missing, the over-replicated blocks will not be
    // deleted until reconstruction happens.
    lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    bg = (LocatedStripedBlock) (lbs.get(0));
    assertEquals(groupSize + 1, bg.getBlockIndices().length);
    assertEquals(groupSize + 1, bg.getLocations().length);
    BitSet set = new BitSet(groupSize);
    for (byte index : bg.getBlockIndices()) {
        set.set(index);
    }
    Assert.assertFalse(set.get(groupSize - 1));
    for (int i = 0; i < groupSize - 1; i++) {
        assertTrue(set.get(i));
    }
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) BitSet(java.util.BitSet) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) Ignore(org.junit.Ignore) Test(org.junit.Test)
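
The BitSet check at the end is a reusable way to see which internal blocks of a striped block group were reported. The same inspection factored into a standalone helper, as a sketch; the method name is ours, and groupSize is assumed to be the policy's data-plus-parity block count as in the test:

import java.util.BitSet;

import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;

// Sketch: report which internal blocks of a striped block group are present.
// Hypothetical helper; groupSize = data blocks + parity blocks of the policy.
static void printInternalBlocks(LocatedStripedBlock bg, int groupSize) {
    BitSet present = new BitSet(groupSize);
    for (byte index : bg.getBlockIndices()) {
        present.set(index);
    }
    for (int i = 0; i < groupSize; i++) {
        System.out.println("internal block " + i + ": "
            + (present.get(i) ? "present" : "missing"));
    }
}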

Example 48 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestMover method createFileWithFavoredDatanodes.

private void createFileWithFavoredDatanodes(final Configuration conf, final MiniDFSCluster cluster, final DistributedFileSystem dfs) throws IOException {
    // Add two DISK-based datanodes to the cluster.
    // Also, ensure that blocks are pinned in these new datanodes.
    StorageType[][] newtypes = new StorageType[][] { { StorageType.DISK }, { StorageType.DISK } };
    startAdditionalDNs(conf, 2, newtypes, cluster);
    ArrayList<DataNode> dataNodes = cluster.getDataNodes();
    InetSocketAddress[] favoredNodes = new InetSocketAddress[2];
    int j = 0;
    for (int i = dataNodes.size() - 1; i >= 2; i--) {
        favoredNodes[j++] = dataNodes.get(i).getXferAddress();
    }
    final String file = "/parent/testMoverFailedRetryWithPinnedBlocks2";
    final FSDataOutputStream out = dfs.create(new Path(file), FsPermission.getDefault(), true, DEFAULT_BLOCK_SIZE, (short) 2, DEFAULT_BLOCK_SIZE, null, favoredNodes);
    byte[] fileData = StripedFileTestUtil.generateBytes(DEFAULT_BLOCK_SIZE * 2);
    out.write(fileData);
    out.close();
    // Mock FsDatasetSpi#getPinning to show that the block is pinned.
    LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file, 0);
    Assert.assertEquals("Wrong block count", 2, locatedBlocks.locatedBlockCount());
    LocatedBlock lb = locatedBlocks.get(0);
    DatanodeInfo datanodeInfo = lb.getLocations()[0];
    for (DataNode dn : cluster.getDataNodes()) {
        if (dn.getDatanodeId().getDatanodeUuid().equals(datanodeInfo.getDatanodeUuid())) {
            LOG.info("Simulate block pinning in datanode {}", datanodeInfo);
            DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
            break;
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) InetSocketAddress(java.net.InetSocketAddress) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream)
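
The loop that matches a replica's DatanodeInfo back to the in-process DataNode is a common MiniDFSCluster idiom. Factored out as a sketch (the helper name is ours, not part of the Hadoop test API):

import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;

// Sketch: find the in-process DataNode behind a replica location by UUID.
// Hypothetical helper, extracted from the inline loop in the test above.
static DataNode findDataNode(MiniDFSCluster cluster, DatanodeInfo info) {
    for (DataNode dn : cluster.getDataNodes()) {
        if (dn.getDatanodeId().getDatanodeUuid().equals(info.getDatanodeUuid())) {
            return dn;
        }
    }
    return null; // no datanode in this cluster serves that replica
}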

Example 49 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestStorageMover method waitForAllReplicas.

private void waitForAllReplicas(int expectedReplicaNum, Path file, DistributedFileSystem dfs, int retryCount) throws Exception {
    LOG.info("Waiting for replicas count " + expectedReplicaNum + ", file name: " + file);
    for (int i = 0; i < retryCount; i++) {
        LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0, BLOCK_SIZE);
        LocatedBlock lb = lbs.get(0);
        if (lb.getLocations().length >= expectedReplicaNum) {
            return;
        } else {
            Thread.sleep(1000);
        }
    }
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
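
Note that waitForAllReplicas returns silently once the retries are exhausted, so a caller cannot distinguish a timeout from success. A variant that fails loudly; this is a sketch, not the Hadoop test code, and the block-size argument stands in for the test's BLOCK_SIZE constant:

import java.util.concurrent.TimeoutException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Sketch: same polling loop, but throw instead of returning on timeout.
static void waitForAllReplicasOrFail(int expectedReplicaNum, Path file,
        DistributedFileSystem dfs, int retryCount, long blockSize) throws Exception {
    for (int i = 0; i < retryCount; i++) {
        LocatedBlocks lbs = dfs.getClient().getLocatedBlocks(file.toString(), 0, blockSize);
        if (lbs.get(0).getLocations().length >= expectedReplicaNum) {
            return;
        }
        Thread.sleep(1000);
    }
    throw new TimeoutException("File " + file + " never reached "
        + expectedReplicaNum + " replicas after " + retryCount + " retries");
}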

Example 50 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestAddOverReplicatedStripedBlocks method testProcessOverReplicatedSBSmallerThanFullBlocks.

@Test
public void testProcessOverReplicatedSBSmallerThanFullBlocks() throws Exception {
    // Create an EC file that doesn't fill its internal blocks completely.
    int fileLen = cellSize * (dataBlocks - 1);
    byte[] content = new byte[fileLen];
    DFSTestUtil.writeFile(fs, filePath, new String(content));
    LocatedBlocks lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
    long gs = bg.getBlock().getGenerationStamp();
    String bpid = bg.getBlock().getBlockPoolId();
    long groupId = bg.getBlock().getBlockId();
    Block blk = new Block(groupId, blockSize, gs);
    cluster.triggerBlockReports();
    List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
    // Let an internal block be over-replicated with (numDNs - GROUP_SIZE + 1)
    // redundant blocks, so the number of internal blocks exceeds GROUP_SIZE.
    blk.setBlockId(groupId);
    List<DataNode> dataNodeList = cluster.getDataNodes();
    for (int i = 0; i < numDNs; i++) {
        if (!infos.contains(dataNodeList.get(i).getDatanodeId())) {
            cluster.injectBlocks(i, Arrays.asList(blk), bpid);
            System.out.println("XXX: inject block into datanode " + i);
        }
    }
    // update blocksMap
    cluster.triggerBlockReports();
    // add to invalidates
    cluster.triggerHeartbeats();
    // datanode delete block
    cluster.triggerHeartbeats();
    // update blocksMap
    cluster.triggerBlockReports();
    // Verify that all expected internal blocks exist.
    lbs = cluster.getNameNodeRpc().getBlockLocations(filePath.toString(), 0, fileLen);
    StripedFileTestUtil.verifyLocatedStripedBlocks(lbs, groupSize - 1);
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) Test(org.junit.Test)
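
The expected count of groupSize - 1 internal blocks follows from the file length: fileLen = cellSize * (dataBlocks - 1) writes one cell into each of dataBlocks - 1 data blocks, while the parity blocks always exist, so the group holds (dataBlocks - 1) + parity = groupSize - 1 blocks. A sketch of that arithmetic, assuming parityBlocks = groupSize - dataBlocks as in the usual striped-block test setup:

// Sketch: why the test expects groupSize - 1 internal blocks. Assumes
// parityBlocks = groupSize - dataBlocks, as derived from the EC policy.
static int expectedInternalBlocks(int fileLen, int cellSize,
        int dataBlocks, int groupSize) {
    int parityBlocks = groupSize - dataBlocks;
    // Ceiling division: number of data blocks that receive at least one cell.
    int usedDataBlocks = Math.min(dataBlocks, (fileLen + cellSize - 1) / cellSize);
    return usedDataBlocks + parityBlocks; // fileLen = cellSize*(dataBlocks-1) gives groupSize - 1
}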

Aggregations

LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 118
Test (org.junit.Test): 67
Path (org.apache.hadoop.fs.Path): 65
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 52
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 33
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 32
Configuration (org.apache.hadoop.conf.Configuration): 29
IOException (java.io.IOException): 20
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 20
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 20
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 18
FileSystem (org.apache.hadoop.fs.FileSystem): 17
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 17
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 13
Block (org.apache.hadoop.hdfs.protocol.Block): 11
InetSocketAddress (java.net.InetSocketAddress): 10
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 9
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 7
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 7