
Example 81 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestBlockReplacement method checkBlocks.

/* Check that the file's blocks have the expected number of replicas
   * and that a replica exists on each of the includeNodes.
   */
private void checkBlocks(DatanodeInfo[] includeNodes, String fileName, long fileLen, short replFactor, DFSClient client) throws IOException, TimeoutException {
    boolean notDone;
    final long TIMEOUT = 20000L;
    long starttime = Time.monotonicNow();
    long failtime = starttime + TIMEOUT;
    do {
        try {
            Thread.sleep(100);
        } catch (InterruptedException e) {
            // ignore and retry; the deadline check below bounds the total wait
        }
        List<LocatedBlock> blocks = client.getNamenode().getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();
        assertEquals(1, blocks.size());
        DatanodeInfo[] nodes = blocks.get(0).getLocations();
        notDone = (nodes.length != replFactor);
        if (notDone) {
            LOG.info("Expected replication factor is " + replFactor + " but the real replication factor is " + nodes.length);
        } else {
            List<DatanodeInfo> nodeLocations = Arrays.asList(nodes);
            for (DatanodeInfo node : includeNodes) {
                if (!nodeLocations.contains(node)) {
                    notDone = true;
                    LOG.info("Block is not located at " + node);
                    break;
                }
            }
        }
        if (Time.monotonicNow() > failtime) {
            String expectedNodesList = "";
            String currentNodesList = "";
            for (DatanodeInfo dn : includeNodes) expectedNodesList += dn + ", ";
            for (DatanodeInfo dn : nodes) currentNodesList += dn + ", ";
            LOG.info("Expected replica nodes are: " + expectedNodesList);
            LOG.info("Current actual replica nodes are: " + currentNodesList);
            throw new TimeoutException("Did not achieve expected replication to expected nodes " + "after more than " + TIMEOUT + " msec.  See logs for details.");
        }
    } while (notDone);
    LOG.info("Achieved expected replication values in " + (Time.now() - starttime) + " msec.");
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) TimeoutException(java.util.concurrent.TimeoutException)
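
The loop above is an instance of a common test idiom: poll with a short sleep until a condition holds, or fail once a monotonic deadline passes. A minimal standalone sketch of that pattern (plain Java, not Hadoop code; the class and method names are ours):

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public final class WaitUntil {
    private WaitUntil() {}

    /**
     * Polls condition every intervalMs until it returns true, or throws
     * TimeoutException once timeoutMs has elapsed on the monotonic clock.
     */
    public static void waitUntil(BooleanSupplier condition, long intervalMs,
            long timeoutMs) throws InterruptedException, TimeoutException {
        final long deadline = System.nanoTime() + timeoutMs * 1_000_000L;
        while (!condition.getAsBoolean()) {
            if (System.nanoTime() - deadline > 0) {  // overflow-safe compare
                throw new TimeoutException(
                        "condition not met within " + timeoutMs + " ms");
            }
            Thread.sleep(intervalMs);
        }
    }
}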

Example 82 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestBlockReplacement method testBlockReplacementWithPinnedBlocks.

/**
   * Test to verify that copying a pinned block to a different destination
   * datanode will throw an IOException with error code Status.ERROR_BLOCK_PINNED.
   *
   */
@Test(timeout = 90000)
public void testBlockReplacementWithPinnedBlocks() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // create a 3-datanode cluster where the datanodes have DISK and
    // ARCHIVE storage types.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        String fileName = "/testBlockReplacementWithPinnedBlocks/file";
        final Path file = new Path(fileName);
        DFSTestUtil.createFile(dfs, file, 1024, (short) 1, 1024);
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(fileName, 0).get(0);
        DatanodeInfo[] oldNodes = lb.getLocations();
        assertEquals("Wrong block locations", oldNodes.length, 1);
        DatanodeInfo source = oldNodes[0];
        ExtendedBlock b = lb.getBlock();
        DatanodeInfo[] datanodes = dfs.getDataNodeStats();
        DatanodeInfo destin = null;
        for (DatanodeInfo datanodeInfo : datanodes) {
            // choose different destination node
            if (!oldNodes[0].equals(datanodeInfo)) {
                destin = datanodeInfo;
                break;
            }
        }
        assertNotNull("Failed to choose destination datanode!", destin);
        assertFalse("Source and destin datanode should be different", source.equals(destin));
        // Mock FsDatasetSpi#getPinning to show that the block is pinned.
        for (int i = 0; i < cluster.getDataNodes().size(); i++) {
            DataNode dn = cluster.getDataNodes().get(i);
            LOG.info("Simulate block pinning in datanode " + dn);
            DataNodeTestUtils.mockDatanodeBlkPinning(dn, true);
        }
        // Block movement to a different datanode should fail as the block is
        // pinned.
        assertTrue("Status code mismatches!", replaceBlock(b, source, source, destin, StorageType.ARCHIVE, Status.ERROR_BLOCK_PINNED));
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
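
The pinning simulation in DataNodeTestUtils.mockDatanodeBlkPinning boils down to Mockito stubbing of the dataset's pinning query. A standalone sketch of the same idea against a hypothetical Storage interface (not Hadoop's FsDatasetSpi):

import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class PinningStubSketch {
    // Hypothetical stand-in for the dataset's pinning query.
    interface Storage {
        boolean getPinning(String blockId);
    }

    public static void main(String[] args) {
        Storage storage = mock(Storage.class);
        // Report every block as pinned, so any move attempt can be rejected.
        when(storage.getPinning(anyString())).thenReturn(true);
        System.out.println(storage.getPinning("blk_1"));  // prints: true
    }
}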

Example 83 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestBlockRecovery method testSyncReplicas.

/** Sync two replicas */
private void testSyncReplicas(ReplicaRecoveryInfo replica1, ReplicaRecoveryInfo replica2, InterDatanodeProtocol dn1, InterDatanodeProtocol dn2, long expectLen) throws IOException {
    DatanodeInfo[] locs = new DatanodeInfo[] { mock(DatanodeInfo.class), mock(DatanodeInfo.class) };
    RecoveringBlock rBlock = new RecoveringBlock(block, locs, RECOVERY_ID);
    ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
    BlockRecord record1 = new BlockRecord(DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
    BlockRecord record2 = new BlockRecord(DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
    syncList.add(record1);
    syncList.add(record2);
    when(dn1.updateReplicaUnderRecovery((ExtendedBlock) anyObject(), anyLong(), anyLong(), anyLong())).thenReturn("storage1");
    when(dn2.updateReplicaUnderRecovery((ExtendedBlock) anyObject(), anyLong(), anyLong(), anyLong())).thenReturn("storage2");
    BlockRecoveryWorker.RecoveryTaskContiguous recoveryTask = recoveryWorker.new RecoveryTaskContiguous(rBlock);
    recoveryTask.syncBlock(syncList);
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) RecoveringBlock(org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock) BlockRecord(org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.BlockRecord) ArrayList(java.util.ArrayList)
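
The stubbing of updateReplicaUnderRecovery uses argument matchers so the stub answers for any block and any lengths; note that anyObject() is deprecated in Mockito 2+ in favor of the typed matchers. A standalone sketch of the same stubbing style, with a hypothetical interface in place of InterDatanodeProtocol:

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class MatcherSketch {
    // Hypothetical stand-in for the inter-datanode recovery update call.
    interface ReplicaUpdater {
        String updateReplica(Object block, long recoveryId, long newLength);
    }

    public static void main(String[] args) {
        ReplicaUpdater dn = mock(ReplicaUpdater.class);
        // Match every call, whatever the block or lengths are.
        when(dn.updateReplica(any(), anyLong(), anyLong()))
                .thenReturn("storage1");
        System.out.println(dn.updateReplica(new Object(), 1L, 512L));
    }
}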

Example 84 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestSortLocatedStripedBlock method prepareBlockIndexAndTokenList.

private void prepareBlockIndexAndTokenList(List<LocatedBlock> lbs, List<HashMap<DatanodeInfo, Byte>> locToIndexList, List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
    for (LocatedBlock lb : lbs) {
        HashMap<DatanodeInfo, Byte> locToIndex = new HashMap<DatanodeInfo, Byte>();
        locToIndexList.add(locToIndex);
        HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken = new HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>();
        locToTokenList.add(locToToken);
        DatanodeInfo[] di = lb.getLocations();
        LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lb;
        for (int i = 0; i < di.length; i++) {
            locToIndex.put(di[i], stripedBlk.getBlockIndices()[i]);
            locToToken.put(di[i], stripedBlk.getBlockTokens()[i]);
        }
    }
}
Also used : LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) BlockTokenIdentifier(org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier) HashMap(java.util.HashMap) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Token(org.apache.hadoop.security.token.Token)
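
The helper above pairs each location with the block index and token at the same array position. A minimal standalone illustration of that parallel-arrays-to-map pattern (plain Java; the names are ours):

import java.util.HashMap;
import java.util.Map;

public class ParallelArraysToMap {
    public static void main(String[] args) {
        String[] locations = { "dn1", "dn2", "dn3" };  // stand-ins for DatanodeInfo
        byte[] indices = { 0, 1, 2 };                  // stand-ins for block indices

        Map<String, Byte> locToIndex = new HashMap<>();
        for (int i = 0; i < locations.length; i++) {
            locToIndex.put(locations[i], indices[i]);  // pair entries by position
        }
        System.out.println(locToIndex);  // {dn1=0, dn2=1, dn3=2}
    }
}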

Example 85 with DatanodeInfo

use of org.apache.hadoop.hdfs.protocol.DatanodeInfo in project hadoop by apache.

the class TestSortLocatedStripedBlock method testWithMultipleInServiceAndDecommnDatanodes.

/**
   * Test to verify sorting when multiple in-service and decommissioned
   * datanodes exist in the storage lists.
   *
   * The storage list (decommissioned internal blocks are marked with a
   * trailing ') is
   * d0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13
   * mapping to block indices
   * 0', 1', 2, 3, 4, 5, 6, 7', 8', 0, 1, 7, 8, 1
   *
   * Decommissioned node indices: 0', 1', 7', 8'
   *
   * An additional in-service node, d13, is at the end with block index 1.
   *
   * So in the original list the nodes d0, d1, d7 and d8 are in
   * decommissioned state.
   *
   * After sorting, the expected block index list is
   * 0, 1, 2, 3, 4, 5, 6, 7, 8, 1, 0', 1', 7', 8'
   *
   * and the expected storage list is
   * d9, d10, d2, d3, d4, d5, d6, d11, d12, d13, d0, d1, d7, d8.
   *
   * Note: after sorting, the block indices will not be in ascending order.
   */
@Test(timeout = 10000)
public void testWithMultipleInServiceAndDecommnDatanodes() {
    LOG.info("Starting test testWithMultipleInServiceAndDecommnDatanodes");
    // two located block groups
    int lbsCount = 2;
    List<Integer> decommnNodeIndices = new ArrayList<>();
    decommnNodeIndices.add(0);
    decommnNodeIndices.add(1);
    decommnNodeIndices.add(7);
    decommnNodeIndices.add(8);
    List<Integer> targetNodeIndices = new ArrayList<>();
    targetNodeIndices.addAll(decommnNodeIndices);
    // at the end add an additional In-Service node to blockIndex=1
    targetNodeIndices.add(1);
    // map of decommissioned node details for each located striped block,
    // used in the assertions below
    HashMap<Integer, List<String>> decommissionedNodes = new HashMap<>(lbsCount * decommnNodeIndices.size());
    List<LocatedBlock> lbs = createLocatedStripedBlocks(lbsCount, dataBlocks, parityBlocks, decommnNodeIndices, targetNodeIndices, decommissionedNodes);
    List<DatanodeInfo> staleDns = new ArrayList<>();
    for (LocatedBlock lb : lbs) {
        DatanodeInfo[] locations = lb.getLocations();
        DatanodeInfo staleDn = locations[locations.length - 1];
        staleDn.setLastUpdateMonotonic(Time.monotonicNow() - (STALE_INTERVAL * 2));
        staleDns.add(staleDn);
    }
    // prepare expected block index and token list.
    List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
    List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList = new ArrayList<>();
    prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
    dm.sortLocatedBlocks(null, lbs);
    assertDecommnNodePosition(groupSize + 1, decommissionedNodes, lbs);
    assertBlockIndexAndTokenPosition(lbs, locToIndexList, locToTokenList);
    for (LocatedBlock lb : lbs) {
        byte[] blockIndices = ((LocatedStripedBlock) lb).getBlockIndices();
        // after sorting stale block index will be placed after normal nodes.
        Assert.assertEquals("Failed to move stale node to bottom!", 1, blockIndices[9]);
        DatanodeInfo[] locations = lb.getLocations();
        // After sorting stale node d13 will be placed after normal nodes
        Assert.assertEquals("Failed to move stale dn after normal one!", staleDns.remove(0), locations[9]);
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) LocatedStripedBlock(org.apache.hadoop.hdfs.protocol.LocatedStripedBlock) BlockTokenIdentifier(org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier) ArrayList(java.util.ArrayList) List(java.util.List) Test(org.junit.Test)
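
The ordering the comment describes (in-service nodes first, then the stale node, decommissioned nodes last) can be expressed as a three-tier comparator. A standalone sketch under that assumption, with a hypothetical Node class instead of DatanodeInfo:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

public class TieredSortSketch {
    // Hypothetical stand-in for the DatanodeInfo states the test exercises.
    static final class Node {
        final String name;
        final boolean decommissioned;
        final boolean stale;
        Node(String name, boolean decommissioned, boolean stale) {
            this.name = name;
            this.decommissioned = decommissioned;
            this.stale = stale;
        }
    }

    public static void main(String[] args) {
        List<Node> nodes = new ArrayList<>(List.of(
                new Node("d0", true, false),    // decommissioned
                new Node("d13", false, true),   // stale
                new Node("d9", false, false))); // in-service
        // Rank: in-service (0) < stale (1) < decommissioned (2). List.sort
        // is stable, so relative order within each tier is preserved.
        nodes.sort(Comparator.comparingInt(
                (Node n) -> n.decommissioned ? 2 : n.stale ? 1 : 0));
        nodes.forEach(n -> System.out.println(n.name)); // d9, d13, d0
    }
}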

Aggregations

DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 214 uses
Test (org.junit.Test): 103 uses
Path (org.apache.hadoop.fs.Path): 91 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 73 uses
IOException (java.io.IOException): 47 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 44 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 43 uses
ArrayList (java.util.ArrayList): 39 uses
Configuration (org.apache.hadoop.conf.Configuration): 38 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 37 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 32 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 32 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 29 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 27 uses
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 25 uses
InetSocketAddress (java.net.InetSocketAddress): 20 uses
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 20 uses
StorageType (org.apache.hadoop.fs.StorageType): 18 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 14 uses
DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder): 14 uses