
Example 1 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache: class TestDecommissionWithStriped, method prepareBlockIndexAndTokenList.

private void prepareBlockIndexAndTokenList(List<LocatedBlock> lbs,
        List<HashMap<DatanodeInfo, Byte>> locToIndexList,
        List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList) {
    for (LocatedBlock lb : lbs) {
        // For each located block, record which erasure-coding block index
        // and which access token belong to each datanode location.
        HashMap<DatanodeInfo, Byte> locToIndex = new HashMap<>();
        locToIndexList.add(locToIndex);
        HashMap<DatanodeInfo, Token<BlockTokenIdentifier>> locToToken = new HashMap<>();
        locToTokenList.add(locToToken);
        DatanodeInfo[] di = lb.getLocations();
        LocatedStripedBlock stripedBlk = (LocatedStripedBlock) lb;
        for (int i = 0; i < di.length; i++) {
            locToIndex.put(di[i], stripedBlk.getBlockIndices()[i]);
            locToToken.put(di[i], stripedBlk.getBlockTokens()[i]);
        }
    }
}
Also used: LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier), HashMap (java.util.HashMap), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Token (org.apache.hadoop.security.token.Token)
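
A minimal sketch of a call site, assuming a running cluster and a hypothetical erasure-coded file path ("/ec/file", dfs, and the list names are illustrative, not taken from the test):

// Hypothetical setup: fetch the striped blocks of an EC file and let the
// helper above fill in the per-location maps.
LocatedBlocks blocksInfo = dfs.getClient().getLocatedBlocks("/ec/file", 0);
List<LocatedBlock> lbs = blocksInfo.getLocatedBlocks();
List<HashMap<DatanodeInfo, Byte>> locToIndexList = new ArrayList<>();
List<HashMap<DatanodeInfo, Token<BlockTokenIdentifier>>> locToTokenList = new ArrayList<>();
prepareBlockIndexAndTokenList(lbs, locToIndexList, locToTokenList);
// locToIndexList.get(i) now maps each DatanodeInfo of block i to its
// erasure-coding block index; locToTokenList holds the matching tokens.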

Example 2 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache: class TestEncryptedTransfer, method testEncryptedAppendRequiringBlockTransfer.

@Test
public void testEncryptedAppendRequiringBlockTransfer() throws IOException {
    setEncryptionConfigKeys();
    // start up 4 DNs
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = getFileSystem(conf);
    // Create a file with replication 3, so its block is on 3 / 4 DNs.
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
    // Shut down one of the DNs holding a block replica.
    FSDataInputStream in = fs.open(TEST_PATH);
    List<LocatedBlock> locatedBlocks = DFSTestUtil.getAllBlocks(in);
    in.close();
    assertEquals(1, locatedBlocks.size());
    assertEquals(3, locatedBlocks.get(0).getLocations().length);
    DataNode dn = cluster.getDataNode(locatedBlocks.get(0).getLocations()[0].getIpcPort());
    dn.shutdown();
    // Reopen the file for append, which will need to add another DN to the
    // pipeline and in doing so trigger a block transfer.
    writeTestDataToFile(fs);
    assertEquals(PLAIN_TEXT + PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
}
Also used: DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Test (org.junit.Test)
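
The helper setEncryptionConfigKeys() is not shown above. A plausible minimal version, sketched on the assumption that the test relies on the standard data-transfer encryption settings (the real helper may set additional keys):

private void setEncryptionConfigKeys() {
    // Assumption: encrypted data transfer also requires block access tokens.
    conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
    conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
}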

Example 3 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache: class TestDecommission, method checkFile.

/**
   * Verify that the number of replicas is as expected for each block in
   * the given file.
   * For blocks with a decommissioned node, verify that their replication
   * is 1 more than what is specified.
   * For blocks without decommissioned nodes, verify that their replication
   * is equal to what is specified.
   *
   * @param downnode - if null, there is no decommissioned node for this file.
   * @return - null if no failure is found, else an error message string.
   */
private static String checkFile(FileSystem fileSys, Path name, int repl, String downnode, int numDatanodes) throws IOException {
    boolean isNodeDown = (downnode != null);
    // need a raw stream
    assertTrue("Not HDFS:" + fileSys.getUri(), fileSys instanceof DistributedFileSystem);
    HdfsDataInputStream dis = (HdfsDataInputStream) fileSys.open(name);
    Collection<LocatedBlock> dinfo = dis.getAllBlocks();
    for (LocatedBlock blk : dinfo) {
        // for each block
        int hasdown = 0;
        DatanodeInfo[] nodes = blk.getLocations();
        for (int j = 0; j < nodes.length; j++) {
            // for each replica
            if (isNodeDown && nodes[j].getXferAddr().equals(downnode)) {
                hasdown++;
                // Downnode must actually be decommissioned
                if (!nodes[j].isDecommissioned()) {
                    return "For block " + blk.getBlock() + " replica on " + nodes[j] + " is given as downnode, " + "but is not decommissioned";
                }
                // Decommissioned node (if any) should only be the last node in the list.
                if (j != nodes.length - 1) {
                    return "For block " + blk.getBlock() + " decommissioned node " + nodes[j] + " was not last node in list: " + (j + 1) + " of " + nodes.length;
                }
                LOG.info("Block " + blk.getBlock() + " replica on " + nodes[j] + " is decommissioned.");
            } else {
                // Non-downnodes must not be decommissioned
                if (nodes[j].isDecommissioned()) {
                    return "For block " + blk.getBlock() + " replica on " + nodes[j] + " is unexpectedly decommissioned";
                }
            }
        }
        LOG.info("Block " + blk.getBlock() + " has " + hasdown + " decommissioned replica.");
        if (Math.min(numDatanodes, repl + hasdown) != nodes.length) {
            return "Wrong number of replicas for block " + blk.getBlock() + ": " + nodes.length + ", expected " + Math.min(numDatanodes, repl + hasdown);
        }
    }
    return null;
}
Also used: DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), HdfsDataInputStream (org.apache.hadoop.hdfs.client.HdfsDataInputStream)
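
A sketch of a typical call site, with hypothetical names (decommissionedNode, file, and numDatanodes are placeholders, not taken from the test):

// checkFile returns null when every block passes the replication check;
// otherwise the returned string describes the first failure.
String downnode = decommissionedNode.getXferAddr();
String error = checkFile(fileSys, file, 3, downnode, numDatanodes);
assertNull(error, error);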

Example 4 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache: class TestBlockRecovery, method testRaceBetweenReplicaRecoveryAndFinalizeBlock.

/**
   * Test to verify the race between finalizeBlock and lease recovery.
   *
   * @throws Exception on any test failure
   */
@Test(timeout = 20000)
public void testRaceBetweenReplicaRecoveryAndFinalizeBlock() throws Exception {
    // Stop the Mocked DN started in startup()
    tearDown();
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY, "1000");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
        cluster.waitClusterUp();
        DistributedFileSystem fs = cluster.getFileSystem();
        Path path = new Path("/test");
        FSDataOutputStream out = fs.create(path);
        out.writeBytes("data");
        out.hsync();
        List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs.open(path));
        final LocatedBlock block = blocks.get(0);
        final DataNode dataNode = cluster.getDataNodes().get(0);
        final AtomicBoolean recoveryInitResult = new AtomicBoolean(true);
        Thread recoveryThread = new Thread() {

            @Override
            public void run() {
                try {
                    DatanodeInfo[] locations = block.getLocations();
                    final RecoveringBlock recoveringBlock = new RecoveringBlock(block.getBlock(), locations, block.getBlock().getGenerationStamp() + 1);
                    try (AutoCloseableLock lock = dataNode.data.acquireDatasetLock()) {
                        Thread.sleep(2000);
                        dataNode.initReplicaRecovery(recoveringBlock);
                    }
                } catch (Exception e) {
                    recoveryInitResult.set(false);
                }
            }
        };
        recoveryThread.start();
        try {
            out.close();
        } catch (IOException e) {
            Assert.assertTrue("Writing should fail", e.getMessage().contains("are bad. Aborting..."));
        } finally {
            recoveryThread.join();
        }
        Assert.assertTrue("Recovery should be initiated successfully", recoveryInitResult.get());
        dataNode.updateReplicaUnderRecovery(block.getBlock(), block.getBlock().getGenerationStamp() + 1, block.getBlock().getBlockId(), block.getBlockSize());
    } finally {
        if (null != cluster) {
            cluster.shutdown();
            cluster = null;
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), DatanodeInfoBuilder (org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), URISyntaxException (java.net.URISyntaxException), TimeoutException (java.util.concurrent.TimeoutException), RecoveryInProgressException (org.apache.hadoop.hdfs.protocol.RecoveryInProgressException), AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
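
The recovery id in this test is simply the block's current generation stamp plus one: recovery must present a stamp strictly newer than the replica's. A restatement of that step in isolation (variable names mirror the test above):

ExtendedBlock b = block.getBlock();
// The recovery id doubles as the new generation stamp once recovery commits.
long recoveryId = b.getGenerationStamp() + 1;
RecoveringBlock recoveringBlock = new RecoveringBlock(b, block.getLocations(), recoveryId);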

Example 5 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache: class TestBlockReplacement, method testBlockMoveAcrossStorageInSameNode.

@Test
public void testBlockMoveAcrossStorageInSameNode() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // create only one datanode in the cluster to verify movement within
    // the datanode
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE }).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
        DFSTestUtil.createFile(dfs, file, 1024, (short) 1, 1024);
        LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
        // get the current located block (the file has a single block)
        LocatedBlock locatedBlock = locatedBlocks.get(0);
        ExtendedBlock block = locatedBlock.getBlock();
        DatanodeInfo[] locations = locatedBlock.getLocations();
        assertEquals(1, locations.length);
        StorageType[] storageTypes = locatedBlock.getStorageTypes();
        // current block should be written to DISK
        assertTrue(storageTypes[0] == StorageType.DISK);
        DatanodeInfo source = locations[0];
        // move the block to ARCHIVE, using the same DatanodeInfo as source,
        // proxy and destination so that the movement happens within the datanode
        assertTrue(replaceBlock(block, source, source, source, StorageType.ARCHIVE, Status.SUCCESS));
        // wait till the namenode has been notified of the replacement
        Thread.sleep(3000);
        locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
        // re-fetch the located block after the move
        locatedBlock = locatedBlocks.get(0);
        assertEquals("Storage should be only one", 1, locatedBlock.getLocations().length);
        assertTrue("Block should be moved to ARCHIVE", locatedBlock.getStorageTypes()[0] == StorageType.ARCHIVE);
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), StorageType (org.apache.hadoop.fs.StorageType), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), Test (org.junit.Test)
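
The fixed Thread.sleep(3000) can be flaky on a slow machine. A hedged alternative, assuming org.apache.hadoop.test.GenericTestUtils is on the test classpath, is to poll until the namenode reports the new storage type:

// Poll every 100 ms, for up to 10 s, until the block shows ARCHIVE.
GenericTestUtils.waitFor(() -> {
    try {
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file.toString(), 0).get(0);
        return lb.getStorageTypes()[0] == StorageType.ARCHIVE;
    } catch (IOException e) {
        return false;
    }
}, 100, 10000);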

Aggregations

LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 206 usages
Test (org.junit.Test): 94
Path (org.apache.hadoop.fs.Path): 86
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 78
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 52
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 51
Configuration (org.apache.hadoop.conf.Configuration): 43
IOException (java.io.IOException): 36
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 34
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 33
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 33
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 25
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 25
ArrayList (java.util.ArrayList): 24
StorageType (org.apache.hadoop.fs.StorageType): 24
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 24
Block (org.apache.hadoop.hdfs.protocol.Block): 16
FileSystem (org.apache.hadoop.fs.FileSystem): 15
InetSocketAddress (java.net.InetSocketAddress): 11
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 10