Example 6 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

The class TestClientReportBadBlock, method createAFileWithCorruptedBlockReplicas.

/**
   * Create a file with one block and corrupt some/all of the block replicas.
   */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl, int corruptBlockCount) throws IOException, AccessControlException, FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
    DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
    DFSTestUtil.waitReplication(dfs, filePath, repl);
    // Locate the file's blocks by asking the NameNode
    final LocatedBlocks locatedblocks = dfs.dfs.getNamenode().getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
    Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
    // The file only has one block
    LocatedBlock lblock = locatedblocks.get(0);
    DatanodeInfo[] datanodeinfos = lblock.getLocations();
    ExtendedBlock block = lblock.getBlock();
    // Corrupt some/all of the block replicas
    for (int i = 0; i < corruptBlockCount; i++) {
        DatanodeInfo dninfo = datanodeinfos[i];
        final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
        cluster.corruptReplica(dn, block);
        LOG.debug("Corrupted block " + block.getBlockName() + " on data node " + dninfo);
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
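
For context, a minimal sketch of how a helper like this might be driven; the cluster bring-up, file name, and replication factor below are hypothetical and would normally live in the enclosing test class (which already holds the conf, cluster, and dfs fields used above).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical driver: bring up a 3-DataNode mini cluster, then create a
// one-block file and corrupt two of its three replicas via the helper above.
Configuration conf = new Configuration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
try {
    cluster.waitActive();
    // dfs is cluster.getFileSystem() in the enclosing test class
    createAFileWithCorruptedBlockReplicas(new Path("/corrupt-two-of-three"), (short) 3, 2);
} finally {
    cluster.shutdown();
}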

Example 7 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

The class TestClientReportBadBlock, method verifyCorruptedBlockCount.

/**
   * Verify the number of corrupted block replicas by fetching the block
   * location from name node.
   */
private void verifyCorruptedBlockCount(Path filePath, int expectedReplicas) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
    final LocatedBlocks lBlocks = dfs.dfs.getNamenode().getBlockLocations(filePath.toUri().getPath(), 0, Long.MAX_VALUE);
    // We expect only the first block of the file to be used in this test
    LocatedBlock firstLocatedBlock = lBlocks.get(0);
    Assert.assertEquals(expectedReplicas, firstLocatedBlock.getLocations().length);
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
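
The same replica count can be read without reaching into the dfs.dfs internals; a sketch using only the public FileSystem API (the helper name is ours, not part of the original test):

import java.io.IOException;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical helper: count the replica locations of a file's first block
// through the public API instead of the DFSClient/NameNode protocol above.
private static int firstBlockReplicaCount(FileSystem fs, Path path) throws IOException {
    FileStatus stat = fs.getFileStatus(path);
    BlockLocation[] locations = fs.getFileBlockLocations(stat, 0, stat.getLen());
    return locations.length == 0 ? 0 : locations[0].getHosts().length;
}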

Example 8 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

The class TestClientReportBadBlock, method verifyFirstBlockCorrupted.

/**
   * Verify that the first block of the file is corrupted (across all of its replicas).
   */
private void verifyFirstBlockCorrupted(Path filePath, boolean isCorrupted) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
    final LocatedBlocks locatedBlocks = dfs.dfs.getNamenode().getBlockLocations(filePath.toUri().getPath(), 0, Long.MAX_VALUE);
    final LocatedBlock firstLocatedBlock = locatedBlocks.get(0);
    Assert.assertEquals(isCorrupted, firstLocatedBlock.isCorrupt());
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
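
A natural generalization (our sketch, not part of the original test) walks every returned block rather than just the first. A LocatedBlock is flagged corrupt when the NameNode knows of no remaining healthy replica for it.

import java.io.IOException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Hypothetical helper: true if any block of the file is flagged corrupt.
private static boolean anyBlockCorrupt(DFSClient client, String path) throws IOException {
    LocatedBlocks lbs = client.getLocatedBlocks(path, 0, Long.MAX_VALUE);
    for (LocatedBlock lb : lbs.getLocatedBlocks()) {
        if (lb.isCorrupt()) {
            return true;
        }
    }
    return false;
}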

Example 9 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

The class TestDFSClientRetries, method testClientDNProtocolTimeout.

/** Test that a timeout occurs when the DN does not respond to an RPC.
   * Start up a server and ask it to sleep for n seconds. Make an
   * RPC to the server with rpcTimeout set to less than n, and ensure
   * that a SocketTimeoutException is thrown.
   */
@Test
public void testClientDNProtocolTimeout() throws IOException {
    final Server server = new TestServer(1, true);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
    ClientDatanodeProtocol proxy = null;
    try {
        proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf, 500, false, fakeBlock);
        proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
        fail("Did not get expected exception: SocketTimeoutException");
    } catch (SocketTimeoutException e) {
        LOG.info("Got the expected Exception: SocketTimeoutException");
    } finally {
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        server.stop();
    }
}
Also used : DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) SocketTimeoutException(java.net.SocketTimeoutException) Server(org.apache.hadoop.ipc.Server) InetSocketAddress(java.net.InetSocketAddress) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) Test(org.junit.Test)
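
The 500 ms passed to createClientDatanodeProtocolProxy is a per-proxy socket timeout. In ordinary client code the equivalent knob is the configuration key below; the key name matches current Hadoop, but verify it against your version.

import org.apache.hadoop.conf.Configuration;

// Client-wide DataNode socket timeout in milliseconds (default 60000).
// An RPC to a DataNode that does not answer within this window fails with
// a SocketTimeoutException, as exercised by the test above.
Configuration conf = new Configuration();
conf.setInt("dfs.client.socket-timeout", 500);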

Example 10 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

The class TestReplication, method waitForBlockReplication.

private void waitForBlockReplication(String filename, ClientProtocol namenode, int expected, long maxWaitSec, boolean isUnderConstruction, boolean noOverReplication) throws IOException {
    long start = Time.monotonicNow();
    // Wait for all the blocks to be replicated
    LOG.info("Checking for block replication for " + filename);
    while (true) {
        boolean replOk = true;
        LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
        for (Iterator<LocatedBlock> iter = blocks.getLocatedBlocks().iterator(); iter.hasNext(); ) {
            LocatedBlock block = iter.next();
            if (isUnderConstruction && !iter.hasNext()) {
                // do not check the last block
                break;
            }
            int actual = block.getLocations().length;
            if (noOverReplication) {
                assertTrue(actual <= expected);
            }
            if (actual < expected) {
                LOG.info("Not enough replicas for " + block.getBlock() + " yet. Expecting " + expected + ", got " + actual + ".");
                replOk = false;
                break;
            }
        }
        if (replOk) {
            return;
        }
        if (maxWaitSec > 0 && (Time.monotonicNow() - start) > (maxWaitSec * 1000)) {
            throw new IOException("Timedout while waiting for all blocks to " + " be replicated for " + filename);
        }
        try {
            Thread.sleep(500);
        } catch (InterruptedException ignored) {
        }
    }
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException)
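
The loop above is an instance of a generic poll-sleep-timeout pattern; here is a minimal standalone sketch of that pattern (names are ours):

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;
import org.apache.hadoop.util.Time;

// Hypothetical generic form of the wait loop above: re-check a condition
// every 500 ms until it holds or the deadline passes.
private static void waitFor(BooleanSupplier check, long maxWaitMs) throws TimeoutException {
    long start = Time.monotonicNow();
    while (!check.getAsBoolean()) {
        if (Time.monotonicNow() - start > maxWaitMs) {
            throw new TimeoutException("Condition not met within " + maxWaitMs + " ms");
        }
        try {
            Thread.sleep(500);
        } catch (InterruptedException ignored) {
        }
    }
}

Hadoop's own test tree offers a similar utility in GenericTestUtils.waitFor, and DFSTestUtil.waitReplication (used in Example 6) covers the specific replication case.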

Aggregations

LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 196
Test (org.junit.Test): 92
Path (org.apache.hadoop.fs.Path): 86
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 72
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 49
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 49
Configuration (org.apache.hadoop.conf.Configuration): 40
IOException (java.io.IOException): 34
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 34
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 33
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 33
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 25
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 25
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 24
StorageType (org.apache.hadoop.fs.StorageType): 23
ArrayList (java.util.ArrayList): 22
Block (org.apache.hadoop.hdfs.protocol.Block): 16
FileSystem (org.apache.hadoop.fs.FileSystem): 15
InetSocketAddress (java.net.InetSocketAddress): 11
File (java.io.File): 9