
Example 86 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From the class StripedFileTestUtil, method waitForAllReconstructionFinished.

/**
   * Wait for reconstruction to finish when the file has corrupted blocks.
   * The function can handle files of any length.
   */
public static void waitForAllReconstructionFinished(Path file, DistributedFileSystem fs, long expectedBlocks) throws Exception {
    LOG.info("Waiting for reconstruction to be finished for the file:" + file + ", expectedBlocks:" + expectedBlocks);
    final int attempts = 60;
    for (int i = 0; i < attempts; i++) {
        int totalBlocks = 0;
        LocatedBlocks locatedBlocks = getLocatedBlocks(file, fs);
        // Count every reported block location across all block groups;
        // reconstruction is finished once the expected number reappears.
        for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
            DatanodeInfo[] storageInfos = locatedBlock.getLocations();
            totalBlocks += storageInfos.length;
        }
        if (totalBlocks >= expectedBlocks) {
            return;
        }
        Thread.sleep(1000);
    }
    throw new IOException("Time out waiting for EC block reconstruction.");
}
Also used: DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException)
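
For context, a minimal sketch of how a test might drive this helper, assuming a running MiniDFSCluster with an RS-6-3 erasure-coding policy; the fixture names (cluster, fs, blockSize), the path, and the 9-location expectation are illustrative assumptions, not taken from the source above.

private static void exerciseReconstruction(MiniDFSCluster cluster,
        DistributedFileSystem fs, int blockSize) throws Exception {
    // Hypothetical fixture: /ec is assumed to carry an RS-6-3 policy.
    Path ecFile = new Path("/ec/reconstruction-test");
    DFSTestUtil.createFile(fs, ecFile, 6L * blockSize, (short) 1, 0L);
    // Stopping a datanode that holds part of the block group makes the
    // NameNode schedule EC reconstruction on the surviving nodes.
    cluster.stopDataNode(0);
    // A full RS-6-3 block group reports 9 locations (6 data + 3 parity).
    StripedFileTestUtil.waitForAllReconstructionFinished(ecFile, fs, 9);
}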

Example 87 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From the class TestClientReportBadBlock, method createAFileWithCorruptedBlockReplicas.

/**
   * Create a file with one block and corrupt some or all of its replicas.
   */
private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl, int corruptBlockCount) throws IOException, AccessControlException, FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
    DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
    DFSTestUtil.waitReplication(dfs, filePath, repl);
    // Locate the file blocks by asking the name node
    final LocatedBlocks locatedblocks = dfs.dfs.getNamenode().getBlockLocations(filePath.toString(), 0L, BLOCK_SIZE);
    Assert.assertEquals(repl, locatedblocks.get(0).getLocations().length);
    // The file only has one block
    LocatedBlock lblock = locatedblocks.get(0);
    DatanodeInfo[] datanodeinfos = lblock.getLocations();
    ExtendedBlock block = lblock.getBlock();
    // corrupt some or all of the block replicas
    for (int i = 0; i < corruptBlockCount; i++) {
        DatanodeInfo dninfo = datanodeinfos[i];
        final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
        cluster.corruptReplica(dn, block);
        LOG.debug("Corrupted block " + block.getBlockName() + " on data node " + dninfo);
    }
}
Also used: DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
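
A hedged sketch of how this helper is typically exercised; the path, the replication factor, and the read-triggered bad-block report are assumptions for illustration, not code from the test above.

Path file = new Path("/tmp/corrupt-replica-test");
// Corrupt all 3 replicas of the file's single block.
createAFileWithCorruptedBlockReplicas(file, (short) 3, 3);
try {
    // Reading forces the client onto the corrupt replicas, and the
    // client reports each bad replica back to the NameNode.
    DFSTestUtil.readFile(dfs, file);
    Assert.fail("Read should fail when every replica is corrupt");
} catch (BlockMissingException expected) {
    // With no healthy replica left, the block cannot be served.
}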

Example 88 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From the class TestClientReportBadBlock, method verifyCorruptedBlockCount.

/**
   * Verify the number of corrupted block replicas by fetching the block
   * locations from the name node.
   */
private void verifyCorruptedBlockCount(Path filePath, int expectedReplicas) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
    final LocatedBlocks lBlocks = dfs.dfs.getNamenode().getBlockLocations(filePath.toUri().getPath(), 0, Long.MAX_VALUE);
    // We expect only the first block of the file to be used in this test
    LocatedBlock firstLocatedBlock = lBlocks.get(0);
    Assert.assertEquals(expectedReplicas, firstLocatedBlock.getLocations().length);
}
Also used: LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
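
As a usage sketch (the replica counts and the outcome are assumptions): when only some replicas are corrupted, the client's bad-block report should shrink the location list the name node hands out, which is what this helper checks.

// Sketch: corrupt 1 of 3 replicas, then read to trigger the report.
createAFileWithCorruptedBlockReplicas(file, (short) 3, 1);
DFSTestUtil.readFile(dfs, file);
// Assumed outcome: the NameNode stops returning the corrupt location,
// so only the 2 healthy replicas remain in getBlockLocations().
verifyCorruptedBlockCount(file, 2);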

Example 89 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From the class TestClientReportBadBlock, method verifyFirstBlockCorrupted.

/**
   * Verify that the first block of the file is corrupted (for all of its
   * replicas).
   */
private void verifyFirstBlockCorrupted(Path filePath, boolean isCorrupted) throws AccessControlException, FileNotFoundException, UnresolvedLinkException, IOException {
    final LocatedBlocks locatedBlocks = dfs.dfs.getNamenode().getBlockLocations(filePath.toUri().getPath(), 0, Long.MAX_VALUE);
    final LocatedBlock firstLocatedBlock = locatedBlocks.get(0);
    Assert.assertEquals(isCorrupted, firstLocatedBlock.isCorrupt());
}
Also used: LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock)
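
A short sketch of the invariant this helper captures (hedged; the exact reporting path is an assumption): LocatedBlock.isCorrupt() only turns true once every replica is known to be corrupt, since the name node keeps serving a block while any healthy copy remains.

// All replicas corrupt: the block itself gets flagged corrupt.
createAFileWithCorruptedBlockReplicas(file, (short) 3, 3);
try {
    DFSTestUtil.readFile(dfs, file);  // fails, but reports the bad replicas
} catch (IOException expectedWhenAllReplicasAreBad) {
}
verifyFirstBlockCorrupted(file, true);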

Example 90 with LocatedBlock

Use of org.apache.hadoop.hdfs.protocol.LocatedBlock in project hadoop by apache.

From the class TestDFSClientRetries, method testClientDNProtocolTimeout.

/** Test that a timeout occurs when the DN does not respond to an RPC.
   * Start up a server and ask it to sleep for n seconds. Make an
   * RPC to the server with rpcTimeout set to less than n and ensure
   * that a SocketTimeoutException is thrown.
   */
@Test
public void testClientDNProtocolTimeout() throws IOException {
    final Server server = new TestServer(1, true);
    server.start();
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
    ClientDatanodeProtocol proxy = null;
    try {
        // 500 ms rpc timeout, deliberately shorter than the server's sleep.
        proxy = DFSUtilClient.createClientDatanodeProtocolProxy(fakeDnId, conf, 500, false, fakeBlock);
        proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
        fail("Did not get expected exception: SocketTimeoutException");
    } catch (SocketTimeoutException e) {
        LOG.info("Got the expected Exception: SocketTimeoutException");
    } finally {
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        server.stop();
    }
}
Also used: DatanodeID(org.apache.hadoop.hdfs.protocol.DatanodeID) SocketTimeoutException(java.net.SocketTimeoutException) Server(org.apache.hadoop.ipc.Server) InetSocketAddress(java.net.InetSocketAddress) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) Block(org.apache.hadoop.hdfs.protocol.Block) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) Test(org.junit.Test)
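
TestServer is an inner class of TestDFSClientRetries that is not shown in this excerpt. A minimal sketch of the behavior the test relies on, built on the standard org.apache.hadoop.ipc.Server base class; the class name and details here are illustrative, not the actual inner class.

static class SleepyServer extends Server {
    SleepyServer(Configuration conf) throws IOException {
        // Bind an RPC server on an ephemeral port with one handler thread.
        super("0.0.0.0", 0, LongWritable.class, 1, conf);
    }

    @Override
    public Writable call(RPC.RpcKind rpcKind, String protocol,
            Writable param, long receiveTime) throws IOException {
        try {
            // Sleep longer than the client's 500 ms rpcTimeout, so every
            // call times out on the client side.
            Thread.sleep(1000);
        } catch (InterruptedException ignored) {
        }
        return param;
    }
}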

Aggregations

Usage counts for these classes across the indexed sources:

LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 206
Test (org.junit.Test): 94
Path (org.apache.hadoop.fs.Path): 86
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 78
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 52
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 51
Configuration (org.apache.hadoop.conf.Configuration): 43
IOException (java.io.IOException): 36
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 34
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 33
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 33
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 25
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 25
ArrayList (java.util.ArrayList): 24
StorageType (org.apache.hadoop.fs.StorageType): 24
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 24
Block (org.apache.hadoop.hdfs.protocol.Block): 16
FileSystem (org.apache.hadoop.fs.FileSystem): 15
InetSocketAddress (java.net.InetSocketAddress): 11
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 10