Search in sources :

Example 41 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class NamenodeFsck method getBlockLocations.

private LocatedBlocks getBlockLocations(String path, HdfsFileStatus file) throws IOException {
    long fileLen = file.getLen();
    LocatedBlocks blocks = null;
    final FSNamesystem fsn = namenode.getNamesystem();
    fsn.readLock();
    try {
        blocks = FSDirStatAndListingOp.getBlockLocations(
            fsn.getFSDirectory(), fsn.getPermissionChecker(),
            path, 0, fileLen, false).blocks;
    } catch (FileNotFoundException fnfe) {
        blocks = null;
    } finally {
        fsn.readUnlock("fsckGetBlockLocations");
    }
    return blocks;
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FileNotFoundException(java.io.FileNotFoundException)
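A minimal sketch of consuming the result; the helper name below is illustrative and not part of NamenodeFsck. Note that the method returns null when the file disappears between listing and lookup, so callers must null-check.

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

// Hypothetical helper: report the replica count of every block returned
// by getBlockLocations() above.
static void printReplicaCounts(LocatedBlocks blocks) {
    if (blocks == null) {
        // the file was deleted concurrently (FileNotFoundException path)
        return;
    }
    for (LocatedBlock lb : blocks.getLocatedBlocks()) {
        DatanodeInfo[] locs = lb.getLocations();
        System.out.println(lb.getBlock().getBlockName() + ": " + locs.length + " replica(s)");
    }
}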

Example 42 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestDecommission method testDecommissionWithOpenfile.

@Test(timeout = 120000)
public void testDecommissionWithOpenfile() throws IOException, InterruptedException {
    LOG.info("Starting test testDecommissionWithOpenfile");
    // At most 4 nodes will be decommissioned
    startCluster(1, 7);
    FileSystem fileSys = getCluster().getFileSystem(0);
    FSNamesystem ns = getCluster().getNamesystem(0);
    String openFile = "/testDecommissionWithOpenfile.dat";
    writeFile(fileSys, new Path(openFile), (short) 3);
    // reopen the file for append so that it stays open for write
    FSDataOutputStream fdos = fileSys.append(new Path(openFile));
    LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(getCluster().getNameNode(0), openFile, 0, fileSize);
    DatanodeInfo[] dnInfos4LastBlock = lbs.getLastLocatedBlock().getLocations();
    DatanodeInfo[] dnInfos4FirstBlock = lbs.get(0).getLocations();
    ArrayList<String> nodes = new ArrayList<String>();
    ArrayList<DatanodeInfo> dnInfos = new ArrayList<DatanodeInfo>();
    DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
    for (DatanodeInfo datanodeInfo : dnInfos4FirstBlock) {
        DatanodeInfo found = datanodeInfo;
        for (DatanodeInfo dif : dnInfos4LastBlock) {
            if (datanodeInfo.equals(dif)) {
                found = null;
            }
        }
        if (found != null) {
            nodes.add(found.getXferAddr());
            dnInfos.add(dm.getDatanode(found));
        }
    }
    // decommission one of the 3 nodes which hold the last block
    nodes.add(dnInfos4LastBlock[0].getXferAddr());
    dnInfos.add(dm.getDatanode(dnInfos4LastBlock[0]));
    initExcludeHosts(nodes);
    refreshNodes(0);
    for (DatanodeInfo dn : dnInfos) {
        waitNodeState(dn, AdminStates.DECOMMISSIONED);
    }
    fdos.close();
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeManager(org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) FileSystem(org.apache.hadoop.fs.FileSystem) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ArrayList(java.util.ArrayList) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) FSNamesystem(org.apache.hadoop.hdfs.server.namenode.FSNamesystem) Test(org.junit.Test)
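The nested loops above select the first-block replicas that do not also hold the last block. A hedged, set-based equivalent (relying on the equals/hashCode that DatanodeInfo inherits from DatanodeID) is more direct:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

// Illustrative rewrite of the selection loops, not the test's actual code.
Set<DatanodeInfo> lastBlockNodes = new HashSet<>(Arrays.asList(dnInfos4LastBlock));
for (DatanodeInfo dn : dnInfos4FirstBlock) {
    if (!lastBlockNodes.contains(dn)) {
        nodes.add(dn.getXferAddr());
        dnInfos.add(dm.getDatanode(dn));
    }
}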

Example 43 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestBlockReplacement method testBlockMoveAcrossStorageInSameNode.

@Test
public void testBlockMoveAcrossStorageInSameNode() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // create only one datanode in the cluster to verify movement within
    // datanode.
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .storageTypes(new StorageType[] { StorageType.DISK, StorageType.ARCHIVE })
        .build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final Path file = new Path("/testBlockMoveAcrossStorageInSameNode/file");
        DFSTestUtil.createFile(dfs, file, 1024, (short) 1, 1024);
        LocatedBlocks locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
        // get the current located block
        LocatedBlock locatedBlock = locatedBlocks.get(0);
        ExtendedBlock block = locatedBlock.getBlock();
        DatanodeInfo[] locations = locatedBlock.getLocations();
        assertEquals(1, locations.length);
        StorageType[] storageTypes = locatedBlock.getStorageTypes();
        // current block should be written to DISK
        assertTrue(storageTypes[0] == StorageType.DISK);
        DatanodeInfo source = locations[0];
        // move the block to ARCHIVE by using the same DatanodeInfo for source,
        // proxy and destination so that the movement happens within the datanode
        assertTrue(replaceBlock(block, source, source, source, StorageType.ARCHIVE, Status.SUCCESS));
        // wait till the namenode is notified of the replica move
        Thread.sleep(3000);
        locatedBlocks = dfs.getClient().getLocatedBlocks(file.toString(), 0);
        // get the current located block
        locatedBlock = locatedBlocks.get(0);
        assertEquals("Storage should be only one", 1, locatedBlock.getLocations().length);
        assertTrue("Block should be moved to ARCHIVE", locatedBlock.getStorageTypes()[0] == StorageType.ARCHIVE);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) StorageType(org.apache.hadoop.fs.StorageType) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) Test(org.junit.Test)
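The fixed Thread.sleep(3000) can be flaky on slow machines. A hedged alternative, assuming Hadoop's GenericTestUtils.waitFor is available on the test classpath, polls until the namenode reports the new storage type:

import java.io.IOException;
import org.apache.hadoop.test.GenericTestUtils;

// Illustrative polling loop: check every 100 ms, for up to 10 seconds,
// that the block now reports ARCHIVE storage.
GenericTestUtils.waitFor(() -> {
    try {
        LocatedBlock lb = dfs.getClient().getLocatedBlocks(file.toString(), 0).get(0);
        return lb.getStorageTypes()[0] == StorageType.ARCHIVE;
    } catch (IOException e) {
        return false;
    }
}, 100, 10000);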

Example 44 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestFileTruncate method checkBlockRecovery.

public static void checkBlockRecovery(Path p, DistributedFileSystem dfs, int attempts, long sleepMs) throws IOException {
    boolean success = false;
    for (int i = 0; i < attempts; i++) {
        LocatedBlocks blocks = getLocatedBlocks(p, dfs);
        boolean noLastBlock = blocks.getLastLocatedBlock() == null;
        if (!blocks.isUnderConstruction() && (noLastBlock || blocks.isLastBlockComplete())) {
            success = true;
            break;
        }
        try {
            Thread.sleep(sleepMs);
        } catch (InterruptedException ignored) {
        }
    }
    assertThat("inode should complete in ~" + sleepMs * attempts + " ms.", success, is(true));
}
Also used : LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks)
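A typical call site, as a hedged sketch (the path and timing values here are illustrative, not the constants TestFileTruncate actually uses):

// After a truncate, wait up to roughly 30 * 100 ms = 3 s for the file's
// last block to finish recovery and be reported complete.
checkBlockRecovery(new Path("/test/file"), dfs, 30, 100L);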

Example 45 with LocatedBlocks

use of org.apache.hadoop.hdfs.protocol.LocatedBlocks in project hadoop by apache.

the class TestDFSClientRetries method testIdempotentAllocateBlockAndClose.

/**
   * Test that getAdditionalBlock() and close() are idempotent. This allows
   * a client to safely retry a call and still produce a correct
   * file. See HDFS-3031.
   */
@Test
public void testIdempotentAllocateBlockAndClose() throws Exception {
    final String src = "/testIdempotentAllocateBlock";
    Path file = new Path(src);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
        NamenodeProtocols spyNN = spy(preSpyNN);
        DFSClient client = new DFSClient(null, spyNN, conf, null);
        // Make the call to addBlock() get called twice, as if it were retried
        // due to an IPC issue.
        doAnswer(new Answer<LocatedBlock>() {

            private int getBlockCount(LocatedBlock ret) throws IOException {
                LocatedBlocks lb = cluster.getNameNodeRpc().getBlockLocations(src, 0, Long.MAX_VALUE);
                assertEquals(lb.getLastLocatedBlock().getBlock(), ret.getBlock());
                return lb.getLocatedBlocks().size();
            }

            @Override
            public LocatedBlock answer(InvocationOnMock invocation) throws Throwable {
                LOG.info("Called addBlock: " + Arrays.toString(invocation.getArguments()));
                // First call: wrap NotReplicatedYetException in a RemoteException,
                // as the RPC layer does.
                final LocatedBlock ret;
                try {
                    ret = (LocatedBlock) invocation.callRealMethod();
                } catch (NotReplicatedYetException e) {
                    throw new RemoteException(e.getClass().getName(), e.getMessage());
                }
                final int blockCount = getBlockCount(ret);
                // Retrying should result in a new block at the end of the file
                // (abandoning the old one). The retry must not throw
                // NotReplicatedYetException.
                final LocatedBlock ret2;
                try {
                    ret2 = (LocatedBlock) invocation.callRealMethod();
                } catch (NotReplicatedYetException e) {
                    throw new AssertionError("Unexpected exception", e);
                }
                final int blockCount2 = getBlockCount(ret2);
                // We shouldn't have gained an extra block by the RPC.
                assertEquals(blockCount, blockCount2);
                return ret2;
            }
        }).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(),
            Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any(),
            Mockito.anyLong(), Mockito.<String[]>any(),
            Mockito.<EnumSet<AddBlockFlag>>any());
        doAnswer(new Answer<Boolean>() {

            @Override
            public Boolean answer(InvocationOnMock invocation) throws Throwable {
                // complete() may return false a few times before it returns
                // true. We want to wait until it returns true, and then
                // make it retry one more time after that.
                LOG.info("Called complete:");
                if (!(Boolean) invocation.callRealMethod()) {
                    LOG.info("Complete call returned false, not faking a retry RPC");
                    return false;
                }
                // We got a successful close. Call it again to check idempotence.
                try {
                    boolean ret = (Boolean) invocation.callRealMethod();
                    LOG.info("Complete call returned true, faked second RPC. " + "Returned: " + ret);
                    return ret;
                } catch (Throwable t) {
                    LOG.error("Idempotent retry threw exception", t);
                    throw t;
                }
            }
        }).when(spyNN).complete(Mockito.anyString(), Mockito.anyString(), Mockito.<ExtendedBlock>any(), anyLong());
        OutputStream stm = client.create(file.toString(), true);
        try {
            AppendTestUtil.write(stm, 0, 10000);
            stm.close();
            stm = null;
        } finally {
            IOUtils.cleanup(LOG, stm);
        }
        // Make sure the mock was actually properly injected.
        Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock(
            Mockito.anyString(), Mockito.anyString(),
            Mockito.<ExtendedBlock>any(), Mockito.<DatanodeInfo[]>any(),
            Mockito.anyLong(), Mockito.<String[]>any(),
            Mockito.<EnumSet<AddBlockFlag>>any());
        Mockito.verify(spyNN, Mockito.atLeastOnce()).complete(
            Mockito.anyString(), Mockito.anyString(),
            Mockito.<ExtendedBlock>any(), anyLong());
        AppendTestUtil.check(fs, file, 10000);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) OutputStream(java.io.OutputStream) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Matchers.anyString(org.mockito.Matchers.anyString) IOException(java.io.IOException) InvocationOnMock(org.mockito.invocation.InvocationOnMock) FileSystem(org.apache.hadoop.fs.FileSystem) RemoteException(org.apache.hadoop.ipc.RemoteException) Matchers.anyBoolean(org.mockito.Matchers.anyBoolean) NotReplicatedYetException(org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException) Test(org.junit.Test)
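The doAnswer pattern above generalizes to any idempotent RPC: invoke the real method twice, simulating an RPC-level retry, and assert the retry changes nothing. A minimal, hypothetical sketch (spy and someIdempotentCall are placeholders, not Hadoop or Mockito APIs):

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doAnswer;

// Call the real method twice and require both calls to agree, which is
// exactly what a retried idempotent RPC must guarantee.
doAnswer(invocation -> {
    Object first = invocation.callRealMethod();   // original call
    Object retry = invocation.callRealMethod();   // simulated retry
    assertEquals(first, retry);
    return retry;
}).when(spy).someIdempotentCall(anyString());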

Aggregations

LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 118
Test (org.junit.Test): 67
Path (org.apache.hadoop.fs.Path): 65
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 52
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 33
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 32
Configuration (org.apache.hadoop.conf.Configuration): 29
IOException (java.io.IOException): 20
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 20
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 20
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 18
FileSystem (org.apache.hadoop.fs.FileSystem): 17
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 17
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 13
Block (org.apache.hadoop.hdfs.protocol.Block): 11
InetSocketAddress (java.net.InetSocketAddress): 10
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10
FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream): 9
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus): 7
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 7