Search in sources :

Example 51 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

From the class TestRequestHedgingProxyProvider, method testHedgingWhenConnectException.

/**
 * When the active NN fails with a transport-level ConnectException and the
 * standby replies with a RemoteException wrapping StandbyException, hedging
 * must surface both failures inside a single MultiException.
 */
@Test
public void testHedgingWhenConnectException() throws Exception {
    // Active side: dies at the connection layer.
    final NamenodeProtocols activeNn = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(activeNn.getStats()).thenThrow(new ConnectException());
    // Standby side: server-side StandbyException carried in a RemoteException.
    final NamenodeProtocols standbyNn = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(standbyNn.getStats()).thenThrow(
        new RemoteException("org.apache.hadoop.ipc.StandbyException", "Standby NameNode"));
    final RequestHedgingProxyProvider<NamenodeProtocols> provider =
        new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
            createFactory(activeNn, standbyNn));
    try {
        provider.getProxy().proxy.getStats();
        Assert.fail("Should fail since the active namenode throws ConnectException!");
    } catch (MultiException wrapped) {
        // Each per-proxy failure must be one of the two injected exceptions.
        for (Exception failure : wrapped.getExceptions().values()) {
            if (failure instanceof RemoteException) {
                final Exception cause = ((RemoteException) failure).unwrapRemoteException();
                Assert.assertTrue("Unexpected RemoteException: " + cause.getMessage(),
                    cause instanceof StandbyException);
            } else {
                Assert.assertTrue(failure instanceof ConnectException);
            }
        }
    }
    // Hedging must have invoked getStats() on both namenodes exactly once.
    Mockito.verify(activeNn).getStats();
    Mockito.verify(standbyNn).getStats();
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) StandbyException(org.apache.hadoop.ipc.StandbyException) RemoteException(org.apache.hadoop.ipc.RemoteException) MultiException(org.apache.hadoop.io.retry.MultiException) URISyntaxException(java.net.URISyntaxException) ConnectException(java.net.ConnectException) MultiException(org.apache.hadoop.io.retry.MultiException) StandbyException(org.apache.hadoop.ipc.StandbyException) IOException(java.io.IOException) EOFException(java.io.EOFException) RemoteException(org.apache.hadoop.ipc.RemoteException) FileNotFoundException(java.io.FileNotFoundException) ConnectException(java.net.ConnectException) Test(org.junit.Test)

Example 52 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

From the class TestRequestHedgingProxyProvider, method testPerformFailoverWith3Proxies.

/**
 * Exercises RequestHedgingProxyProvider failover across three proxies.
 * Each mock succeeds only while the shared isGood flag selects it, and a
 * shared counter tracks every getStats() invocation so the test can pin down
 * exactly how many proxies are tried at each stage: all three on the first
 * hedged call, only the last-successful one afterwards, and (after a
 * performFailover) the remaining candidates until a new winner is found.
 */
@Test
public void testPerformFailoverWith3Proxies() throws Exception {
    // Register a third namenode so the provider has three candidates.
    conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2,nn3");
    conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3", "machine3.foo.bar:9820");
    // Counts every getStats() invocation across all three mocks.
    final AtomicInteger counter = new AtomicInteger(0);
    // Selects which mock currently succeeds: 1 = good, 2 = bad, 3 = worse.
    // Single-element array so the anonymous Answer classes can mutate it.
    final int[] isGood = { 1 };
    final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {

        @Override
        public long[] answer(InvocationOnMock invocation) throws Throwable {
            counter.incrementAndGet();
            if (isGood[0] == 1) {
                // Sleep so the losing proxies' failures are observed too.
                Thread.sleep(1000);
                return new long[] { 1 };
            }
            throw new IOException("Was Good mock !!");
        }
    });
    final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(badMock.getStats()).thenAnswer(new Answer<long[]>() {

        @Override
        public long[] answer(InvocationOnMock invocation) throws Throwable {
            counter.incrementAndGet();
            if (isGood[0] == 2) {
                Thread.sleep(1000);
                return new long[] { 2 };
            }
            throw new IOException("Bad mock !!");
        }
    });
    final NamenodeProtocols worseMock = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(worseMock.getStats()).thenAnswer(new Answer<long[]>() {

        @Override
        public long[] answer(InvocationOnMock invocation) throws Throwable {
            counter.incrementAndGet();
            if (isGood[0] == 3) {
                Thread.sleep(1000);
                return new long[] { 3 };
            }
            throw new IOException("Worse mock !!");
        }
    });
    RequestHedgingProxyProvider<NamenodeProtocols> provider = new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class, createFactory(goodMock, badMock, worseMock));
    // First call hedges across all three proxies; goodMock (value 1) wins.
    long[] stats = provider.getProxy().proxy.getStats();
    Assert.assertTrue(stats.length == 1);
    Assert.assertEquals(1, stats[0]);
    Assert.assertEquals(3, counter.get());
    Mockito.verify(badMock).getStats();
    Mockito.verify(goodMock).getStats();
    Mockito.verify(worseMock).getStats();
    stats = provider.getProxy().proxy.getStats();
    Assert.assertTrue(stats.length == 1);
    Assert.assertEquals(1, stats[0]);
    // Ensure only the previous successful one is invoked
    Mockito.verifyNoMoreInteractions(badMock);
    Mockito.verifyNoMoreInteractions(worseMock);
    Assert.assertEquals(4, counter.get());
    // Flip to standby.. so now this should fail
    isGood[0] = 2;
    try {
        provider.getProxy().proxy.getStats();
        Assert.fail("Should fail since previously successful proxy now fails ");
    } catch (Exception ex) {
        Assert.assertTrue(ex instanceof IOException);
    }
    Assert.assertEquals(5, counter.get());
    provider.performFailover(provider.getProxy().proxy);
    stats = provider.getProxy().proxy.getStats();
    Assert.assertTrue(stats.length == 1);
    Assert.assertEquals(2, stats[0]);
    // Counter updates twice since both proxies are tried on failure
    Assert.assertEquals(7, counter.get());
    stats = provider.getProxy().proxy.getStats();
    Assert.assertTrue(stats.length == 1);
    Assert.assertEquals(2, stats[0]);
    // Counter updates only once now
    Assert.assertEquals(8, counter.get());
    // Flip to Other standby.. so now this should fail
    isGood[0] = 3;
    try {
        provider.getProxy().proxy.getStats();
        Assert.fail("Should fail since previously successful proxy now fails ");
    } catch (Exception ex) {
        Assert.assertTrue(ex instanceof IOException);
    }
    // Counter should update only 1 time
    Assert.assertEquals(9, counter.get());
    provider.performFailover(provider.getProxy().proxy);
    stats = provider.getProxy().proxy.getStats();
    Assert.assertTrue(stats.length == 1);
    // Ensure correct proxy was called
    Assert.assertEquals(3, stats[0]);
    // Counter updates twice since both proxies are tried on failure
    Assert.assertEquals(11, counter.get());
    stats = provider.getProxy().proxy.getStats();
    Assert.assertTrue(stats.length == 1);
    Assert.assertEquals(3, stats[0]);
    // Counter updates only once now
    Assert.assertEquals(12, counter.get());
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) InvocationOnMock(org.mockito.invocation.InvocationOnMock) IOException(java.io.IOException) URISyntaxException(java.net.URISyntaxException) ConnectException(java.net.ConnectException) MultiException(org.apache.hadoop.io.retry.MultiException) StandbyException(org.apache.hadoop.ipc.StandbyException) IOException(java.io.IOException) EOFException(java.io.EOFException) RemoteException(org.apache.hadoop.ipc.RemoteException) FileNotFoundException(java.io.FileNotFoundException) Test(org.junit.Test)

Example 53 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

From the class TestOpenFilesWithSnapshot, method testOpenFilesWithRename.

/**
 * Snapshot + rename + delete over a directory containing aborted writes and a
 * file with a zero-sized block must leave the NameNode restartable.
 */
@Test
public void testOpenFilesWithRename() throws Exception {
    final Path testRoot = new Path("/test");
    doWriteAndAbort(fs, testRoot);
    // Also cover the zero-sized-block case: open a file and allocate one
    // block without ever writing data into it.
    final Path emptyBlockFile = new Path("/test/test/test4");
    fs.create(emptyBlockFile);
    final String clientName = fs.getClient().getClientName();
    final NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
    rpcServer.addBlock(emptyBlockFile.toString(), clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID, null, null);
    // Snapshot, then rename and remove the directory holding the open files.
    fs.createSnapshot(testRoot, "s2");
    fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
    fs.delete(new Path("/test/test-renamed"), true);
    // The NameNode must come back up cleanly from the resulting edit log.
    restartNameNode();
}
Also used : Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) Test(org.junit.Test)

Example 54 with NamenodeProtocols

use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

From the class TestNetworkTopology, method testInvalidNetworkTopologiesNotCachedInHdfs.

/**
 * A DataNode registered with a rack path of the wrong depth must be rejected,
 * and the invalid topology must not be cached: after fixing the static rack
 * mapping and restarting that DataNode, it should register successfully with
 * the corrected network location.
 */
@Test(timeout = 180000)
public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;
    try {
        // Deliberately mismatched rack depths: "/a/b" (2 levels) vs "/c" (1).
        final String[] racks = { "/a/b", "/c" };
        final String[] hosts = { "foo1.example.com", "foo2.example.com" };
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).racks(racks).hosts(hosts).build();
        cluster.waitActive();
        final NamenodeProtocols nn = cluster.getNameNodeRpc();
        Assert.assertNotNull(nn);
        // Poll until exactly one DataNode is live; the node with the
        // mismatched rack must never register, so two live nodes is a failure.
        DatanodeInfo[] report;
        for (;;) {
            report = nn.getDatanodeReport(DatanodeReportType.LIVE);
            Assert.assertFalse(report.length == 2);
            if (report.length == 1) {
                break;
            }
            Thread.sleep(1000);
        }
        // Re-map the rejected node onto the rack of the node that came up.
        final int validIdx = report[0].getHostName().equals(hosts[0]) ? 0 : 1;
        final int invalidIdx = 1 - validIdx;
        StaticMapping.addNodeToRack(hosts[invalidIdx], racks[validIdx]);
        LOG.info("datanode " + validIdx + " came up with network location " + report[0].getNetworkLocation());
        // Restart the previously rejected DataNode and wait for both to be live.
        cluster.restartDataNode(invalidIdx);
        Thread.sleep(5000);
        for (;;) {
            report = nn.getDatanodeReport(DatanodeReportType.LIVE);
            if (report.length == 2) {
                break;
            }
            if (report.length == 0) {
                LOG.info("got no valid DNs");
            } else if (report.length == 1) {
                LOG.info("got one valid DN: " + report[0].getHostName() + " (at " + report[0].getNetworkLocation() + ")");
            }
            Thread.sleep(1000);
        }
        // Both nodes must now share the same (valid) network location.
        Assert.assertEquals(report[0].getNetworkLocation(), report[1].getNetworkLocation());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Test(org.junit.Test)

Aggregations

NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols)54 Test (org.junit.Test)45 IOException (java.io.IOException)24 Configuration (org.apache.hadoop.conf.Configuration)21 Path (org.apache.hadoop.fs.Path)19 FileSystem (org.apache.hadoop.fs.FileSystem)16 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)15 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)12 RemoteException (org.apache.hadoop.ipc.RemoteException)10 FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream)9 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)9 File (java.io.File)8 FileNotFoundException (java.io.FileNotFoundException)8 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)8 DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)7 StandbyException (org.apache.hadoop.ipc.StandbyException)7 EOFException (java.io.EOFException)6 ConnectException (java.net.ConnectException)6 URISyntaxException (java.net.URISyntaxException)6 ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)6