
Example 1 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

From the class TestDFSClientRetries, method testFailuresArePerOperation.

/**
   * This tests that DFSInputStream failures are counted for a given read
   * operation, and not over the lifetime of the stream. It is a regression
   * test for HDFS-127.
   */
@Test
public void testFailuresArePerOperation() throws Exception {
    long fileSize = 4096;
    Path file = new Path("/testFile");
    // Set short retry timeouts so this test runs faster
    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
    conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        FileSystem fs = cluster.getFileSystem();
        NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
        NamenodeProtocols spyNN = spy(preSpyNN);
        DFSClient client = new DFSClient(null, spyNN, conf, null);
        int maxBlockAcquires = client.getConf().getMaxBlockAcquireFailures();
        assertTrue(maxBlockAcquires > 0);
        DFSTestUtil.createFile(fs, file, fileSize, (short) 1, 12345L);
        // The client retries each block acquire up to maxBlockAcquires times,
        // so injecting more failures than that should make the operation fail
        // outright.
        doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires + 1)).when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
        try {
            IOUtils.copyBytes(client.open(file.toString()), new IOUtils.NullOutputStream(), conf, true);
            fail("Didn't get exception");
        } catch (IOException ioe) {
            DFSClient.LOG.info("Got expected exception", ioe);
        }
        // If we fail exactly that many times, then it should succeed.
        doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
        IOUtils.copyBytes(client.open(file.toString()), new IOUtils.NullOutputStream(), conf, true);
        DFSClient.LOG.info("Starting test case for failure reset");
        // Now the tricky case: fail a few times on one read, succeed, then fail
        // some more on a second read. The second read should still succeed,
        // because the failure count is per operation rather than per stream.
        doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
        DFSInputStream is = client.open(file.toString());
        byte[] buf = new byte[10];
        IOUtils.readFully(is, buf, 0, buf.length);
        DFSClient.LOG.info("First read successful after some failures.");
        // Further reads at this point will succeed since the stream has the good block locations.
        // So, force the block locations on this stream to be refreshed from bad info.
        // When reading again, it should start from a fresh failure count, since
        // we're starting a new operation on the user level.
        doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires)).when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
        is.openInfo(true);
        // Seek to beginning forces a reopen of the BlockReader - otherwise it'll
        // just keep reading on the existing stream and the fact that we've poisoned
        // the block info won't do anything.
        is.seek(0);
        IOUtils.readFully(is, buf, 0, buf.length);
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) IOException(java.io.IOException) IOUtils(org.apache.hadoop.io.IOUtils) FileSystem(org.apache.hadoop.fs.FileSystem) Test(org.junit.Test)
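The FailNTimesAnswer helper referenced above is defined elsewhere in TestDFSClientRetries. A minimal sketch of its shape, assuming the helper simply fails the first N getBlockLocations calls outright before delegating to the real (un-spied) NameNode; the actual Hadoop helper instead returns doctored LocatedBlocks so the failure surfaces at read time (Answer, InvocationOnMock, and LocatedBlocks are the Mockito and HDFS types the test already imports):

// Sketch only: fail the first N calls, then delegate to the real NameNode.
private static class FailNTimesAnswer implements Answer<LocatedBlocks> {
    private final NamenodeProtocols realNN;
    private int failuresLeft;

    FailNTimesAnswer(NamenodeProtocols realNN, int timesToFail) {
        this.realNN = realNN;
        this.failuresLeft = timesToFail;
    }

    @Override
    public LocatedBlocks answer(InvocationOnMock invocation) throws IOException {
        if (failuresLeft-- > 0) {
            throw new IOException("Injected failure");
        }
        // After N failures, forward the call to the un-spied NameNode.
        Object[] args = invocation.getArguments();
        return realNN.getBlockLocations((String) args[0], (Long) args[1], (Long) args[2]);
    }
}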

Example 2 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

From the class TestDFSClientRetries, method testLeaseRenewSocketTimeout.

/**
   * Test DFSClient can continue to function after renewLease RPC
   * receives SocketTimeoutException.
   */
@Test
public void testLeaseRenewSocketTimeout() throws Exception {
    String file1 = "/testFile1";
    String file2 = "/testFile2";
    // Set short retry timeouts so this test runs faster
    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
    conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
        cluster.waitActive();
        NamenodeProtocols spyNN = spy(cluster.getNameNodeRpc());
        Mockito.doThrow(new SocketTimeoutException()).when(spyNN).renewLease(Mockito.anyString());
        DFSClient client = new DFSClient(null, spyNN, conf, null);
        // Get hold of the lease renewer instance used by the client
        LeaseRenewer leaseRenewer = client.getLeaseRenewer();
        leaseRenewer.setRenewalTime(100);
        OutputStream out1 = client.create(file1, false);
        Mockito.verify(spyNN, timeout(10000).times(1)).renewLease(Mockito.anyString());
        verifyEmptyLease(leaseRenewer);
        try {
            out1.write(new byte[256]);
            fail("existing output stream should be aborted");
        } catch (IOException e) {
            // Expected: the stream is aborted once lease renewal has failed.
        }
        // Verify DFSClient can do read operation after renewLease aborted.
        client.exists(file2);
        // Verify DFSClient can do write operation after renewLease no longer
        // throws SocketTimeoutException.
        Mockito.doNothing().when(spyNN).renewLease(Mockito.anyString());
        leaseRenewer = client.getLeaseRenewer();
        leaseRenewer.setRenewalTime(100);
        OutputStream out2 = client.create(file2, false);
        Mockito.verify(spyNN, timeout(10000).times(2)).renewLease(Mockito.anyString());
        out2.write(new byte[256]);
        out2.close();
        verifyEmptyLease(leaseRenewer);
    } finally {
        cluster.shutdown();
    }
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) SocketTimeoutException(java.net.SocketTimeoutException) LeaseRenewer(org.apache.hadoop.hdfs.client.impl.LeaseRenewer) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) OutputStream(java.io.OutputStream) Matchers.anyString(org.mockito.Matchers.anyString) IOException(java.io.IOException) Test(org.junit.Test)
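verifyEmptyLease is another private helper from the same test class; it waits for the renewer to drop the aborted client. A plausible sketch, assuming LeaseRenewer exposes the isEmpty() check the test relies on:

private void verifyEmptyLease(LeaseRenewer leaseRenewer) throws Exception {
    // Poll for up to ten seconds: once the failed DFSClient is removed,
    // the renewer has nothing left to renew.
    int sleepCount = 0;
    while (!leaseRenewer.isEmpty() && sleepCount++ < 20) {
        Thread.sleep(500);
    }
    assertTrue("LeaseRenewer should have no clients left", leaseRenewer.isEmpty());
}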

Example 3 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

From the class TestRequestHedgingProxyProvider, method testHedgingWhenOneFails.

@Test
public void testHedgingWhenOneFails() throws Exception {
    final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {

        @Override
        public long[] answer(InvocationOnMock invocation) throws Throwable {
            Thread.sleep(1000);
            return new long[] { 1 };
        }
    });
    final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
    RequestHedgingProxyProvider<NamenodeProtocols> provider = new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class, createFactory(badMock, goodMock));
    long[] stats = provider.getProxy().proxy.getStats();
    Assert.assertEquals(1, stats.length);
    Mockito.verify(badMock).getStats();
    Mockito.verify(goodMock).getStats();
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) InvocationOnMock(org.mockito.invocation.InvocationOnMock) IOException(java.io.IOException) Test(org.junit.Test)
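createFactory, used by all of the RequestHedgingProxyProvider tests here, is a private helper that feeds the mocks to the provider in place of real NameNode proxies. A sketch of that idea, assuming the HAProxyFactory interface with its two createProxy overloads (the exact signatures may differ across Hadoop versions):

private HAProxyFactory<NamenodeProtocols> createFactory(
        NamenodeProtocols... protos) {
    final Iterator<NamenodeProtocols> iterator = Arrays.asList(protos).iterator();
    return new HAProxyFactory<NamenodeProtocols>() {
        @Override
        public NamenodeProtocols createProxy(Configuration conf,
                InetSocketAddress nnAddr, Class<NamenodeProtocols> xface,
                UserGroupInformation ugi, boolean withRetries,
                AtomicBoolean fallbackToSimpleAuth) {
            // Hand out the supplied mocks in order, one per NameNode address.
            return iterator.next();
        }

        @Override
        public NamenodeProtocols createProxy(Configuration conf,
                InetSocketAddress nnAddr, Class<NamenodeProtocols> xface,
                UserGroupInformation ugi, boolean withRetries) {
            return iterator.next();
        }
    };
}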

Example 4 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

From the class TestRequestHedgingProxyProvider, method testHedgingWhenBothFail.

@Test
public void testHedgingWhenBothFail() throws Exception {
    NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
    NamenodeProtocols worseMock = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(worseMock.getStats()).thenThrow(new IOException("Worse mock !!"));
    RequestHedgingProxyProvider<NamenodeProtocols> provider = new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class, createFactory(badMock, worseMock));
    try {
        provider.getProxy().proxy.getStats();
        Assert.fail("Should fail since both namenodes throw IOException !!");
    } catch (Exception e) {
        Assert.assertTrue(e instanceof MultiException);
    }
    Mockito.verify(badMock).getStats();
    Mockito.verify(worseMock).getStats();
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) IOException(java.io.IOException) MultiException(org.apache.hadoop.io.retry.MultiException) URISyntaxException(java.net.URISyntaxException) ConnectException(java.net.ConnectException) StandbyException(org.apache.hadoop.ipc.StandbyException) EOFException(java.io.EOFException) RemoteException(org.apache.hadoop.ipc.RemoteException) FileNotFoundException(java.io.FileNotFoundException) Test(org.junit.Test)

Example 5 with NamenodeProtocols

Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in project hadoop by apache.

From the class TestRequestHedgingProxyProvider, method testHedgingWhenConnectAndEOFException.

@Test
public void testHedgingWhenConnectAndEOFException() throws Exception {
    NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(active.getStats()).thenThrow(new EOFException());
    NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(standby.getStats()).thenThrow(new ConnectException());
    RequestHedgingProxyProvider<NamenodeProtocols> provider = new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class, createFactory(active, standby));
    try {
        provider.getProxy().proxy.getStats();
        Assert.fail("Should fail since both active and standby namenodes throw" + " Exceptions!");
    } catch (MultiException me) {
        for (Exception ex : me.getExceptions().values()) {
            if (!(ex instanceof ConnectException) && !(ex instanceof EOFException)) {
                Assert.fail("Unexpected Exception " + ex.getMessage());
            }
        }
    }
    Mockito.verify(active).getStats();
    Mockito.verify(standby).getStats();
}
Also used : NamenodeProtocols(org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols) EOFException(java.io.EOFException) MultiException(org.apache.hadoop.io.retry.MultiException) URISyntaxException(java.net.URISyntaxException) ConnectException(java.net.ConnectException) StandbyException(org.apache.hadoop.ipc.StandbyException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) FileNotFoundException(java.io.FileNotFoundException) Test(org.junit.Test)
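Outside of tests, RequestHedgingProxyProvider is selected through client configuration rather than a hand-built factory. A typical wiring in code, where the nameservice ID "mycluster" is a placeholder (in practice this setting usually lives in hdfs-site.xml):

// Hypothetical setup: "mycluster" stands in for a real HA nameservice ID.
Configuration conf = new HdfsConfiguration();
conf.set("dfs.client.failover.proxy.provider.mycluster",
        RequestHedgingProxyProvider.class.getName());
// Clients built from this configuration invoke the configured NameNodes
// concurrently and keep the proxy that answers successfully (the active).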

Aggregations

NamenodeProtocols (org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols): 54 uses
Test (org.junit.Test): 45 uses
IOException (java.io.IOException): 24 uses
Configuration (org.apache.hadoop.conf.Configuration): 21 uses
Path (org.apache.hadoop.fs.Path): 19 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 16 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 15 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 12 uses
RemoteException (org.apache.hadoop.ipc.RemoteException): 10 uses
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 9 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 9 uses
File (java.io.File): 8 uses
FileNotFoundException (java.io.FileNotFoundException): 8 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8 uses
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 7 uses
StandbyException (org.apache.hadoop.ipc.StandbyException): 7 uses
EOFException (java.io.EOFException): 6 uses
ConnectException (java.net.ConnectException): 6 uses
URISyntaxException (java.net.URISyntaxException): 6 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 6 uses