Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in the Apache Hadoop project.
Class TestDFSClientRetries, method testFailuresArePerOperation.
/**
* This tests that DFSInputStream failures are counted for a given read
* operation, and not over the lifetime of the stream. It is a regression
* test for HDFS-127.
*/
@Test
public void testFailuresArePerOperation() throws Exception {
  long fileSize = 4096;
  Path file = new Path("/testFile");
  // Set short retry timeouts so this test runs faster
  conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
  conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    NamenodeProtocols preSpyNN = cluster.getNameNodeRpc();
    NamenodeProtocols spyNN = spy(preSpyNN);
    DFSClient client = new DFSClient(null, spyNN, conf, null);
    int maxBlockAcquires = client.getConf().getMaxBlockAcquireFailures();
    assertTrue(maxBlockAcquires > 0);
    DFSTestUtil.createFile(fs, file, fileSize, (short) 1, 12345L);
    // If the client will retry maxBlockAcquires times, then failing any
    // more than that number of times should make the operation fail entirely.
    doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires + 1))
        .when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
    try {
      IOUtils.copyBytes(client.open(file.toString()),
          new IOUtils.NullOutputStream(), conf, true);
      fail("Didn't get exception");
    } catch (IOException ioe) {
      DFSClient.LOG.info("Got expected exception", ioe);
    }
    // If we fail exactly that many times, then it should succeed.
    doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires))
        .when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
    IOUtils.copyBytes(client.open(file.toString()),
        new IOUtils.NullOutputStream(), conf, true);
    DFSClient.LOG.info("Starting test case for failure reset");
    // Now the tricky case: if we fail a few times on one read, then succeed,
    // then fail some more on another read, it shouldn't fail.
    doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires))
        .when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
    DFSInputStream is = client.open(file.toString());
    byte[] buf = new byte[10];
    IOUtils.readFully(is, buf, 0, buf.length);
    DFSClient.LOG.info("First read successful after some failures.");
    // Further reads at this point will succeed since the stream already has
    // the good block locations. So, force the block locations on this stream
    // to be refreshed from bad info. When reading again, it should start from
    // a fresh failure count, since we're starting a new operation at the
    // user level.
    doAnswer(new FailNTimesAnswer(preSpyNN, maxBlockAcquires))
        .when(spyNN).getBlockLocations(anyString(), anyLong(), anyLong());
    is.openInfo(true);
    // Seeking to the beginning forces a reopen of the BlockReader - otherwise
    // it'll just keep reading on the existing stream and the fact that we've
    // poisoned the block info won't do anything.
    is.seek(0);
    IOUtils.readFully(is, buf, 0, buf.length);
  } finally {
    cluster.shutdown();
  }
}
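FailNTimesAnswer is a private helper inside TestDFSClientRetries that is not shown in this snippet. A minimal sketch of the idea, assuming a hypothetical makeBadBlockList helper that rewrites the first block's locations to point at a datanode that does not exist:

private static class FailNTimesAnswer implements Answer<LocatedBlocks> {
  private final NamenodeProtocols realNN;
  private int failuresLeft;

  FailNTimesAnswer(NamenodeProtocols realNN, int timesToFail) {
    this.realNN = realNN;
    this.failuresLeft = timesToFail;
  }

  @Override
  public LocatedBlocks answer(InvocationOnMock invocation) throws IOException {
    // Fetch the real answer first so the file length and block list are valid.
    Object[] args = invocation.getArguments();
    LocatedBlocks realAnswer = realNN.getBlockLocations(
        (String) args[0], (Long) args[1], (Long) args[2]);
    if (failuresLeft-- > 0) {
      // Poison the locations so the client's read attempt fails and retries.
      return makeBadBlockList(realAnswer); // hypothetical helper
    }
    return realAnswer;
  }
}

Because the failure counter lives in the Answer rather than in the stream, each doAnswer(...) stubbing above starts a fresh budget of injected failures.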
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in the Apache Hadoop project.
Class TestDFSClientRetries, method testLeaseRenewSocketTimeout.
/**
* Test that DFSClient can continue to function after the renewLease RPC
* receives a SocketTimeoutException.
*/
@Test
public void testLeaseRenewSocketTimeout() throws Exception {
  String file1 = "/testFile1";
  String file2 = "/testFile2";
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    NamenodeProtocols spyNN = spy(cluster.getNameNodeRpc());
    Mockito.doThrow(new SocketTimeoutException()).when(spyNN)
        .renewLease(Mockito.anyString());
    DFSClient client = new DFSClient(null, spyNN, conf, null);
    // Get hold of the lease renewer instance used by the client
    LeaseRenewer leaseRenewer = client.getLeaseRenewer();
    leaseRenewer.setRenewalTime(100);
    OutputStream out1 = client.create(file1, false);
    Mockito.verify(spyNN, timeout(10000).times(1))
        .renewLease(Mockito.anyString());
    verifyEmptyLease(leaseRenewer);
    try {
      out1.write(new byte[256]);
      fail("existing output stream should be aborted");
    } catch (IOException e) {
      // expected: the failed lease renewal aborts the open stream
    }
    // Verify DFSClient can do a read operation after renewLease aborted.
    client.exists(file2);
    // Verify DFSClient can do a write operation once renewLease no longer
    // throws SocketTimeoutException.
    Mockito.doNothing().when(spyNN).renewLease(Mockito.anyString());
    leaseRenewer = client.getLeaseRenewer();
    leaseRenewer.setRenewalTime(100);
    OutputStream out2 = client.create(file2, false);
    Mockito.verify(spyNN, timeout(10000).times(2))
        .renewLease(Mockito.anyString());
    out2.write(new byte[256]);
    out2.close();
    verifyEmptyLease(leaseRenewer);
  } finally {
    cluster.shutdown();
  }
}
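verifyEmptyLease is another helper from the same test class, not shown here. A plausible sketch, assuming LeaseRenewer.isEmpty() reports whether the renewer still tracks any open files:

private void verifyEmptyLease(LeaseRenewer leaseRenewer) throws Exception {
  // The renewer empties itself asynchronously once streams are aborted or
  // closed, so poll briefly instead of asserting immediately.
  int retries = 0;
  while (!leaseRenewer.isEmpty() && retries++ < 20) {
    Thread.sleep(500);
  }
  assertTrue("Lease should be empty.", leaseRenewer.isEmpty());
}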
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in the Apache Hadoop project.
Class TestRequestHedgingProxyProvider, method testHedgingWhenOneFails.
@Test
public void testHedgingWhenOneFails() throws Exception {
  final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
    @Override
    public long[] answer(InvocationOnMock invocation) throws Throwable {
      Thread.sleep(1000);
      return new long[] { 1 };
    }
  });
  final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
  RequestHedgingProxyProvider<NamenodeProtocols> provider =
      new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
          createFactory(badMock, goodMock));
  long[] stats = provider.getProxy().proxy.getStats();
  Assert.assertTrue(stats.length == 1);
  Mockito.verify(badMock).getStats();
  Mockito.verify(goodMock).getStats();
}
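createFactory is a test helper that is not shown in these snippets; it hands the mocks to the provider in place of real RPC proxies. A sketch under the assumption that HAProxyFactory's createProxy overloads match recent Hadoop trunk (they may differ across versions):

private HAProxyFactory<NamenodeProtocols> createFactory(
    NamenodeProtocols... protos) {
  final Iterator<NamenodeProtocols> iterator =
      Arrays.asList(protos).iterator();
  return new HAProxyFactory<NamenodeProtocols>() {
    @Override
    public NamenodeProtocols createProxy(Configuration conf,
        InetSocketAddress nnAddr, Class<NamenodeProtocols> xface,
        UserGroupInformation ugi, boolean withRetries,
        AtomicBoolean fallbackToSimpleAuth) throws IOException {
      // Hand out the supplied mocks in order, one per "namenode".
      return iterator.next();
    }

    @Override
    public NamenodeProtocols createProxy(Configuration conf,
        InetSocketAddress nnAddr, Class<NamenodeProtocols> xface,
        UserGroupInformation ugi, boolean withRetries) throws IOException {
      return iterator.next();
    }
  };
}

In the test above, the provider invokes getStats() on both proxies concurrently, discards badMock's immediate IOException, and returns the answer from the slower but successful goodMock.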
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in the Apache Hadoop project.
Class TestRequestHedgingProxyProvider, method testHedgingWhenBothFail.
@Test
public void testHedgingWhenBothFail() throws Exception {
  NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(badMock.getStats()).thenThrow(new IOException("Bad mock !!"));
  NamenodeProtocols worseMock = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(worseMock.getStats()).thenThrow(new IOException("Worse mock !!"));
  RequestHedgingProxyProvider<NamenodeProtocols> provider =
      new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
          createFactory(badMock, worseMock));
  try {
    provider.getProxy().proxy.getStats();
    Assert.fail("Should fail since both namenodes throw IOException !!");
  } catch (Exception e) {
    Assert.assertTrue(e instanceof MultiException);
  }
  Mockito.verify(badMock).getStats();
  Mockito.verify(worseMock).getStats();
}
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in the Apache Hadoop project.
Class TestRequestHedgingProxyProvider, method testHedgingWhenConnectAndEOFException.
@Test
public void testHedgingWhenConnectAndEOFException() throws Exception {
  NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(active.getStats()).thenThrow(new EOFException());
  NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(standby.getStats()).thenThrow(new ConnectException());
  RequestHedgingProxyProvider<NamenodeProtocols> provider =
      new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
          createFactory(active, standby));
  try {
    provider.getProxy().proxy.getStats();
    Assert.fail("Should fail since both active and standby namenodes throw"
        + " Exceptions!");
  } catch (MultiException me) {
    for (Exception ex : me.getExceptions().values()) {
      if (!(ex instanceof ConnectException) && !(ex instanceof EOFException)) {
        Assert.fail("Unexpected Exception " + ex.getMessage());
      }
    }
  }
  Mockito.verify(active).getStats();
  Mockito.verify(standby).getStats();
}
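Outside of these tests, hedging is selected through the client failover proxy provider key for an HA nameservice. A minimal programmatic equivalent, using a hypothetical nameservice ID of "mycluster" (deployments normally set this in hdfs-site.xml rather than in code):

// "mycluster" is a placeholder nameservice ID for illustration.
conf.set("dfs.client.failover.proxy.provider.mycluster",
    "org.apache.hadoop.hdfs.server.namenode.ha.RequestHedgingProxyProvider");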