Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in the Apache Hadoop project.
From the class TestRequestHedgingProxyProvider, method testHedgingWhenConnectException.
@Test
public void testHedgingWhenConnectException() throws Exception {
    // The active NameNode fails at the connection level.
    NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(active.getStats()).thenThrow(new ConnectException());
    // The standby NameNode rejects the call with a wrapped StandbyException.
    NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
    Mockito.when(standby.getStats()).thenThrow(
        new RemoteException("org.apache.hadoop.ipc.StandbyException",
            "Standby NameNode"));
    RequestHedgingProxyProvider<NamenodeProtocols> provider =
        new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
            createFactory(active, standby));
    try {
        provider.getProxy().proxy.getStats();
        Assert.fail("Should fail since the active namenode throws"
            + " ConnectException!");
    } catch (MultiException me) {
        // Every exception gathered by the hedge must be either the standby's
        // rejection or the active node's connection failure.
        for (Exception ex : me.getExceptions().values()) {
            if (ex instanceof RemoteException) {
                Exception unwrapped =
                    ((RemoteException) ex).unwrapRemoteException();
                Assert.assertTrue(
                    "Unexpected RemoteException: " + unwrapped.getMessage(),
                    unwrapped instanceof StandbyException);
            } else {
                Assert.assertTrue(ex instanceof ConnectException);
            }
        }
    }
    // Both proxies must have been tried exactly once.
    Mockito.verify(active).getStats();
    Mockito.verify(standby).getStats();
}
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in the Apache Hadoop project.
From the class TestRequestHedgingProxyProvider, method testPerformFailoverWith3Proxies.
// Verifies hedged failover across three proxies: after a success only the
// known-good proxy is re-invoked; when it starts failing, performFailover()
// makes the hedge fan out to the remaining proxies. The shared counter pins
// the exact number of getStats() calls at each step.
@Test
public void testPerformFailoverWith3Proxies() throws Exception {
// Register a third NameNode (nn3) for this nameservice.
conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2,nn3");
conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3", "machine3.foo.bar:9820");
// Total getStats() invocations across all three mocks.
final AtomicInteger counter = new AtomicInteger(0);
// isGood[0] selects which mock currently succeeds: 1=good, 2=bad, 3=worse.
final int[] isGood = { 1 };
final NamenodeProtocols goodMock = Mockito.mock(NamenodeProtocols.class);
Mockito.when(goodMock.getStats()).thenAnswer(new Answer<long[]>() {
@Override
public long[] answer(InvocationOnMock invocation) throws Throwable {
counter.incrementAndGet();
if (isGood[0] == 1) {
// Sleep so the hedge has time to invoke the losing proxies too.
Thread.sleep(1000);
return new long[] { 1 };
}
throw new IOException("Was Good mock !!");
}
});
final NamenodeProtocols badMock = Mockito.mock(NamenodeProtocols.class);
Mockito.when(badMock.getStats()).thenAnswer(new Answer<long[]>() {
@Override
public long[] answer(InvocationOnMock invocation) throws Throwable {
counter.incrementAndGet();
if (isGood[0] == 2) {
Thread.sleep(1000);
return new long[] { 2 };
}
throw new IOException("Bad mock !!");
}
});
final NamenodeProtocols worseMock = Mockito.mock(NamenodeProtocols.class);
Mockito.when(worseMock.getStats()).thenAnswer(new Answer<long[]>() {
@Override
public long[] answer(InvocationOnMock invocation) throws Throwable {
counter.incrementAndGet();
if (isGood[0] == 3) {
Thread.sleep(1000);
return new long[] { 3 };
}
throw new IOException("Worse mock !!");
}
});
RequestHedgingProxyProvider<NamenodeProtocols> provider = new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class, createFactory(goodMock, badMock, worseMock));
// First call hedges all three proxies; goodMock (value 1) wins.
long[] stats = provider.getProxy().proxy.getStats();
Assert.assertTrue(stats.length == 1);
Assert.assertEquals(1, stats[0]);
Assert.assertEquals(3, counter.get());
Mockito.verify(badMock).getStats();
Mockito.verify(goodMock).getStats();
Mockito.verify(worseMock).getStats();
stats = provider.getProxy().proxy.getStats();
Assert.assertTrue(stats.length == 1);
Assert.assertEquals(1, stats[0]);
// Ensure only the previously successful proxy is invoked.
Mockito.verifyNoMoreInteractions(badMock);
Mockito.verifyNoMoreInteractions(worseMock);
Assert.assertEquals(4, counter.get());
// Flip to standby.. so now this should fail
isGood[0] = 2;
try {
provider.getProxy().proxy.getStats();
Assert.fail("Should fail since previously successful proxy now fails ");
} catch (Exception ex) {
Assert.assertTrue(ex instanceof IOException);
}
// Only the cached good proxy was tried (and failed): one more call.
Assert.assertEquals(5, counter.get());
provider.performFailover(provider.getProxy().proxy);
stats = provider.getProxy().proxy.getStats();
Assert.assertTrue(stats.length == 1);
Assert.assertEquals(2, stats[0]);
// Counter updates twice since both remaining proxies are tried on failure.
Assert.assertEquals(7, counter.get());
stats = provider.getProxy().proxy.getStats();
Assert.assertTrue(stats.length == 1);
Assert.assertEquals(2, stats[0]);
// Counter updates only once now (badMock is the cached winner).
Assert.assertEquals(8, counter.get());
// Flip to the other standby.. so now this should fail
isGood[0] = 3;
try {
provider.getProxy().proxy.getStats();
Assert.fail("Should fail since previously successful proxy now fails ");
} catch (Exception ex) {
Assert.assertTrue(ex instanceof IOException);
}
// Counter should update only 1 time (only the cached proxy is tried).
Assert.assertEquals(9, counter.get());
provider.performFailover(provider.getProxy().proxy);
stats = provider.getProxy().proxy.getStats();
Assert.assertTrue(stats.length == 1);
// Ensure the correct proxy (worseMock, value 3) was called.
Assert.assertEquals(3, stats[0]);
// Counter updates twice since both remaining proxies are tried on failure.
Assert.assertEquals(11, counter.get());
stats = provider.getProxy().proxy.getStats();
Assert.assertTrue(stats.length == 1);
Assert.assertEquals(3, stats[0]);
// Counter updates only once now (worseMock is the cached winner).
Assert.assertEquals(12, counter.get());
}
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in the Apache Hadoop project.
From the class TestOpenFilesWithSnapshot, method testOpenFilesWithRename.
@Test
public void testOpenFilesWithRename() throws Exception {
    // Leave files open (aborted writes) under the snapshot root.
    Path snapshotRoot = new Path("/test");
    doWriteAndAbort(fs, snapshotRoot);
    // check for zero sized blocks
    Path emptyBlockFile = new Path("/test/test/test4");
    fs.create(emptyBlockFile);
    NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
    String dfsClientName = fs.getClient().getClientName();
    // Allocate one empty block for the open file via the NN RPC interface.
    rpcServer.addBlock(emptyBlockFile.toString(), dfsClientName, null, null,
        HdfsConstants.GRANDFATHER_INODE_ID, null, null);
    fs.createSnapshot(snapshotRoot, "s2");
    // Rename then delete the directory that still holds open files.
    fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
    fs.delete(new Path("/test/test-renamed"), true);
    // The NameNode must come back up cleanly from the resulting edit log.
    restartNameNode();
}
Use of org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols in the Apache Hadoop project.
From the class TestNetworkTopology, method testInvalidNetworkTopologiesNotCachedInHdfs.
@Test(timeout = 180000)
public void testInvalidNetworkTopologiesNotCachedInHdfs() throws Exception {
    // start a cluster
    Configuration hdfsConf = new HdfsConfiguration();
    MiniDFSCluster dfsCluster = null;
    try {
        // Deliberately inconsistent rack depths: /a/b vs /c.
        String[] racks = { "/a/b", "/c" };
        String[] hosts = { "foo1.example.com", "foo2.example.com" };
        dfsCluster = new MiniDFSCluster.Builder(hdfsConf).numDataNodes(2)
            .racks(racks).hosts(hosts).build();
        dfsCluster.waitActive();
        NamenodeProtocols nn = dfsCluster.getNameNodeRpc();
        Assert.assertNotNull(nn);
        // Wait for one DataNode to register; the other cannot register
        // because of the rack-depth mismatch.
        DatanodeInfo[] live;
        for (;;) {
            live = nn.getDatanodeReport(DatanodeReportType.LIVE);
            Assert.assertFalse(live.length == 2);
            if (live.length == 1) {
                break;
            }
            Thread.sleep(1000);
        }
        // Give the rejected node the same network topology as the node
        // that registered successfully.
        int validIdx = live[0].getHostName().equals(hosts[0]) ? 0 : 1;
        int invalidIdx = 1 - validIdx;
        StaticMapping.addNodeToRack(hosts[invalidIdx], racks[validIdx]);
        LOG.info("datanode " + validIdx + " came up with network location "
            + live[0].getNetworkLocation());
        // Restart the DN with the invalid topology and wait for it to register.
        dfsCluster.restartDataNode(invalidIdx);
        Thread.sleep(5000);
        for (;;) {
            live = nn.getDatanodeReport(DatanodeReportType.LIVE);
            if (live.length == 2) {
                break;
            }
            if (live.length == 0) {
                LOG.info("got no valid DNs");
            } else if (live.length == 1) {
                LOG.info("got one valid DN: " + live[0].getHostName()
                    + " (at " + live[0].getNetworkLocation() + ")");
            }
            Thread.sleep(1000);
        }
        // Both registered nodes must now report the same network location,
        // i.e. the earlier invalid topology was not cached.
        Assert.assertEquals(live[0].getNetworkLocation(),
            live[1].getNetworkLocation());
    } finally {
        if (dfsCluster != null) {
            dfsCluster.shutdown();
        }
    }
}
Aggregations