use of org.apache.hadoop.ipc.StandbyException in project hadoop by apache.
the class FSNamesystem method appendFile.
/**
 * Append to an existing file in the namespace.
 */
LastBlockWithStatus appendFile(String srcArg, String holder,
    String clientMachine, EnumSet<CreateFlag> flag, boolean logRetryCache)
    throws IOException {
  final String operationName = "append";
  boolean newBlock = flag.contains(CreateFlag.NEW_BLOCK);
  if (newBlock) {
    requireEffectiveLayoutVersionForFeature(Feature.APPEND_NEW_BLOCK);
  }
  NameNode.stateChangeLog.debug(
      "DIR* NameSystem.appendFile: src={}, holder={}, clientMachine={}",
      srcArg, holder, clientMachine);
  try {
    boolean skipSync = false;
    LastBlockWithStatus lbs = null;
    final FSPermissionChecker pc = getPermissionChecker();
    checkOperation(OperationCategory.WRITE);
    writeLock();
    try {
      checkOperation(OperationCategory.WRITE);
      checkNameNodeSafeMode("Cannot append to file" + srcArg);
      lbs = FSDirAppendOp.appendFile(this, srcArg, pc, holder, clientMachine,
          newBlock, logRetryCache);
    } catch (StandbyException se) {
      skipSync = true;
      throw se;
    } finally {
      writeUnlock(operationName);
      // There might be transactions logged while trying to recover the lease.
      // They need to be sync'ed even when an exception was thrown.
      if (!skipSync) {
        getEditLog().logSync();
      }
    }
    logAuditEvent(true, operationName, srcArg);
    return lbs;
  } catch (AccessControlException e) {
    logAuditEvent(false, operationName, srcArg);
    throw e;
  }
}
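The StandbyException handled above comes out of checkOperation(OperationCategory.WRITE): the NameNode delegates that check to its current HA state, and the standby state refuses write operations before any edits are logged, which is why the method can set skipSync and skip logSync() in that case. Below is a simplified, illustrative sketch of that behaviour; the enum and class names are condensed stand-ins, not the real Hadoop types.

// Simplified sketch, not the real Hadoop classes: how a standby NameNode turns
// a WRITE request into a StandbyException before any namespace change happens.
import org.apache.hadoop.ipc.StandbyException;

public class StandbyCheckSketch {
  enum OpCategory { READ, WRITE, CHECKPOINT, JOURNAL }

  interface HaStateSketch {
    void checkOperation(OpCategory op) throws StandbyException;
  }

  static class StandbyStateSketch implements HaStateSketch {
    @Override
    public void checkOperation(OpCategory op) throws StandbyException {
      // A standby can serve internal traffic, but never client WRITE operations.
      if (op == OpCategory.WRITE) {
        throw new StandbyException(
            "Operation category " + op + " is not supported in state standby");
      }
    }
  }
}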
use of org.apache.hadoop.ipc.StandbyException in project hadoop by apache.
the class TestBPOfferService method testReportBadBlocksWhenNNThrowsStandbyException.
/**
 * Verifies that a reportBadBlock request is not re-enqueued via
 * {@link BPServiceActor#bpThreadEnqueue} when the standby namenode throws
 * {@link StandbyException}, so the call is not retried on later heartbeats.
 * @throws Exception
 */
@Test
public void testReportBadBlocksWhenNNThrowsStandbyException() throws Exception {
  BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
  bpos.start();
  try {
    waitForInitialization(bpos);
    // Should start with neither NN as active.
    assertNull(bpos.getActiveNN());
    // Have NN1 claim active at txid 1.
    mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
    bpos.triggerHeartbeatForTests();
    // Now mockNN1 is acting as the active namenode and mockNN2 as the standby.
    assertSame(mockNN1, bpos.getActiveNN());
    // Return nothing when the active namenode is asked to reportBadBlocks.
    Mockito.doNothing().when(mockNN1)
        .reportBadBlocks(Mockito.any(LocatedBlock[].class));
    RemoteException re = new RemoteException(StandbyException.class.getName(),
        "Operation category WRITE is not supported in state standby",
        RpcErrorCodeProto.ERROR_APPLICATION);
    // Throw StandbyException wrapped in a RemoteException when the standby NN
    // is asked to reportBadBlocks.
    Mockito.doThrow(re).when(mockNN2)
        .reportBadBlocks(Mockito.any(LocatedBlock[].class));
    bpos.reportBadBlocks(FAKE_BLOCK,
        mockFSDataset.getVolume(FAKE_BLOCK).getStorageID(),
        mockFSDataset.getVolume(FAKE_BLOCK).getStorageType());
    // Send a heartbeat so that the BPServiceActor can report the bad block
    // to the namenode.
    bpos.triggerHeartbeatForTests();
    Mockito.verify(mockNN2, Mockito.times(1))
        .reportBadBlocks(Mockito.any(LocatedBlock[].class));
    // Trigger another heartbeat; this would send reportBadBlocks again if the
    // request were still present in the queue.
    bpos.triggerHeartbeatForTests();
    Mockito.verify(mockNN2, Mockito.times(1))
        .reportBadBlocks(Mockito.any(LocatedBlock[].class));
  } finally {
    bpos.stop();
    bpos.join();
  }
}
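On the wire, a StandbyException raised by the NameNode reaches the DataNode wrapped in a RemoteException, which is exactly what the mock above builds by hand. A minimal sketch of how a caller can recognize that case, using the same unwrapRemoteException() call the other examples on this page rely on (isStandbyFailure is a made-up helper name):

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;

public class StandbyFailureCheck {
  /** Made-up helper: decide whether an RPC failure came from a standby NN. */
  static boolean isStandbyFailure(IOException e) {
    if (e instanceof RemoteException) {
      // unwrapRemoteException() re-creates the server-side exception class named
      // inside the RemoteException when that class is on the client classpath.
      IOException unwrapped = ((RemoteException) e).unwrapRemoteException();
      return unwrapped instanceof StandbyException;
    }
    return e instanceof StandbyException;
  }
}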
use of org.apache.hadoop.ipc.StandbyException in project hadoop by apache.
the class TestDelegationTokensWithHA method testDelegationTokenDuringNNFailover.
/**
 * Test that the correct exception (StandbyException or RetriableException) is
 * thrown during an NN failover.
 */
@Test(timeout = 300000)
public void testDelegationTokenDuringNNFailover() throws Exception {
  EditLogTailer editLogTailer = nn1.getNamesystem().getEditLogTailer();
  // Stop the editLogTailer of nn1.
  editLogTailer.stop();
  Configuration conf =
      (Configuration) Whitebox.getInternalState(editLogTailer, "conf");
  nn1.getNamesystem().setEditLogTailerForTests(
      new EditLogTailerForTest(nn1.getNamesystem(), conf));
  // Create a token.
  final Token<DelegationTokenIdentifier> token =
      getDelegationToken(fs, "JobTracker");
  DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
  byte[] tokenId = token.getIdentifier();
  identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
  // Ensure that it's present in the nn0 secret manager and can
  // be renewed directly from there.
  LOG.info("A valid token should have non-null password, "
      + "and should be renewed successfully");
  assertTrue(null != dtSecretManager.retrievePassword(identifier));
  dtSecretManager.renewToken(token, "JobTracker");
  // Transition nn0 to standby.
  cluster.transitionToStandby(0);
  try {
    cluster.getNameNodeRpc(0).renewDelegationToken(token);
    fail("StandbyException is expected since nn0 is in standby state");
  } catch (StandbyException e) {
    GenericTestUtils.assertExceptionContains(
        HAServiceState.STANDBY.toString(), e);
  }
  new Thread() {
    @Override
    public void run() {
      try {
        cluster.transitionToActive(1);
      } catch (Exception e) {
        LOG.error("Transition nn1 to active failed", e);
      }
    }
  }.start();
  Thread.sleep(1000);
  try {
    nn1.getNamesystem().verifyToken(token.decodeIdentifier(),
        token.getPassword());
    fail("RetriableException/StandbyException is expected since nn1 is in "
        + "transition");
  } catch (IOException e) {
    assertTrue(e instanceof StandbyException || e instanceof RetriableException);
    LOG.info("Got expected exception", e);
  }
  catchup = true;
  synchronized (this) {
    this.notifyAll();
  }
  Configuration clientConf = dfs.getConf();
  doRenewOrCancel(token, clientConf, TokenTestAction.RENEW);
  doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL);
}
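The last part of the test works because the HDFS client retries or fails over when it sees StandbyException or RetriableException rather than surfacing them to the application. The loop below is only a hand-written illustration of that idea under assumed names (TokenRenewCall, renewWithRetries); the real client wires this through its FailoverProxyProvider and RetryPolicy instead of explicit loops.

import java.io.IOException;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.ipc.StandbyException;

public class TokenRenewRetrySketch {
  interface TokenRenewCall {
    long renew() throws IOException; // e.g. a renewDelegationToken RPC
  }

  static long renewWithRetries(TokenRenewCall call, int maxAttempts,
      long sleepMs) throws IOException, InterruptedException {
    IOException last = new IOException("renewal never attempted");
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      try {
        return call.renew();
      } catch (RemoteException re) {
        IOException cause = re.unwrapRemoteException();
        if (cause instanceof StandbyException
            || cause instanceof RetriableException) {
          last = cause;          // NN is standby or still transitioning
          Thread.sleep(sleepMs); // back off, then try again (or fail over)
          continue;
        }
        throw re;
      }
    }
    throw last;
  }
}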
use of org.apache.hadoop.ipc.StandbyException in project hadoop by apache.
the class TestHASafeMode method testIsInSafemode.
/**
 * DFS#isInSafeMode should check the active NN's safemode in an HA-enabled
 * cluster (HDFS-3507).
 *
 * @throws Exception
 */
@Test
public void testIsInSafemode() throws Exception {
  // Check the standby NN directly, without client failover.
  NameNode nn2 = cluster.getNameNode(1);
  assertTrue("nn2 should be in standby state", nn2.isStandbyState());
  InetSocketAddress nameNodeAddress = nn2.getNameNodeAddress();
  Configuration conf = new Configuration();
  DistributedFileSystem dfs = new DistributedFileSystem();
  try {
    dfs.initialize(URI.create("hdfs://" + nameNodeAddress.getHostName() + ":"
        + nameNodeAddress.getPort()), conf);
    dfs.isInSafeMode();
    fail("Standby NN should throw an exception for isInSafeMode");
  } catch (IOException e) {
    if (e instanceof RemoteException) {
      assertEquals("RPC error code should indicate app failure.",
          RpcErrorCodeProto.ERROR_APPLICATION,
          ((RemoteException) e).getErrorCode());
      IOException sbException = ((RemoteException) e).unwrapRemoteException();
      assertTrue("Standby NN should not support isInSafeMode",
          sbException instanceof StandbyException);
    } else {
      throw e;
    }
  } finally {
    if (null != dfs) {
      dfs.close();
    }
  }
  // Check with client failover.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  DistributedFileSystem dfsWithFailOver = (DistributedFileSystem) fs;
  assertTrue("ANN should be in SafeMode", dfsWithFailOver.isInSafeMode());
  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
  assertFalse("ANN should be out of SafeMode", dfsWithFailOver.isInSafeMode());
}
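The first half of the test points a client directly at the standby's address, which is why isInSafeMode() fails with a wrapped StandbyException; in practice a client is given the HA nameservice URI plus a failover proxy provider so the call is answered by whichever NN is active. A rough sketch, with hypothetical nameservice and host names:

// Sketch (hypothetical "mycluster"/host names): an HA-aware client config so
// that DistributedFileSystem#isInSafeMode() is served by the active NN
// instead of failing against a standby.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class HaSafeModeCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "host1:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "host2:8020");
    conf.set("dfs.client.failover.proxy.provider.mycluster",
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");

    try (FileSystem fs = FileSystem.get(URI.create("hdfs://mycluster"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      System.out.println("Active NN in safe mode? " + dfs.isInSafeMode());
    }
  }
}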
use of org.apache.hadoop.ipc.StandbyException in project hadoop by apache.
the class TestRequestHedgingProxyProvider method testHedgingWhenConnectException.
@Test
public void testHedgingWhenConnectException() throws Exception {
  NamenodeProtocols active = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(active.getStats()).thenThrow(new ConnectException());
  NamenodeProtocols standby = Mockito.mock(NamenodeProtocols.class);
  Mockito.when(standby.getStats()).thenThrow(
      new RemoteException("org.apache.hadoop.ipc.StandbyException",
          "Standby NameNode"));
  RequestHedgingProxyProvider<NamenodeProtocols> provider =
      new RequestHedgingProxyProvider<>(conf, nnUri, NamenodeProtocols.class,
          createFactory(active, standby));
  try {
    provider.getProxy().proxy.getStats();
    Assert.fail("Should fail since the active namenode throws"
        + " ConnectException!");
  } catch (MultiException me) {
    for (Exception ex : me.getExceptions().values()) {
      if (ex instanceof RemoteException) {
        Exception rEx = ((RemoteException) ex).unwrapRemoteException();
        Assert.assertTrue("Unexpected RemoteException: " + rEx.getMessage(),
            rEx instanceof StandbyException);
      } else {
        Assert.assertTrue(ex instanceof ConnectException);
      }
    }
  }
  Mockito.verify(active).getStats();
  Mockito.verify(standby).getStats();
}
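To get this hedging behaviour outside of a test, the client's failover proxy provider for the nameservice is pointed at RequestHedgingProxyProvider instead of ConfiguredFailoverProxyProvider; the provider then issues concurrent requests to the configured NNs and keeps the first answer that is not a StandbyException. A hypothetical configuration sketch, reusing the same HA client settings as the safemode sketch above:

import org.apache.hadoop.conf.Configuration;

public class HedgingClientConfig {
  public static Configuration withHedging() {
    Configuration conf = new Configuration();
    conf.set("dfs.nameservices", "mycluster");
    conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "host1:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "host2:8020");
    // Swap in the hedging provider for this nameservice.
    conf.set("dfs.client.failover.proxy.provider.mycluster",
        "org.apache.hadoop.hdfs.server.namenode.ha.RequestHedgingProxyProvider");
    return conf;
  }
}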