use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
the class TestBPOfferService method testReportBadBlocksWhenNNThrowsStandbyException.
/**
 * Verifies that the reportBadBlocks request is not re-added to
 * {@link BPServiceActor#bpThreadEnqueue} when the standby NameNode
 * rejects it with a {@link StandbyException}.
 * @throws Exception
 */
@Test
public void testReportBadBlocksWhenNNThrowsStandbyException() throws Exception {
  BPOfferService bpos = setupBPOSForNNs(mockNN1, mockNN2);
  bpos.start();
  try {
    waitForInitialization(bpos);
    // Should start with neither NN as active.
    assertNull(bpos.getActiveNN());
    // Have NN1 claim active at txid 1.
    mockHaStatuses[0] = new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1);
    bpos.triggerHeartbeatForTests();
    // Now mockNN1 is acting as the active NameNode and mockNN2 as the standby.
    assertSame(mockNN1, bpos.getActiveNN());
    // Do nothing when reportBadBlocks is called on the active NameNode.
    Mockito.doNothing().when(mockNN1)
        .reportBadBlocks(Mockito.any(LocatedBlock[].class));
    RemoteException re = new RemoteException(StandbyException.class.getName(),
        "Operation category WRITE is not supported in state standby",
        RpcErrorCodeProto.ERROR_APPLICATION);
    // Throw a StandbyException wrapped in a RemoteException when
    // reportBadBlocks is called on the standby NameNode.
    Mockito.doThrow(re).when(mockNN2)
        .reportBadBlocks(Mockito.any(LocatedBlock[].class));
    bpos.reportBadBlocks(FAKE_BLOCK,
        mockFSDataset.getVolume(FAKE_BLOCK).getStorageID(),
        mockFSDataset.getVolume(FAKE_BLOCK).getStorageType());
    // Send a heartbeat so that the BPServiceActor reports the bad block to
    // the NameNodes.
    bpos.triggerHeartbeatForTests();
    Mockito.verify(mockNN2, Mockito.times(1))
        .reportBadBlocks(Mockito.any(LocatedBlock[].class));
    // Trigger another heartbeat; this would send reportBadBlocks again if
    // the request were still present in the queue.
    bpos.triggerHeartbeatForTests();
    Mockito.verify(mockNN2, Mockito.times(1))
        .reportBadBlocks(Mockito.any(LocatedBlock[].class));
  } finally {
    bpos.stop();
    bpos.join();
  }
}
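The test relies on RemoteException carrying the server-side exception class name. As a minimal sketch (not part of the test), this is how a caller can detect a wrapped StandbyException; the helper name is illustrative, and it assumes imports of java.io.IOException, org.apache.hadoop.ipc.RemoteException, and org.apache.hadoop.ipc.StandbyException:

static boolean isStandbyRejection(IOException e) {
  if (e instanceof RemoteException) {
    // unwrapRemoteException(StandbyException.class) re-materializes the
    // remote exception as a StandbyException when the class name matches;
    // otherwise it returns the original RemoteException.
    IOException unwrapped =
        ((RemoteException) e).unwrapRemoteException(StandbyException.class);
    return unwrapped instanceof StandbyException;
  }
  return false;
}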
use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
the class FSXAttrBaseTest method testRemoveXAttrPermissions.
/**
 * removexattr tests. Tests that removexattr throws an exception if any of
 * the following are true:
 * - the requested xattr doesn't exist
 * - the caller specifies an unknown namespace
 * - the caller doesn't have access to the namespace
 * - the caller doesn't have permission to get the value of the xattr
 * - the caller does not have "execute" (scan) access to the parent directory
 * - the caller has only read access to the owning directory
 * - the caller has only execute access to the owning directory and execute
 *   access to the actual entity
 * - the caller does not have both execute access to the owning directory and
 *   write access to the actual entity (having both is sufficient)
 */
@Test(timeout = 120000)
public void testRemoveXAttrPermissions() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
  // Removing the same xattr twice: the second call must fail.
  try {
    fs.removeXAttr(path, name2);
    fs.removeXAttr(path, name2);
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("No matching attributes found", e);
  }
  /* Unknown namespace should throw an exception. */
  final String expectedExceptionString = "An XAttr name must be prefixed"
      + " with user/trusted/security/system/raw, followed by a '.'";
  try {
    fs.removeXAttr(path, "wackynamespace.foo");
    Assert.fail("expected IOException");
  } catch (RemoteException e) {
    assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
        HadoopIllegalArgumentException.class.getCanonicalName());
    GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
  } catch (HadoopIllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
  }
  /*
   * The 'trusted' namespace should not be accessible and should throw an
   * exception.
   */
  final UserGroupInformation user =
      UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
  fs.setXAttr(path, "trusted.foo", "1234".getBytes());
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(path, "trusted.foo");
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
  } finally {
    fs.removeXAttr(path, "trusted.foo");
  }
  /*
   * Test that an exception is thrown if the caller doesn't have permission
   * to get the value of the xattr.
   */
  /* Set access so that only the owner has access. */
  fs.setPermission(path, new FsPermission((short) 0700));
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(path, name1);
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
  /*
   * The caller must have "execute" (scan) access to the parent directory.
   */
  final Path childDir = new Path(path, "child" + pathCount);
  /* Set access to the parent so that only the owner has access. */
  FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short) 0700));
  fs.setXAttr(childDir, name1, "1234".getBytes());
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(childDir, name1);
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
  /* Check that read access to the owning directory is not good enough. */
  fs.setPermission(path, new FsPermission((short) 0704));
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(childDir, name1);
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
  /*
   * Check that execute access to the owning directory and scan access to
   * the actual entity with extended attributes is not good enough.
   */
  fs.setPermission(path, new FsPermission((short) 0701));
  fs.setPermission(childDir, new FsPermission((short) 0701));
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(childDir, name1);
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
  /*
   * Check that execute access to the owning directory and write access to
   * the actual entity with extended attributes is good enough.
   */
  fs.setPermission(path, new FsPermission((short) 0701));
  fs.setPermission(childDir, new FsPermission((short) 0706));
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final FileSystem userFs = dfsCluster.getFileSystem();
      userFs.removeXAttr(childDir, name1);
      return null;
    }
  });
}
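For contrast with the failure cases above, a minimal sketch of a permitted xattr round trip in the user namespace (the path and attribute name are illustrative; assumes an initialized FileSystem and imports from org.apache.hadoop.fs and java.io):

void xattrRoundTrip(FileSystem fs) throws IOException {
  Path p = new Path("/tmp/xattr-demo");  // illustrative path
  fs.mkdirs(p);
  // XAttr names are '<namespace>.<name>', with the namespace being one of
  // user/trusted/security/system/raw.
  fs.setXAttr(p, "user.demo", "value".getBytes("UTF-8"));
  byte[] v = fs.getXAttr(p, "user.demo");  // round-trips the bytes
  fs.removeXAttr(p, "user.demo");
  // A second removeXAttr of the same name would throw an IOException,
  // as exercised at the start of the test above.
}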
use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
the class TestDiskspaceQuotaUpdate method testTruncateOverQuota.
/**
 * Tests that a truncate that fails with a quota violation neither marks the
 * file as under construction nor leaves a dangling lease.
 */
@Test(timeout = 60000)
public void testTruncateOverQuota() throws Exception {
  final Path dir = getParent(GenericTestUtils.getMethodName());
  final Path file = new Path(dir, "file");
  // create a partial block file
  getDFS().mkdirs(dir);
  DFSTestUtil.createFile(getDFS(), file, BLOCKSIZE / 2, REPLICATION, seed);
  // lower the quota to cause an exception when truncating the partial block
  getDFS().setQuota(dir, Long.MAX_VALUE - 1, 1);
  final INodeDirectory dirNode =
      getFSDirectory().getINode4Write(dir.toString()).asDirectory();
  final long spaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  try {
    getDFS().truncate(file, BLOCKSIZE / 2 - 1);
    Assert.fail("truncate didn't fail");
  } catch (RemoteException e) {
    assertTrue(e.getClassName().contains("DSQuotaExceededException"));
  }
  // check that the file exists, isn't UC, and has no dangling lease
  LeaseManager lm = cluster.getNamesystem().getLeaseManager();
  INodeFile inode = getFSDirectory().getINode(file.toString()).asFile();
  Assert.assertNotNull(inode);
  Assert.assertFalse("should not be UC", inode.isUnderConstruction());
  Assert.assertNull("should not have a lease", lm.getLease(inode));
  // make sure the quota usage is unchanged
  final long newSpaceUsed = dirNode.getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getStorageSpace();
  assertEquals(spaceUsed, newSpaceUsed);
  // make sure the edits aren't corrupted
  getDFS().recoverLease(file);
  cluster.restartNameNode(true);
}
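The catch block matches the quota violation by class-name substring; an equivalent, more strongly typed check unwraps the RemoteException instead. A sketch, assuming org.apache.hadoop.hdfs.protocol.DSQuotaExceededException is imported:

try {
  getDFS().truncate(file, BLOCKSIZE / 2 - 1);
  Assert.fail("truncate didn't fail");
} catch (RemoteException e) {
  // Re-materializes the remote DSQuotaExceededException when the class
  // name matches; otherwise the RemoteException itself comes back.
  IOException unwrapped =
      e.unwrapRemoteException(DSQuotaExceededException.class);
  assertTrue(unwrapped instanceof DSQuotaExceededException);
}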
use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
the class TestHASafeMode method testIsInSafemode.
/**
 * DFS#isInSafeMode should check the active NN's safemode in an HA-enabled
 * cluster (HDFS-3507).
 *
 * @throws Exception
 */
@Test
public void testIsInSafemode() throws Exception {
  // Check the standby NN without client failover.
  NameNode nn2 = cluster.getNameNode(1);
  assertTrue("nn2 should be in standby state", nn2.isStandbyState());
  InetSocketAddress nameNodeAddress = nn2.getNameNodeAddress();
  Configuration conf = new Configuration();
  DistributedFileSystem dfs = new DistributedFileSystem();
  try {
    dfs.initialize(URI.create("hdfs://" + nameNodeAddress.getHostName()
        + ":" + nameNodeAddress.getPort()), conf);
    dfs.isInSafeMode();
    fail("The standby NN should throw an exception for isInSafeMode");
  } catch (IOException e) {
    if (e instanceof RemoteException) {
      assertEquals("RPC error code should indicate an application failure.",
          RpcErrorCodeProto.ERROR_APPLICATION,
          ((RemoteException) e).getErrorCode());
      IOException sbException = ((RemoteException) e).unwrapRemoteException();
      assertTrue("The standby NN should not support isInSafeMode",
          sbException instanceof StandbyException);
    } else {
      throw e;
    }
  } finally {
    if (null != dfs) {
      dfs.close();
    }
  }
  // Check with client failover.
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  DistributedFileSystem dfsWithFailOver = (DistributedFileSystem) fs;
  assertTrue("ANN should be in SafeMode", dfsWithFailOver.isInSafeMode());
  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
  assertFalse("ANN should be out of SafeMode", dfsWithFailOver.isInSafeMode());
}
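Beyond the wrapped class name, the test also asserts the RPC error code. A sketch of the same inspection in client code (names as in the test; ERROR_APPLICATION distinguishes a server-side application exception from an RPC-layer failure):

try {
  dfs.isInSafeMode();
} catch (RemoteException re) {
  if (re.getErrorCode() == RpcErrorCodeProto.ERROR_APPLICATION
      && StandbyException.class.getName().equals(re.getClassName())) {
    // The standby NN rejected the operation; retry against the other NN
    // or let the configured failover proxy handle it.
  }
}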
use of org.apache.hadoop.ipc.RemoteException in project hadoop by apache.
the class TestNNHealthCheck method doNNHealthCheckTest.
private void doNNHealthCheckTest() throws IOException {
  MockNameNodeResourceChecker mockResourceChecker =
      new MockNameNodeResourceChecker(conf);
  cluster.getNameNode(0).getNamesystem()
      .setNNResourceChecker(mockResourceChecker);
  NNHAServiceTarget haTarget = new NNHAServiceTarget(conf,
      DFSUtil.getNamenodeNameServiceId(conf), "nn1");
  final String expectedTargetString;
  if (conf.get(DFS_NAMENODE_LIFELINE_RPC_ADDRESS_KEY + "."
      + DFSUtil.getNamenodeNameServiceId(conf) + ".nn1") != null) {
    expectedTargetString = haTarget.getHealthMonitorAddress().toString();
  } else {
    expectedTargetString = haTarget.getAddress().toString();
  }
  assertTrue("Expected haTarget " + haTarget + " containing "
      + expectedTargetString,
      haTarget.toString().contains(expectedTargetString));
  HAServiceProtocol rpc = haTarget.getHealthMonitorProxy(conf,
      conf.getInt(HA_HM_RPC_TIMEOUT_KEY, HA_HM_RPC_TIMEOUT_DEFAULT));
  // Should not throw an error, which indicates the NN is healthy.
  rpc.monitorHealth();
  mockResourceChecker.setResourcesAvailable(false);
  try {
    // Should throw an error - the NN is unhealthy.
    rpc.monitorHealth();
    fail("Should not have succeeded in calling monitorHealth");
  } catch (HealthCheckFailedException hcfe) {
    GenericTestUtils.assertExceptionContains(
        "The NameNode has no resources available", hcfe);
  } catch (RemoteException re) {
    GenericTestUtils.assertExceptionContains(
        "The NameNode has no resources available",
        re.unwrapRemoteException(HealthCheckFailedException.class));
  }
}
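The two catch branches exist because the health-check failure can surface either as a direct HealthCheckFailedException or wrapped in a RemoteException. A sketch of normalizing both forms to the typed exception (a hypothetical helper, not from the test; assumes imports from org.apache.hadoop.ha and org.apache.hadoop.ipc):

static void checkHealth(HAServiceProtocol rpc) throws IOException {
  try {
    rpc.monitorHealth();
  } catch (RemoteException re) {
    // Unwraps to HealthCheckFailedException when the remote class name
    // matches; otherwise the RemoteException itself is rethrown.
    throw re.unwrapRemoteException(HealthCheckFailedException.class);
  }
}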