Use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
Class BootstrapStandby, method parseConfAndFindOtherNN.
private void parseConfAndFindOtherNN() throws IOException {
  Configuration conf = getConf();
  nsId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(conf, nsId)) {
    throw new HadoopIllegalArgumentException("HA is not enabled for this namenode.");
  }
  nnId = HAUtil.getNameNodeId(conf, nsId);
  NameNode.initializeGenericKeys(conf, nsId, nnId);
  if (!HAUtil.usesSharedEditsDir(conf)) {
    throw new HadoopIllegalArgumentException("Shared edits storage is not enabled for this namenode.");
  }
  remoteNNs = RemoteNameNodeInfo.getRemoteNameNodes(conf, nsId);
  // validate the configured NNs
  List<RemoteNameNodeInfo> remove = new ArrayList<RemoteNameNodeInfo>(remoteNNs.size());
  for (RemoteNameNodeInfo info : remoteNNs) {
    InetSocketAddress address = info.getIpcAddress();
    LOG.info("Found nn: " + info.getNameNodeID() + ", ipc: " + info.getIpcAddress());
    if (address.getPort() == 0 || address.getAddress().isAnyLocalAddress()) {
      LOG.error("Could not determine valid IPC address for other NameNode (" + info.getNameNodeID() + ") , got: " + address);
      remove.add(info);
    }
  }
  // remove any invalid nns
  remoteNNs.removeAll(remove);
  // make sure we have at least one left to read
  Preconditions.checkArgument(!remoteNNs.isEmpty(), "Could not find any valid namenodes!");
  dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
  editUrisToFormat = FSNamesystem.getNamespaceEditsDirs(conf, false);
  sharedEditsUris = FSNamesystem.getSharedEditsDirs(conf);
}
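For context, parseConfAndFindOtherNN throws HadoopIllegalArgumentException when HA or shared edits storage is not configured. Below is a minimal sketch of an HDFS HA configuration that would pass both checks; the nameservice id "mycluster", the namenode ids "nn1"/"nn2", and the hostnames are hypothetical.

// Minimal sketch of an HDFS HA configuration that satisfies the checks above.
// The nameservice id, namenode ids, and hostnames are hypothetical.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class HaConfSketch {
  public static Configuration haEnabledConf() {
    Configuration conf = new Configuration();
    // HAUtil.isHAEnabled() expects a nameservice with more than one namenode id.
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "mycluster");
    conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + ".mycluster", "nn1,nn2");
    conf.set("dfs.namenode.rpc-address.mycluster.nn1", "host1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.mycluster.nn2", "host2.example.com:8020");
    // HAUtil.usesSharedEditsDir() requires a shared edits directory to be set.
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
        "qjournal://jn1.example.com:8485;jn2.example.com:8485;jn3.example.com:8485/mycluster");
    return conf;
  }
}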
Use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
Class DFSZKFailoverController, method create.
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException("HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " + "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  NNHAServiceTarget localTarget = new NNHAServiceTarget(localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
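A caller of create() sees HadoopIllegalArgumentException when HA is not enabled or when the local namenode id cannot be resolved. The sketch below shows how such a caller might handle that; the wrapper class is illustrative only, since the real entry point is DFSZKFailoverController's own main method.

// Illustrative launcher sketch, not the actual ZKFC entry point.
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSZKFailoverController;

public class ZkfcLauncherSketch {
  public static void main(String[] args) {
    try {
      DFSZKFailoverController zkfc =
          DFSZKFailoverController.create(new HdfsConfiguration());
      // zkfc.run(args) would start the failover controller from here.
    } catch (HadoopIllegalArgumentException e) {
      // Thrown when HA is not enabled, or this host is not a configured namenode.
      System.err.println("ZKFC cannot start: " + e.getMessage());
      System.exit(1);
    }
  }
}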
Use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
Class FSXAttrBaseTest, method testRemoveXAttrPermissions.
/**
 * removexattr tests. Test that removexattr throws an exception if any of
 * the following are true:
 * an xattr that was requested doesn't exist
 * the caller specifies an unknown namespace
 * the caller doesn't have access to the namespace
 * the caller doesn't have permission to get the value of the xattr
 * the caller does not have "execute" (scan) access to the parent directory
 * the caller has only read access to the owning directory
 * the caller has only execute access to the owning directory and execute
 * access to the actual entity
 * the caller does not have execute access to the owning directory and write
 * access to the actual entity
 */
@Test(timeout = 120000)
public void testRemoveXAttrPermissions() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
  try {
    fs.removeXAttr(path, name2);
    fs.removeXAttr(path, name2);
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("No matching attributes found", e);
  }
  /* Unknown namespace should throw an exception. */
  final String expectedExceptionString = "An XAttr name must be prefixed " + "with user/trusted/security/system/raw, followed by a '.'";
  try {
    fs.removeXAttr(path, "wackynamespace.foo");
    Assert.fail("expected IOException");
  } catch (RemoteException e) {
    assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
        HadoopIllegalArgumentException.class.getCanonicalName());
    GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
  } catch (HadoopIllegalArgumentException e) {
    GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
  }
  /*
   * The 'trusted' namespace should not be accessible and should throw an
   * exception.
   */
  final UserGroupInformation user =
      UserGroupInformation.createUserForTesting("user", new String[] { "mygroup" });
  fs.setXAttr(path, "trusted.foo", "1234".getBytes());
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(path, "trusted.foo");
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
  } finally {
    fs.removeXAttr(path, "trusted.foo");
  }
  /*
   * Test that an exception is thrown if the caller doesn't have permission to
   * get the value of the xattr.
   */
  /* Set access so that only the owner has access. */
  fs.setPermission(path, new FsPermission((short) 0700));
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(path, name1);
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
  /*
   * The caller must have "execute" (scan) access to the parent directory.
   */
  final Path childDir = new Path(path, "child" + pathCount);
  /* Set access to parent so that only the owner has access. */
  FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short) 0700));
  fs.setXAttr(childDir, name1, "1234".getBytes());
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(childDir, name1);
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
  /* Check that read access to the owning directory is not good enough. */
  fs.setPermission(path, new FsPermission((short) 0704));
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(childDir, name1);
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
  /*
   * Check that execute access to the owning directory and scan access to
   * the actual entity with extended attributes is not good enough.
   */
  fs.setPermission(path, new FsPermission((short) 0701));
  fs.setPermission(childDir, new FsPermission((short) 0701));
  try {
    user.doAs(new PrivilegedExceptionAction<Object>() {
      @Override
      public Object run() throws Exception {
        final FileSystem userFs = dfsCluster.getFileSystem();
        userFs.removeXAttr(childDir, name1);
        return null;
      }
    });
    Assert.fail("expected IOException");
  } catch (IOException e) {
    GenericTestUtils.assertExceptionContains("Permission denied", e);
  }
  /*
   * Check that execute access to the owning directory and write access to
   * the actual entity with extended attributes is good enough.
   */
  fs.setPermission(path, new FsPermission((short) 0701));
  fs.setPermission(childDir, new FsPermission((short) 0706));
  user.doAs(new PrivilegedExceptionAction<Object>() {
    @Override
    public Object run() throws Exception {
      final FileSystem userFs = dfsCluster.getFileSystem();
      userFs.removeXAttr(childDir, name1);
      return null;
    }
  });
}
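The namespace check exercised above applies to every xattr operation: names must be prefixed with user., trusted., security., system., or raw. A minimal hedged sketch (with a hypothetical path) of how client code would see the rejection follows; over RPC the server-side HadoopIllegalArgumentException arrives wrapped in a RemoteException, which is why the test catches both.

// Hedged sketch with a hypothetical path; xattr names need a recognized namespace prefix.
import java.io.IOException;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.ipc.RemoteException;

public class XAttrNamespaceSketch {
  public static void demo(FileSystem fs) throws IOException {
    Path p = new Path("/tmp/xattr-demo");              // hypothetical path
    fs.mkdirs(p);
    fs.setXAttr(p, "user.origin", "demo".getBytes());  // valid: "user" namespace
    try {
      fs.removeXAttr(p, "wackynamespace.origin");      // unknown namespace is rejected
    } catch (HadoopIllegalArgumentException | RemoteException e) {
      System.err.println("Rejected xattr name: " + e.getMessage());
    }
  }
}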
Use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
Class TestFileTruncate, method testTruncateFailure.
/**
 * Failure / recovery test for truncate.
 * In this failure the DNs fail to recover the blocks and the NN triggers
 * lease recovery.
 * File stays in RecoveryInProgress until DataNodes report recovery.
 */
@Test
public void testTruncateFailure() throws IOException {
  int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
  int toTruncate = 1;
  byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
  final Path dir = new Path("/dir");
  final Path p = new Path(dir, "testTruncateFailure");
  {
    FSDataOutputStream out = fs.create(p, false, BLOCK_SIZE, REPLICATION, BLOCK_SIZE);
    out.write(contents, 0, startingFileSize);
    try {
      fs.truncate(p, 0);
      fail("Truncate must fail on open file.");
    } catch (IOException expected) {
      GenericTestUtils.assertExceptionContains("Failed to TRUNCATE_FILE", expected);
    } finally {
      out.close();
    }
  }
  {
    FSDataOutputStream out = fs.append(p);
    try {
      fs.truncate(p, 0);
      fail("Truncate must fail for append.");
    } catch (IOException expected) {
      GenericTestUtils.assertExceptionContains("Failed to TRUNCATE_FILE", expected);
    } finally {
      out.close();
    }
  }
  try {
    fs.truncate(p, -1);
    fail("Truncate must fail for a negative new length.");
  } catch (HadoopIllegalArgumentException expected) {
    GenericTestUtils.assertExceptionContains("Cannot truncate to a negative file size", expected);
  }
  try {
    fs.truncate(p, startingFileSize + 1);
    fail("Truncate must fail for a larger new length.");
  } catch (Exception expected) {
    GenericTestUtils.assertExceptionContains("Cannot truncate to a larger file size", expected);
  }
  try {
    fs.truncate(dir, 0);
    fail("Truncate must fail for a directory.");
  } catch (Exception expected) {
    GenericTestUtils.assertExceptionContains("Path is not a file", expected);
  }
  try {
    fs.truncate(new Path(dir, "non-existing"), 0);
    fail("Truncate must fail for a non-existing file.");
  } catch (Exception expected) {
    GenericTestUtils.assertExceptionContains("File does not exist", expected);
  }
  fs.setPermission(p, FsPermission.createImmutable((short) 0664));
  {
    final UserGroupInformation fooUgi =
        UserGroupInformation.createUserForTesting("foo", new String[] { "foo" });
    try {
      final FileSystem foofs = DFSTestUtil.getFileSystemAs(fooUgi, conf);
      foofs.truncate(p, 0);
      fail("Truncate must fail for no WRITE permission.");
    } catch (Exception expected) {
      GenericTestUtils.assertExceptionContains("Permission denied", expected);
    }
  }
  cluster.shutdownDataNodes();
  NameNodeAdapter.getLeaseManager(cluster.getNamesystem()).setLeasePeriod(LOW_SOFTLIMIT, LOW_HARDLIMIT);
  int newLength = startingFileSize - toTruncate;
  boolean isReady = fs.truncate(p, newLength);
  assertThat("truncate should have triggered block recovery.", isReady, is(false));
  {
    try {
      fs.truncate(p, 0);
      fail("Truncate must fail since a truncate is already in progress.");
    } catch (IOException expected) {
      GenericTestUtils.assertExceptionContains("Failed to TRUNCATE_FILE", expected);
    }
  }
  boolean recoveryTriggered = false;
  for (int i = 0; i < RECOVERY_ATTEMPTS; i++) {
    String leaseHolder = NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), p.toUri().getPath());
    if (leaseHolder.equals(HdfsServerConstants.NAMENODE_LEASE_HOLDER)) {
      recoveryTriggered = true;
      break;
    }
    try {
      Thread.sleep(SLEEP);
    } catch (InterruptedException ignored) {
    }
  }
  assertThat("lease recovery should have occurred in ~" + SLEEP * RECOVERY_ATTEMPTS + " ms.",
      recoveryTriggered, is(true));
  cluster.startDataNodes(conf, DATANODE_NUM, true, StartupOption.REGULAR, null);
  cluster.waitActive();
  checkBlockRecovery(p);
  NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
      .setLeasePeriod(HdfsConstants.LEASE_SOFTLIMIT_PERIOD, HdfsConstants.LEASE_HARDLIMIT_PERIOD);
  checkFullFile(p, newLength, contents);
  fs.delete(p, false);
}
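As a brief recap of the contract the test relies on, FileSystem#truncate returns true when the file reaches the new length immediately and false when the last block must first go through recovery, while a negative new length is rejected with HadoopIllegalArgumentException. A hedged sketch with a hypothetical path and length:

// Hedged sketch of the truncate contract exercised above (hypothetical path and length).
import java.io.IOException;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateSketch {
  public static void truncateTo(FileSystem fs, Path file, long newLength) throws IOException {
    try {
      // true: the file is already at newLength; false: the last block needs recovery first.
      boolean done = fs.truncate(file, newLength);
      if (!done) {
        System.out.println("Truncate scheduled; waiting for block recovery on " + file);
      }
    } catch (HadoopIllegalArgumentException e) {
      // Thrown for a negative new length, as asserted in the test above.
      System.err.println("Invalid truncate length " + newLength + ": " + e.getMessage());
    }
  }
}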
Use of org.apache.hadoop.HadoopIllegalArgumentException in project hadoop by apache.
Class HAUtil, method getRMHAId.
/**
 * @param conf Configuration. Please use verifyAndSetRMHAId to check.
 * @return RM Id on success
 */
public static String getRMHAId(Configuration conf) {
  int found = 0;
  String currentRMId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
  if (currentRMId == null) {
    for (String rmId : getRMHAIds(conf)) {
      String key = addSuffix(YarnConfiguration.RM_ADDRESS, rmId);
      String addr = conf.get(key);
      if (addr == null) {
        continue;
      }
      InetSocketAddress s;
      try {
        s = NetUtils.createSocketAddr(addr);
      } catch (Exception e) {
        LOG.warn("Exception in creating socket address " + addr, e);
        continue;
      }
      if (!s.isUnresolved() && NetUtils.isLocalAddress(s.getAddress())) {
        currentRMId = rmId.trim();
        found++;
      }
    }
  }
  if (found > 1) {
    // Only one address must match the local address
    String msg = "The HA Configuration has multiple addresses that match " + "local node's address.";
    throw new HadoopIllegalArgumentException(msg);
  }
  return currentRMId;
}
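getRMHAId first consults yarn.resourcemanager.ha.id and, if that is unset, tries to identify the local ResourceManager by matching each rm id's yarn.resourcemanager.address against the local host, throwing HadoopIllegalArgumentException when more than one matches. A minimal hedged sketch of such a configuration follows; the rm ids and hostnames are hypothetical.

// Minimal sketch of a YARN RM HA configuration (rm ids and hosts are hypothetical).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.HAUtil;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class RmHaIdSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
    conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
    // If RM_HA_ID ("yarn.resourcemanager.ha.id") is unset, getRMHAId() matches the
    // local address against each rm id's yarn.resourcemanager.address entry.
    conf.set(YarnConfiguration.RM_ADDRESS + ".rm1", "rm1.example.com:8032");
    conf.set(YarnConfiguration.RM_ADDRESS + ".rm2", "rm2.example.com:8032");
    // Alternatively, pin the id explicitly:
    conf.set(YarnConfiguration.RM_HA_ID, "rm1");
    System.out.println("Local RM id: " + HAUtil.getRMHAId(conf));
  }
}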