Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From the class TestDFSUpgradeWithHA, method runFinalizeCommand.
private void runFinalizeCommand(MiniDFSCluster cluster) throws IOException {
  HATestUtil.setFailoverConfigurations(cluster, conf);
  new DFSAdmin(conf).finalizeUpgrade();
}
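For comparison, the same operation can also be driven through DFSAdmin's Tool entry point instead of calling finalizeUpgrade() directly; a minimal sketch, assuming conf already carries the failover configuration set up above:

// Equivalent to running "hdfs dfsadmin -finalizeUpgrade" against the cluster.
// run() returns 0 on success; note that it is declared to throw Exception.
int exitCode = new DFSAdmin(conf).run(new String[] { "-finalizeUpgrade" });
assertEquals(0, exitCode);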
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From the class TestRefreshUserMappings, method testGroupMappingRefresh.
@Test
public void testGroupMappingRefresh() throws Exception {
  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[] { "-refreshUserToGroupsMappings" };
  Groups groups = Groups.getUserToGroupsMappingService(config);
  String user = UserGroupInformation.getCurrentUser().getUserName();
  System.out.println("first attempt:");
  List<String> g1 = groups.getGroups(user);
  String[] str_groups = new String[g1.size()];
  g1.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  System.out.println("second attempt, should be same:");
  List<String> g2 = groups.getGroups(user);
  g2.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  for (int i = 0; i < g2.size(); i++) {
    assertEquals("Should be same group ", g1.get(i), g2.get(i));
  }
  admin.run(args);
  System.out.println("third attempt(after refresh command), should be different:");
  List<String> g3 = groups.getGroups(user);
  g3.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  for (int i = 0; i < g3.size(); i++) {
    assertFalse("Should be different group: " + g1.get(i) + " and " + g3.get(i), g1.get(i).equals(g3.get(i)));
  }
  // test time out
  Thread.sleep(groupRefreshTimeoutSec * 1100);
  System.out.println("fourth attempt(after timeout), should be different:");
  List<String> g4 = groups.getGroups(user);
  g4.toArray(str_groups);
  System.out.println(Arrays.toString(str_groups));
  for (int i = 0; i < g4.size(); i++) {
    assertFalse("Should be different group ", g3.get(i).equals(g4.get(i)));
  }
}
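The refresh is only observable because the test's group mapping answers change between lookups; that plumbing is not shown above. A hypothetical provider plugged in through hadoop.security.group.mapping could look like the following sketch (the class name and counter are illustrative, not the actual fixture used by TestRefreshUserMappings):

// Illustrative only: hands back a different group on every lookup, so the cached
// value (g1/g2), the post-refresh value (g3) and the post-timeout value (g4) differ.
public static class MockChangingGroupsMapping implements GroupMappingServiceProvider {

  private static int fetchCount = 0;

  @Override
  public List<String> getGroups(String user) throws IOException {
    fetchCount++;
    return Arrays.asList(user + "_group_" + fetchCount);
  }

  @Override
  public void cacheGroupsRefresh() throws IOException {
  }

  @Override
  public void cacheGroupsAdd(List<String> groups) throws IOException {
  }
}

// Wired up in the test setup, for example:
// config.setClass("hadoop.security.group.mapping", MockChangingGroupsMapping.class, GroupMappingServiceProvider.class);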
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From the class TestRefreshUserMappings, method testRefreshSuperUserGroupsConfiguration.
@Test
public void testRefreshSuperUserGroupsConfiguration() throws Exception {
  final String SUPER_USER = "super_user";
  final List<String> groupNames1 = new ArrayList<>();
  groupNames1.add("gr1");
  groupNames1.add("gr2");
  final List<String> groupNames2 = new ArrayList<>();
  groupNames2.add("gr3");
  groupNames2.add("gr4");
  // keys in conf
  String userKeyGroups = DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(SUPER_USER);
  String userKeyHosts = DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(SUPER_USER);
  // superuser can proxy for these groups
  config.set(userKeyGroups, "gr3,gr4,gr5");
  config.set(userKeyHosts, "127.0.0.1");
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  UserGroupInformation ugi1 = mock(UserGroupInformation.class);
  UserGroupInformation ugi2 = mock(UserGroupInformation.class);
  UserGroupInformation suUgi = mock(UserGroupInformation.class);
  when(ugi1.getRealUser()).thenReturn(suUgi);
  when(ugi2.getRealUser()).thenReturn(suUgi);
  // super user
  when(suUgi.getShortUserName()).thenReturn(SUPER_USER);
  // super user
  when(suUgi.getUserName()).thenReturn(SUPER_USER + "L");
  when(ugi1.getShortUserName()).thenReturn("user1");
  when(ugi2.getShortUserName()).thenReturn("user2");
  when(ugi1.getUserName()).thenReturn("userL1");
  when(ugi2.getUserName()).thenReturn("userL2");
  // set groups for users
  when(ugi1.getGroups()).thenReturn(groupNames1);
  when(ugi2.getGroups()).thenReturn(groupNames2);
  // check before
  try {
    ProxyUsers.authorize(ugi1, "127.0.0.1");
    fail("first auth for " + ugi1.getShortUserName() + " should've failed ");
  } catch (AuthorizationException e) {
    // expected
    System.err.println("auth for " + ugi1.getUserName() + " failed");
  }
  try {
    ProxyUsers.authorize(ugi2, "127.0.0.1");
    System.err.println("auth for " + ugi2.getUserName() + " succeeded");
    // expected
  } catch (AuthorizationException e) {
    fail("first auth for " + ugi2.getShortUserName() + " should've succeeded: " + e.getLocalizedMessage());
  }
  // refresh will look at configuration on the server side
  // add an additional resource with the new value
  // so the server side will pick it up
  String rsrc = "testGroupMappingRefresh_rsrc.xml";
  addNewConfigResource(rsrc, userKeyGroups, "gr2", userKeyHosts, "127.0.0.1");
  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[] { "-refreshSuperUserGroupsConfiguration" };
  admin.run(args);
  try {
    ProxyUsers.authorize(ugi2, "127.0.0.1");
    fail("second auth for " + ugi2.getShortUserName() + " should've failed ");
  } catch (AuthorizationException e) {
    // expected
    System.err.println("auth for " + ugi2.getUserName() + " failed");
  }
  try {
    ProxyUsers.authorize(ugi1, "127.0.0.1");
    System.err.println("auth for " + ugi1.getUserName() + " succeeded");
    // expected
  } catch (AuthorizationException e) {
    fail("second auth for " + ugi1.getShortUserName() + " should've succeeded: " + e.getLocalizedMessage());
  }
}
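The helper addNewConfigResource is part of the test class and is not reproduced here; conceptually it writes the new proxy-user values into an extra XML resource that the server side re-reads on -refreshSuperUserGroupsConfiguration. The keys returned by getProxySuperuserGroupConfKey/getProxySuperuserIpConfKey are the standard proxy-user keys, so the refreshed state corresponds roughly to this local sketch (values mirror the test above; in the test itself the refresh happens on the NameNode via DFSAdmin):

// After the refresh, super_user may only proxy for members of "gr2", and only
// from 127.0.0.1, which is why ugi2 (gr3/gr4) now fails while ugi1 (gr1/gr2) passes.
Configuration refreshed = new Configuration();
refreshed.set("hadoop.proxyuser.super_user.groups", "gr2");
refreshed.set("hadoop.proxyuser.super_user.hosts", "127.0.0.1");
ProxyUsers.refreshSuperUserGroupsConfiguration(refreshed);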
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From the class TestCheckPointForSecurityTokens, method testSaveNamespace.
/**
 * Tests save namespace.
 */
@Test
public void testSaveNamespace() throws IOException {
  DistributedFileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    FSNamesystem namesystem = cluster.getNamesystem();
    String renewer = UserGroupInformation.getLoginUser().getUserName();
    Token<DelegationTokenIdentifier> token1 = namesystem.getDelegationToken(new Text(renewer));
    Token<DelegationTokenIdentifier> token2 = namesystem.getDelegationToken(new Text(renewer));
    // Saving image without safe mode should fail
    DFSAdmin admin = new DFSAdmin(conf);
    String[] args = new String[] { "-saveNamespace" };
    // verify that the edits file is NOT empty
    NameNode nn = cluster.getNameNode();
    for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.scanLog(Long.MAX_VALUE, true);
      long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
      assertEquals("In-progress log " + log + " should have 5 transactions", 5, numTransactions);
    }
    // Saving image in safe mode should succeed
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
      admin.run(args);
    } catch (Exception e) {
      throw new IOException(e.getMessage());
    }
    // verify that the edits file is empty except for the START txn
    for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
      EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
      assertTrue(log.isInProgress());
      log.scanLog(Long.MAX_VALUE, true);
      long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
      assertEquals("In-progress log " + log + " should only have START txn", 1, numTransactions);
    }
    // restart cluster
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    // Should be able to renew & cancel the delegation token after cluster restart
    try {
      renewToken(token1);
      renewToken(token2);
    } catch (IOException e) {
      fail("Could not renew or cancel the token");
    }
    namesystem = cluster.getNamesystem();
    Token<DelegationTokenIdentifier> token3 = namesystem.getDelegationToken(new Text(renewer));
    Token<DelegationTokenIdentifier> token4 = namesystem.getDelegationToken(new Text(renewer));
    // restart cluster again
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    namesystem = cluster.getNamesystem();
    Token<DelegationTokenIdentifier> token5 = namesystem.getDelegationToken(new Text(renewer));
    try {
      renewToken(token1);
      renewToken(token2);
      renewToken(token3);
      renewToken(token4);
      renewToken(token5);
    } catch (IOException e) {
      fail("Could not renew or cancel the token");
    }
    // restart cluster again
    cluster.shutdown();
    cluster = null;
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
    cluster.waitActive();
    namesystem = cluster.getNamesystem();
    try {
      renewToken(token1);
      cancelToken(token1);
      renewToken(token2);
      cancelToken(token2);
      renewToken(token3);
      cancelToken(token3);
      renewToken(token4);
      cancelToken(token4);
      renewToken(token5);
      cancelToken(token5);
    } catch (IOException e) {
      fail("Could not renew or cancel the token");
    }
  } finally {
    if (fs != null)
      fs.close();
    if (cluster != null)
      cluster.shutdown();
  }
}
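The renewToken and cancelToken helpers belong to the test class and are not shown here. The safe-mode/saveNamespace sequence itself maps directly onto DFSAdmin switches; a minimal sketch of driving it entirely through the tool, assuming conf points at the running NameNode:

// Equivalent to "hdfs dfsadmin -safemode enter", "-saveNamespace", "-safemode leave".
// -saveNamespace is only accepted while the NameNode is in safe mode.
DFSAdmin admin = new DFSAdmin(conf);
admin.run(new String[] { "-safemode", "enter" });
admin.run(new String[] { "-saveNamespace" });
admin.run(new String[] { "-safemode", "leave" });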
Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.
From the class TestGenericRefresh, method testExceptionResultsInNormalError.
@Test
public void testExceptionResultsInNormalError() throws Exception {
  // In this test, we ensure that all handlers are called even if we throw an exception in one
  RefreshHandler exceptionalHandler = Mockito.mock(RefreshHandler.class);
  Mockito.stub(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))).toThrow(new RuntimeException("Exceptional Handler Throws Exception"));
  RefreshHandler otherExceptionalHandler = Mockito.mock(RefreshHandler.class);
  Mockito.stub(otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))).toThrow(new RuntimeException("More Exceptions"));
  RefreshRegistry.defaultRegistry().register("exceptional", exceptionalHandler);
  RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
  DFSAdmin admin = new DFSAdmin(config);
  String[] args = new String[] { "-refresh", "localhost:" + cluster.getNameNodePort(), "exceptional" };
  int exitCode = admin.run(args);
  // Exceptions result in a -1
  assertEquals(-1, exitCode);
  Mockito.verify(exceptionalHandler).handleRefresh("exceptional", new String[] {});
  Mockito.verify(otherExceptionalHandler).handleRefresh("exceptional", new String[] {});
  RefreshRegistry.defaultRegistry().unregisterAll("exceptional");
}
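For contrast with the failure path above, a handler that completes normally returns a RefreshResponse and the generic -refresh command exits with 0. A brief sketch under the same registry and cluster setup (the "works" identifier is arbitrary):

// A handler that succeeds; the identifier must match the one passed to "-refresh".
RefreshHandler okHandler = new RefreshHandler() {
  @Override
  public RefreshResponse handleRefresh(String identifier, String[] args) {
    return RefreshResponse.successResponse();
  }
};
RefreshRegistry.defaultRegistry().register("works", okHandler);
int exitCode = new DFSAdmin(config).run(new String[] { "-refresh", "localhost:" + cluster.getNameNodePort(), "works" });
assertEquals(0, exitCode);
RefreshRegistry.defaultRegistry().unregisterAll("works");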