
Example 31 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

From the class TestDFSUpgradeWithHA, method runFinalizeCommand:

private void runFinalizeCommand(MiniDFSCluster cluster) throws IOException {
    // Point the client configuration at the HA nameservice before finalizing
    HATestUtil.setFailoverConfigurations(cluster, conf);
    // Equivalent to running: hdfs dfsadmin -finalizeUpgrade
    new DFSAdmin(conf).finalizeUpgrade();
}
Also used: DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin)
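
For context, DFSAdmin#finalizeUpgrade() issues the same request as the CLI command hdfs dfsadmin -finalizeUpgrade. A minimal standalone sketch of the call, assuming a client Configuration pointing at your cluster; the fs.defaultFS value and the class name below are placeholders, not taken from the example above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

public class FinalizeUpgradeSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder address; point this at your own NameNode.
        conf.set("fs.defaultFS", "hdfs://localhost:8020");
        // Equivalent to: hdfs dfsadmin -finalizeUpgrade
        new DFSAdmin(conf).finalizeUpgrade();
    }
}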

Example 32 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

From the class TestRefreshUserMappings, method testGroupMappingRefresh:

@Test
public void testGroupMappingRefresh() throws Exception {
    DFSAdmin admin = new DFSAdmin(config);
    String[] args = new String[] { "-refreshUserToGroupsMappings" };
    Groups groups = Groups.getUserToGroupsMappingService(config);
    String user = UserGroupInformation.getCurrentUser().getUserName();
    System.out.println("first attempt:");
    List<String> g1 = groups.getGroups(user);
    String[] str_groups = new String[g1.size()];
    g1.toArray(str_groups);
    System.out.println(Arrays.toString(str_groups));
    System.out.println("second attempt, should be same:");
    List<String> g2 = groups.getGroups(user);
    g2.toArray(str_groups);
    System.out.println(Arrays.toString(str_groups));
    for (int i = 0; i < g2.size(); i++) {
        assertEquals("Should be same group ", g1.get(i), g2.get(i));
    }
    admin.run(args);
    System.out.println("third attempt(after refresh command), should be different:");
    List<String> g3 = groups.getGroups(user);
    g3.toArray(str_groups);
    System.out.println(Arrays.toString(str_groups));
    for (int i = 0; i < g3.size(); i++) {
        assertFalse("Should be different group: " + g1.get(i) + " and " + g3.get(i), g1.get(i).equals(g3.get(i)));
    }
    // wait long enough for the cached group mappings to expire
    Thread.sleep(groupRefreshTimeoutSec * 1100);
    System.out.println("fourth attempt (after timeout), should be different:");
    List<String> g4 = groups.getGroups(user);
    g4.toArray(str_groups);
    System.out.println(Arrays.toString(str_groups));
    for (int i = 0; i < g4.size(); i++) {
        assertFalse("Should be different group ", g3.get(i).equals(g4.get(i)));
    }
}
Also used: DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), Test (org.junit.Test)
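
The refresh takes effect because Groups caches group lookups per user; -refreshUserToGroupsMappings tells the NameNode to drop that cache so the next lookup goes back to the mapping provider. A minimal sketch of driving the same refresh programmatically, assuming a Configuration wired to a live NameNode (the class name is illustrative):

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.security.Groups;
import org.apache.hadoop.security.UserGroupInformation;

public class RefreshGroupsSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Equivalent to: hdfs dfsadmin -refreshUserToGroupsMappings
        int exitCode = new DFSAdmin(conf).run(new String[] { "-refreshUserToGroupsMappings" });
        if (exitCode != 0) {
            throw new IllegalStateException("refresh failed with exit code " + exitCode);
        }
        // Subsequent lookups consult the group mapping provider again
        String user = UserGroupInformation.getCurrentUser().getUserName();
        List<String> groups = Groups.getUserToGroupsMappingService(conf).getGroups(user);
        System.out.println(user + " -> " + groups);
    }
}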

Example 33 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

From the class TestRefreshUserMappings, method testRefreshSuperUserGroupsConfiguration:

@Test
public void testRefreshSuperUserGroupsConfiguration() throws Exception {
    final String SUPER_USER = "super_user";
    final List<String> groupNames1 = new ArrayList<>();
    groupNames1.add("gr1");
    groupNames1.add("gr2");
    final List<String> groupNames2 = new ArrayList<>();
    groupNames2.add("gr3");
    groupNames2.add("gr4");
    // configuration keys that control the superuser's proxy privileges
    String userKeyGroups = DefaultImpersonationProvider.getTestProvider().getProxySuperuserGroupConfKey(SUPER_USER);
    String userKeyHosts = DefaultImpersonationProvider.getTestProvider().getProxySuperuserIpConfKey(SUPER_USER);
    // the superuser may proxy for members of these groups, from this host
    config.set(userKeyGroups, "gr3,gr4,gr5");
    config.set(userKeyHosts, "127.0.0.1");
    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
    UserGroupInformation ugi1 = mock(UserGroupInformation.class);
    UserGroupInformation ugi2 = mock(UserGroupInformation.class);
    UserGroupInformation suUgi = mock(UserGroupInformation.class);
    when(ugi1.getRealUser()).thenReturn(suUgi);
    when(ugi2.getRealUser()).thenReturn(suUgi);
    // short name of the super user
    when(suUgi.getShortUserName()).thenReturn(SUPER_USER);
    // login name of the super user
    when(suUgi.getUserName()).thenReturn(SUPER_USER + "L");
    when(ugi1.getShortUserName()).thenReturn("user1");
    when(ugi2.getShortUserName()).thenReturn("user2");
    when(ugi1.getUserName()).thenReturn("userL1");
    when(ugi2.getUserName()).thenReturn("userL2");
    // set groups for users
    when(ugi1.getGroups()).thenReturn(groupNames1);
    when(ugi2.getGroups()).thenReturn(groupNames2);
    // check before
    try {
        ProxyUsers.authorize(ugi1, "127.0.0.1");
        fail("first auth for " + ugi1.getShortUserName() + " should've failed ");
    } catch (AuthorizationException e) {
        // expected
        System.err.println("auth for " + ugi1.getUserName() + " failed");
    }
    try {
        ProxyUsers.authorize(ugi2, "127.0.0.1");
        // expected to succeed, since ugi2's groups match the configured proxy groups
        System.err.println("auth for " + ugi2.getUserName() + " succeeded");
    } catch (AuthorizationException e) {
        fail("first auth for " + ugi2.getShortUserName() + " should've succeeded: " + e.getLocalizedMessage());
    }
    // refresh will look at configuration on the server side
    // add additional resource with the new value
    // so the server side will pick it up
    String rsrc = "testGroupMappingRefresh_rsrc.xml";
    addNewConfigResource(rsrc, userKeyGroups, "gr2", userKeyHosts, "127.0.0.1");
    DFSAdmin admin = new DFSAdmin(config);
    String[] args = new String[] { "-refreshSuperUserGroupsConfiguration" };
    admin.run(args);
    try {
        ProxyUsers.authorize(ugi2, "127.0.0.1");
        fail("second auth for " + ugi2.getShortUserName() + " should've failed ");
    } catch (AuthorizationException e) {
        // expected
        System.err.println("auth for " + ugi2.getUserName() + " failed");
    }
    try {
        ProxyUsers.authorize(ugi1, "127.0.0.1");
        // expected to succeed after the refresh, since gr2 is now a configured proxy group
        System.err.println("auth for " + ugi1.getUserName() + " succeeded");
    } catch (AuthorizationException e) {
        fail("second auth for " + ugi1.getShortUserName() + " should've succeeded: " + e.getLocalizedMessage());
    }
}
Also used: AuthorizationException (org.apache.hadoop.security.authorize.AuthorizationException), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), ArrayList (java.util.ArrayList), Test (org.junit.Test)
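
The key detail here is that the proxy-user rules live in two configuration keys per superuser, hadoop.proxyuser.&lt;name&gt;.groups and hadoop.proxyuser.&lt;name&gt;.hosts, and -refreshSuperUserGroupsConfiguration makes the NameNode reread them. A minimal in-process sketch of the same configure-and-authorize cycle without the DFSAdmin round trip; the user and group names are placeholders, and whether authorization succeeds depends on the group mapping in effect for the proxied user:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;

public class ProxyUserRefreshSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // "super_user" may impersonate members of gr3/gr4 from 127.0.0.1
        conf.set("hadoop.proxyuser.super_user.groups", "gr3,gr4");
        conf.set("hadoop.proxyuser.super_user.hosts", "127.0.0.1");
        // Load the rules into the static ProxyUsers state
        ProxyUsers.refreshSuperUserGroupsConfiguration(conf);

        UserGroupInformation superUgi =
            UserGroupInformation.createRemoteUser("super_user");
        // A proxy UGI whose real user is the superuser
        UserGroupInformation proxyUgi =
            UserGroupInformation.createProxyUser("user2", superUgi);
        try {
            ProxyUsers.authorize(proxyUgi, "127.0.0.1");
            System.out.println("authorized");
        } catch (AuthorizationException e) {
            // thrown when user2 is not in gr3/gr4 according to the group mapping
            System.out.println("denied: " + e.getMessage());
        }
    }
}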

Example 34 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

From the class TestCheckPointForSecurityTokens, method testSaveNamespace:

/**
   * Tests save namespace.
   */
@Test
public void testSaveNamespace() throws IOException {
    DistributedFileSystem fs = null;
    try {
        Configuration conf = new HdfsConfiguration();
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
        cluster.waitActive();
        fs = cluster.getFileSystem();
        FSNamesystem namesystem = cluster.getNamesystem();
        String renewer = UserGroupInformation.getLoginUser().getUserName();
        Token<DelegationTokenIdentifier> token1 = namesystem.getDelegationToken(new Text(renewer));
        Token<DelegationTokenIdentifier> token2 = namesystem.getDelegationToken(new Text(renewer));
        // DFSAdmin will be used to trigger a saveNamespace; outside safe mode it would fail
        DFSAdmin admin = new DFSAdmin(conf);
        String[] args = new String[] { "-saveNamespace" };
        // verify that the edits file is NOT empty
        NameNode nn = cluster.getNameNode();
        for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
            EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
            assertTrue(log.isInProgress());
            log.scanLog(Long.MAX_VALUE, true);
            long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
            assertEquals("In-progress log " + log + " should have 5 transactions", 5, numTransactions);
            ;
        }
        // Saving image in safe mode should succeed
        fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        try {
            admin.run(args);
        } catch (Exception e) {
            // wrap with the original cause preserved, not just its message
            throw new IOException(e);
        }
        // verify that the edits file is empty except for the START txn
        for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
            EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
            assertTrue(log.isInProgress());
            log.scanLog(Long.MAX_VALUE, true);
            long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
            assertEquals("In-progress log " + log + " should only have START txn", 1, numTransactions);
        }
        // restart cluster
        cluster.shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        //Should be able to renew & cancel the delegation token after cluster restart
        try {
            renewToken(token1);
            renewToken(token2);
        } catch (IOException e) {
            fail("Could not renew or cancel the token");
        }
        namesystem = cluster.getNamesystem();
        Token<DelegationTokenIdentifier> token3 = namesystem.getDelegationToken(new Text(renewer));
        Token<DelegationTokenIdentifier> token4 = namesystem.getDelegationToken(new Text(renewer));
        // restart cluster again
        cluster.shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        namesystem = cluster.getNamesystem();
        Token<DelegationTokenIdentifier> token5 = namesystem.getDelegationToken(new Text(renewer));
        try {
            renewToken(token1);
            renewToken(token2);
            renewToken(token3);
            renewToken(token4);
            renewToken(token5);
        } catch (IOException e) {
            fail("Could not renew or cancel the token");
        }
        // restart cluster again
        cluster.shutdown();
        cluster = null;
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
        cluster.waitActive();
        namesystem = cluster.getNamesystem();
        try {
            renewToken(token1);
            cancelToken(token1);
            renewToken(token2);
            cancelToken(token2);
            renewToken(token3);
            cancelToken(token3);
            renewToken(token4);
            cancelToken(token4);
            renewToken(token5);
            cancelToken(token5);
        } catch (IOException e) {
            fail("Could not renew or cancel the token");
        }
    } finally {
        if (fs != null)
            fs.close();
        if (cluster != null)
            cluster.shutdown();
    }
}
Also used: MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), Configuration (org.apache.hadoop.conf.Configuration), DelegationTokenIdentifier (org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier), EditLogFile (org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile), Text (org.apache.hadoop.io.Text), StorageDirectory (org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory), IOException (java.io.IOException), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), Test (org.junit.Test)
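
The pattern the test exercises is the standard operational sequence for -saveNamespace: the NameNode only checkpoints while in safe mode, and a successful save rolls the edit log, which is why the in-progress log above shrinks to a single START transaction. A minimal sketch of that sequence as a helper method, assuming fs and conf point at a running cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

// Minimal sketch; fs and conf are assumed to point at a live cluster.
static void saveNamespace(DistributedFileSystem fs, Configuration conf) throws Exception {
    // -saveNamespace is rejected unless the NameNode is in safe mode
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    try {
        int exitCode = new DFSAdmin(conf).run(new String[] { "-saveNamespace" });
        if (exitCode != 0) {
            throw new IllegalStateException("-saveNamespace exited with " + exitCode);
        }
    } finally {
        // leave safe mode so the cluster can serve writes again
        fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    }
}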

Example 35 with DFSAdmin

Use of org.apache.hadoop.hdfs.tools.DFSAdmin in project hadoop by apache.

From the class TestGenericRefresh, method testExceptionResultsInNormalError:

@Test
public void testExceptionResultsInNormalError() throws Exception {
    // This test ensures that all handlers are called even if one of them throws an exception
    RefreshHandler exceptionalHandler = Mockito.mock(RefreshHandler.class);
    Mockito.when(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))).thenThrow(new RuntimeException("Exceptional Handler Throws Exception"));
    RefreshHandler otherExceptionalHandler = Mockito.mock(RefreshHandler.class);
    Mockito.when(otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class))).thenThrow(new RuntimeException("More Exceptions"));
    RefreshRegistry.defaultRegistry().register("exceptional", exceptionalHandler);
    RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
    DFSAdmin admin = new DFSAdmin(config);
    String[] args = new String[] { "-refresh", "localhost:" + cluster.getNameNodePort(), "exceptional" };
    int exitCode = admin.run(args);
    // Exceptions result in a -1
    assertEquals(-1, exitCode);
    Mockito.verify(exceptionalHandler).handleRefresh("exceptional", new String[] {});
    Mockito.verify(otherExceptionalHandler).handleRefresh("exceptional", new String[] {});
    RefreshRegistry.defaultRegistry().unregisterAll("exceptional");
}
Also used: DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin), RefreshHandler (org.apache.hadoop.ipc.RefreshHandler), Test (org.junit.Test)
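
For reference, the generic refresh protocol dispatches -refresh &lt;host:port&gt; &lt;identifier&gt; [args...] to every RefreshHandler registered under that identifier; the test above shows that a throwing handler does not prevent the others from running, it only forces a -1 exit code. A minimal sketch of a well-behaved handler registered against the same registry; the identifier name is arbitrary:

import org.apache.hadoop.ipc.RefreshHandler;
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;

public class ReloadRulesHandler implements RefreshHandler {
    @Override
    public RefreshResponse handleRefresh(String identifier, String[] args) {
        // Reload whatever state this handler owns, then report success
        System.out.println("refreshing " + identifier + " with args " + java.util.Arrays.toString(args));
        return RefreshResponse.successResponse();
    }

    public static void register() {
        // "reload-rules" is an arbitrary identifier for this sketch; trigger it with:
        //   hdfs dfsadmin -refresh <host:port> reload-rules
        RefreshRegistry.defaultRegistry().register("reload-rules", new ReloadRulesHandler());
    }
}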

Aggregations

DFSAdmin (org.apache.hadoop.hdfs.tools.DFSAdmin): 41 usages
Test (org.junit.Test): 31 usages
Configuration (org.apache.hadoop.conf.Configuration): 15 usages
Path (org.apache.hadoop.fs.Path): 14 usages
CoreMatchers.containsString (org.hamcrest.CoreMatchers.containsString): 9 usages
IOException (java.io.IOException): 6 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 6 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 5 usages
DSQuotaExceededException (org.apache.hadoop.hdfs.protocol.DSQuotaExceededException): 4 usages
NSQuotaExceededException (org.apache.hadoop.hdfs.protocol.NSQuotaExceededException): 4 usages
QuotaExceededException (org.apache.hadoop.hdfs.protocol.QuotaExceededException): 4 usages
ContentSummary (org.apache.hadoop.fs.ContentSummary): 3 usages
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 3 usages
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 3 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3 usages
ByteArrayOutputStream (java.io.ByteArrayOutputStream): 2 usages
File (java.io.File): 2 usages
QuotaUsage (org.apache.hadoop.fs.QuotaUsage): 2 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 2 usages
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2 usages