Example 6 with LocalHBaseCluster

use of org.apache.hadoop.hbase.LocalHBaseCluster in project phoenix by apache.

the class HttpParamImpersonationQueryServerIT method setUp.

/**
 * Set up and start Kerberos and HBase
 */
@BeforeClass
public static void setUp() throws Exception {
    final Configuration conf = UTIL.getConfiguration();
    // Ensure the dirs we need are created/empty
    ensureIsEmptyDirectory(TEMP_DIR);
    ensureIsEmptyDirectory(KEYTAB_DIR);
    KEYTAB = new File(KEYTAB_DIR, "test.keytab");
    // Start a MiniKDC
    KDC = UTIL.setupMiniKdc(KEYTAB);
    // Create a service principal and spnego principal in one keytab
    // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
    // use separate identities for HBase and HDFS results in a GSS initiate error. The quick
    // solution is to just use a single "service" principal instead of "hbase" and "hdfs"
    // (or "dn" and "nn") per usual.
    KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, PQS_PRINCIPAL, SERVICE_PRINCIPAL);
    // Start ZK by hand
    UTIL.startMiniZKCluster();
    // Create a number of unprivileged users
    createUsers(2);
    // Set configuration for HBase
    HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
    HBaseKerberosUtils.setSecuredConfiguration(conf);
    setHdfsSecuredConfiguration(conf);
    UserGroupInformation.setConfiguration(conf);
    conf.setInt(HConstants.MASTER_PORT, 0);
    conf.setInt(HConstants.MASTER_INFO_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
    conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
    conf.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, AccessController.class.getName());
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName(), TokenProvider.class.getName());
    // Secure Phoenix setup
    conf.set("phoenix.queryserver.kerberos.http.principal", SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
    conf.set("phoenix.queryserver.http.keytab.file", KEYTAB.getAbsolutePath());
    conf.set("phoenix.queryserver.kerberos.principal", PQS_PRINCIPAL + "@" + KDC.getRealm());
    conf.set("phoenix.queryserver.keytab.file", KEYTAB.getAbsolutePath());
    conf.setBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
    conf.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
    // Required so that PQS can impersonate the end-users to HBase
    conf.set("hadoop.proxyuser.phoenixqs.groups", "*");
    conf.set("hadoop.proxyuser.phoenixqs.hosts", "*");
    // user1 is allowed to impersonate others, user2 is not
    conf.set("hadoop.proxyuser.user1.groups", "*");
    conf.set("hadoop.proxyuser.user1.hosts", "*");
    conf.setBoolean(QueryServices.QUERY_SERVER_WITH_REMOTEUSEREXTRACTOR_ATTRIB, true);
    // Clear the cached singletons so we can inject our own.
    InstanceResolver.clearSingletons();
    // Make sure the ConnectionInfo doesn't try to pull a default Configuration
    InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {

        @Override
        public Configuration getConfiguration() {
            return conf;
        }

        @Override
        public Configuration getConfiguration(Configuration confToClone) {
            Configuration copy = new Configuration(conf);
            copy.addResource(confToClone);
            return copy;
        }
    });
    updateDefaultRealm();
    // Start HDFS
    UTIL.startMiniDFSCluster(1);
    // Use LocalHBaseCluster to keep HBaseTestingUtility from doing something wrong.
    // NB. I'm not actually sure what HTU does incorrectly, but this was pulled from some test
    // classes in HBase itself. I couldn't get HTU to work myself (2017/07/06).
    Path rootdir = UTIL.getDataTestDirOnTestFS(HttpParamImpersonationQueryServerIT.class.getSimpleName());
    FSUtils.setRootDir(conf, rootdir);
    HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
    HBASE_CLUSTER.startup();
    // Then fork a thread with PQS in it.
    startQueryServer();
}
Also used : Path(org.apache.hadoop.fs.Path) TokenProvider(org.apache.hadoop.hbase.security.token.TokenProvider) AccessController(org.apache.hadoop.hbase.security.access.AccessController) Configuration(org.apache.hadoop.conf.Configuration) ConfigurationFactory(org.apache.phoenix.query.ConfigurationFactory) LocalHBaseCluster(org.apache.hadoop.hbase.LocalHBaseCluster) File(java.io.File) BeforeClass(org.junit.BeforeClass)
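
The proxy-user settings above are the crux of this test: user1 may impersonate others while user2 may not. As a rough sketch of how a client exercises that path through the Phoenix thin driver, something like the following could run against the started PQS; the port, exact URL syntax, and query are illustrative assumptions rather than values taken from the test.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class DoAsThinClientSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical PQS endpoint; the test binds an ephemeral port instead.
        // The "doAs" HTTP parameter asks PQS to execute as user2, which only
        // succeeds for callers covered by the hadoop.proxyuser.* settings.
        String url = "jdbc:phoenix:thin:url=http://localhost:8765?doAs=user2"
                + ";authentication=SPNEGO;serialization=PROTOBUF";
        try (Connection conn = DriverManager.getConnection(url);
                Statement stmt = conn.createStatement();
                ResultSet rs = stmt.executeQuery(
                        "SELECT TABLE_NAME FROM SYSTEM.CATALOG LIMIT 1")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}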

Example 7 with LocalHBaseCluster

use of org.apache.hadoop.hbase.LocalHBaseCluster in project phoenix by apache.

the class SecureQueryServerIT method setUp.

/**
 * Set up and start Kerberos and HBase
 */
@BeforeClass
public static void setUp() throws Exception {
    final Configuration conf = UTIL.getConfiguration();
    // Ensure the dirs we need are created/empty
    ensureIsEmptyDirectory(TEMP_DIR);
    ensureIsEmptyDirectory(KEYTAB_DIR);
    KEYTAB = new File(KEYTAB_DIR, "test.keytab");
    // Start a MiniKDC
    KDC = UTIL.setupMiniKdc(KEYTAB);
    // Create a service principal and spnego principal in one keytab
    // NB. Due to some apparent limitations between HDFS and HBase in the same JVM, trying to
    // use separate identities for HBase and HDFS results in a GSS initiate error. The quick
    // solution is to just use a single "service" principal instead of "hbase" and "hdfs"
    // (or "dn" and "nn") per usual.
    KDC.createPrincipal(KEYTAB, SPNEGO_PRINCIPAL, PQS_PRINCIPAL, SERVICE_PRINCIPAL);
    // Start ZK by hand
    UTIL.startMiniZKCluster();
    // Create a number of unprivileged users
    createUsers(3);
    // Set configuration for HBase
    HBaseKerberosUtils.setPrincipalForTesting(SERVICE_PRINCIPAL + "@" + KDC.getRealm());
    HBaseKerberosUtils.setSecuredConfiguration(conf);
    setHdfsSecuredConfiguration(conf);
    UserGroupInformation.setConfiguration(conf);
    conf.setInt(HConstants.MASTER_PORT, 0);
    conf.setInt(HConstants.MASTER_INFO_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, TokenProvider.class.getName());
    // Secure Phoenix setup
    conf.set("phoenix.queryserver.kerberos.http.principal", SPNEGO_PRINCIPAL + "@" + KDC.getRealm());
    conf.set("phoenix.queryserver.http.keytab.file", KEYTAB.getAbsolutePath());
    conf.set("phoenix.queryserver.kerberos.principal", PQS_PRINCIPAL + "@" + KDC.getRealm());
    conf.set("phoenix.queryserver.keytab.file", KEYTAB.getAbsolutePath());
    conf.setBoolean(QueryServices.QUERY_SERVER_DISABLE_KERBEROS_LOGIN, true);
    conf.setInt(QueryServices.QUERY_SERVER_HTTP_PORT_ATTRIB, 0);
    // Required so that PQS can impersonate the end-users to HBase
    conf.set("hadoop.proxyuser.phoenixqs.groups", "*");
    conf.set("hadoop.proxyuser.phoenixqs.hosts", "*");
    // Clear the cached singletons so we can inject our own.
    InstanceResolver.clearSingletons();
    // Make sure the ConnectionInfo doesn't try to pull a default Configuration
    InstanceResolver.getSingleton(ConfigurationFactory.class, new ConfigurationFactory() {

        @Override
        public Configuration getConfiguration() {
            return conf;
        }

        @Override
        public Configuration getConfiguration(Configuration confToClone) {
            Configuration copy = new Configuration(conf);
            copy.addResource(confToClone);
            return copy;
        }
    });
    updateDefaultRealm();
    // Start HDFS
    UTIL.startMiniDFSCluster(1);
    // Use LocalHBaseCluster to keep HBaseTestingUtility from doing something wrong.
    // NB. I'm not actually sure what HTU does incorrectly, but this was pulled from some test
    // classes in HBase itself. I couldn't get HTU to work myself (2017/07/06).
    Path rootdir = UTIL.getDataTestDirOnTestFS(SecureQueryServerIT.class.getSimpleName());
    FSUtils.setRootDir(conf, rootdir);
    HBASE_CLUSTER = new LocalHBaseCluster(conf, 1);
    HBASE_CLUSTER.startup();
    // Then fork a thread with PQS in it.
    startQueryServer();
}
Also used : Path(org.apache.hadoop.fs.Path) TokenProvider(org.apache.hadoop.hbase.security.token.TokenProvider) Configuration(org.apache.hadoop.conf.Configuration) ConfigurationFactory(org.apache.phoenix.query.ConfigurationFactory) LocalHBaseCluster(org.apache.hadoop.hbase.LocalHBaseCluster) File(java.io.File) BeforeClass(org.junit.BeforeClass)
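
For context on the InstanceResolver step: the clearSingletons() call matters because the first factory resolved for a class is cached, so any later Phoenix code that looks the factory up sees the injected instance. A minimal sketch of the lookup side, assuming the stock ConfigurationFactoryImpl as the fallback default:

// Hypothetical lookup site: returns the factory injected above, falling back
// to the default implementation only if nothing was registered first.
Configuration resolved = InstanceResolver
        .getSingleton(ConfigurationFactory.class,
                new ConfigurationFactory.ConfigurationFactoryImpl())
        .getConfiguration();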

Example 8 with LocalHBaseCluster

use of org.apache.hadoop.hbase.LocalHBaseCluster in project hbase by apache.

the class HMasterCommandLine method startMaster.

private int startMaster() {
    Configuration conf = getConf();
    try {
        // If 'local', defer to a LocalHBaseCluster instance, which starts the master
        // and regionserver both in the one JVM.
        if (LocalHBaseCluster.isLocal(conf)) {
            DefaultMetricsSystem.setMiniClusterMode(true);
            final MiniZooKeeperCluster zooKeeperCluster = new MiniZooKeeperCluster(conf);
            File zkDataPath = new File(conf.get(HConstants.ZOOKEEPER_DATA_DIR));
            // find out the default client port
            int zkClientPort = 0;
            // If the zookeeper client port is specified in server quorum, use it.
            String zkserver = conf.get(HConstants.ZOOKEEPER_QUORUM);
            if (zkserver != null) {
                String[] zkservers = zkserver.split(",");
                if (zkservers.length > 1) {
                    // In local mode deployment, we have the master + a region server and zookeeper server
                    // started in the same process. Therefore, we only support one zookeeper server.
                    String errorMsg = "Could not start ZK with " + zkservers.length + " ZK servers in local mode deployment. Aborting as clients (e.g. shell) will not " + "be able to find this ZK quorum.";
                    System.err.println(errorMsg);
                    throw new IOException(errorMsg);
                }
                String[] parts = zkservers[0].split(":");
                if (parts.length == 2) {
                    // the second part is the client port
                    zkClientPort = Integer.parseInt(parts[1]);
                }
            }
            // If the client port could not be found in the server quorum conf, try another conf
            if (zkClientPort == 0) {
                zkClientPort = conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, 0);
                // The client port has to be set by now; if not, throw exception.
                if (zkClientPort == 0) {
                    throw new IOException("No config value for " + HConstants.ZOOKEEPER_CLIENT_PORT);
                }
            }
            zooKeeperCluster.setDefaultClientPort(zkClientPort);
            // set the ZK tick time if specified
            int zkTickTime = conf.getInt(HConstants.ZOOKEEPER_TICK_TIME, 0);
            if (zkTickTime > 0) {
                zooKeeperCluster.setTickTime(zkTickTime);
            }
            // login the zookeeper server principal (if using security)
            ZKAuthentication.loginServer(conf, HConstants.ZK_SERVER_KEYTAB_FILE, HConstants.ZK_SERVER_KERBEROS_PRINCIPAL, null);
            int localZKClusterSessionTimeout = conf.getInt(HConstants.ZK_SESSION_TIMEOUT + ".localHBaseCluster", 10 * 1000);
            conf.setInt(HConstants.ZK_SESSION_TIMEOUT, localZKClusterSessionTimeout);
            LOG.info("Starting a zookeeper cluster");
            int clientPort = zooKeeperCluster.startup(zkDataPath);
            if (clientPort != zkClientPort) {
                String errorMsg = "Could not start ZK at requested port of " + zkClientPort + ".  ZK was started at port: " + clientPort + ".  Aborting as clients (e.g. shell) will not be able to find " + "this ZK quorum.";
                System.err.println(errorMsg);
                throw new IOException(errorMsg);
            }
            conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort));
            // Need to have the zk cluster shutdown when master is shutdown.
            // Run a subclass that does the zk cluster shutdown on its way out.
            int mastersCount = conf.getInt("hbase.masters", 1);
            int regionServersCount = conf.getInt("hbase.regionservers", 1);
            // Set start timeout to 5 minutes for cmd line start operations
            conf.setIfUnset("hbase.master.start.timeout.localHBaseCluster", "300000");
            LOG.info("Starting up instance of localHBaseCluster; master=" + mastersCount + ", regionserversCount=" + regionServersCount);
            LocalHBaseCluster cluster = new LocalHBaseCluster(conf, mastersCount, regionServersCount, LocalHMaster.class, HRegionServer.class);
            ((LocalHMaster) cluster.getMaster(0)).setZKCluster(zooKeeperCluster);
            cluster.startup();
            waitOnMasterThreads(cluster);
        } else {
            logProcessInfo(getConf());
            HMaster master = HMaster.constructMaster(masterClass, conf);
            if (master.isStopped()) {
                LOG.info("Won't bring the Master up as a shutdown is requested");
                return 1;
            }
            master.start();
            master.join();
            if (master.isAborted())
                throw new RuntimeException("HMaster Aborted");
        }
    } catch (Throwable t) {
        LOG.error("Master exiting", t);
        return 1;
    }
    return 0;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) MiniZooKeeperCluster(org.apache.hadoop.hbase.zookeeper.MiniZooKeeperCluster) IOException(java.io.IOException) LocalHBaseCluster(org.apache.hadoop.hbase.LocalHBaseCluster) File(java.io.File)
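
To make the branch condition concrete: LocalHBaseCluster.isLocal(conf) keys off hbase.cluster.distributed, so a configuration like the following sketch (values are illustrative) stays on the local-mode path that the method handles first.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.LocalHBaseCluster;

public class LocalModeCheckSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Local mode means the cluster is not distributed; this is the default.
        conf.setBoolean(HConstants.CLUSTER_DISTRIBUTED, false);
        // A single quorum entry with an explicit client port; as the parsing
        // above shows, more than one ZK server aborts a local-mode start.
        conf.set(HConstants.ZOOKEEPER_QUORUM, "localhost:2181");
        System.out.println("local mode: " + LocalHBaseCluster.isLocal(conf));
    }
}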

Example 9 with LocalHBaseCluster

use of org.apache.hadoop.hbase.LocalHBaseCluster in project hbase by apache.

the class TestInfoServersACL method beforeClass.

@BeforeClass
public static void beforeClass() throws Exception {
    conf = UTIL.getConfiguration();
    KDC = UTIL.setupMiniKdc(KEYTAB_FILE);
    USERNAME = UserGroupInformation.getLoginUser().getShortUserName();
    PRINCIPAL = USERNAME + "/" + HOST;
    HTTP_PRINCIPAL = "HTTP/" + HOST;
    // Create principals for services and the test users
    KDC.createPrincipal(KEYTAB_FILE, PRINCIPAL, HTTP_PRINCIPAL, USER_ADMIN_STR, USER_NONE_STR);
    UTIL.startMiniZKCluster();
    HBaseKerberosUtils.setSecuredConfiguration(conf, PRINCIPAL + "@" + KDC.getRealm(), HTTP_PRINCIPAL + "@" + KDC.getRealm());
    HBaseKerberosUtils.setSSLConfiguration(UTIL, TestInfoServersACL.class);
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, TokenProvider.class.getName());
    UTIL.startMiniDFSCluster(1);
    Path rootdir = UTIL.getDataTestDirOnTestFS("TestInfoServersACL");
    CommonFSUtils.setRootDir(conf, rootdir);
    // The info servers do not run in tests by default.
    // Set them to ephemeral ports so they will start.
    conf.setInt(HConstants.MASTER_INFO_PORT, 0);
    conf.setInt(HConstants.REGIONSERVER_INFO_PORT, 0);
    conf.set(HttpServer.HTTP_UI_AUTHENTICATION, "kerberos");
    conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY, HTTP_PRINCIPAL);
    conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY, KEYTAB_FILE.getAbsolutePath());
    // ACL lists work only when "hadoop.security.authorization" is set to true
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
    // only user admin will have acl access
    conf.set(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, USER_ADMIN_STR);
    // conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, "");
    CLUSTER = new LocalHBaseCluster(conf, 1);
    CLUSTER.startup();
    CLUSTER.getActiveMaster().waitForMetaOnline();
}
Also used : Path(org.apache.hadoop.fs.Path) TokenProvider(org.apache.hadoop.hbase.security.token.TokenProvider) LocalHBaseCluster(org.apache.hadoop.hbase.LocalHBaseCluster) BeforeClass(org.junit.BeforeClass)
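
A test against this cluster then issues SPNEGO-authenticated HTTP requests to the info server. The following is a hedged sketch using Hadoop's AuthenticatedURL; the principal, keytab path, and port are placeholder assumptions (the test itself binds an ephemeral info port and reads it back from the cluster).

import java.net.HttpURLConnection;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;

public class SpnegoInfoServerSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder principal and keytab for illustration only.
        UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
                "admin@EXAMPLE.COM", "/path/to/test.keytab");
        int status = ugi.doAs((PrivilegedExceptionAction<Integer>) () -> {
            // AuthenticatedURL performs the SPNEGO handshake with the Kerberos
            // credentials of the enclosing login context.
            URL url = new URL("http://localhost:16010/master-status");
            HttpURLConnection conn =
                    new AuthenticatedURL().openConnection(url, new AuthenticatedURL.Token());
            return conn.getResponseCode();
        });
        // With the ACL settings above, only the admin user should see 200 here.
        System.out.println("HTTP status: " + status);
    }
}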

Example 10 with LocalHBaseCluster

use of org.apache.hadoop.hbase.LocalHBaseCluster in project incubator-atlas by apache.

the class HBaseTestUtils method startCluster.

public static void startCluster() throws Exception {
    Configuration hbaseConf = HBaseBasedAuditRepository.getHBaseConfiguration(ApplicationProperties.get());
    hbaseTestUtility = HBaseTestingUtility.createLocalHTU(hbaseConf);
    int zkPort = hbaseConf.getInt("hbase.zookeeper.property.clientPort", 19026);
    hbaseTestUtility.startMiniZKCluster(1, zkPort);
    hbaseCluster = new LocalHBaseCluster(hbaseTestUtility.getConfiguration());
    hbaseCluster.startup();
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) LocalHBaseCluster(org.apache.hadoop.hbase.LocalHBaseCluster)
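
The snippet stops short of cleanup; a plausible teardown counterpart, assuming the same static hbaseCluster and hbaseTestUtility fields, would be:

public static void stopCluster() throws Exception {
    if (hbaseCluster != null) {
        // Stops the master and region server threads started by startup().
        hbaseCluster.shutdown();
        hbaseCluster = null;
    }
    if (hbaseTestUtility != null) {
        // Tears down the mini ZK cluster started in startCluster().
        hbaseTestUtility.shutdownMiniZKCluster();
        hbaseTestUtility = null;
    }
}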

Aggregations

LocalHBaseCluster (org.apache.hadoop.hbase.LocalHBaseCluster) - 12 usages
Configuration (org.apache.hadoop.conf.Configuration) - 6 usages
Path (org.apache.hadoop.fs.Path) - 5 usages
File (java.io.File) - 4 usages
TokenProvider (org.apache.hadoop.hbase.security.token.TokenProvider) - 4 usages
BeforeClass (org.junit.BeforeClass) - 4 usages
HBaseTestingUtil (org.apache.hadoop.hbase.HBaseTestingUtil) - 3 usages
HBaseConfiguration (org.apache.hadoop.hbase.HBaseConfiguration) - 2 usages
MasterThread (org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread) - 2 usages
ConfigurationFactory (org.apache.phoenix.query.ConfigurationFactory) - 2 usages
Test (org.junit.Test) - 2 usages
IOException (java.io.IOException) - 1 usage
Map (java.util.Map) - 1 usage
ServerName (org.apache.hadoop.hbase.ServerName) - 1 usage
SingleProcessHBaseCluster (org.apache.hadoop.hbase.SingleProcessHBaseCluster) - 1 usage
StartTestingClusterOption (org.apache.hadoop.hbase.StartTestingClusterOption) - 1 usage
RegionInfo (org.apache.hadoop.hbase.client.RegionInfo) - 1 usage
RetriesExhaustedException (org.apache.hadoop.hbase.client.RetriesExhaustedException) - 1 usage
ConnectionClosedException (org.apache.hadoop.hbase.exceptions.ConnectionClosedException) - 1 usage
AccessController (org.apache.hadoop.hbase.security.access.AccessController) - 1 usage