Example 41 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class HiveServer2 method stop.

@Override
public synchronized void stop() {
    LOG.info("Shutting down HiveServer2");
    HiveConf hiveConf = this.getHiveConf();
    super.stop();
    if (webServer != null) {
        try {
            webServer.stop();
            LOG.info("Web UI has stopped");
        } catch (Exception e) {
            LOG.error("Error stopping Web UI: ", e);
        }
    }
    // Shutdown Metrics
    if (MetricsFactory.getInstance() != null) {
        try {
            MetricsFactory.close();
        } catch (Exception e) {
            LOG.error("error in Metrics deinit: " + e.getClass().getName() + " " + e.getMessage(), e);
        }
    }
    // Remove this server instance from ZooKeeper if dynamic service discovery is set
    if (hiveConf != null && hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY)) {
        try {
            removeServerInstanceFromZooKeeper();
        } catch (Exception e) {
            LOG.error("Error removing znode for this HiveServer2 instance from ZooKeeper.", e);
        }
    }
    // There should already be an instance of the Tez session pool manager;
    // if not, ignoring is fine while stopping HiveServer2.
    if (hiveConf != null && hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS)) {
        try {
            TezSessionPoolManager.getInstance().stop();
        } catch (Exception e) {
            LOG.error("Tez session pool manager stop had an error during stop of HiveServer2. " + "Shutting down HiveServer2 anyway.", e);
        }
    }
    if (hiveConf != null && hiveConf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
        try {
            SparkSessionManagerImpl.getInstance().shutdown();
        } catch (Exception ex) {
            LOG.error("Spark session pool manager failed to stop during HiveServer2 shutdown.", ex);
        }
    }
}
Also used : HiveConf(org.apache.hadoop.hive.conf.HiveConf) ServiceException(org.apache.hive.service.ServiceException) ParseException(org.apache.commons.cli.ParseException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) LogInitializationException(org.apache.hadoop.hive.common.LogUtils.LogInitializationException) KeeperException(org.apache.zookeeper.KeeperException) IOException(java.io.IOException)
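
The optional branches in stop() are driven entirely by HiveConf settings. A minimal configuration sketch, assuming the same HiveConf and ConfVars imports listed above; the values are illustrative rather than defaults (in practice the Tez and Spark paths would not both be enabled):

HiveConf conf = new HiveConf();
// Deregister this instance from ZooKeeper on stop() (the removeServerInstanceFromZooKeeper branch).
conf.setBoolVar(ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY, true);
// Pre-create Tez sessions at startup, so stop() also tears down the TezSessionPoolManager.
conf.setBoolVar(ConfVars.HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS, true);
// With the Spark execution engine selected, stop() additionally shuts down SparkSessionManagerImpl.
conf.setVar(ConfVars.HIVE_EXECUTION_ENGINE, "spark");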

Example 42 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class HiveServer2 method deleteServerInstancesFromZooKeeper.

/**
   * Remove all znodes corresponding to the given version number from ZooKeeper.
   *
   * @param versionNumber HiveServer2 version number whose znodes should be removed
   * @throws Exception if the znodes cannot be listed or deleted from ZooKeeper
   */
static void deleteServerInstancesFromZooKeeper(String versionNumber) throws Exception {
    HiveConf hiveConf = new HiveConf();
    String zooKeeperEnsemble = ZooKeeperHiveHelper.getQuorumServers(hiveConf);
    String rootNamespace = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_ZOOKEEPER_NAMESPACE);
    int baseSleepTime = (int) hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME, TimeUnit.MILLISECONDS);
    int maxRetries = hiveConf.getIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES);
    CuratorFramework zooKeeperClient = CuratorFrameworkFactory.builder().connectString(zooKeeperEnsemble).retryPolicy(new ExponentialBackoffRetry(baseSleepTime, maxRetries)).build();
    zooKeeperClient.start();
    List<String> znodePaths = zooKeeperClient.getChildren().forPath(ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace);
    List<String> znodePathsUpdated;
    // Now for each path that is for the given versionNumber, delete the znode from ZooKeeper
    for (int i = 0; i < znodePaths.size(); i++) {
        String znodePath = znodePaths.get(i);
        deleteSignal = new CountDownLatch(1);
        if (znodePath.contains("version=" + versionNumber + ";")) {
            String fullZnodePath = ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace + ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + znodePath;
            LOG.warn("Will attempt to remove the znode: " + fullZnodePath + " from ZooKeeper");
            System.out.println("Will attempt to remove the znode: " + fullZnodePath + " from ZooKeeper");
            zooKeeperClient.delete().guaranteed().inBackground(new DeleteCallBack()).forPath(fullZnodePath);
            // Wait for the delete to complete
            deleteSignal.await();
            // Get the updated path list
            znodePathsUpdated = zooKeeperClient.getChildren().forPath(ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace);
            // Gives a list of any new paths that may have been created to maintain the persistent ephemeral node
            znodePathsUpdated.removeAll(znodePaths);
            // Add the new paths to the znodes list. We'll try for their removal as well.
            znodePaths.addAll(znodePathsUpdated);
        }
    }
    zooKeeperClient.close();
}
Also used : CuratorFramework(org.apache.curator.framework.CuratorFramework) ExponentialBackoffRetry(org.apache.curator.retry.ExponentialBackoffRetry) HiveConf(org.apache.hadoop.hive.conf.HiveConf) CountDownLatch(java.util.concurrent.CountDownLatch)
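
The method relies on a DeleteCallBack class and a static deleteSignal CountDownLatch that the snippet does not show. A minimal sketch of such a callback, assuming Curator's BackgroundCallback interface (org.apache.curator.framework.api.BackgroundCallback, CuratorEvent, CuratorEventType) and the deleteSignal field declared elsewhere in HiveServer2:

private static class DeleteCallBack implements BackgroundCallback {
    @Override
    public void processResult(CuratorFramework zooKeeperClient, CuratorEvent event) throws Exception {
        // Count down the latch once the background delete has completed, unblocking deleteSignal.await().
        if (event.getType() == CuratorEventType.DELETE) {
            deleteSignal.countDown();
        }
    }
}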

Example 43 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class EmbeddedThriftBinaryCLIService method init.

@Override
public synchronized void init(HiveConf hiveConf) {
    // The caller may pass a null HiveConf when the service is used embedded,
    // independent of any existing hiveConf object. Create a new HiveConf object here in that case.
    if (hiveConf == null) {
        hiveConf = new HiveConf();
    }
    cliService.init(hiveConf);
    cliService.start();
    super.init(hiveConf);
}
Also used : HiveConf(org.apache.hadoop.hive.conf.HiveConf)
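
A hedged usage sketch: in embedded mode the service is constructed and initialized directly, and passing null exercises the fallback in the method above (the no-arg constructor is assumed here):

EmbeddedThriftBinaryCLIService service = new EmbeddedThriftBinaryCLIService();
// Passing null makes init() create its own HiveConf, as handled in the method above;
// passing a pre-built HiveConf skips that fallback.
service.init(null);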

Example 44 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class TestUserSearchFilter method setup.

@Before
public void setup() {
    conf = new HiveConf();
    factory = new UserSearchFilterFactory();
}
Also used : HiveConf(org.apache.hadoop.hive.conf.HiveConf) Before(org.junit.Before)
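
A hedged sketch of how this fixture could be exercised, assuming the FilterFactory contract from Hive's LDAP auth package (getInstance(HiveConf) returning a Filter, and Filter.apply(DirSearch, String)); the filter value, DN, and the 'search' mock are illustrative assumptions, not part of the original snippet:

@Test
public void testFactoryProducesFilterWhenUserFilterIsConfigured() throws AuthenticationException, NamingException {
    // Hypothetical filter value; with neither a user nor a group filter configured the factory returns null.
    conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERFILTER, "user1,user2");
    // 'search' stands in for a Mockito-mocked DirSearch declared elsewhere in the test class.
    when(search.findUserDn("user1")).thenReturn("cn=user1,ou=People,dc=example,dc=com");
    Filter filter = factory.getInstance(conf);
    filter.apply(search, "user1");
}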

Example 45 with HiveConf

use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.

the class TestLdapAuthenticationProviderImpl method setup.

@Before
public void setup() throws AuthenticationException {
    conf = new HiveConf();
    conf.set("hive.root.logger", "DEBUG,console");
    conf.set("hive.server2.authentication.ldap.url", "localhost");
    when(factory.getInstance(any(HiveConf.class), anyString(), anyString())).thenReturn(search);
}
Also used : HiveConf(org.apache.hadoop.hive.conf.HiveConf) Before(org.junit.Before)
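
The setup stubs a DirSearchFactory mock (factory) to return a mocked DirSearch (search); neither field is declared in the snippet. A hedged sketch of a test that could build on this setup, assuming the two-argument test constructor LdapAuthenticationProviderImpl(HiveConf, DirSearchFactory) and the Authenticate method from PasswdAuthenticationProvider; the credentials are illustrative:

@Test
public void testAuthenticateSucceedsWithMockedSearch() throws AuthenticationException {
    // The provider obtains its DirSearch through the factory mock stubbed in setup().
    LdapAuthenticationProviderImpl auth = new LdapAuthenticationProviderImpl(conf, factory);
    // No user or group filters are configured, so the successful (stubbed) bind is enough.
    auth.Authenticate("user1", "password1");
}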

Aggregations

HiveConf (org.apache.hadoop.hive.conf.HiveConf): 404
BeforeClass (org.junit.BeforeClass): 73
Test (org.junit.Test): 66
Path (org.apache.hadoop.fs.Path): 54
Before (org.junit.Before): 50
Driver (org.apache.hadoop.hive.ql.Driver): 46
CliSessionState (org.apache.hadoop.hive.cli.CliSessionState): 44
IOException (java.io.IOException): 39
ArrayList (java.util.ArrayList): 37
File (java.io.File): 31
HashMap (java.util.HashMap): 26
FileSystem (org.apache.hadoop.fs.FileSystem): 26
SessionState (org.apache.hadoop.hive.ql.session.SessionState): 22
LinkedHashMap (java.util.LinkedHashMap): 17
List (java.util.List): 16
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 15
MiniHS2 (org.apache.hive.jdbc.miniHS2.MiniHS2): 14
Map (java.util.Map): 12
HiveMetaStoreClient (org.apache.hadoop.hive.metastore.HiveMetaStoreClient): 12
MetaException (org.apache.hadoop.hive.metastore.api.MetaException): 12