Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
Class HiveServer2, method stop().
@Override
public synchronized void stop() {
  LOG.info("Shutting down HiveServer2");
  HiveConf hiveConf = this.getHiveConf();
  super.stop();
  if (webServer != null) {
    try {
      webServer.stop();
      LOG.info("Web UI has stopped");
    } catch (Exception e) {
      LOG.error("Error stopping Web UI: ", e);
    }
  }
  // Shutdown Metrics
  if (MetricsFactory.getInstance() != null) {
    try {
      MetricsFactory.close();
    } catch (Exception e) {
      LOG.error("error in Metrics deinit: " + e.getClass().getName() + " " + e.getMessage(), e);
    }
  }
  // Remove this server instance from ZooKeeper if dynamic service discovery is set
  if (hiveConf != null && hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY)) {
    try {
      removeServerInstanceFromZooKeeper();
    } catch (Exception e) {
      LOG.error("Error removing znode for this HiveServer2 instance from ZooKeeper.", e);
    }
  }
  // There should already be an instance of the Tez session pool manager.
  // If not, ignoring is fine while stopping HiveServer2.
  if (hiveConf != null && hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS)) {
    try {
      TezSessionPoolManager.getInstance().stop();
    } catch (Exception e) {
      LOG.error("Tez session pool manager stop had an error during stop of HiveServer2. "
          + "Shutting down HiveServer2 anyway.", e);
    }
  }
  if (hiveConf != null && hiveConf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
    try {
      SparkSessionManagerImpl.getInstance().shutdown();
    } catch (Exception ex) {
      LOG.error("Spark session pool manager failed to stop during HiveServer2 shutdown.", ex);
    }
  }
}
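The shutdown branches above are driven entirely by HiveConf settings. As a hedged illustration (not taken from the Hive source; buildServerConf is a made-up helper name), the following sketch shows how the same ConfVars that stop() reads could be set programmatically:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

// Hypothetical helper: builds a HiveConf enabling the features whose
// shutdown paths stop() has to handle.
static HiveConf buildServerConf() {
  HiveConf conf = new HiveConf();
  // Register/unregister this instance in ZooKeeper (dynamic service discovery).
  conf.setBoolVar(ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY, true);
  // Pre-create default Tez sessions, so TezSessionPoolManager.stop() matters at shutdown.
  conf.setBoolVar(ConfVars.HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS, true);
  // Run queries on Spark, so SparkSessionManagerImpl.shutdown() matters at shutdown.
  conf.setVar(ConfVars.HIVE_EXECUTION_ENGINE, "spark");
  return conf;
}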
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
Class HiveServer2, method deleteServerInstancesFromZooKeeper().
/**
 * Removes all znodes corresponding to the given version number from ZooKeeper.
 *
 * @param versionNumber the HiveServer2 version string to match against znode names
 * @throws Exception if listing or deleting the znodes fails
 */
static void deleteServerInstancesFromZooKeeper(String versionNumber) throws Exception {
  HiveConf hiveConf = new HiveConf();
  String zooKeeperEnsemble = ZooKeeperHiveHelper.getQuorumServers(hiveConf);
  String rootNamespace = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_ZOOKEEPER_NAMESPACE);
  int baseSleepTime = (int) hiveConf.getTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME, TimeUnit.MILLISECONDS);
  int maxRetries = hiveConf.getIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES);
  CuratorFramework zooKeeperClient = CuratorFrameworkFactory.builder()
      .connectString(zooKeeperEnsemble)
      .retryPolicy(new ExponentialBackoffRetry(baseSleepTime, maxRetries))
      .build();
  zooKeeperClient.start();
  List<String> znodePaths = zooKeeperClient.getChildren().forPath(
      ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace);
  List<String> znodePathsUpdated;
  // Now for each path that is for the given versionNumber, delete the znode from ZooKeeper
  for (int i = 0; i < znodePaths.size(); i++) {
    String znodePath = znodePaths.get(i);
    deleteSignal = new CountDownLatch(1);
    if (znodePath.contains("version=" + versionNumber + ";")) {
      String fullZnodePath = ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace
          + ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + znodePath;
      LOG.warn("Will attempt to remove the znode: " + fullZnodePath + " from ZooKeeper");
      System.out.println("Will attempt to remove the znode: " + fullZnodePath + " from ZooKeeper");
      zooKeeperClient.delete().guaranteed().inBackground(new DeleteCallBack()).forPath(fullZnodePath);
      // Wait for the delete to complete
      deleteSignal.await();
      // Get the updated path list
      znodePathsUpdated = zooKeeperClient.getChildren().forPath(
          ZooKeeperHiveHelper.ZOOKEEPER_PATH_SEPARATOR + rootNamespace);
      // Gives a list of any new paths that may have been created to maintain the persistent ephemeral node
      znodePathsUpdated.removeAll(znodePaths);
      // Add the new paths to the znodes list. We'll try for their removal as well.
      znodePaths.addAll(znodePathsUpdated);
    }
  }
  zooKeeperClient.close();
}
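This method depends on a static CountDownLatch named deleteSignal and a DeleteCallBack class defined elsewhere in HiveServer2; neither is shown in the snippet. A minimal sketch of how such a background callback could look, assuming Curator's BackgroundCallback API (the actual Hive implementation may differ):

import java.util.concurrent.CountDownLatch;
import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.api.BackgroundCallback;
import org.apache.curator.framework.api.CuratorEvent;
import org.apache.curator.framework.api.CuratorEventType;

// Latch the deleting thread waits on; re-created before each background delete.
private static CountDownLatch deleteSignal;

// Releases the latch once ZooKeeper confirms the background delete finished.
private static class DeleteCallBack implements BackgroundCallback {
  @Override
  public void processResult(CuratorFramework zooKeeperClient, CuratorEvent event) throws Exception {
    if (event.getType() == CuratorEventType.DELETE) {
      deleteSignal.countDown();
    }
  }
}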
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
Class EmbeddedThriftBinaryCLIService, method init().
@Override
public synchronized void init(HiveConf hiveConf) {
  // A null HiveConf may be passed in from the embedded JDBC driver path, which is
  // independent of any hiveConf object. Create a new HiveConf object here in that case.
  if (hiveConf == null) {
    hiveConf = new HiveConf();
  }
  cliService.init(hiveConf);
  cliService.start();
  super.init(hiveConf);
}
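The null check above lets embedded callers (for example, JDBC driver code that carries no configuration of its own) pass null and still get a working service. As a hypothetical illustration of the same defaulting idiom (orDefault is an invented name, not part of Hive):

// Hypothetical helper: fall back to a fresh HiveConf, loaded from the
// classpath's hive-site.xml if present, when the caller passes null.
static HiveConf orDefault(HiveConf hiveConf) {
  return (hiveConf != null) ? hiveConf : new HiveConf();
}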
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
Class TestUserSearchFilter, method setup().
@Before
public void setup() {
  conf = new HiveConf();
  factory = new UserSearchFilterFactory();
}
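As a hedged illustration (the property values below are invented, and the real Hive test may configure different settings), such a setup could also seed the HiveConf with standard HiveServer2 LDAP properties before asking the factory for a filter:

// Hypothetical additional test configuration; the keys are standard HiveServer2
// LDAP properties, the values are made up for illustration only.
conf.set("hive.server2.authentication.ldap.baseDN", "ou=people,dc=example,dc=com");
conf.set("hive.server2.authentication.ldap.userFilter", "user1,user2");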
Use of org.apache.hadoop.hive.conf.HiveConf in project hive by apache.
Class TestLdapAuthenticationProviderImpl, method setup().
@Before
public void setup() throws AuthenticationException {
  conf = new HiveConf();
  conf.set("hive.root.logger", "DEBUG,console");
  conf.set("hive.server2.authentication.ldap.url", "localhost");
  when(factory.getInstance(any(HiveConf.class), anyString(), anyString())).thenReturn(search);
}
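This setup assumes Mockito is available and that factory and search are mock fields declared on the test class; those declarations are not shown in the snippet. A minimal sketch of the scaffolding such a test might use, assuming Mockito 1.x and the DirSearchFactory/DirSearch types from Hive's LDAP authentication package (newer Mockito versions move the matchers to org.mockito.ArgumentMatchers and the runner to org.mockito.junit):

import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.when;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hive.service.auth.ldap.DirSearch;
import org.apache.hive.service.auth.ldap.DirSearchFactory;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;

@RunWith(MockitoJUnitRunner.class)
public class TestLdapAuthenticationProviderImpl {
  // Mocked factory that hands out the mocked directory search used by the test.
  @Mock
  private DirSearchFactory factory;
  @Mock
  private DirSearch search;
  private HiveConf conf;
  // The @Before setup() shown above would follow here.
}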