Use of org.apache.hadoop.hive.cli.CliSessionState in the Apache Hive project: class QTestUtil, method cliInit.
public String cliInit(String tname, boolean recreate) throws Exception {
  if (recreate) {
    // Rebuild the test warehouse from scratch before running this query file.
    cleanUp(tname);
    createSources(tname);
  }
  HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
      "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
  Utilities.clearWorkMap(conf);
  CliSessionState ss = createSessionState();
  assert ss != null;
  ss.in = System.in;

  // Route query output to a per-test file under logDir.
  String outFileExtension = getOutFileExtension(tname);
  String stdoutName = null;
  if (outDir != null) {
    // TODO: why is this needed?
    File qf = new File(outDir, tname);
    stdoutName = qf.getName().concat(outFileExtension);
  } else {
    stdoutName = tname + outFileExtension;
  }
  File outf = new File(logDir, stdoutName);
  OutputStream fo = new BufferedOutputStream(new FileOutputStream(outf));
  // Some query files need their output sorted and/or digested to be comparable.
  if (qSortQuerySet.contains(tname)) {
    ss.out = new SortPrintStream(fo, "UTF-8");
  } else if (qHashQuerySet.contains(tname)) {
    ss.out = new DigestPrintStream(fo, "UTF-8");
  } else if (qSortNHashQuerySet.contains(tname)) {
    ss.out = new SortAndDigestPrintStream(fo, "UTF-8");
  } else {
    ss.out = new PrintStream(fo, true, "UTF-8");
  }
  ss.err = new CachingPrintStream(fo, true, "UTF-8");
  ss.setIsSilent(true);

  SessionState oldSs = SessionState.get();
  boolean canReuseSession = !qNoSessionReuseQuerySet.contains(tname);
  if (oldSs != null && canReuseSession && clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
    // Copy the tezSessionState from the old CliSessionState.
    tezSessionState = oldSs.getTezSession();
    oldSs.setTezSession(null);
    ss.setTezSession(tezSessionState);
    oldSs.close();
  }
  if (oldSs != null && clusterType.getCoreClusterType() == CoreClusterType.SPARK) {
    // Likewise, hand the existing Spark session over to the new session state.
    sparkSession = oldSs.getSparkSession();
    ss.setSparkSession(sparkSession);
    oldSs.setSparkSession(null);
    oldSs.close();
  }
  if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {
    oldSs.out.close();
  }
  SessionState.start(ss);

  cliDriver = new CliDriver();
  if (tname.equals("init_file.q")) {
    ss.initFiles.add(AbstractCliConfig.HIVE_ROOT + "/data/scripts/test_init_file.sql");
  }
  cliDriver.processInitFiles(ss);
  return outf.getAbsolutePath();
}
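Stripped of the harness-specific output redirection and session-reuse logic, the core pattern in cliInit is: build a CliSessionState from a HiveConf, point its streams somewhere, register it with SessionState.start, and then drive queries through a CliDriver. A minimal sketch of that pattern follows; the conf class and the query string are placeholders of our own, not part of QTestUtil, and the statements are assumed to run inside a test method that declares throws Exception.

HiveConf conf = new HiveConf(MyQueryTest.class);    // MyQueryTest is a placeholder test class
CliSessionState ss = new CliSessionState(conf);
ss.in = System.in;
ss.out = new PrintStream(System.out, true, "UTF-8");
ss.err = new PrintStream(System.err, true, "UTF-8");
SessionState.start(ss);                             // make ss the thread's current session
CliDriver cli = new CliDriver();
cli.processLine("SHOW TABLES;");                    // placeholder statement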
Use of org.apache.hadoop.hive.cli.CliSessionState in the Apache Hive project: class TestMarkPartition, method setUp.
@Override
protected void setUp() throws Exception {
  super.setUp();
  // Expire metastore events quickly so the test can exercise event cleanup.
  System.setProperty("hive.metastore.event.clean.freq", "2");
  System.setProperty("hive.metastore.event.expiry.duration", "5");
  hiveConf = new HiveConf(this.getClass());
  hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  SessionState.start(new CliSessionState(hiveConf));
}
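The snippet starts a session but does not show the corresponding cleanup. A plausible tearDown, assuming no other teardown work is required, would simply close the session started here; this is a sketch, not the actual method from TestMarkPartition.

@Override
protected void tearDown() throws Exception {
  SessionState ss = SessionState.get();
  if (ss != null) {
    ss.close();   // release the CliSessionState started in setUp
  }
  super.tearDown();
}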
Use of org.apache.hadoop.hive.cli.CliSessionState in the Apache Hive project: class TestMetaStoreEndFunctionListener, method setUp.
@Override
protected void setUp() throws Exception {
  super.setUp();
  System.setProperty("hive.metastore.event.listeners", DummyListener.class.getName());
  System.setProperty("hive.metastore.pre.event.listeners", DummyPreListener.class.getName());
  System.setProperty("hive.metastore.end.function.listeners", DummyEndFunctionListener.class.getName());
  int port = MetaStoreUtils.findFreePort();
  MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
  hiveConf = new HiveConf(this.getClass());
  hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
  hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  SessionState.start(new CliSessionState(hiveConf));
  msc = new HiveMetaStoreClient(hiveConf);
  driver = new Driver(hiveConf);
}
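With this setUp in place, a test can issue DDL through the Driver and read the result back through the HiveMetaStoreClient. A minimal sketch, using a database and table name chosen for illustration rather than taken from the original test class:

driver.run("create database if not exists listener_test_db");
driver.run("create table listener_test_db.t1 (a int)");
org.apache.hadoop.hive.metastore.api.Table t =
    msc.getTable("listener_test_db", "t1");   // fetch the table the Driver just created
assertEquals("t1", t.getTableName());
driver.run("drop database listener_test_db cascade");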
Use of org.apache.hadoop.hive.cli.CliSessionState in the Apache Hive project: class TestMetaStoreEventListener, method setUp.
@Override
protected void setUp() throws Exception {
  super.setUp();
  System.setProperty("hive.metastore.event.listeners", DummyListener.class.getName());
  System.setProperty("hive.metastore.pre.event.listeners", DummyPreListener.class.getName());
  int port = MetaStoreUtils.findFreePort();
  MetaStoreUtils.startMetaStore(port, ShimLoader.getHadoopThriftAuthBridge());
  hiveConf = new HiveConf(this.getClass());
  hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
  hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  SessionState.start(new CliSessionState(hiveConf));
  msc = new HiveMetaStoreClient(hiveConf);
  driver = new Driver(hiveConf);
  // Start from a clean slate: drop any leftover test database and clear the
  // events captured by the dummy listeners in earlier tests.
  driver.run("drop database if exists " + dbName + " cascade");
  DummyListener.notifyList.clear();
  DummyPreListener.notifyList.clear();
}
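After an operation runs, the dummy listeners record the events they received in their notifyList fields, so tests can assert against them. A rough sketch of how such a check might look; the size-based assertion is an assumption for illustration and is not copied from the original test:

int before = DummyListener.notifyList.size();
driver.run("create database " + dbName);
// At least one new event should have been delivered to the post-event listener.
assertTrue(DummyListener.notifyList.size() > before);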
Use of org.apache.hadoop.hive.cli.CliSessionState in the Apache Hive project: class TestReplicationScenarios, method setUpBeforeClass.
// If verifySetup is set to true, all the test setup we do will perform additional
// verifications as well, which is useful to verify that our setup occurred
// correctly when developing and debugging tests. These verifications, however,
// do not test any new functionality for replication, and thus are not relevant
// for testing replication itself. For steady state, we want this to be false.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  hconf = new HiveConf(TestReplicationScenarios.class);
  String metastoreUri = System.getProperty("test." + HiveConf.ConfVars.METASTOREURIS.varname);
  if (metastoreUri != null) {
    // An external metastore was supplied; use it instead of starting an embedded one.
    hconf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUri);
    useExternalMS = true;
    return;
  }
  // Turn on the db notification listener on the metastore.
  hconf.setVar(HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS,
      DBNOTIF_LISTENER_CLASSNAME);
  hconf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true);
  hconf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true);
  hconf.setVar(HiveConf.ConfVars.REPLCMDIR, TEST_PATH + "/cmroot/");
  msPort = MetaStoreUtils.startMetaStore(hconf);
  hconf.setVar(HiveConf.ConfVars.REPLDIR, TEST_PATH + "/hrepl/");
  hconf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + msPort);
  hconf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
  hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
  hconf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
  hconf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
  System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " ");
  System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " ");
  Path testPath = new Path(TEST_PATH);
  FileSystem fs = FileSystem.get(testPath.toUri(), hconf);
  fs.mkdirs(testPath);
  driver = new Driver(hconf);
  SessionState.start(new CliSessionState(hconf));
  metaStoreClient = new HiveMetaStoreClient(hconf);
}
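The configuration above (REPLCMENABLED, REPLCMDIR, REPLDIR and the db notification listener) is what lets the individual tests exercise replication by dumping a source database and replaying it elsewhere. A rough sketch of that command flow through the Driver, with a placeholder database name and with the load step left as a comment because the real tests read the dump location from the driver's results via their own helpers:

driver.run("create database repl_src_db");
driver.run("create table repl_src_db.t (a int)");
driver.run("repl dump repl_src_db");   // writes the dump under REPLDIR
// The real tests then read the dump location and last event id from the
// driver's results and replay them on the target warehouse, e.g.:
// driver.run("repl load repl_dst_db from '<dump location>'");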