Use of org.apache.hadoop.hive.cli.CliSessionState in project hive by apache.
Class TestMetastoreVersion, method testVersionMisMatch.
/**
 * Store a garbage version in the metastore and verify that Hive fails when schema verification is on.
 * @throws Exception
 */
public void testVersionMisMatch() throws Exception {
  // First run with verification off so the schema and version are auto-created.
  System.setProperty(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString(), "false");
  hiveConf = new HiveConf(this.getClass());
  SessionState.start(new CliSessionState(hiveConf));
  driver = DriverFactory.newDriver(hiveConf);
  driver.run("show tables");

  // Reset the cached verification state, then turn verification on with a
  // bogus recorded version; the next query must fail.
  ObjectStore.setSchemaVerified(false);
  System.setProperty(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString(), "true");
  hiveConf = new HiveConf(this.getClass());
  setVersion(hiveConf, "fooVersion");
  SessionState.start(new CliSessionState(hiveConf));
  driver = DriverFactory.newDriver(hiveConf);
  CommandProcessorResponse proc = driver.run("show tables");
  assertTrue(proc.getResponseCode() != 0);
}
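The bootstrap sequence in this test (build a HiveConf, start a CliSessionState, obtain a driver from DriverFactory, run a command) is the pattern every example on this page repeats. A minimal sketch of just that skeleton, using only calls that appear in these snippets; the MyTest class name is a placeholder:

HiveConf conf = new HiveConf(MyTest.class);  // MyTest is a placeholder class
// Bind a CLI session to the current thread.
SessionState.start(new CliSessionState(conf));
// DriverFactory picks the IDriver implementation for this configuration.
IDriver driver = DriverFactory.newDriver(conf);
CommandProcessorResponse resp = driver.run("show tables");
// A non-zero response code signals failure, as the assertions in these tests rely on.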
Use of org.apache.hadoop.hive.cli.CliSessionState in project hive by apache.
Class TestMetastoreVersion, method testMetastoreVersion.
/**
 * Test that with schema verification disabled and version recording enabled, Hive populates
 * the schema and version correctly.
 * @throws Exception
 */
public void testMetastoreVersion() throws Exception {
  // Let the schema and version be auto-created.
  System.setProperty(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString(), "false");
  System.setProperty(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION.toString(), "true");
  hiveConf = new HiveConf(this.getClass());
  SessionState.start(new CliSessionState(hiveConf));
  driver = DriverFactory.newDriver(hiveConf);
  driver.run("show tables");
  // The correct version is stored by the metastore during startup.
  assertEquals(metastoreSchemaInfo.getHiveSchemaVersion(), getVersion(hiveConf));
  setVersion(hiveConf, "foo");
  assertEquals("foo", getVersion(hiveConf));
}
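Note the two ways these tests toggle the verification flags: as a JVM system property set before the HiveConf is constructed (so the new conf picks it up), or as a typed setter on an existing conf, as testVersionMatching below does. A short sketch of both, using only calls already shown on this page; MyTest is again a placeholder:

// Before constructing the conf: the property is read when HiveConf is built.
System.setProperty(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString(), "true");
HiveConf conf = new HiveConf(MyTest.class);

// On an already-constructed conf: flip the typed boolean directly.
conf.setBoolVar(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION, true);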
Use of org.apache.hadoop.hive.cli.CliSessionState in project hive by apache.
Class TestMetastoreVersion, method testVersionMatching.
/**
 * Test that with verification enabled, Hive works when the correct schema is already populated.
 * @throws Exception
 */
public void testVersionMatching() throws Exception {
  // Start with verification off so the schema is auto-created.
  System.setProperty(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION.toString(), "false");
  hiveConf = new HiveConf(this.getClass());
  SessionState.start(new CliSessionState(hiveConf));
  driver = DriverFactory.newDriver(hiveConf);
  driver.run("show tables");

  // Enable verification and record the correct version; the query should now succeed.
  ObjectStore.setSchemaVerified(false);
  hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_SCHEMA_VERIFICATION, true);
  setVersion(hiveConf, metastoreSchemaInfo.getHiveSchemaVersion());
  driver = DriverFactory.newDriver(hiveConf);
  CommandProcessorResponse proc = driver.run("show tables");
  assertTrue(proc.getResponseCode() == 0);
}
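Together with testVersionMisMatch above, this fixes the response-code contract: a matching recorded version yields 0, a mismatch yields non-zero. A hedged sketch of surfacing the failure detail; getErrorMessage() is assumed to be available on CommandProcessorResponse in this Hive version:

CommandProcessorResponse resp = driver.run("show tables");
if (resp.getResponseCode() != 0) {
  // Assumption: getErrorMessage() carries the schema-verification failure text.
  fail("query failed: " + resp.getErrorMessage());
}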
Use of org.apache.hadoop.hive.cli.CliSessionState in project hive by apache.
Class QTestUtil, method cliInit.
public String cliInit(File file, boolean recreate) throws Exception {
  String fileName = file.getName();
  if (recreate) {
    cleanUp(fileName);
    createSources(fileName);
  }
  initDataSetForTest(file);
  HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER,
      "org.apache.hadoop.hive.ql.security.DummyAuthenticator");
  Utilities.clearWorkMap(conf);
  CliSessionState ss = new CliSessionState(conf);
  assert ss != null;
  ss.in = System.in;

  String outFileExtension = getOutFileExtension(fileName);
  String stdoutName = null;
  if (outDir != null) {
    // TODO: why is this needed?
    File qf = new File(outDir, fileName);
    stdoutName = qf.getName().concat(outFileExtension);
  } else {
    stdoutName = fileName + outFileExtension;
  }
  File outf = new File(logDir, stdoutName);
  OutputStream fo = new BufferedOutputStream(new FileOutputStream(outf));
  // Pick the output stream wrapper that matches the test's comparison mode.
  if (qSortQuerySet.contains(fileName)) {
    ss.out = new SortPrintStream(fo, "UTF-8");
  } else if (qHashQuerySet.contains(fileName)) {
    ss.out = new DigestPrintStream(fo, "UTF-8");
  } else if (qSortNHashQuerySet.contains(fileName)) {
    ss.out = new SortAndDigestPrintStream(fo, "UTF-8");
  } else {
    ss.out = new PrintStream(fo, true, "UTF-8");
  }
  ss.err = new CachingPrintStream(fo, true, "UTF-8");
  ss.setIsSilent(true);

  SessionState oldSs = SessionState.get();
  boolean canReuseSession = !qNoSessionReuseQuerySet.contains(fileName);
  if (oldSs != null && canReuseSession && clusterType.getCoreClusterType() == CoreClusterType.TEZ) {
    // Copy the tezSessionState from the old CliSessionState.
    TezSessionState tezSessionState = oldSs.getTezSession();
    oldSs.setTezSession(null);
    ss.setTezSession(tezSessionState);
    oldSs.close();
  }
  if (oldSs != null && clusterType.getCoreClusterType() == CoreClusterType.SPARK) {
    sparkSession = oldSs.getSparkSession();
    ss.setSparkSession(sparkSession);
    oldSs.setSparkSession(null);
    oldSs.close();
  }
  if (oldSs != null && oldSs.out != null && oldSs.out != System.out) {
    oldSs.out.close();
  }
  if (oldSs != null) {
    oldSs.close();
  }
  SessionState.start(ss);

  cliDriver = new CliDriver();
  if (fileName.equals("init_file.q")) {
    ss.initFiles.add(AbstractCliConfig.HIVE_ROOT + "/data/scripts/test_init_file.sql");
  }
  cliDriver.processInitFiles(ss);
  return outf.getAbsolutePath();
}
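cliInit redirects the session's stdout and stderr into a per-test log file, wrapping the stream differently (sorted, digested, or plain) depending on which query set the file belongs to. The plain case uses only standard java.io; a self-contained sketch of that wiring, with hypothetical paths:

import java.io.*;

public class SessionOutDemo {
  public static void main(String[] args) throws Exception {
    File outf = new File("/tmp", "demo.q.out");  // hypothetical output file
    OutputStream fo = new BufferedOutputStream(new FileOutputStream(outf));
    // autoflush=true so partial output survives an aborted test; UTF-8 matches
    // the encoding the golden .out files expect.
    PrintStream out = new PrintStream(fo, true, "UTF-8");
    out.println("hello from the session");
    out.close();
  }
}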
Use of org.apache.hadoop.hive.cli.CliSessionState in project hive by apache.
Class TestHiveHistory, method testSimpleQuery.
/**
* Check history file output for this query.
*/
public void testSimpleQuery() {
  new LineageInfo();
  try {
    // Initialize logging before any of the other core Hive classes are loaded.
    try {
      LogUtils.initHiveLog4j();
    } catch (LogInitializationException e) {
      // Ignored: log initialization failure is not fatal for this test.
    }
    HiveConf hconf = new HiveConf(SessionState.class);
    hconf.setBoolVar(ConfVars.HIVE_SESSION_HISTORY_ENABLED, true);
    CliSessionState ss = new CliSessionState(hconf);
    ss.in = System.in;
    try {
      ss.out = new PrintStream(System.out, true, "UTF-8");
      ss.err = new PrintStream(System.err, true, "UTF-8");
    } catch (UnsupportedEncodingException e) {
      System.exit(3);
    }
    SessionState.start(ss);

    String cmd = "select a.key+1 from src a";
    IDriver d = DriverFactory.newDriver(conf);  // 'conf' is the test class's HiveConf field
    int ret = d.run(cmd).getResponseCode();
    if (ret != 0) {
      fail("Failed");
    }

    // Replay the session's history file and verify one job with one task was recorded.
    HiveHistoryViewer hv = new HiveHistoryViewer(SessionState.get().getHiveHistory().getHistFileName());
    Map<String, QueryInfo> jobInfoMap = hv.getJobInfoMap();
    Map<String, TaskInfo> taskInfoMap = hv.getTaskInfoMap();
    if (jobInfoMap.size() != 1) {
      fail("jobInfo Map size not 1");
    }
    if (taskInfoMap.size() != 1) {
      fail("taskInfo Map size not 1");
    }
    cmd = (String) jobInfoMap.keySet().toArray()[0];
    QueryInfo ji = jobInfoMap.get(cmd);
    if (!ji.hm.get(Keys.QUERY_NUM_TASKS.name()).equals("1")) {
      fail("Wrong number of tasks");
    }
  } catch (Exception e) {
    e.printStackTrace();
    fail("Failed");
  }
}
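The history assertions follow one pattern: ask the running session for its history file, load it into a HiveHistoryViewer, and walk the job and task maps. A minimal sketch using only the calls shown above; it assumes a started SessionState with HIVE_SESSION_HISTORY_ENABLED set, as in the test:

String histFile = SessionState.get().getHiveHistory().getHistFileName();
HiveHistoryViewer viewer = new HiveHistoryViewer(histFile);
// Each entry maps a query string to its recorded QueryInfo.
for (Map.Entry<String, QueryInfo> e : viewer.getJobInfoMap().entrySet()) {
  String numTasks = e.getValue().hm.get(Keys.QUERY_NUM_TASKS.name());
  System.out.println(e.getKey() + " -> tasks=" + numTasks);
}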