Use of org.apache.hadoop.hive.ql.Context in project hive by apache.
From the class TestUpdateDeleteSemanticAnalyzer, method parseAndAnalyze.
private ReturnInfo parseAndAnalyze(String query, String testName) throws IOException, ParseException, HiveException {
  SessionState.start(conf);
  Context ctx = new Context(conf);
  ctx.setCmd(query);
  ctx.setHDFSCleanup(true);
  ASTNode tree = ParseUtils.parse(query, ctx);
  BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(queryState, tree);
  SessionState.get().initTxnMgr(conf);
  SessionState.get().getTxnMgr().openTxn(ctx, conf.getUser());
  db = sem.getDb();
  // I have to create the tables here (rather than in setup()) because I need the Hive
  // connection, which is conveniently created by the semantic analyzer.
  Map<String, String> params = new HashMap<String, String>(1);
  params.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
  db.createTable("T", Arrays.asList("a", "b"), null, OrcInputFormat.class, OrcOutputFormat.class, 2, Arrays.asList("a"), params);
  db.createTable("U", Arrays.asList("a", "b"), Arrays.asList("ds"), OrcInputFormat.class, OrcOutputFormat.class, 2, Arrays.asList("a"), params);
  Table u = db.getTable("U");
  Map<String, String> partVals = new HashMap<String, String>(2);
  partVals.put("ds", "yesterday");
  db.createPartition(u, partVals);
  partVals.clear();
  partVals.put("ds", "today");
  db.createPartition(u, partVals);
  sem.analyze(tree, ctx);
  // validate the plan
  sem.validate();
  QueryPlan plan = new QueryPlan(query, sem, 0L, testName, null, null);
  return new ReturnInfo(sem, plan);
}
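A usage sketch, not part of the snippet above: a test method in this class could drive the helper roughly as follows, assuming ReturnInfo exposes the QueryPlan as a field named plan (the field name, test name, and query are assumptions, not taken from the source).
@Test
public void testUpdateOnTransactionalTable() throws Exception {
  // "T" is the transactional ORC table created inside parseAndAnalyze
  ReturnInfo rc = parseAndAnalyze("update T set b = 5 where a = 3", "testUpdate");
  // parseAndAnalyze already ran analyze() and validate(); here we only check that a plan came back
  assertNotNull(rc.plan);
}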
Use of org.apache.hadoop.hive.ql.Context in project hive by apache.
From the class AuthorizationTestUtil, method analyze.
/**
 * Create DDLWork from the given AST.
 * @param ast parsed DDL statement
 * @param queryState query state whose HiveConf is used to start the session and to build the Context
 * @param db Hive database handle passed to the analyzer
 * @return the DDLWork of the single root task produced by the analyzer
 * @throws Exception if semantic analysis fails
 */
public static DDLWork analyze(ASTNode ast, QueryState queryState, Hive db) throws Exception {
  DDLSemanticAnalyzer analyzer = new DDLSemanticAnalyzer(queryState, db);
  SessionState.start(queryState.getConf());
  analyzer.analyze(ast, new Context(queryState.getConf()));
  List<Task<? extends Serializable>> rootTasks = analyzer.getRootTasks();
  return (DDLWork) inList(rootTasks).ofSize(1).get(0).getWork();
}
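A usage sketch (assumed, not taken from the test code): the AST can be produced with ParseDriver in the same way the replication tests below do, then handed to this utility; the DDL statement and table name are purely illustrative.
ParseDriver pd = new ParseDriver();
// getChild(0) selects the statement node under the parse-tree root, as in the tests below
ASTNode ast = (ASTNode) pd.parse("drop table if exists t1").getChild(0);
DDLWork work = AuthorizationTestUtil.analyze(ast, queryState, db);
// work now carries the descriptor of the single root task, ready for authorization checks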
Use of org.apache.hadoop.hive.ql.Context in project hive by apache.
From the class PartialScanTask, method execute.
/**
 * Start a new map-reduce job to do a partial scan to calculate stats,
 * almost the same as BlockMergeTask or ExecDriver.
 */
@Override
public int execute(DriverContext driverContext) {
  HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, CombineHiveInputFormat.class.getName());
  success = true;
  HiveFileFormatUtils.prepareJobOutput(job);
  job.setOutputFormat(HiveOutputFormatImpl.class);
  job.setMapperClass(work.getMapperClass());
  Context ctx = driverContext.getCtx();
  boolean ctxCreated = false;
  try {
    if (ctx == null) {
      ctx = new Context(job);
      ctxCreated = true;
    }
  } catch (IOException e) {
    e.printStackTrace();
    console.printError("Error launching map-reduce job", "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
    return 5;
  }
  job.setMapOutputKeyClass(NullWritable.class);
  job.setMapOutputValueClass(NullWritable.class);
  if (work.getNumMapTasks() != null) {
    job.setNumMapTasks(work.getNumMapTasks());
  }
  // zero reducers
  job.setNumReduceTasks(0);
  if (work.getMinSplitSize() != null) {
    HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work.getMinSplitSize().longValue());
  }
  if (work.getInputformat() != null) {
    HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, work.getInputformat());
  }
  String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT);
  LOG.info("Using " + inpFormat);
  try {
    job.setInputFormat(JavaUtils.loadClass(inpFormat));
  } catch (ClassNotFoundException e) {
    throw new RuntimeException(e.getMessage(), e);
  }
  job.setOutputKeyClass(NullWritable.class);
  job.setOutputValueClass(NullWritable.class);
  int returnVal = 0;
  RunningJob rj = null;
  boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
  String jobName = null;
  if (noName && this.getQueryPlan() != null) {
    int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
    jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6);
  }
  if (noName) {
    // This is for a special case to ensure unit tests pass
    job.set(MRJobConfig.JOB_NAME, jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
  }
  // pass aggregation key to mapper
  HiveConf.setVar(job, HiveConf.ConfVars.HIVE_STATS_KEY_PREFIX, work.getAggKey());
  job.set(StatsSetupConst.STATS_TMP_LOC, work.getStatsTmpDir());
  try {
    addInputPaths(job, work);
    MapredWork mrWork = new MapredWork();
    mrWork.setMapWork(work);
    Utilities.setMapRedWork(job, mrWork, ctx.getMRTmpPath());
    // remove the pwd from the conf so that the job tracker doesn't show it in its logs
    String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
    if (pwd != null) {
      HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
    }
    JobClient jc = new JobClient(job);
    String addedJars = Utilities.getResourceFiles(job, SessionState.ResourceType.JAR);
    if (!addedJars.isEmpty()) {
      job.set("tmpjars", addedJars);
    }
    // make this client wait if the job tracker is not behaving well.
    Throttle.checkJobTracker(job, LOG);
    if (work.isGatheringStats()) {
      // initialize stats publishing table
      StatsPublisher statsPublisher;
      StatsFactory factory = StatsFactory.newFactory(job);
      if (factory != null) {
        statsPublisher = factory.getStatsPublisher();
        StatsCollectionContext sc = new StatsCollectionContext(job);
        sc.setStatsTmpDir(work.getStatsTmpDir());
        if (!statsPublisher.init(sc)) {
          // creating stats table if not exists
          if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
            throw new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());
          }
        }
      }
    }
    // Finally SUBMIT the JOB!
    rj = jc.submitJob(job);
    this.jobID = rj.getJobID();
    returnVal = jobExecHelper.progress(rj, jc, ctx);
    success = (returnVal == 0);
  } catch (Exception e) {
    e.printStackTrace();
    setException(e);
    String mesg = " with exception '" + Utilities.getNameMessage(e) + "'";
    if (rj != null) {
      mesg = "Ended Job = " + rj.getJobID() + mesg;
    } else {
      mesg = "Job Submission failed" + mesg;
    }
    // Has to use full name to make sure it does not conflict with
    // org.apache.commons.lang.StringUtils
    console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
    success = false;
    returnVal = 1;
  } finally {
    try {
      if (ctxCreated) {
        ctx.clear();
      }
      if (rj != null) {
        if (returnVal != 0) {
          rj.killJob();
        }
      }
    } catch (Exception e) {
      LOG.warn("Failed in cleaning up ", e);
    } finally {
      HadoopJobExecHelper.runningJobs.remove(rj);
    }
  }
  return (returnVal);
}
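Condensed from the method above, the Context handling follows a create-if-absent / clear-if-created pattern: a Context handed down by the Driver is reused and left alone, while one created here is cleared in the finally block. A minimal sketch of just that lifecycle, with the job setup and submission elided:
Context ctx = driverContext.getCtx();
boolean ctxCreated = false;
try {
  if (ctx == null) {
    // no Context handed down by the Driver, so create one scoped to this task
    ctx = new Context(job);
    ctxCreated = true;
  }
  // ... configure the job, using ctx.getMRTmpPath() for scratch space, then submit it ...
} catch (IOException e) {
  // mirrors the early "return 5" error path in execute() above
  return 5;
} finally {
  try {
    if (ctxCreated) {
      // only remove scratch directories for a Context this task created itself
      ctx.clear();
    }
  } catch (Exception e) {
    LOG.warn("Failed in cleaning up ", e);
  }
}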
Use of org.apache.hadoop.hive.ql.Context in project hive by apache.
From the class TestReplicationSemanticAnalyzer, method testReplLoadAnalyze.
//@Test
public void testReplLoadAnalyze() throws Exception {
  ParseDriver pd = new ParseDriver();
  ASTNode root;
  String replRoot = conf.getVar(HiveConf.ConfVars.REPLDIR);
  FileSystem fs = FileSystem.get(conf);
  Path dumpRoot = new Path(replRoot, "next");
  System.out.println(replRoot);
  System.out.println(dumpRoot);
  String newDB = "default_bak";
  // First create a dump
  String query = "repl dump " + defaultDB;
  root = (ASTNode) pd.parse(query).getChild(0);
  ReplicationSemanticAnalyzer rs = (ReplicationSemanticAnalyzer) SemanticAnalyzerFactory.get(queryState, root);
  rs.analyze(root, new Context(conf));
  // Then analyze load
  query = "repl load from '" + dumpRoot.toString() + "'";
  root = (ASTNode) pd.parse(query).getChild(0);
  rs = (ReplicationSemanticAnalyzer) SemanticAnalyzerFactory.get(queryState, root);
  rs.analyze(root, new Context(conf));
  List<Task<? extends Serializable>> roots = rs.getRootTasks();
  assertEquals(1, roots.size());
  query = "repl load " + newDB + " from '" + dumpRoot.toString() + "'";
  root = (ASTNode) pd.parse(query).getChild(0);
  rs = (ReplicationSemanticAnalyzer) SemanticAnalyzerFactory.get(queryState, root);
  rs.analyze(root, new Context(conf));
  roots = rs.getRootTasks();
  assertEquals(1, roots.size());
}
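The parse / analyze / getRootTasks sequence repeats for every REPL statement in this test; a helper of the kind one might factor out is sketched below. The helper name analyzeRepl is hypothetical; everything inside it is taken from the test above.
private List<Task<? extends Serializable>> analyzeRepl(String query) throws Exception {
  ParseDriver pd = new ParseDriver();
  ASTNode root = (ASTNode) pd.parse(query).getChild(0);
  ReplicationSemanticAnalyzer rs =
      (ReplicationSemanticAnalyzer) SemanticAnalyzerFactory.get(queryState, root);
  // each statement gets its own Context, just as in the test above
  rs.analyze(root, new Context(conf));
  return rs.getRootTasks();
}
With it, each check collapses to a single line, e.g. assertEquals(1, analyzeRepl("repl load " + newDB + " from '" + dumpRoot + "'").size()).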
Use of org.apache.hadoop.hive.ql.Context in project hive by apache.
From the class TestReplicationSemanticAnalyzer, method testReplStatusAnalyze.
@Test
public void testReplStatusAnalyze() throws Exception {
  ParseDriver pd = new ParseDriver();
  ASTNode root;
  // Repl status command
  String query = "repl status " + defaultDB;
  root = (ASTNode) pd.parse(query).getChild(0);
  ReplicationSemanticAnalyzer rs = (ReplicationSemanticAnalyzer) SemanticAnalyzerFactory.get(queryState, root);
  rs.analyze(root, new Context(conf));
  FetchTask fetchTask = rs.getFetchTask();
  assertNotNull(fetchTask);
}
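Each analyze call in these tests builds a throwaway Context(conf). If a test wants to clean up the scratch directories that Context creates, it can keep a reference and clear it afterwards, reusing the ctx.clear() call seen in PartialScanTask above. A sketch of that variant (illustrative only, not in the original test; the test method's throws Exception covers clear()'s checked exception):
Context ctx = new Context(conf);
try {
  rs.analyze(root, ctx);
  assertNotNull(rs.getFetchTask());
} finally {
  // clear() removes the scratch directories created for this command
  ctx.clear();
}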