Use of org.apache.hadoop.hive.ql.Context in project hive by apache.
The class TezTask, method execute.
@Override
public int execute(DriverContext driverContext) {
int rc = 1;
boolean cleanContext = false;
Context ctx = null;
TezSessionState session = null;
try {
// Get or create Context object. If we create it we have to clean it later as well.
ctx = driverContext.getCtx();
if (ctx == null) {
ctx = new Context(conf);
cleanContext = true;
}
// Need to remove this static hack. But this is the way currently to get a session.
SessionState ss = SessionState.get();
session = ss.getTezSession();
if (session != null && !session.isOpen()) {
LOG.warn("The session: " + session + " has not been opened");
}
session = TezSessionPoolManager.getInstance().getSession(session, conf, false, getWork().getLlapMode());
ss.setTezSession(session);
try {
// jobConf will hold all the configuration for hadoop, tez, and hive
JobConf jobConf = utils.createConfiguration(conf);
// Get all user jars from work (e.g. input format stuff).
String[] inputOutputJars = work.configureJobConfAndExtractJars(jobConf);
// we will localize all the files (jars, plans, hashtables) to the
// scratch dir. let's create this and tmp first.
Path scratchDir = ctx.getMRScratchDir();
// create the tez tmp dir
scratchDir = utils.createTezDir(scratchDir, conf);
Map<String, LocalResource> inputOutputLocalResources = getExtraLocalResources(jobConf, scratchDir, inputOutputJars);
// Ensure the session is open and has the necessary local resources
updateSession(session, jobConf, scratchDir, inputOutputJars, inputOutputLocalResources);
List<LocalResource> additionalLr = session.getLocalizedResources();
logResources(additionalLr);
// unless already installed on all the cluster nodes, we'll have to
// localize hive-exec.jar as well.
LocalResource appJarLr = session.getAppJarLr();
// next we translate the TezWork to a Tez DAG
DAG dag = build(jobConf, work, scratchDir, appJarLr, additionalLr, ctx);
CallerContext callerContext = CallerContext.create("HIVE", queryPlan.getQueryId(), "HIVE_QUERY_ID", queryPlan.getQueryStr());
dag.setCallerContext(callerContext);
// Add the extra resources to the dag
addExtraResourcesToDag(session, dag, inputOutputJars, inputOutputLocalResources);
// submit will send the job to the cluster and start executing
dagClient = submit(jobConf, dag, scratchDir, appJarLr, session, additionalLr, inputOutputJars, inputOutputLocalResources);
// finally monitor will print progress until the job is done
TezJobMonitor monitor = new TezJobMonitor(work.getWorkMap(), dagClient, conf, dag, ctx);
rc = monitor.monitorExecution();
if (rc != 0) {
this.setException(new HiveException(monitor.getDiagnostics()));
}
// fetch the counters
try {
Set<StatusGetOpts> statusGetOpts = EnumSet.of(StatusGetOpts.GET_COUNTERS);
counters = dagClient.getDAGStatus(statusGetOpts).getDAGCounters();
} catch (Exception err) {
// Don't fail execution due to counters - just don't print summary info
LOG.warn("Failed to get counters. Ignoring, summary info will be incomplete. " + err, err);
counters = null;
}
} finally {
// We return this to the pool even if it's unusable; reopen is supposed to handle this.
try {
TezSessionPoolManager.getInstance().returnSession(session, getWork().getLlapMode());
} catch (Exception e) {
LOG.error("Failed to return session: {} to pool", session, e);
throw e;
}
}
if (LOG.isInfoEnabled() && counters != null && (HiveConf.getBoolVar(conf, HiveConf.ConfVars.TEZ_EXEC_SUMMARY) || Utilities.isPerfOrAboveLogging(conf))) {
for (CounterGroup group : counters) {
LOG.info(group.getDisplayName() + ":");
for (TezCounter counter : group) {
LOG.info(" " + counter.getDisplayName() + ": " + counter.getValue());
}
}
}
} catch (Exception e) {
LOG.error("Failed to execute tez graph.", e);
// rc will be 1 at this point indicating failure.
} finally {
Utilities.clearWork(conf);
// Clear gWorkMap
for (BaseWork w : work.getAllWork()) {
JobConf workCfg = workToConf.get(w);
if (workCfg != null) {
Utilities.clearWorkMapForConf(workCfg);
}
}
if (cleanContext) {
try {
ctx.clear();
} catch (Exception e) {
/*best effort*/
LOG.warn("Failed to clean up after tez job", e);
}
}
// need to either move tmp files or remove them
if (dagClient != null) {
// rc will only be overwritten if close errors out
rc = close(work, rc);
}
}
return rc;
}
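The method above follows a recurring Hive idiom: borrow the Context from the DriverContext when one exists, otherwise create it and take responsibility for clearing it in the finally block. A minimal sketch of just that lifecycle, with the Tez-specific work elided (the class and helper names are illustrative, not part of Hive):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.DriverContext;

public class ContextLifecycleSketch {
    // Illustrative helper: reuse the driver's Context if present, otherwise
    // create one and clean it up ourselves, mirroring TezTask.execute.
    static int runWithContext(DriverContext driverContext, HiveConf conf) {
        Context ctx = driverContext.getCtx();
        boolean cleanContext = false;
        try {
            if (ctx == null) {
                ctx = new Context(conf); // Context(Configuration) may throw IOException
                cleanContext = true;
            }
            // ... task-specific work using ctx.getMRScratchDir() etc. ...
            return 0;
        } catch (Exception e) {
            return 1; // rc stays 1 on failure, as in the original
        } finally {
            if (cleanContext) {
                try {
                    ctx.clear(); // best effort, like the original finally block
                } catch (Exception e) {
                    // cleanup failure must not mask the task's return code
                }
            }
        }
    }
}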
Use of org.apache.hadoop.hive.ql.Context in project hive by apache.
The class GenMRUnion1, method processSubQueryUnionCreateIntermediate.
/**
* Process the union when the parent is a map-reduce job. Create a temporary
* output, and let the union task read from the temporary output.
*
* The files created for all the inputs are in the union context and later
* used to initialize the union plan
*
* @param parent
* @param child
* @param uTask
* @param ctx
* @param uCtxTask
*/
private void processSubQueryUnionCreateIntermediate(Operator<? extends OperatorDesc> parent, Operator<? extends OperatorDesc> child, Task<? extends Serializable> uTask, GenMRProcContext ctx, GenMRUnionCtx uCtxTask) {
ParseContext parseCtx = ctx.getParseCtx();
TableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils.getFieldSchemasFromRowSchema(parent.getSchema(), "temporarycol"));
// generate the temporary file
Context baseCtx = parseCtx.getContext();
Path taskTmpDir = baseCtx.getMRTmpPath();
// Create the temporary file, its corresponding FileSinkOperator, and
// its corresponding TableScanOperator.
TableScanOperator tableScanOp = GenMapRedUtils.createTemporaryFile(parent, child, taskTmpDir, tt_desc, parseCtx);
// Add the path to alias mapping
uCtxTask.addTaskTmpDir(taskTmpDir.toUri().toString());
uCtxTask.addTTDesc(tt_desc);
uCtxTask.addListTopOperators(tableScanOp);
// The union task is empty. The files created for all the inputs are
// assembled in the union context and later used to initialize the union
// plan
Task<? extends Serializable> currTask = ctx.getCurrTask();
currTask.addDependentTask(uTask);
if (ctx.getRootTasks().contains(uTask)) {
ctx.getRootTasks().remove(uTask);
if (!ctx.getRootTasks().contains(currTask) && shouldBeRootTask(currTask)) {
ctx.getRootTasks().add(currTask);
}
}
}
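The Context call that matters here is getMRTmpPath(), which hands out a fresh temporary location under the query's scratch space; the parent job writes there and the union task later reads from it. A hedged sketch of reserving and recording such a path (the class and the recordedDirs parameter are illustrative stand-ins for GenMRUnionCtx):

import java.util.List;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.Context;

final class IntermediatePathSketch {
    // Illustrative: reserve a per-query tmp dir from the Context and record it
    // so a downstream task knows where the parent job left its output.
    static String reserveIntermediateDir(Context baseCtx, List<String> recordedDirs) {
        Path taskTmpDir = baseCtx.getMRTmpPath();
        String dir = taskTmpDir.toUri().toString();
        recordedDirs.add(dir); // stands in for GenMRUnionCtx.addTaskTmpDir(dir)
        return dir;
    }
}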
Use of org.apache.hadoop.hive.ql.Context in project hive by apache.
The class PartialScanTask, method execute.
/**
 * Start a new map-reduce job to do a partial scan to calculate stats,
 * almost the same as BlockMergeTask or ExecDriver.
 */
@Override
public int execute(DriverContext driverContext) {
HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, CombineHiveInputFormat.class.getName());
success = true;
HiveFileFormatUtils.prepareJobOutput(job);
job.setOutputFormat(HiveOutputFormatImpl.class);
job.setMapperClass(work.getMapperClass());
Context ctx = driverContext.getCtx();
boolean ctxCreated = false;
try {
if (ctx == null) {
ctx = new Context(job);
ctxCreated = true;
}
} catch (IOException e) {
e.printStackTrace();
console.printError("Error launching map-reduce job", "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
return 5;
}
job.setMapOutputKeyClass(NullWritable.class);
job.setMapOutputValueClass(NullWritable.class);
if (work.getNumMapTasks() != null) {
job.setNumMapTasks(work.getNumMapTasks());
}
// zero reducers
job.setNumReduceTasks(0);
if (work.getMinSplitSize() != null) {
HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work.getMinSplitSize().longValue());
}
if (work.getInputformat() != null) {
HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, work.getInputformat());
}
String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT);
LOG.info("Using " + inpFormat);
try {
job.setInputFormat(JavaUtils.loadClass(inpFormat));
} catch (ClassNotFoundException e) {
throw new RuntimeException(e.getMessage(), e);
}
job.setOutputKeyClass(NullWritable.class);
job.setOutputValueClass(NullWritable.class);
int returnVal = 0;
RunningJob rj = null;
boolean noName = StringUtils.isEmpty(job.get(MRJobConfig.JOB_NAME));
String jobName = null;
if (noName && this.getQueryPlan() != null) {
int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH);
jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6);
}
if (noName) {
// This is for a special case to ensure unit tests pass
job.set(MRJobConfig.JOB_NAME, jobName != null ? jobName : "JOB" + Utilities.randGen.nextInt());
}
// pass aggregation key to mapper
HiveConf.setVar(job, HiveConf.ConfVars.HIVE_STATS_KEY_PREFIX, work.getAggKey());
job.set(StatsSetupConst.STATS_TMP_LOC, work.getStatsTmpDir());
try {
addInputPaths(job, work);
MapredWork mrWork = new MapredWork();
mrWork.setMapWork(work);
Utilities.setMapRedWork(job, mrWork, ctx.getMRTmpPath());
// remove the pwd from the conf file so that the job tracker doesn't show
// it in its logs
String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
if (pwd != null) {
HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
}
JobClient jc = new JobClient(job);
String addedJars = Utilities.getResourceFiles(job, SessionState.ResourceType.JAR);
if (!addedJars.isEmpty()) {
job.set("tmpjars", addedJars);
}
// make this client wait if the job tracker is not behaving well.
Throttle.checkJobTracker(job, LOG);
if (work.isGatheringStats()) {
// initialize stats publishing table
StatsPublisher statsPublisher;
StatsFactory factory = StatsFactory.newFactory(job);
if (factory != null) {
statsPublisher = factory.getStatsPublisher();
StatsCollectionContext sc = new StatsCollectionContext(job);
sc.setStatsTmpDir(work.getStatsTmpDir());
if (!statsPublisher.init(sc)) {
// create the stats table if it does not exist
if (HiveConf.getBoolVar(job, HiveConf.ConfVars.HIVE_STATS_RELIABLE)) {
throw new HiveException(ErrorMsg.STATSPUBLISHER_INITIALIZATION_ERROR.getErrorCodedMsg());
}
}
}
}
// Finally SUBMIT the JOB!
rj = jc.submitJob(job);
this.jobID = rj.getJobID();
returnVal = jobExecHelper.progress(rj, jc, ctx);
success = (returnVal == 0);
} catch (Exception e) {
e.printStackTrace();
setException(e);
String mesg = " with exception '" + Utilities.getNameMessage(e) + "'";
if (rj != null) {
mesg = "Ended Job = " + rj.getJobID() + mesg;
} else {
mesg = "Job Submission failed" + mesg;
}
// Has to use full name to make sure it does not conflict with
// org.apache.commons.lang.StringUtils
console.printError(mesg, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
success = false;
returnVal = 1;
} finally {
try {
if (ctxCreated) {
ctx.clear();
}
if (rj != null) {
if (returnVal != 0) {
rj.killJob();
}
}
} catch (Exception e) {
LOG.warn("Failed in cleaning up ", e);
} finally {
HadoopJobExecHelper.runningJobs.remove(rj);
}
}
return (returnVal);
}
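A small but deliberate detail in this task: before the JobConf is handed to the JobClient, the metastore password is overwritten so it cannot surface in the job tracker's view of the configuration. Isolated as a sketch (the class name is illustrative; the "HIVE" placeholder is taken from the code above):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.mapred.JobConf;

final class ConfScrubSketch {
    // Replace the metastore password with a harmless placeholder before the
    // configuration leaves the process, as PartialScanTask.execute does.
    static void scrubMetastorePassword(JobConf job) {
        String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD);
        if (pwd != null) {
            HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE");
        }
    }
}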
Use of org.apache.hadoop.hive.ql.Context in project hive by apache.
The class ColumnStatsAutoGatherContext, method genSelOpForAnalyze.
@SuppressWarnings("rawtypes")
private Operator genSelOpForAnalyze(String analyzeCommand, Context origCtx) throws IOException, ParseException, SemanticException {
//0. initialization
Context ctx = new Context(conf);
ctx.setExplainConfig(origCtx.getExplainConfig());
ASTNode tree = ParseUtils.parse(analyzeCommand, ctx);
//1. get the ColumnStatsSemanticAnalyzer
BaseSemanticAnalyzer baseSem = SemanticAnalyzerFactory.get(new QueryState(conf), tree);
ColumnStatsSemanticAnalyzer colSem = (ColumnStatsSemanticAnalyzer) baseSem;
//2. get the rewritten AST
ASTNode ast = colSem.rewriteAST(tree, this);
baseSem = SemanticAnalyzerFactory.get(new QueryState(conf), ast);
SemanticAnalyzer sem = (SemanticAnalyzer) baseSem;
QB qb = new QB(null, null, false);
ASTNode child = ast;
ParseContext subPCtx = ((SemanticAnalyzer) sem).getParseContext();
subPCtx.setContext(ctx);
((SemanticAnalyzer) sem).initParseCtx(subPCtx);
sem.doPhase1(child, qb, sem.initPhase1Ctx(), null);
// This will trigger new calls to metastore to collect metadata
// TODO: cache the information from the metastore
sem.getMetaData(qb);
Operator<?> operator = sem.genPlan(qb);
//3. populate the load file work so that ColumnStatsTask can work
loadFileWork.addAll(sem.getLoadFileWork());
//4. because there is only one TS for the analyze statement, we can get it directly.
if (sem.topOps.values().size() != 1) {
throw new SemanticException("ColumnStatsAutoGatherContext is expecting exactly one TS, but finds " + sem.topOps.values().size());
}
operator = sem.topOps.values().iterator().next();
//5. get the first SEL after TS
while (!(operator instanceof SelectOperator)) {
operator = operator.getChildOperators().get(0);
}
return operator;
}
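Note how the rewritten analyze command is compiled against a brand-new Context, with only the explain configuration carried over from the original; this keeps the generated statement from disturbing the scratch state of the query that triggered it. A hedged sketch of just that step (assuming ParseUtils.parse(String, Context) behaves as used above; the class name is illustrative):

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.ParseUtils;

final class ReparseSketch {
    // Illustrative: parse a generated command in its own Context, copying only
    // the explain settings from the originating query's Context.
    static ASTNode parseInFreshContext(String command, Context origCtx, HiveConf conf) throws Exception {
        Context ctx = new Context(conf);
        ctx.setExplainConfig(origCtx.getExplainConfig());
        return ParseUtils.parse(command, ctx);
    }
}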
Use of org.apache.hadoop.hive.ql.Context in project hive by apache.
The class TestHiveAuthorizationTaskFactory, method setup.
@Before
public void setup() throws Exception {
queryState = new QueryState(null);
HiveConf conf = queryState.getConf();
conf.setVar(ConfVars.HIVE_AUTHORIZATION_TASK_FACTORY, TestHiveAuthorizationTaskFactory.DummyHiveAuthorizationTaskFactoryImpl.class.getName());
conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
db = Mockito.mock(Hive.class);
table = new Table(DB, TABLE);
partition = new Partition(table);
SessionState.start(conf);
context = new Context(conf);
parseDriver = new ParseDriver();
analyzer = new DDLSemanticAnalyzer(queryState, db);
Mockito.when(db.getTable(DB, TABLE, false)).thenReturn(table);
Mockito.when(db.getTable(TABLE_QNAME, false)).thenReturn(table);
Mockito.when(db.getPartition(table, new HashMap<String, String>(), false)).thenReturn(partition);
HadoopDefaultAuthenticator auth = new HadoopDefaultAuthenticator();
auth.setConf(conf);
currentUser = auth.getUserName();
DummyHiveAuthorizationTaskFactoryImpl.reset();
}
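With this fixture in place, a test can parse a DDL command using the ParseDriver and the Context built above, then hand the tree to the analyzer. A hypothetical test method in that style (the command and assertion are illustrative, not taken from the real test class, and assume ParseDriver.parse(String, Context) and BaseSemanticAnalyzer.analyze(ASTNode, Context) as in this Hive version):

@Test
public void testGrantRoleProducesOneTask() throws Exception {
    // Hypothetical: parse with the Context created in setup(), then analyze.
    ASTNode ast = parseDriver.parse("GRANT ROLE role1 TO USER " + currentUser, context);
    analyzer.analyze(ast, context);
    Assert.assertEquals(1, analyzer.getRootTasks().size());
}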