Use of org.apache.hadoop.hive.ql.TaskQueue in project hive by apache.
The class TestMapRedTask, method mrTaskSumbitViaChildWithImpersonation.
@Test
public void mrTaskSumbitViaChildWithImpersonation() throws IOException, LoginException {
  // Run as a proxy user so the spawned child JVM is expected to carry HADOOP_PROXY_USER.
  Utils.getUGI().setAuthenticationMethod(PROXY);
  Context ctx = Mockito.mock(Context.class);
  when(ctx.getLocalTmpPath()).thenReturn(new Path(System.getProperty("java.io.tmpdir")));
  TaskQueue taskQueue = new TaskQueue(ctx);
  QueryState queryState = new QueryState.Builder().build();
  HiveConf conf = queryState.getConf();
  conf.setBoolVar(HiveConf.ConfVars.SUBMITVIACHILD, true);
  MapredWork mrWork = new MapredWork();
  mrWork.setMapWork(Mockito.mock(MapWork.class));
  MapRedTask mrTask = Mockito.spy(new MapRedTask());
  mrTask.setWork(mrWork);
  mrTask.initialize(queryState, null, taskQueue, ctx);
  mrTask.jobExecHelper = Mockito.mock(HadoopJobExecHelper.class);
  when(mrTask.jobExecHelper.progressLocal(Mockito.any(Process.class), Mockito.anyString())).thenReturn(0);
  mrTask.execute();
  // The task submits via a child process; capture the environment passed to spawn()
  // and assert that it contains the proxy-user variable.
  ArgumentCaptor<String[]> captor = ArgumentCaptor.forClass(String[].class);
  verify(mrTask).spawn(Mockito.anyString(), Mockito.anyString(), captor.capture());
  String expected = "HADOOP_PROXY_USER=" + Utils.getUGI().getUserName();
  Assert.assertTrue(Arrays.asList(captor.getValue()).contains(expected));
}
Use of org.apache.hadoop.hive.ql.TaskQueue in project hive by apache.
The class ExecDriver, method main.
@SuppressWarnings("unchecked")
public static void main(String[] args) throws IOException, HiveException {
  String planFileName = null;
  String jobConfFileName = null;
  boolean noLog = false;
  String files = null;
  String libjars = null;
  boolean localtask = false;
  try {
    for (int i = 0; i < args.length; i++) {
      if (args[i].equals("-plan")) {
        planFileName = args[++i];
      } else if (args[i].equals("-jobconffile")) {
        jobConfFileName = args[++i];
      } else if (args[i].equals("-nolog")) {
        noLog = true;
      } else if (args[i].equals("-files")) {
        files = args[++i];
      } else if (args[i].equals("-libjars")) {
        libjars = args[++i];
      } else if (args[i].equals("-localtask")) {
        localtask = true;
      }
    }
  } catch (IndexOutOfBoundsException e) {
    System.err.println("Missing argument to option");
    printUsage();
  }
  JobConf conf;
  if (localtask) {
    conf = new JobConf(MapredLocalTask.class);
  } else {
    conf = new JobConf(ExecDriver.class);
  }
  if (jobConfFileName != null) {
    conf.addResource(new Path(jobConfFileName));
  }
  // Initialize the resources from the command line
  if (files != null) {
    conf.set("tmpfiles", files);
  }
  if (libjars != null) {
    conf.set("tmpjars", libjars);
  }
  if (UserGroupInformation.isSecurityEnabled()) {
    String hadoopAuthToken = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
    if (hadoopAuthToken != null) {
      conf.set("mapreduce.job.credentials.binary", hadoopAuthToken);
    }
  }
  boolean isSilent = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESESSIONSILENT);
  String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID, "").trim();
  if (queryId.isEmpty()) {
    queryId = "unknown-" + System.currentTimeMillis();
    HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYID, queryId);
  }
  System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId);
  LogUtils.registerLoggingContext(conf);
  if (noLog) {
    // If started from main(), and noLog is on, we should not output
    // any logs. To turn the log on, please set -Dtest.silent=false
    org.apache.logging.log4j.Logger logger = org.apache.logging.log4j.LogManager.getRootLogger();
    NullAppender appender = NullAppender.createNullAppender();
    appender.addToLogger(logger.getName(), Level.ERROR);
    appender.start();
  } else {
    setupChildLog4j(conf);
  }
  Logger LOG = LoggerFactory.getLogger(ExecDriver.class.getName());
  LogHelper console = new LogHelper(LOG, isSilent);
  if (planFileName == null) {
    console.printError("Must specify Plan File Name");
    printUsage();
  }
  // Print the location of the execution log so that it's easy to find the reason
  // for local mode execution failures.
  for (Appender appender : ((org.apache.logging.log4j.core.Logger) LogManager.getRootLogger()).getAppenders().values()) {
    if (appender instanceof FileAppender) {
      console.printInfo("Execution log at: " + ((FileAppender) appender).getFileName());
    } else if (appender instanceof RollingFileAppender) {
      console.printInfo("Execution log at: " + ((RollingFileAppender) appender).getFileName());
    }
  }
  // The plan file should always be in a local directory.
  Path p = new Path(planFileName);
  FileSystem fs = FileSystem.getLocal(conf);
  InputStream pathData = fs.open(p);
  // libjars are not added to the classpath of the child process, so add them here explicitly.
  try {
    // see also - code in CliDriver.java
    ClassLoader loader = conf.getClassLoader();
    if (StringUtils.isNotBlank(libjars)) {
      AddToClassPathAction addAction = new AddToClassPathAction(loader, Arrays.asList(StringUtils.split(libjars, ",")));
      loader = AccessController.doPrivileged(addAction);
    }
    conf.setClassLoader(loader);
    // Also set this on the thread context class loader, so new threads will inherit
    // this class loader, and it propagates into Configurations created by those threads.
    Thread.currentThread().setContextClassLoader(loader);
  } catch (Exception e) {
    throw new HiveException(e.getMessage(), e);
  }
  int ret;
  if (localtask) {
    memoryMXBean = ManagementFactory.getMemoryMXBean();
    MapredLocalWork plan = SerializationUtilities.deserializePlan(pathData, MapredLocalWork.class);
    MapredLocalTask ed = new MapredLocalTask(plan, conf, isSilent);
    ed.initialize(null, null, new TaskQueue(), null);
    ret = ed.executeInProcess();
  } else {
    MapredWork plan = SerializationUtilities.deserializePlan(pathData, MapredWork.class);
    ExecDriver ed = new ExecDriver(plan, conf, isSilent);
    ed.setTaskQueue(new TaskQueue());
    ret = ed.execute();
  }
  if (ret != 0) {
    System.exit(ret);
  }
}
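For reference, the flags parsed at the top of main map onto an invocation like the sketch below. This is illustrative only: the file paths are placeholders, not values taken from the Hive sources, and the plan file is typically written by the parent Hive process before it spawns this child JVM.
// Illustrative only: the flags ExecDriver.main understands, with placeholder paths.
String[] args = {
    "-plan", "/tmp/hive/plan.xml",            // serialized MapredWork / MapredLocalWork to execute
    "-jobconffile", "/tmp/hive/jobconf.xml",  // extra JobConf resource to load
    "-nolog",                                  // suppress child-side logging
    "-localtask"                               // run the plan as a MapredLocalTask instead of a MR job
};
ExecDriver.main(args);  // exits the JVM with the task's return code when it is non-zero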
Use of org.apache.hadoop.hive.ql.TaskQueue in project hive by apache.
The class TestSparkTask, method testHandleInterruptedException.
@Test
public void testHandleInterruptedException() throws Exception {
  HiveConf hiveConf = new HiveConf();
  SparkTask sparkTask = new SparkTask();
  sparkTask.setWork(mock(SparkWork.class));
  TaskQueue mockTestQueue = mock(TaskQueue.class);
  QueryState mockQueryState = mock(QueryState.class);
  when(mockQueryState.getConf()).thenReturn(hiveConf);
  sparkTask.initialize(mockQueryState, null, mockTestQueue, null);
  SparkJobStatus mockSparkJobStatus = mock(SparkJobStatus.class);
  when(mockSparkJobStatus.getMonitorError()).thenReturn(new InterruptedException());
  SparkSession mockSparkSession = mock(SparkSession.class);
  SparkJobRef mockSparkJobRef = mock(SparkJobRef.class);
  when(mockSparkJobRef.monitorJob()).thenReturn(2);
  when(mockSparkJobRef.getSparkJobStatus()).thenReturn(mockSparkJobStatus);
  when(mockSparkSession.submit(any(), any(), any())).thenReturn(mockSparkJobRef);
  SessionState.start(hiveConf);
  SessionState.get().setSparkSession(mockSparkSession);
  // A bare InterruptedException from job monitoring should cause the Spark job to be cancelled.
  sparkTask.execute();
  verify(mockSparkJobRef, atLeastOnce()).cancelJob();
  // The same applies when the InterruptedException is wrapped in a HiveException.
  when(mockSparkJobStatus.getMonitorError()).thenReturn(new HiveException(new InterruptedException()));
  sparkTask.execute();
  verify(mockSparkJobRef, atLeastOnce()).cancelJob();
}
Use of org.apache.hadoop.hive.ql.TaskQueue in project hive by apache.
The class TestExecDriver, method executePlan.
private void executePlan() throws Exception {
  String testName = new Exception().getStackTrace()[1].getMethodName();
  MapRedTask mrtask = new MapRedTask();
  TaskQueue taskQueue = new TaskQueue();
  mrtask.setWork(mr);
  mrtask.initialize(queryState, null, taskQueue, null);
  int exitVal = mrtask.execute();
  if (exitVal != 0) {
    LOG.error(testName + " execution failed with exit status: " + exitVal);
    // Fail the test on a non-zero exit status.
    assertEquals(true, false);
  }
  LOG.info(testName + " execution completed successfully");
}
Use of org.apache.hadoop.hive.ql.TaskQueue in project hive by apache.
The class TestMacroSemanticAnalyzer, method analyze.
private void analyze(ASTNode ast) throws Exception {
  BaseSemanticAnalyzer analyzer = DDLSemanticAnalyzerFactory.getAnalyzer(ast, queryState);
  analyzer.analyze(ast, context);
  List<Task<?>> rootTasks = analyzer.getRootTasks();
  Assert.assertEquals(1, rootTasks.size());
  for (Task<?> task : rootTasks) {
    task.initialize(null, null, new TaskQueue(context), context);
    task.setConf(conf);
    Assert.assertEquals(0, task.executeTask(null));
  }
}
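Across the snippets above, TaskQueue follows the same pattern: it is constructed (with or without a Context), handed to Task.initialize(), and the task is then executed. A minimal sketch of that shared pattern, where task, work, queryState, and ctx are placeholders for the concrete objects in each example:
// Illustrative pattern only; 'task', 'work', 'queryState', and 'ctx' stand in for
// the concrete Task, plan, QueryState, and Context objects used above.
TaskQueue taskQueue = new TaskQueue(ctx);          // or new TaskQueue() when no Context is involved
task.setWork(work);                                // attach the plan (e.g. MapredWork) to the task
task.initialize(queryState, null, taskQueue, ctx); // the second argument (QueryPlan) is null in these examples
int exitCode = task.execute();                     // 0 signals success in the snippets above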