Example 21 with TezWork

Use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

The class TestHiveProtoLoggingHook, method testQueueLogs:

@Test
public void testQueueLogs() throws Exception {
    context.setHookType(HookType.PRE_EXEC_HOOK);
    EventLogger evtLogger = new EventLogger(conf, SystemClock.getInstance());
    // This makes it an MR task
    context.getQueryPlan().getRootTasks().add(new ExecDriver());
    evtLogger.handle(context);
    // This makes it a Tez task
    MapWork mapWork = new MapWork();
    TezWork tezWork = new TezWork("test_queryid");
    tezWork.add(mapWork);
    TezTask task = new TezTask();
    task.setId("id1");
    task.setWork(tezWork);
    context.getQueryPlan().getRootTasks().add(task);
    context.getQueryPlan().getRootTasks().add(new TezTask());
    evtLogger.handle(context);
    // This makes it an LLAP task
    mapWork.setLlapMode(true);
    evtLogger.handle(context);
    evtLogger.shutdown();
    ProtoMessageReader<HiveHookEventProto> reader = getTestReader(conf, tmpFolder);
    HiveHookEventProto event = reader.readEvent();
    Assert.assertNotNull(event);
    Assert.assertEquals(ExecutionMode.MR.name(), event.getExecutionMode());
    Assert.assertEquals(event.getQueue(), "mr_queue");
    event = reader.readEvent();
    Assert.assertNotNull(event);
    Assert.assertEquals(ExecutionMode.TEZ.name(), event.getExecutionMode());
    Assert.assertEquals(event.getQueue(), "tez_queue");
    event = reader.readEvent();
    Assert.assertNotNull(event);
    Assert.assertEquals(ExecutionMode.LLAP.name(), event.getExecutionMode());
    Assert.assertEquals(event.getQueue(), "llap_queue");
}
Also used : MapWork(org.apache.hadoop.hive.ql.plan.MapWork) EventLogger(org.apache.hadoop.hive.ql.hooks.HiveProtoLoggingHook.EventLogger) ExecDriver(org.apache.hadoop.hive.ql.exec.mr.ExecDriver) HiveHookEventProto(org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto) TezTask(org.apache.hadoop.hive.ql.exec.tez.TezTask) TezWork(org.apache.hadoop.hive.ql.plan.TezWork) Test(org.junit.Test)
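
The three handle() calls above exercise the hook's execution-mode detection: an ExecDriver root task means MR, a TezTask means TEZ, and a TezTask whose DAG contains LLAP-mode work means LLAP. A minimal sketch of that classification, as a hypothetical helper (the method name, the NONE fallback, and the import path for ExecutionMode are assumptions, not the hook's actual code):

import java.util.Collection;
import org.apache.hadoop.hive.ql.exec.Task;
import org.apache.hadoop.hive.ql.exec.mr.ExecDriver;
import org.apache.hadoop.hive.ql.exec.tez.TezTask;
import org.apache.hadoop.hive.ql.hooks.HiveProtoLoggingHook.ExecutionMode;
import org.apache.hadoop.hive.ql.plan.BaseWork;
import org.apache.hadoop.hive.ql.plan.TezWork;

// Hypothetical helper mirroring the MR -> TEZ -> LLAP progression the test asserts.
static ExecutionMode classifyExecutionMode(Collection<Task<?>> rootTasks) {
    ExecutionMode mode = ExecutionMode.NONE; // assumed fallback constant
    for (Task<?> task : rootTasks) {
        if (task instanceof TezTask) {
            TezWork work = ((TezTask) task).getWork();
            // setLlapMode(true) on any BaseWork in the DAG upgrades TEZ to LLAP
            boolean llap = work != null
                    && work.getAllWork().stream().anyMatch(BaseWork::getLlapMode);
            return llap ? ExecutionMode.LLAP : ExecutionMode.TEZ;
        } else if (task instanceof ExecDriver) {
            mode = ExecutionMode.MR;
        }
    }
    return mode;
}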

Example 22 with TezWork

Use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

The class GenericUDTFGetSplits, method createPlanFragment:

public PlanFragment createPlanFragment(String query, int num) throws HiveException {
    HiveConf conf = new HiveConf(SessionState.get().getConf());
    HiveConf.setVar(conf, ConfVars.HIVEFETCHTASKCONVERSION, "none");
    HiveConf.setVar(conf, ConfVars.HIVEQUERYRESULTFILEFORMAT, PlanUtils.LLAP_OUTPUT_FORMAT_KEY);
    String originalMode = HiveConf.getVar(conf, ConfVars.HIVE_EXECUTION_MODE);
    HiveConf.setVar(conf, ConfVars.HIVE_EXECUTION_MODE, "llap");
    HiveConf.setBoolVar(conf, ConfVars.HIVE_TEZ_GENERATE_CONSISTENT_SPLITS, true);
    HiveConf.setBoolVar(conf, ConfVars.LLAP_CLIENT_CONSISTENT_SPLITS, true);
    conf.setBoolean(TezSplitGrouper.TEZ_GROUPING_NODE_LOCAL_ONLY, true);
    // Tez/LLAP requires RPC query plan
    HiveConf.setBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN, true);
    try {
        jc = DagUtils.getInstance().createConfiguration(conf);
    } catch (IOException e) {
        throw new HiveException(e);
    }
    Driver driver = new Driver(conf);
    try {
        CommandProcessorResponse cpr = driver.compileAndRespond(query);
        if (cpr.getResponseCode() != 0) {
            throw new HiveException("Failed to compile query: " + cpr.getException());
        }
        QueryPlan plan = driver.getPlan();
        List<Task<?>> roots = plan.getRootTasks();
        Schema schema = convertSchema(plan.getResultSchema());
        if (roots == null || roots.size() != 1 || !(roots.get(0) instanceof TezTask)) {
            throw new HiveException("Was expecting a single TezTask.");
        }
        TezWork tezWork = ((TezTask) roots.get(0)).getWork();
        if (tezWork.getAllWork().size() != 1) {
            String tableName = "table_" + UUID.randomUUID().toString().replaceAll("[^A-Za-z0-9 ]", "");
            String ctas = "create temporary table " + tableName + " as " + query;
            LOG.info("Materializing the query for LLAPIF; CTAS: " + ctas);
            try {
                driver.resetQueryState();
                HiveConf.setVar(conf, ConfVars.HIVE_EXECUTION_MODE, originalMode);
                cpr = driver.run(ctas, false);
            } catch (CommandNeedRetryException e) {
                throw new HiveException(e);
            }
            if (cpr.getResponseCode() != 0) {
                throw new HiveException("Failed to create temp table: " + cpr.getException());
            }
            HiveConf.setVar(conf, ConfVars.HIVE_EXECUTION_MODE, "llap");
            query = "select * from " + tableName;
            cpr = driver.compileAndRespond(query);
            if (cpr.getResponseCode() != 0) {
                throw new HiveException("Failed to create temp table: " + cpr.getException());
            }
            plan = driver.getPlan();
            roots = plan.getRootTasks();
            schema = convertSchema(plan.getResultSchema());
            if (roots == null || roots.size() != 1 || !(roots.get(0) instanceof TezTask)) {
                throw new HiveException("Was expecting a single TezTask.");
            }
            tezWork = ((TezTask) roots.get(0)).getWork();
        }
        return new PlanFragment(tezWork, schema, jc);
    } finally {
        driver.close();
        driver.destroy();
    }
}
Also used : TezTask(org.apache.hadoop.hive.ql.exec.tez.TezTask) Task(org.apache.hadoop.hive.ql.exec.Task) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Schema(org.apache.hadoop.hive.llap.Schema) Driver(org.apache.hadoop.hive.ql.Driver) IOException(java.io.IOException) QueryPlan(org.apache.hadoop.hive.ql.QueryPlan) TezTask(org.apache.hadoop.hive.ql.exec.tez.TezTask) CommandNeedRetryException(org.apache.hadoop.hive.ql.CommandNeedRetryException) HiveConf(org.apache.hadoop.hive.conf.HiveConf) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
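
The CTAS fallback in createPlanFragment guarantees the returned fragment always wraps a single-vertex TezWork: when the compiled plan has more than one work unit, the query is first materialized into a temporary table and re-planned as a plain scan. A hedged usage sketch (the public PlanFragment fields work, schema, and jc are assumptions about the inner class, and a started SessionState is assumed):

// Hypothetical caller; error handling elided.
GenericUDTFGetSplits udtf = new GenericUDTFGetSplits();
PlanFragment fragment = udtf.createPlanFragment("select id, name from src", 4);
TezWork work = fragment.work;    // single-vertex DAG, by construction above
Schema schema = fragment.schema; // result schema for LLAP consumers
JobConf jc = fragment.jc;        // Tez-ready job configuration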

Example 23 with TezWork

Use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

The class DDLTask, method mergeFiles:

/**
 * First, make sure the source table/partition is not archived, has no
 * indexes, and is stored as RCFile or ORC. If any of these checks fails,
 * throw an exception.
 *
 * The merge itself is performed by building a merge task (a TezTask on
 * Tez, otherwise a MergeFileTask) from the mergeFilesDesc.
 *
 * @param db the Hive database handle
 * @param mergeFilesDesc descriptor carrying the input/output directories and table metadata
 * @param driverContext the current driver context
 * @return 0 on success, a non-zero error code otherwise
 * @throws HiveException
 */
private int mergeFiles(Hive db, AlterTablePartMergeFilesDesc mergeFilesDesc, DriverContext driverContext) throws HiveException {
    ListBucketingCtx lbCtx = mergeFilesDesc.getLbCtx();
    boolean lbatc = lbCtx == null ? false : lbCtx.isSkewedStoredAsDir();
    int lbd = lbCtx == null ? 0 : lbCtx.calculateListBucketingLevel();
    // merge work only needs input and output.
    MergeFileWork mergeWork = new MergeFileWork(mergeFilesDesc.getInputDir(), mergeFilesDesc.getOutputDir(), mergeFilesDesc.getInputFormatClass().getName(), mergeFilesDesc.getTableDesc());
    LinkedHashMap<Path, ArrayList<String>> pathToAliases = new LinkedHashMap<>();
    ArrayList<String> inputDirstr = new ArrayList<String>(1);
    inputDirstr.add(mergeFilesDesc.getInputDir().toString());
    pathToAliases.put(mergeFilesDesc.getInputDir().get(0), inputDirstr);
    mergeWork.setPathToAliases(pathToAliases);
    mergeWork.setListBucketingCtx(mergeFilesDesc.getLbCtx());
    mergeWork.resolveConcatenateMerge(db.getConf());
    mergeWork.setMapperCannotSpanPartns(true);
    mergeWork.setSourceTableInputFormat(mergeFilesDesc.getInputFormatClass().getName());
    final FileMergeDesc fmd;
    if (mergeFilesDesc.getInputFormatClass().equals(RCFileInputFormat.class)) {
        fmd = new RCFileMergeDesc();
    } else {
        // safe to assume else is ORC as semantic analyzer will check for RC/ORC
        fmd = new OrcFileMergeDesc();
    }
    fmd.setDpCtx(null);
    fmd.setHasDynamicPartitions(false);
    fmd.setListBucketingAlterTableConcatenate(lbatc);
    fmd.setListBucketingDepth(lbd);
    fmd.setOutputPath(mergeFilesDesc.getOutputDir());
    CompilationOpContext opContext = driverContext.getCtx().getOpContext();
    Operator<? extends OperatorDesc> mergeOp = OperatorFactory.get(opContext, fmd);
    LinkedHashMap<String, Operator<? extends OperatorDesc>> aliasToWork = new LinkedHashMap<String, Operator<? extends OperatorDesc>>();
    aliasToWork.put(mergeFilesDesc.getInputDir().toString(), mergeOp);
    mergeWork.setAliasToWork(aliasToWork);
    DriverContext driverCxt = new DriverContext();
    Task<?> task;
    if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) {
        TezWork tezWork = new TezWork(queryState.getQueryId(), conf);
        mergeWork.setName("File Merge");
        tezWork.add(mergeWork);
        task = new TezTask();
        ((TezTask) task).setWork(tezWork);
    } else {
        task = new MergeFileTask();
        ((MergeFileTask) task).setWork(mergeWork);
    }
    // initialize the task and execute
    task.initialize(queryState, getQueryPlan(), driverCxt, opContext);
    subtask = task;
    int ret = task.execute(driverCxt);
    if (subtask.getException() != null) {
        setException(subtask.getException());
    }
    return ret;
}
Also used : Path(org.apache.hadoop.fs.Path) DriverContext(org.apache.hadoop.hive.ql.DriverContext) MergeFileWork(org.apache.hadoop.hive.ql.io.merge.MergeFileWork) RCFileMergeDesc(org.apache.hadoop.hive.ql.plan.RCFileMergeDesc) OrcFileMergeDesc(org.apache.hadoop.hive.ql.plan.OrcFileMergeDesc) FileMergeDesc(org.apache.hadoop.hive.ql.plan.FileMergeDesc) RCFileMergeDesc(org.apache.hadoop.hive.ql.plan.RCFileMergeDesc) OrcFileMergeDesc(org.apache.hadoop.hive.ql.plan.OrcFileMergeDesc) ArrayList(java.util.ArrayList) TezTask(org.apache.hadoop.hive.ql.exec.tez.TezTask) SQLUniqueConstraint(org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint) CheckConstraint(org.apache.hadoop.hive.ql.metadata.CheckConstraint) NotNullConstraint(org.apache.hadoop.hive.ql.metadata.NotNullConstraint) SQLCheckConstraint(org.apache.hadoop.hive.metastore.api.SQLCheckConstraint) SQLDefaultConstraint(org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint) DefaultConstraint(org.apache.hadoop.hive.ql.metadata.DefaultConstraint) UniqueConstraint(org.apache.hadoop.hive.ql.metadata.UniqueConstraint) SQLNotNullConstraint(org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint) LinkedHashMap(java.util.LinkedHashMap) CompilationOpContext(org.apache.hadoop.hive.ql.CompilationOpContext) ListBucketingCtx(org.apache.hadoop.hive.ql.plan.ListBucketingCtx) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) TezWork(org.apache.hadoop.hive.ql.plan.TezWork) MergeFileTask(org.apache.hadoop.hive.ql.io.merge.MergeFileTask)
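
The only branch point in mergeFiles is the execution-engine check; extracted into a standalone helper it reads as below (a sketch built from exactly the calls shown above, with the helper name buildMergeTask invented for illustration):

// Sketch: wrap the MergeFileWork in a Tez DAG on Tez, or run it directly as an MR merge task.
static Task<?> buildMergeTask(MergeFileWork mergeWork, HiveConf conf, String queryId) {
    if ("tez".equals(conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE))) {
        TezWork tezWork = new TezWork(queryId, conf);
        mergeWork.setName("File Merge");
        tezWork.add(mergeWork);
        TezTask tezTask = new TezTask();
        tezTask.setWork(tezWork);
        return tezTask;
    }
    MergeFileTask mrTask = new MergeFileTask();
    mrTask.setWork(mergeWork);
    return mrTask;
}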

Example 24 with TezWork

Use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

The class OperatorHealthCheckerHook, method run:

@Override
public void run(HookContext hookContext) throws Exception {
    List<Node> rootOps = Lists.newArrayList();
    List<Task<?>> roots = hookContext.getQueryPlan().getRootTasks();
    for (Task<?> task : roots) {
        Object work = task.getWork();
        if (work instanceof MapredWork) {
            MapredWork mapredWork = (MapredWork) work;
            MapWork mapWork = mapredWork.getMapWork();
            if (mapWork != null) {
                rootOps.addAll(mapWork.getAllRootOperators());
            }
            ReduceWork reduceWork = mapredWork.getReduceWork();
            if (reduceWork != null) {
                rootOps.addAll(reduceWork.getAllRootOperators());
            }
        }
        if (work instanceof TezWork) {
            for (BaseWork bw : ((TezWork) work).getAllWorkUnsorted()) {
                rootOps.addAll(bw.getAllRootOperators());
            }
        }
    }
    walkTree(rootOps);
}
Also used : Task(org.apache.hadoop.hive.ql.exec.Task) MapredWork(org.apache.hadoop.hive.ql.plan.MapredWork) MapWork(org.apache.hadoop.hive.ql.plan.MapWork) Node(org.apache.hadoop.hive.ql.lib.Node) ReduceWork(org.apache.hadoop.hive.ql.plan.ReduceWork) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
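
walkTree itself is not shown by the aggregator; a minimal sketch of what a depth-first walk over the collected roots could look like (hypothetical body, the real hook presumably runs its per-operator health checks at the marked point):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hive.ql.lib.Node;

// Hypothetical sketch: iterative DFS with a visited set, since operator
// graphs can share children across branches.
private void walkTree(List<Node> rootOps) {
    Deque<Node> stack = new ArrayDeque<>(rootOps);
    Set<Node> seen = new HashSet<>();
    while (!stack.isEmpty()) {
        Node node = stack.pop();
        if (!seen.add(node)) {
            continue;
        }
        // per-operator health checks would run here
        if (node.getChildren() != null) {
            stack.addAll(node.getChildren());
        }
    }
}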

Example 25 with TezWork

Use of org.apache.hadoop.hive.ql.plan.TezWork in project hive by apache.

The class TezTask, method collectCommitInformation:

private void collectCommitInformation(TezWork work) throws IOException, TezException {
    for (BaseWork w : work.getAllWork()) {
        JobConf jobConf = workToConf.get(w);
        Vertex vertex = workToVertex.get(w);
        boolean hasIcebergCommitter = Optional.ofNullable(jobConf)
                .map(JobConf::getOutputCommitter)
                .map(Object::getClass)
                .map(Class::getName)
                .filter(name -> name.endsWith("HiveIcebergNoJobCommitter"))
                .isPresent();
        // we should only consider jobs with Iceberg output committer and a data sink
        if (hasIcebergCommitter && !vertex.getDataSinks().isEmpty()) {
            VertexStatus status = dagClient.getVertexStatus(vertex.getName(), EnumSet.of(StatusGetOpts.GET_COUNTERS));
            String[] jobIdParts = status.getId().split("_");
            // status.getId() returns something like: vertex_1617722404520_0001_1_00
            // this should be transformed to a parsable JobID: job_16177224045200_0001
            int vertexId = Integer.parseInt(jobIdParts[jobIdParts.length - 1]);
            String jobId = String.format(JOB_ID_TEMPLATE, jobIdParts[1], vertexId, jobIdParts[2]);
            List<String> tables = new ArrayList<>();
            Map<String, String> icebergProperties = new HashMap<>();
            for (Map.Entry<String, String> entry : jobConf) {
                if (entry.getKey().startsWith(ICEBERG_SERIALIZED_TABLE_PREFIX)) {
                    // get all target tables this vertex wrote to
                    tables.add(entry.getKey().substring(ICEBERG_SERIALIZED_TABLE_PREFIX.length()));
                } else if (entry.getKey().startsWith(ICEBERG_PROPERTY_PREFIX)) {
                    // find iceberg props in jobConf as they can be needed, but not available, during job commit
                    icebergProperties.put(entry.getKey(), entry.getValue());
                }
            }
            // save information for each target table
            tables.forEach(table -> SessionStateUtil.addCommitInfo(jobConf, table, jobId, status.getProgress().getSucceededTaskCount(), icebergProperties));
        }
    }
}
Also used : Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) MetricsConstant(org.apache.hadoop.hive.common.metrics.common.MetricsConstant) DAGClient(org.apache.tez.dag.api.client.DAGClient) Arrays(java.util.Arrays) TezCounter(org.apache.tez.common.counters.TezCounter) VertexStatus(org.apache.tez.dag.api.client.VertexStatus) LoggerFactory(org.slf4j.LoggerFactory) CallerContext(org.apache.tez.client.CallerContext) FileSinkOperator(org.apache.hadoop.hive.ql.exec.FileSinkOperator) ReduceWork(org.apache.hadoop.hive.ql.plan.ReduceWork) DAGStatus(org.apache.tez.dag.api.client.DAGStatus) JSONObject(org.json.JSONObject) Map(java.util.Map) Configuration(org.apache.hadoop.conf.Configuration) ApplicationReport(org.apache.hadoop.yarn.api.records.ApplicationReport) Path(org.apache.hadoop.fs.Path) Context(org.apache.hadoop.hive.ql.Context) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) MergeJoinWork(org.apache.hadoop.hive.ql.plan.MergeJoinWork) CounterGroup(org.apache.tez.common.counters.CounterGroup) Vertex(org.apache.tez.dag.api.Vertex) EnumSet(java.util.EnumSet) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) EdgeType(org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType) Edge(org.apache.tez.dag.api.Edge) Collection(java.util.Collection) HiveConfUtil(org.apache.hadoop.hive.conf.HiveConfUtil) Set(java.util.Set) TezJobMonitor(org.apache.hadoop.hive.ql.exec.tez.monitoring.TezJobMonitor) DAG(org.apache.tez.dag.api.DAG) SessionNotRunning(org.apache.tez.dag.api.SessionNotRunning) SessionState(org.apache.hadoop.hive.ql.session.SessionState) List(java.util.List) MetastoreConf(org.apache.hadoop.hive.metastore.conf.MetastoreConf) ServerUtils(org.apache.hadoop.hive.common.ServerUtils) MapWork(org.apache.hadoop.hive.ql.plan.MapWork) DAGAccessControls(org.apache.tez.common.security.DAGAccessControls) Optional(java.util.Optional) SessionStateUtil(org.apache.hadoop.hive.ql.session.SessionStateUtil) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) TezConfiguration(org.apache.tez.dag.api.TezConfiguration) StatusGetOpts(org.apache.tez.dag.api.client.StatusGetOpts) HashMap(java.util.HashMap) MappingInput(org.apache.hadoop.hive.ql.exec.tez.UserPoolMapping.MappingInput) StageType(org.apache.hadoop.hive.ql.plan.api.StageType) ArrayList(java.util.ArrayList) Task(org.apache.hadoop.hive.ql.exec.Task) LinkedHashMap(java.util.LinkedHashMap) Utilities(org.apache.hadoop.hive.ql.exec.Utilities) VertexGroup(org.apache.tez.dag.api.VertexGroup) TezWork(org.apache.hadoop.hive.ql.plan.TezWork) StringUtils(org.apache.hadoop.util.StringUtils) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) LinkedList(java.util.LinkedList) Nullable(javax.annotation.Nullable) Private(org.apache.hadoop.classification.InterfaceAudience.Private) Ref(org.apache.hive.common.util.Ref) Logger(org.slf4j.Logger) UnionWork(org.apache.hadoop.hive.ql.plan.UnionWork) HiveConf(org.apache.hadoop.hive.conf.HiveConf) IOException(java.io.IOException) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) GroupInputEdge(org.apache.tez.dag.api.GroupInputEdge) TezException(org.apache.tez.dag.api.TezException) Operator(org.apache.hadoop.hive.ql.exec.Operator) TezEdgeProperty(org.apache.hadoop.hive.ql.plan.TezEdgeProperty) JobConf(org.apache.hadoop.mapred.JobConf) TezCounters(org.apache.tez.common.counters.TezCounters) TezClient(org.apache.tez.client.TezClient) WmContext(org.apache.hadoop.hive.ql.wm.WmContext) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Collections(java.util.Collections) 
LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) TezRuntimeConfiguration(org.apache.tez.runtime.library.api.TezRuntimeConfiguration)
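
The vertex-to-job id rewrite described in the code's comment is easy to verify by hand. A worked sketch, assuming JOB_ID_TEMPLATE is "job_%s%d_%s" (inferred from the sample output in the comment, not confirmed from the source):

String vertexName = "vertex_1617722404520_0001_1_00";
String[] parts = vertexName.split("_");                   // [vertex, 1617722404520, 0001, 1, 00]
int vertexId = Integer.parseInt(parts[parts.length - 1]); // 0
String jobId = String.format("job_%s%d_%s", parts[1], vertexId, parts[2]);
// jobId == "job_16177224045200_0001"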

Aggregations

TezWork (org.apache.hadoop.hive.ql.plan.TezWork) 31
BaseWork (org.apache.hadoop.hive.ql.plan.BaseWork) 16
MapWork (org.apache.hadoop.hive.ql.plan.MapWork) 13
TezTask (org.apache.hadoop.hive.ql.exec.tez.TezTask) 11
Task (org.apache.hadoop.hive.ql.exec.Task) 9
ArrayList (java.util.ArrayList) 8
SparkWork (org.apache.hadoop.hive.ql.plan.SparkWork) 8
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator) 7
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException) 7
MapredWork (org.apache.hadoop.hive.ql.plan.MapredWork) 7
List (java.util.List) 6
HiveConf (org.apache.hadoop.hive.conf.HiveConf) 6
ConditionalTask (org.apache.hadoop.hive.ql.exec.ConditionalTask) 6
Operator (org.apache.hadoop.hive.ql.exec.Operator) 6
ReduceWork (org.apache.hadoop.hive.ql.plan.ReduceWork) 6
Serializable (java.io.Serializable) 5
LinkedList (java.util.LinkedList) 5
Path (org.apache.hadoop.fs.Path) 5
MoveWork (org.apache.hadoop.hive.ql.plan.MoveWork) 5
IOException (java.io.IOException) 4