
Example 36 with SemanticException

use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.

the class PartitionPruner method prune.

/**
 * Get the partition list for the table that satisfies the partition pruner
 * condition.
 *
 * @param tab
 *          the table object for the alias
 * @param prunerExpr
 *          the pruner expression for the alias
 * @param conf
 *          for checking whether "strict" mode is on.
 * @param alias
 *          for generating error message only.
 * @param prunedPartitionsMap
 *          cached result for the table
 * @return the partition list for the table that satisfies the partition
 *         pruner condition.
 * @throws SemanticException
 */
public static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr, HiveConf conf, String alias, Map<String, PrunedPartitionList> prunedPartitionsMap) throws SemanticException {
    if (LOG.isTraceEnabled()) {
        LOG.trace("Started pruning partition");
        LOG.trace("dbname = " + tab.getDbName());
        LOG.trace("tabname = " + tab.getTableName());
        LOG.trace("prune Expression = " + (prunerExpr == null ? "" : prunerExpr));
    }
    String key = tab.getFullyQualifiedName() + ";";
    if (!tab.isPartitioned()) {
        // If the table is not partitioned, return empty list.
        return getAllPartsFromCacheOrServer(tab, key, false, prunedPartitionsMap);
    }
    if (!hasColumnExpr(prunerExpr)) {
        // If "strict" mode is on, we have to provide a partition predicate for each partitioned table.
        String error = StrictChecks.checkNoPartitionFilter(conf);
        if (error != null) {
            throw new SemanticException(error + " No partition predicate for Alias \"" + alias + "\" Table \"" + tab.getTableName() + "\"");
        }
    }
    if (prunerExpr == null) {
        // In non-strict mode with no predicates at all - get everything.
        return getAllPartsFromCacheOrServer(tab, key, false, prunedPartitionsMap);
    }
    Set<String> partColsUsedInFilter = new LinkedHashSet<String>();
    // Replace virtual columns with nulls. See javadoc for details.
    prunerExpr = removeNonPartCols(prunerExpr, extractPartColNames(tab), partColsUsedInFilter);
    // Remove all parts that are not partition columns. See javadoc for details.
    ExprNodeDesc compactExpr = compactExpr(prunerExpr.clone());
    String oldFilter = prunerExpr.getExprString();
    if (compactExpr == null || isBooleanExpr(compactExpr)) {
        if (isFalseExpr(compactExpr)) {
            return new PrunedPartitionList(tab, key + compactExpr.getExprString(), new LinkedHashSet<Partition>(0), new ArrayList<String>(0), false);
        }
        // For null and true values, return every partition
        return getAllPartsFromCacheOrServer(tab, key, true, prunedPartitionsMap);
    }
    String compactExprString = compactExpr.getExprString();
    if (LOG.isDebugEnabled()) {
        LOG.debug("Filter w/ compacting: " + compactExprString + "; filter w/o compacting: " + oldFilter);
    }
    key = key + compactExprString;
    PrunedPartitionList ppList = prunedPartitionsMap.get(key);
    if (ppList != null) {
        return ppList;
    }
    ppList = getPartitionsFromServer(tab, key, (ExprNodeGenericFuncDesc) compactExpr, conf, alias, partColsUsedInFilter, oldFilter.equals(compactExpr.getExprString()));
    prunedPartitionsMap.put(key, ppList);
    return ppList;
}
Also used : LinkedHashSet(java.util.LinkedHashSet) Partition(org.apache.hadoop.hive.ql.metadata.Partition) PrunedPartitionList(org.apache.hadoop.hive.ql.parse.PrunedPartitionList) ExprNodeGenericFuncDesc(org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
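
A minimal caller sketch for the method above (names like tab, filterExpr, and conf stand for values already resolved by the analyzer, and java.util.HashMap is assumed to be imported; the per-query cache map is what lets repeated calls for the same table and filter reuse the earlier result):

Map<String, PrunedPartitionList> prunerCache = new HashMap<String, PrunedPartitionList>();
try {
    // prune() consults prunerCache first; on a miss it asks the metastore and stores
    // the result under "<fully qualified table name>;<compacted filter string>".
    PrunedPartitionList parts = PartitionPruner.prune(tab, filterExpr, conf, "t1", prunerCache);
    for (Partition p : parts.getPartitions()) {
        LOG.debug("retained partition: " + p.getName());
    }
} catch (SemanticException se) {
    // thrown when strict mode requires a partition predicate and none was given,
    // or when the metastore lookup fails
    LOG.error("partition pruning failed for alias t1", se);
}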

Example 37 with SemanticException

use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.

the class PartitionPruner method getAllPartsFromCacheOrServer.

private static PrunedPartitionList getAllPartsFromCacheOrServer(Table tab, String key, boolean unknownPartitions, Map<String, PrunedPartitionList> partsCache) throws SemanticException {
    PrunedPartitionList ppList = partsCache == null ? null : partsCache.get(key);
    if (ppList != null) {
        return ppList;
    }
    Set<Partition> parts;
    try {
        parts = getAllPartitions(tab);
    } catch (HiveException e) {
        throw new SemanticException(e);
    }
    ppList = new PrunedPartitionList(tab, key, parts, null, unknownPartitions);
    if (partsCache != null) {
        partsCache.put(key, ppList);
    }
    return ppList;
}
Also used : Partition(org.apache.hadoop.hive.ql.metadata.Partition) PrunedPartitionList(org.apache.hadoop.hive.ql.parse.PrunedPartitionList) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
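
The helper is a small check-cache, fetch, wrap pattern: checked HiveExceptions from the metastore are converted into the SemanticException the analyzer expects. A generic sketch of the same idiom, with hypothetical names (not Hive API), assuming java.util.Map and java.util.concurrent.Callable are imported:

// Generic cache-or-compute sketch mirroring getAllPartsFromCacheOrServer:
// consult the cache first, otherwise fetch and translate the checked
// exception into the analyzer's SemanticException.
private static <V> V getOrLoad(Map<String, V> cache, String key, Callable<V> loader) throws SemanticException {
    V cached = (cache == null) ? null : cache.get(key);
    if (cached != null) {
        return cached;
    }
    V loaded;
    try {
        loaded = loader.call();
    } catch (Exception e) {
        throw new SemanticException(e);
    }
    if (cache != null) {
        cache.put(key, loaded);
    }
    return loaded;
}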

Example 38 with SemanticException

use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.

the class SparkReduceSinkMapJoinProc method process.

/* (non-Javadoc)
   * This processor addresses the RS-MJ case that occurs in Spark on the small/hash
   * table side of things. The work that the RS will be a part of must be connected
   * to the MJ work via a broadcast edge.
   * We should not walk down the tree when we encounter this pattern, because
   * the type of work (map work or reduce work) needs to be determined
   * on the basis of the big-table side, which may be map work (no need for a shuffle)
   * or reduce work.
   */
@SuppressWarnings("unchecked")
@Override
public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procContext, Object... nodeOutputs) throws SemanticException {
    GenSparkProcContext context = (GenSparkProcContext) procContext;
    if (!nd.getClass().equals(MapJoinOperator.class)) {
        return null;
    }
    MapJoinOperator mapJoinOp = (MapJoinOperator) nd;
    if (stack.size() < 2 || !(stack.get(stack.size() - 2) instanceof ReduceSinkOperator)) {
        context.currentMapJoinOperators.add(mapJoinOp);
        return null;
    }
    context.preceedingWork = null;
    context.currentRootOperator = null;
    ReduceSinkOperator parentRS = (ReduceSinkOperator) stack.get(stack.size() - 2);
    // remove the tag for in-memory side of mapjoin
    parentRS.getConf().setSkipTag(true);
    parentRS.setSkipTag(true);
    // remember the original parent list before we start modifying it.
    if (!context.mapJoinParentMap.containsKey(mapJoinOp)) {
        List<Operator<?>> parents = new ArrayList<Operator<?>>(mapJoinOp.getParentOperators());
        context.mapJoinParentMap.put(mapJoinOp, parents);
    }
    List<BaseWork> mapJoinWork;
    /*
     *  If there is pre-existing work generated for the big-table side of the mapjoin,
     *  we need to hook the work generated for the RS (associated with the RS-MJ pattern)
     *  up to that pre-existing work.
     *
     *  Otherwise, we need to remember that the mapjoin op
     *  has to be linked to the RS work (associated with the RS-MJ pattern) later.
     *
     */
    mapJoinWork = context.mapJoinWorkMap.get(mapJoinOp);
    int workMapSize = context.childToWorkMap.get(parentRS).size();
    Preconditions.checkArgument(workMapSize == 1, "AssertionError: expected context.childToWorkMap.get(parentRS).size() to be 1, but was " + workMapSize);
    BaseWork parentWork = context.childToWorkMap.get(parentRS).get(0);
    // set the link between mapjoin and parent vertex
    int pos = context.mapJoinParentMap.get(mapJoinOp).indexOf(parentRS);
    if (pos == -1) {
        throw new SemanticException("Cannot find position of parent in mapjoin");
    }
    LOG.debug("Mapjoin " + mapJoinOp + ", pos: " + pos + " --> " + parentWork.getName());
    mapJoinOp.getConf().getParentToInput().put(pos, parentWork.getName());
    SparkEdgeProperty edgeProp = new SparkEdgeProperty(SparkEdgeProperty.SHUFFLE_NONE);
    if (mapJoinWork != null) {
        for (BaseWork myWork : mapJoinWork) {
            // link the work with the work associated with the reduce sink that triggered this rule
            SparkWork sparkWork = context.currentTask.getWork();
            LOG.debug("connecting " + parentWork.getName() + " with " + myWork.getName());
            sparkWork.connect(parentWork, myWork, edgeProp);
        }
    }
    // remember in case we need to connect additional work later
    Map<BaseWork, SparkEdgeProperty> linkWorkMap = null;
    if (context.linkOpWithWorkMap.containsKey(mapJoinOp)) {
        linkWorkMap = context.linkOpWithWorkMap.get(mapJoinOp);
    } else {
        linkWorkMap = new HashMap<BaseWork, SparkEdgeProperty>();
    }
    linkWorkMap.put(parentWork, edgeProp);
    context.linkOpWithWorkMap.put(mapJoinOp, linkWorkMap);
    List<ReduceSinkOperator> reduceSinks = context.linkWorkWithReduceSinkMap.get(parentWork);
    if (reduceSinks == null) {
        reduceSinks = new ArrayList<ReduceSinkOperator>();
    }
    reduceSinks.add(parentRS);
    context.linkWorkWithReduceSinkMap.put(parentWork, reduceSinks);
    // create the dummy operators
    List<Operator<?>> dummyOperators = new ArrayList<Operator<?>>();
    // create a new operator, HashTableDummyOperator, which shares the table desc
    HashTableDummyDesc desc = new HashTableDummyDesc();
    HashTableDummyOperator dummyOp = (HashTableDummyOperator) OperatorFactory.get(mapJoinOp.getCompilationOpContext(), desc);
    TableDesc tbl;
    // need to create the correct table descriptor for key/value
    RowSchema rowSchema = parentRS.getParentOperators().get(0).getSchema();
    tbl = PlanUtils.getReduceValueTableDesc(PlanUtils.getFieldSchemasFromRowSchema(rowSchema, ""));
    dummyOp.getConf().setTbl(tbl);
    Map<Byte, List<ExprNodeDesc>> keyExprMap = mapJoinOp.getConf().getKeys();
    List<ExprNodeDesc> keyCols = keyExprMap.get(Byte.valueOf((byte) 0));
    StringBuilder keyOrder = new StringBuilder();
    StringBuilder keyNullOrder = new StringBuilder();
    for (int i = 0; i < keyCols.size(); i++) {
        keyOrder.append("+");
        keyNullOrder.append("a");
    }
    TableDesc keyTableDesc = PlanUtils.getReduceKeyTableDesc(PlanUtils.getFieldSchemasFromColumnList(keyCols, "mapjoinkey"), keyOrder.toString(), keyNullOrder.toString());
    mapJoinOp.getConf().setKeyTableDesc(keyTableDesc);
    // let the dummy op be the parent of mapjoin op
    mapJoinOp.replaceParent(parentRS, dummyOp);
    List<Operator<? extends OperatorDesc>> dummyChildren = new ArrayList<Operator<? extends OperatorDesc>>();
    dummyChildren.add(mapJoinOp);
    dummyOp.setChildOperators(dummyChildren);
    dummyOperators.add(dummyOp);
    // cut the operator tree so as to not retain connections from the parent RS downstream
    List<Operator<? extends OperatorDesc>> childOperators = parentRS.getChildOperators();
    int childIndex = childOperators.indexOf(mapJoinOp);
    childOperators.remove(childIndex);
    // the work needs to know about the dummy operators; they have to be initialized separately at task startup
    if (mapJoinWork != null) {
        for (BaseWork myWork : mapJoinWork) {
            myWork.addDummyOp(dummyOp);
        }
    }
    if (context.linkChildOpWithDummyOp.containsKey(mapJoinOp)) {
        for (Operator<?> op : context.linkChildOpWithDummyOp.get(mapJoinOp)) {
            dummyOperators.add(op);
        }
    }
    context.linkChildOpWithDummyOp.put(mapJoinOp, dummyOperators);
    // replace ReduceSinkOp with HashTableSinkOp for the RSops which are parents of MJop
    MapJoinDesc mjDesc = mapJoinOp.getConf();
    HiveConf conf = context.conf;
    // Unlike in MR, we may call this method multiple times, for each
    // small table HTS. But, since it's idempotent, it should be OK.
    mjDesc.resetOrder();
    float hashtableMemoryUsage;
    if (hasGroupBy(mapJoinOp, context)) {
        hashtableMemoryUsage = conf.getFloatVar(HiveConf.ConfVars.HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE);
    } else {
        hashtableMemoryUsage = conf.getFloatVar(HiveConf.ConfVars.HIVEHASHTABLEMAXMEMORYUSAGE);
    }
    mjDesc.setHashTableMemoryUsage(hashtableMemoryUsage);
    SparkHashTableSinkDesc hashTableSinkDesc = new SparkHashTableSinkDesc(mjDesc);
    SparkHashTableSinkOperator hashTableSinkOp = (SparkHashTableSinkOperator) OperatorFactory.get(mapJoinOp.getCompilationOpContext(), hashTableSinkDesc);
    byte tag = (byte) pos;
    int[] valueIndex = mjDesc.getValueIndex(tag);
    if (valueIndex != null) {
        List<ExprNodeDesc> newValues = new ArrayList<ExprNodeDesc>();
        List<ExprNodeDesc> values = hashTableSinkDesc.getExprs().get(tag);
        for (int index = 0; index < values.size(); index++) {
            if (valueIndex[index] < 0) {
                newValues.add(values.get(index));
            }
        }
        hashTableSinkDesc.getExprs().put(tag, newValues);
    }
    // get all parents of reduce sink
    List<Operator<? extends OperatorDesc>> rsParentOps = parentRS.getParentOperators();
    for (Operator<? extends OperatorDesc> parent : rsParentOps) {
        parent.replaceChild(parentRS, hashTableSinkOp);
    }
    hashTableSinkOp.setParentOperators(rsParentOps);
    hashTableSinkOp.getConf().setTag(tag);
    return true;
}
Also used : ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) GroupByOperator(org.apache.hadoop.hive.ql.exec.GroupByOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) HashTableDummyOperator(org.apache.hadoop.hive.ql.exec.HashTableDummyOperator) SparkHashTableSinkOperator(org.apache.hadoop.hive.ql.exec.SparkHashTableSinkOperator) ArrayList(java.util.ArrayList) ArrayList(java.util.ArrayList) List(java.util.List) HiveConf(org.apache.hadoop.hive.conf.HiveConf) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) BaseWork(org.apache.hadoop.hive.ql.plan.BaseWork) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) MapJoinOperator(org.apache.hadoop.hive.ql.exec.MapJoinOperator) SparkHashTableSinkDesc(org.apache.hadoop.hive.ql.plan.SparkHashTableSinkDesc) HashTableDummyDesc(org.apache.hadoop.hive.ql.plan.HashTableDummyDesc) RowSchema(org.apache.hadoop.hive.ql.exec.RowSchema) MapJoinDesc(org.apache.hadoop.hive.ql.plan.MapJoinDesc) SparkWork(org.apache.hadoop.hive.ql.plan.SparkWork) HashTableDummyOperator(org.apache.hadoop.hive.ql.exec.HashTableDummyOperator) SparkEdgeProperty(org.apache.hadoop.hive.ql.plan.SparkEdgeProperty) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) GenSparkProcContext(org.apache.hadoop.hive.ql.parse.spark.GenSparkProcContext) TableDesc(org.apache.hadoop.hive.ql.plan.TableDesc) OperatorDesc(org.apache.hadoop.hive.ql.plan.OperatorDesc) SparkHashTableSinkOperator(org.apache.hadoop.hive.ql.exec.SparkHashTableSinkOperator)
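
The processor above only does its work when a graph walker dispatches it on an operator stack ending in a ReduceSinkOperator followed by a MapJoinOperator. Below is a hedged sketch of how a NodeProcessor of this kind is typically wired up with the rule-driven walker classes in org.apache.hadoop.hive.ql.lib; the rule name, the regular expression, procCtx, and topOps are illustrative, the constructor signatures are assumed, and the real SparkCompiler wiring differs in detail:

// Illustrative registration only, not the actual SparkCompiler code.
// The enclosing method is assumed to declare "throws SemanticException",
// since startWalking() can throw it.
Map<Rule, NodeProcessor> opRules = new LinkedHashMap<Rule, NodeProcessor>();
opRules.put(
    new RuleRegExp("RS-MJ small table side",
        ReduceSinkOperator.getOperatorName() + "%" + MapJoinOperator.getOperatorName() + "%"),
    new SparkReduceSinkMapJoinProc());
// procCtx is assumed to be a GenSparkProcContext; topOps is assumed to map
// table aliases to their TableScanOperators.
Dispatcher disp = new DefaultRuleDispatcher(null, opRules, procCtx);
GraphWalker walker = new DefaultGraphWalker(disp);
walker.startWalking(new ArrayList<Node>(topOps.values()), null);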

Example 39 with SemanticException

use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.

the class HiveOpConverter method translateJoin.

private OpAttr translateJoin(RelNode joinRel) throws SemanticException {
    // 0. Additional data structures needed for the join optimization
    // through Hive
    String[] baseSrc = new String[joinRel.getInputs().size()];
    String tabAlias = getHiveDerivedTableAlias();
    // 1. Convert inputs
    OpAttr[] inputs = new OpAttr[joinRel.getInputs().size()];
    List<Operator<?>> children = new ArrayList<Operator<?>>(joinRel.getInputs().size());
    for (int i = 0; i < inputs.length; i++) {
        inputs[i] = dispatch(joinRel.getInput(i));
        children.add(inputs[i].inputs.get(0));
        baseSrc[i] = inputs[i].tabAlias;
    }
    // 2. Generate tags
    for (int tag = 0; tag < children.size(); tag++) {
        ReduceSinkOperator reduceSinkOp = (ReduceSinkOperator) children.get(tag);
        reduceSinkOp.getConf().setTag(tag);
    }
    // 3. Virtual columns
    Set<Integer> newVcolsInCalcite = new HashSet<Integer>();
    newVcolsInCalcite.addAll(inputs[0].vcolsInCalcite);
    if (joinRel instanceof HiveMultiJoin || !(joinRel instanceof SemiJoin)) {
        int shift = inputs[0].inputs.get(0).getSchema().getSignature().size();
        for (int i = 1; i < inputs.length; i++) {
            newVcolsInCalcite.addAll(HiveCalciteUtil.shiftVColsSet(inputs[i].vcolsInCalcite, shift));
            shift += inputs[i].inputs.get(0).getSchema().getSignature().size();
        }
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Translating operator rel#" + joinRel.getId() + ":" + joinRel.getRelTypeName() + " with row type: [" + joinRel.getRowType() + "]");
    }
    // 4. Extract join key expressions from HiveSortExchange
    ExprNodeDesc[][] joinExpressions = new ExprNodeDesc[inputs.length][];
    for (int i = 0; i < inputs.length; i++) {
        joinExpressions[i] = ((HiveSortExchange) joinRel.getInput(i)).getJoinExpressions();
    }
    // 5. Extract the rest of the join predicate info, i.e. the join conditions
    // that are not part of the join key; these will be added to the filters
    List<RexNode> joinFilters;
    if (joinRel instanceof HiveJoin) {
        joinFilters = ImmutableList.of(((HiveJoin) joinRel).getJoinFilter());
    } else if (joinRel instanceof HiveMultiJoin) {
        joinFilters = ((HiveMultiJoin) joinRel).getJoinFilters();
    } else if (joinRel instanceof HiveSemiJoin) {
        joinFilters = ImmutableList.of(((HiveSemiJoin) joinRel).getJoinFilter());
    } else {
        throw new SemanticException("Can't handle join type: " + joinRel.getClass().getName());
    }
    List<List<ExprNodeDesc>> filterExpressions = Lists.newArrayList();
    for (int i = 0; i < joinFilters.size(); i++) {
        List<ExprNodeDesc> filterExpressionsForInput = new ArrayList<ExprNodeDesc>();
        if (joinFilters.get(i) != null) {
            for (RexNode conj : RelOptUtil.conjunctions(joinFilters.get(i))) {
                ExprNodeDesc expr = convertToExprNode(conj, joinRel, null, newVcolsInCalcite);
                filterExpressionsForInput.add(expr);
            }
        }
        filterExpressions.add(filterExpressionsForInput);
    }
    // 6. Generate Join operator
    JoinOperator joinOp = genJoin(joinRel, joinExpressions, filterExpressions, children, baseSrc, tabAlias);
    // 7. Return result
    return new OpAttr(tabAlias, newVcolsInCalcite, joinOp);
}
Also used : ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) SelectOperator(org.apache.hadoop.hive.ql.exec.SelectOperator) FilterOperator(org.apache.hadoop.hive.ql.exec.FilterOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) TableScanOperator(org.apache.hadoop.hive.ql.exec.TableScanOperator) Operator(org.apache.hadoop.hive.ql.exec.Operator) LimitOperator(org.apache.hadoop.hive.ql.exec.LimitOperator) JoinOperator(org.apache.hadoop.hive.ql.exec.JoinOperator) HiveMultiJoin(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveMultiJoin) ArrayList(java.util.ArrayList) HiveJoin(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin) SemiJoin(org.apache.calcite.rel.core.SemiJoin) HiveSemiJoin(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSemiJoin) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) ExprNodeDesc(org.apache.hadoop.hive.ql.plan.ExprNodeDesc) HashSet(java.util.HashSet) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException) ReduceSinkOperator(org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) HiveSemiJoin(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSemiJoin) RexNode(org.apache.calcite.rex.RexNode)
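
Step 3 above offsets the Calcite virtual-column indices of every input after the first by the combined schema width of the inputs to its left, so the indices stay valid in the joined row. A standalone sketch of that shift (a hypothetical helper, mirroring what HiveCalciteUtil.shiftVColsSet is used for here):

// Hypothetical re-implementation of the index shift from step 3:
// a virtual column at position i of input k sits at position
// i + (total width of inputs 0..k-1) in the combined join schema.
static Set<Integer> shiftVCols(Set<Integer> vcols, int shift) {
    Set<Integer> shifted = new HashSet<Integer>();
    for (Integer pos : vcols) {
        shifted.add(pos + shift);
    }
    return shifted;
}
// e.g. if input 0 has 3 columns and input 1 marks its column 1 as virtual,
// shiftVCols({1}, 3) yields {4}.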

Example 40 with SemanticException

use of org.apache.hadoop.hive.ql.parse.SemanticException in project hive by apache.

the class DumpMetaData method loadDumpFromFile.

private void loadDumpFromFile() throws SemanticException {
    BufferedReader br = null;
    try {
        // read from dumpfile and instantiate self
        FileSystem fs = dumpFile.getFileSystem(hiveConf);
        br = new BufferedReader(new InputStreamReader(fs.open(dumpFile)));
        String line = null;
        if ((line = br.readLine()) != null) {
            String[] lineContents = line.split("\t", 5);
            setDump(DumpType.valueOf(lineContents[0]), Long.valueOf(lineContents[1]), Long.valueOf(lineContents[2]), new Path(lineContents[3]));
            setPayload(lineContents[4].equals(Utilities.nullStringOutput) ? null : lineContents[4]);
            ReplChangeManager.setCmRoot(cmRoot);
        } else {
            throw new IOException("Unable to read valid values from dumpFile:" + dumpFile.toUri().toString());
        }
    } catch (IOException ioe) {
        throw new SemanticException(ioe);
    } finally {
        if (br != null) {
            try {
                br.close();
            } catch (IOException e) {
                throw new SemanticException(e);
            }
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) InputStreamReader(java.io.InputStreamReader) FileSystem(org.apache.hadoop.fs.FileSystem) BufferedReader(java.io.BufferedReader) IOException(java.io.IOException) SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException)
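
The dump file holds a single tab-separated line with five fields: a dump type, two numeric values, a path, and an optional payload (written as Utilities.nullStringOutput when absent). A hedged sketch of the same parse using try-with-resources, which makes the explicit finally/close block unnecessary; this is not the actual Hive code, and it reuses the fields and setters visible in the snippet above:

// Sketch only: same parse as loadDumpFromFile, but letting
// try-with-resources close the reader.
try (BufferedReader br = new BufferedReader(
        new InputStreamReader(dumpFile.getFileSystem(hiveConf).open(dumpFile)))) {
    String line = br.readLine();
    if (line == null) {
        throw new IOException("Unable to read valid values from dumpFile:" + dumpFile.toUri().toString());
    }
    String[] f = line.split("\t", 5);
    setDump(DumpType.valueOf(f[0]), Long.valueOf(f[1]), Long.valueOf(f[2]), new Path(f[3]));
    setPayload(f[4].equals(Utilities.nullStringOutput) ? null : f[4]);
    ReplChangeManager.setCmRoot(cmRoot);
} catch (IOException ioe) {
    throw new SemanticException(ioe);
}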

Aggregations

SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException) 131
ArrayList (java.util.ArrayList) 64
ExprNodeDesc (org.apache.hadoop.hive.ql.plan.ExprNodeDesc) 36
HashMap (java.util.HashMap) 30
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException) 27
Path (org.apache.hadoop.fs.Path) 22
IOException (java.io.IOException) 20
LinkedHashMap (java.util.LinkedHashMap) 19
List (java.util.List) 18
TableScanOperator (org.apache.hadoop.hive.ql.exec.TableScanOperator) 18
Node (org.apache.hadoop.hive.ql.lib.Node) 17
ExprNodeGenericFuncDesc (org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc) 17
ReduceSinkOperator (org.apache.hadoop.hive.ql.exec.ReduceSinkOperator) 16
DefaultRuleDispatcher (org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher) 16
Dispatcher (org.apache.hadoop.hive.ql.lib.Dispatcher) 16
GraphWalker (org.apache.hadoop.hive.ql.lib.GraphWalker) 16
ExprNodeColumnDesc (org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc) 16
Operator (org.apache.hadoop.hive.ql.exec.Operator) 15
DefaultGraphWalker (org.apache.hadoop.hive.ql.lib.DefaultGraphWalker) 15
Table (org.apache.hadoop.hive.ql.metadata.Table) 14