Example 1 with TableType

Use of org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.TableType in project flink by apache.

The class HiveParserCalcitePlanner defines the method genTableLogicalPlan:

private RelNode genTableLogicalPlan(String tableAlias, HiveParserQB qb) throws SemanticException {
    HiveParserRowResolver rowResolver = new HiveParserRowResolver();
    try {
        // 1. Bail out if a table sample is specified, or if the CBO return path and
        // hive test mode are both enabled: table sample clauses are not supported in CBO
        if (qb.getParseInfo().needTableSample(tableAlias)
                || semanticAnalyzer.getNameToSplitSampleMap().containsKey(tableAlias)
                || (Boolean.parseBoolean(
                                semanticAnalyzer
                                        .getConf()
                                        .get("hive.cbo.returnpath.hiveop", "false"))
                        && semanticAnalyzer
                                .getConf()
                                .getBoolVar(HiveConf.ConfVars.HIVETESTMODE))) {
            String msg =
                    String.format(
                            "Table Sample specified for %s."
                                    + " Currently we don't support Table Sample clauses in CBO,"
                                    + " turn off cbo for queries on tableSamples.",
                            tableAlias);
            LOG.debug(msg);
            throw new SemanticException(msg);
        }
        // 2. Get Table Metadata
        Table table = qb.getMetaData().getSrcForAlias(tableAlias);
        if (table.isTemporary()) {
            // Hive creates a temp table for VALUES, we need to convert it to LogicalValues
            RelNode values = genValues(tableAlias, table, rowResolver, cluster, getQB().getValuesTableToData().get(tableAlias));
            relToRowResolver.put(values, rowResolver);
            relToHiveColNameCalcitePosMap.put(values, buildHiveToCalciteColumnMap(rowResolver));
            return values;
        } else {
            // 3. Get Table Logical Schema (Row Type)
            // NOTE: Table logical schema = Non Partition Cols + Partition Cols + Virtual Cols
            // 3.1 Add Column info for non partition cols (Object Inspector fields)
            StructObjectInspector rowObjectInspector =
                    (StructObjectInspector) table.getDeserializer().getObjectInspector();
            List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
            ColumnInfo colInfo;
            String colName;
            for (StructField field : fields) {
                colName = field.getFieldName();
                colInfo =
                        new ColumnInfo(
                                colName,
                                TypeInfoUtils.getTypeInfoFromObjectInspector(
                                        field.getFieldObjectInspector()),
                                tableAlias,
                                false);
                colInfo.setSkewedCol(HiveParserUtils.isSkewedCol(tableAlias, qb, colName));
                rowResolver.put(tableAlias, colName, colInfo);
            }
            // 3.2 Add column info corresponding to partition columns
            for (FieldSchema partCol : table.getPartCols()) {
                colName = partCol.getName();
                colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(partCol.getType()), tableAlias, true);
                rowResolver.put(tableAlias, colName, colInfo);
            }
            // 4. Check the table type: only native tables are supported on this path
            final TableType tableType = obtainTableType(table);
            Preconditions.checkArgument(
                    tableType == TableType.NATIVE, "Only native tables are supported");
            // 5. Build the Hive table scan RelNode
            RelNode tableRel =
                    catalogReader
                            .getTable(
                                    Arrays.asList(
                                            catalogManager.getCurrentCatalog(),
                                            table.getDbName(),
                                            table.getTableName()))
                            .toRel(
                                    ViewExpanders.toRelContext(
                                            flinkPlanner.createToRelContext(), cluster));
            // 6. Add Schema(RR) to RelNode-Schema map
            Map<String, Integer> hiveToCalciteColMap = buildHiveToCalciteColumnMap(rowResolver);
            relToRowResolver.put(tableRel, rowResolver);
            relToHiveColNameCalcitePosMap.put(tableRel, hiveToCalciteColMap);
            return tableRel;
        }
    } catch (Exception e) {
        if (e instanceof SemanticException) {
            throw (SemanticException) e;
        } else {
            throw new RuntimeException(e);
        }
    }
}
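The helper buildHiveToCalciteColumnMap is referenced above but not shown in this example. A minimal sketch of what such a helper could look like, assuming HiveParserRowResolver exposes its resolved columns in schema order via getColumnInfos() and that the usual java.util imports are present; this body is an illustration, not the actual Flink implementation:

private Map<String, Integer> buildHiveToCalciteColumnMap(HiveParserRowResolver rowResolver) {
    // Map each column's internal name to its ordinal position in the row schema;
    // that position is what Calcite uses when building input references.
    Map<String, Integer> map = new HashMap<>();
    int pos = 0;
    for (ColumnInfo colInfo : rowResolver.getColumnInfos()) {
        map.put(colInfo.getInternalName(), pos++);
    }
    return map;
}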
Also used:

SqlStdOperatorTable (org.apache.calcite.sql.fun.SqlStdOperatorTable)
Table (org.apache.hadoop.hive.ql.metadata.Table)
HiveParserBaseSemanticAnalyzer.obtainTableType (org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.obtainTableType)
TableType (org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.TableType)
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema)
ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo)
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)
StructField (org.apache.hadoop.hive.serde2.objectinspector.StructField)
RelNode (org.apache.calcite.rel.RelNode)
HiveParserRowResolver (org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver)
StructObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector)

Aggregations

RelNode (org.apache.calcite.rel.RelNode) 1
SqlStdOperatorTable (org.apache.calcite.sql.fun.SqlStdOperatorTable) 1
TableType (org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.TableType) 1
HiveParserBaseSemanticAnalyzer.obtainTableType (org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.obtainTableType) 1
HiveParserRowResolver (org.apache.flink.table.planner.delegation.hive.copy.HiveParserRowResolver) 1
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema) 1
ColumnInfo (org.apache.hadoop.hive.ql.exec.ColumnInfo) 1
Table (org.apache.hadoop.hive.ql.metadata.Table) 1
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException) 1
StructField (org.apache.hadoop.hive.serde2.objectinspector.StructField) 1
StructObjectInspector (org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) 1
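For context, obtainTableType (imported above from HiveParserBaseSemanticAnalyzer) classifies a table by its storage handler, and the planner proceeds only for TableType.NATIVE. A plausible sketch of such a check, modeled on Hive's own CalcitePlanner and assuming the TableType enum defines a DRUID member alongside NATIVE; treat the body as illustrative rather than the exact Flink copy:

public static TableType obtainTableType(Table tabMetaData) {
    // A table without a storage handler is a native Hive table. Tables backed
    // by a handler (e.g. the Druid storage handler) take a different path and
    // fail the NATIVE precondition in genTableLogicalPlan above.
    if (tabMetaData.getStorageHandler() != null
            && tabMetaData
                    .getStorageHandler()
                    .toString()
                    .equals("org.apache.hadoop.hive.druid.DruidStorageHandler")) {
        return TableType.DRUID;
    }
    return TableType.NATIVE;
}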