Example usage of org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.TableType in the Apache Flink project: the genTableLogicalPlan method of the HiveParserCalcitePlanner class.
/**
 * Generates the logical plan (a Calcite {@link RelNode}) for a single table reference in the
 * query block: either a {@code LogicalValues} node for Hive's temporary VALUES tables, or a
 * table scan built through the catalog reader for native tables.
 *
 * @param tableAlias the alias under which the table appears in the query block
 * @param qb the Hive query block holding parse info and table metadata
 * @return the RelNode scanning (or materializing) the table, registered in the
 *     RelNode-to-RowResolver and RelNode-to-column-position maps
 * @throws SemanticException if the query uses table samples / split samples (unsupported in
 *     CBO), or if plan generation fails for any other reason (wrapped as RuntimeException)
 */
private RelNode genTableLogicalPlan(String tableAlias, HiveParserQB qb) throws SemanticException {
    HiveParserRowResolver rowResolver = new HiveParserRowResolver();
    try {
        // 1. Bail out early: table samples and split samples are not supported in CBO;
        //    likewise bail when the CBO return path is enabled together with hive test mode.
        //    (Parentheses make the ||/&& grouping explicit; same as Java's precedence.)
        if (qb.getParseInfo().needTableSample(tableAlias)
                || semanticAnalyzer.getNameToSplitSampleMap().containsKey(tableAlias)
                || (Boolean.parseBoolean(
                                semanticAnalyzer
                                        .getConf()
                                        .get("hive.cbo.returnpath.hiveop", "false"))
                        && semanticAnalyzer
                                .getConf()
                                .getBoolVar(HiveConf.ConfVars.HIVETESTMODE))) {
            String msg =
                    String.format(
                            "Table Sample specified for %s."
                                    + " Currently we don't support Table Sample clauses in CBO,"
                                    + " turn off cbo for queries on tableSamples.",
                            tableAlias);
            LOG.debug(msg);
            throw new SemanticException(msg);
        }

        // 2. Get table metadata for this alias.
        Table table = qb.getMetaData().getSrcForAlias(tableAlias);

        if (table.isTemporary()) {
            // Hive creates a temp table for VALUES; convert it to a LogicalValues node
            // instead of a table scan.
            RelNode values =
                    genValues(
                            tableAlias,
                            table,
                            rowResolver,
                            cluster,
                            getQB().getValuesTableToData().get(tableAlias));
            relToRowResolver.put(values, rowResolver);
            relToHiveColNameCalcitePosMap.put(values, buildHiveToCalciteColumnMap(rowResolver));
            return values;
        }

        // 3. Build the table's logical schema (row type).
        // NOTE: Table logical schema = Non Partition Cols + Partition Cols + Virtual Cols

        // 3.1 Add column info for non-partition columns, taken from the deserializer's
        // ObjectInspector fields.
        StructObjectInspector rowObjectInspector =
                (StructObjectInspector) table.getDeserializer().getObjectInspector();
        List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
        for (StructField field : fields) {
            String colName = field.getFieldName();
            ColumnInfo colInfo =
                    new ColumnInfo(
                            colName,
                            TypeInfoUtils.getTypeInfoFromObjectInspector(
                                    field.getFieldObjectInspector()),
                            tableAlias,
                            false);
            colInfo.setSkewedCol(HiveParserUtils.isSkewedCol(tableAlias, qb, colName));
            rowResolver.put(tableAlias, colName, colInfo);
        }

        // 3.2 Add column info corresponding to partition columns (last flag marks them as
        // virtual/partition columns in the resolver).
        for (FieldSchema partCol : table.getPartCols()) {
            String colName = partCol.getName();
            ColumnInfo colInfo =
                    new ColumnInfo(
                            colName,
                            TypeInfoFactory.getPrimitiveTypeInfo(partCol.getType()),
                            tableAlias,
                            true);
            rowResolver.put(tableAlias, colName, colInfo);
        }

        // 4. Only native tables can be scanned here; anything backed by a storage handler
        // is rejected up front.
        final TableType tableType = obtainTableType(table);
        Preconditions.checkArgument(
                tableType == TableType.NATIVE, "Only native tables are supported");

        // 5. Build the table scan RelNode through the catalog reader, qualified as
        // currentCatalog.dbName.tableName.
        RelNode tableRel =
                catalogReader
                        .getTable(
                                Arrays.asList(
                                        catalogManager.getCurrentCatalog(),
                                        table.getDbName(),
                                        table.getTableName()))
                        .toRel(
                                ViewExpanders.toRelContext(
                                        flinkPlanner.createToRelContext(), cluster));

        // 6. Register the row resolver and the Hive -> Calcite column position map for the
        // new node so downstream plan generation can resolve columns.
        Map<String, Integer> hiveToCalciteColMap = buildHiveToCalciteColumnMap(rowResolver);
        relToRowResolver.put(tableRel, rowResolver);
        relToHiveColNameCalcitePosMap.put(tableRel, hiveToCalciteColMap);
        return tableRel;
    } catch (SemanticException e) {
        // Re-throw semantic errors unchanged so callers that catch them still work.
        throw e;
    } catch (Exception e) {
        // Anything else is unexpected; wrap it while preserving the original cause.
        throw new RuntimeException(e);
    }
}
Aggregations