Example 1 with QBMetaData

Use of org.apache.hadoop.hive.ql.parse.QBMetaData in the apache/flink project.

The createInsertOperation method of the HiveParserDMLHelper class:

public Operation createInsertOperation(HiveParserCalcitePlanner analyzer, RelNode queryRelNode) throws SemanticException {
    HiveParserQB topQB = analyzer.getQB();
    QBMetaData qbMetaData = topQB.getMetaData();
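    // QBMetaData holds the metadata resolved during semantic analysis,
    // e.g. the destination table or partition for each insert clause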
    // decide the dest table
    Map<String, Table> nameToDestTable = qbMetaData.getNameToDestTable();
    Map<String, Partition> nameToDestPart = qbMetaData.getNameToDestPartition();
    // for now we only support inserting to a single table
    Preconditions.checkState(nameToDestTable.size() <= 1 && nameToDestPart.size() <= 1, "Only support inserting to 1 table");
    Table destTable;
    String insClauseName;
    if (!nameToDestTable.isEmpty()) {
        insClauseName = nameToDestTable.keySet().iterator().next();
        destTable = nameToDestTable.values().iterator().next();
    } else if (!nameToDestPart.isEmpty()) {
        insClauseName = nameToDestPart.keySet().iterator().next();
        destTable = nameToDestPart.values().iterator().next().getTable();
    } else {
        // happens for INSERT DIRECTORY
        throw new SemanticException("INSERT DIRECTORY is not supported");
    }
    // decide static partition specs
    Map<String, String> staticPartSpec = new LinkedHashMap<>();
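    // LinkedHashMap preserves insertion order, so the spec follows the declared partition-column order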
    if (destTable.isPartitioned()) {
        List<String> partCols = HiveCatalog.getFieldNames(destTable.getTTable().getPartitionKeys());
        if (!nameToDestPart.isEmpty()) {
            // static partition
            Partition destPart = nameToDestPart.values().iterator().next();
            Preconditions.checkState(partCols.size() == destPart.getValues().size(), "Part cols and static spec doesn't match");
            for (int i = 0; i < partCols.size(); i++) {
                staticPartSpec.put(partCols.get(i), destPart.getValues().get(i));
            }
        } else {
            // dynamic partition
            Map<String, String> spec = qbMetaData.getPartSpecForAlias(insClauseName);
            if (spec != null) {
                for (String partCol : partCols) {
                    String val = spec.get(partCol);
                    if (val != null) {
                        staticPartSpec.put(partCol, val);
                    }
                }
            }
        }
    }
    // decide whether it's overwrite
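    // the table names recorded for INSERT OVERWRITE are lower-cased before being compared against "db.table"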
    boolean overwrite =
            topQB.getParseInfo().getInsertOverwriteTables().keySet().stream()
                    .map(String::toLowerCase)
                    .collect(Collectors.toSet())
                    .contains(destTable.getDbName() + "." + destTable.getTableName());
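    // f0: dest table identifier, f1: the query to insert, f2: static partition spec, f3: whether to overwrite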
    Tuple4<ObjectIdentifier, QueryOperation, Map<String, String>, Boolean> insertOperationInfo =
            createInsertOperationInfo(
                    queryRelNode,
                    destTable,
                    staticPartSpec,
                    analyzer.getDestSchemaForClause(insClauseName),
                    overwrite);
    return new SinkModifyOperation(
            catalogManager.getTableOrError(insertOperationInfo.f0),
            insertOperationInfo.f1,
            insertOperationInfo.f2,
            insertOperationInfo.f3,
            Collections.emptyMap());
}
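
To make the partition-spec handling above concrete, the following is a minimal, self-contained sketch of the same decision logic with the Flink/Hive types stripped away. The class PartSpecResolver, the method resolveStaticPartSpec, and their inputs are hypothetical names invented for this illustration; only the branching mirrors createInsertOperation.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public final class PartSpecResolver {

    // partCols: partition columns in their declared order
    // destPartValues: values of a fully resolved destination partition, or null
    // clauseSpec: the (possibly partial) PARTITION(...) spec of the insert clause, or null
    static Map<String, String> resolveStaticPartSpec(
            List<String> partCols, List<String> destPartValues, Map<String, String> clauseSpec) {
        // LinkedHashMap keeps the columns in partition-key order
        Map<String, String> staticPartSpec = new LinkedHashMap<>();
        if (destPartValues != null) {
            // static partition: every partition column has a value
            if (partCols.size() != destPartValues.size()) {
                throw new IllegalStateException("Part cols and static spec don't match");
            }
            for (int i = 0; i < partCols.size(); i++) {
                staticPartSpec.put(partCols.get(i), destPartValues.get(i));
            }
        } else if (clauseSpec != null) {
            // dynamic partition: only the columns pinned in the clause become static
            for (String partCol : partCols) {
                String val = clauseSpec.get(partCol);
                if (val != null) {
                    staticPartSpec.put(partCol, val);
                }
            }
        }
        return staticPartSpec;
    }

    public static void main(String[] args) {
        // INSERT ... PARTITION (dt='2024-01-01') into a table partitioned by (dt, region):
        // dt becomes a static partition column, region stays dynamic
        System.out.println(resolveStaticPartSpec(
                List.of("dt", "region"), null, Map.of("dt", "2024-01-01")));
        // prints {dt=2024-01-01}
    }
}
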
Also used :

Partition (org.apache.hadoop.hive.ql.metadata.Partition)
Table (org.apache.hadoop.hive.ql.metadata.Table)
HiveParserQB (org.apache.flink.table.planner.delegation.hive.copy.HiveParserQB)
SinkModifyOperation (org.apache.flink.table.operations.SinkModifyOperation)
LinkedHashMap (java.util.LinkedHashMap)
QBMetaData (org.apache.hadoop.hive.ql.parse.QBMetaData)
Map (java.util.Map)
SemanticException (org.apache.hadoop.hive.ql.parse.SemanticException)
ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier)
PlannerQueryOperation (org.apache.flink.table.planner.operations.PlannerQueryOperation)
QueryOperation (org.apache.flink.table.operations.QueryOperation)
