Example 96 with ObjectIdentifier

Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.

The class SqlToOperationConverter, method convertDescribeTable.

/**
 * Convert DESCRIBE [EXTENDED] [[catalogName.]databaseName.]tableName.
 */
private Operation convertDescribeTable(SqlRichDescribeTable sqlRichDescribeTable) {
    UnresolvedIdentifier unresolvedIdentifier = UnresolvedIdentifier.of(sqlRichDescribeTable.fullTableName());
    ObjectIdentifier identifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
    return new DescribeTableOperation(identifier, sqlRichDescribeTable.isExtended());
}
Also used: UnresolvedIdentifier(org.apache.flink.table.catalog.UnresolvedIdentifier), DescribeTableOperation(org.apache.flink.table.operations.DescribeTableOperation), ObjectIdentifier(org.apache.flink.table.catalog.ObjectIdentifier)
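
The converter turns the parsed table path into an UnresolvedIdentifier and lets the CatalogManager expand it into a fully qualified ObjectIdentifier. Below is a minimal standalone sketch of the resulting operation, assuming the Flink table API is on the classpath; the catalog, database, and table names are hypothetical and stand in for whatever qualifyIdentifier would return:

import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.operations.DescribeTableOperation;

public class DescribeTableSketch {
    public static void main(String[] args) {
        // Hypothetical fully qualified name; in the converter it comes from
        // catalogManager.qualifyIdentifier(UnresolvedIdentifier.of(...)).
        ObjectIdentifier identifier = ObjectIdentifier.of("my_catalog", "my_database", "my_table");
        // Equivalent of: DESCRIBE EXTENDED my_catalog.my_database.my_table
        DescribeTableOperation describe = new DescribeTableOperation(identifier, true);
        System.out.println(describe.asSummaryString());
    }
}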

Example 97 with ObjectIdentifier

Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.

The class SqlToOperationConverter, method convertShowPartitions.

/**
 * Convert SHOW PARTITIONS statement.
 */
private Operation convertShowPartitions(SqlShowPartitions sqlShowPartitions) {
    UnresolvedIdentifier unresolvedIdentifier = UnresolvedIdentifier.of(sqlShowPartitions.fullTableName());
    ObjectIdentifier tableIdentifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
    LinkedHashMap<String, String> partitionKVs = sqlShowPartitions.getPartitionKVs();
    if (partitionKVs != null) {
        CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(partitionKVs);
        return new ShowPartitionsOperation(tableIdentifier, partitionSpec);
    }
    return new ShowPartitionsOperation(tableIdentifier, null);
}
Also used: UnresolvedIdentifier(org.apache.flink.table.catalog.UnresolvedIdentifier), ShowPartitionsOperation(org.apache.flink.table.operations.ShowPartitionsOperation), CatalogPartitionSpec(org.apache.flink.table.catalog.CatalogPartitionSpec), ObjectIdentifier(org.apache.flink.table.catalog.ObjectIdentifier)
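
The SHOW PARTITIONS conversion follows the same qualification step and optionally wraps the parsed partition key/value pairs in a CatalogPartitionSpec. A minimal sketch with hypothetical names and a hand-built spec (the real converter obtains both from the parsed SqlShowPartitions node):

import java.util.LinkedHashMap;
import org.apache.flink.table.catalog.CatalogPartitionSpec;
import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.operations.ShowPartitionsOperation;

public class ShowPartitionsSketch {
    public static void main(String[] args) {
        ObjectIdentifier table = ObjectIdentifier.of("my_catalog", "my_database", "my_table");
        // Partial spec, as for: SHOW PARTITIONS my_table PARTITION (dt='2024-01-01')
        LinkedHashMap<String, String> partitionKVs = new LinkedHashMap<>();
        partitionKVs.put("dt", "2024-01-01");
        ShowPartitionsOperation op =
                new ShowPartitionsOperation(table, new CatalogPartitionSpec(partitionKVs));
        System.out.println(op.asSummaryString());
    }
}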

Example 98 with ObjectIdentifier

Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.

The class SqlCreateTableConverter, method lookupLikeSourceTable.

private CatalogTable lookupLikeSourceTable(SqlTableLike sqlTableLike) {
    UnresolvedIdentifier unresolvedIdentifier = UnresolvedIdentifier.of(sqlTableLike.getSourceTable().names);
    ObjectIdentifier identifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
    ContextResolvedTable lookupResult = catalogManager.getTable(identifier)
            .orElseThrow(() -> new ValidationException(String.format(
                    "Source table '%s' of the LIKE clause not found in the catalog, at %s",
                    identifier, sqlTableLike.getSourceTable().getParserPosition())));
    if (!(lookupResult.getTable() instanceof CatalogTable)) {
        throw new ValidationException(String.format(
                "Source table '%s' of the LIKE clause can not be a VIEW, at %s",
                identifier, sqlTableLike.getSourceTable().getParserPosition()));
    }
    return lookupResult.getTable();
}
Also used: ValidationException(org.apache.flink.table.api.ValidationException), UnresolvedIdentifier(org.apache.flink.table.catalog.UnresolvedIdentifier), ContextResolvedTable(org.apache.flink.table.catalog.ContextResolvedTable), CatalogTable(org.apache.flink.table.catalog.CatalogTable), ObjectIdentifier(org.apache.flink.table.catalog.ObjectIdentifier)
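
The interesting part here is the qualification of a possibly partial table path: the LIKE source table may be written with one, two, or three name parts, and catalogManager.qualifyIdentifier fills in the current catalog and database for whatever is missing. A small sketch of the identifier types involved, using hypothetical names (no CatalogManager is constructed here):

import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.catalog.UnresolvedIdentifier;

public class IdentifierQualificationSketch {
    public static void main(String[] args) {
        // The same source table written with increasing qualification.
        UnresolvedIdentifier onePart = UnresolvedIdentifier.of("source_table");
        UnresolvedIdentifier twoParts = UnresolvedIdentifier.of("my_database", "source_table");
        UnresolvedIdentifier threeParts = UnresolvedIdentifier.of("my_catalog", "my_database", "source_table");
        // A fully qualified identifier, as qualifyIdentifier would produce when the
        // current catalog is "my_catalog" and the current database is "my_database".
        ObjectIdentifier qualified = ObjectIdentifier.of("my_catalog", "my_database", "source_table");
        System.out.println(onePart);
        System.out.println(twoParts);
        System.out.println(threeParts);
        System.out.println(qualified.asSummaryString());
    }
}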

Example 99 with ObjectIdentifier

Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by apache.

The class HiveParserDMLHelper, method createInsertOperation.

public Operation createInsertOperation(HiveParserCalcitePlanner analyzer, RelNode queryRelNode) throws SemanticException {
    HiveParserQB topQB = analyzer.getQB();
    QBMetaData qbMetaData = topQB.getMetaData();
    // decide the dest table
    Map<String, Table> nameToDestTable = qbMetaData.getNameToDestTable();
    Map<String, Partition> nameToDestPart = qbMetaData.getNameToDestPartition();
    // for now we only support inserting to a single table
    Preconditions.checkState(nameToDestTable.size() <= 1 && nameToDestPart.size() <= 1, "Only support inserting to 1 table");
    Table destTable;
    String insClauseName;
    if (!nameToDestTable.isEmpty()) {
        insClauseName = nameToDestTable.keySet().iterator().next();
        destTable = nameToDestTable.values().iterator().next();
    } else if (!nameToDestPart.isEmpty()) {
        insClauseName = nameToDestPart.keySet().iterator().next();
        destTable = nameToDestPart.values().iterator().next().getTable();
    } else {
        // happens for INSERT DIRECTORY
        throw new SemanticException("INSERT DIRECTORY is not supported");
    }
    // decide static partition specs
    Map<String, String> staticPartSpec = new LinkedHashMap<>();
    if (destTable.isPartitioned()) {
        List<String> partCols = HiveCatalog.getFieldNames(destTable.getTTable().getPartitionKeys());
        if (!nameToDestPart.isEmpty()) {
            // static partition
            Partition destPart = nameToDestPart.values().iterator().next();
            Preconditions.checkState(partCols.size() == destPart.getValues().size(), "Part cols and static spec doesn't match");
            for (int i = 0; i < partCols.size(); i++) {
                staticPartSpec.put(partCols.get(i), destPart.getValues().get(i));
            }
        } else {
            // dynamic partition
            Map<String, String> spec = qbMetaData.getPartSpecForAlias(insClauseName);
            if (spec != null) {
                for (String partCol : partCols) {
                    String val = spec.get(partCol);
                    if (val != null) {
                        staticPartSpec.put(partCol, val);
                    }
                }
            }
        }
    }
    // decide whether it's overwrite
    boolean overwrite = topQB.getParseInfo().getInsertOverwriteTables().keySet().stream()
            .map(String::toLowerCase).collect(Collectors.toSet())
            .contains(destTable.getDbName() + "." + destTable.getTableName());
    Tuple4<ObjectIdentifier, QueryOperation, Map<String, String>, Boolean> insertOperationInfo =
            createInsertOperationInfo(queryRelNode, destTable, staticPartSpec,
                    analyzer.getDestSchemaForClause(insClauseName), overwrite);
    return new SinkModifyOperation(catalogManager.getTableOrError(insertOperationInfo.f0),
            insertOperationInfo.f1, insertOperationInfo.f2, insertOperationInfo.f3,
            Collections.emptyMap());
}
Also used: Partition(org.apache.hadoop.hive.ql.metadata.Partition), Table(org.apache.hadoop.hive.ql.metadata.Table), HiveParserQB(org.apache.flink.table.planner.delegation.hive.copy.HiveParserQB), SinkModifyOperation(org.apache.flink.table.operations.SinkModifyOperation), LinkedHashMap(java.util.LinkedHashMap), QBMetaData(org.apache.hadoop.hive.ql.parse.QBMetaData), Map(java.util.Map), SemanticException(org.apache.hadoop.hive.ql.parse.SemanticException), ObjectIdentifier(org.apache.flink.table.catalog.ObjectIdentifier), PlannerQueryOperation(org.apache.flink.table.planner.operations.PlannerQueryOperation), QueryOperation(org.apache.flink.table.operations.QueryOperation)
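
Two pieces of this method are easy to illustrate without the Hive parser types: zipping the partition columns against the static partition values, and building the lower-cased "db.table" key used for the INSERT OVERWRITE check. A simplified, dependency-free sketch; the column names, values, and table names are made up:

import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

public class InsertInfoSketch {
    public static void main(String[] args) {
        // Stand-ins for HiveCatalog.getFieldNames(destTable.getTTable().getPartitionKeys())
        // and destPart.getValues().
        List<String> partCols = Arrays.asList("dt", "region");
        List<String> partVals = Arrays.asList("2024-01-01", "emea");
        Map<String, String> staticPartSpec = new LinkedHashMap<>();
        for (int i = 0; i < partCols.size(); i++) {
            staticPartSpec.put(partCols.get(i), partVals.get(i));
        }
        // Stand-in for topQB.getParseInfo().getInsertOverwriteTables().keySet().
        Set<String> insertOverwriteTables = new HashSet<>(Arrays.asList("MyDb.MyTable"));
        boolean overwrite = insertOverwriteTables.stream()
                .map(String::toLowerCase)
                .collect(Collectors.toSet())
                .contains("mydb" + "." + "mytable");
        System.out.println(staticPartSpec + ", overwrite=" + overwrite);
    }
}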

Example 100 with ObjectIdentifier

Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by apache.

The class HiveParserDDLSemanticAnalyzer, method convertAlterTableDropParts.

private Operation convertAlterTableDropParts(String[] qualified, HiveParserASTNode ast) {
    boolean ifExists = ast.getFirstChildWithType(HiveASTParser.TOK_IFEXISTS) != null;
    // If the drop has to fail on non-existent partitions, we cannot batch expressions.
    // That is because we actually have to check each separate expression for existence.
    // We could do a small optimization for the case where expr has all columns and all
    // operators are equality, if we assume those would always match one partition (which
    // may not be true with legacy, non-normalized column values). This is probably a
    // popular case but that's kinda hacky. Let's not do it for now.
    Table tab = getTable(new ObjectPath(qualified[0], qualified[1]));
    // hive represents drop partition specs with generic func desc, but what we need is just
    // spec maps
    List<Map<String, String>> partSpecs = new ArrayList<>();
    for (int i = 0; i < ast.getChildCount(); i++) {
        HiveParserASTNode child = (HiveParserASTNode) ast.getChild(i);
        if (child.getType() == HiveASTParser.TOK_PARTSPEC) {
            partSpecs.add(getPartSpec(child));
        }
    }
    validateAlterTableType(tab);
    ObjectIdentifier tableIdentifier = catalogManager.qualifyIdentifier(UnresolvedIdentifier.of(qualified[0], qualified[1]));
    List<CatalogPartitionSpec> specs = partSpecs.stream().map(CatalogPartitionSpec::new).collect(Collectors.toList());
    return new DropPartitionsOperation(tableIdentifier, ifExists, specs);
}
Also used: DropPartitionsOperation(org.apache.flink.table.operations.ddl.DropPartitionsOperation), ObjectPath(org.apache.flink.table.catalog.ObjectPath), CatalogTable(org.apache.flink.table.catalog.CatalogTable), SqlCreateHiveTable(org.apache.flink.sql.parser.hive.ddl.SqlCreateHiveTable), Table(org.apache.hadoop.hive.ql.metadata.Table), ContextResolvedTable(org.apache.flink.table.catalog.ContextResolvedTable), CatalogBaseTable(org.apache.flink.table.catalog.CatalogBaseTable), HiveParserASTNode(org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode), ArrayList(java.util.ArrayList), Map(java.util.Map), LinkedHashMap(java.util.LinkedHashMap), HashMap(java.util.HashMap), NotNullConstraint(org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.NotNullConstraint), UniqueConstraint(org.apache.flink.table.api.constraints.UniqueConstraint), CatalogPartitionSpec(org.apache.flink.table.catalog.CatalogPartitionSpec), ObjectIdentifier(org.apache.flink.table.catalog.ObjectIdentifier)
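
The tail of the method is the part that produces catalog-API objects: each parsed partition spec map becomes a CatalogPartitionSpec, and the batch is wrapped in a DropPartitionsOperation together with the qualified table identifier and the IF EXISTS flag. A minimal sketch with hypothetical names and hand-written spec maps:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.flink.table.catalog.CatalogPartitionSpec;
import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.operations.ddl.DropPartitionsOperation;

public class DropPartitionsSketch {
    public static void main(String[] args) {
        ObjectIdentifier table = ObjectIdentifier.of("my_catalog", "my_database", "my_table");
        // Two specs, as parsed from:
        //   ALTER TABLE my_table DROP IF EXISTS PARTITION (dt='2024-01-01'), PARTITION (dt='2024-01-02')
        Map<String, String> spec1 = new LinkedHashMap<>();
        spec1.put("dt", "2024-01-01");
        Map<String, String> spec2 = new LinkedHashMap<>();
        spec2.put("dt", "2024-01-02");
        List<CatalogPartitionSpec> specs = Arrays.asList(spec1, spec2).stream()
                .map(CatalogPartitionSpec::new)
                .collect(Collectors.toList());
        DropPartitionsOperation op = new DropPartitionsOperation(table, true, specs);
        System.out.println(op.asSummaryString());
    }
}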

Aggregations

ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier): 185
CatalogTable (org.apache.flink.table.catalog.CatalogTable): 66
UnresolvedIdentifier (org.apache.flink.table.catalog.UnresolvedIdentifier): 60
ValidationException (org.apache.flink.table.api.ValidationException): 59
HashMap (java.util.HashMap): 57
LinkedHashMap (java.util.LinkedHashMap): 48
CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable): 42
ContextResolvedTable (org.apache.flink.table.catalog.ContextResolvedTable): 41
ResolvedCatalogTable (org.apache.flink.table.catalog.ResolvedCatalogTable): 33
ArrayList (java.util.ArrayList): 30
Map (java.util.Map): 27
UniqueConstraint (org.apache.flink.table.api.constraints.UniqueConstraint): 27
CatalogPartitionSpec (org.apache.flink.table.catalog.CatalogPartitionSpec): 24
NotNullConstraint (org.apache.flink.table.planner.delegation.hive.copy.HiveParserBaseSemanticAnalyzer.NotNullConstraint): 24
TableException (org.apache.flink.table.api.TableException): 23
TableSchema (org.apache.flink.table.api.TableSchema): 23
CatalogView (org.apache.flink.table.catalog.CatalogView): 21
QueryOperation (org.apache.flink.table.operations.QueryOperation): 18
HiveParserASTNode (org.apache.flink.table.planner.delegation.hive.copy.HiveParserASTNode): 18
List (java.util.List): 16