Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.
From the class SqlToOperationConverter, method convertDescribeTable:
/**
 * Convert DESCRIBE [EXTENDED] [[catalogName.] databaseName].sqlIdentifier.
 */
private Operation convertDescribeTable(SqlRichDescribeTable sqlRichDescribeTable) {
    UnresolvedIdentifier unresolvedIdentifier =
            UnresolvedIdentifier.of(sqlRichDescribeTable.fullTableName());
    ObjectIdentifier identifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
    return new DescribeTableOperation(identifier, sqlRichDescribeTable.isExtended());
}
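The pattern above recurs throughout these converters: an UnresolvedIdentifier built from the parsed name is qualified by the CatalogManager into a fully specified ObjectIdentifier. A minimal sketch of the two identifier types, with a hypothetical table name and the default catalog/database filled in by hand rather than by a CatalogManager:

import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.catalog.UnresolvedIdentifier;

public class IdentifierSketch {
    public static void main(String[] args) {
        // A parsed statement may name the table with one, two, or three parts;
        // UnresolvedIdentifier captures whatever was written.
        UnresolvedIdentifier partial = UnresolvedIdentifier.of("orders");
        System.out.println(partial.asSummaryString()); // only the parts that were written

        // catalogManager.qualifyIdentifier(partial) would fill in the session's current
        // catalog and database; here the qualified result is built directly for illustration.
        ObjectIdentifier qualified =
                ObjectIdentifier.of("default_catalog", "default_database", "orders");
        System.out.println(qualified.asSummaryString()); // default_catalog.default_database.orders
    }
}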
Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.
From the class SqlToOperationConverter, method convertShowPartitions:
/**
 * Convert SHOW PARTITIONS statement.
 */
private Operation convertShowPartitions(SqlShowPartitions sqlShowPartitions) {
    UnresolvedIdentifier unresolvedIdentifier =
            UnresolvedIdentifier.of(sqlShowPartitions.fullTableName());
    ObjectIdentifier tableIdentifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
    LinkedHashMap<String, String> partitionKVs = sqlShowPartitions.getPartitionKVs();
    if (partitionKVs != null) {
        CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(partitionKVs);
        return new ShowPartitionsOperation(tableIdentifier, partitionSpec);
    }
    return new ShowPartitionsOperation(tableIdentifier, null);
}
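CatalogPartitionSpec here is just an order-preserving wrapper around the partition key/value map taken from the statement. A small sketch with made-up partition columns:

import java.util.LinkedHashMap;
import org.apache.flink.table.catalog.CatalogPartitionSpec;

public class PartitionSpecSketch {
    public static void main(String[] args) {
        // SHOW PARTITIONS tbl PARTITION (dt='2024-01-01', region='us') yields an ordered map.
        LinkedHashMap<String, String> kvs = new LinkedHashMap<>();
        kvs.put("dt", "2024-01-01");
        kvs.put("region", "us");

        CatalogPartitionSpec spec = new CatalogPartitionSpec(kvs);
        // getPartitionSpec() returns the underlying key/value map.
        System.out.println(spec.getPartitionSpec()); // {dt=2024-01-01, region=us}
    }
}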
Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by splunk.
From the class SqlCreateTableConverter, method lookupLikeSourceTable:
private CatalogTable lookupLikeSourceTable(SqlTableLike sqlTableLike) {
    UnresolvedIdentifier unresolvedIdentifier =
            UnresolvedIdentifier.of(sqlTableLike.getSourceTable().names);
    ObjectIdentifier identifier = catalogManager.qualifyIdentifier(unresolvedIdentifier);
    ContextResolvedTable lookupResult =
            catalogManager
                    .getTable(identifier)
                    .orElseThrow(
                            () ->
                                    new ValidationException(
                                            String.format(
                                                    "Source table '%s' of the LIKE clause not found in the catalog, at %s",
                                                    identifier,
                                                    sqlTableLike.getSourceTable().getParserPosition())));
    if (!(lookupResult.getTable() instanceof CatalogTable)) {
        throw new ValidationException(
                String.format(
                        "Source table '%s' of the LIKE clause can not be a VIEW, at %s",
                        identifier,
                        sqlTableLike.getSourceTable().getParserPosition()));
    }
    return lookupResult.getTable();
}
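The instanceof CatalogTable guard is what rejects views as a LIKE source. A self-contained sketch of that distinction, using a hypothetical table built with CatalogTable.of rather than one resolved through the CatalogManager:

import java.util.Collections;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.Schema;
import org.apache.flink.table.catalog.CatalogBaseTable;
import org.apache.flink.table.catalog.CatalogTable;

public class LikeSourceCheckSketch {
    public static void main(String[] args) {
        // A hypothetical, unpartitioned table with a single INT column and no options.
        CatalogBaseTable candidate =
                CatalogTable.of(
                        Schema.newBuilder().column("id", DataTypes.INT()).build(),
                        "a hypothetical source table",
                        Collections.emptyList(),
                        Collections.emptyMap());

        // The converter only accepts plain tables; a CatalogView would fail this check.
        if (candidate instanceof CatalogTable) {
            System.out.println("usable as a LIKE source");
        }
    }
}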
Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by apache.
From the class HiveParserDMLHelper, method createInsertOperation:
public Operation createInsertOperation(HiveParserCalcitePlanner analyzer, RelNode queryRelNode)
        throws SemanticException {
    HiveParserQB topQB = analyzer.getQB();
    QBMetaData qbMetaData = topQB.getMetaData();
    // decide the dest table
    Map<String, Table> nameToDestTable = qbMetaData.getNameToDestTable();
    Map<String, Partition> nameToDestPart = qbMetaData.getNameToDestPartition();
    // for now we only support inserting to a single table
    Preconditions.checkState(
            nameToDestTable.size() <= 1 && nameToDestPart.size() <= 1,
            "Only support inserting to 1 table");
    Table destTable;
    String insClauseName;
    if (!nameToDestTable.isEmpty()) {
        insClauseName = nameToDestTable.keySet().iterator().next();
        destTable = nameToDestTable.values().iterator().next();
    } else if (!nameToDestPart.isEmpty()) {
        insClauseName = nameToDestPart.keySet().iterator().next();
        destTable = nameToDestPart.values().iterator().next().getTable();
    } else {
        // happens for INSERT DIRECTORY
        throw new SemanticException("INSERT DIRECTORY is not supported");
    }
    // decide static partition specs
    Map<String, String> staticPartSpec = new LinkedHashMap<>();
    if (destTable.isPartitioned()) {
        List<String> partCols =
                HiveCatalog.getFieldNames(destTable.getTTable().getPartitionKeys());
        if (!nameToDestPart.isEmpty()) {
            // static partition
            Partition destPart = nameToDestPart.values().iterator().next();
            Preconditions.checkState(
                    partCols.size() == destPart.getValues().size(),
                    "Part cols and static spec doesn't match");
            for (int i = 0; i < partCols.size(); i++) {
                staticPartSpec.put(partCols.get(i), destPart.getValues().get(i));
            }
        } else {
            // dynamic partition
            Map<String, String> spec = qbMetaData.getPartSpecForAlias(insClauseName);
            if (spec != null) {
                for (String partCol : partCols) {
                    String val = spec.get(partCol);
                    if (val != null) {
                        staticPartSpec.put(partCol, val);
                    }
                }
            }
        }
    }
    // decide whether it's overwrite
    boolean overwrite =
            topQB.getParseInfo().getInsertOverwriteTables().keySet().stream()
                    .map(String::toLowerCase)
                    .collect(Collectors.toSet())
                    .contains(destTable.getDbName() + "." + destTable.getTableName());
    Tuple4<ObjectIdentifier, QueryOperation, Map<String, String>, Boolean> insertOperationInfo =
            createInsertOperationInfo(
                    queryRelNode,
                    destTable,
                    staticPartSpec,
                    analyzer.getDestSchemaForClause(insClauseName),
                    overwrite);
    return new SinkModifyOperation(
            catalogManager.getTableOrError(insertOperationInfo.f0),
            insertOperationInfo.f1,
            insertOperationInfo.f2,
            insertOperationInfo.f3,
            Collections.emptyMap());
}
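The overwrite decision lowercases every table named in an INSERT OVERWRITE clause and checks the destination's db.table key against that set. A tiny, self-contained illustration of the same matching step with made-up names:

import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class OverwriteCheckSketch {
    public static void main(String[] args) {
        // Hypothetical tables named in INSERT OVERWRITE clauses, possibly in mixed case.
        Set<String> overwriteTables =
                Stream.of("Default.Orders").map(String::toLowerCase).collect(Collectors.toSet());

        // The destination is matched case-insensitively on its "db.table" key.
        String dbName = "default";
        String tableName = "orders";
        boolean overwrite = overwriteTables.contains(dbName + "." + tableName);
        System.out.println(overwrite); // true
    }
}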
Use of org.apache.flink.table.catalog.ObjectIdentifier in project flink by apache.
From the class HiveParserDDLSemanticAnalyzer, method convertAlterTableDropParts:
private Operation convertAlterTableDropParts(String[] qualified, HiveParserASTNode ast) {
    boolean ifExists = ast.getFirstChildWithType(HiveASTParser.TOK_IFEXISTS) != null;
    // If the drop has to fail on non-existent partitions, we cannot batch expressions.
    // That is because we actually have to check each separate expression for existence.
    // We could do a small optimization for the case where expr has all columns and all
    // operators are equality, if we assume those would always match one partition (which
    // may not be true with legacy, non-normalized column values). This is probably a
    // popular case but that's kinda hacky. Let's not do it for now.
    Table tab = getTable(new ObjectPath(qualified[0], qualified[1]));
    // hive represents drop partition specs with generic func desc, but what we need is just
    // spec maps
    List<Map<String, String>> partSpecs = new ArrayList<>();
    for (int i = 0; i < ast.getChildCount(); i++) {
        HiveParserASTNode child = (HiveParserASTNode) ast.getChild(i);
        if (child.getType() == HiveASTParser.TOK_PARTSPEC) {
            partSpecs.add(getPartSpec(child));
        }
    }
    validateAlterTableType(tab);
    ObjectIdentifier tableIdentifier =
            catalogManager.qualifyIdentifier(UnresolvedIdentifier.of(qualified[0], qualified[1]));
    List<CatalogPartitionSpec> specs =
            partSpecs.stream().map(CatalogPartitionSpec::new).collect(Collectors.toList());
    return new DropPartitionsOperation(tableIdentifier, ifExists, specs);
}
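Two addressing styles appear above: ObjectPath is the catalog-local (database, object) pair used to fetch the Hive Table, while the qualified ObjectIdentifier also carries the catalog name for the resulting operation. A short sketch with hypothetical names (the catalog name here is invented, since qualification normally comes from the CatalogManager):

import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.catalog.ObjectPath;

public class PathSketch {
    public static void main(String[] args) {
        // qualified[0] is the database and qualified[1] the table in the analyzer above.
        ObjectPath path = new ObjectPath("default", "orders");
        System.out.println(path.getFullName()); // default.orders

        // qualifyIdentifier(...) would additionally prepend the current catalog name.
        ObjectIdentifier id = ObjectIdentifier.of("myhive", "default", "orders");
        System.out.println(id.asSummaryString()); // myhive.default.orders
    }
}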