Use of org.apache.flink.table.operations.SinkModifyOperation in the Apache Flink project.
Example: the createInsertOperation method of the HiveParserDMLHelper class.
// Translates the Calcite plan of a Hive INSERT statement into a Flink
// SinkModifyOperation. Exactly one destination table is supported; the method
// resolves the destination, collects static partition values, and detects
// INSERT OVERWRITE.
public Operation createInsertOperation(HiveParserCalcitePlanner analyzer, RelNode queryRelNode) throws SemanticException {
HiveParserQB topQB = analyzer.getQB();
QBMetaData qbMetaData = topQB.getMetaData();
// decide the dest table
Map<String, Table> nameToDestTable = qbMetaData.getNameToDestTable();
Map<String, Partition> nameToDestPart = qbMetaData.getNameToDestPartition();
// for now we only support inserting to a single table
Preconditions.checkState(nameToDestTable.size() <= 1 && nameToDestPart.size() <= 1, "Only support inserting to 1 table");
Table destTable;
String insClauseName;
if (!nameToDestTable.isEmpty()) {
// destination is a table (unpartitioned, or with a dynamic partition spec)
insClauseName = nameToDestTable.keySet().iterator().next();
destTable = nameToDestTable.values().iterator().next();
} else if (!nameToDestPart.isEmpty()) {
// destination is a concrete partition, so every partition value is static
insClauseName = nameToDestPart.keySet().iterator().next();
destTable = nameToDestPart.values().iterator().next().getTable();
} else {
// happens for INSERT DIRECTORY
throw new SemanticException("INSERT DIRECTORY is not supported");
}
// decide static partition specs
// LinkedHashMap keeps the partition columns in declaration order
Map<String, String> staticPartSpec = new LinkedHashMap<>();
if (destTable.isPartitioned()) {
List<String> partCols = HiveCatalog.getFieldNames(destTable.getTTable().getPartitionKeys());
if (!nameToDestPart.isEmpty()) {
// static partition: the destination partition supplies one value per column
Partition destPart = nameToDestPart.values().iterator().next();
Preconditions.checkState(partCols.size() == destPart.getValues().size(), "Part cols and static spec doesn't match");
for (int i = 0; i < partCols.size(); i++) {
staticPartSpec.put(partCols.get(i), destPart.getValues().get(i));
}
} else {
// dynamic partition: only columns given explicit values in the PARTITION
// clause become static; the remaining ones are resolved per row at runtime
Map<String, String> spec = qbMetaData.getPartSpecForAlias(insClauseName);
if (spec != null) {
for (String partCol : partCols) {
String val = spec.get(partCol);
if (val != null) {
staticPartSpec.put(partCol, val);
}
}
}
}
}
// decide whether it's overwrite
// NOTE(review): the overwrite-table names are lowercased before the lookup while
// "db.table" built from destTable is used as-is — presumably Hive normalizes both
// to lower case; confirm if case-sensitive identifiers can reach this path.
boolean overwrite = topQB.getParseInfo().getInsertOverwriteTables().keySet().stream().map(String::toLowerCase).collect(Collectors.toSet()).contains(destTable.getDbName() + "." + destTable.getTableName());
// f0 = sink identifier, f1 = query operation, f2 = static partition spec, f3 = overwrite flag
Tuple4<ObjectIdentifier, QueryOperation, Map<String, String>, Boolean> insertOperationInfo = createInsertOperationInfo(queryRelNode, destTable, staticPartSpec, analyzer.getDestSchemaForClause(insClauseName), overwrite);
return new SinkModifyOperation(catalogManager.getTableOrError(insertOperationInfo.f0), insertOperationInfo.f1, insertOperationInfo.f2, insertOperationInfo.f3, Collections.emptyMap());
}
Use of org.apache.flink.table.operations.SinkModifyOperation in the Apache Flink project.
Example: the extractSinkIdentifierNames method of the TableEnvironmentImpl class.
/**
 * Collects the fully qualified sink table name of every given {@link ModifyOperation} and
 * deduplicates the result via {@link #deduplicateSinkIdentifierNames(List)}.
 *
 * @throws UnsupportedOperationException if an operation is not a {@link SinkModifyOperation}
 */
private List<String> extractSinkIdentifierNames(List<ModifyOperation> operations) {
    final List<String> qualifiedNames = new ArrayList<>(operations.size());
    for (ModifyOperation modifyOperation : operations) {
        // Guard clause: only sink modifications carry a resolvable table identifier.
        if (!(modifyOperation instanceof SinkModifyOperation)) {
            throw new UnsupportedOperationException("Unsupported operation: " + modifyOperation);
        }
        final SinkModifyOperation sink = (SinkModifyOperation) modifyOperation;
        qualifiedNames.add(sink.getContextResolvedTable().getIdentifier().asSummaryString());
    }
    return deduplicateSinkIdentifierNames(qualifiedNames);
}
Use of org.apache.flink.table.operations.SinkModifyOperation in the Apache Flink project.
Example: the checkAlterTableCompact method of the SqlToOperationConverterTest class.
/** Asserts that {@code operation} is the expected ALTER TABLE ... COMPACT sink operation. */
private void checkAlterTableCompact(Operation operation, Map<String, String> staticPartitions) {
    assertThat(operation).isInstanceOf(SinkModifyOperation.class);
    final SinkModifyOperation sink = (SinkModifyOperation) operation;
    // Sink-level expectations: target table, partitions, options, no overwrite.
    assertThat(sink.isOverwrite()).isFalse();
    assertThat(sink.getStaticPartitions()).containsExactlyInAnyOrderEntriesOf(staticPartitions);
    assertThat(sink.getDynamicOptions()).containsEntry(TestManagedTableFactory.ENRICHED_KEY, TestManagedTableFactory.ENRICHED_VALUE);
    assertThat(sink.getContextResolvedTable().getIdentifier()).isEqualTo(ObjectIdentifier.of("cat1", "db1", "tb1"));
    // Source-side expectations: a leaf scan carrying both plain and enriched options.
    assertThat(sink.getChild()).isInstanceOf(SourceQueryOperation.class);
    final SourceQueryOperation source = (SourceQueryOperation) sink.getChild();
    assertThat(source.getChildren()).isEmpty();
    assertThat(source.getDynamicOptions())
            .containsEntry("k", "v")
            .containsEntry(TestManagedTableFactory.ENRICHED_KEY, TestManagedTableFactory.ENRICHED_VALUE);
}
Use of org.apache.flink.table.operations.SinkModifyOperation in the Apache Flink project.
Example: the testSqlInsertWithDynamicTableOptions method of the SqlToOperationConverterTest class.
@Test
public void testSqlInsertWithDynamicTableOptions() {
    // An INSERT with an OPTIONS hint should surface the hint entries as
    // dynamic options on the resulting SinkModifyOperation.
    final String sql = "insert into t1 /*+ OPTIONS('k1'='v1', 'k2'='v2') */\n" + "select a, b, c, d from t2";
    FlinkPlannerImpl planner = getPlannerBySqlDialect(SqlDialect.DEFAULT);
    final CalciteParser parser = getParserBySqlDialect(SqlDialect.DEFAULT);
    Operation operation = parse(sql, planner, parser);
    assertThat(operation).isInstanceOf(SinkModifyOperation.class);
    SinkModifyOperation sinkModifyOperation = (SinkModifyOperation) operation;
    Map<String, String> dynamicOptions = sinkModifyOperation.getDynamicOptions();
    assertThat(dynamicOptions).isNotNull();
    // Assert on the entries rather than on Map#toString(): the string form depends
    // on the map's iteration order, which is unspecified and makes the test brittle.
    assertThat(dynamicOptions)
            .hasSize(2)
            .containsEntry("k1", "v1")
            .containsEntry("k2", "v2");
}
Use of org.apache.flink.table.operations.SinkModifyOperation in the Apache Flink project.
Example: the testSqlInsertWithStaticPartition method of the SqlToOperationConverterTest class.
@Test
public void testSqlInsertWithStaticPartition() {
    // A static PARTITION clause must be captured in the sink operation's
    // static partition spec.
    final String sql = "insert into t1 partition(a=1) select b, c, d from t2";
    final FlinkPlannerImpl planner = getPlannerBySqlDialect(SqlDialect.DEFAULT);
    final CalciteParser parser = getParserBySqlDialect(SqlDialect.DEFAULT);
    final Operation parsed = parse(sql, planner, parser);
    assertThat(parsed).isInstanceOf(SinkModifyOperation.class);
    final Map<String, String> expectedStaticPartitions = new HashMap<>();
    expectedStaticPartitions.put("a", "1");
    assertThat(((SinkModifyOperation) parsed).getStaticPartitions()).isEqualTo(expectedStaticPartitions);
}
Aggregations