use of org.apache.flink.table.operations.Operation in project flink by apache.
the class SqlToOperationConverterTest method testCreateTableWithComputedColumn.
@Test
public void testCreateTableWithComputedColumn() {
    final String sql =
            "CREATE TABLE tbl1 (\n"
                    + " a int,\n"
                    + " b varchar, \n"
                    + " c as a - 1, \n"
                    + " d as b || '$$', \n"
                    + " e as my_udf1(a),"
                    + " f as `default`.my_udf2(a) + 1,"
                    + " g as builtin.`default`.my_udf3(a) || '##'\n"
                    + ")\n"
                    + " with (\n"
                    + " 'connector' = 'kafka', \n"
                    + " 'kafka.topic' = 'log.test'\n"
                    + ")\n";
    functionCatalog.registerTempCatalogScalarFunction(
            ObjectIdentifier.of("builtin", "default", "my_udf1"), Func0$.MODULE$);
    functionCatalog.registerTempCatalogScalarFunction(
            ObjectIdentifier.of("builtin", "default", "my_udf2"), Func1$.MODULE$);
    functionCatalog.registerTempCatalogScalarFunction(
            ObjectIdentifier.of("builtin", "default", "my_udf3"), Func8$.MODULE$);
    FlinkPlannerImpl planner = getPlannerBySqlDialect(SqlDialect.DEFAULT);
    Operation operation = parse(sql, planner, getParserBySqlDialect(SqlDialect.DEFAULT));
    assertThat(operation).isInstanceOf(CreateTableOperation.class);
    CreateTableOperation op = (CreateTableOperation) operation;
    CatalogTable catalogTable = op.getCatalogTable();
    assertThat(catalogTable.getSchema().getFieldNames())
            .isEqualTo(new String[] {"a", "b", "c", "d", "e", "f", "g"});
    assertThat(catalogTable.getSchema().getFieldDataTypes())
            .isEqualTo(
                    new DataType[] {
                        DataTypes.INT(),
                        DataTypes.STRING(),
                        DataTypes.INT(),
                        DataTypes.STRING(),
                        DataTypes.INT().notNull(),
                        DataTypes.INT(),
                        DataTypes.STRING()
                    });
    String[] columnExpressions =
            catalogTable.getSchema().getTableColumns().stream()
                    .filter(ComputedColumn.class::isInstance)
                    .map(ComputedColumn.class::cast)
                    .map(ComputedColumn::getExpression)
                    .toArray(String[]::new);
    String[] expected =
            new String[] {
                "`a` - 1",
                "`b` || '$$'",
                "`builtin`.`default`.`my_udf1`(`a`)",
                "`builtin`.`default`.`my_udf2`(`a`) + 1",
                "`builtin`.`default`.`my_udf3`(`a`) || '##'"
            };
    assertThat(columnExpressions).isEqualTo(expected);
}
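For context, here is a minimal sketch of the same computed-column DDL run through Flink's public Table API. The table name and the 'datagen' connector are stand-ins chosen for this illustration, not part of the test above:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;

public class ComputedColumnExample {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());
        // `c` is a computed column: the catalog stores the expression `a - 1`,
        // which is evaluated on read rather than persisted as data.
        tEnv.executeSql(
                "CREATE TABLE tbl1 (\n"
                        + "  a INT,\n"
                        + "  b VARCHAR,\n"
                        + "  c AS a - 1\n"
                        + ") WITH (\n"
                        + "  'connector' = 'datagen'\n"
                        + ")");
        tEnv.executeSql("DESCRIBE tbl1").print();
    }
}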
use of org.apache.flink.table.operations.Operation in project flink by apache.
the class ParserImpl method parse.
/**
 * When parsing a statement, this first tries the {@link ExtendedParser}. If the {@link
 * ExtendedParser} fails to parse the statement, it falls back to the {@link CalciteParser}.
 *
 * @param statement the input statement.
 * @return the parsed operations.
 */
@Override
public List<Operation> parse(String statement) {
    CalciteParser parser = calciteParserSupplier.get();
    FlinkPlannerImpl planner = validatorSupplier.get();
    Optional<Operation> command = EXTENDED_PARSER.parse(statement);
    if (command.isPresent()) {
        return Collections.singletonList(command.get());
    }
    // Parse the SQL query. Use parseSqlList here because statements ending
    // with ';' must be supported in the SQL client.
    SqlNodeList sqlNodeList = parser.parseSqlList(statement);
    List<SqlNode> parsed = sqlNodeList.getList();
    Preconditions.checkArgument(parsed.size() == 1, "only single statement supported");
    return Collections.singletonList(
            SqlToOperationConverter.convert(planner, catalogManager, parsed.get(0))
                    .orElseThrow(() -> new TableException("Unsupported query: " + statement)));
}
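A hedged caller sketch: obtaining the planner's Parser from the internal TableEnvironmentInternal interface and consuming the single-element list that parse(...) guarantees. The cast is an internal API and may differ across Flink versions:

import java.util.List;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.internal.TableEnvironmentInternal;
import org.apache.flink.table.delegation.Parser;
import org.apache.flink.table.operations.Operation;

public class ParserUsageSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inStreamingMode().build());
        // Internal API: the planner's Parser is exposed on TableEnvironmentInternal.
        Parser parser = ((TableEnvironmentInternal) tEnv).getParser();
        List<Operation> ops = parser.parse("SHOW TABLES");
        // parse(...) returns exactly one Operation or throws.
        System.out.println(ops.get(0).asSummaryString());
    }
}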
use of org.apache.flink.table.operations.Operation in project flink by apache.
the class HiveParserDDLSemanticAnalyzer method convertAlterTable.
private Operation convertAlterTable(HiveParserASTNode input) throws SemanticException {
    Operation operation = null;
    HiveParserASTNode ast = (HiveParserASTNode) input.getChild(1);
    String[] qualified =
            HiveParserBaseSemanticAnalyzer.getQualifiedTableName(
                    (HiveParserASTNode) input.getChild(0));
    String tableName = HiveParserBaseSemanticAnalyzer.getDotName(qualified);
    HashMap<String, String> partSpec = null;
    HiveParserASTNode partSpecNode = (HiveParserASTNode) input.getChild(2);
    if (partSpecNode != null) {
        partSpec = getPartSpec(partSpecNode);
    }
    CatalogBaseTable alteredTable = getAlteredTable(tableName, false);
    switch (ast.getType()) {
        case HiveASTParser.TOK_ALTERTABLE_RENAME:
            operation = convertAlterTableRename(tableName, ast, false);
            break;
        case HiveASTParser.TOK_ALTERTABLE_ADDCOLS:
            operation = convertAlterTableModifyCols(alteredTable, tableName, ast, false);
            break;
        case HiveASTParser.TOK_ALTERTABLE_REPLACECOLS:
            operation = convertAlterTableModifyCols(alteredTable, tableName, ast, true);
            break;
        case HiveASTParser.TOK_ALTERTABLE_RENAMECOL:
            operation = convertAlterTableChangeCol(alteredTable, qualified, ast);
            break;
        case HiveASTParser.TOK_ALTERTABLE_ADDPARTS:
            operation = convertAlterTableAddParts(qualified, ast);
            break;
        case HiveASTParser.TOK_ALTERTABLE_DROPPARTS:
            operation = convertAlterTableDropParts(qualified, ast);
            break;
        case HiveASTParser.TOK_ALTERTABLE_PROPERTIES:
            operation = convertAlterTableProps(alteredTable, tableName, null, ast, false, false);
            break;
        case HiveASTParser.TOK_ALTERTABLE_DROPPROPERTIES:
            operation = convertAlterTableProps(alteredTable, tableName, null, ast, false, true);
            break;
        case HiveASTParser.TOK_ALTERTABLE_UPDATESTATS:
            operation = convertAlterTableProps(alteredTable, tableName, partSpec, ast, false, false);
            break;
        case HiveASTParser.TOK_ALTERTABLE_FILEFORMAT:
            operation = convertAlterTableFileFormat(alteredTable, ast, tableName, partSpec);
            break;
        case HiveASTParser.TOK_ALTERTABLE_LOCATION:
            operation = convertAlterTableLocation(alteredTable, ast, tableName, partSpec);
            break;
        case HiveASTParser.TOK_ALTERTABLE_SERIALIZER:
            operation = convertAlterTableSerde(alteredTable, ast, tableName, partSpec);
            break;
        case HiveASTParser.TOK_ALTERTABLE_SERDEPROPERTIES:
            operation = convertAlterTableSerdeProps(alteredTable, ast, tableName, partSpec);
            break;
        case HiveASTParser.TOK_ALTERTABLE_TOUCH:
        case HiveASTParser.TOK_ALTERTABLE_ARCHIVE:
        case HiveASTParser.TOK_ALTERTABLE_UNARCHIVE:
        case HiveASTParser.TOK_ALTERTABLE_PARTCOLTYPE:
        case HiveASTParser.TOK_ALTERTABLE_SKEWED:
        case HiveASTParser.TOK_ALTERTABLE_EXCHANGEPARTITION:
        case HiveASTParser.TOK_ALTERTABLE_MERGEFILES:
        case HiveASTParser.TOK_ALTERTABLE_RENAMEPART:
        case HiveASTParser.TOK_ALTERTABLE_SKEWED_LOCATION:
        case HiveASTParser.TOK_ALTERTABLE_BUCKETS:
        case HiveASTParser.TOK_ALTERTABLE_CLUSTER_SORT:
        case HiveASTParser.TOK_ALTERTABLE_COMPACT:
        case HiveASTParser.TOK_ALTERTABLE_UPDATECOLSTATS:
        case HiveASTParser.TOK_ALTERTABLE_DROPCONSTRAINT:
        case HiveASTParser.TOK_ALTERTABLE_ADDCONSTRAINT:
            handleUnsupportedOperation(ast);
            break;
        default:
            throw new ValidationException("Unknown AST node for ALTER TABLE: " + ast);
    }
    return operation;
}
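For illustration, a minimal sketch of statements that land in the supported branches above. The table names are hypothetical, and it assumes a HiveCatalog is already registered and set as current so that the Hive dialect can be enabled:

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.TableEnvironment;

public class HiveAlterTableSketch {
    public static void main(String[] args) {
        TableEnvironment tEnv =
                TableEnvironment.create(EnvironmentSettings.newInstance().inBatchMode().build());
        // Assumes a HiveCatalog has been registered and made current (omitted here).
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        tEnv.executeSql("ALTER TABLE t1 RENAME TO t2");                // TOK_ALTERTABLE_RENAME
        tEnv.executeSql("ALTER TABLE t2 ADD COLUMNS (x INT)");         // TOK_ALTERTABLE_ADDCOLS
        tEnv.executeSql("ALTER TABLE t2 SET TBLPROPERTIES ('k'='v')"); // TOK_ALTERTABLE_PROPERTIES
    }
}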
use of org.apache.flink.table.operations.Operation in project flink by apache.
the class HiveParserDDLSemanticAnalyzer method convertAlterView.
private Operation convertAlterView(HiveParserASTNode ast) throws SemanticException {
    Operation operation = null;
    String[] qualified =
            HiveParserBaseSemanticAnalyzer.getQualifiedTableName(
                    (HiveParserASTNode) ast.getChild(0));
    String tableName = HiveParserBaseSemanticAnalyzer.getDotName(qualified);
    CatalogBaseTable alteredTable = getAlteredTable(tableName, true);
    if (ast.getChild(1).getType() == HiveASTParser.TOK_QUERY) {
        // alter view as
        operation = convertCreateView(ast);
    } else {
        ast = (HiveParserASTNode) ast.getChild(1);
        switch (ast.getType()) {
            case HiveASTParser.TOK_ALTERVIEW_PROPERTIES:
                operation = convertAlterTableProps(alteredTable, tableName, null, ast, true, false);
                break;
            case HiveASTParser.TOK_ALTERVIEW_DROPPROPERTIES:
                operation = convertAlterTableProps(alteredTable, tableName, null, ast, true, true);
                break;
            case HiveASTParser.TOK_ALTERVIEW_RENAME:
                operation = convertAlterTableRename(tableName, ast, true);
                break;
            case HiveASTParser.TOK_ALTERVIEW_ADDPARTS:
            case HiveASTParser.TOK_ALTERVIEW_DROPPARTS:
                handleUnsupportedOperation("ADD/DROP PARTITION for view is not supported");
                break;
            default:
                throw new ValidationException("Unknown AST node for ALTER VIEW: " + ast);
        }
    }
    return operation;
}
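The view path mirrors the table path. Continuing the previous sketch's tEnv and Hive-dialect setup (the view name is hypothetical), statements hitting the supported branches look like this:

tEnv.executeSql("ALTER VIEW v1 SET TBLPROPERTIES ('comment' = 'demo')"); // TOK_ALTERVIEW_PROPERTIES
tEnv.executeSql("ALTER VIEW v1 RENAME TO v2");                           // TOK_ALTERVIEW_RENAME
tEnv.executeSql("ALTER VIEW v2 AS SELECT a, b FROM t2");                 // TOK_QUERY: "alter view as"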
use of org.apache.flink.table.operations.Operation in project zeppelin by apache.
the class Flink113Shims method parseBySqlParser.
private SqlCommandCall parseBySqlParser(Parser sqlParser, String stmt) throws Exception {
    List<Operation> operations;
    try {
        operations = sqlParser.parse(stmt);
    } catch (Throwable e) {
        throw new Exception("Invalid SQL statement.", e);
    }
    if (operations.size() != 1) {
        throw new Exception("Only a single statement is supported for now.");
    }
    final SqlCommand cmd;
    String[] operands = new String[] {stmt};
    Operation operation = operations.get(0);
    if (operation instanceof CatalogSinkModifyOperation) {
        boolean overwrite = ((CatalogSinkModifyOperation) operation).isOverwrite();
        cmd = overwrite ? SqlCommand.INSERT_OVERWRITE : SqlCommand.INSERT_INTO;
    } else if (operation instanceof CreateTableOperation) {
        cmd = SqlCommand.CREATE_TABLE;
    } else if (operation instanceof DropTableOperation) {
        cmd = SqlCommand.DROP_TABLE;
    } else if (operation instanceof AlterTableOperation) {
        cmd = SqlCommand.ALTER_TABLE;
    } else if (operation instanceof CreateViewOperation) {
        cmd = SqlCommand.CREATE_VIEW;
    } else if (operation instanceof DropViewOperation) {
        cmd = SqlCommand.DROP_VIEW;
    } else if (operation instanceof CreateDatabaseOperation) {
        cmd = SqlCommand.CREATE_DATABASE;
    } else if (operation instanceof DropDatabaseOperation) {
        cmd = SqlCommand.DROP_DATABASE;
    } else if (operation instanceof AlterDatabaseOperation) {
        cmd = SqlCommand.ALTER_DATABASE;
    } else if (operation instanceof CreateCatalogOperation) {
        cmd = SqlCommand.CREATE_CATALOG;
    } else if (operation instanceof DropCatalogOperation) {
        cmd = SqlCommand.DROP_CATALOG;
    } else if (operation instanceof UseCatalogOperation) {
        cmd = SqlCommand.USE_CATALOG;
        operands = new String[] {((UseCatalogOperation) operation).getCatalogName()};
    } else if (operation instanceof UseDatabaseOperation) {
        cmd = SqlCommand.USE;
        operands = new String[] {((UseDatabaseOperation) operation).getDatabaseName()};
    } else if (operation instanceof ShowCatalogsOperation) {
        cmd = SqlCommand.SHOW_CATALOGS;
        operands = new String[0];
    } else if (operation instanceof ShowDatabasesOperation) {
        cmd = SqlCommand.SHOW_DATABASES;
        operands = new String[0];
    } else if (operation instanceof ShowTablesOperation) {
        cmd = SqlCommand.SHOW_TABLES;
        operands = new String[0];
    } else if (operation instanceof ShowFunctionsOperation) {
        cmd = SqlCommand.SHOW_FUNCTIONS;
        operands = new String[0];
    } else if (operation instanceof CreateCatalogFunctionOperation
            || operation instanceof CreateTempSystemFunctionOperation) {
        cmd = SqlCommand.CREATE_FUNCTION;
    } else if (operation instanceof DropCatalogFunctionOperation
            || operation instanceof DropTempSystemFunctionOperation) {
        cmd = SqlCommand.DROP_FUNCTION;
    } else if (operation instanceof AlterCatalogFunctionOperation) {
        cmd = SqlCommand.ALTER_FUNCTION;
    } else if (operation instanceof ExplainOperation) {
        cmd = SqlCommand.EXPLAIN;
    } else if (operation instanceof DescribeTableOperation) {
        cmd = SqlCommand.DESCRIBE;
        operands =
                new String[] {
                    ((DescribeTableOperation) operation).getSqlIdentifier().asSerializableString()
                };
    } else if (operation instanceof QueryOperation) {
        cmd = SqlCommand.SELECT;
    } else {
        throw new Exception("Unknown operation: " + operation.asSummaryString());
    }
    return new SqlCommandCall(cmd, operands, stmt);
}
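A standalone sketch distilling the same instanceof-dispatch idea, reduced to three branches. The class, method, and enum names here are hypothetical, not part of Flink or Zeppelin:

import org.apache.flink.table.operations.Operation;
import org.apache.flink.table.operations.QueryOperation;
import org.apache.flink.table.operations.UseCatalogOperation;
import org.apache.flink.table.operations.ddl.CreateTableOperation;

public class OperationClassifier {
    enum CommandKind { SELECT, CREATE_TABLE, USE_CATALOG, OTHER }

    // Map a parsed Operation to a coarse command token, mirroring the
    // branching structure of parseBySqlParser above.
    static CommandKind classify(Operation op) {
        if (op instanceof QueryOperation) {
            return CommandKind.SELECT;
        } else if (op instanceof CreateTableOperation) {
            return CommandKind.CREATE_TABLE;
        } else if (op instanceof UseCatalogOperation) {
            return CommandKind.USE_CATALOG;
        }
        return CommandKind.OTHER;
    }
}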