use of com.alibaba.druid.sql.ast.statement.SQLExprTableSource in project druid by alibaba.
the class MySqlCreateTable_like_test method test_0.
public void test_0() throws Exception {
    String sql = "CREATE TABLE like_test (LIKE t1)";
    List<SQLStatement> statementList = SQLUtils.parseStatements(sql, JdbcConstants.MYSQL, true);
    assertEquals(1, statementList.size());
    MySqlCreateTableStatement stmt = (MySqlCreateTableStatement) statementList.get(0);
    // The parenthesized LIKE form is parsed as a table element, not as the
    // statement-level LIKE clause, so getLike() is null here.
    SQLExprTableSource like = stmt.getLike();
    assertNull(like);
    assertEquals(1, stmt.getTableElementList().size());
    assertTrue(stmt.getTableElementList().get(0) instanceof SQLTableLike);
    assertEquals("CREATE TABLE like_test (\n" + "\tLIKE t1\n" + ")", stmt.toString());
}
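MySQL accepts two spellings of LIKE in CREATE TABLE, and druid models them differently: the parenthesized form above becomes a SQLTableLike element, while the bare form goes through getLike(). A minimal sketch contrasting the two, assuming only druid on the classpath (table names are illustrative, and the bare-form behavior is inferred from the API rather than stated by this test):

MySqlCreateTableStatement inParens = (MySqlCreateTableStatement) SQLUtils
        .parseStatements("CREATE TABLE t (LIKE src)", JdbcConstants.MYSQL).get(0);
MySqlCreateTableStatement bare = (MySqlCreateTableStatement) SQLUtils
        .parseStatements("CREATE TABLE t LIKE src", JdbcConstants.MYSQL).get(0);
// The parenthesized form lands in the element list, as the test asserts;
// the bare form is expected to populate getLike() instead.
System.out.println(inParens.getLike()); // null
System.out.println(bare.getLike());     // the source table, src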
use of com.alibaba.druid.sql.ast.statement.SQLExprTableSource in project druid by alibaba.
the class MySqlGrantTest_34 method test_0.
public void test_0() throws Exception {
    String sql = "GRANT USAGE ON *.* TO 'bob'@'%.example.org' IDENTIFIED BY 'cleartext password';";
    MySqlStatementParser parser = new MySqlStatementParser(sql);
    List<SQLStatement> statementList = parser.parseStatementList();
    Assert.assertEquals(1, statementList.size());
    SQLGrantStatement stmt = (SQLGrantStatement) statementList.get(0);
    MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
    stmt.accept(visitor);
    // The *.* privilege target is modeled as a table source whose schema part is "*".
    SQLExprTableSource resource = (SQLExprTableSource) stmt.getResource();
    Assert.assertEquals("*", resource.getSchema());
    String output = SQLUtils.toMySqlString(stmt);
    Assert.assertEquals("GRANT USAGE ON *.* TO 'bob'@'%.example.org' IDENTIFIED BY 'cleartext password';", output);
    Assert.assertEquals(1, visitor.getTables().size());
    Assert.assertEquals(0, visitor.getColumns().size());
    Assert.assertEquals(0, visitor.getConditions().size());
}
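Outside the test harness, the same resource inspection takes only a few lines. A minimal sketch reusing the grant syntax from the test (the host string is shortened for brevity):

MySqlStatementParser parser = new MySqlStatementParser("GRANT USAGE ON *.* TO 'bob'@'%'");
SQLGrantStatement grant = (SQLGrantStatement) parser.parseStatementList().get(0);
// The global-scope wildcard is exposed as an SQLExprTableSource; its schema
// part is the literal "*", and the stat visitor counts it as one table.
SQLExprTableSource resource = (SQLExprTableSource) grant.getResource();
System.out.println(resource.getSchema()); // "*"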
use of com.alibaba.druid.sql.ast.statement.SQLExprTableSource in project druid by alibaba.
the class MySqlSelectTest_204_dla method test_0.
public void test_0() throws Exception {
    String sql = "SELECT temp_1.$1, temp_2.smallint_col\n"
            + "FROM \n"
            + "TABLE temp_1\n"
            + "(\n"
            + " $1 int,\n"
            + " $2 int\n"
            + ")\n"
            + "TBLPROPERTIES (\n"
            + " TYPE='oss',\n"
            + " LOCATION='oss//x.x.x.x:xxx/test_db',\n"
            + " SCHEMA='test_db'\n"
            + ")\n"
            + "META LIFECYCLE 1\n"
            + "\n"
            + "JOIN\n"
            + "\n"
            + "TABLE temp_2\n"
            + "(\n"
            + " id INT COMMENT 'default',\n"
            + " bool_col BOOLEAN COMMENT 'default',\n"
            + " tinyint_col TINYINT COMMENT 'default',\n"
            + " smallint_col SMALLINT COMMENT 'default',\n"
            + " int_col INT COMMENT 'default',\n"
            + " bigint_col BIGINT COMMENT 'default',\n"
            + " float_col FLOAT COMMENT 'default',\n"
            + " double_col DOUBLE COMMENT 'default',\n"
            + " date_string_col STRING COMMENT 'default',\n"
            + " string_col STRING COMMENT 'default',\n"
            + " timestamp_col TIMESTAMP COMMENT 'default'\n"
            + ")\n"
            + "ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' \n"
            + "WITH SERDEPROPERTIES ('field.delim'='|', 'serialization.format'='|') \n"
            + "STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'\n"
            + "OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'\n"
            + "LOCATION 'oss://xxx/xxx/xxx.csv'\n"
            + "TBLPROPERTIES ('recursive.directories'='false')\n"
            + "META LIFECYCLE 1\n"
            + "\n"
            + "ON temp_1.$1 = temp_2.id\n"
            + "WHERE temp_2.bool_col = true;";
    MySqlStatementParser parser = new MySqlStatementParser(sql);
    List<SQLStatement> statementList = parser.parseStatementList();
    assertEquals(1, statementList.size());
    SQLSelectStatement stmt = (SQLSelectStatement) statementList.get(0);
    assertEquals("SELECT temp_1.$1, temp_2.smallint_col\n"
            + "FROM TABLE temp_1 (\n"
            + "\t$1 int,\n"
            + "\t$2 int\n"
            + ")\n"
            + "TBLPROPERTIES (\n"
            + "\t'TYPE' = 'oss',\n"
            + "\t'LOCATION' = 'oss//x.x.x.x:xxx/test_db',\n"
            + "\t'SCHEMA' = 'test_db'\n"
            + ")\n"
            + "META LIFECYCLE 1\n"
            + "\tJOIN TABLE temp_2 (\n"
            + "\t\tid INT COMMENT 'default',\n"
            + "\t\tbool_col BOOLEAN COMMENT 'default',\n"
            + "\t\ttinyint_col TINYINT COMMENT 'default',\n"
            + "\t\tsmallint_col SMALLINT COMMENT 'default',\n"
            + "\t\tint_col INT COMMENT 'default',\n"
            + "\t\tbigint_col BIGINT COMMENT 'default',\n"
            + "\t\tfloat_col FLOAT COMMENT 'default',\n"
            + "\t\tdouble_col DOUBLE COMMENT 'default',\n"
            + "\t\tdate_string_col STRING COMMENT 'default',\n"
            + "\t\tstring_col STRING COMMENT 'default',\n"
            + "\t\ttimestamp_col TIMESTAMP COMMENT 'default'\n"
            + "\t)\n"
            + "\tROW FORMAT\n"
            + "\t\tSERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'\n"
            + "\tWITH SERDEPROPERTIES (\n"
            + "\t\t'field.delim' = '|',\n"
            + "\t\t'serialization.format' = '|'\n"
            + "\t)\n"
            + "\tSTORED AS\n"
            + "\t\tINPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'\n"
            + "\t\tOUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'\n"
            + "\tLOCATION 'oss://xxx/xxx/xxx.csv'\n"
            + "\tTBLPROPERTIES (\n"
            + "\t\t'recursive.directories' = 'false'\n"
            + "\t)\n"
            + "\tMETA LIFECYCLE 1 ON temp_1.$1 = temp_2.id\n"
            + "WHERE temp_2.bool_col = true;", stmt.toString());
    assertEquals("select temp_1.$1, temp_2.smallint_col\n"
            + "from table temp_1 (\n"
            + "\t$1 int,\n"
            + "\t$2 int\n"
            + ")\n"
            + "tblproperties (\n"
            + "\t'TYPE' = 'oss',\n"
            + "\t'LOCATION' = 'oss//x.x.x.x:xxx/test_db',\n"
            + "\t'SCHEMA' = 'test_db'\n"
            + ")\n"
            + "meta lifecycle 1\n"
            + "\tjoin table temp_2 (\n"
            + "\t\tid INT comment 'default',\n"
            + "\t\tbool_col BOOLEAN comment 'default',\n"
            + "\t\ttinyint_col TINYINT comment 'default',\n"
            + "\t\tsmallint_col SMALLINT comment 'default',\n"
            + "\t\tint_col INT comment 'default',\n"
            + "\t\tbigint_col BIGINT comment 'default',\n"
            + "\t\tfloat_col FLOAT comment 'default',\n"
            + "\t\tdouble_col DOUBLE comment 'default',\n"
            + "\t\tdate_string_col STRING comment 'default',\n"
            + "\t\tstring_col STRING comment 'default',\n"
            + "\t\ttimestamp_col TIMESTAMP comment 'default'\n"
            + "\t)\n"
            + "\trow rowFormat\n"
            + "\t\tserde 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'\n"
            + "\twith serdeproperties (\n"
            + "\t\t'field.delim' = '|',\n"
            + "\t\t'serialization.format' = '|'\n"
            + "\t)\n"
            + "\tstored as\n"
            + "\t\tinputformat 'org.apache.hadoop.mapred.TextInputFormat'\n"
            + "\t\toutputformat 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'\n"
            + "\tlocation 'oss://xxx/xxx/xxx.csv'\n"
            + "\ttblproperties (\n"
            + "\t\t'recursive.directories' = 'false'\n"
            + "\t)\n"
            + "\tmeta lifecycle 1 on temp_1.$1 = temp_2.id\n"
            + "where temp_2.bool_col = true;", stmt.toLowerCaseString());
    final TempTableNameGen tempTableNameGen = new TempTableNameGen() {
        @Override
        public String generateName() {
            return "__temp_table_1";
        }
    };
    // Replace each inline TABLE definition with a generated temp-table name
    // and collect the extracted CREATE TABLE statements.
    final List<SQLCreateTableStatement> createTableStatementList = new ArrayList<SQLCreateTableStatement>();
    SQLASTVisitorAdapter v = new MySqlASTVisitorAdapter() {
        @Override
        public boolean visit(SQLAdhocTableSource x) {
            final String tableName = tempTableNameGen.generateName();
            HiveCreateTableStatement createStmt = (HiveCreateTableStatement) x.getDefinition();
            createStmt.setParent(null);
            createStmt.setTableName(tableName);
            createStmt.setExternal(true);
            SQLUtils.replaceInParent(x, new SQLExprTableSource(tableName));
            createTableStatementList.add(createStmt);
            return false;
        }

        @Override
        public boolean visit(SQLVariantRefExpr x) {
            // Turn $-placeholders such as $1 into plain identifiers.
            String name = x.getName();
            if (name != null && name.startsWith("$")) {
                SQLUtils.replaceInParent(x, new SQLIdentifierExpr(name));
            }
            return false;
        }
    };
    stmt.accept(v);
    for (SQLCreateTableStatement createStmt : createTableStatementList) {
        System.out.println(createStmt.toString(VisitorFeature.OutputNameQuote));
    }
    System.out.println();
    System.out.println(stmt.toString(VisitorFeature.OutputNameQuote));
    // META LIFECYCLE is either a day count or the keyword ALWAYS.
    HiveCreateTableStatement createTableStatement = (HiveCreateTableStatement) createTableStatementList.get(0);
    SQLExpr lifeCycle = createTableStatement.getMetaLifeCycle();
    if (lifeCycle instanceof SQLIntegerExpr) {
        assertEquals(1, ((SQLIntegerExpr) lifeCycle).getNumber().intValue());
    } else if (lifeCycle instanceof SQLIdentifierExpr && ((SQLIdentifierExpr) lifeCycle).nameHashCode64() == FnvHash.Constants.ALWAYS) {
        // META LIFECYCLE ALWAYS
    }
}
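The rewrite idiom this test relies on generalizes: return false from visit to claim a node, build a replacement, and splice it in with SQLUtils.replaceInParent. A minimal standalone sketch of the same idiom (the SQL and table names are illustrative; it assumes replaceInParent can swap a plain FROM source the same way it swaps the adhoc table source above):

SQLStatement renamed = SQLUtils.parseStatements(
        "SELECT id FROM old_name WHERE id > 0", JdbcConstants.MYSQL).get(0);
renamed.accept(new MySqlASTVisitorAdapter() {
    @Override
    public boolean visit(SQLExprTableSource x) {
        // Splice a replacement table source into x's parent.
        SQLUtils.replaceInParent(x, new SQLExprTableSource("new_name"));
        return false;
    }
});
System.out.println(renamed); // SELECT id FROM new_name WHERE id > 0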
use of com.alibaba.druid.sql.ast.statement.SQLExprTableSource in project Mycat-Server by MyCATApache.
the class DruidSelectParser method changeSql.
/**
 * Rewrite the SQL: append a LIMIT to statements that need one.
 */
@Override
public void changeSql(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt, LayerCachePool cachePool) throws SQLNonTransientException {
    tryRoute(schema, rrs, cachePool);
    rrs.copyLimitToNodes();
    SQLSelectStatement selectStmt = (SQLSelectStatement) stmt;
    SQLSelectQuery sqlSelectQuery = selectStmt.getSelect().getQuery();
    if (sqlSelectQuery instanceof MySqlSelectQueryBlock) {
        MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock) selectStmt.getSelect().getQuery();
        int limitStart = 0;
        int limitSize = schema.getDefaultMaxLimit();
        // Clear GROUP BY ... HAVING.
        // Modified by winbill, 20160614: do NOT include the having clause when routing to multiple nodes.
        SQLSelectGroupByClause groupByClause = mysqlSelectQuery.getGroupBy();
        if (groupByClause != null && groupByClause.getHaving() != null && isRoutMultiNode(schema, rrs)) {
            groupByClause.setHaving(null);
        }
        Map<String, Map<String, Set<ColumnRoutePair>>> allConditions = getAllConditions();
        boolean isNeedAddLimit = isNeedAddLimit(schema, rrs, mysqlSelectQuery, allConditions);
        if (isNeedAddLimit) {
            Limit limit = new Limit();
            limit.setRowCount(new SQLIntegerExpr(limitSize));
            mysqlSelectQuery.setLimit(limit);
            rrs.setLimitSize(limitSize);
            String sql = getSql(rrs, stmt, isNeedAddLimit);
            rrs.changeNodeSqlAfterAddLimit(schema, getCurentDbType(), sql, 0, limitSize, true);
        }
        Limit limit = mysqlSelectQuery.getLimit();
        if (limit != null && !isNeedAddLimit) {
            SQLIntegerExpr offset = (SQLIntegerExpr) limit.getOffset();
            SQLIntegerExpr count = (SQLIntegerExpr) limit.getRowCount();
            if (offset != null) {
                limitStart = offset.getNumber().intValue();
                rrs.setLimitStart(limitStart);
            }
            if (count != null) {
                limitSize = count.getNumber().intValue();
                rrs.setLimitSize(limitSize);
            }
            if (isNeedChangeLimit(rrs)) {
                // Widen LIMIT start, size to LIMIT 0, start + size for each node;
                // the merge layer re-applies the real offset.
                Limit changedLimit = new Limit();
                changedLimit.setRowCount(new SQLIntegerExpr(limitStart + limitSize));
                if (offset != null) {
                    if (limitStart < 0) {
                        String msg = "You have an error in your SQL syntax; check the manual that "
                                + "corresponds to your MySQL server version for the right syntax to use near '" + limitStart + "'";
                        throw new SQLNonTransientException(ErrorCode.ER_PARSE_ERROR + " - " + msg);
                    } else {
                        changedLimit.setOffset(new SQLIntegerExpr(0));
                    }
                }
                mysqlSelectQuery.setLimit(changedLimit);
                String sql = getSql(rrs, stmt, isNeedAddLimit);
                rrs.changeNodeSqlAfterAddLimit(schema, getCurentDbType(), sql, 0, limitStart + limitSize, true);
                // Store the rewritten SQL.
                ctx.setSql(sql);
            } else {
                rrs.changeNodeSqlAfterAddLimit(schema, getCurentDbType(), getCtx().getSql(), rrs.getLimitStart(), rrs.getLimitSize(), true);
            }
        }
        if (rrs.isDistTable()) {
            // For a partitioned (dist) table, point each routed node at its sub-table.
            SQLTableSource from = mysqlSelectQuery.getFrom();
            for (RouteResultsetNode node : rrs.getNodes()) {
                SQLIdentifierExpr sqlIdentifierExpr = new SQLIdentifierExpr();
                sqlIdentifierExpr.setParent(from);
                sqlIdentifierExpr.setName(node.getSubTableName());
                SQLExprTableSource from2 = new SQLExprTableSource(sqlIdentifierExpr);
                from2.setAlias(from.getAlias());
                mysqlSelectQuery.setFrom(from2);
                node.setStatement(stmt.toString());
            }
        }
        rrs.setCacheAble(isNeedCache(schema, rrs, mysqlSelectQuery, allConditions));
    }
}
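The LIMIT handling above encodes the standard sharding rule: a global LIMIT start, size must be widened to LIMIT 0, start + size on every node, because any of the first start + size merged rows may come from any node; the merge layer then skips the real offset. A sketch isolating just that step (the helper name is made up; Limit and SQLIntegerExpr are the druid AST nodes used above):

// Hypothetical helper: widen a global LIMIT for execution on a single node.
private Limit widenLimitForNode(int limitStart, int limitSize) {
    Limit perNode = new Limit();
    perNode.setOffset(new SQLIntegerExpr(0));
    perNode.setRowCount(new SQLIntegerExpr(limitStart + limitSize));
    return perNode;
}

So LIMIT 100, 10 routed to three nodes becomes LIMIT 0, 110 on each node, and the merger discards the first 100 rows of the combined stream.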
use of com.alibaba.druid.sql.ast.statement.SQLExprTableSource in project Mycat-Server by MyCATApache.
the class MycatSchemaStatVisitor method visit.
// DUAL
public boolean visit(MySqlDeleteStatement x) {
    setAliasMap();
    setMode(x, Mode.Delete);
    accept(x.getFrom());
    accept(x.getUsing());
    x.getTableSource().accept(this);
    if (x.getTableSource() instanceof SQLExprTableSource) {
        SQLName tableName = (SQLName) ((SQLExprTableSource) x.getTableSource()).getExpr();
        String ident = tableName.toString();
        setCurrentTable(x, ident);
        TableStat stat = this.getTableStat(ident, ident);
        stat.incrementDeleteCount();
    }
    accept(x.getWhere());
    accept(x.getOrderBy());
    accept(x.getLimit());
    return false;
}
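druid's stock MySqlSchemaStatVisitor follows the same shape for DELETE, so the effect is easy to observe without Mycat; a minimal sketch:

SQLStatement delete = new MySqlStatementParser("DELETE FROM t1 WHERE id = 1")
        .parseStatementList().get(0);
MySqlSchemaStatVisitor visitor = new MySqlSchemaStatVisitor();
delete.accept(visitor);
// One TableStat is registered for t1, with its delete count incremented.
System.out.println(visitor.getTables());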