Use of org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter in project hive by apache.
The class ASTBuilder, method table.
public static ASTNode table(final RelNode scan) {
  HiveTableScan hts = null;
  if (scan instanceof HiveJdbcConverter) {
    hts = ((HiveJdbcConverter) scan).getTableScan().getHiveTableScan();
  } else if (scan instanceof DruidQuery) {
    hts = (HiveTableScan) ((DruidQuery) scan).getTableScan();
  } else {
    hts = (HiveTableScan) scan;
  }
  assert hts != null;
  RelOptHiveTable hTbl = (RelOptHiveTable) hts.getTable();
  ASTBuilder tableNameBuilder = ASTBuilder.construct(HiveParser.TOK_TABNAME, "TOK_TABNAME")
      .add(HiveParser.Identifier, hTbl.getHiveTableMD().getDbName())
      .add(HiveParser.Identifier, hTbl.getHiveTableMD().getTableName());
  if (hTbl.getHiveTableMD().getMetaTable() != null) {
    tableNameBuilder.add(HiveParser.Identifier, hTbl.getHiveTableMD().getMetaTable());
  }
  ASTBuilder b = ASTBuilder.construct(HiveParser.TOK_TABREF, "TOK_TABREF").add(tableNameBuilder);
  if (hTbl.getHiveTableMD().getAsOfTimestamp() != null) {
    ASTBuilder asOfBuilder = ASTBuilder.construct(HiveParser.TOK_AS_OF_TIME, "TOK_AS_OF_TIME")
        .add(HiveParser.StringLiteral, hTbl.getHiveTableMD().getAsOfTimestamp());
    b.add(asOfBuilder);
  }
  if (hTbl.getHiveTableMD().getAsOfVersion() != null) {
    ASTBuilder asOfBuilder = ASTBuilder.construct(HiveParser.TOK_AS_OF_VERSION, "TOK_AS_OF_VERSION")
        .add(HiveParser.Number, hTbl.getHiveTableMD().getAsOfVersion());
    b.add(asOfBuilder);
  }
  ASTBuilder propList = ASTBuilder.construct(HiveParser.TOK_TABLEPROPLIST, "TOK_TABLEPROPLIST");
  if (scan instanceof DruidQuery) {
    // Passing query spec, column names and column types to be used as part of Hive Physical execution
    DruidQuery dq = (DruidQuery) scan;
    // Adding Query specs to be used by org.apache.hadoop.hive.druid.io.DruidQueryBasedInputFormat
    propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
        .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_JSON + "\"")
        .add(HiveParser.StringLiteral, "\"" + SemanticAnalyzer.escapeSQLString(dq.getQueryString()) + "\""));
    // Adding column names used later by org.apache.hadoop.hive.druid.serde.DruidSerDe
    propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
        .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_FIELD_NAMES + "\"")
        .add(HiveParser.StringLiteral, "\"" + dq.getRowType().getFieldNames().stream()
            .map(Object::toString).collect(Collectors.joining(",")) + "\""));
    // Adding column types used later by org.apache.hadoop.hive.druid.serde.DruidSerDe
    propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
        .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_FIELD_TYPES + "\"")
        .add(HiveParser.StringLiteral, "\"" + dq.getRowType().getFieldList().stream()
            .map(e -> TypeConverter.convert(e.getType()).getTypeName()).collect(Collectors.joining(",")) + "\""));
    propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
        .add(HiveParser.StringLiteral, "\"" + Constants.DRUID_QUERY_TYPE + "\"")
        .add(HiveParser.StringLiteral, "\"" + dq.getQueryType().getQueryName() + "\""));
  } else if (scan instanceof HiveJdbcConverter) {
    HiveJdbcConverter jdbcConverter = (HiveJdbcConverter) scan;
    final String query = jdbcConverter.generateSql();
    LOGGER.debug("Generated SQL query: " + System.lineSeparator() + query);
    propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
        .add(HiveParser.StringLiteral, "\"" + Constants.JDBC_QUERY + "\"")
        .add(HiveParser.StringLiteral, "\"" + SemanticAnalyzer.escapeSQLString(query) + "\""));
    // Whether we can split the query
    propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
        .add(HiveParser.StringLiteral, "\"" + Constants.JDBC_SPLIT_QUERY + "\"")
        .add(HiveParser.StringLiteral, "\"" + jdbcConverter.splittingAllowed() + "\""));
    // Adding column names used later by the JDBC storage handler
    propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
        .add(HiveParser.StringLiteral, "\"" + Constants.JDBC_QUERY_FIELD_NAMES + "\"")
        .add(HiveParser.StringLiteral, "\"" + scan.getRowType().getFieldNames().stream()
            .map(Object::toString).collect(Collectors.joining(",")) + "\""));
    // Adding column types used later by the JDBC storage handler
    propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
        .add(HiveParser.StringLiteral, "\"" + Constants.JDBC_QUERY_FIELD_TYPES + "\"")
        .add(HiveParser.StringLiteral, "\"" + scan.getRowType().getFieldList().stream()
            .map(e -> TypeConverter.convert(e.getType()).getTypeName()).collect(Collectors.joining(",")) + "\""));
  }
  if (hts.isInsideView()) {
    // We need to carry the insideView information from Calcite into the AST.
    propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
        .add(HiveParser.StringLiteral, "\"insideView\"")
        .add(HiveParser.StringLiteral, "\"TRUE\""));
  }
  if (hts.getTableScanTrait() != null) {
    // We need to carry the fetchDeletedRows information from Calcite into the AST.
    propList.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
        .add(HiveParser.StringLiteral, String.format("\"%s\"", hts.getTableScanTrait().getPropertyKey()))
        .add(HiveParser.StringLiteral, "\"TRUE\""));
  }
  b.add(ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTIES, "TOK_TABLEPROPERTIES").add(propList));
  // NOTE: Calcite considers tables to be equal if their names are the same. Hence
  // we need to provide Calcite the fully qualified table name (dbname.tblname)
  // and not the user-provided aliases.
  // However, in Hive the DB name cannot appear in the select list; in case of a join
  // where table names differ only in the DB name, Hive requires the user
  // to introduce explicit aliases for the tables.
  b.add(HiveParser.Identifier, hts.getTableAlias());
  return b.node();
}
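Every property above is built with the same construct-and-add pattern. As a minimal sketch (a hypothetical helper, not part of ASTBuilder), the repeated TOK_TABLEPROPERTY construction could be factored out using only the ASTBuilder and HiveParser API shown above:

private static ASTBuilder tableProperty(String key, String value) {
  // Builds one TOK_TABLEPROPERTY node whose two children are the quoted key and value string literals.
  return ASTBuilder.construct(HiveParser.TOK_TABLEPROPERTY, "TOK_TABLEPROPERTY")
      .add(HiveParser.StringLiteral, "\"" + key + "\"")
      .add(HiveParser.StringLiteral, "\"" + value + "\"");
}

With such a helper, the insideView case, for example, would reduce to propList.add(tableProperty("insideView", "TRUE")).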
Use of org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter in project hive by apache.
The class ASTConverter, method convertSource.
private QueryBlockInfo convertSource(RelNode r) throws CalciteSemanticException {
  Schema s = null;
  ASTNode ast = null;
  if (r instanceof TableScan) {
    TableScan f = (TableScan) r;
    s = new Schema(f);
    ast = ASTBuilder.table(f);
    planMapper.link(ast, f);
  } else if (r instanceof HiveJdbcConverter) {
    HiveJdbcConverter f = (HiveJdbcConverter) r;
    s = new Schema(f);
    ast = ASTBuilder.table(f);
  } else if (r instanceof DruidQuery) {
    DruidQuery f = (DruidQuery) r;
    s = new Schema(f);
    ast = ASTBuilder.table(f);
  } else if (r instanceof Join) {
    Join join = (Join) r;
    QueryBlockInfo left = convertSource(join.getLeft());
    QueryBlockInfo right = convertSource(join.getRight());
    s = new Schema(left.schema, right.schema);
    ASTNode cond = join.getCondition().accept(new RexVisitor(s, false, r.getCluster().getRexBuilder()));
    boolean semiJoin = join.isSemiJoin() || join.getJoinType() == JoinRelType.ANTI;
    if (join.getRight() instanceof Join && !semiJoin) {
      // should not be done for semijoin since it will change the semantics
      // Invert join inputs; this is done because otherwise the SemanticAnalyzer
      // methods to merge joins will not kick in
      JoinRelType type;
      if (join.getJoinType() == JoinRelType.LEFT) {
        type = JoinRelType.RIGHT;
      } else if (join.getJoinType() == JoinRelType.RIGHT) {
        type = JoinRelType.LEFT;
      } else {
        type = join.getJoinType();
      }
      ast = ASTBuilder.join(right.ast, left.ast, type, cond);
      addPkFkInfoToAST(ast, join, true);
    } else {
      ast = ASTBuilder.join(left.ast, right.ast, join.getJoinType(), cond);
      addPkFkInfoToAST(ast, join, false);
    }
    if (semiJoin) {
      s = left.schema;
    }
  } else if (r instanceof Union) {
    Union u = ((Union) r);
    ASTNode left = new ASTConverter(((Union) r).getInput(0), this.derivedTableCount, planMapper).convert();
    for (int ind = 1; ind < u.getInputs().size(); ind++) {
      left = getUnionAllAST(left,
          new ASTConverter(((Union) r).getInput(ind), this.derivedTableCount, planMapper).convert());
      String sqAlias = nextAlias();
      ast = ASTBuilder.subQuery(left, sqAlias);
      s = new Schema((Union) r, sqAlias);
    }
  } else {
    ASTConverter src = new ASTConverter(r, this.derivedTableCount, planMapper);
    ASTNode srcAST = src.convert();
    String sqAlias = nextAlias();
    s = src.getRowSchema(sqAlias);
    ast = ASTBuilder.subQuery(srcAST, sqAlias);
  }
  return new QueryBlockInfo(s, ast);
}
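The LEFT/RIGHT swap in the Join branch is the subtle part: because the inputs are handed to ASTBuilder.join in reverse order, the outer side has to be flipped as well. A minimal sketch of that inversion as a standalone helper (hypothetical, using only Calcite's JoinRelType):

private static JoinRelType invertForMergedJoin(JoinRelType type) {
  switch (type) {
    case LEFT:
      return JoinRelType.RIGHT; // a left outer join becomes a right outer join once the inputs are swapped
    case RIGHT:
      return JoinRelType.LEFT;  // and vice versa
    default:
      return type;              // inner and full joins are symmetric, so the type is kept
  }
}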
Use of org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter in project hive by apache.
The class JDBCJoinPushDownRule, method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
  LOG.debug("JDBCJoinPushDownRule has been called");
  final HiveJoin join = call.rel(0);
  final HiveJdbcConverter converter1 = call.rel(1);
  final RelNode input1 = converter1.getInput();
  final HiveJdbcConverter converter2 = call.rel(2);
  final RelNode input2 = converter2.getInput();
  JdbcJoin jdbcJoin;
  try {
    jdbcJoin = new JdbcJoin(join.getCluster(), join.getTraitSet().replace(converter1.getJdbcConvention()),
        input1, input2, join.getCondition(), join.getVariablesSet(), join.getJoinType());
  } catch (InvalidRelException e) {
    LOG.warn(e.toString());
    return;
  }
  call.transformTo(converter1.copy(converter1.getTraitSet(), jdbcJoin));
}
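onMatch simply assumes that the two converters point at the same JDBC source; that precondition is normally enforced in a matches() guard. A minimal sketch of such a guard, modeled on the union rule below and using only the accessors that appear in these snippets, so it is an illustration rather than the actual implementation:

@Override
public boolean matches(RelOptRuleCall call) {
  final HiveJdbcConverter converter1 = call.rel(1);
  final HiveJdbcConverter converter2 = call.rel(2);
  // Push the join down only when both inputs share the same JDBC convention, URL and user.
  return converter1.getJdbcConvention().getName().equals(converter2.getJdbcConvention().getName())
      && converter1.getConnectionUrl().equals(converter2.getConnectionUrl())
      && converter1.getConnectionUser().equals(converter2.getConnectionUser());
}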
Use of org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter in project hive by apache.
The class JDBCUnionPushDownRule, method matches.
@Override
public boolean matches(RelOptRuleCall call) {
  final HiveUnion union = call.rel(0);
  final HiveJdbcConverter converter1 = call.rel(1);
  final HiveJdbcConverter converter2 = call.rel(2);
  // First we compare the convention
  if (!converter1.getJdbcConvention().getName().equals(converter2.getJdbcConvention().getName())) {
    return false;
  }
  // Second, we compare the connection string
  if (!converter1.getConnectionUrl().equals(converter2.getConnectionUrl())) {
    return false;
  }
  // Third, we compare the connection user
  if (!converter1.getConnectionUser().equals(converter2.getConnectionUser())) {
    return false;
  }
  return union.getInputs().size() == 2;
}
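When matches() passes, the companion onMatch would rebuild the union inside the JDBC convention, mirroring the join rule above. A minimal sketch only: the exact shape of Calcite's JdbcRules.JdbcUnion constructor (cluster, traitSet, inputs, all) is an assumption here, as is the java.util.Arrays import.

@Override
public void onMatch(RelOptRuleCall call) {
  final HiveUnion union = call.rel(0);
  final HiveJdbcConverter converter1 = call.rel(1);
  final HiveJdbcConverter converter2 = call.rel(2);
  // Assumption: JdbcUnion(cluster, traitSet, inputs, all) is available from Calcite's JDBC adapter.
  Union jdbcUnion = new JdbcUnion(union.getCluster(),
      union.getTraitSet().replace(converter1.getJdbcConvention()),
      Arrays.asList(converter1.getInput(), converter2.getInput()),
      union.all);
  call.transformTo(converter1.copy(converter1.getTraitSet(), jdbcUnion));
}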
Use of org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.jdbc.HiveJdbcConverter in project hive by apache.
The class JDBCAggregationPushDownRule, method matches.
@Override
public boolean matches(RelOptRuleCall call) {
  final HiveAggregate agg = call.rel(0);
  final HiveJdbcConverter converter = call.rel(1);
  if (agg.getGroupType() != Group.SIMPLE) {
    // TODO: Grouping sets not supported yet
    return false;
  }
  for (AggregateCall relOptRuleOperand : agg.getAggCallList()) {
    SqlAggFunction f = relOptRuleOperand.getAggregation();
    if (f instanceof HiveSqlCountAggFunction) {
      // count distinct with more than one argument is not supported
      HiveSqlCountAggFunction countAgg = (HiveSqlCountAggFunction) f;
      if (countAgg.isDistinct() && 1 < relOptRuleOperand.getArgList().size()) {
        return false;
      }
    }
    SqlKind kind = f.getKind();
    if (!converter.getJdbcDialect().supportsAggregateFunction(kind)) {
      return false;
    }
  }
  return true;
}
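The loop body combines two conditions per aggregate call. A minimal sketch (hypothetical helper), assuming getJdbcDialect() returns a Calcite SqlDialect:

private static boolean isPushable(AggregateCall aggCall, SqlDialect dialect) {
  SqlAggFunction f = aggCall.getAggregation();
  if (f instanceof HiveSqlCountAggFunction
      && ((HiveSqlCountAggFunction) f).isDistinct()
      && aggCall.getArgList().size() > 1) {
    // COUNT(DISTINCT ...) over more than one argument cannot be pushed down.
    return false;
  }
  // Otherwise defer to the target dialect's support for this aggregate kind.
  return dialect.supportsAggregateFunction(f.getKind());
}

With it, the loop above reduces to: if (!isPushable(relOptRuleOperand, converter.getJdbcDialect())) return false;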