Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.TableScan in project hive by apache.
The class HiveMaterializedViewFilterScanRule, method onMatch.
//~ Methods ----------------------------------------------------------------

@Override
public void onMatch(RelOptRuleCall call) {
  final Project project = call.rel(0);
  final Filter filter = call.rel(1);
  final TableScan scan = call.rel(2);
  apply(call, project, filter, scan);
}
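The call.rel(i) indices mirror the rule's operand tree, with index 0 at the root. As a hedged sketch (the constructor shape and description string are assumptions, not copied from Hive), the operand hierarchy that makes the three bindings above line up is typically declared like this:

// Sketch only: binds a Project whose input is a Filter whose input is a
// TableScan, so onMatch receives them as call.rel(0), call.rel(1), call.rel(2).
protected HiveMaterializedViewFilterScanRule(RelBuilderFactory relBuilderFactory) {
  super(
      operand(Project.class,
          operand(Filter.class,
              operand(TableScan.class, none()))),
      relBuilderFactory, "HiveMaterializedViewFilterScanRule");
}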
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.TableScan in project hive by apache.
The class SubstitutionVisitor, method toMutable.
private static MutableRel toMutable(RelNode rel) {
  if (rel instanceof TableScan) {
    return MutableScan.of((TableScan) rel);
  }
  if (rel instanceof Values) {
    return MutableValues.of((Values) rel);
  }
  if (rel instanceof Project) {
    final Project project = (Project) rel;
    final MutableRel input = toMutable(project.getInput());
    return MutableProject.of(input, project.getProjects(),
        project.getRowType().getFieldNames());
  }
  if (rel instanceof Filter) {
    final Filter filter = (Filter) rel;
    final MutableRel input = toMutable(filter.getInput());
    return MutableFilter.of(input, filter.getCondition());
  }
  if (rel instanceof Aggregate) {
    final Aggregate aggregate = (Aggregate) rel;
    final MutableRel input = toMutable(aggregate.getInput());
    return MutableAggregate.of(input, aggregate.indicator,
        aggregate.getGroupSet(), aggregate.getGroupSets(),
        aggregate.getAggCallList());
  }
  if (rel instanceof Join) {
    final Join join = (Join) rel;
    final MutableRel left = toMutable(join.getLeft());
    final MutableRel right = toMutable(join.getRight());
    return MutableJoin.of(join.getCluster(), left, right,
        join.getCondition(), join.getJoinType(), join.getVariablesSet());
  }
  if (rel instanceof Sort) {
    final Sort sort = (Sort) rel;
    final MutableRel input = toMutable(sort.getInput());
    return MutableSort.of(input, sort.getCollation(), sort.offset, sort.fetch);
  }
  throw new RuntimeException("cannot translate " + rel + " to MutableRel");
}
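Each branch converts the node's inputs recursively before wrapping the node itself, so the RelNode tree is mirrored bottom-up into mutable form. A hedged sketch of how one more operator could be added in the same style, assuming a MutableUnion.of factory analogous to the ones above (its exact signature is an assumption, not taken from this code):

// Assumed extension, same shape as the existing branches; the MutableUnion.of
// signature here is an assumption.
if (rel instanceof Union) {
  final Union union = (Union) rel;
  final List<MutableRel> inputs = new ArrayList<>();
  for (RelNode input : union.getInputs()) {
    inputs.add(toMutable(input));
  }
  return MutableUnion.of(union.getRowType(), inputs);
}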
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.TableScan in project hive by apache.
The class HiveMaterializedViewsRegistry, method createTableScan.
private static RelNode createTableScan(Table viewTable) {
  // 0. Recreate cluster
  final RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(null);
  final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
  final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  // 1. Create column schema
  final RowResolver rr = new RowResolver();
  // 1.1 Add column info for non-partition cols (ObjectInspector fields)
  StructObjectInspector rowObjectInspector;
  try {
    rowObjectInspector =
        (StructObjectInspector) viewTable.getDeserializer().getObjectInspector();
  } catch (SerDeException e) {
    // Bail out
    return null;
  }
  List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
  ColumnInfo colInfo;
  String colName;
  ArrayList<ColumnInfo> cInfoLst = new ArrayList<ColumnInfo>();
  for (int i = 0; i < fields.size(); i++) {
    colName = fields.get(i).getFieldName();
    colInfo = new ColumnInfo(fields.get(i).getFieldName(),
        TypeInfoUtils.getTypeInfoFromObjectInspector(
            fields.get(i).getFieldObjectInspector()),
        null, false);
    rr.put(null, colName, colInfo);
    cInfoLst.add(colInfo);
  }
  ArrayList<ColumnInfo> nonPartitionColumns = new ArrayList<ColumnInfo>(cInfoLst);
  // 1.2 Add column info corresponding to partition columns
  ArrayList<ColumnInfo> partitionColumns = new ArrayList<ColumnInfo>();
  for (FieldSchema part_col : viewTable.getPartCols()) {
    colName = part_col.getName();
    colInfo = new ColumnInfo(colName,
        TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true);
    rr.put(null, colName, colInfo);
    cInfoLst.add(colInfo);
    partitionColumns.add(colInfo);
  }
  // 1.3 Build row type from field <type, name>
  RelDataType rowType;
  try {
    rowType = TypeConverter.getType(cluster, rr, null);
  } catch (CalciteSemanticException e) {
    // Bail out
    return null;
  }
  // 2. Build RelOptAbstractTable
  String fullyQualifiedTabName = viewTable.getDbName();
  if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
    fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName();
  } else {
    fullyQualifiedTabName = viewTable.getTableName();
  }
  RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName,
      rowType, viewTable, nonPartitionColumns, partitionColumns,
      new ArrayList<VirtualColumn>(), SessionState.get().getConf(),
      new HashMap<String, PrunedPartitionList>(), new AtomicInteger());
  RelNode tableRel;
  // 3. Build operator
  if (obtainTableType(viewTable) == TableType.DRUID) {
    // Build Druid query
    String address = HiveConf.getVar(SessionState.get().getConf(),
        HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS);
    String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE);
    Set<String> metrics = new HashSet<>();
    List<RelDataType> druidColTypes = new ArrayList<>();
    List<String> druidColNames = new ArrayList<>();
    for (RelDataTypeField field : rowType.getFieldList()) {
      druidColTypes.add(field.getType());
      druidColNames.add(field.getName());
      if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
        // timestamp
        continue;
      }
      if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
        // dimension
        continue;
      }
      metrics.add(field.getName());
    }
    List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
    DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
        dataSource, RelDataTypeImpl.proto(rowType), metrics,
        DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
    final TableScan scan = new HiveTableScan(cluster,
        cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
        viewTable.getTableName(), null, false, false);
    tableRel = DruidQuery.create(cluster,
        cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, druidTable,
        ImmutableList.<RelNode>of(scan));
  } else {
    // Build Hive table scan rel
    tableRel = new HiveTableScan(cluster,
        cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
        viewTable.getTableName(), null, false, false);
  }
  return tableRel;
}
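The scan produced here typically becomes the table side of a materialization that Calcite can substitute into query plans. A minimal usage sketch, assuming queryRel holds the parsed plan of the view definition (the pairing below uses Calcite's RelOptMaterialization; the null star table and the registration step are assumptions for illustration):

// Sketch: pair the freshly built scan with the view's query plan.
RelNode tableRel = createTableScan(viewTable);
if (tableRel != null) {
  RelOptMaterialization materialization = new RelOptMaterialization(
      tableRel, queryRel, null,
      ImmutableList.of(viewTable.getDbName(), viewTable.getTableName()));
  // ... hand the materialization to the planner or registry.
}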
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.TableScan in project storm by apache.
The class TridentScanRule, method convert.
@Override
public RelNode convert(RelNode rel) {
  final TableScan scan = (TableScan) rel;
  int parallelismHint = DEFAULT_PARALLELISM_HINT;
  final ParallelStreamableTable parallelTable =
      scan.getTable().unwrap(ParallelStreamableTable.class);
  if (parallelTable != null && parallelTable.parallelismHint() != null) {
    parallelismHint = parallelTable.parallelismHint();
  }
  final Table table = scan.getTable().unwrap(Table.class);
  switch (table.getJdbcTableType()) {
    case STREAM:
      return new TridentStreamScanRel(scan.getCluster(),
          scan.getTraitSet().replace(TridentLogicalConvention.INSTANCE),
          scan.getTable(), parallelismHint);
    default:
      throw new IllegalArgumentException(
          String.format("Unsupported table type: %s", table.getJdbcTableType()));
  }
}
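convert is the callback of a Calcite ConverterRule. A hedged sketch of how the rule is presumably wired up (the source convention and description string are assumptions, not copied from Storm):

// Sketch only: matches any TableScan in the default convention and re-emits
// it under TridentLogicalConvention via convert(...) above.
public static final TridentScanRule INSTANCE = new TridentScanRule();

private TridentScanRule() {
  super(TableScan.class, Convention.NONE,
      TridentLogicalConvention.INSTANCE, "TridentScanRule");
}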
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.TableScan in project storm by apache.
The class PlanCompiler, method printMain.
private void printMain(PrintWriter pw, RelNode root) {
  Set<TableScan> tables = new HashSet<>();
  pw.print(INITIALIZER_PROLOGUE);
  chainOperators(pw, root, tables);
  for (TableScan n : tables) {
    String escaped = CompilerUtil.escapeJavaString(
        Joiner.on('.').join(n.getTable().getQualifiedName()), true);
    String r = NEW_LINE_JOINER.join(
        " if (!data.containsKey(%1$s))",
        " throw new RuntimeException(\"Cannot find table \" + %1$s);",
        " data.get(%1$s).open(CTX_%2$d);",
        "");
    pw.print(String.format(r, escaped, n.getId()));
  }
  pw.print(" }\n");
}
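For illustration, with a hypothetical qualified table name FOO.BAR and a scan node whose id is 3, the format string above expands to the following generated Java (leading whitespace exactly as in the joined literals):

 if (!data.containsKey("FOO.BAR"))
 throw new RuntimeException("Cannot find table " + "FOO.BAR");
 data.get("FOO.BAR").open(CTX_3);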