Example 26 with JavaTypeFactoryImpl

use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.jdbc.JavaTypeFactoryImpl in project beam by apache.

the class TypedCombineFnDelegateTest method testParameterExtractionFromCombineFn_CombineFnDelegate_WithGenericArray.

@Test
public void testParameterExtractionFromCombineFn_CombineFnDelegate_WithGenericArray() {
    Combine.BinaryCombineFn<List<String>[]> max =
        Max.of((Comparator<List<String>[]> & Serializable)
            (a, b) -> Integer.compare(a[0].get(0).length(), b[0].get(0).length()));
    UdafImpl<List<String>[], Combine.Holder<List<String>[]>, List<String>[]> udaf =
        new UdafImpl<>(new TypedCombineFnDelegate<List<String>[], Combine.Holder<List<String>[]>, List<String>[]>(max) {
    });
    exceptions.expect(IllegalArgumentException.class);
    RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
    udaf.getParameters().get(0).getType(typeFactory);
}
Also used : RelDataTypeFactory(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.type.RelDataTypeFactory) FunctionParameter(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.schema.FunctionParameter) Combine(org.apache.beam.sdk.transforms.Combine) Test(org.junit.Test) JavaTypeFactoryImpl(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.jdbc.JavaTypeFactoryImpl) Serializable(java.io.Serializable) SqlTypeName(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.type.SqlTypeName) List(java.util.List) Max(org.apache.beam.sdk.transforms.Max) Rule(org.junit.Rule) UdafImpl(org.apache.beam.sdk.extensions.sql.impl.UdafImpl) Comparator(java.util.Comparator) RelDataTypeSystem(org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.type.RelDataTypeSystem) ExpectedException(org.junit.rules.ExpectedException) Assert.assertEquals(org.junit.Assert.assertEquals)
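For contrast with the failing generic-array case above, here is a minimal sketch showing that resolving an ordinary Java type through the same factory succeeds. The class name JavaTypeFactoryDemo is only for illustration and is not part of the Beam test suite; the Beam-vendored class is a relocated copy of org.apache.calcite.jdbc.JavaTypeFactoryImpl, so the plain Calcite imports are used here.

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeSystem;

public class JavaTypeFactoryDemo {
    public static void main(String[] args) {
        // Resolving a concrete Java type works; the test above expects an
        // IllegalArgumentException because the UDAF parameter type is a
        // generic array (List<String>[]), not because of the factory itself.
        JavaTypeFactoryImpl typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
        RelDataType stringType = typeFactory.createJavaType(String.class);
        System.out.println(stringType.getSqlTypeName()); // VARCHAR for java.lang.String
    }
}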

Example 27 with JavaTypeFactoryImpl

use of org.apache.calcite.jdbc.JavaTypeFactoryImpl in project druid by druid-io.

the class SystemSchemaTest method testGetTableMap.

@Test
public void testGetTableMap() {
    Assert.assertEquals(ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors"), schema.getTableNames());
    final Map<String, Table> tableMap = schema.getTableMap();
    Assert.assertEquals(ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors"), tableMap.keySet());
    final SystemSchema.SegmentsTable segmentsTable = (SystemSchema.SegmentsTable) schema.getTableMap().get("segments");
    final RelDataType rowType = segmentsTable.getRowType(new JavaTypeFactoryImpl());
    final List<RelDataTypeField> fields = rowType.getFieldList();
    Assert.assertEquals(17, fields.size());
    final SystemSchema.TasksTable tasksTable = (SystemSchema.TasksTable) schema.getTableMap().get("tasks");
    final RelDataType sysRowType = tasksTable.getRowType(new JavaTypeFactoryImpl());
    final List<RelDataTypeField> sysFields = sysRowType.getFieldList();
    Assert.assertEquals(14, sysFields.size());
    Assert.assertEquals("task_id", sysFields.get(0).getName());
    Assert.assertEquals(SqlTypeName.VARCHAR, sysFields.get(0).getType().getSqlTypeName());
    final SystemSchema.ServersTable serversTable = (SystemSchema.ServersTable) schema.getTableMap().get("servers");
    final RelDataType serverRowType = serversTable.getRowType(new JavaTypeFactoryImpl());
    final List<RelDataTypeField> serverFields = serverRowType.getFieldList();
    Assert.assertEquals(9, serverFields.size());
    Assert.assertEquals("server", serverFields.get(0).getName());
    Assert.assertEquals(SqlTypeName.VARCHAR, serverFields.get(0).getType().getSqlTypeName());
}
Also used : SegmentsTable(org.apache.druid.sql.calcite.schema.SystemSchema.SegmentsTable) Table(org.apache.calcite.schema.Table) RelDataType(org.apache.calcite.rel.type.RelDataType) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) Test(org.junit.Test)
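The system tables exercised above implement Calcite's Table.getRowType(RelDataTypeFactory). As a minimal sketch of that API (the class RowTypeSketch and the created_time column are illustrative, not Druid's actual schema), a row type is typically assembled from the factory's builder and then inspected via getFieldList(), just as testGetTableMap does:

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.sql.type.SqlTypeName;

public class RowTypeSketch {
    // Builds a two-column row type, analogous to what a table's getRowType returns.
    static RelDataType buildRowType(RelDataTypeFactory typeFactory) {
        return typeFactory.builder()
                .add("task_id", SqlTypeName.VARCHAR)
                .add("created_time", SqlTypeName.TIMESTAMP)
                .build();
    }

    public static void main(String[] args) {
        RelDataType rowType = buildRowType(new JavaTypeFactoryImpl());
        // The field list is what assertions like fields.size() and getName() inspect.
        System.out.println(rowType.getFieldList());
    }
}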

Example 28 with JavaTypeFactoryImpl

use of org.apache.calcite.jdbc.JavaTypeFactoryImpl in project hive by apache.

the class TestCBOMaxNumToCNF method testCBOMaxNumToCNF1.

@Test
public void testCBOMaxNumToCNF1() {
    // OR(=($0, 1), AND(=($0, 0), =($1, 8)))
    // transformation creates 7 nodes AND(OR(=($0, 1), =($0, 0)), OR(=($0, 1), =($1, 8)))
    // thus, it is triggered
    final RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
    final RexBuilder rexBuilder = new RexBuilder(typeFactory);
    final RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.OR,
        rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
            rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 0),
            rexBuilder.makeLiteral(1, typeFactory.createSqlType(SqlTypeName.INTEGER), false)),
        rexBuilder.makeCall(SqlStdOperatorTable.AND,
            rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
                rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 0),
                rexBuilder.makeLiteral(0, typeFactory.createSqlType(SqlTypeName.INTEGER), false)),
            rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
                rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 1),
                rexBuilder.makeLiteral(8, typeFactory.createSqlType(SqlTypeName.INTEGER), false))));
    final RexNode newCond = RexUtil.toCnf(rexBuilder, maxNumNodesCNF, cond);
    assertEquals(newCond.toString(), "AND(OR(=($0, 1), =($0, 0)), OR(=($0, 1), =($1, 8)))");
}
Also used : JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) RelDataTypeFactory(org.apache.calcite.rel.type.RelDataTypeFactory) RexBuilder(org.apache.calcite.rex.RexBuilder) RexNode(org.apache.calcite.rex.RexNode) Test(org.junit.Test)

Example 29 with JavaTypeFactoryImpl

use of org.apache.calcite.jdbc.JavaTypeFactoryImpl in project hive by apache.

the class TestCBOMaxNumToCNF method testCBOMaxNumToCNF2.

@Test
public void testCBOMaxNumToCNF2() {
    // OR(=($0, 1), =($0, 2), AND(=($0, 0), =($1, 8)))
    // transformation creates 9 nodes AND(OR(=($0, 1), =($0, 2), =($0, 0)), OR(=($0, 1), =($0, 2), =($1, 8)))
    // thus, it is NOT triggered
    final RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
    final RexBuilder rexBuilder = new RexBuilder(typeFactory);
    final RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.OR,
        rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
            rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 0),
            rexBuilder.makeLiteral(1, typeFactory.createSqlType(SqlTypeName.INTEGER), false)),
        rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
            rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 0),
            rexBuilder.makeLiteral(2, typeFactory.createSqlType(SqlTypeName.INTEGER), false)),
        rexBuilder.makeCall(SqlStdOperatorTable.AND,
            rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
                rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 0),
                rexBuilder.makeLiteral(0, typeFactory.createSqlType(SqlTypeName.INTEGER), false)),
            rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
                rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 1),
                rexBuilder.makeLiteral(8, typeFactory.createSqlType(SqlTypeName.INTEGER), false))));
    final RexNode newCond = RexUtil.toCnf(rexBuilder, maxNumNodesCNF, cond);
    assertEquals(newCond.toString(), "OR(=($0, 1), =($0, 2), AND(=($0, 0), =($1, 8)))");
}
Also used : JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) RelDataTypeFactory(org.apache.calcite.rel.type.RelDataTypeFactory) RexBuilder(org.apache.calcite.rex.RexBuilder) RexNode(org.apache.calcite.rex.RexNode) Test(org.junit.Test)
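A quick way to read the node counts in the comments of the two tests above (the counting convention is inferred from those comments): only the Boolean and comparison calls of the CNF result are counted.

testCBOMaxNumToCNF1: AND(OR(=($0, 1), =($0, 0)), OR(=($0, 1), =($1, 8)))
    1 AND + 2 OR + 4 EQUALS = 7 nodes, within maxNumNodesCNF, so RexUtil.toCnf returns the CNF form.
testCBOMaxNumToCNF2: AND(OR(=($0, 1), =($0, 2), =($0, 0)), OR(=($0, 1), =($0, 2), =($1, 8)))
    1 AND + 2 OR + 6 EQUALS = 9 nodes, over the limit, so the original OR(...) is returned unchanged.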

Example 30 with JavaTypeFactoryImpl

use of org.apache.calcite.jdbc.JavaTypeFactoryImpl in project hive by apache.

the class HiveMaterializedViewsRegistry method createTableScan.

private static RelNode createTableScan(Table viewTable) {
    // 0. Recreate cluster
    final RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(null);
    final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
    final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
    // 1. Create column schema
    final RowResolver rr = new RowResolver();
    // 1.1 Add Column info for non partion cols (Object Inspector fields)
    StructObjectInspector rowObjectInspector;
    try {
        rowObjectInspector = (StructObjectInspector) viewTable.getDeserializer().getObjectInspector();
    } catch (SerDeException e) {
        // Bail out
        return null;
    }
    List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
    ColumnInfo colInfo;
    String colName;
    ArrayList<ColumnInfo> cInfoLst = new ArrayList<ColumnInfo>();
    for (int i = 0; i < fields.size(); i++) {
        colName = fields.get(i).getFieldName();
        colInfo = new ColumnInfo(fields.get(i).getFieldName(), TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()), null, false);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
    }
    ArrayList<ColumnInfo> nonPartitionColumns = new ArrayList<ColumnInfo>(cInfoLst);
    // 1.2 Add column info corresponding to partition columns
    ArrayList<ColumnInfo> partitionColumns = new ArrayList<ColumnInfo>();
    for (FieldSchema part_col : viewTable.getPartCols()) {
        colName = part_col.getName();
        colInfo = new ColumnInfo(colName, TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true);
        rr.put(null, colName, colInfo);
        cInfoLst.add(colInfo);
        partitionColumns.add(colInfo);
    }
    // 1.3 Build row type from field <type, name>
    RelDataType rowType;
    try {
        rowType = TypeConverter.getType(cluster, rr, null);
    } catch (CalciteSemanticException e) {
        // Bail out
        return null;
    }
    // 2. Build RelOptAbstractTable
    String fullyQualifiedTabName = viewTable.getDbName();
    if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
        fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName();
    } else {
        fullyQualifiedTabName = viewTable.getTableName();
    }
    RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, rowType, viewTable, nonPartitionColumns, partitionColumns, new ArrayList<VirtualColumn>(), SessionState.get().getConf(), new HashMap<String, PrunedPartitionList>(), new AtomicInteger());
    RelNode tableRel;
    // 3. Build operator
    if (obtainTableType(viewTable) == TableType.DRUID) {
        // Build Druid query
        String address = HiveConf.getVar(SessionState.get().getConf(), HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS);
        String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE);
        Set<String> metrics = new HashSet<>();
        List<RelDataType> druidColTypes = new ArrayList<>();
        List<String> druidColNames = new ArrayList<>();
        for (RelDataTypeField field : rowType.getFieldList()) {
            druidColTypes.add(field.getType());
            druidColNames.add(field.getName());
            if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
                // timestamp
                continue;
            }
            if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
                // dimension
                continue;
            }
            metrics.add(field.getName());
        }
        List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
        DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false), dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
        final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(), null, false, false);
        tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, druidTable, ImmutableList.<RelNode>of(scan));
    } else {
        // Build Hive Table Scan Rel
        tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable, viewTable.getTableName(), null, false, false);
    }
    return tableRel;
}
Also used : RelOptCluster(org.apache.calcite.plan.RelOptCluster) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) ArrayList(java.util.ArrayList) ColumnInfo(org.apache.hadoop.hive.ql.exec.ColumnInfo) DruidTable(org.apache.calcite.adapter.druid.DruidTable) RelDataType(org.apache.calcite.rel.type.RelDataType) RowResolver(org.apache.hadoop.hive.ql.parse.RowResolver) RelOptPlanner(org.apache.calcite.plan.RelOptPlanner) PrunedPartitionList(org.apache.hadoop.hive.ql.parse.PrunedPartitionList) JavaTypeFactoryImpl(org.apache.calcite.jdbc.JavaTypeFactoryImpl) RexBuilder(org.apache.calcite.rex.RexBuilder) CalciteSemanticException(org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException) SerDeException(org.apache.hadoop.hive.serde2.SerDeException) HashSet(java.util.HashSet) HiveTableScan(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan) TableScan(org.apache.calcite.rel.core.TableScan) DruidSchema(org.apache.calcite.adapter.druid.DruidSchema) RelDataTypeField(org.apache.calcite.rel.type.RelDataTypeField) RelOptHiveTable(org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable) HiveRelNode(org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode) RelNode(org.apache.calcite.rel.RelNode) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) StructObjectInspector(org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector) Interval(org.joda.time.Interval)

Aggregations

JavaTypeFactoryImpl (org.apache.calcite.jdbc.JavaTypeFactoryImpl): 46
RelDataType (org.apache.calcite.rel.type.RelDataType): 22
Test (org.junit.Test): 18
JavaTypeFactory (org.apache.calcite.adapter.java.JavaTypeFactory): 17
Table (org.apache.calcite.schema.Table): 16
RexBuilder (org.apache.calcite.rex.RexBuilder): 13
SchemaPlus (org.apache.calcite.schema.SchemaPlus): 13
StreamableTable (org.apache.calcite.schema.StreamableTable): 12
RelDataTypeField (org.apache.calcite.rel.type.RelDataTypeField): 9
RelNode (org.apache.calcite.rel.RelNode): 8
RelDataTypeFactory (org.apache.calcite.rel.type.RelDataTypeFactory): 7
ArrayList (java.util.ArrayList): 6
RelOptCluster (org.apache.calcite.plan.RelOptCluster): 6
CompilerUtil (org.apache.storm.sql.compiler.CompilerUtil): 6
BlockBuilder (org.apache.calcite.linq4j.tree.BlockBuilder): 5
ParameterExpression (org.apache.calcite.linq4j.tree.ParameterExpression): 5
SqlNode (org.apache.calcite.sql.SqlNode): 5
Before (org.junit.Before): 5
JavaTypeFactoryImpl (org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.jdbc.JavaTypeFactoryImpl): 4
RexToLixTranslator (org.apache.calcite.adapter.enumerable.RexToLixTranslator): 4