Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.jdbc.JavaTypeFactoryImpl in project beam by apache.
From the class TypedCombineFnDelegateTest, method testParameterExtractionFromCombineFn_CombineFnDelegate_WithGenericArray:
@Test
public void testParameterExtractionFromCombineFn_CombineFnDelegate_WithGenericArray() {
  Combine.BinaryCombineFn<List<String>[]> max =
      Max.of(
          (Comparator<List<String>[]> & Serializable)
              (a, b) -> Integer.compare(a[0].get(0).length(), b[0].get(0).length()));
  UdafImpl<List<String>[], Combine.Holder<List<String>[]>, List<String>[]> udaf =
      new UdafImpl<>(
          new TypedCombineFnDelegate<
              List<String>[], Combine.Holder<List<String>[]>, List<String>[]>(max) {});
  exceptions.expect(IllegalArgumentException.class);
  RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
  udaf.getParameters().get(0).getType(typeFactory);
}
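For context, a minimal sketch (not part of the Beam test above; the variable names are illustrative) of how a JavaTypeFactoryImpl maps ordinary Java classes to SQL types. A generic array such as List<String>[] has no such single mapping, which is why the parameter type lookup in the test is expected to throw IllegalArgumentException:

RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
// Plain classes resolve cleanly to SQL types...
RelDataType stringType = typeFactory.createJavaType(String.class);   // VARCHAR
RelDataType intType = typeFactory.createJavaType(Integer.class);     // INTEGER
// ...whereas a parameterized generic array type cannot be resolved to a single SQL type,
// so asking the UDAF parameter for its type with this factory fails (assumption based on the test).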
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.jdbc.JavaTypeFactoryImpl in project druid by druid-io.
From the class SystemSchemaTest, method testGetTableMap:
@Test
public void testGetTableMap() {
  Assert.assertEquals(
      ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors"),
      schema.getTableNames());
  final Map<String, Table> tableMap = schema.getTableMap();
  Assert.assertEquals(
      ImmutableSet.of("segments", "servers", "server_segments", "tasks", "supervisors"),
      tableMap.keySet());
  final SystemSchema.SegmentsTable segmentsTable =
      (SystemSchema.SegmentsTable) schema.getTableMap().get("segments");
  final RelDataType rowType = segmentsTable.getRowType(new JavaTypeFactoryImpl());
  final List<RelDataTypeField> fields = rowType.getFieldList();
  Assert.assertEquals(17, fields.size());
  final SystemSchema.TasksTable tasksTable =
      (SystemSchema.TasksTable) schema.getTableMap().get("tasks");
  final RelDataType sysRowType = tasksTable.getRowType(new JavaTypeFactoryImpl());
  final List<RelDataTypeField> sysFields = sysRowType.getFieldList();
  Assert.assertEquals(14, sysFields.size());
  Assert.assertEquals("task_id", sysFields.get(0).getName());
  Assert.assertEquals(SqlTypeName.VARCHAR, sysFields.get(0).getType().getSqlTypeName());
  final SystemSchema.ServersTable serversTable =
      (SystemSchema.ServersTable) schema.getTableMap().get("servers");
  final RelDataType serverRowType = serversTable.getRowType(new JavaTypeFactoryImpl());
  final List<RelDataTypeField> serverFields = serverRowType.getFieldList();
  Assert.assertEquals(9, serverFields.size());
  Assert.assertEquals("server", serverFields.get(0).getName());
  Assert.assertEquals(SqlTypeName.VARCHAR, serverFields.get(0).getType().getSqlTypeName());
}
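For reference, a minimal sketch (not taken from the Druid test; the field names here are made up) of building a row type with a type factory and reading back its fields, which is essentially what the assertions above do against the system tables:

RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
RelDataType rowType = typeFactory.builder()
    .add("task_id", typeFactory.createSqlType(SqlTypeName.VARCHAR))
    .add("duration", typeFactory.createSqlType(SqlTypeName.BIGINT))
    .build();
for (RelDataTypeField field : rowType.getFieldList()) {
  // prints e.g. "task_id -> VARCHAR", "duration -> BIGINT"
  System.out.println(field.getName() + " -> " + field.getType().getSqlTypeName());
}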
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.jdbc.JavaTypeFactoryImpl in project hive by apache.
From the class TestCBOMaxNumToCNF, method testCBOMaxNumToCNF1:
@Test
public void testCBOMaxNumToCNF1() {
  // OR(=($0, 1), AND(=($0, 0), =($1, 8)))
  // The transformation creates 7 nodes: AND(OR(=($0, 1), =($0, 0)), OR(=($0, 1), =($1, 8))),
  // which stays within the maxNumNodesCNF limit; thus, the conversion to CNF is triggered.
  final RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
  final RexBuilder rexBuilder = new RexBuilder(typeFactory);
  final RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.OR,
      rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
          rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 0),
          rexBuilder.makeLiteral(1, typeFactory.createSqlType(SqlTypeName.INTEGER), false)),
      rexBuilder.makeCall(SqlStdOperatorTable.AND,
          rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
              rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 0),
              rexBuilder.makeLiteral(0, typeFactory.createSqlType(SqlTypeName.INTEGER), false)),
          rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
              rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 1),
              rexBuilder.makeLiteral(8, typeFactory.createSqlType(SqlTypeName.INTEGER), false))));
  final RexNode newCond = RexUtil.toCnf(rexBuilder, maxNumNodesCNF, cond);
  assertEquals(newCond.toString(), "AND(OR(=($0, 1), =($0, 0)), OR(=($0, 1), =($1, 8)))");
}
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.jdbc.JavaTypeFactoryImpl in project hive by apache.
From the class TestCBOMaxNumToCNF, method testCBOMaxNumToCNF2:
@Test
public void testCBOMaxNumToCNF2() {
  // OR(=($0, 1), =($0, 2), AND(=($0, 0), =($1, 8)))
  // The transformation would create 9 nodes: AND(OR(=($0, 1), =($0, 2), =($0, 0)), OR(=($0, 1), =($0, 2), =($1, 8))),
  // which exceeds the maxNumNodesCNF limit; thus, the conversion is NOT triggered and the expression is left as-is.
  final RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
  final RexBuilder rexBuilder = new RexBuilder(typeFactory);
  final RexNode cond = rexBuilder.makeCall(SqlStdOperatorTable.OR,
      rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
          rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 0),
          rexBuilder.makeLiteral(1, typeFactory.createSqlType(SqlTypeName.INTEGER), false)),
      rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
          rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 0),
          rexBuilder.makeLiteral(2, typeFactory.createSqlType(SqlTypeName.INTEGER), false)),
      rexBuilder.makeCall(SqlStdOperatorTable.AND,
          rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
              rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 0),
              rexBuilder.makeLiteral(0, typeFactory.createSqlType(SqlTypeName.INTEGER), false)),
          rexBuilder.makeCall(SqlStdOperatorTable.EQUALS,
              rexBuilder.makeInputRef(typeFactory.createSqlType(SqlTypeName.INTEGER), 1),
              rexBuilder.makeLiteral(8, typeFactory.createSqlType(SqlTypeName.INTEGER), false))));
  final RexNode newCond = RexUtil.toCnf(rexBuilder, maxNumNodesCNF, cond);
  assertEquals(newCond.toString(), "OR(=($0, 1), =($0, 2), AND(=($0, 0), =($1, 8)))");
}
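Both tests above exercise the node-count guard around RexUtil.toCnf. A minimal sketch (assumed, not part of the Hive test class; cond and rexBuilder are the ones built in the second test, and maxNumNodesCNF is the test's limit field) contrasting the bounded call with Calcite's unbounded overload:

// Bounded: the 9-node expansion exceeds the limit, so the original expression is returned unchanged.
RexNode bounded = RexUtil.toCnf(rexBuilder, maxNumNodesCNF, cond);
// Unbounded: with no node limit, the same predicate is fully distributed into CNF.
RexNode unbounded = RexUtil.toCnf(rexBuilder, cond);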
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.jdbc.JavaTypeFactoryImpl in project hive by apache.
From the class HiveMaterializedViewsRegistry, method createTableScan:
private static RelNode createTableScan(Table viewTable) {
  // 0. Recreate cluster
  final RelOptPlanner planner = HiveVolcanoPlanner.createPlanner(null);
  final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
  final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  // 1. Create column schema
  final RowResolver rr = new RowResolver();
  // 1.1 Add column info for non-partition columns (ObjectInspector fields)
  StructObjectInspector rowObjectInspector;
  try {
    rowObjectInspector = (StructObjectInspector) viewTable.getDeserializer().getObjectInspector();
  } catch (SerDeException e) {
    // Bail out
    return null;
  }
  List<? extends StructField> fields = rowObjectInspector.getAllStructFieldRefs();
  ColumnInfo colInfo;
  String colName;
  ArrayList<ColumnInfo> cInfoLst = new ArrayList<ColumnInfo>();
  for (int i = 0; i < fields.size(); i++) {
    colName = fields.get(i).getFieldName();
    colInfo = new ColumnInfo(
        fields.get(i).getFieldName(),
        TypeInfoUtils.getTypeInfoFromObjectInspector(fields.get(i).getFieldObjectInspector()),
        null, false);
    rr.put(null, colName, colInfo);
    cInfoLst.add(colInfo);
  }
  ArrayList<ColumnInfo> nonPartitionColumns = new ArrayList<ColumnInfo>(cInfoLst);
  // 1.2 Add column info corresponding to partition columns
  ArrayList<ColumnInfo> partitionColumns = new ArrayList<ColumnInfo>();
  for (FieldSchema part_col : viewTable.getPartCols()) {
    colName = part_col.getName();
    colInfo = new ColumnInfo(colName,
        TypeInfoFactory.getPrimitiveTypeInfo(part_col.getType()), null, true);
    rr.put(null, colName, colInfo);
    cInfoLst.add(colInfo);
    partitionColumns.add(colInfo);
  }
  // 1.3 Build row type from field <type, name>
  RelDataType rowType;
  try {
    rowType = TypeConverter.getType(cluster, rr, null);
  } catch (CalciteSemanticException e) {
    // Bail out
    return null;
  }
  // 2. Build RelOptAbstractTable
  String fullyQualifiedTabName = viewTable.getDbName();
  if (fullyQualifiedTabName != null && !fullyQualifiedTabName.isEmpty()) {
    fullyQualifiedTabName = fullyQualifiedTabName + "." + viewTable.getTableName();
  } else {
    fullyQualifiedTabName = viewTable.getTableName();
  }
  RelOptHiveTable optTable = new RelOptHiveTable(null, fullyQualifiedTabName, rowType, viewTable,
      nonPartitionColumns, partitionColumns, new ArrayList<VirtualColumn>(),
      SessionState.get().getConf(), new HashMap<String, PrunedPartitionList>(),
      new AtomicInteger());
  RelNode tableRel;
  // 3. Build operator
  if (obtainTableType(viewTable) == TableType.DRUID) {
    // Build Druid query
    String address = HiveConf.getVar(SessionState.get().getConf(),
        HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS);
    String dataSource = viewTable.getParameters().get(Constants.DRUID_DATA_SOURCE);
    Set<String> metrics = new HashSet<>();
    List<RelDataType> druidColTypes = new ArrayList<>();
    List<String> druidColNames = new ArrayList<>();
    for (RelDataTypeField field : rowType.getFieldList()) {
      druidColTypes.add(field.getType());
      druidColNames.add(field.getName());
      if (field.getName().equals(DruidTable.DEFAULT_TIMESTAMP_COLUMN)) {
        // timestamp
        continue;
      }
      if (field.getType().getSqlTypeName() == SqlTypeName.VARCHAR) {
        // dimension
        continue;
      }
      metrics.add(field.getName());
    }
    List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
    DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false), dataSource,
        RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
    final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
        optTable, viewTable.getTableName(), null, false, false);
    tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
        druidTable, ImmutableList.<RelNode>of(scan));
  } else {
    // Build Hive Table Scan Rel
    tableRel = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION), optTable,
        viewTable.getTableName(), null, false, false);
  }
  return tableRel;
}
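For comparison, a minimal, Hive-independent sketch (assumed; it uses Calcite's stock VolcanoPlanner rather than HiveVolcanoPlanner) of the cluster bootstrap in step 0: a planner plus a RexBuilder backed by JavaTypeFactoryImpl is all that RelOptCluster.create needs.

RelOptPlanner planner = new VolcanoPlanner();
RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
// The cluster hands the same type factory back to anything that later builds row types or RexNodes.
RelDataTypeFactory typeFactory = cluster.getTypeFactory();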