Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexBuilder in project flink by apache.
The class HiveParserDMLHelper, method replaceProjectForStaticPart.
private RelNode replaceProjectForStaticPart(
        Project project,
        Map<String, String> staticPartSpec,
        Table destTable,
        Map<String, RelDataType> targetColToType) {
    List<RexNode> exprs = project.getProjects();
    List<RexNode> extendedExprs = new ArrayList<>(exprs);
    // Static partition columns go between the regular columns and any dynamic partition
    // columns, so the insert position is computed from the partition key counts.
    int numDynmPart = destTable.getTTable().getPartitionKeys().size() - staticPartSpec.size();
    int insertIndex = extendedExprs.size() - numDynmPart;
    RexBuilder rexBuilder = plannerContext.getCluster().getRexBuilder();
    for (Map.Entry<String, String> spec : staticPartSpec.entrySet()) {
        // Each static partition value becomes a char literal cast to the target column type.
        RexNode toAdd =
                rexBuilder.makeCharLiteral(HiveParserUtils.asUnicodeString(spec.getValue()));
        toAdd = rexBuilder.makeAbstractCast(targetColToType.get(spec.getKey()), toAdd);
        extendedExprs.add(insertIndex++, toAdd);
    }
    RelNode res =
            LogicalProject.create(
                    project.getInput(), Collections.emptyList(), extendedExprs, (List<String>) null);
    // Detach the old project from its input; the new project replaces it.
    project.replaceInput(0, null);
    return res;
}
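The arithmetic that picks insertIndex is the subtle part: static partition values must land between the query's regular columns and any dynamic partition expressions, which already sit at the tail of the projection. Below is a minimal, self-contained sketch of that splice, with plain strings standing in for RexNodes and hypothetical column names; it is an illustration, not part of the Flink source.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical demo: mirrors the insert-index arithmetic for
// INSERT ... PARTITION (p1='v1', p2) into a table partitioned by (p1, p2),
// where p2 is a dynamic partition column supplied by the query.
public class StaticPartInsertDemo {
    public static void main(String[] args) {
        List<String> exprs = new ArrayList<>(Arrays.asList("id", "name", "p2Expr"));
        Map<String, String> staticPartSpec = new LinkedHashMap<>();
        staticPartSpec.put("p1", "v1");
        int totalPartitionKeys = 2;
        int numDynmPart = totalPartitionKeys - staticPartSpec.size(); // 1
        int insertIndex = exprs.size() - numDynmPart;                 // 2
        for (Map.Entry<String, String> spec : staticPartSpec.entrySet()) {
            // In the real method this is a char literal cast to the target column type.
            exprs.add(insertIndex++, "'" + spec.getValue() + "'");
        }
        System.out.println(exprs); // [id, name, 'v1', p2Expr]
    }
}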
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexBuilder in project flink by apache.
The class HiveParserRexNodeConverter, method convertConstant.
public static RexNode convertConstant(ExprNodeConstantDesc literal, RelOptCluster cluster)
        throws SemanticException {
    RexBuilder rexBuilder = cluster.getRexBuilder();
    RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory();
    PrimitiveTypeInfo hiveType = (PrimitiveTypeInfo) literal.getTypeInfo();
    RelDataType calciteDataType = HiveParserTypeConverter.convert(hiveType, dtFactory);
    PrimitiveObjectInspector.PrimitiveCategory hiveTypeCategory = hiveType.getPrimitiveCategory();
    ConstantObjectInspector coi = literal.getWritableObjectInspector();
    Object value = ObjectInspectorUtils.copyToStandardJavaObject(coi.getWritableConstantValue(), coi);
    RexNode calciteLiteral;
    HiveShim hiveShim = HiveParserUtils.getSessionHiveShim();
    // If the value is null, the type should also be VOID.
    if (value == null) {
        hiveTypeCategory = PrimitiveObjectInspector.PrimitiveCategory.VOID;
    }
    // TODO: Verify if we need to use ConstantObjectInspector to unwrap data
    switch (hiveTypeCategory) {
        case BOOLEAN:
            calciteLiteral = rexBuilder.makeLiteral((Boolean) value);
            break;
        case BYTE:
            calciteLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Byte) value), calciteDataType);
            break;
        case SHORT:
            calciteLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Short) value), calciteDataType);
            break;
        case INT:
            calciteLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Integer) value));
            break;
        case LONG:
            calciteLiteral = rexBuilder.makeBigintLiteral(new BigDecimal((Long) value));
            break;
        // TODO: is Decimal an exact numeric or approximate numeric?
        case DECIMAL:
            if (value instanceof HiveDecimal) {
                value = ((HiveDecimal) value).bigDecimalValue();
            } else if (value instanceof Decimal128) {
                value = ((Decimal128) value).toBigDecimal();
            }
            if (value == null) {
                // For now, we will not run CBO in the presence of invalid decimal literals.
                throw new SemanticException(
                        "Expression " + literal.getExprString() + " is not a valid decimal");
                // TODO: return createNullLiteral(literal);
            }
            BigDecimal bd = (BigDecimal) value;
            BigInteger unscaled = bd.unscaledValue();
            if (unscaled.compareTo(MIN_LONG_BI) >= 0 && unscaled.compareTo(MAX_LONG_BI) <= 0) {
                calciteLiteral = rexBuilder.makeExactLiteral(bd);
            } else {
                // CBO doesn't support unlimited-precision decimals. In practice, this will work...
                // An alternative would be to throw CboSemanticException and fall back to no CBO.
                RelDataType relType =
                        cluster.getTypeFactory()
                                .createSqlType(SqlTypeName.DECIMAL, unscaled.toString().length(), bd.scale());
                calciteLiteral = rexBuilder.makeExactLiteral(bd, relType);
            }
            break;
        case FLOAT:
            calciteLiteral =
                    rexBuilder.makeApproxLiteral(new BigDecimal(Float.toString((Float) value)), calciteDataType);
            break;
        case DOUBLE:
            // TODO: The best solution is to support NaN in expression reduction.
            if (Double.isNaN((Double) value)) {
                throw new SemanticException("NaN");
            }
            calciteLiteral =
                    rexBuilder.makeApproxLiteral(new BigDecimal(Double.toString((Double) value)), calciteDataType);
            break;
        case CHAR:
            if (value instanceof HiveChar) {
                value = ((HiveChar) value).getValue();
            }
            calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
            break;
        case VARCHAR:
            if (value instanceof HiveVarchar) {
                value = ((HiveVarchar) value).getValue();
            }
            calciteLiteral = rexBuilder.makeCharLiteral(asUnicodeString((String) value));
            break;
        case STRING:
            Object constantDescVal = literal.getValue();
            constantDescVal =
                    constantDescVal instanceof NlsString ? constantDescVal : asUnicodeString((String) value);
            // Calcite treats string literals as CHAR type; we should treat them as STRING, just like Hive.
            RelDataType type = HiveParserTypeConverter.convert(hiveType, dtFactory);
            // If we get here, the value is not null.
            type = dtFactory.createTypeWithNullability(type, false);
            calciteLiteral = rexBuilder.makeLiteral(constantDescVal, type, true);
            break;
        case DATE:
            LocalDate localDate = HiveParserUtils.getSessionHiveShim().toFlinkDate(value);
            DateString dateString =
                    new DateString(localDate.getYear(), localDate.getMonthValue(), localDate.getDayOfMonth());
            calciteLiteral = rexBuilder.makeDateLiteral(dateString);
            break;
        case TIMESTAMP:
            TimestampString timestampString;
            if (value instanceof Calendar) {
                timestampString = TimestampString.fromCalendarFields((Calendar) value);
            } else {
                LocalDateTime localDateTime = HiveParserUtils.getSessionHiveShim().toFlinkTimestamp(value);
                timestampString =
                        new TimestampString(
                                localDateTime.getYear(),
                                localDateTime.getMonthValue(),
                                localDateTime.getDayOfMonth(),
                                localDateTime.getHour(),
                                localDateTime.getMinute(),
                                localDateTime.getSecond());
                timestampString = timestampString.withNanos(localDateTime.getNano());
            }
            // Hive always treats timestamps as having precision 9.
            calciteLiteral = rexBuilder.makeTimestampLiteral(timestampString, 9);
            break;
        case VOID:
            calciteLiteral =
                    cluster.getRexBuilder().makeLiteral(null, dtFactory.createSqlType(SqlTypeName.NULL), true);
            break;
        case BINARY:
        case UNKNOWN:
        default:
            if (hiveShim.isIntervalYearMonthType(hiveTypeCategory)) {
                // The Calcite year-month literal value is the total months as a BigDecimal.
                BigDecimal totalMonths =
                        BigDecimal.valueOf(((HiveParserIntervalYearMonth) value).getTotalMonths());
                calciteLiteral =
                        rexBuilder.makeIntervalLiteral(
                                totalMonths,
                                new SqlIntervalQualifier(TimeUnit.YEAR, TimeUnit.MONTH, new SqlParserPos(1, 1)));
            } else if (hiveShim.isIntervalDayTimeType(hiveTypeCategory)) {
                // The Calcite day-time interval value is milliseconds as a BigDecimal.
                // Seconds converted to millis.
                BigDecimal secsValueBd =
                        BigDecimal.valueOf(((HiveParserIntervalDayTime) value).getTotalSeconds() * 1000);
                // Nanos converted to millis.
                BigDecimal nanosValueBd =
                        BigDecimal.valueOf(((HiveParserIntervalDayTime) value).getNanos(), 6);
                calciteLiteral =
                        rexBuilder.makeIntervalLiteral(
                                secsValueBd.add(nanosValueBd),
                                new SqlIntervalQualifier(TimeUnit.MILLISECOND, null, new SqlParserPos(1, 1)));
            } else {
                throw new RuntimeException("Unsupported literal type " + hiveTypeCategory);
            }
    }
    return calciteLiteral;
}
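The DECIMAL branch is worth isolating: a literal whose unscaled value fits in a long can take Calcite's default exact-literal path, while anything wider needs an explicit DECIMAL type sized from its digit count. Below is a minimal, standalone sketch of just that range check, assuming MIN_LONG_BI and MAX_LONG_BI are the long-range bounds the method references; it is an illustration, not the Flink code itself.

import java.math.BigDecimal;
import java.math.BigInteger;

// Hypothetical demo of the DECIMAL range check: values whose unscaled part
// fits in a long use the default exact-literal path; wider values would get
// an explicit DECIMAL(precision, scale) derived from the digits.
public class DecimalRangeCheckDemo {
    static final BigInteger MIN_LONG_BI = BigInteger.valueOf(Long.MIN_VALUE);
    static final BigInteger MAX_LONG_BI = BigInteger.valueOf(Long.MAX_VALUE);

    static boolean fitsInLong(BigDecimal bd) {
        BigInteger unscaled = bd.unscaledValue();
        return unscaled.compareTo(MIN_LONG_BI) >= 0 && unscaled.compareTo(MAX_LONG_BI) <= 0;
    }

    public static void main(String[] args) {
        System.out.println(fitsInLong(new BigDecimal("123.45")));                     // true
        System.out.println(fitsInLong(new BigDecimal("123456789012345678901234.5"))); // false
    }
}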
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexBuilder in project flink by apache.
The class HiveParserBaseSemanticAnalyzer, method genValues.
public static RelNode genValues(
        String tabAlias,
        Table tmpTable,
        HiveParserRowResolver rowResolver,
        RelOptCluster cluster,
        List<List<String>> values) {
    List<TypeInfo> tmpTableTypes =
            tmpTable.getCols().stream()
                    .map(f -> TypeInfoUtils.getTypeInfoFromTypeString(f.getType()))
                    .collect(Collectors.toList());
    RexBuilder rexBuilder = cluster.getRexBuilder();
    // Calcite types for each field.
    List<RelDataType> calciteTargetTypes =
            tmpTableTypes.stream()
                    .map(ti -> HiveParserTypeConverter.convert((PrimitiveTypeInfo) ti, rexBuilder.getTypeFactory()))
                    .collect(Collectors.toList());
    // Calcite field names.
    List<String> calciteFieldNames =
            IntStream.range(0, calciteTargetTypes.size())
                    .mapToObj(SqlUtil::deriveAliasFromOrdinal)
                    .collect(Collectors.toList());
    // Calcite type for each row.
    List<RelDataType> calciteRowTypes = new ArrayList<>();
    List<List<RexLiteral>> rows = new ArrayList<>();
    for (List<String> value : values) {
        Preconditions.checkArgument(
                value.size() == tmpTableTypes.size(),
                String.format(
                        "Values table col length (%d) and data length (%d) mismatch",
                        tmpTableTypes.size(), value.size()));
        List<RexLiteral> row = new ArrayList<>();
        for (int i = 0; i < tmpTableTypes.size(); i++) {
            PrimitiveTypeInfo primitiveTypeInfo = (PrimitiveTypeInfo) tmpTableTypes.get(i);
            RelDataType calciteType = calciteTargetTypes.get(i);
            String col = value.get(i);
            if (col == null) {
                row.add(rexBuilder.makeNullLiteral(calciteType));
            } else {
                switch (primitiveTypeInfo.getPrimitiveCategory()) {
                    case BYTE:
                    case SHORT:
                    case INT:
                    case LONG:
                        row.add(rexBuilder.makeExactLiteral(new BigDecimal(col), calciteType));
                        break;
                    case DECIMAL:
                        BigDecimal bigDec = new BigDecimal(col);
                        row.add(
                                SqlTypeUtil.isValidDecimalValue(bigDec, calciteType)
                                        ? rexBuilder.makeExactLiteral(bigDec, calciteType)
                                        : rexBuilder.makeNullLiteral(calciteType));
                        break;
                    case FLOAT:
                    case DOUBLE:
                        row.add(rexBuilder.makeApproxLiteral(new BigDecimal(col), calciteType));
                        break;
                    case BOOLEAN:
                        row.add(rexBuilder.makeLiteral(Boolean.parseBoolean(col)));
                        break;
                    default:
                        row.add(rexBuilder.makeCharLiteral(HiveParserUtils.asUnicodeString(col)));
                }
            }
        }
        calciteRowTypes.add(
                rexBuilder.getTypeFactory()
                        .createStructType(
                                row.stream().map(RexLiteral::getType).collect(Collectors.toList()),
                                calciteFieldNames));
        rows.add(row);
    }
    // Compute the final row type.
    RelDataType calciteRowType = rexBuilder.getTypeFactory().leastRestrictive(calciteRowTypes);
    for (int i = 0; i < calciteFieldNames.size(); i++) {
        ColumnInfo colInfo =
                new ColumnInfo(
                        calciteFieldNames.get(i),
                        HiveParserTypeConverter.convert(calciteRowType.getFieldList().get(i).getType()),
                        tabAlias,
                        false);
        rowResolver.put(tabAlias, calciteFieldNames.get(i), colInfo);
    }
    return HiveParserUtils.genValuesRelNode(
            cluster, rexBuilder.getTypeFactory().createStructType(calciteRowType.getFieldList()), rows);
}
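The inner switch is a per-category dispatch: each raw VALUES string becomes a typed literal, null columns short-circuit to null literals, and any unhandled category falls back to a char literal. Below is a simplified, hypothetical mirror of that dispatch without the Calcite and Hive dependencies; the enum and helper are made up for illustration.

import java.math.BigDecimal;
import java.util.Arrays;
import java.util.List;

// Hypothetical demo: convert raw VALUES strings per target category,
// mirroring the structure of the switch in genValues.
public class ValuesRowDemo {
    enum Category { INT, DECIMAL, BOOLEAN, STRING }

    static Object toLiteral(String col, Category cat) {
        if (col == null) {
            return null; // corresponds to rexBuilder.makeNullLiteral(calciteType)
        }
        switch (cat) {
            case INT:
                return new BigDecimal(col); // exact numeric literal
            case DECIMAL:
                return new BigDecimal(col); // the real code also validates precision/scale
            case BOOLEAN:
                return Boolean.parseBoolean(col);
            default:
                return col;                 // falls back to a char literal
        }
    }

    public static void main(String[] args) {
        List<String> row = Arrays.asList("1", "3.14", "true", null);
        List<Category> cats =
                Arrays.asList(Category.INT, Category.DECIMAL, Category.BOOLEAN, Category.STRING);
        for (int i = 0; i < row.size(); i++) {
            System.out.println(toLiteral(row.get(i), cats.get(i)));
        }
    }
}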
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexBuilder in project flink by apache.
The class HiveParserTypeConverter, method getType.
public static RelDataType getType(
        RelOptCluster cluster, HiveParserRowResolver rr, List<String> neededCols)
        throws SemanticException {
    RexBuilder rexBuilder = cluster.getRexBuilder();
    RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory();
    RowSchema rs = rr.getRowSchema();
    List<RelDataType> fieldTypes = new LinkedList<>();
    List<String> fieldNames = new LinkedList<>();
    for (ColumnInfo ci : rs.getSignature()) {
        // A null neededCols means every column is needed.
        if (neededCols == null || neededCols.contains(ci.getInternalName())) {
            fieldTypes.add(convert(ci.getType(), dtFactory));
            fieldNames.add(ci.getInternalName());
        }
    }
    return dtFactory.createStructType(fieldTypes, fieldNames);
}
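The only real logic here is the filtering convention: a null neededCols keeps every column of the row schema, otherwise only the listed internal names survive. A minimal sketch of that convention, with made-up column names and plain maps standing in for the row schema and struct type:

import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Hypothetical demo of the null-means-all filtering convention in getType.
public class NeededColsFilterDemo {
    static Map<String, String> filter(Map<String, String> schema, List<String> neededCols) {
        Map<String, String> out = new LinkedHashMap<>();
        for (Map.Entry<String, String> col : schema.entrySet()) {
            if (neededCols == null || neededCols.contains(col.getKey())) {
                out.put(col.getKey(), col.getValue());
            }
        }
        return out;
    }

    public static void main(String[] args) {
        Map<String, String> schema = new LinkedHashMap<>();
        schema.put("_col0", "INT");
        schema.put("_col1", "STRING");
        schema.put("_col2", "DOUBLE");
        System.out.println(filter(schema, null));                   // all columns
        System.out.println(filter(schema, Arrays.asList("_col1"))); // only _col1
    }
}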
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rex.RexBuilder in project flink by apache.
The class CalcPythonCorrelateTransposeRule, method onMatch.
@Override
public void onMatch(RelOptRuleCall call) {
    FlinkLogicalCorrelate correlate = call.rel(0);
    FlinkLogicalCalc right = call.rel(2);
    RexBuilder rexBuilder = call.builder().getRexBuilder();
    FlinkLogicalCalc mergedCalc = StreamPhysicalCorrelateRule.getMergedCalc(right);
    FlinkLogicalTableFunctionScan tableScan = StreamPhysicalCorrelateRule.getTableScan(mergedCalc);
    RexProgram mergedCalcProgram = mergedCalc.getProgram();

    // Shift the Calc's input refs so the lifted filters are valid on top of the new Correlate.
    InputRefRewriter inputRefRewriter =
            new InputRefRewriter(
                    correlate.getRowType().getFieldCount() - mergedCalc.getRowType().getFieldCount());
    List<RexNode> correlateFilters =
            RelOptUtil.conjunctions(mergedCalcProgram.expandLocalRef(mergedCalcProgram.getCondition()))
                    .stream()
                    .map(x -> x.accept(inputRefRewriter))
                    .collect(Collectors.toList());

    // Rebuild the Correlate directly over the table function scan, then put the
    // filters back as a Calc above it.
    FlinkLogicalCorrelate newCorrelate =
            new FlinkLogicalCorrelate(
                    correlate.getCluster(),
                    correlate.getTraitSet(),
                    correlate.getLeft(),
                    tableScan,
                    correlate.getCorrelationId(),
                    correlate.getRequiredColumns(),
                    correlate.getJoinType());
    RexNode topCalcCondition = RexUtil.composeConjunction(rexBuilder, correlateFilters);
    RexProgram rexProgram = new RexProgramBuilder(newCorrelate.getRowType(), rexBuilder).getProgram();
    FlinkLogicalCalc newTopCalc =
            new FlinkLogicalCalc(
                    newCorrelate.getCluster(),
                    newCorrelate.getTraitSet(),
                    newCorrelate,
                    RexProgram.create(
                            newCorrelate.getRowType(),
                            rexProgram.getExprList(),
                            topCalcCondition,
                            newCorrelate.getRowType(),
                            rexBuilder));
    call.transformTo(newTopCalc);
}
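The InputRefRewriter offset is the heart of the transpose. The old Correlate's row type is the left input's fields followed by the merged Calc's output fields, so the offset works out to the left input's field count, which is exactly how far a filter's input refs must shift once it moves above the Correlate. Below is a hypothetical standalone model of that shift, assuming the merged Calc's output width matches the Correlate's right side; field counts are made up for illustration.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

// Hypothetical demo: model the InputRefRewriter offset with plain integers
// standing in for RexInputRef indexes.
public class InputRefShiftDemo {
    public static void main(String[] args) {
        int leftFieldCount = 3;
        int calcOutputFieldCount = 2;
        int correlateFieldCount = leftFieldCount + calcOutputFieldCount; // 5
        int offset = correlateFieldCount - calcOutputFieldCount;         // 3, i.e. leftFieldCount

        List<Integer> filterRefs = Arrays.asList(0, 1); // $0, $1 below the old Calc
        List<Integer> shifted =
                filterRefs.stream().map(i -> i + offset).collect(Collectors.toList());
        System.out.println(shifted); // [3, 4], i.e. $3 and $4 on top of the new Correlate
    }
}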