use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.SqlIntervalQualifier in project druid by druid-io.
the class DruidSqlParserUtils method convertSqlNodeToGranularity.
/**
 * Extracts the granularity from a SqlNode representing one of the following function calls:
 * 1. FLOOR(__time TO TimeUnit)
 * 2. TIME_FLOOR(__time, 'PT1H')
 *
 * Validation of the sqlNode is contingent on the following conditions:
 * 1. sqlNode is an instance of SqlCall
 * 2. The operator is either TIME_FLOOR or FLOOR
 * 3. The number of operands in the call is 2
 * 4. The first operand is a SimpleIdentifier representing __time
 * 5. If the operator is TIME_FLOOR, the second operand is a literal that can be converted to the Granularity class
 * 6. If the operator is FLOOR, the second operand is a TimeUnit that can be mapped using {@link TimeUnits}
 *
 * Since it is meant to be used primarily while parsing the SqlNode, it is wrapped in {@code convertSqlNodeToGranularityThrowingParseExceptions}.
 *
 * @param sqlNode SqlNode representing a call to a function
 * @return Granularity as intended by the function call
 * @throws ParseException if the SqlNode cannot be converted to a granularity
 */
public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws ParseException {
  final String genericParseFailedMessageFormatString =
      "Encountered %s after PARTITIONED BY. "
      + "Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or %s function";

  if (!(sqlNode instanceof SqlCall)) {
    throw new ParseException(StringUtils.format(
        genericParseFailedMessageFormatString,
        sqlNode.toString(),
        TimeFloorOperatorConversion.SQL_FUNCTION_NAME));
  }
  SqlCall sqlCall = (SqlCall) sqlNode;

  String operatorName = sqlCall.getOperator().getName();
  Preconditions.checkArgument(
      "FLOOR".equalsIgnoreCase(operatorName)
      || TimeFloorOperatorConversion.SQL_FUNCTION_NAME.equalsIgnoreCase(operatorName),
      StringUtils.format(
          "PARTITIONED BY clause only supports FLOOR(__time TO <unit>) and %s(__time, period) functions",
          TimeFloorOperatorConversion.SQL_FUNCTION_NAME));

  List<SqlNode> operandList = sqlCall.getOperandList();
  Preconditions.checkArgument(
      operandList.size() == 2,
      StringUtils.format("%s in PARTITIONED BY clause must have two arguments", operatorName));

  // Check that the first argument passed to the floor function is __time
  SqlNode timeOperandSqlNode = operandList.get(0);
  Preconditions.checkArgument(
      timeOperandSqlNode.getKind().equals(SqlKind.IDENTIFIER),
      StringUtils.format("First argument to %s in PARTITIONED BY clause can only be __time", operatorName));
  SqlIdentifier timeOperandSqlIdentifier = (SqlIdentifier) timeOperandSqlNode;
  Preconditions.checkArgument(
      timeOperandSqlIdentifier.getSimple().equals(ColumnHolder.TIME_COLUMN_NAME),
      StringUtils.format("First argument to %s in PARTITIONED BY clause can only be __time", operatorName));

  // If the floor function is of the form TIME_FLOOR(__time, 'PT1H')
  if (operatorName.equalsIgnoreCase(TimeFloorOperatorConversion.SQL_FUNCTION_NAME)) {
    SqlNode granularitySqlNode = operandList.get(1);
    Preconditions.checkArgument(
        granularitySqlNode.getKind().equals(SqlKind.LITERAL),
        "Second argument to TIME_FLOOR in PARTITIONED BY clause must be a period like 'PT1H'");
    String granularityString = SqlLiteral.unchain(granularitySqlNode).toValue();
    Period period;
    try {
      period = new Period(granularityString);
    } catch (IllegalArgumentException e) {
      throw new ParseException(StringUtils.format("%s is an invalid period string", granularitySqlNode.toString()));
    }
    return new PeriodGranularity(period, null, null);
  } else if ("FLOOR".equalsIgnoreCase(operatorName)) {
    // If the floor function is of the form FLOOR(__time TO DAY)
    SqlNode granularitySqlNode = operandList.get(1);
    // In future versions of Calcite, this can be checked via
    // granularitySqlNode.getKind().equals(SqlKind.INTERVAL_QUALIFIER)
    Preconditions.checkArgument(
        granularitySqlNode instanceof SqlIntervalQualifier,
        "Second argument to the FLOOR function in PARTITIONED BY clause is not a valid granularity. "
        + "Please refer to the documentation of the FLOOR function");
    SqlIntervalQualifier granularityIntervalQualifier = (SqlIntervalQualifier) granularitySqlNode;
    Period period = TimeUnits.toPeriod(granularityIntervalQualifier.timeUnitRange);
    Preconditions.checkNotNull(
        period,
        StringUtils.format("%s is not a valid granularity for ingestion", granularityIntervalQualifier.timeUnitRange.toString()));
    return new PeriodGranularity(period, null, null);
  }

  // Shouldn't reach here
  throw new ParseException(StringUtils.format(
      genericParseFailedMessageFormatString,
      sqlNode.toString(),
      TimeFloorOperatorConversion.SQL_FUNCTION_NAME));
}
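For orientation, here is a minimal caller-side sketch, not from the Druid repository, that hand-builds the FLOOR(__time TO DAY) shape and converts it. It assumes Calcite's stock SqlStdOperatorTable.FLOOR operator and org.apache.calcite.avatica.util.TimeUnit; Druid itself obtains these nodes from its SQL parser rather than constructing them directly.

// Illustrative sketch: build FLOOR(__time TO DAY) as a SqlNode and convert it.
// (convertSqlNodeToGranularity declares ParseException, so this belongs in a
// method that throws or handles it.)
SqlIdentifier timeColumn = new SqlIdentifier("__time", SqlParserPos.ZERO);
SqlIntervalQualifier dayQualifier = new SqlIntervalQualifier(TimeUnit.DAY, null, SqlParserPos.ZERO);
SqlNode floorCall = SqlStdOperatorTable.FLOOR.createCall(SqlParserPos.ZERO, timeColumn, dayQualifier);
// Expected to yield a PeriodGranularity for P1D, i.e. the equivalent of Granularities.DAY.
Granularity granularity = DruidSqlParserUtils.convertSqlNodeToGranularity(floorCall);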
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.SqlIntervalQualifier in project hive by apache.
the class TypeConverter method convert.
public static RelDataType convert(PrimitiveTypeInfo type, RelDataTypeFactory dtFactory) {
  RelDataType convertedType = null;
  switch (type.getPrimitiveCategory()) {
    case VOID:
      convertedType = dtFactory.createSqlType(SqlTypeName.NULL);
      break;
    case BOOLEAN:
      convertedType = dtFactory.createSqlType(SqlTypeName.BOOLEAN);
      break;
    case BYTE:
      convertedType = dtFactory.createSqlType(SqlTypeName.TINYINT);
      break;
    case SHORT:
      convertedType = dtFactory.createSqlType(SqlTypeName.SMALLINT);
      break;
    case INT:
      convertedType = dtFactory.createSqlType(SqlTypeName.INTEGER);
      break;
    case LONG:
      convertedType = dtFactory.createSqlType(SqlTypeName.BIGINT);
      break;
    case FLOAT:
      convertedType = dtFactory.createSqlType(SqlTypeName.FLOAT);
      break;
    case DOUBLE:
      convertedType = dtFactory.createSqlType(SqlTypeName.DOUBLE);
      break;
    case STRING:
      convertedType = dtFactory.createTypeWithCharsetAndCollation(
          dtFactory.createSqlType(SqlTypeName.VARCHAR, Integer.MAX_VALUE),
          Charset.forName(ConversionUtil.NATIVE_UTF16_CHARSET_NAME),
          SqlCollation.IMPLICIT);
      break;
    case DATE:
      convertedType = dtFactory.createSqlType(SqlTypeName.DATE);
      break;
    case TIMESTAMP:
      convertedType = dtFactory.createSqlType(SqlTypeName.TIMESTAMP);
      break;
    case TIMESTAMPLOCALTZ:
      convertedType = dtFactory.createSqlType(SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE);
      break;
    case INTERVAL_YEAR_MONTH:
      convertedType = dtFactory.createSqlIntervalType(
          new SqlIntervalQualifier(TimeUnit.YEAR, TimeUnit.MONTH, new SqlParserPos(1, 1)));
      break;
    case INTERVAL_DAY_TIME:
      convertedType = dtFactory.createSqlIntervalType(
          new SqlIntervalQualifier(TimeUnit.DAY, TimeUnit.SECOND, new SqlParserPos(1, 1)));
      break;
    case BINARY:
      convertedType = dtFactory.createSqlType(SqlTypeName.BINARY);
      break;
    case DECIMAL:
      DecimalTypeInfo dtInf = (DecimalTypeInfo) type;
      convertedType = dtFactory.createSqlType(SqlTypeName.DECIMAL, dtInf.precision(), dtInf.scale());
      break;
    case VARCHAR:
      convertedType = dtFactory.createTypeWithCharsetAndCollation(
          dtFactory.createSqlType(SqlTypeName.VARCHAR, ((BaseCharTypeInfo) type).getLength()),
          Charset.forName(ConversionUtil.NATIVE_UTF16_CHARSET_NAME),
          SqlCollation.IMPLICIT);
      break;
    case CHAR:
      convertedType = dtFactory.createTypeWithCharsetAndCollation(
          dtFactory.createSqlType(SqlTypeName.CHAR, ((BaseCharTypeInfo) type).getLength()),
          Charset.forName(ConversionUtil.NATIVE_UTF16_CHARSET_NAME),
          SqlCollation.IMPLICIT);
      break;
    case UNKNOWN:
      convertedType = dtFactory.createSqlType(SqlTypeName.OTHER);
      break;
  }
  if (null == convertedType) {
    throw new RuntimeException("Unsupported Type : " + type.getTypeName());
  }
  return dtFactory.createTypeWithNullability(convertedType, true);
}
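As a quick illustration of the conversion above (a sketch, not Hive source; JavaTypeFactoryImpl is Calcite's stock type factory and TypeInfoFactory is Hive's standard TypeInfo factory):

// Illustrative sketch: Hive decimal(10, 2) becomes a nullable Calcite DECIMAL(10, 2).
RelDataTypeFactory dtFactory = new JavaTypeFactoryImpl();
PrimitiveTypeInfo decimalInfo = TypeInfoFactory.getDecimalTypeInfo(10, 2);
RelDataType relType = TypeConverter.convert(decimalInfo, dtFactory);
// relType.getSqlTypeName() == DECIMAL, getPrecision() == 10, getScale() == 2,
// and isNullable() == true because of the createTypeWithNullability wrapper.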
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.SqlIntervalQualifier in project hive by apache.
the class RexNodeConverter method convert.
protected RexNode convert(ExprNodeConstantDesc literal) throws CalciteSemanticException {
  final RelDataTypeFactory dtFactory = rexBuilder.getTypeFactory();
  final PrimitiveTypeInfo hiveType = (PrimitiveTypeInfo) literal.getTypeInfo();
  final RelDataType calciteDataType = TypeConverter.convert(hiveType, dtFactory);
  PrimitiveCategory hiveTypeCategory = hiveType.getPrimitiveCategory();
  ConstantObjectInspector coi = literal.getWritableObjectInspector();
  Object value = ObjectInspectorUtils.copyToStandardJavaObject(coi.getWritableConstantValue(), coi);
  RexNode calciteLiteral = null;
  // If value is null, the type should also be VOID.
  if (value == null) {
    hiveTypeCategory = PrimitiveCategory.VOID;
  }
  // TODO: Verify if we need to use ConstantObjectInspector to unwrap data
  switch (hiveTypeCategory) {
    case BOOLEAN:
      calciteLiteral = rexBuilder.makeLiteral(((Boolean) value).booleanValue());
      break;
    case BYTE:
      calciteLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Byte) value), calciteDataType);
      break;
    case SHORT:
      calciteLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Short) value), calciteDataType);
      break;
    case INT:
      calciteLiteral = rexBuilder.makeExactLiteral(new BigDecimal((Integer) value));
      break;
    case LONG:
      calciteLiteral = rexBuilder.makeBigintLiteral(new BigDecimal((Long) value));
      break;
    case DECIMAL:
      if (value instanceof HiveDecimal) {
        value = ((HiveDecimal) value).bigDecimalValue();
      } else if (value instanceof Decimal128) {
        value = ((Decimal128) value).toBigDecimal();
      }
      if (value == null) {
        // The value could not be unwrapped into a valid decimal literal.
        throw new CalciteSemanticException(
            "Expression " + literal.getExprString() + " is not a valid decimal",
            UnsupportedFeature.Invalid_decimal);
        // TODO: return createNullLiteral(literal);
      }
      calciteLiteral = rexBuilder.makeExactLiteral((BigDecimal) value, calciteDataType);
      break;
    case FLOAT:
      calciteLiteral = rexBuilder.makeApproxLiteral(
          new BigDecimal(Float.toString((Float) value)), calciteDataType);
      break;
    case DOUBLE:
      // TODO: The best solution is to support NaN in expression reduction.
      if (Double.isNaN((Double) value)) {
        throw new CalciteSemanticException("NaN", UnsupportedFeature.Invalid_decimal);
      }
      calciteLiteral = rexBuilder.makeApproxLiteral(
          new BigDecimal(Double.toString((Double) value)), calciteDataType);
      break;
    case CHAR:
      if (value instanceof HiveChar) {
        value = ((HiveChar) value).getValue();
      }
      final int lengthChar = TypeInfoUtils.getCharacterLengthForType(hiveType);
      RelDataType charType = rexBuilder.getTypeFactory().createTypeWithCharsetAndCollation(
          rexBuilder.getTypeFactory().createSqlType(SqlTypeName.CHAR, lengthChar),
          Charset.forName(ConversionUtil.NATIVE_UTF16_CHARSET_NAME),
          SqlCollation.IMPLICIT);
      calciteLiteral = rexBuilder.makeLiteral(
          RexNodeExprFactory.makeHiveUnicodeString((String) value), charType, false);
      break;
    case VARCHAR:
      if (value instanceof HiveVarchar) {
        value = ((HiveVarchar) value).getValue();
      }
      final int lengthVarchar = TypeInfoUtils.getCharacterLengthForType(hiveType);
      RelDataType varcharType = rexBuilder.getTypeFactory().createTypeWithCharsetAndCollation(
          rexBuilder.getTypeFactory().createSqlType(SqlTypeName.VARCHAR, lengthVarchar),
          Charset.forName(ConversionUtil.NATIVE_UTF16_CHARSET_NAME),
          SqlCollation.IMPLICIT);
      calciteLiteral = rexBuilder.makeLiteral(
          RexNodeExprFactory.makeHiveUnicodeString((String) value), varcharType, true);
      break;
    case STRING:
      RelDataType stringType = rexBuilder.getTypeFactory().createTypeWithCharsetAndCollation(
          rexBuilder.getTypeFactory().createSqlType(SqlTypeName.VARCHAR, Integer.MAX_VALUE),
          Charset.forName(ConversionUtil.NATIVE_UTF16_CHARSET_NAME),
          SqlCollation.IMPLICIT);
      calciteLiteral = rexBuilder.makeLiteral(
          RexNodeExprFactory.makeHiveUnicodeString((String) value), stringType, true);
      break;
    case DATE:
      final Date date = (Date) value;
      calciteLiteral = rexBuilder.makeDateLiteral(DateString.fromDaysSinceEpoch(date.toEpochDay()));
      break;
    case TIMESTAMP:
      final TimestampString tsString;
      if (value instanceof Calendar) {
        tsString = TimestampString.fromCalendarFields((Calendar) value);
      } else {
        final Timestamp ts = (Timestamp) value;
        tsString = TimestampString.fromMillisSinceEpoch(ts.toEpochMilli()).withNanos(ts.getNanos());
      }
      // Must call makeLiteral, not makeTimestampLiteral,
      // to have the RexBuilder.roundTime logic kick in.
      calciteLiteral = rexBuilder.makeLiteral(
          tsString,
          rexBuilder.getTypeFactory().createSqlType(
              SqlTypeName.TIMESTAMP,
              rexBuilder.getTypeFactory().getTypeSystem().getDefaultPrecision(SqlTypeName.TIMESTAMP)),
          false);
      break;
    case TIMESTAMPLOCALTZ:
      final TimestampString tsLocalTZString;
      Instant i = ((TimestampTZ) value).getZonedDateTime().toInstant();
      tsLocalTZString = TimestampString.fromMillisSinceEpoch(i.toEpochMilli()).withNanos(i.getNano());
      calciteLiteral = rexBuilder.makeTimestampWithLocalTimeZoneLiteral(
          tsLocalTZString,
          rexBuilder.getTypeFactory().getTypeSystem().getDefaultPrecision(SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE));
      break;
    case INTERVAL_YEAR_MONTH:
      // Calcite's year-month interval literal value is the total month count as a BigDecimal.
      BigDecimal totalMonths = BigDecimal.valueOf(((HiveIntervalYearMonth) value).getTotalMonths());
      calciteLiteral = rexBuilder.makeIntervalLiteral(
          totalMonths, new SqlIntervalQualifier(TimeUnit.YEAR, TimeUnit.MONTH, new SqlParserPos(1, 1)));
      break;
    case INTERVAL_DAY_TIME:
      // Calcite's day-time interval literal value is the total milliseconds as a BigDecimal.
      // Seconds converted to millis
      BigDecimal secsValueBd = BigDecimal.valueOf(((HiveIntervalDayTime) value).getTotalSeconds() * 1000);
      // Nanos converted to millis
      BigDecimal nanosValueBd = BigDecimal.valueOf(((HiveIntervalDayTime) value).getNanos(), 6);
      calciteLiteral = rexBuilder.makeIntervalLiteral(
          secsValueBd.add(nanosValueBd),
          new SqlIntervalQualifier(TimeUnit.MILLISECOND, null, new SqlParserPos(1, 1)));
      break;
    case VOID:
      calciteLiteral = rexBuilder.makeLiteral(null, calciteDataType, true);
      break;
    case BINARY:
    case UNKNOWN:
    default:
      throw new RuntimeException("Unsupported Literal");
  }
  return calciteLiteral;
}
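To make the day-time interval encoding concrete, here is the arithmetic worked by hand for an interval of 1 hour 30 minutes and 0.5 seconds (a standalone sketch, not Hive code):

// getTotalSeconds() == 5400, getNanos() == 500000000 for this interval.
BigDecimal millisFromSeconds = BigDecimal.valueOf(5400L * 1000);   // 5400000
BigDecimal millisFromNanos = BigDecimal.valueOf(500000000L, 6);    // 500.000000 (nanos * 10^-6)
BigDecimal intervalValue = millisFromSeconds.add(millisFromNanos); // 5400500.000000
// 5400500 ms is what makeIntervalLiteral receives under the MILLISECOND qualifier.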
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.SqlIntervalQualifier in project hive by apache.
the class RexNodeExprFactory method createIntervalDayConstantExpr.
/**
* {@inheritDoc}
*/
@Override
protected RexLiteral createIntervalDayConstantExpr(String value) {
  HiveIntervalDayTime v = new HiveIntervalDayTime(Integer.parseInt(value), 0, 0, 0, 0);
  BigDecimal secsValueBd = BigDecimal.valueOf(v.getTotalSeconds() * 1000);
  BigDecimal nanosValueBd = BigDecimal.valueOf(v.getNanos(), 6);
  return rexBuilder.makeIntervalLiteral(
      secsValueBd.add(nanosValueBd),
      new SqlIntervalQualifier(TimeUnit.MILLISECOND, null, new SqlParserPos(1, 1)));
}
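So a call like createIntervalDayConstantExpr("3") (a hypothetical input, traced by hand) populates only the day field of the interval:

// Illustrative trace of the factory's input for "3" (three days).
HiveIntervalDayTime threeDays = new HiveIntervalDayTime(3, 0, 0, 0, 0);
// threeDays.getTotalSeconds() == 259200 and getNanos() == 0, so the literal
// value is 259200 * 1000 + 0 == 259200000 millis under an INTERVAL MILLISECOND qualifier.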
use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.SqlIntervalQualifier in project hive by apache.
the class RexNodeExprFactory method createIntervalYearConstantExpr.
/**
* {@inheritDoc}
*/
@Override
protected RexLiteral createIntervalYearConstantExpr(String value) {
  HiveIntervalYearMonth v = new HiveIntervalYearMonth(Integer.parseInt(value), 0);
  BigDecimal totalMonths = BigDecimal.valueOf(v.getTotalMonths());
  return rexBuilder.makeIntervalLiteral(
      totalMonths, new SqlIntervalQualifier(TimeUnit.YEAR, TimeUnit.MONTH, new SqlParserPos(1, 1)));
}
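Similarly, createIntervalYearConstantExpr("2") (a hypothetical input, traced by hand) reduces to a month count:

// Illustrative trace of the factory's input for "2" (two years).
HiveIntervalYearMonth twoYears = new HiveIntervalYearMonth(2, 0);
// twoYears.getTotalMonths() == 24, so the literal is an INTERVAL YEAR TO MONTH
// whose value is the BigDecimal 24.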