Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.type.SqlTypeName in project hive by apache.
The class ASTBuilder, method literal:
public static ASTNode literal(RexLiteral literal) {
  Object val = null;
  int type = 0;
  SqlTypeName sqlType = literal.getType().getSqlTypeName();

  switch (sqlType) {
    case BINARY:
    case DATE:
    case TIME:
    case TIMESTAMP:
    case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
    case INTERVAL_DAY:
    case INTERVAL_DAY_HOUR:
    case INTERVAL_DAY_MINUTE:
    case INTERVAL_DAY_SECOND:
    case INTERVAL_HOUR:
    case INTERVAL_HOUR_MINUTE:
    case INTERVAL_HOUR_SECOND:
    case INTERVAL_MINUTE:
    case INTERVAL_MINUTE_SECOND:
    case INTERVAL_MONTH:
    case INTERVAL_SECOND:
    case INTERVAL_YEAR:
    case INTERVAL_YEAR_MONTH:
    case MAP:
    case ARRAY:
    case ROW:
      if (literal.getValue() == null) {
        return ASTBuilder.construct(HiveParser.TOK_NULL, "TOK_NULL").node();
      }
      break;
    case TINYINT:
    case SMALLINT:
    case INTEGER:
    case BIGINT:
    case DOUBLE:
    case DECIMAL:
    case FLOAT:
    case REAL:
    case VARCHAR:
    case CHAR:
    case BOOLEAN:
      if (literal.getValue3() == null) {
        return ASTBuilder.construct(HiveParser.TOK_NULL, "TOK_NULL").node();
      }
  }

  switch (sqlType) {
    case TINYINT:
    case SMALLINT:
    case INTEGER:
    case BIGINT:
      val = literal.getValue3();
      // Hive distinguishes between the integral types, most importantly when building
      // an IntegralLiteral, so each width gets its own suffix below.
      if (val instanceof BigDecimal) {
        val = ((BigDecimal) val).longValue();
      }
      switch (sqlType) {
        case TINYINT:
          val = val + "Y";
          break;
        case SMALLINT:
          val = val + "S";
          break;
        case INTEGER:
          val = val + "";
          break;
        case BIGINT:
          val = val + "L";
          break;
      }
      type = HiveParser.IntegralLiteral;
      break;
    case DOUBLE:
      val = literal.getValue3() + "D";
      type = HiveParser.NumberLiteral;
      break;
    case DECIMAL:
      val = literal.getValue3() + "BD";
      type = HiveParser.NumberLiteral;
      break;
    case FLOAT:
    case REAL:
      val = literal.getValue3() + "F";
      type = HiveParser.Number;
      break;
    case VARCHAR:
    case CHAR:
      val = literal.getValue3();
      String escapedVal = BaseSemanticAnalyzer.escapeSQLString(String.valueOf(val));
      type = HiveParser.StringLiteral;
      val = "'" + escapedVal + "'";
      break;
    case BOOLEAN:
      val = literal.getValue3();
      type = ((Boolean) val).booleanValue() ? HiveParser.KW_TRUE : HiveParser.KW_FALSE;
      break;
    case DATE:
      val = "'" + literal.getValueAs(DateString.class).toString() + "'";
      type = HiveParser.TOK_DATELITERAL;
      break;
    case TIME:
      val = "'" + literal.getValueAs(TimeString.class).toString() + "'";
      type = HiveParser.TOK_TIMESTAMPLITERAL;
      break;
    case TIMESTAMP:
      val = "'" + literal.getValueAs(TimestampString.class).toString() + "'";
      type = HiveParser.TOK_TIMESTAMPLITERAL;
      break;
    case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
      // Calcite stores TIMESTAMP WITH LOCAL TIME ZONE in UTC internally, so the
      // UTC suffix must be added back when converting to a Hive literal.
      val = "'" + literal.getValueAs(TimestampString.class).toString() + " UTC'";
      type = HiveParser.TOK_TIMESTAMPLOCALTZLITERAL;
      break;
    case INTERVAL_YEAR:
    case INTERVAL_MONTH:
    case INTERVAL_YEAR_MONTH: {
      type = HiveParser.TOK_INTERVAL_YEAR_MONTH_LITERAL;
      BigDecimal monthsBd = (BigDecimal) literal.getValue();
      HiveIntervalYearMonth intervalYearMonth = new HiveIntervalYearMonth(monthsBd.intValue());
      val = "'" + intervalYearMonth.toString() + "'";
      break;
    }
    case INTERVAL_DAY:
    case INTERVAL_DAY_HOUR:
    case INTERVAL_DAY_MINUTE:
    case INTERVAL_DAY_SECOND:
    case INTERVAL_HOUR:
    case INTERVAL_HOUR_MINUTE:
    case INTERVAL_HOUR_SECOND:
    case INTERVAL_MINUTE:
    case INTERVAL_MINUTE_SECOND:
    case INTERVAL_SECOND: {
      type = HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL;
      // The Calcite literal is in milliseconds; convert to seconds.
      BigDecimal millisBd = (BigDecimal) literal.getValue();
      BigDecimal secsBd = millisBd.divide(BigDecimal.valueOf(1000));
      HiveIntervalDayTime intervalDayTime = new HiveIntervalDayTime(secsBd);
      val = "'" + intervalDayTime.toString() + "'";
      break;
    }
    case NULL:
      type = HiveParser.TOK_NULL;
      break;
    // BINARY and ROW literals should not reach this point.
    case BINARY:
    case ROW:
    default:
      throw new RuntimeException("Unsupported Type: " + sqlType);
  }

  return (ASTNode) ParseDriver.adaptor.create(type, String.valueOf(val));
}
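For orientation, here is a minimal, hypothetical driver for this method. RexBuilder, JavaTypeFactoryImpl, and makeBigintLiteral are standard Calcite APIs, but the wrapper class and the printed output are assumptions, not code from the Hive project:

import java.math.BigDecimal;
import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexLiteral;
import org.apache.hadoop.hive.ql.parse.ASTNode;

public class AstLiteralDemo {
  public static void main(String[] args) {
    // Build a Calcite BIGINT literal. Per the switch above, it should come back
    // as an IntegralLiteral token whose text is "1L".
    RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
    RexLiteral one = rexBuilder.makeBigintLiteral(BigDecimal.ONE);
    ASTNode node = ASTBuilder.literal(one);
    System.out.println(node.toStringTree());
  }
}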
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.type.SqlTypeName in project druid by alibaba.
The class CalciteMySqlNodeVisitor, method toSqlTypeName:
private SqlTypeName toSqlTypeName(SQLDataType dataType) {
  long nameHashCode64 = dataType.nameHashCode64();
  SqlTypeName sqlTypeName = nameHashCode64SqlTypeNameMapping.get(nameHashCode64);
  if (sqlTypeName != null) {
    return sqlTypeName;
  }
  throw new FastsqlException("TODO");
}
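The lookup table itself is not part of this excerpt. In Alibaba Druid, SQLDataType.nameHashCode64() returns an FNV-1a hash of the type name, so the table is presumably keyed by that same hash. A hedged sketch of how such a mapping could be populated; the FnvHash helper call and the exact entries are assumptions:

import java.util.HashMap;
import java.util.Map;
import com.alibaba.druid.util.FnvHash;
import org.apache.calcite.sql.type.SqlTypeName;

public class TypeNameMappingSketch {
  // Assumed shape of nameHashCode64SqlTypeNameMapping: FNV-1a hash of the
  // (case-normalized) type name -> Calcite SqlTypeName.
  static final Map<Long, SqlTypeName> nameHashCode64SqlTypeNameMapping = new HashMap<>();

  static {
    nameHashCode64SqlTypeNameMapping.put(FnvHash.fnv1a_64_lower("bigint"), SqlTypeName.BIGINT);
    nameHashCode64SqlTypeNameMapping.put(FnvHash.fnv1a_64_lower("varchar"), SqlTypeName.VARCHAR);
    nameHashCode64SqlTypeNameMapping.put(FnvHash.fnv1a_64_lower("datetime"), SqlTypeName.TIMESTAMP);
  }
}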
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.type.SqlTypeName in project spf4j by zolyfarkas.
The class Types, method from:
public static Schema from(final RelDataType dataType) {
  SqlTypeName sqlTypeName = dataType.getSqlTypeName();
  Schema result;
  switch (sqlTypeName) {
    case ROW:
      List<RelDataTypeField> fieldList = dataType.getFieldList();
      List<Schema.Field> aFields = new ArrayList<>(fieldList.size());
      for (RelDataTypeField field : fieldList) {
        aFields.add(AvroCompatUtils.createField(field.getName(), from(field.getType()),
            null, null, false, false, Order.IGNORE));
      }
      return Schema.createRecord(aFields);
    case INTEGER:
      result = Schema.create(Schema.Type.INT);
      break;
    case BIGINT:
      result = Schema.create(Schema.Type.LONG);
      break;
    case VARCHAR:
      result = Schema.create(Schema.Type.STRING);
      break;
    case DATE:
      result = Schemas.dateString();
      break;
    case TIMESTAMP:
      result = Schemas.instantString();
      break;
    case BINARY:
      int precision = dataType.getPrecision();
      if (precision > 0) {
        result = Schema.createFixed(null, null, null, precision);
      } else {
        result = Schema.create(Schema.Type.BYTES);
      }
      break;
    case DOUBLE:
    case REAL:
    case DECIMAL:
      result = Schema.create(Schema.Type.DOUBLE);
      break;
    case FLOAT:
      result = Schema.create(Schema.Type.FLOAT);
      break;
    case BOOLEAN:
      result = Schema.create(Schema.Type.BOOLEAN);
      break;
    case ARRAY:
    case MULTISET:
      result = Schema.createArray(from(dataType.getComponentType()));
      break;
    case MAP:
      result = Schema.createMap(from(dataType.getValueType()));
      break;
    default:
      throw new UnsupportedOperationException("Unsupported data Type " + dataType);
  }
  if (dataType.isNullable()) {
    result = Schema.createUnion(Schema.create(Schema.Type.NULL), result);
  }
  return result;
}
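A small usage sketch, assuming the standard Calcite type factory; the demo class and field names are illustrative only. Note that ROW types return early from the switch, so nullable fields inside the row get union(null, ...) schemas via the recursive calls, while nullability of the row type itself is not wrapped:

import org.apache.avro.Schema;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rel.type.RelDataTypeSystem;
import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
import org.apache.calcite.sql.type.SqlTypeName;

public class TypesDemo {
  public static void main(String[] args) {
    RelDataTypeFactory typeFactory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
    // A ROW type with a non-null BIGINT field and a nullable VARCHAR field.
    RelDataType rowType = typeFactory.builder()
        .add("id", typeFactory.createSqlType(SqlTypeName.BIGINT))
        .add("name", typeFactory.createTypeWithNullability(
            typeFactory.createSqlType(SqlTypeName.VARCHAR), true))
        .build();
    // "id" maps to long; "name" maps to union(null, string) per the code above.
    Schema schema = Types.from(rowType);
    System.out.println(schema);
  }
}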
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.type.SqlTypeName in project druid by druid-io.
The class GroupByRules, method toLimitSpec:
public static DefaultLimitSpec toLimitSpec(final List<String> rowOrder, final Sort sort) {
  final Integer limit = sort.fetch != null ? RexLiteral.intValue(sort.fetch) : null;
  final List<OrderByColumnSpec> orderBys = Lists.newArrayListWithCapacity(sort.getChildExps().size());
  if (sort.offset != null) {
    // LimitSpecs don't accept offsets.
    return null;
  }
  // Extract orderBy column specs.
  for (int sortKey = 0; sortKey < sort.getChildExps().size(); sortKey++) {
    final RexNode sortExpression = sort.getChildExps().get(sortKey);
    final RelFieldCollation collation = sort.getCollation().getFieldCollations().get(sortKey);
    final OrderByColumnSpec.Direction direction;
    final StringComparator comparator;
    if (collation.getDirection() == RelFieldCollation.Direction.ASCENDING) {
      direction = OrderByColumnSpec.Direction.ASCENDING;
    } else if (collation.getDirection() == RelFieldCollation.Direction.DESCENDING) {
      direction = OrderByColumnSpec.Direction.DESCENDING;
    } else {
      throw new ISE("WTF?! Don't know what to do with direction[%s]", collation.getDirection());
    }
    final SqlTypeName sortExpressionType = sortExpression.getType().getSqlTypeName();
    if (SqlTypeName.NUMERIC_TYPES.contains(sortExpressionType)
        || SqlTypeName.TIMESTAMP == sortExpressionType
        || SqlTypeName.DATE == sortExpressionType) {
      comparator = StringComparators.NUMERIC;
    } else {
      comparator = StringComparators.LEXICOGRAPHIC;
    }
    if (sortExpression.isA(SqlKind.INPUT_REF)) {
      final RexInputRef ref = (RexInputRef) sortExpression;
      final String fieldName = rowOrder.get(ref.getIndex());
      orderBys.add(new OrderByColumnSpec(fieldName, direction, comparator));
    } else {
      // We don't support sorting by anything other than refs which actually appear
      // in the query result.
      return null;
    }
  }
  return new DefaultLimitSpec(orderBys, limit);
}
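For reference, a hedged sketch of what this method produces for a single numeric sort key with fetch = 10. The column name "cnt" is illustrative; the constructor signatures match the Druid codebase of this era:

import com.google.common.collect.ImmutableList;
import io.druid.query.groupby.orderby.DefaultLimitSpec;
import io.druid.query.groupby.orderby.OrderByColumnSpec;
import io.druid.query.ordering.StringComparators;

public class LimitSpecSketch {
  // Equivalent of one loop iteration above: a numeric column sorted descending
  // picks StringComparators.NUMERIC; the limit comes from sort.fetch.
  static final DefaultLimitSpec EXAMPLE = new DefaultLimitSpec(
      ImmutableList.of(
          new OrderByColumnSpec("cnt", OrderByColumnSpec.Direction.DESCENDING, StringComparators.NUMERIC)),
      10);
}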
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.type.SqlTypeName in project druid by druid-io.
The class GroupByRules, method applyAggregate:
/**
 * Applies a filter -> project -> aggregate chain to a druidRel. Do not call this method unless
 * {@link #canApplyAggregate(DruidRel, Filter, Project, Aggregate)} returns true.
 *
 * @return new rel, or null if the chain cannot be applied
 */
private static DruidRel applyAggregate(
    final DruidRel druidRel,
    final Filter filter0,
    final Project project0,
    final Aggregate aggregate,
    final DruidOperatorTable operatorTable,
    final boolean approximateCountDistinct) {
  Preconditions.checkState(canApplyAggregate(druidRel, filter0, project0, aggregate), "Cannot applyAggregate.");
  final RowSignature sourceRowSignature;
  final boolean isNestedQuery = druidRel.getQueryBuilder().getGrouping() != null;
  if (isNestedQuery) {
    // Nested groupBy; source row signature is the output signature of druidRel.
    sourceRowSignature = druidRel.getOutputRowSignature();
  } else {
    sourceRowSignature = druidRel.getSourceRowSignature();
  }
  // Filter that should be applied before aggregating.
  final DimFilter filter;
  if (filter0 != null) {
    filter = Expressions.toFilter(operatorTable, druidRel.getPlannerContext(), sourceRowSignature, filter0.getCondition());
    if (filter == null) {
      // Can't plan this filter.
      return null;
    }
  } else if (druidRel.getQueryBuilder().getFilter() != null && !isNestedQuery) {
    // We're going to replace the existing druidRel, so inherit its filter.
    filter = druidRel.getQueryBuilder().getFilter();
  } else {
    filter = null;
  }
  // Projection that should be applied before aggregating.
  final Project project;
  if (project0 != null) {
    project = project0;
  } else if (druidRel.getQueryBuilder().getSelectProjection() != null && !isNestedQuery) {
    // We're going to replace the existing druidRel, so inherit its projection.
    project = druidRel.getQueryBuilder().getSelectProjection().getProject();
  } else {
    project = null;
  }
  final List<DimensionSpec> dimensions = Lists.newArrayList();
  final List<Aggregation> aggregations = Lists.newArrayList();
  final List<String> rowOrder = Lists.newArrayList();
  // Translate groupSet.
  final ImmutableBitSet groupSet = aggregate.getGroupSet();
  int dimOutputNameCounter = 0;
  for (int i : groupSet) {
    if (project != null && project.getChildExps().get(i) instanceof RexLiteral) {
      // Ignore literals in GROUP BY, so a user can write e.g. "GROUP BY 'dummy'" to group
      // everything into a single row. Add a dummy rowOrder entry so NULLs come out. This is
      // not strictly correct, but it works as long as nobody actually expects to see the literal.
      rowOrder.add(dimOutputName(dimOutputNameCounter++));
    } else {
      final RexNode rexNode = Expressions.fromFieldAccess(sourceRowSignature, project, i);
      final RowExtraction rex = Expressions.toRowExtraction(operatorTable, druidRel.getPlannerContext(), sourceRowSignature.getRowOrder(), rexNode);
      if (rex == null) {
        return null;
      }
      final SqlTypeName sqlTypeName = rexNode.getType().getSqlTypeName();
      final ValueType outputType = Calcites.getValueTypeForSqlTypeName(sqlTypeName);
      if (outputType == null) {
        throw new ISE("Cannot translate sqlTypeName[%s] to Druid type for field[%s]", sqlTypeName, rowOrder.get(i));
      }
      final DimensionSpec dimensionSpec = rex.toDimensionSpec(sourceRowSignature, dimOutputName(dimOutputNameCounter++), outputType);
      if (dimensionSpec == null) {
        return null;
      }
      dimensions.add(dimensionSpec);
      rowOrder.add(dimensionSpec.getOutputName());
    }
  }
  // Translate aggregates.
  for (int i = 0; i < aggregate.getAggCallList().size(); i++) {
    final AggregateCall aggCall = aggregate.getAggCallList().get(i);
    final Aggregation aggregation = translateAggregateCall(druidRel.getPlannerContext(), sourceRowSignature, project, aggCall, operatorTable, aggregations, i, approximateCountDistinct);
    if (aggregation == null) {
      return null;
    }
    aggregations.add(aggregation);
    rowOrder.add(aggregation.getOutputName());
  }
  if (isNestedQuery) {
    // Nested groupBy.
    return DruidNestedGroupBy.from(druidRel, filter, Grouping.create(dimensions, aggregations), aggregate.getRowType(), rowOrder);
  } else {
    // groupBy on a base dataSource.
    return druidRel.withQueryBuilder(druidRel.getQueryBuilder().withFilter(filter).withGrouping(Grouping.create(dimensions, aggregations), aggregate.getRowType(), rowOrder));
  }
}
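The dimOutputName helper is not included in this excerpt; given the counter usage above, it presumably just generates sequential output names (Druid's SQL layer conventionally names dimension outputs d0, d1, ...). A hypothetical one-liner consistent with that usage:

// Hypothetical; the real helper is not shown in this excerpt.
private static String dimOutputName(final int dimOutputNameCounter) {
  return "d" + dimOutputNameCounter;
}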