Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.type.SqlTypeName in project drill by apache.

In the class ExprToRex, the method visitCastExpression.

@Override
public RexNode visitCastExpression(CastExpression e, Void value) throws RuntimeException {
  // Convert the cast's input expression into a RexNode first.
  RexNode convertedInput = e.getInput().accept(this, null);
  // Log when the Drill minor type name has no direct SqlTypeName counterpart.
  String typeStr = e.getMajorType().getMinorType().toString();
  if (SqlTypeName.get(typeStr) == null) {
    logger.debug("SqlTypeName could not find {}", typeStr);
  }
  // Map the Drill type to a Calcite type, make it nullable, and build the cast.
  SqlTypeName typeName = TypeInferenceUtils.getCalciteTypeFromDrillType(e.getMajorType().getMinorType());
  RelDataType targetType = TypeInferenceUtils.createCalciteTypeWithNullability(
      inputRel.getCluster().getTypeFactory(), typeName, true);
  return builder.makeCast(targetType, convertedInput);
}
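The same cast construction can be exercised with Calcite's RexBuilder outside of Drill's visitor. Below is a minimal sketch assuming a plain (non-vendored) Calcite dependency; CastRexSketch and the CHAR literal standing in for the converted input are hypothetical:

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexNode;
import org.apache.calcite.sql.type.SqlTypeName;

public class CastRexSketch {
  public static void main(String[] args) {
    RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
    RexBuilder rexBuilder = new RexBuilder(typeFactory);

    // Hypothetical stand-in for the converted input expression (a CHAR literal).
    RexNode input = rexBuilder.makeLiteral("42");

    // Nullable INTEGER target, mirroring createCalciteTypeWithNullability(..., typeName, true).
    RelDataType target = typeFactory.createTypeWithNullability(
        typeFactory.createSqlType(SqlTypeName.INTEGER), true);

    // makeCast wraps the input in a CAST call when the types differ.
    RexNode cast = rexBuilder.makeCast(target, input);
    System.out.println(cast); // prints something like CAST('42'):INTEGER
  }
}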
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.type.SqlTypeName in project storm by apache.

In the class StormSqlTypeFactoryImpl, the method toSql.

@Override
public RelDataType toSql(RelDataType type) {
  if (type instanceof JavaType) {
    JavaType javaType = (JavaType) type;
    // Look up the SQL type for the Java class; fall back to ANY when no rule matches.
    SqlTypeName sqlTypeName = JavaToSqlTypeConversionRules.instance().lookup(javaType.getJavaClass());
    if (sqlTypeName == null) {
      sqlTypeName = SqlTypeName.ANY;
    }
    // Preserve the original type's nullability on the converted SQL type.
    return createTypeWithNullability(createSqlType(sqlTypeName), type.isNullable());
  }
  return super.toSql(type);
}
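The fallback-to-ANY behavior comes from JavaToSqlTypeConversionRules returning null for unmapped classes. A minimal sketch against plain Calcite; JavaToSqlLookupSketch is a hypothetical name, and java.util.UUID is assumed to have no conversion rule:

import org.apache.calcite.sql.type.JavaToSqlTypeConversionRules;
import org.apache.calcite.sql.type.SqlTypeName;

public class JavaToSqlLookupSketch {
  public static void main(String[] args) {
    JavaToSqlTypeConversionRules rules = JavaToSqlTypeConversionRules.instance();

    // Known Java classes resolve to concrete SQL types.
    System.out.println(rules.lookup(Integer.class)); // INTEGER
    System.out.println(rules.lookup(String.class));  // VARCHAR

    // An unmapped class yields null, which StormSqlTypeFactoryImpl replaces with ANY.
    SqlTypeName unknown = rules.lookup(java.util.UUID.class);
    System.out.println(unknown == null ? SqlTypeName.ANY : unknown);
  }
}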
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.type.SqlTypeName in project beam by apache.

In the class SqlOperators, the method createSqlType.

private static RelDataType createSqlType(SqlTypeName typeName, boolean withNullability) {
  final RelDataTypeFactory typeFactory = createTypeFactory();
  RelDataType type = typeFactory.createSqlType(typeName);
  // createSqlType returns a NOT NULL type; wrap it to get the nullable variant if requested.
  if (withNullability) {
    type = typeFactory.createTypeWithNullability(type, true);
  }
  return type;
}
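The withNullability branch matters because Calcite type factories hand out NOT NULL types by default. A small sketch of that behavior, assuming plain Calcite (Beam's own createTypeFactory is replaced here by a SqlTypeFactoryImpl):

import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rel.type.RelDataTypeSystem;
import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
import org.apache.calcite.sql.type.SqlTypeName;

public class NullabilitySketch {
  public static void main(String[] args) {
    RelDataTypeFactory typeFactory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);

    // createSqlType produces a NOT NULL type by default.
    RelDataType base = typeFactory.createSqlType(SqlTypeName.VARCHAR);
    System.out.println(base.isNullable()); // false

    // createTypeWithNullability derives the nullable variant of the same type.
    RelDataType nullable = typeFactory.createTypeWithNullability(base, true);
    System.out.println(nullable.isNullable()); // true
  }
}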
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.type.SqlTypeName in project druid by druid-io.

In the class DruidQuery, the method computeSorting.

@Nonnull
private static Sorting computeSorting(
    final PartialDruidQuery partialQuery,
    final PlannerContext plannerContext,
    final RowSignature rowSignature,
    @Nullable final VirtualColumnRegistry virtualColumnRegistry) {
  final Sort sort = Preconditions.checkNotNull(partialQuery.getSort(), "sort");
  final Project sortProject = partialQuery.getSortProject();

  // Extract limit and offset.
  final OffsetLimit offsetLimit = OffsetLimit.fromSort(sort);

  // Extract orderBy column specs.
  final List<OrderByColumnSpec> orderBys = new ArrayList<>(sort.getChildExps().size());
  for (int sortKey = 0; sortKey < sort.getChildExps().size(); sortKey++) {
    final RexNode sortExpression = sort.getChildExps().get(sortKey);
    final RelFieldCollation collation = sort.getCollation().getFieldCollations().get(sortKey);
    final OrderByColumnSpec.Direction direction;
    final StringComparator comparator;
    if (collation.getDirection() == RelFieldCollation.Direction.ASCENDING) {
      direction = OrderByColumnSpec.Direction.ASCENDING;
    } else if (collation.getDirection() == RelFieldCollation.Direction.DESCENDING) {
      direction = OrderByColumnSpec.Direction.DESCENDING;
    } else {
      throw new ISE("Don't know what to do with direction[%s]", collation.getDirection());
    }
    // Numeric, TIMESTAMP, and DATE sort keys compare numerically; everything else lexicographically.
    final SqlTypeName sortExpressionType = sortExpression.getType().getSqlTypeName();
    if (SqlTypeName.NUMERIC_TYPES.contains(sortExpressionType)
        || SqlTypeName.TIMESTAMP == sortExpressionType
        || SqlTypeName.DATE == sortExpressionType) {
      comparator = StringComparators.NUMERIC;
    } else {
      comparator = StringComparators.LEXICOGRAPHIC;
    }
    if (sortExpression.isA(SqlKind.INPUT_REF)) {
      final RexInputRef ref = (RexInputRef) sortExpression;
      final String fieldName = rowSignature.getColumnName(ref.getIndex());
      orderBys.add(new OrderByColumnSpec(fieldName, direction, comparator));
    } else {
      // We don't support sorting by anything other than refs which actually appear in the query result.
      throw new CannotBuildQueryException(sort, sortExpression);
    }
  }

  // Extract any post-sort Projection.
  final Projection projection;
  if (sortProject == null) {
    projection = null;
  } else if (partialQuery.getAggregate() == null) {
    if (virtualColumnRegistry == null) {
      throw new ISE("Must provide 'virtualColumnRegistry' for pre-aggregation Projection!");
    }
    projection = Projection.preAggregation(sortProject, plannerContext, rowSignature, virtualColumnRegistry);
  } else {
    projection = Projection.postAggregation(sortProject, plannerContext, rowSignature, "s");
  }

  return Sorting.create(orderBys, offsetLimit, projection);
}
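The comparator choice above hinges only on the sort key's SqlTypeName, so it can be tested in isolation. A minimal sketch, assuming plain Calcite; SortComparatorSketch is hypothetical and the string constants stand in for Druid's StringComparators:

import org.apache.calcite.sql.type.SqlTypeName;

public class SortComparatorSketch {
  // Mirrors computeSorting: numeric, TIMESTAMP, and DATE keys sort numerically,
  // everything else lexicographically.
  static String comparatorFor(SqlTypeName type) {
    if (SqlTypeName.NUMERIC_TYPES.contains(type)
        || type == SqlTypeName.TIMESTAMP
        || type == SqlTypeName.DATE) {
      return "NUMERIC";
    }
    return "LEXICOGRAPHIC";
  }

  public static void main(String[] args) {
    System.out.println(comparatorFor(SqlTypeName.BIGINT));    // NUMERIC
    System.out.println(comparatorFor(SqlTypeName.TIMESTAMP)); // NUMERIC
    System.out.println(comparatorFor(SqlTypeName.VARCHAR));   // LEXICOGRAPHIC
  }
}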
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.sql.type.SqlTypeName in project druid by druid-io.

In the class NativeQueryMaker, the method runQuery.

@Override
public Sequence<Object[]> runQuery(final DruidQuery druidQuery) {
  final Query<?> query = druidQuery.getQuery();

  // When requireTimeCondition is set, reject queries that would scan eternity (no __time filter).
  if (plannerContext.getPlannerConfig().isRequireTimeCondition()
      && !(druidQuery.getDataSource() instanceof InlineDataSource)) {
    if (Intervals.ONLY_ETERNITY.equals(findBaseDataSourceIntervals(query))) {
      throw new CannotBuildQueryException(
          "requireTimeCondition is enabled, all queries must include a filter condition on the __time column");
    }
  }

  int numFilters = plannerContext.getPlannerConfig().getMaxNumericInFilters();
  // Instead of IN(v1,v2,v3) the user should specify IN('v1','v2','v3').
  if (numFilters != PlannerConfig.NUM_FILTER_NOT_USED) {
    if (query.getFilter() instanceof OrDimFilter) {
      OrDimFilter orDimFilter = (OrDimFilter) query.getFilter();
      int numBoundFilters = 0;
      for (DimFilter filter : orDimFilter.getFields()) {
        numBoundFilters += filter instanceof BoundDimFilter ? 1 : 0;
      }
      if (numBoundFilters > numFilters) {
        String dimension = ((BoundDimFilter) (orDimFilter.getFields().get(0))).getDimension();
        throw new UOE(StringUtils.format(
            "The number of values in the IN clause for [%s] in query exceeds configured maxNumericFilter limit of [%s] for INs. Cast [%s] values of IN clause to String",
            dimension, numFilters, orDimFilter.getFields().size()));
      }
    }
  }

  final List<String> rowOrder;
  if (query instanceof TimeseriesQuery && !druidQuery.getGrouping().getDimensions().isEmpty()) {
    // Hack for timeseries queries: when generating them, DruidQuery.toTimeseriesQuery translates a dimension
    // based on a timestamp_floor expression into a 'granularity'. This is not reflected in the druidQuery's
    // output row signature, so we have to account for it here.
    // TODO: We can remove this once https://github.com/apache/druid/issues/9974 is done.
    final String timeDimension = Iterables.getOnlyElement(druidQuery.getGrouping().getDimensions()).getOutputName();
    rowOrder = druidQuery.getOutputRowSignature().getColumnNames().stream()
        .map(f -> timeDimension.equals(f) ? ColumnHolder.TIME_COLUMN_NAME : f)
        .collect(Collectors.toList());
  } else {
    rowOrder = druidQuery.getOutputRowSignature().getColumnNames();
  }

  // Collect each output column's SqlTypeName so the results can be mapped back to SQL types.
  final List<SqlTypeName> columnTypes = druidQuery.getOutputRowType().getFieldList().stream()
      .map(f -> f.getType().getSqlTypeName())
      .collect(Collectors.toList());
  return execute(query, mapColumnList(rowOrder, fieldMapping), mapColumnList(columnTypes, fieldMapping));
}
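The columnTypes projection at the end is plain Calcite: walk the output row type's fields and take each field's SqlTypeName. A minimal sketch with a hand-built row type standing in for druidQuery.getOutputRowType(); RowTypeSketch and the column names are hypothetical:

import java.util.List;
import java.util.stream.Collectors;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rel.type.RelDataTypeSystem;
import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
import org.apache.calcite.sql.type.SqlTypeName;

public class RowTypeSketch {
  public static void main(String[] args) {
    RelDataTypeFactory typeFactory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);

    // Hand-built two-column row type standing in for druidQuery.getOutputRowType().
    RelDataType rowType = typeFactory.builder()
        .add("__time", SqlTypeName.TIMESTAMP)
        .add("cnt", SqlTypeName.BIGINT)
        .build();

    // Same projection as in runQuery: one SqlTypeName per output column.
    List<SqlTypeName> columnTypes = rowType.getFieldList().stream()
        .map(f -> f.getType().getSqlTypeName())
        .collect(Collectors.toList());
    System.out.println(columnTypes); // [TIMESTAMP, BIGINT]
  }
}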