Use of org.apache.calcite.sql.type.SqlTypeName in project calcite by apache.
The class DruidSqlCastConverter, method toDruidExpression.
@Override
public String toDruidExpression(RexNode rexNode, RelDataType topRel,
    DruidQuery druidQuery) {
  final RexNode operand = ((RexCall) rexNode).getOperands().get(0);
  final String operandExpression =
      DruidExpressions.toDruidExpression(operand, topRel, druidQuery);
  if (operandExpression == null) {
    return null;
  }
  final SqlTypeName fromType = operand.getType().getSqlTypeName();
  String fromTypeString = dateTimeFormatString(fromType);
  final SqlTypeName toType = rexNode.getType().getSqlTypeName();
  final String timeZoneConf = druidQuery.getConnectionConfig().timeZone();
  final TimeZone timeZone =
      TimeZone.getTimeZone(timeZoneConf == null ? "UTC" : timeZoneConf);
  final boolean nullEqualToEmpty = druidQuery.getConnectionConfig().nullEqualToEmpty();
  if (fromTypeString == null) {
    fromTypeString = nullEqualToEmpty ? "" : null;
  }
  if (SqlTypeName.CHAR_TYPES.contains(fromType)
      && SqlTypeName.DATETIME_TYPES.contains(toType)) {
    // Cast chars to dates.
    return castCharToDateTime(
        toType == SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE ? timeZone : DateTimeUtils.UTC_ZONE,
        operandExpression, toType, fromTypeString);
  } else if (SqlTypeName.DATETIME_TYPES.contains(fromType)
      && SqlTypeName.CHAR_TYPES.contains(toType)) {
    // Cast dates to chars.
    return castDateTimeToChar(
        fromType == SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE ? timeZone : DateTimeUtils.UTC_ZONE,
        operandExpression, fromType);
  } else if (SqlTypeName.DATETIME_TYPES.contains(fromType)
      && toType == SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE) {
    if (timeZone.equals(DateTimeUtils.UTC_ZONE)) {
      // We do not need to do anything.
      return operandExpression;
    }
    // Cast to timestamp with local time zone.
    return castCharToDateTime(timeZone,
        castDateTimeToChar(DateTimeUtils.UTC_ZONE, operandExpression, fromType),
        toType, fromTypeString);
  } else if (fromType == SqlTypeName.TIMESTAMP_WITH_LOCAL_TIME_ZONE
      && SqlTypeName.DATETIME_TYPES.contains(toType)) {
    if (toType != SqlTypeName.DATE && timeZone.equals(DateTimeUtils.UTC_ZONE)) {
      // We do not need to do anything.
      return operandExpression;
    }
    // Cast timestamp with local time zone to other types.
    return castCharToDateTime(DateTimeUtils.UTC_ZONE,
        castDateTimeToChar(timeZone, operandExpression, fromType),
        toType, fromTypeString);
  } else {
    // Handle other casts.
    final DruidType fromExprType = DruidExpressions.EXPRESSION_TYPES.get(fromType);
    final DruidType toExprType = DruidExpressions.EXPRESSION_TYPES.get(toType);
    if (fromExprType == null || toExprType == null) {
      // Unknown types; bail out.
      return null;
    }
    final String typeCastExpression;
    if (fromExprType != toExprType) {
      typeCastExpression =
          DruidQuery.format("CAST(%s, '%s')", operandExpression, toExprType.toString());
    } else {
      // Same type, so it is OK to skip the CAST.
      typeCastExpression = operandExpression;
    }
    if (toType == SqlTypeName.DATE) {
      // Floor to day when casting to DATE.
      return DruidExpressions.applyTimestampFloor(typeCastExpression,
          Period.days(1).toString(), "",
          TimeZone.getTimeZone(druidQuery.getConnectionConfig().timeZone()));
    } else {
      return typeCastExpression;
    }
  }
}
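The branching above is driven entirely by SqlTypeName family sets. Below is a minimal, self-contained sketch of that pattern; the class CastBranchSketch and method branchFor are made up for illustration and are not part of Calcite.

import org.apache.calcite.sql.type.SqlTypeName;

public class CastBranchSketch {
  // Labels the cast branch the same way the converter above selects it:
  // by testing membership in SqlTypeName.CHAR_TYPES and DATETIME_TYPES.
  static String branchFor(SqlTypeName fromType, SqlTypeName toType) {
    if (SqlTypeName.CHAR_TYPES.contains(fromType)
        && SqlTypeName.DATETIME_TYPES.contains(toType)) {
      return "char -> datetime";
    } else if (SqlTypeName.DATETIME_TYPES.contains(fromType)
        && SqlTypeName.CHAR_TYPES.contains(toType)) {
      return "datetime -> char";
    }
    return "other";
  }

  public static void main(String[] args) {
    System.out.println(branchFor(SqlTypeName.VARCHAR, SqlTypeName.TIMESTAMP)); // char -> datetime
    System.out.println(branchFor(SqlTypeName.INTEGER, SqlTypeName.BIGINT));    // other
  }
}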
Use of org.apache.calcite.sql.type.SqlTypeName in project calcite by apache.
The class InnodbSchema, method getRelDataType.
RelProtoDataType getRelDataType(String tableName) {
  // Temporary type factory, just for the duration of this method. Allowable
  // because we're creating a proto-type, not a type; before being used, the
  // proto-type will be copied into a real type factory.
  final RelDataTypeFactory typeFactory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
  final RelDataTypeFactory.Builder fieldInfo = typeFactory.builder();
  if (!tableReaderFactory.existTableDef(tableName)) {
    throw new RuntimeException("Table definition " + tableName + " not found");
  }
  TableDef tableDef = tableReaderFactory.getTableDef(tableName);
  for (Column column : tableDef.getColumnList()) {
    final SqlTypeName sqlTypeName = COLUMN_TYPE_TO_SQL_TYPE.lookup(column.getType());
    final int precision;
    final int scale;
    switch (column.getType()) {
    case ColumnType.TIMESTAMP:
    case ColumnType.TIME:
    case ColumnType.DATETIME:
      precision = column.getPrecision();
      scale = 0;
      break;
    default:
      precision = column.getPrecision();
      scale = column.getScale();
      break;
    }
    if (sqlTypeName.allowsPrecScale(true, true)
        && column.getPrecision() >= 0 && column.getScale() >= 0) {
      fieldInfo.add(column.getName(), sqlTypeName, precision, scale);
    } else if (sqlTypeName.allowsPrecNoScale() && precision >= 0) {
      fieldInfo.add(column.getName(), sqlTypeName, precision);
    } else {
      assert sqlTypeName.allowsNoPrecNoScale();
      fieldInfo.add(column.getName(), sqlTypeName);
    }
    fieldInfo.nullable(column.isNullable());
  }
  return RelDataTypeImpl.proto(fieldInfo.build());
}
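The precision/scale handling above follows a common pattern when building row types: SqlTypeName itself reports which combinations it accepts. A minimal sketch of that decision in isolation (the class ProtoTypeSketch and its two columns are invented for illustration):

import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.rel.type.RelDataTypeImpl;
import org.apache.calcite.rel.type.RelDataTypeSystem;
import org.apache.calcite.rel.type.RelProtoDataType;
import org.apache.calcite.sql.type.SqlTypeFactoryImpl;
import org.apache.calcite.sql.type.SqlTypeName;

public class ProtoTypeSketch {
  // Builds a two-column proto row type, choosing the builder overload
  // from the SqlTypeName's precision/scale capabilities.
  static RelProtoDataType rowType() {
    final RelDataTypeFactory typeFactory =
        new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
    final RelDataTypeFactory.Builder builder = typeFactory.builder();
    addColumn(builder, "id", SqlTypeName.BIGINT, -1, -1);
    addColumn(builder, "price", SqlTypeName.DECIMAL, 10, 2);
    return RelDataTypeImpl.proto(builder.build());
  }

  static void addColumn(RelDataTypeFactory.Builder builder, String name,
      SqlTypeName typeName, int precision, int scale) {
    if (typeName.allowsPrecScale(true, true) && precision >= 0 && scale >= 0) {
      builder.add(name, typeName, precision, scale);
    } else if (typeName.allowsPrecNoScale() && precision >= 0) {
      builder.add(name, typeName, precision);
    } else {
      builder.add(name, typeName);
    }
  }
}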
Use of org.apache.calcite.sql.type.SqlTypeName in project calcite by apache.
The class SplunkPushDownRule, method toString.
private static String toString(boolean like, RexLiteral literal) {
  String value = null;
  SqlTypeName litSqlType = literal.getTypeName();
  if (SqlTypeName.NUMERIC_TYPES.contains(litSqlType)) {
    value = literal.getValue().toString();
  } else if (litSqlType == SqlTypeName.CHAR) {
    value = ((NlsString) literal.getValue()).getValue();
    if (like) {
      value = value.replace("%", "*");
    }
    value = searchEscape(value);
  }
  return value;
}
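A small, self-contained sketch of the same literal inspection; the class LiteralSketch is made up, and it only applies the %-to-* rewrite, without the rule's private searchEscape helper:

import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
import org.apache.calcite.rex.RexBuilder;
import org.apache.calcite.rex.RexLiteral;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.calcite.util.NlsString;

public class LiteralSketch {
  public static void main(String[] args) {
    // Build a CHAR literal and check its SqlTypeName before rendering it,
    // as the rule above does when pushing predicates down to Splunk.
    final RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
    final RexLiteral literal = rexBuilder.makeLiteral("10%");
    if (literal.getTypeName() == SqlTypeName.CHAR) {
      String value = ((NlsString) literal.getValue()).getValue();
      System.out.println(value.replace("%", "*")); // 10*
    }
  }
}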
Use of org.apache.calcite.sql.type.SqlTypeName in project calcite by apache.
The class SqlTypeNameTest, method testNclob.
@Test
void testNclob() {
  SqlTypeName tn = SqlTypeName.getNameForJdbcType(ExtraSqlTypes.NCLOB);
  // NCLOB is not supported yet.
  assertEquals(null, tn, "NCLOB maps to non-null type");
}
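For contrast, a minimal sketch (the class JdbcTypeLookupSketch is invented for illustration) showing getNameForJdbcType on JDBC types that Calcite does support; the method returns null only for unsupported JDBC types such as NCLOB above:

import java.sql.Types;
import org.apache.calcite.sql.type.SqlTypeName;

public class JdbcTypeLookupSketch {
  public static void main(String[] args) {
    // Map java.sql.Types constants to SqlTypeName values.
    System.out.println(SqlTypeName.getNameForJdbcType(Types.VARCHAR));   // VARCHAR
    System.out.println(SqlTypeName.getNameForJdbcType(Types.TIMESTAMP)); // TIMESTAMP
  }
}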
Use of org.apache.calcite.sql.type.SqlTypeName in project calcite by apache.
The class DruidQuery, method getJsonAggregation.
@Nullable
private static JsonAggregation getJsonAggregation(String name, AggregateCall aggCall,
    RexNode filterNode, String fieldName, String aggExpression, DruidQuery druidQuery) {
  final boolean fractional;
  final RelDataType type = aggCall.getType();
  final SqlTypeName sqlTypeName = type.getSqlTypeName();
  final JsonAggregation aggregation;
  final CalciteConnectionConfig config = druidQuery.getConnectionConfig();
  if (SqlTypeFamily.APPROXIMATE_NUMERIC.getTypeNames().contains(sqlTypeName)) {
    fractional = true;
  } else if (SqlTypeFamily.INTEGER.getTypeNames().contains(sqlTypeName)) {
    fractional = false;
  } else if (SqlTypeFamily.EXACT_NUMERIC.getTypeNames().contains(sqlTypeName)) {
    // Decimal.
    assert sqlTypeName == SqlTypeName.DECIMAL;
    if (type.getScale() == 0) {
      fractional = false;
    } else {
      fractional = true;
    }
  } else {
    // Cannot handle this aggregate function type.
    return null;
  }
  // Convert from a complex metric.
  ComplexMetric complexMetric = druidQuery.druidTable.resolveComplexMetric(fieldName, aggCall);
  switch (aggCall.getAggregation().getKind()) {
  case COUNT:
    if (aggCall.isDistinct()) {
      if (aggCall.isApproximate() || config.approximateDistinctCount()) {
        if (complexMetric == null) {
          aggregation = new JsonCardinalityAggregation("cardinality", name,
              ImmutableList.of(fieldName));
        } else {
          aggregation = new JsonAggregation(complexMetric.getMetricType(), name,
              complexMetric.getMetricName(), null);
        }
        break;
      } else {
        // Bail out when approximate results were not declared acceptable.
        return null;
      }
    }
    if (aggCall.getArgList().size() == 1 && !aggCall.isDistinct()) {
      // We have count(column); push it as count(*) where column is not null.
      final DruidJsonFilter matchNulls;
      if (fieldName == null) {
        matchNulls = new DruidJsonFilter.JsonExpressionFilter(aggExpression + " == null");
      } else {
        matchNulls = DruidJsonFilter.getSelectorFilter(fieldName, null, null);
      }
      aggregation = new JsonFilteredAggregation(DruidJsonFilter.toNotDruidFilter(matchNulls),
          new JsonAggregation("count", name, fieldName, aggExpression));
    } else if (!aggCall.isDistinct()) {
      aggregation = new JsonAggregation("count", name, fieldName, aggExpression);
    } else {
      aggregation = null;
    }
    break;
  case SUM:
  case SUM0:
    aggregation = new JsonAggregation(fractional ? "doubleSum" : "longSum", name,
        fieldName, aggExpression);
    break;
  case MIN:
    aggregation = new JsonAggregation(fractional ? "doubleMin" : "longMin", name,
        fieldName, aggExpression);
    break;
  case MAX:
    aggregation = new JsonAggregation(fractional ? "doubleMax" : "longMax", name,
        fieldName, aggExpression);
    break;
  default:
    return null;
  }
  if (aggregation == null) {
    return null;
  }
  // Translate filters.
  if (filterNode != null) {
    DruidJsonFilter druidFilter = DruidJsonFilter.toDruidFilters(filterNode,
        druidQuery.table.getRowType(), druidQuery, druidQuery.getCluster().getRexBuilder());
    if (druidFilter == null) {
      // Cannot translate the filter.
      return null;
    }
    return new JsonFilteredAggregation(druidFilter, aggregation);
  }
  return aggregation;
}
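The fractional-versus-integral decision above depends only on the SqlTypeName of the aggregate call. A minimal sketch of that decision in isolation (the class AggregatorNameSketch and method sumAggregator are made up for illustration):

import org.apache.calcite.sql.type.SqlTypeFamily;
import org.apache.calcite.sql.type.SqlTypeName;

public class AggregatorNameSketch {
  // Picks a Druid SUM aggregator name from the aggregate's SqlTypeName,
  // mirroring the fractional/integral branching above.
  static String sumAggregator(SqlTypeName sqlTypeName, int scale) {
    final boolean fractional;
    if (SqlTypeFamily.APPROXIMATE_NUMERIC.getTypeNames().contains(sqlTypeName)) {
      fractional = true;
    } else if (SqlTypeFamily.INTEGER.getTypeNames().contains(sqlTypeName)) {
      fractional = false;
    } else if (sqlTypeName == SqlTypeName.DECIMAL) {
      fractional = scale != 0;
    } else {
      return null; // cannot handle this type
    }
    return fractional ? "doubleSum" : "longSum";
  }

  public static void main(String[] args) {
    System.out.println(sumAggregator(SqlTypeName.DOUBLE, 0));  // doubleSum
    System.out.println(sumAggregator(SqlTypeName.INTEGER, 0)); // longSum
  }
}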