Use of org.apache.calcite.sql.type.SqlTypeName in project hazelcast by hazelcast.
The class HazelcastOperandTypeInference, method inferOperandTypes:
@Override
public void inferOperandTypes(SqlCallBinding callBinding, RelDataType returnType, RelDataType[] operandTypes) {
    SqlCall call = callBinding.getCall();
    if (ValidationUtil.hasAssignment(call)) {
        // Named arguments: each operand is an assignment (value => name),
        // so resolve the declared type of each parameter by name.
        RelDataTypeFactory typeFactory = callBinding.getTypeFactory();
        RelDataType[] parameterTypes = new RelDataType[parametersByName.size()];
        for (int i = 0; i < call.operandCount(); i++) {
            SqlCall assignment = call.operand(i);
            SqlIdentifier id = assignment.operand(1);
            String name = id.getSimple();
            HazelcastTableFunctionParameter parameter = parametersByName.get(name);
            if (parameter != null) {
                SqlTypeName parameterType = parameter.type();
                parameterTypes[parameter.ordinal()] = toType(parameterType, typeFactory);
            } else {
                throw SqlUtil.newContextException(id.getParserPosition(), RESOURCE.unknownArgumentName(name));
            }
        }
        // Compact the inferred types into the caller-provided operandTypes array.
        //noinspection ResultOfMethodCallIgnored
        Arrays.stream(parameterTypes).filter(Objects::nonNull).toArray(ignored -> operandTypes);
    } else {
        positionalOperandTypeInference.inferOperandTypes(callBinding, returnType, operandTypes);
    }
}
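The toType helper referenced above is not shown in the snippet. A minimal sketch of what it plausibly does, assuming it only delegates to Calcite's type factory (the nullability choice here is an assumption, not confirmed by the snippet):

// Hypothetical helper, for illustration only: map a SqlTypeName to a
// nullable RelDataType via the binding's type factory.
private static RelDataType toType(SqlTypeName typeName, RelDataTypeFactory typeFactory) {
    RelDataType type = typeFactory.createSqlType(typeName);
    return typeFactory.createTypeWithNullability(type, true);
}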
Use of org.apache.calcite.sql.type.SqlTypeName in project hazelcast by hazelcast.
The class HazelcastTypeFactory, method leastRestrictive:
@Override
public RelDataType leastRestrictive(List<RelDataType> types) {
    // Special case for JSON - see https://github.com/hazelcast/hazelcast/issues/20303
    // SqlTypeName for JSON is OTHER, there's missing handling for OTHER in SqlTypeAssignmentRule,
    // and we don't know how to add it there. And even if we did, OTHER can represent both JSON and
    // a JAVA object, and these aren't assignable.
    boolean containsNullable = false;
    boolean allJson = true;
    boolean allJsonOrVarchar = true;
    for (RelDataType type : types) {
        if (!(type instanceof HazelcastJsonType)) {
            allJson = false;
        }
        if (!(type instanceof HazelcastJsonType) && type.getSqlTypeName() != VARCHAR) {
            allJsonOrVarchar = false;
        }
        if (type.isNullable()) {
            containsNullable = true;
        }
    }
    if (allJson) {
        return containsNullable ? HazelcastJsonType.TYPE_NULLABLE : HazelcastJsonType.TYPE;
    }
    if (allJsonOrVarchar) {
        return createSqlType(VARCHAR, containsNullable);
    }
    // Calcite returns BIGINT for all integer types and DOUBLE for all inexact fractional types.
    // This code allows us to use narrower types in those cases.
    RelDataType selected = super.leastRestrictive(types);
    if (selected == null) {
        return null;
    }
    SqlTypeName selectedTypeName = selected.getSqlTypeName();
    if (HazelcastTypeUtils.isNumericIntegerType(selectedTypeName)) {
        return leastRestrictive(selected, types);
    }
    if (selectedTypeName == DOUBLE) {
        // Narrow DOUBLE back to REAL if no operand was actually a DOUBLE.
        boolean seenDouble = false;
        boolean seenReal = false;
        for (RelDataType type : types) {
            if (type.getSqlTypeName() == DOUBLE) {
                seenDouble = true;
                break;
            }
            if (type.getSqlTypeName() == REAL) {
                seenReal = true;
            }
        }
        if (!seenDouble && seenReal) {
            selected = createSqlType(REAL, selected.isNullable());
        }
    }
    return selected;
}
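A hedged usage sketch of the narrowing behavior, assuming HazelcastTypeFactory exposes an INSTANCE singleton (a common factory convention, not shown in the snippet):

// Illustrative: combine TINYINT and SMALLINT operand types.
HazelcastTypeFactory factory = HazelcastTypeFactory.INSTANCE;
RelDataType tinyint = factory.createSqlType(SqlTypeName.TINYINT);
RelDataType smallint = factory.createSqlType(SqlTypeName.SMALLINT);
// Per the comments above, the integer branch aims to pick a narrower
// result than the BIGINT that stock Calcite can produce.
RelDataType least = factory.leastRestrictive(Arrays.asList(tinyint, smallint));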
Use of org.apache.calcite.sql.type.SqlTypeName in project hive by apache.
The class ASTBuilder, method literal:
public static ASTNode literal(RexLiteral literal) {
    Object val = null;
    int type = 0;
    SqlTypeName sqlType = literal.getType().getSqlTypeName();
    // First pass: return TOK_NULL immediately for null literals.
    switch (sqlType) {
        case BINARY:
        case DATE:
        case TIME:
        case TIMESTAMP:
        case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
        case INTERVAL_DAY:
        case INTERVAL_DAY_HOUR:
        case INTERVAL_DAY_MINUTE:
        case INTERVAL_DAY_SECOND:
        case INTERVAL_HOUR:
        case INTERVAL_HOUR_MINUTE:
        case INTERVAL_HOUR_SECOND:
        case INTERVAL_MINUTE:
        case INTERVAL_MINUTE_SECOND:
        case INTERVAL_MONTH:
        case INTERVAL_SECOND:
        case INTERVAL_YEAR:
        case INTERVAL_YEAR_MONTH:
        case MAP:
        case ARRAY:
        case ROW:
            if (literal.getValue() == null) {
                return ASTBuilder.construct(HiveParser.TOK_NULL, "TOK_NULL").node();
            }
            break;
        case TINYINT:
        case SMALLINT:
        case INTEGER:
        case BIGINT:
        case DOUBLE:
        case DECIMAL:
        case FLOAT:
        case REAL:
        case VARCHAR:
        case CHAR:
        case BOOLEAN:
            if (literal.getValue3() == null) {
                return ASTBuilder.construct(HiveParser.TOK_NULL, "TOK_NULL").node();
            }
    }
    // Second pass: render the value and pick the Hive parser token type.
    switch (sqlType) {
        case TINYINT:
        case SMALLINT:
        case INTEGER:
        case BIGINT:
            val = literal.getValue3();
            // Hive distinguishes the integral types, most importantly for
            // IntegralLiteral, so append the matching type suffix.
            if (val instanceof BigDecimal) {
                val = ((BigDecimal) val).longValue();
            }
            switch (sqlType) {
                case TINYINT:
                    val += "Y";
                    break;
                case SMALLINT:
                    val += "S";
                    break;
                case INTEGER:
                    val += "";
                    break;
                case BIGINT:
                    val += "L";
                    break;
            }
            type = HiveParser.IntegralLiteral;
            break;
        case DOUBLE:
            val = literal.getValue3() + "D";
            type = HiveParser.NumberLiteral;
            break;
        case DECIMAL:
            val = literal.getValue3() + "BD";
            type = HiveParser.NumberLiteral;
            break;
        case FLOAT:
        case REAL:
            val = literal.getValue3() + "F";
            type = HiveParser.Number;
            break;
        case VARCHAR:
        case CHAR:
            val = literal.getValue3();
            String escapedVal = BaseSemanticAnalyzer.escapeSQLString(String.valueOf(val));
            type = HiveParser.StringLiteral;
            val = "'" + escapedVal + "'";
            break;
        case BOOLEAN:
            val = literal.getValue3();
            type = ((Boolean) val).booleanValue() ? HiveParser.KW_TRUE : HiveParser.KW_FALSE;
            break;
        case DATE:
            val = "'" + literal.getValueAs(DateString.class).toString() + "'";
            type = HiveParser.TOK_DATELITERAL;
            break;
        case TIME:
            val = "'" + literal.getValueAs(TimeString.class).toString() + "'";
            type = HiveParser.TOK_TIMESTAMPLITERAL;
            break;
        case TIMESTAMP:
            val = "'" + literal.getValueAs(TimestampString.class).toString() + "'";
            type = HiveParser.TOK_TIMESTAMPLITERAL;
            break;
        case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
            // Calcite stores timestamp with local time-zone in UTC internally, thus
            // when we bring it back, we need to add the UTC suffix.
            val = "'" + literal.getValueAs(TimestampString.class).toString() + " UTC'";
            type = HiveParser.TOK_TIMESTAMPLOCALTZLITERAL;
            break;
        case INTERVAL_YEAR:
        case INTERVAL_MONTH:
        case INTERVAL_YEAR_MONTH: {
            type = HiveParser.TOK_INTERVAL_YEAR_MONTH_LITERAL;
            BigDecimal monthsBd = (BigDecimal) literal.getValue();
            HiveIntervalYearMonth intervalYearMonth = new HiveIntervalYearMonth(monthsBd.intValue());
            val = "'" + intervalYearMonth.toString() + "'";
            break;
        }
        case INTERVAL_DAY:
        case INTERVAL_DAY_HOUR:
        case INTERVAL_DAY_MINUTE:
        case INTERVAL_DAY_SECOND:
        case INTERVAL_HOUR:
        case INTERVAL_HOUR_MINUTE:
        case INTERVAL_HOUR_SECOND:
        case INTERVAL_MINUTE:
        case INTERVAL_MINUTE_SECOND:
        case INTERVAL_SECOND: {
            type = HiveParser.TOK_INTERVAL_DAY_TIME_LITERAL;
            BigDecimal millisBd = (BigDecimal) literal.getValue();
            // Calcite literal is in millis, convert to seconds
            BigDecimal secsBd = millisBd.divide(BigDecimal.valueOf(1000));
            HiveIntervalDayTime intervalDayTime = new HiveIntervalDayTime(secsBd);
            val = "'" + intervalDayTime.toString() + "'";
            break;
        }
        case NULL:
            type = HiveParser.TOK_NULL;
            break;
        // BINARY and ROW types should not be seen here.
        case BINARY:
        case ROW:
        default:
            throw new RuntimeException("Unsupported Type: " + sqlType);
    }
    return (ASTNode) ParseDriver.adaptor.create(type, String.valueOf(val));
}
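A hedged sketch of exercising the conversion, assuming a RexBuilder and a type factory are in scope (the setup is illustrative; only ASTBuilder.literal comes from the snippet above):

// Illustrative: build a Calcite DECIMAL literal and convert it to a Hive AST node.
RexBuilder rexBuilder = new RexBuilder(typeFactory); // typeFactory assumed in scope
RexLiteral decimalLiteral = rexBuilder.makeExactLiteral(new BigDecimal("10.5"));
ASTNode node = ASTBuilder.literal(decimalLiteral);
// Per the DECIMAL branch above, the node text carries a "BD" suffix: 10.5BD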
Use of org.apache.calcite.sql.type.SqlTypeName in project druid by alibaba.
The class CalciteMySqlNodeVisitor, method toSqlTypeName:
private SqlTypeName toSqlTypeName(SQLDataType dataType) {
    long nameHashCode64 = dataType.nameHashCode64();
    SqlTypeName sqlTypeName = nameHashCode64SqlTypeNameMapping.get(nameHashCode64);
    if (sqlTypeName != null) {
        return sqlTypeName;
    }
    // Types missing from the mapping are not supported yet.
    throw new FastsqlException("TODO");
}
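A hedged sketch of how the nameHashCode64SqlTypeNameMapping lookup table could be populated (FnvHash is Alibaba Druid's hashing utility; these particular entries are illustrative, not taken from the project):

// Illustrative initialization: key the map by the FNV-1a hash of the
// lower-cased MySQL type name, matching SQLDataType.nameHashCode64().
Map<Long, SqlTypeName> nameHashCode64SqlTypeNameMapping = new HashMap<>();
nameHashCode64SqlTypeNameMapping.put(FnvHash.fnv1a_64_lower("int"), SqlTypeName.INTEGER);
nameHashCode64SqlTypeNameMapping.put(FnvHash.fnv1a_64_lower("varchar"), SqlTypeName.VARCHAR);
nameHashCode64SqlTypeNameMapping.put(FnvHash.fnv1a_64_lower("datetime"), SqlTypeName.TIMESTAMP);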
Use of org.apache.calcite.sql.type.SqlTypeName in project spf4j by zolyfarkas.
The class Types, method from:
public static Schema from(final RelDataType dataType) {
    SqlTypeName sqlTypeName = dataType.getSqlTypeName();
    Schema result;
    switch (sqlTypeName) {
        case ROW:
            // Records are returned directly, bypassing the nullable-union step below.
            List<RelDataTypeField> fieldList = dataType.getFieldList();
            List<Schema.Field> aFields = new ArrayList<>(fieldList.size());
            for (RelDataTypeField field : fieldList) {
                aFields.add(AvroCompatUtils.createField(field.getName(), from(field.getType()), null, null, false, false, Order.IGNORE));
            }
            return Schema.createRecord(aFields);
        case INTEGER:
            result = Schema.create(Schema.Type.INT);
            break;
        case BIGINT:
            result = Schema.create(Schema.Type.LONG);
            break;
        case VARCHAR:
            result = Schema.create(Schema.Type.STRING);
            break;
        case DATE:
            result = Schemas.dateString();
            break;
        case TIMESTAMP:
            result = Schemas.instantString();
            break;
        case BINARY:
            // Use a fixed-size schema when the binary length (precision) is known.
            int precision = dataType.getPrecision();
            if (precision > 0) {
                result = Schema.createFixed(null, null, null, precision);
            } else {
                result = Schema.create(Schema.Type.BYTES);
            }
            break;
        case DOUBLE:
        case REAL:
        case DECIMAL:
            result = Schema.create(Schema.Type.DOUBLE);
            break;
        case FLOAT:
            result = Schema.create(Schema.Type.FLOAT);
            break;
        case BOOLEAN:
            result = Schema.create(Schema.Type.BOOLEAN);
            break;
        case ARRAY:
        case MULTISET:
            result = Schema.createArray(from(dataType.getComponentType()));
            break;
        case MAP:
            result = Schema.createMap(from(dataType.getValueType()));
            break;
        default:
            throw new UnsupportedOperationException("Unsupported data Type " + dataType);
    }
    // Nullable SQL types become a union of null and the base schema.
    if (dataType.isNullable()) {
        result = Schema.createUnion(Schema.create(Schema.Type.NULL), result);
    }
    return result;
}
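A hedged usage sketch of the mapping, assuming a standard Calcite type factory (the setup is illustrative; only Types.from comes from the snippet above):

// Illustrative: convert a two-column Calcite row type into an Avro record schema.
RelDataTypeFactory typeFactory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
RelDataType rowType = typeFactory.builder()
        .add("id", SqlTypeName.BIGINT)
        .add("name", SqlTypeName.VARCHAR)
        .build();
Schema schema = Types.from(rowType);
// Per the ROW branch above: an Avro record with a LONG field "id" and a STRING field "name".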