Use of org.apache.calcite.sql.SqlIdentifier in project druid by druid-io.
From the class DruidPlanner, the method validateAndGetDataSourceForInsert:
/**
 * Extract target datasource from a {@link SqlInsert}, and also validate that the INSERT is of a form we support.
 * Expects the INSERT target to be either an unqualified name, or a name qualified by the default schema.
 */
private String validateAndGetDataSourceForInsert(final SqlInsert insert) throws ValidationException
{
  if (insert.isUpsert()) {
    throw new ValidationException("UPSERT is not supported.");
  }

  if (insert.getTargetColumnList() != null) {
    throw new ValidationException("INSERT with target column list is not supported.");
  }

  final SqlIdentifier tableIdentifier = (SqlIdentifier) insert.getTargetTable();
  final String dataSource;

  if (tableIdentifier.names.isEmpty()) {
    // I don't think this can happen, but include a branch for it just in case.
    throw new ValidationException("INSERT requires target table.");
  } else if (tableIdentifier.names.size() == 1) {
    // Unqualified name.
    dataSource = Iterables.getOnlyElement(tableIdentifier.names);
  } else {
    // Qualified name.
    final String defaultSchemaName =
        Iterables.getOnlyElement(CalciteSchema.from(frameworkConfig.getDefaultSchema()).path(null));

    if (tableIdentifier.names.size() == 2 && defaultSchemaName.equals(tableIdentifier.names.get(0))) {
      dataSource = tableIdentifier.names.get(1);
    } else {
      throw new ValidationException(
          StringUtils.format("Cannot INSERT into [%s] because it is not a Druid datasource.", tableIdentifier)
      );
    }
  }

  try {
    IdUtils.validateId("INSERT dataSource", dataSource);
  } catch (IllegalArgumentException e) {
    throw new ValidationException(e.getMessage());
  }

  return dataSource;
}
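For intuition, here is a minimal sketch (not from the Druid codebase; the datasource and schema names are hypothetical) of how the accepted and rejected target forms map onto SqlIdentifier.names, assuming the default schema is named "druid":

  // Unqualified: names == ["wikipedia"], so dataSource resolves to "wikipedia".
  SqlIdentifier unqualified = new SqlIdentifier("wikipedia", SqlParserPos.ZERO);

  // Qualified by the default schema: names == ["druid", "wikipedia"]; the prefix
  // matches defaultSchemaName, so dataSource again resolves to "wikipedia".
  SqlIdentifier qualified = new SqlIdentifier(Arrays.asList("druid", "wikipedia"), SqlParserPos.ZERO);

  // Qualified by any other schema: names == ["sys", "wikipedia"]; rejected with
  // "Cannot INSERT into [sys.wikipedia] because it is not a Druid datasource."
  SqlIdentifier foreign = new SqlIdentifier(Arrays.asList("sys", "wikipedia"), SqlParserPos.ZERO);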
Use of org.apache.calcite.sql.SqlIdentifier in project druid by druid-io.
From the class DruidSqlParserUtils, the method convertSqlNodeToGranularity:
/**
 * Extracts the granularity from a SqlNode representing one of the following function calls:
 * 1. FLOOR(__time TO TimeUnit)
 * 2. TIME_FLOOR(__time, 'PT1H')
 *
 * Validation of the sqlNode is contingent on the following conditions:
 * 1. sqlNode is an instance of SqlCall
 * 2. The operator is either TIME_FLOOR or FLOOR
 * 3. The call has exactly two operands
 * 4. The first operand is a simple identifier representing __time
 * 5. If the operator is TIME_FLOOR, the second operand is a literal that can be converted to the Granularity class
 * 6. If the operator is FLOOR, the second operand is a TimeUnit that can be mapped using {@link TimeUnits}
 *
 * Since it is used primarily while parsing the SqlNode, it is wrapped in {@code convertSqlNodeToGranularityThrowingParseExceptions}.
 *
 * @param sqlNode SqlNode representing a call to a function
 * @return Granularity as intended by the function call
 * @throws ParseException if the SqlNode cannot be converted to a granularity
 */
public static Granularity convertSqlNodeToGranularity(SqlNode sqlNode) throws ParseException
{
  final String genericParseFailedMessageFormatString =
      "Encountered %s after PARTITIONED BY. Expected HOUR, DAY, MONTH, YEAR, ALL TIME, FLOOR function or %s function";

  if (!(sqlNode instanceof SqlCall)) {
    throw new ParseException(
        StringUtils.format(genericParseFailedMessageFormatString, sqlNode.toString(), TimeFloorOperatorConversion.SQL_FUNCTION_NAME)
    );
  }

  SqlCall sqlCall = (SqlCall) sqlNode;
  String operatorName = sqlCall.getOperator().getName();
  Preconditions.checkArgument(
      "FLOOR".equalsIgnoreCase(operatorName) || TimeFloorOperatorConversion.SQL_FUNCTION_NAME.equalsIgnoreCase(operatorName),
      StringUtils.format(
          "PARTITIONED BY clause only supports FLOOR(__time TO <unit>) and %s(__time, period) functions",
          TimeFloorOperatorConversion.SQL_FUNCTION_NAME
      )
  );

  List<SqlNode> operandList = sqlCall.getOperandList();
  Preconditions.checkArgument(
      operandList.size() == 2,
      StringUtils.format("%s in PARTITIONED BY clause must have two arguments", operatorName)
  );

  // Check that the first argument passed to the floor function is __time.
  SqlNode timeOperandSqlNode = operandList.get(0);
  Preconditions.checkArgument(
      timeOperandSqlNode.getKind().equals(SqlKind.IDENTIFIER),
      StringUtils.format("First argument to %s in PARTITIONED BY clause can only be __time", operatorName)
  );
  SqlIdentifier timeOperandSqlIdentifier = (SqlIdentifier) timeOperandSqlNode;
  Preconditions.checkArgument(
      timeOperandSqlIdentifier.getSimple().equals(ColumnHolder.TIME_COLUMN_NAME),
      StringUtils.format("First argument to %s in PARTITIONED BY clause can only be __time", operatorName)
  );

  // If the floor function is of the form TIME_FLOOR(__time, 'PT1H')
  if (operatorName.equalsIgnoreCase(TimeFloorOperatorConversion.SQL_FUNCTION_NAME)) {
    SqlNode granularitySqlNode = operandList.get(1);
    Preconditions.checkArgument(
        granularitySqlNode.getKind().equals(SqlKind.LITERAL),
        "Second argument to TIME_FLOOR in PARTITIONED BY clause must be a period like 'PT1H'"
    );
    String granularityString = SqlLiteral.unchain(granularitySqlNode).toValue();
    Period period;
    try {
      period = new Period(granularityString);
    } catch (IllegalArgumentException e) {
      throw new ParseException(StringUtils.format("%s is an invalid period string", granularitySqlNode.toString()));
    }
    return new PeriodGranularity(period, null, null);
  } else if ("FLOOR".equalsIgnoreCase(operatorName)) {
    // If the floor function is of the form FLOOR(__time TO DAY)
    SqlNode granularitySqlNode = operandList.get(1);
    // In future versions of Calcite, this can be checked via
    // granularitySqlNode.getKind().equals(SqlKind.INTERVAL_QUALIFIER)
    Preconditions.checkArgument(
        granularitySqlNode instanceof SqlIntervalQualifier,
        "Second argument to the FLOOR function in PARTITIONED BY clause is not a valid granularity. "
        + "Please refer to the documentation of the FLOOR function"
    );
    SqlIntervalQualifier granularityIntervalQualifier = (SqlIntervalQualifier) granularitySqlNode;
    Period period = TimeUnits.toPeriod(granularityIntervalQualifier.timeUnitRange);
    Preconditions.checkNotNull(
        period,
        StringUtils.format("%s is not a valid granularity for ingestion", granularityIntervalQualifier.timeUnitRange.toString())
    );
    return new PeriodGranularity(period, null, null);
  }

  // Shouldn't reach here.
  throw new ParseException(
      StringUtils.format(genericParseFailedMessageFormatString, sqlNode.toString(), TimeFloorOperatorConversion.SQL_FUNCTION_NAME)
  );
}
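As a rough usage sketch (constructing the SqlNode by hand rather than via Druid's extended parser, which is where the node would really come from; TimeUnit here is Calcite's org.apache.calcite.avatica.util.TimeUnit):

  // Build FLOOR(__time TO DAY) and convert it; exception handling elided.
  SqlIdentifier time = new SqlIdentifier("__time", SqlParserPos.ZERO);
  SqlIntervalQualifier day = new SqlIntervalQualifier(TimeUnit.DAY, null, SqlParserPos.ZERO);
  SqlNode floorCall = SqlStdOperatorTable.FLOOR.createCall(SqlParserPos.ZERO, time, day);
  Granularity granularity = DruidSqlParserUtils.convertSqlNodeToGranularity(floorCall);
  // granularity is a PeriodGranularity with period P1D, i.e. daily buckets.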
Use of org.apache.calcite.sql.SqlIdentifier in project hazelcast by hazelcast.
From the class SqlCreateIndex, the method validate:
@Override
public void validate(SqlValidator validator, SqlValidatorScope scope) {
    if (getReplace()) {
        throw validator.newValidationError(this, RESOURCE.notSupported("OR REPLACE", "CREATE INDEX"));
    }

    // Reject duplicate column names; Set.add returns false for an already-present name.
    Set<String> columnNames = new HashSet<>();
    for (SqlNode column : columns.getList()) {
        String name = ((SqlIdentifier) requireNonNull(column)).getSimple();
        if (!columnNames.add(name)) {
            throw validator.newValidationError(column, RESOURCE.duplicateIndexAttribute(name));
        }
    }

    // Only BITMAP indexes accept options; any option on another index type is an error.
    IndexType indexType = getIndexType();
    if (!indexType.equals(IndexType.BITMAP) && !options.getList().isEmpty()) {
        throw validator.newValidationError(options, RESOURCE.unsupportedIndexType(indexType.name(), options().keySet().iterator().next()));
    }

    // Reject duplicate option names using the same idiom as the column check.
    Set<String> optionNames = new HashSet<>();
    for (SqlNode option : options.getList()) {
        String name = ((SqlOption) option).keyString();
        if (!optionNames.add(name)) {
            throw validator.newValidationError(option, RESOURCE.duplicateOption(name));
        }
    }
}
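Both duplicate checks lean on Set.add returning false when the element is already present, which avoids a separate contains lookup before insertion. The same idiom in isolation (the attribute names are hypothetical):

  Set<String> seen = new HashSet<>();
  for (String name : Arrays.asList("age", "name", "age")) {
      if (!seen.add(name)) {
          // add() returned false: "age" is already in the set.
          System.out.println("duplicate attribute: " + name);
      }
  }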
Use of org.apache.calcite.sql.SqlIdentifier in project hazelcast by hazelcast.
From the class NamedOperandCheckerProgram, the method check:
public boolean check(HazelcastCallBinding callBinding, boolean throwOnFailure) {
    boolean res = true;
    SqlCall call = callBinding.getCall();
    SqlFunction operator = (SqlFunction) call.getOperator();
    for (int i = 0; i < call.operandCount(); i++) {
        SqlNode operand = call.operand(i);
        assert operand.getKind() == SqlKind.ARGUMENT_ASSIGNMENT;
        // For an ARGUMENT_ASSIGNMENT node (name => value), operand 1 is the parameter name.
        SqlIdentifier id = ((SqlCall) operand).operand(1);
        OperandChecker checker = findOperandChecker(id, operator);
        // Accumulate the result so every operand is checked before any error is thrown.
        res &= checker.check(callBinding, false, i);
    }
    if (!res && throwOnFailure) {
        throw callBinding.newValidationSignatureError();
    }
    return res;
}
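The operand(1) lookup works because Calcite models a named argument such as cap => 100 as a call to the ARGUMENT_ASSIGNMENT operator with the value at operand 0 and the parameter name at operand 1. A minimal sketch of that shape (the cap parameter and the literal are hypothetical):

  // Build the node Calcite produces when parsing `cap => 100`.
  SqlNode value = SqlLiteral.createExactNumeric("100", SqlParserPos.ZERO);
  SqlIdentifier name = new SqlIdentifier("cap", SqlParserPos.ZERO);
  SqlCall assignment = SqlStdOperatorTable.ARGUMENT_ASSIGNMENT.createCall(SqlParserPos.ZERO, value, name);
  // assignment.getKind() == SqlKind.ARGUMENT_ASSIGNMENT, and assignment.operand(1)
  // recovers the identifier `cap`, matching the lookup in check() above.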
Use of org.apache.calcite.sql.SqlIdentifier in project hazelcast by hazelcast.
From the class HazelcastOperandTypeInference, the method inferOperandTypes:
@Override
public void inferOperandTypes(SqlCallBinding callBinding, RelDataType returnType, RelDataType[] operandTypes) {
    SqlCall call = callBinding.getCall();
    if (ValidationUtil.hasAssignment(call)) {
        // Named arguments: resolve each `name => value` pair to its declared parameter,
        // placing the parameter's type at the parameter's declared ordinal.
        RelDataTypeFactory typeFactory = callBinding.getTypeFactory();
        RelDataType[] parameterTypes = new RelDataType[parametersByName.size()];
        for (int i = 0; i < call.operandCount(); i++) {
            SqlCall assignment = call.operand(i);
            SqlIdentifier id = assignment.operand(1);
            String name = id.getSimple();
            HazelcastTableFunctionParameter parameter = parametersByName.get(name);
            if (parameter != null) {
                SqlTypeName parameterType = parameter.type();
                parameterTypes[parameter.ordinal()] = toType(parameterType, typeFactory);
            } else {
                throw SqlUtil.newContextException(id.getParserPosition(), RESOURCE.unknownArgumentName(name));
            }
        }
        // Compact the non-null types into operandTypes: the generator lambda returns the
        // caller's array, so the stream fills it in place and the result can be ignored.
        //noinspection ResultOfMethodCallIgnored
        Arrays.stream(parameterTypes).filter(Objects::nonNull).toArray(ignored -> operandTypes);
    } else {
        positionalOperandTypeInference.inferOperandTypes(callBinding, returnType, operandTypes);
    }
}
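The Arrays.stream(...).toArray(ignored -> operandTypes) line is a compaction trick: the generator lambda ignores the requested size and returns the caller-supplied operandTypes array, so the stream writes the non-null parameter types into it front-to-back and the returned reference is deliberately ignored. An equivalent, more explicit formulation (a sketch, not the Hazelcast code):

  // Copy the non-null entries of parameterTypes into operandTypes, preserving order.
  int j = 0;
  for (RelDataType type : parameterTypes) {
      if (type != null) {
          operandTypes[j++] = type;
      }
  }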