Use of org.apache.flink.table.api.ValidationException in project flink by apache.
The class CommonExecSinkITCase, method testNullEnforcer.
@Test
public void testNullEnforcer() throws ExecutionException, InterruptedException {
    final StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
    final List<Row> rows =
            Arrays.asList(
                    Row.of(1, "Apache", 11), Row.of(2, null, 22),
                    Row.of(null, "Flink", 33), Row.of(null, null, 44));
    final SharedReference<List<RowData>> results = sharedObjects.add(new ArrayList<>());
    tableEnv.createTable(
            "T1",
            TableFactoryHarness.newBuilder()
                    .schema(schemaForNotNullEnforcer())
                    .source(new TestSource(rows))
                    .sink(buildRuntimeSinkProvider(new RecordWriter(results)))
                    .build());
    // Default config (ERROR): writing a null into a NOT NULL column fails the job
    final ExecutionException ee =
            assertThrows(
                    ExecutionException.class,
                    () -> tableEnv.executeSql("INSERT INTO T1 SELECT * FROM T1").await());
    assertThat(
            ExceptionUtils.findThrowableWithMessage(
                            ee,
                            "Column 'b' is NOT NULL, however, a null value is being written into it. "
                                    + "You can set job configuration 'table.exec.sink.not-null-enforcer'='DROP' "
                                    + "to suppress this exception and drop such records silently.")
                    .isPresent())
            .isTrue();
    // Test not including a NOT NULL column
    results.get().clear();
    final ValidationException ve =
            assertThrows(
                    ValidationException.class,
                    () -> tableEnv.executeSql("INSERT INTO T1(a, b) SELECT (a, b) FROM T1").await());
    assertThat(ve.getMessage())
            .isEqualTo(
                    "SQL validation failed. At line 0, column 0: Column 'c' has no default "
                            + "value and does not allow NULLs");
    // Change config option to DROP, to silently drop records containing nulls
    // instead of failing the job
    try {
        tableEnv.getConfig()
                .set(TABLE_EXEC_SINK_NOT_NULL_ENFORCER.key(),
                        ExecutionConfigOptions.NotNullEnforcer.DROP.name());
        results.get().clear();
        tableEnv.executeSql("INSERT INTO T1 SELECT * FROM T1").await();
        assertThat(results.get().size()).isEqualTo(2);
        assertThat(results.get().get(0).getInt(0)).isEqualTo(1);
        assertThat(results.get().get(0).getString(1).toString()).isEqualTo("Apache");
        assertThat(results.get().get(0).getInt(2)).isEqualTo(11);
        assertThat(results.get().get(1).isNullAt(0)).isTrue();
        assertThat(results.get().get(1).getString(1).toString()).isEqualTo("Flink");
        assertThat(results.get().get(1).getInt(2)).isEqualTo(33);
    } finally {
        tableEnv.getConfig()
                .set(TABLE_EXEC_SINK_NOT_NULL_ENFORCER.key(),
                        ExecutionConfigOptions.NotNullEnforcer.ERROR.name());
    }
}
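The behavior exercised by this test is governed by a single option, table.exec.sink.not-null-enforcer. Below is a minimal, self-contained sketch of flipping it at runtime; the environment setup mirrors the test, and the class name is illustrative:

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.api.config.ExecutionConfigOptions;

public class NotNullEnforcerExample {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        // ERROR (the default) fails the job when a null value reaches a
        // NOT NULL column; DROP discards such records silently instead.
        tableEnv.getConfig()
                .set(ExecutionConfigOptions.TABLE_EXEC_SINK_NOT_NULL_ENFORCER.key(),
                        ExecutionConfigOptions.NotNullEnforcer.DROP.name());
    }
}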
Use of org.apache.flink.table.api.ValidationException in project flink by apache.
The class SchemaValidator, method validate.
@Override
public void validate(DescriptorProperties properties) {
    Map<String, String> names = properties.getIndexedProperty(SCHEMA, SCHEMA_NAME);
    Map<String, String> legacyTypes = properties.getIndexedProperty(SCHEMA, SCHEMA_TYPE);
    Map<String, String> dataTypes = properties.getIndexedProperty(SCHEMA, SCHEMA_DATA_TYPE);
    if (names.isEmpty() && legacyTypes.isEmpty() && dataTypes.isEmpty()) {
        throw new ValidationException(
                format("Could not find the required schema in property '%s'.", SCHEMA));
    }
    boolean proctimeFound = false;
    for (int i = 0; i < Math.max(names.size(), legacyTypes.size()); i++) {
        properties.validateString(SCHEMA + "." + i + "." + SCHEMA_NAME, false, 1);
        properties.validateDataType(
                SCHEMA + "." + i + "." + SCHEMA_DATA_TYPE,
                SCHEMA + "." + i + "." + SCHEMA_TYPE,
                false);
        properties.validateString(SCHEMA + "." + i + "." + SCHEMA_FROM, true, 1);
        // either proctime or rowtime
        String proctime = SCHEMA + "." + i + "." + SCHEMA_PROCTIME;
        String rowtime = SCHEMA + "." + i + "." + ROWTIME;
        if (properties.containsKey(proctime)) {
            // check the environment
            if (!isStreamEnvironment) {
                throw new ValidationException(
                        format("Property '%s' is not allowed in a batch environment.", proctime));
            } else if (proctimeFound) {
                // check for only one proctime attribute
                throw new ValidationException("A proctime attribute must only be defined once.");
            }
            // check proctime
            properties.validateBoolean(proctime, false);
            proctimeFound = properties.getBoolean(proctime);
            // no rowtime
            properties.validatePrefixExclusion(rowtime);
        } else if (properties.hasPrefix(rowtime)) {
            // check rowtime
            RowtimeValidator rowtimeValidator =
                    new RowtimeValidator(
                            supportsSourceTimestamps,
                            supportsSourceWatermarks,
                            SCHEMA + "." + i + ".");
            rowtimeValidator.validate(properties);
            // no proctime
            properties.validateExclusion(proctime);
        }
    }
}
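To see these checks fire in isolation, the validator can be handed a hand-built property map. A minimal sketch using the legacy descriptor API (the key layout follows the schema.<index>.<suffix> pattern iterated above; the column name is made up):

import org.apache.flink.table.descriptors.DescriptorProperties;
import org.apache.flink.table.descriptors.SchemaValidator;

public class SchemaValidatorExample {
    public static void main(String[] args) {
        DescriptorProperties properties = new DescriptorProperties();
        properties.putString("schema.0.name", "ts");
        properties.putString("schema.0.data-type", "TIMESTAMP(3)");
        properties.putString("schema.0.proctime", "true");
        // isStreamEnvironment is false here, so the proctime branch throws:
        // "Property 'schema.0.proctime' is not allowed in a batch environment."
        new SchemaValidator(false, true, true).validate(properties);
    }
}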
Use of org.apache.flink.table.api.ValidationException in project flink by apache.
The class SchemaValidator, method deriveFieldMapping.
/**
* Finds a table source field mapping.
*
* @param properties The properties describing a schema.
* @param inputType The input type that a connector and/or format produces. This parameter can
* be used to resolve a rowtime field against an input field.
*/
public static Map<String, String> deriveFieldMapping(
        DescriptorProperties properties, Optional<TypeInformation<?>> inputType) {
    Map<String, String> mapping = new HashMap<>();
    TableSchema schema = properties.getTableSchema(SCHEMA);
    List<String> columnNames = new ArrayList<>();
    inputType.ifPresent(
            t -> columnNames.addAll(Arrays.asList(((CompositeType) t).getFieldNames())));
    // add all source fields first because rowtime might reference one of them
    columnNames.forEach(name -> mapping.put(name, name));
    // add all schema fields for implicit mappings
    Arrays.stream(schema.getFieldNames()).forEach(name -> mapping.put(name, name));
    Map<String, String> names = properties.getIndexedProperty(SCHEMA, SCHEMA_NAME);
    for (int i = 0; i < names.size(); i++) {
        String name = properties.getString(SCHEMA + "." + i + "." + SCHEMA_NAME);
        Optional<String> source =
                properties.getOptionalString(SCHEMA + "." + i + "." + SCHEMA_FROM);
        if (source.isPresent()) {
            // add explicit mapping
            mapping.put(name, source.get());
        } else {
            // implicit mapping or time attribute
            boolean isProctime =
                    properties
                            .getOptionalBoolean(SCHEMA + "." + i + "." + SCHEMA_PROCTIME)
                            .orElse(false);
            boolean isRowtime =
                    properties.containsKey(SCHEMA + "." + i + "." + ROWTIME_TIMESTAMPS_TYPE);
            boolean isGeneratedColumn = properties.containsKey(SCHEMA + "." + i + "." + EXPR);
            if (isProctime || isRowtime || isGeneratedColumn) {
                // remove proctime/rowtime/generated columns from mapping
                mapping.remove(name);
            } else if (!columnNames.contains(name)) {
                // check for invalid fields
                throw new ValidationException(
                        format(
                                "Could not map the schema field '%s' to a field from source. "
                                        + "Please specify the source field from which it can be derived.",
                                name));
            }
        }
    }
    return mapping;
}
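A sketch of calling deriveFieldMapping directly, again with the legacy descriptor API; the field names and the RowTypeInfo input are assumptions:

import java.util.Map;
import java.util.Optional;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.table.descriptors.DescriptorProperties;
import org.apache.flink.table.descriptors.SchemaValidator;

public class FieldMappingExample {
    public static void main(String[] args) {
        // Physical source field "user_id", exposed in the schema as "id" via 'from'.
        DescriptorProperties properties = new DescriptorProperties();
        properties.putString("schema.0.name", "id");
        properties.putString("schema.0.data-type", "BIGINT");
        properties.putString("schema.0.from", "user_id");
        TypeInformation<?> inputType = Types.ROW_NAMED(new String[] {"user_id"}, Types.LONG);
        Map<String, String> mapping =
                SchemaValidator.deriveFieldMapping(properties, Optional.of(inputType));
        // mapping is {user_id=user_id, id=user_id}; a schema field that matches
        // neither an explicit 'from' nor an input field name would instead hit
        // the ValidationException above.
        System.out.println(mapping);
    }
}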
Use of org.apache.flink.table.api.ValidationException in project flink by apache.
The class ProjectionOperationFactory, method validateAndGetUniqueNames.
private String[] validateAndGetUniqueNames(List<ResolvedExpression> namedExpressions) {
    // we need to maintain the field name order so it matches up with the types
    final Set<String> names = new LinkedHashSet<>();
    extractNames(namedExpressions).stream()
            .map(name -> name.orElseThrow(
                    () -> new TableException("Could not name a field in a projection.")))
            .forEach(name -> {
                if (!names.add(name)) {
                    throw new ValidationException("Ambiguous column name: " + name);
                }
            });
    return names.toArray(new String[0]);
}
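From the Table API, this is the check that rejects two projected expressions sharing an alias. A hedged sketch (tableEnv is an existing TableEnvironment; the table "orders" and its column "amount" are made up):

import static org.apache.flink.table.api.Expressions.$;

// Both expressions are aliased to "a", so names.add(name) returns false for
// the second one and the ValidationException is thrown:
tableEnv.from("orders").select($("amount").as("a"), $("amount").plus(1).as("a"));
// -> ValidationException: Ambiguous column name: a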
Use of org.apache.flink.table.api.ValidationException in project flink by apache.
The class OperationTreeBuilder, method filter.
public QueryOperation filter(Expression condition, QueryOperation child) {
    ExpressionResolver resolver = getResolver(child);
    ResolvedExpression resolvedExpression = resolveSingleExpression(condition, resolver);
    DataType conditionType = resolvedExpression.getOutputDataType();
    if (!conditionType.getLogicalType().is(BOOLEAN)) {
        throw new ValidationException(
                "Filter operator requires a boolean expression as input, but "
                        + condition + " is of type " + conditionType);
    }
    return new FilterQueryOperation(resolvedExpression, child);
}
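The same exception can be provoked from the Table API by filtering on a non-boolean expression. A minimal sketch (tableEnv, the table "orders", and its INT column "amount" are assumptions):

import static org.apache.flink.table.api.Expressions.$;

// The condition resolves to INT rather than BOOLEAN, so filter(...) throws
// the ValidationException above:
tableEnv.from("orders").filter($("amount").plus(1));
// A boolean predicate such as $("amount").isGreater(10) passes validation.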