Use of org.apache.flink.table.api.ValidationException in project flink by apache.
The class LogicalTypeJsonSerializer, method serializeTypeWithGenericSerialization.
// --------------------------------------------------------------------------------------------
// Generic Serialization
// --------------------------------------------------------------------------------------------
private static void serializeTypeWithGenericSerialization(
        LogicalType logicalType,
        JsonGenerator jsonGenerator,
        SerializerProvider serializerProvider,
        boolean serializeCatalogObjects)
        throws IOException {
    jsonGenerator.writeStartObject();
    jsonGenerator.writeStringField(FIELD_NAME_TYPE_NAME, logicalType.getTypeRoot().name());
    if (!logicalType.isNullable()) {
        jsonGenerator.writeBooleanField(FIELD_NAME_NULLABLE, false);
    }
    switch (logicalType.getTypeRoot()) {
        case CHAR:
        case VARCHAR:
        case BINARY:
        case VARBINARY:
            serializeZeroLengthString(jsonGenerator);
            break;
        case TIMESTAMP_WITHOUT_TIME_ZONE:
            final TimestampType timestampType = (TimestampType) logicalType;
            serializeTimestamp(
                    timestampType.getPrecision(),
                    timestampType.getKind(),
                    jsonGenerator,
                    serializerProvider);
            break;
        case TIMESTAMP_WITH_TIME_ZONE:
            final ZonedTimestampType zonedTimestampType = (ZonedTimestampType) logicalType;
            serializeTimestamp(
                    zonedTimestampType.getPrecision(),
                    zonedTimestampType.getKind(),
                    jsonGenerator,
                    serializerProvider);
            break;
        case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
            final LocalZonedTimestampType localZonedTimestampType =
                    (LocalZonedTimestampType) logicalType;
            serializeTimestamp(
                    localZonedTimestampType.getPrecision(),
                    localZonedTimestampType.getKind(),
                    jsonGenerator,
                    serializerProvider);
            break;
        case ARRAY:
            serializeCollection(
                    ((ArrayType) logicalType).getElementType(),
                    jsonGenerator,
                    serializerProvider,
                    serializeCatalogObjects);
            break;
        case MULTISET:
            serializeCollection(
                    ((MultisetType) logicalType).getElementType(),
                    jsonGenerator,
                    serializerProvider,
                    serializeCatalogObjects);
            break;
        case MAP:
            serializeMap((MapType) logicalType, jsonGenerator, serializerProvider, serializeCatalogObjects);
            break;
        case ROW:
            serializeRow((RowType) logicalType, jsonGenerator, serializerProvider, serializeCatalogObjects);
            break;
        case DISTINCT_TYPE:
            serializeDistinctType(
                    (DistinctType) logicalType, jsonGenerator, serializerProvider, serializeCatalogObjects);
            break;
        case STRUCTURED_TYPE:
            serializeStructuredType(
                    (StructuredType) logicalType, jsonGenerator, serializerProvider, serializeCatalogObjects);
            break;
        case SYMBOL:
            // type root is enough
            break;
        case RAW:
            if (logicalType instanceof RawType) {
                serializeSpecializedRaw((RawType<?>) logicalType, jsonGenerator, serializerProvider);
                break;
            }
            // fall through
        default:
            throw new ValidationException(
                    String.format(
                            "Unable to serialize logical type '%s'. Please check the documentation for supported types.",
                            logicalType.asSummaryString()));
    }
    jsonGenerator.writeEndObject();
}
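For orientation, the following is a minimal, self-contained sketch (not Flink code) of the JSON shape this generic path emits for a non-nullable ARRAY type, using plain Jackson. The field names "type", "nullable", and "element_type" are assumed stand-ins for FIELD_NAME_TYPE_NAME, FIELD_NAME_NULLABLE, and the nested element field.

import java.io.StringWriter;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;

public class GenericTypeJsonSketch {
    public static void main(String[] args) throws Exception {
        StringWriter out = new StringWriter();
        try (JsonGenerator gen = new JsonFactory().createGenerator(out)) {
            gen.writeStartObject();
            // corresponds to logicalType.getTypeRoot().name()
            gen.writeStringField("type", "ARRAY");
            // written only because the type is NOT NULL
            gen.writeBooleanField("nullable", false);
            // nested element type, serialized recursively (as serializeCollection would do)
            gen.writeFieldName("element_type");
            gen.writeStartObject();
            gen.writeStringField("type", "BIGINT");
            gen.writeEndObject();
            gen.writeEndObject();
        }
        // prints {"type":"ARRAY","nullable":false,"element_type":{"type":"BIGINT"}}
        System.out.println(out);
    }
}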
Use of org.apache.flink.table.api.ValidationException in project flink by apache.
The class ResolvedExpressionJsonDeserializer, method deserialize.
@Override
public ResolvedExpression deserialize(JsonParser jsonParser, DeserializationContext ctx)
        throws IOException {
    ObjectNode jsonNode = jsonParser.readValueAsTree();
    String expressionType =
            Optional.ofNullable(jsonNode.get(TYPE))
                    .map(JsonNode::asText)
                    .orElse(TYPE_REX_NODE_EXPRESSION);
    if (TYPE_REX_NODE_EXPRESSION.equals(expressionType)) {
        return deserializeRexNodeExpression(jsonNode, jsonParser.getCodec(), ctx);
    } else {
        throw new ValidationException(
                String.format(
                        "Expression '%s' cannot be deserialized. "
                                + "Currently, only SQL expressions can be deserialized from the persisted plan.",
                        jsonNode));
    }
}
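As a hedged illustration of the dispatch above (not Flink code): a node without an explicit type field falls back to the SQL/RexNode branch, while any other value would end up in the ValidationException path. The field names "type" and "sqlExpression" and the fallback value "rexNodeExpression" are assumptions standing in for the TYPE and TYPE_REX_NODE_EXPRESSION constants and the actual payload layout.

import java.util.Optional;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class ExpressionTypeDispatchSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        ObjectNode withoutType = (ObjectNode) mapper.readTree("{\"sqlExpression\": \"`a` > 10\"}");
        ObjectNode withType = (ObjectNode) mapper.readTree("{\"type\": \"somethingElse\"}");
        // missing "type" -> assumed default, i.e. the SQL/RexNode branch
        System.out.println(resolve(withoutType)); // rexNodeExpression
        // any other value would hit the ValidationException branch in the real deserializer
        System.out.println(resolve(withType));    // somethingElse
    }

    private static String resolve(ObjectNode node) {
        return Optional.ofNullable(node.get("type"))
                .map(JsonNode::asText)
                .orElse("rexNodeExpression");
    }
}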
Use of org.apache.flink.table.api.ValidationException in project flink by apache.
The class UpsertKafkaDynamicTableFactoryTest, method testCreateSinkTableWithoutPK.
@Test
public void testCreateSinkTableWithoutPK() {
    thrown.expect(ValidationException.class);
    thrown.expect(containsCause(new ValidationException(
            "'upsert-kafka' tables require to define a PRIMARY KEY constraint. "
                    + "The PRIMARY KEY specifies which columns should be read from or write to the Kafka message key. "
                    + "The PRIMARY KEY also defines records in the 'upsert-kafka' table should update or delete on which keys.")));
    ResolvedSchema illegalSchema = ResolvedSchema.of(
            Column.physical("region", DataTypes.STRING()),
            Column.physical("view_count", DataTypes.BIGINT()));
    createTableSink(illegalSchema, getFullSinkOptions());
}
Use of org.apache.flink.table.api.ValidationException in project flink by apache.
The class UpsertKafkaDynamicTableFactoryTest, method testCreateSourceTableWithoutPK.
// --------------------------------------------------------------------------------------------
// Negative tests
// --------------------------------------------------------------------------------------------
@Test
public void testCreateSourceTableWithoutPK() {
    thrown.expect(ValidationException.class);
    thrown.expect(containsCause(new ValidationException(
            "'upsert-kafka' tables require to define a PRIMARY KEY constraint. "
                    + "The PRIMARY KEY specifies which columns should be read from or write to the Kafka message key. "
                    + "The PRIMARY KEY also defines records in the 'upsert-kafka' table should update or delete on which keys.")));
    ResolvedSchema illegalSchema = ResolvedSchema.of(
            Column.physical("window_start", DataTypes.STRING()),
            Column.physical("region", DataTypes.STRING()),
            Column.physical("view_count", DataTypes.BIGINT()));
    createTableSource(illegalSchema, getFullSourceOptions());
}
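By contrast, a schema that declares a PRIMARY KEY would pass this validation. Below is a hedged sketch, assuming the public ResolvedSchema constructor and the UniqueConstraint.primaryKey factory; the constraint name and column layout are illustrative only, not taken from the test class.

import java.util.Arrays;
import java.util.Collections;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;

public class UpsertKafkaSchemaSketch {
    public static void main(String[] args) {
        // physical columns plus a primary key on "region" -> acceptable for 'upsert-kafka'
        ResolvedSchema schemaWithPk =
                new ResolvedSchema(
                        Arrays.asList(
                                Column.physical("region", DataTypes.STRING()),
                                Column.physical("view_count", DataTypes.BIGINT())),
                        Collections.emptyList(),
                        UniqueConstraint.primaryKey("PK_region", Collections.singletonList("region")));
        System.out.println(schemaWithPk);
    }
}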
Use of org.apache.flink.table.api.ValidationException in project flink by apache.
The class KafkaConnectorOptionsUtil, method createKeyFormatProjection.
/**
* Creates an array of indices that determine which physical fields of the table schema to
* include in the key format and the order that those fields have in the key format.
*
* <p>See {@link KafkaConnectorOptions#KEY_FORMAT}, {@link KafkaConnectorOptions#KEY_FIELDS},
* and {@link KafkaConnectorOptions#KEY_FIELDS_PREFIX} for more information.
*/
public static int[] createKeyFormatProjection(ReadableConfig options, DataType physicalDataType) {
    final LogicalType physicalType = physicalDataType.getLogicalType();
    Preconditions.checkArgument(physicalType.is(LogicalTypeRoot.ROW), "Row data type expected.");
    final Optional<String> optionalKeyFormat = options.getOptional(KEY_FORMAT);
    final Optional<List<String>> optionalKeyFields = options.getOptional(KEY_FIELDS);
    if (!optionalKeyFormat.isPresent() && optionalKeyFields.isPresent()) {
        throw new ValidationException(String.format(
                "The option '%s' can only be declared if a key format is defined using '%s'.",
                KEY_FIELDS.key(), KEY_FORMAT.key()));
    } else if (optionalKeyFormat.isPresent()
            && (!optionalKeyFields.isPresent() || optionalKeyFields.get().size() == 0)) {
        throw new ValidationException(String.format(
                "A key format '%s' requires the declaration of one or more of key fields using '%s'.",
                KEY_FORMAT.key(), KEY_FIELDS.key()));
    }
    if (!optionalKeyFormat.isPresent()) {
        return new int[0];
    }
    final String keyPrefix = options.getOptional(KEY_FIELDS_PREFIX).orElse("");
    final List<String> keyFields = optionalKeyFields.get();
    final List<String> physicalFields = LogicalTypeChecks.getFieldNames(physicalType);
    return keyFields.stream()
            .mapToInt(keyField -> {
                final int pos = physicalFields.indexOf(keyField);
                // check that field name exists
                if (pos < 0) {
                    throw new ValidationException(String.format(
                            "Could not find the field '%s' in the table schema for usage in the key format. "
                                    + "A key field must be a regular, physical column. "
                                    + "The following columns can be selected in the '%s' option:\n"
                                    + "%s",
                            keyField, KEY_FIELDS.key(), physicalFields));
                }
                // check that field name is prefixed correctly
                if (!keyField.startsWith(keyPrefix)) {
                    throw new ValidationException(String.format(
                            "All fields in '%s' must be prefixed with '%s' when option '%s' "
                                    + "is set but field '%s' is not prefixed.",
                            KEY_FIELDS.key(), keyPrefix, KEY_FIELDS_PREFIX.key(), keyField));
                }
                return pos;
            })
            .toArray();
}
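The core of the projection is a name-to-index lookup against the physical row. The following minimal sketch (not Flink code) shows the mapping the method computes once validation passes; the column names and key field order are illustrative only.

import java.util.Arrays;
import java.util.List;

public class KeyProjectionSketch {
    public static void main(String[] args) {
        // physical row: (id, name, payload); 'key.fields' = name;id
        List<String> physicalFields = Arrays.asList("id", "name", "payload");
        List<String> keyFields = Arrays.asList("name", "id");
        // each key field is replaced by its position in the physical schema
        int[] projection = keyFields.stream().mapToInt(physicalFields::indexOf).toArray();
        System.out.println(Arrays.toString(projection)); // [1, 0]
    }
}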