Search in sources:

Example 76 with ValidationException

Use of org.apache.flink.table.api.ValidationException in project flink by apache.

The class LogicalTypeJsonSerializer, method serializeTypeWithGenericSerialization.

// --------------------------------------------------------------------------------------------
// Generic Serialization
// --------------------------------------------------------------------------------------------
private static void serializeTypeWithGenericSerialization(LogicalType logicalType, JsonGenerator jsonGenerator, SerializerProvider serializerProvider, boolean serializeCatalogObjects) throws IOException {
    jsonGenerator.writeStartObject();
    jsonGenerator.writeStringField(FIELD_NAME_TYPE_NAME, logicalType.getTypeRoot().name());
    if (!logicalType.isNullable()) {
        jsonGenerator.writeBooleanField(FIELD_NAME_NULLABLE, false);
    }
    switch(logicalType.getTypeRoot()) {
        case CHAR:
        case VARCHAR:
        case BINARY:
        case VARBINARY:
            serializeZeroLengthString(jsonGenerator);
            break;
        case TIMESTAMP_WITHOUT_TIME_ZONE:
            final TimestampType timestampType = (TimestampType) logicalType;
            serializeTimestamp(timestampType.getPrecision(), timestampType.getKind(), jsonGenerator, serializerProvider);
            break;
        case TIMESTAMP_WITH_TIME_ZONE:
            final ZonedTimestampType zonedTimestampType = (ZonedTimestampType) logicalType;
            serializeTimestamp(zonedTimestampType.getPrecision(), zonedTimestampType.getKind(), jsonGenerator, serializerProvider);
            break;
        case TIMESTAMP_WITH_LOCAL_TIME_ZONE:
            final LocalZonedTimestampType localZonedTimestampType = (LocalZonedTimestampType) logicalType;
            serializeTimestamp(localZonedTimestampType.getPrecision(), localZonedTimestampType.getKind(), jsonGenerator, serializerProvider);
            break;
        case ARRAY:
            serializeCollection(((ArrayType) logicalType).getElementType(), jsonGenerator, serializerProvider, serializeCatalogObjects);
            break;
        case MULTISET:
            serializeCollection(((MultisetType) logicalType).getElementType(), jsonGenerator, serializerProvider, serializeCatalogObjects);
            break;
        case MAP:
            serializeMap((MapType) logicalType, jsonGenerator, serializerProvider, serializeCatalogObjects);
            break;
        case ROW:
            serializeRow((RowType) logicalType, jsonGenerator, serializerProvider, serializeCatalogObjects);
            break;
        case DISTINCT_TYPE:
            serializeDistinctType((DistinctType) logicalType, jsonGenerator, serializerProvider, serializeCatalogObjects);
            break;
        case STRUCTURED_TYPE:
            serializeStructuredType((StructuredType) logicalType, jsonGenerator, serializerProvider, serializeCatalogObjects);
            break;
        case SYMBOL:
            // type root is enough
            break;
        case RAW:
            if (logicalType instanceof RawType) {
                serializeSpecializedRaw((RawType<?>) logicalType, jsonGenerator, serializerProvider);
                break;
            }
        // fall through
        default:
            throw new ValidationException(String.format("Unable to serialize logical type '%s'. Please check the documentation for supported types.", logicalType.asSummaryString()));
    }
    jsonGenerator.writeEndObject();
}
Also used: LocalZonedTimestampType(org.apache.flink.table.types.logical.LocalZonedTimestampType) ZonedTimestampType(org.apache.flink.table.types.logical.ZonedTimestampType) ValidationException(org.apache.flink.table.api.ValidationException) TimestampType(org.apache.flink.table.types.logical.TimestampType) RawType(org.apache.flink.table.types.logical.RawType) TypeInformationRawType(org.apache.flink.table.types.logical.TypeInformationRawType)
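
For orientation, here is a minimal, self-contained sketch of the generic envelope this method writes, using plain Jackson rather than Flink's shaded copy. The field names "type" and "nullable" stand in for the FIELD_NAME_TYPE_NAME and FIELD_NAME_NULLABLE constants; their exact values are an assumption here, not taken from the source above.

import java.io.IOException;
import java.io.StringWriter;
import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;

public class GenericTypeEnvelopeSketch {
    public static void main(String[] args) throws IOException {
        StringWriter out = new StringWriter();
        try (JsonGenerator gen = new JsonFactory().createGenerator(out)) {
            gen.writeStartObject();
            gen.writeStringField("type", "MAP");      // logicalType.getTypeRoot().name()
            gen.writeBooleanField("nullable", false); // written only for NOT NULL types
            // ... type-specific fields (e.g. key/value types for MAP) would follow here
            gen.writeEndObject();
        }
        System.out.println(out); // {"type":"MAP","nullable":false}
    }
}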

Example 77 with ValidationException

Use of org.apache.flink.table.api.ValidationException in project flink by apache.

The class ResolvedExpressionJsonDeserializer, method deserialize.

@Override
public ResolvedExpression deserialize(JsonParser jsonParser, DeserializationContext ctx) throws IOException {
    ObjectNode jsonNode = jsonParser.readValueAsTree();
    String expressionType = Optional.ofNullable(jsonNode.get(TYPE)).map(JsonNode::asText).orElse(TYPE_REX_NODE_EXPRESSION);
    if (TYPE_REX_NODE_EXPRESSION.equals(expressionType)) {
        return deserializeRexNodeExpression(jsonNode, jsonParser.getCodec(), ctx);
    } else {
        throw new ValidationException(String.format("Expression '%s' cannot be deserialized. " + "Currently, only SQL expressions can be deserialized from the persisted plan.", jsonNode));
    }
}
Also used: ValidationException(org.apache.flink.table.api.ValidationException) ObjectNode(org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.ObjectNode)
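
A minimal sketch of the dispatch on the "type" field, using plain Jackson; the literals "type" and "rexNodeExpression" stand in for the TYPE and TYPE_REX_NODE_EXPRESSION constants and are assumptions about their serialized values.

import java.util.Optional;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ExpressionTypeDispatchSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        // A node without a "type" field falls back to the RexNode/SQL path;
        // any other value would trigger the ValidationException above.
        JsonNode node = mapper.readTree("{\"kind\":\"CALL\"}");
        String expressionType = Optional.ofNullable(node.get("type"))
                .map(JsonNode::asText)
                .orElse("rexNodeExpression");
        System.out.println(expressionType); // rexNodeExpression
    }
}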

Example 78 with ValidationException

Use of org.apache.flink.table.api.ValidationException in project flink by apache.

The class UpsertKafkaDynamicTableFactoryTest, method testCreateSinkTableWithoutPK.

@Test
public void testCreateSinkTableWithoutPK() {
    thrown.expect(ValidationException.class);
    thrown.expect(containsCause(new ValidationException("'upsert-kafka' tables require to define a PRIMARY KEY constraint. " + "The PRIMARY KEY specifies which columns should be read from or write to the Kafka message key. " + "The PRIMARY KEY also defines records in the 'upsert-kafka' table should update or delete on which keys.")));
    ResolvedSchema illegalSchema = ResolvedSchema.of(Column.physical("region", DataTypes.STRING()), Column.physical("view_count", DataTypes.BIGINT()));
    createTableSink(illegalSchema, getFullSinkOptions());
}
Also used: ValidationException(org.apache.flink.table.api.ValidationException) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Test(org.junit.Test)

Example 79 with ValidationException

Use of org.apache.flink.table.api.ValidationException in project flink by apache.

The class UpsertKafkaDynamicTableFactoryTest, method testCreateSourceTableWithoutPK.

// --------------------------------------------------------------------------------------------
// Negative tests
// --------------------------------------------------------------------------------------------
@Test
public void testCreateSourceTableWithoutPK() {
    thrown.expect(ValidationException.class);
    thrown.expect(containsCause(new ValidationException("'upsert-kafka' tables require to define a PRIMARY KEY constraint. " + "The PRIMARY KEY specifies which columns should be read from or write to the Kafka message key. " + "The PRIMARY KEY also defines records in the 'upsert-kafka' table should update or delete on which keys.")));
    ResolvedSchema illegalSchema = ResolvedSchema.of(Column.physical("window_start", DataTypes.STRING()), Column.physical("region", DataTypes.STRING()), Column.physical("view_count", DataTypes.BIGINT()));
    createTableSource(illegalSchema, getFullSourceOptions());
}
Also used: ValidationException(org.apache.flink.table.api.ValidationException) ResolvedSchema(org.apache.flink.table.catalog.ResolvedSchema) Test(org.junit.Test)
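
Both tests fail validation because their schemas carry no constraint. As a counterpoint, here is a minimal sketch of a schema that would satisfy the 'upsert-kafka' check, assuming the public ResolvedSchema constructor and the UniqueConstraint.primaryKey factory in org.apache.flink.table.catalog; the constraint name "pk_region" is arbitrary.

import java.util.Arrays;
import java.util.Collections;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.catalog.UniqueConstraint;

public class UpsertKafkaSchemaSketch {

    // A schema with a PRIMARY KEY on 'region', as 'upsert-kafka' requires.
    static ResolvedSchema schemaWithPrimaryKey() {
        return new ResolvedSchema(
                Arrays.asList(
                        Column.physical("region", DataTypes.STRING()),
                        Column.physical("view_count", DataTypes.BIGINT())),
                Collections.emptyList(), // no watermark specs
                UniqueConstraint.primaryKey(
                        "pk_region", Collections.singletonList("region")));
    }
}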

Example 80 with ValidationException

Use of org.apache.flink.table.api.ValidationException in project flink by apache.

The class KafkaConnectorOptionsUtil, method createKeyFormatProjection.

/**
 * Creates an array of indices that determine which physical fields of the table schema to
 * include in the key format and the order that those fields have in the key format.
 *
 * <p>See {@link KafkaConnectorOptions#KEY_FORMAT}, {@link KafkaConnectorOptions#KEY_FIELDS},
 * and {@link KafkaConnectorOptions#KEY_FIELDS_PREFIX} for more information.
 */
public static int[] createKeyFormatProjection(ReadableConfig options, DataType physicalDataType) {
    final LogicalType physicalType = physicalDataType.getLogicalType();
    Preconditions.checkArgument(physicalType.is(LogicalTypeRoot.ROW), "Row data type expected.");
    final Optional<String> optionalKeyFormat = options.getOptional(KEY_FORMAT);
    final Optional<List<String>> optionalKeyFields = options.getOptional(KEY_FIELDS);
    if (!optionalKeyFormat.isPresent() && optionalKeyFields.isPresent()) {
        throw new ValidationException(String.format("The option '%s' can only be declared if a key format is defined using '%s'.", KEY_FIELDS.key(), KEY_FORMAT.key()));
    } else if (optionalKeyFormat.isPresent() && (!optionalKeyFields.isPresent() || optionalKeyFields.get().size() == 0)) {
        throw new ValidationException(String.format("A key format '%s' requires the declaration of one or more of key fields using '%s'.", KEY_FORMAT.key(), KEY_FIELDS.key()));
    }
    if (!optionalKeyFormat.isPresent()) {
        return new int[0];
    }
    final String keyPrefix = options.getOptional(KEY_FIELDS_PREFIX).orElse("");
    final List<String> keyFields = optionalKeyFields.get();
    final List<String> physicalFields = LogicalTypeChecks.getFieldNames(physicalType);
    return keyFields.stream().mapToInt(keyField -> {
        final int pos = physicalFields.indexOf(keyField);
        // check that field name exists
        if (pos < 0) {
            throw new ValidationException(String.format("Could not find the field '%s' in the table schema for usage in the key format. " + "A key field must be a regular, physical column. " + "The following columns can be selected in the '%s' option:\n" + "%s", keyField, KEY_FIELDS.key(), physicalFields));
        }
        // check that field name is prefixed correctly
        if (!keyField.startsWith(keyPrefix)) {
            throw new ValidationException(String.format("All fields in '%s' must be prefixed with '%s' when option '%s' " + "is set but field '%s' is not prefixed.", KEY_FIELDS.key(), keyPrefix, KEY_FIELDS_PREFIX.key(), keyField));
        }
        return pos;
    }).toArray();
}
Also used: DynamicTableFactory(org.apache.flink.table.factories.DynamicTableFactory) IntStream(java.util.stream.IntStream) DeliveryGuarantee(org.apache.flink.connector.base.DeliveryGuarantee) DataType(org.apache.flink.table.types.DataType) FlinkException(org.apache.flink.util.FlinkException) ConfigOptions(org.apache.flink.configuration.ConfigOptions) Arrays(java.util.Arrays) SCAN_STARTUP_TIMESTAMP_MILLIS(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.SCAN_STARTUP_TIMESTAMP_MILLIS) KEY_FORMAT(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.KEY_FORMAT) TOPIC(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.TOPIC) TRANSACTIONAL_ID_PREFIX(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.TRANSACTIONAL_ID_PREFIX) DELIVERY_GUARANTEE(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.DELIVERY_GUARANTEE) TOPIC_PATTERN(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.TOPIC_PATTERN) HashMap(java.util.HashMap) VALUE_FIELDS_INCLUDE(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.VALUE_FIELDS_INCLUDE) InstantiationUtil(org.apache.flink.util.InstantiationUtil) ReadableConfig(org.apache.flink.configuration.ReadableConfig) Map(java.util.Map) ConfigOption(org.apache.flink.configuration.ConfigOption) FORMAT(org.apache.flink.table.factories.FactoryUtil.FORMAT) FlinkFixedPartitioner(org.apache.flink.streaming.connectors.kafka.partitioner.FlinkFixedPartitioner) SCAN_STARTUP_MODE(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.SCAN_STARTUP_MODE) RowData(org.apache.flink.table.data.RowData) Properties(java.util.Properties) KafkaTopicPartition(org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition) ValueFieldsStrategy(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.ValueFieldsStrategy) Configuration(org.apache.flink.configuration.Configuration) TableException(org.apache.flink.table.api.TableException) VALUE_FORMAT(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.VALUE_FORMAT) ScanStartupMode(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.ScanStartupMode) Preconditions(org.apache.flink.util.Preconditions) FlinkKafkaPartitioner(org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner) StartupMode(org.apache.flink.streaming.connectors.kafka.config.StartupMode) List(java.util.List) SINK_PARTITIONER(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.SINK_PARTITIONER) FactoryUtil(org.apache.flink.table.factories.FactoryUtil) LogicalType(org.apache.flink.table.types.logical.LogicalType) ValidationException(org.apache.flink.table.api.ValidationException) Optional(java.util.Optional) SCAN_STARTUP_SPECIFIC_OFFSETS(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.SCAN_STARTUP_SPECIFIC_OFFSETS) Internal(org.apache.flink.annotation.Internal) Pattern(java.util.regex.Pattern) KEY_FIELDS_PREFIX(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.KEY_FIELDS_PREFIX) LogicalTypeRoot(org.apache.flink.table.types.logical.LogicalTypeRoot) LogicalTypeChecks(org.apache.flink.table.types.logical.utils.LogicalTypeChecks) KEY_FIELDS(org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptions.KEY_FIELDS)
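
A hedged usage sketch of the projection logic above. It assumes the standard option keys 'key.format' and 'key.fields' (list-typed options accept a semicolon-separated string form), and note that KafkaConnectorOptionsUtil is marked @Internal and may not be accessible from user code, so calling it directly here is for illustration only.

import java.util.Arrays;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.connectors.kafka.table.KafkaConnectorOptionsUtil;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;

public class KeyProjectionSketch {
    public static void main(String[] args) {
        Configuration options = new Configuration();
        options.setString("key.format", "json");             // KEY_FORMAT
        options.setString("key.fields", "user_id;item_id");  // KEY_FIELDS

        DataType physical = DataTypes.ROW(
                DataTypes.FIELD("item_id", DataTypes.BIGINT()),
                DataTypes.FIELD("user_id", DataTypes.BIGINT()),
                DataTypes.FIELD("behavior", DataTypes.STRING()));

        // 'user_id' sits at position 1 and 'item_id' at position 0 of the physical
        // row, so the projection follows the order declared in 'key.fields'.
        int[] projection =
                KafkaConnectorOptionsUtil.createKeyFormatProjection(options, physical);
        System.out.println(Arrays.toString(projection)); // [1, 0]
    }
}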

Aggregations

ValidationException (org.apache.flink.table.api.ValidationException) 143
DataType (org.apache.flink.table.types.DataType) 25
Test (org.junit.Test) 23
HashMap (java.util.HashMap) 21
ObjectIdentifier (org.apache.flink.table.catalog.ObjectIdentifier) 19
LogicalType (org.apache.flink.table.types.logical.LogicalType) 18
TableException (org.apache.flink.table.api.TableException) 17
List (java.util.List) 14
CatalogBaseTable (org.apache.flink.table.catalog.CatalogBaseTable) 14
QueryOperation (org.apache.flink.table.operations.QueryOperation) 14
LinkedHashMap (java.util.LinkedHashMap) 13
DescriptorProperties (org.apache.flink.table.descriptors.DescriptorProperties) 13
CatalogTable (org.apache.flink.table.catalog.CatalogTable) 12
Expression (org.apache.flink.table.expressions.Expression) 12
TableSchema (org.apache.flink.table.api.TableSchema) 11
Catalog (org.apache.flink.table.catalog.Catalog) 11
ContextResolvedTable (org.apache.flink.table.catalog.ContextResolvedTable) 11
ArrayList (java.util.ArrayList) 10
Map (java.util.Map) 10
Internal (org.apache.flink.annotation.Internal) 10