Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class HiveCatalogHiveMetadataTest, method testViewCompatibility.
@Test
public void testViewCompatibility() throws Exception {
    // we always store view schema via properties now
    // make sure non-generic views created previously can still be used
    catalog.createDatabase(db1, createDb(), false);
    Table hiveView =
            org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(
                    path1.getDatabaseName(), path1.getObjectName());
    // mark as a view
    hiveView.setTableType(TableType.VIRTUAL_VIEW.name());
    final String originQuery = "view origin query";
    final String expandedQuery = "view expanded query";
    hiveView.setViewOriginalText(originQuery);
    hiveView.setViewExpandedText(expandedQuery);
    // set schema in SD
    Schema schema =
            Schema.newBuilder()
                    .fromFields(
                            new String[] {"i", "s"},
                            new AbstractDataType[] {DataTypes.INT(), DataTypes.STRING()})
                    .build();
    List<FieldSchema> fields = new ArrayList<>();
    for (Schema.UnresolvedColumn column : schema.getColumns()) {
        String name = column.getName();
        DataType type = (DataType) ((Schema.UnresolvedPhysicalColumn) column).getDataType();
        fields.add(
                new FieldSchema(
                        name, HiveTypeUtil.toHiveTypeInfo(type, true).getTypeName(), null));
    }
    hiveView.getSd().setCols(fields);
    // test mark as non-generic with is_generic
    hiveView.getParameters().put(CatalogPropertiesUtil.IS_GENERIC, "false");
    // add some other properties
    hiveView.getParameters().put("k1", "v1");
    ((HiveCatalog) catalog).client.createTable(hiveView);
    CatalogBaseTable baseTable = catalog.getTable(path1);
    assertTrue(baseTable instanceof CatalogView);
    CatalogView catalogView = (CatalogView) baseTable;
    assertEquals(schema, catalogView.getUnresolvedSchema());
    assertEquals(originQuery, catalogView.getOriginalQuery());
    assertEquals(expandedQuery, catalogView.getExpandedQuery());
    assertEquals("v1", catalogView.getOptions().get("k1"));
    // test mark as non-generic with connector
    hiveView.setDbName(path3.getDatabaseName());
    hiveView.setTableName(path3.getObjectName());
    hiveView.getParameters().remove(CatalogPropertiesUtil.IS_GENERIC);
    hiveView.getParameters().put(CONNECTOR.key(), IDENTIFIER);
    ((HiveCatalog) catalog).client.createTable(hiveView);
    baseTable = catalog.getTable(path3);
    assertTrue(baseTable instanceof CatalogView);
    catalogView = (CatalogView) baseTable;
    assertEquals(schema, catalogView.getUnresolvedSchema());
    assertEquals(originQuery, catalogView.getOriginalQuery());
    assertEquals(expandedQuery, catalogView.getExpandedQuery());
    assertEquals("v1", catalogView.getOptions().get("k1"));
}
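The loop above converts each Flink DataType into a Hive type name via HiveTypeUtil before writing the columns into the table's storage descriptor. A minimal standalone sketch of that mapping, assuming HiveTypeUtil lives in org.apache.flink.table.catalog.hive.util and that the printed name follows standard Hive type naming:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.hive.util.HiveTypeUtil;
import org.apache.flink.table.types.DataType;

public class HiveTypeMappingSketch {
    public static void main(String[] args) {
        DataType flinkType = DataTypes.DECIMAL(10, 2);
        // checkPrecision = true rejects precisions that Hive cannot represent
        String hiveTypeName = HiveTypeUtil.toHiveTypeInfo(flinkType, true).getTypeName();
        System.out.println(hiveTypeName); // expected: decimal(10,2)
    }
}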
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class HiveGenericUDFTest, method testStruct.
@Test
public void testStruct() {
    HiveGenericUDF udf =
            init(
                    GenericUDFStruct.class,
                    new Object[] {null, null, null},
                    new DataType[] {DataTypes.INT(), DataTypes.CHAR(2), DataTypes.VARCHAR(10)});
    Row result = (Row) udf.eval(1, "222", "3");
    assertEquals(Row.of(1, "22", "3"), result);
    udf =
            init(
                    TestGenericUDFStructSize.class,
                    new Object[] {null},
                    new DataType[] {
                        DataTypes.ROW(
                                DataTypes.FIELD("1", DataTypes.INT()),
                                DataTypes.FIELD("2", DataTypes.CHAR(2)),
                                DataTypes.FIELD("3", DataTypes.VARCHAR(10)))
                    });
    assertEquals(3, udf.eval(result));
}
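The first assertion passes because the second argument is declared as CHAR(2), so the input "222" comes back as "22" in the resulting struct. A minimal sketch of building the same row DataType with only public table-common APIs and inspecting its logical type, independent of the UDF test harness:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.RowType;

public class RowDataTypeSketch {
    public static void main(String[] args) {
        DataType rowType =
                DataTypes.ROW(
                        DataTypes.FIELD("1", DataTypes.INT()),
                        DataTypes.FIELD("2", DataTypes.CHAR(2)),
                        DataTypes.FIELD("3", DataTypes.VARCHAR(10)));
        // the logical type exposes the field names and count of the row
        RowType logicalType = (RowType) rowType.getLogicalType();
        System.out.println(logicalType.getFieldCount()); // 3
        System.out.println(logicalType.getFieldNames()); // [1, 2, 3]
    }
}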
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class KinesisDynamicTableFactoryTest, method testGoodTableSinkCopyForPartitionedTable.
@Test
public void testGoodTableSinkCopyForPartitionedTable() {
    ResolvedSchema sinkSchema = defaultSinkSchema();
    DataType physicalDataType = sinkSchema.toPhysicalRowDataType();
    Map<String, String> sinkOptions = defaultSinkTableOptions().build();
    List<String> sinkPartitionKeys = Arrays.asList("name", "curr_id");
    // Construct actual DynamicTableSink using FactoryUtil
    KinesisDynamicSink actualSink =
            (KinesisDynamicSink) createTableSink(sinkSchema, sinkPartitionKeys, sinkOptions);
    // Construct expected DynamicTableSink using factory under test
    KinesisDynamicSink expectedSink =
            (KinesisDynamicSink)
                    new KinesisDynamicSink.KinesisDynamicTableSinkBuilder()
                            .setConsumedDataType(physicalDataType)
                            .setStream(STREAM_NAME)
                            .setKinesisClientProperties(defaultProducerProperties())
                            .setEncodingFormat(new TestFormatFactory.EncodingFormatMock(","))
                            .setPartitioner(
                                    new RowDataFieldsKinesisPartitionKeyGenerator(
                                            (RowType) physicalDataType.getLogicalType(),
                                            sinkPartitionKeys))
                            .build();
    Assertions.assertThat(actualSink).isEqualTo(expectedSink.copy());
    Assertions.assertThat(expectedSink).isNotSameAs(expectedSink.copy());
}
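Both the actual and the expected sink are driven by the schema's physical row DataType, which also feeds the partition key generator. A minimal sketch of deriving that DataType from a ResolvedSchema; the column names below are assumptions chosen to mirror the partition keys, not the real defaultSinkSchema():

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.catalog.Column;
import org.apache.flink.table.catalog.ResolvedSchema;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.logical.RowType;

public class PhysicalRowTypeSketch {
    public static void main(String[] args) {
        // hypothetical sink schema; the real one comes from defaultSinkSchema() in the test
        ResolvedSchema schema =
                ResolvedSchema.of(
                        Column.physical("name", DataTypes.STRING()),
                        Column.physical("curr_id", DataTypes.BIGINT()),
                        Column.physical("amount", DataTypes.DOUBLE()));
        // only physical columns are part of the row DataType consumed by the sink
        DataType physicalDataType = schema.toPhysicalRowDataType();
        // the partition key generator takes the logical RowType plus the key list
        RowType rowType = (RowType) physicalDataType.getLogicalType();
        System.out.println(rowType.getFieldNames()); // [name, curr_id, amount]
    }
}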
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class SocketDynamicTableFactory, method createDynamicTableSource.
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
    // either implement your custom validation logic here ...
    // or use the provided helper utility
    final FactoryUtil.TableFactoryHelper helper =
            FactoryUtil.createTableFactoryHelper(this, context);
    // discover a suitable decoding format
    final DecodingFormat<DeserializationSchema<RowData>> decodingFormat =
            helper.discoverDecodingFormat(DeserializationFormatFactory.class, FactoryUtil.FORMAT);
    // validate all options
    helper.validate();
    // get the validated options
    final ReadableConfig options = helper.getOptions();
    final String hostname = options.get(HOSTNAME);
    final int port = options.get(PORT);
    final byte byteDelimiter = (byte) (int) options.get(BYTE_DELIMITER);
    // derive the produced data type (excluding computed columns) from the catalog table
    final DataType producedDataType =
            context.getCatalogTable().getResolvedSchema().toPhysicalRowDataType();
    // create and return dynamic table source
    return new SocketDynamicTableSource(
            hostname, port, byteDelimiter, decodingFormat, producedDataType);
}
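The producedDataType derived here is typically handed back to the decoding format when the planner asks the source for its runtime implementation. A condensed sketch of how SocketDynamicTableSource might do that in getScanRuntimeProvider, following the pattern from Flink's documented socket example; the SocketSourceFunction name and the field names are assumptions taken from that example:

@Override
public ScanRuntimeProvider getScanRuntimeProvider(ScanContext runtimeProviderContext) {
    // the format turns the produced DataType into a concrete DeserializationSchema<RowData>
    final DeserializationSchema<RowData> deserializer =
            decodingFormat.createRuntimeDecoder(runtimeProviderContext, producedDataType);
    // custom SourceFunction that reads the socket and emits deserialized rows
    final SourceFunction<RowData> sourceFunction =
            new SocketSourceFunction(hostname, port, byteDelimiter, deserializer);
    // the socket source is unbounded, hence isBounded = false
    return SourceFunctionProvider.of(sourceFunction, false);
}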
Use of org.apache.flink.table.types.DataType in project flink by apache.
From the class DebeziumJsonSerDeSchemaTest, method testDeserializationWithMetadata.
private void testDeserializationWithMetadata(
        String resourceFile, boolean schemaInclude, Consumer<RowData> testConsumer)
        throws Exception {
    // we only read the first line to keep the test simple
    final String firstLine = readLines(resourceFile).get(0);
    final List<ReadableMetadata> requestedMetadata = Arrays.asList(ReadableMetadata.values());
    final DataType producedDataType =
            DataTypeUtils.appendRowFields(
                    PHYSICAL_DATA_TYPE,
                    requestedMetadata.stream()
                            .map(m -> DataTypes.FIELD(m.key, m.dataType))
                            .collect(Collectors.toList()));
    final DebeziumJsonDeserializationSchema deserializationSchema =
            new DebeziumJsonDeserializationSchema(
                    PHYSICAL_DATA_TYPE,
                    requestedMetadata,
                    InternalTypeInfo.of(producedDataType.getLogicalType()),
                    schemaInclude,
                    false,
                    TimestampFormat.ISO_8601);
    final SimpleCollector collector = new SimpleCollector();
    deserializationSchema.deserialize(firstLine.getBytes(StandardCharsets.UTF_8), collector);
    assertEquals(1, collector.list.size());
    testConsumer.accept(collector.list.get(0));
}
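Here producedDataType is the physical row type widened with one extra field per requested metadata column. A minimal standalone sketch of that widening step using DataTypeUtils.appendRowFields; the physical fields and metadata names below are illustrative assumptions, not the real PHYSICAL_DATA_TYPE or metadata keys from the test:

import java.util.Arrays;
import java.util.List;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.types.DataType;
import org.apache.flink.table.types.utils.DataTypeUtils;

public class AppendMetadataFieldsSketch {
    public static void main(String[] args) {
        // hypothetical physical schema of the payload
        DataType physical =
                DataTypes.ROW(
                        DataTypes.FIELD("id", DataTypes.INT()),
                        DataTypes.FIELD("name", DataTypes.STRING()));
        // hypothetical metadata columns requested by the user
        List<DataTypes.Field> metadataFields =
                Arrays.asList(
                        DataTypes.FIELD(
                                "ingestion_timestamp",
                                DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3)),
                        DataTypes.FIELD("source_database", DataTypes.STRING()));
        // append the metadata columns after the physical columns
        DataType produced = DataTypeUtils.appendRowFields(physical, metadataFields);
        // roughly: ROW<id INT, name STRING, ingestion_timestamp TIMESTAMP_LTZ(3), source_database STRING>
        System.out.println(produced);
    }
}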