Example usage of org.apache.flink.table.types.AbstractDataType in the Apache Flink project.
Taken from the class HiveCatalogHiveMetadataTest, method testViewCompatibility.
// ------ table and column stats ------
@Test
public void testViewCompatibility() throws Exception {
    // View schemas are now always stored via table properties; this test
    // verifies that non-generic views written the legacy way (schema kept in
    // the storage descriptor) can still be read back as CatalogViews.
    catalog.createDatabase(db1, createDb(), false);

    Table legacyView = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(path1.getDatabaseName(), path1.getObjectName());
    // mark the table object as a view
    legacyView.setTableType(TableType.VIRTUAL_VIEW.name());
    final String originQuery = "view origin query";
    final String expandedQuery = "view expanded query";
    legacyView.setViewOriginalText(originQuery);
    legacyView.setViewExpandedText(expandedQuery);

    // Store the schema in the SD columns, the way old versions did.
    Schema schema = Schema.newBuilder()
            .fromFields(
                    new String[] { "i", "s" },
                    new AbstractDataType[] { DataTypes.INT(), DataTypes.STRING() })
            .build();
    List<FieldSchema> sdColumns = new ArrayList<>();
    for (Schema.UnresolvedColumn column : schema.getColumns()) {
        DataType columnType = (DataType) ((Schema.UnresolvedPhysicalColumn) column).getDataType();
        sdColumns.add(
                new FieldSchema(
                        column.getName(),
                        HiveTypeUtil.toHiveTypeInfo(columnType, true).getTypeName(),
                        null));
    }
    legacyView.getSd().setCols(sdColumns);

    // Case 1: marked non-generic via the legacy is_generic flag.
    legacyView.getParameters().put(CatalogPropertiesUtil.IS_GENERIC, "false");
    // extra properties must survive the round trip
    legacyView.getParameters().put("k1", "v1");
    ((HiveCatalog) catalog).client.createTable(legacyView);
    CatalogBaseTable retrieved = catalog.getTable(path1);
    assertTrue(retrieved instanceof CatalogView);
    CatalogView view = (CatalogView) retrieved;
    assertEquals(schema, view.getUnresolvedSchema());
    assertEquals(originQuery, view.getOriginalQuery());
    assertEquals(expandedQuery, view.getExpandedQuery());
    assertEquals("v1", view.getOptions().get("k1"));

    // Case 2: marked non-generic via the connector property instead.
    legacyView.setDbName(path3.getDatabaseName());
    legacyView.setTableName(path3.getObjectName());
    legacyView.getParameters().remove(CatalogPropertiesUtil.IS_GENERIC);
    legacyView.getParameters().put(CONNECTOR.key(), IDENTIFIER);
    ((HiveCatalog) catalog).client.createTable(legacyView);
    retrieved = catalog.getTable(path3);
    assertTrue(retrieved instanceof CatalogView);
    view = (CatalogView) retrieved;
    assertEquals(schema, view.getUnresolvedSchema());
    assertEquals(originQuery, view.getOriginalQuery());
    assertEquals(expandedQuery, view.getExpandedQuery());
    assertEquals("v1", view.getOptions().get("k1"));
}
Example usage of org.apache.flink.table.types.AbstractDataType in the Apache Flink project.
Taken from the class SchemaTranslator, method createProducingResult.
/**
 * Converts the given {@link DataType} into the final {@link ProducingResult}.
 *
 * <p>Serves a single use case: deriving the physical columns from the input data type. As a
 * convenience, when the target field names are a (possibly reordered) match for the input field
 * names, a projection list is produced so POJO fields can be reordered by name; otherwise the
 * projection stays {@code null} and sink validation fails later.
 */
public static ProducingResult createProducingResult(DataTypeFactory dataTypeFactory, ResolvedSchema inputSchema, AbstractDataType<?> targetDataType) {
    final List<String> sourceNames = inputSchema.getColumnNames();
    final List<String> sourceNamesLower =
            sourceNames.stream().map(n -> n.toLowerCase(Locale.ROOT)).collect(Collectors.toList());

    final DataType resolvedDataType = dataTypeFactory.createDataType(targetDataType);
    final List<String> sinkNames = flattenToNames(resolvedDataType);
    final List<String> sinkNamesLower =
            sinkNames.stream().map(n -> n.toLowerCase(Locale.ROOT)).collect(Collectors.toList());
    final List<DataType> sinkDataTypes = flattenToDataTypes(resolvedDataType);

    // Help reorder fields for POJOs when all field names are present but out of order;
    // otherwise leave projections null and let the sink validation fail later.
    List<String> projections = null;
    if (sinkNames.size() == sourceNames.size()) {
        if (sinkNames.containsAll(sourceNames)) {
            // case-sensitive reordering by name
            projections = sinkNames;
        } else {
            // case-insensitive reordering — only valid when names are unique on both
            // sides after lower-casing, so the mapping below is a bijection
            final boolean sinkUnique =
                    sinkNamesLower.stream().distinct().count() == sinkNames.size();
            final boolean sourceUnique =
                    sourceNamesLower.stream().distinct().count() == sourceNames.size();
            if (sourceUnique && sinkUnique && sinkNamesLower.containsAll(sourceNamesLower)) {
                projections =
                        sinkNamesLower.stream()
                                .map(lowered -> sourceNames.get(sourceNamesLower.indexOf(lowered)))
                                .collect(Collectors.toList());
            }
        }
    }

    final Schema schema = Schema.newBuilder().fromFields(sinkNames, sinkDataTypes).build();
    return new ProducingResult(projections, schema, resolvedDataType);
}
Aggregations