Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class TestValuesTableFactory, method validateAndExtractRowtimeIndex.
private static int validateAndExtractRowtimeIndex(
        CatalogTable sinkTable, boolean dropLateEvent, boolean isInsertOnly) {
    if (!dropLateEvent) {
        return -1;
    } else if (!isInsertOnly) {
        throw new ValidationException(
                "Option 'sink.drop-late-event' only works for insert-only sink now.");
    }
    TableSchema schema = sinkTable.getSchema();
    List<WatermarkSpec> watermarkSpecs = schema.getWatermarkSpecs();
    if (watermarkSpecs.size() == 0) {
        throw new ValidationException(
                "Please define the watermark in the schema that is used to indicate the rowtime column. "
                        + "The sink function will compare the rowtime and the current watermark to determine whether the event is late.");
    }
    String rowtimeName = watermarkSpecs.get(0).getRowtimeAttribute();
    return Arrays.asList(schema.getFieldNames()).indexOf(rowtimeName);
}
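The returned index identifies the rowtime column, so the sink can compare each row's event time against the current watermark to decide whether to drop the row. A minimal sketch of that comparison; the isLate helper and the Object[] row representation are invented for illustration and are not part of the factory:

// Hypothetical late-event check built on the extracted rowtime index.
// A row is modeled as an Object[] here purely for illustration.
static boolean isLate(Object[] row, int rowtimeIndex, long currentWatermark) {
    long rowtime = (long) row[rowtimeIndex]; // event time in epoch millis
    return rowtime <= currentWatermark;      // at or behind the watermark => late
}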
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class OperationConverterUtils, method convertChangeColumn.
public static Operation convertChangeColumn(
        ObjectIdentifier tableIdentifier,
        SqlChangeColumn changeColumn,
        CatalogTable catalogTable,
        SqlValidator sqlValidator) {
    String oldName = changeColumn.getOldName().getSimple();
    if (catalogTable.getPartitionKeys().indexOf(oldName) >= 0) {
        // disallow changing partition columns
        throw new ValidationException("CHANGE COLUMN cannot be applied to partition columns");
    }
    TableSchema oldSchema = catalogTable.getSchema();
    boolean first = changeColumn.isFirst();
    String after = changeColumn.getAfter() == null ? null : changeColumn.getAfter().getSimple();
    TableColumn newTableColumn = toTableColumn(changeColumn.getNewColumn(), sqlValidator);
    TableSchema newSchema = changeColumn(oldSchema, oldName, newTableColumn, first, after);
    Map<String, String> newProperties = new HashMap<>(catalogTable.getOptions());
    newProperties.putAll(extractProperties(changeColumn.getProperties()));
    // TODO: handle watermark and constraints
    return new AlterTableSchemaOperation(
            tableIdentifier,
            new CatalogTableImpl(
                    newSchema, catalogTable.getPartitionKeys(), newProperties, catalogTable.getComment()));
}
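The changeColumn helper that computes the new schema is not shown above. A simplified sketch of what that reordering step could look like, assuming unique column names and omitting the validation the real converter performs (changeColumnSketch and indexOf are illustrative names, not the project's code):

// Hypothetical sketch of the reordering: remove the old column, then
// reinsert the new one FIRST, AFTER a given column, or in its old slot.
static TableSchema changeColumnSketch(
        TableSchema oldSchema, String oldName, TableColumn newColumn,
        boolean first, String after) {
    List<TableColumn> columns = new ArrayList<>(oldSchema.getTableColumns());
    int oldPos = indexOf(columns, oldName);
    columns.remove(oldPos);
    int newPos = first ? 0 : after != null ? indexOf(columns, after) + 1 : oldPos;
    columns.add(newPos, newColumn);
    TableSchema.Builder builder = TableSchema.builder();
    for (TableColumn column : columns) {
        builder.field(column.getName(), column.getType());
    }
    return builder.build();
}

static int indexOf(List<TableColumn> columns, String name) {
    for (int i = 0; i < columns.size(); i++) {
        if (columns.get(i).getName().equals(name)) {
            return i;
        }
    }
    throw new ValidationException("Unknown column: " + name);
}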
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class HiveCatalogUdfITCase, method testFlinkUdf.
@Test
public void testFlinkUdf() throws Exception {
    final TableSchema schema =
            TableSchema.builder().field("name", DataTypes.STRING()).field("age", DataTypes.INT()).build();
    final Map<String, String> sourceOptions = new HashMap<>();
    sourceOptions.put("connector.type", "filesystem");
    sourceOptions.put("connector.path", getClass().getResource("/csv/test.csv").getPath());
    sourceOptions.put("format.type", "csv");
    CatalogTable source = new CatalogTableImpl(schema, sourceOptions, "Comment.");
    hiveCatalog.createTable(new ObjectPath(HiveCatalog.DEFAULT_DB, sourceTableName), source, false);
    hiveCatalog.createFunction(new ObjectPath(HiveCatalog.DEFAULT_DB, "myudf"), new CatalogFunctionImpl(TestHiveSimpleUDF.class.getCanonicalName()), false);
    hiveCatalog.createFunction(new ObjectPath(HiveCatalog.DEFAULT_DB, "mygenericudf"), new CatalogFunctionImpl(TestHiveGenericUDF.class.getCanonicalName()), false);
    hiveCatalog.createFunction(new ObjectPath(HiveCatalog.DEFAULT_DB, "myudtf"), new CatalogFunctionImpl(TestHiveUDTF.class.getCanonicalName()), false);
    hiveCatalog.createFunction(new ObjectPath(HiveCatalog.DEFAULT_DB, "myudaf"), new CatalogFunctionImpl(GenericUDAFSum.class.getCanonicalName()), false);
    testUdf(true);
    testUdf(false);
}
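The testUdf(boolean) helper that actually exercises the functions is not shown. Purely as an illustration, a query of the kind such a helper might issue against the catalog set up above; the TableEnvironment configuration and the SQL shape are assumptions, not part of the test:

// Hypothetical sketch: running a query against the registered UDF.
// Environment setup and the exact SQL are assumptions for illustration.
TableEnvironment tEnv =
        TableEnvironment.create(EnvironmentSettings.newInstance().inBatchMode().build());
tEnv.registerCatalog("myhive", hiveCatalog);
tEnv.useCatalog("myhive");
tEnv.executeSql(String.format("SELECT myudf(age) FROM %s", sourceTableName));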
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class HiveCatalogDataTypeTest, method createCatalogTable.
private CatalogTable createCatalogTable(DataType[] types) {
    String[] colNames = new String[types.length];
    for (int i = 0; i < types.length; i++) {
        colNames[i] = String.format("%s_%d", types[i].toString().toLowerCase(), i);
    }
    TableSchema schema = TableSchema.builder().fields(colNames, types).build();
    return new CatalogTableImpl(
            schema,
            new HashMap<String, String>() {
                {
                    put("is_streaming", "false");
                    put(FactoryUtil.CONNECTOR.key(), SqlCreateHiveTable.IDENTIFIER);
                }
            },
            "");
}
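The naming loop derives each column name from its lowercased type string and its position, which keeps the generated names unique per table. A quick illustration with an arbitrary type array:

// Arbitrary example input: the loop above would generate
// colNames = { "int_0", "varchar(10)_1" } for these two types.
DataType[] types = {DataTypes.INT(), DataTypes.VARCHAR(10)};
CatalogTable table = createCatalogTable(types);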
Use of org.apache.flink.table.api.TableSchema in project flink by apache.
The class HiveTableSource, method getProducedTableSchema.
protected TableSchema getProducedTableSchema() {
    TableSchema fullSchema = getTableSchema();
    if (projectedFields == null) {
        return fullSchema;
    } else {
        String[] fullNames = fullSchema.getFieldNames();
        DataType[] fullTypes = fullSchema.getFieldDataTypes();
        return TableSchema.builder()
                .fields(
                        Arrays.stream(projectedFields).mapToObj(i -> fullNames[i]).toArray(String[]::new),
                        Arrays.stream(projectedFields).mapToObj(i -> fullTypes[i]).toArray(DataType[]::new))
                .build();
    }
}
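Projection here simply selects and reorders fields of the full schema by index. An invented example to make the behavior concrete (the field names and projection are not from the source):

// Invented example: projecting fields {2, 0} out of a three-column schema.
TableSchema full = TableSchema.builder()
        .field("name", DataTypes.STRING())
        .field("age", DataTypes.INT())
        .field("city", DataTypes.STRING())
        .build();
int[] projectedFields = {2, 0};
// With this projection, getProducedTableSchema() would yield the schema
// (city STRING, name STRING): the indices both filter and reorder fields.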