Use of org.apache.flink.table.descriptors.DescriptorProperties in project flink by apache.
The class HiveTableUtil, method instantiateHiveTable.
public static Table instantiateHiveTable(ObjectPath tablePath, CatalogBaseTable table, HiveConf hiveConf, boolean managedTable) {
    final boolean isView = table instanceof CatalogView;
    // let Hive set default parameters for us, e.g. serialization.format
    Table hiveTable =
            org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(
                    tablePath.getDatabaseName(), tablePath.getObjectName());
    hiveTable.setCreateTime((int) (System.currentTimeMillis() / 1000));
    Map<String, String> properties = new HashMap<>(table.getOptions());
    if (managedTable) {
        properties.put(CONNECTOR.key(), ManagedTableFactory.DEFAULT_IDENTIFIER);
    }
    // Table comment
    if (table.getComment() != null) {
        properties.put(HiveCatalogConfig.COMMENT, table.getComment());
    }
    boolean isHiveTable = HiveCatalog.isHiveTable(properties);
    // Hive table's StorageDescriptor
    StorageDescriptor sd = hiveTable.getSd();
    HiveTableUtil.setDefaultStorageFormat(sd, hiveConf);
    // We always store the schema as properties for a view, because the view schema may
    // not map to a Hive schema. This also means views created by Flink cannot be used
    // in Hive, which is fine because hive cannot understand the expanded query anyway
    if (isHiveTable && !isView) {
        HiveTableUtil.initiateTableFromProperties(hiveTable, properties, hiveConf);
        List<FieldSchema> allColumns = HiveTableUtil.createHiveColumns(table.getSchema());
        // Table columns and partition keys
        if (table instanceof CatalogTable) {
            CatalogTable catalogTable = (CatalogTable) table;
            if (catalogTable.isPartitioned()) {
                int partitionKeySize = catalogTable.getPartitionKeys().size();
                List<FieldSchema> regularColumns =
                        allColumns.subList(0, allColumns.size() - partitionKeySize);
                List<FieldSchema> partitionColumns =
                        allColumns.subList(
                                allColumns.size() - partitionKeySize, allColumns.size());
                sd.setCols(regularColumns);
                hiveTable.setPartitionKeys(partitionColumns);
            } else {
                sd.setCols(allColumns);
                hiveTable.setPartitionKeys(new ArrayList<>());
            }
        } else {
            sd.setCols(allColumns);
        }
        // Table properties
        hiveTable.getParameters().putAll(properties);
    } else {
        DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
        tableSchemaProps.putTableSchema(Schema.SCHEMA, table.getSchema());
        if (table instanceof CatalogTable) {
            tableSchemaProps.putPartitionKeys(((CatalogTable) table).getPartitionKeys());
        }
        properties.putAll(tableSchemaProps.asMap());
        properties = maskFlinkProperties(properties);
        // we may need to explicitly set the is_generic flag in the following cases:
        // 1. the user didn't specify 'connector' or 'connector.type' when creating the
        //    table; without the flag, such a table would be considered a Hive table
        //    upon retrieval
        // 2. when creating views, which don't have connector properties
        if (isView
                || (!properties.containsKey(FLINK_PROPERTY_PREFIX + CONNECTOR.key())
                        && !properties.containsKey(FLINK_PROPERTY_PREFIX + CONNECTOR_TYPE))) {
            properties.put(IS_GENERIC, "true");
        }
        hiveTable.setParameters(properties);
    }
    if (isView) {
        // TODO: [FLINK-12398] Support partitioned view in catalog API
        hiveTable.setPartitionKeys(new ArrayList<>());
        CatalogView view = (CatalogView) table;
        hiveTable.setViewOriginalText(view.getOriginalQuery());
        hiveTable.setViewExpandedText(view.getExpandedQuery());
        hiveTable.setTableType(TableType.VIRTUAL_VIEW.name());
    }
    return hiveTable;
}
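The DescriptorProperties branch above is what lets a generic table's schema survive in the metastore: putTableSchema flattens the TableSchema into plain string entries that fit the metastore's string-only parameter map. Below is a minimal standalone sketch of that flattening; it is not taken from the Flink source, and the method name, columns, and the exact keys shown in the comments are illustrative and version-dependent.

    static Map<String, String> sketchSchemaAsProperties() {
        // Build a small schema with two physical columns.
        TableSchema schema = TableSchema.builder()
                .field("id", DataTypes.BIGINT())
                .field("name", DataTypes.STRING())
                .build();
        // putTableSchema flattens the schema into indexed string properties.
        DescriptorProperties props = new DescriptorProperties(true);
        props.putTableSchema(Schema.SCHEMA, schema);
        // Expected shape of the entries (illustrative):
        //   schema.0.name -> id,   schema.0.data-type -> BIGINT
        //   schema.1.name -> name, schema.1.data-type -> STRING
        return props.asMap();
    }

These entries are then prefixed by maskFlinkProperties before being written into the Hive table's parameters, as the method above shows.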
Use of org.apache.flink.table.descriptors.DescriptorProperties in project flink by apache.
The class HiveCatalog, method instantiateCatalogTable.
@VisibleForTesting
CatalogBaseTable instantiateCatalogTable(Table hiveTable) {
    boolean isView = TableType.valueOf(hiveTable.getTableType()) == TableType.VIRTUAL_VIEW;
    // Table properties
    Map<String, String> properties = new HashMap<>(hiveTable.getParameters());
    boolean isHiveTable = isHiveTable(properties);
    TableSchema tableSchema;
    // Partition keys
    List<String> partitionKeys = new ArrayList<>();
    if (isHiveTable) {
        // Table schema
        tableSchema = HiveTableUtil.createTableSchema(hiveConf, hiveTable, client, hiveShim);
        if (!hiveTable.getPartitionKeys().isEmpty()) {
            partitionKeys = getFieldNames(hiveTable.getPartitionKeys());
        }
    } else {
        properties = retrieveFlinkProperties(properties);
        if (ManagedTableFactory.DEFAULT_IDENTIFIER.equalsIgnoreCase(
                properties.get(CONNECTOR.key()))) {
            // for Flink's managed table, we remove the connector option
            properties.remove(CONNECTOR.key());
        }
        DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
        tableSchemaProps.putProperties(properties);
        // try to get the table schema with both the new and the old (1.10) key, in
        // order to support tables created in older versions
        tableSchema =
                tableSchemaProps
                        .getOptionalTableSchema(Schema.SCHEMA)
                        .orElseGet(
                                () ->
                                        tableSchemaProps
                                                .getOptionalTableSchema("generic.table.schema")
                                                .orElseGet(() -> TableSchema.builder().build()));
        partitionKeys = tableSchemaProps.getPartitionKeys();
        // remove the schema from properties
        properties = CatalogTableImpl.removeRedundant(properties, tableSchema, partitionKeys);
    }
    String comment = properties.remove(HiveCatalogConfig.COMMENT);
    if (isView) {
        return new CatalogViewImpl(
                hiveTable.getViewOriginalText(),
                hiveTable.getViewExpandedText(),
                tableSchema,
                properties,
                comment);
    } else {
        return new CatalogTableImpl(tableSchema, partitionKeys, properties, comment);
    }
}
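retrieveFlinkProperties is the counterpart of the maskFlinkProperties call in instantiateHiveTable: Flink options are stored in the metastore under FLINK_PROPERTY_PREFIX, and the prefix is stripped again on the way out. A minimal sketch of the retrieval direction, assuming the prefix is the string "flink."; the helper name is illustrative, not the Flink implementation.

    static Map<String, String> sketchRetrieveFlinkProperties(Map<String, String> hiveParams) {
        Map<String, String> flinkProps = new HashMap<>();
        for (Map.Entry<String, String> entry : hiveParams.entrySet()) {
            // Only keys written by Flink carry the prefix; all other Hive
            // parameters are excluded from the Flink view of the table.
            if (entry.getKey().startsWith("flink.")) {
                flinkProps.put(entry.getKey().substring("flink.".length()), entry.getValue());
            }
        }
        return flinkProps;
    }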
Use of org.apache.flink.table.descriptors.DescriptorProperties in project flink by apache.
The class CatalogTableImpl, method fromProperties.
/**
* Construct a {@link CatalogTableImpl} from complete properties that contain the table schema.
*/
public static CatalogTableImpl fromProperties(Map<String, String> properties) {
    DescriptorProperties descriptorProperties = new DescriptorProperties(false);
    descriptorProperties.putProperties(properties);
    TableSchema tableSchema = descriptorProperties.getTableSchema(SCHEMA);
    List<String> partitionKeys = descriptorProperties.getPartitionKeys();
    return new CatalogTableImpl(
            tableSchema, partitionKeys, removeRedundant(properties, tableSchema, partitionKeys), "");
}
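A hypothetical call site, to make the round trip concrete; the schema.* keys follow the layout produced by DescriptorProperties.putTableSchema and are illustrative (the exact spelling depends on the Flink version).

    static CatalogTableImpl sketchFromProperties() {
        Map<String, String> properties = new HashMap<>();
        properties.put("connector", "datagen");
        properties.put("schema.0.name", "id");
        properties.put("schema.0.data-type", "BIGINT");
        // removeRedundant strips the schema.* keys again, so the resulting
        // table's options contain only the connector options.
        return CatalogTableImpl.fromProperties(properties);
    }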
Use of org.apache.flink.table.descriptors.DescriptorProperties in project flink by apache.
The class DataGenTableSourceFactoryTest, method testSource.
@Test
public void testSource() throws Exception {
    DescriptorProperties descriptor = new DescriptorProperties();
    descriptor.putString(FactoryUtil.CONNECTOR.key(), "datagen");
    descriptor.putLong(DataGenConnectorOptions.ROWS_PER_SECOND.key(), 100);
    descriptor.putString(
            DataGenConnectorOptionsUtil.FIELDS + ".f0." + DataGenConnectorOptionsUtil.KIND,
            DataGenConnectorOptionsUtil.RANDOM);
    descriptor.putLong(
            DataGenConnectorOptionsUtil.FIELDS + ".f0." + DataGenConnectorOptionsUtil.LENGTH, 20);
    descriptor.putString(
            DataGenConnectorOptionsUtil.FIELDS + ".f1." + DataGenConnectorOptionsUtil.KIND,
            DataGenConnectorOptionsUtil.RANDOM);
    descriptor.putLong(
            DataGenConnectorOptionsUtil.FIELDS + ".f1." + DataGenConnectorOptionsUtil.MIN, 10);
    descriptor.putLong(
            DataGenConnectorOptionsUtil.FIELDS + ".f1." + DataGenConnectorOptionsUtil.MAX, 100);
    descriptor.putString(
            DataGenConnectorOptionsUtil.FIELDS + ".f2." + DataGenConnectorOptionsUtil.KIND,
            DataGenConnectorOptionsUtil.SEQUENCE);
    descriptor.putLong(
            DataGenConnectorOptionsUtil.FIELDS + ".f2." + DataGenConnectorOptionsUtil.START, 50);
    descriptor.putLong(
            DataGenConnectorOptionsUtil.FIELDS + ".f2." + DataGenConnectorOptionsUtil.END, 60);
    descriptor.putString(
            DataGenConnectorOptionsUtil.FIELDS + ".f3." + DataGenConnectorOptionsUtil.KIND,
            DataGenConnectorOptionsUtil.RANDOM);
    descriptor.putString(
            DataGenConnectorOptionsUtil.FIELDS + ".f3." + DataGenConnectorOptionsUtil.MAX_PAST,
            "5s");
    final long begin = System.currentTimeMillis();
    List<RowData> results = runGenerator(SCHEMA, descriptor);
    final long end = System.currentTimeMillis();
    Assert.assertEquals(11, results.size());
    for (int i = 0; i < results.size(); i++) {
        RowData row = results.get(i);
        Assert.assertEquals(20, row.getString(0).toString().length());
        long f1 = row.getLong(1);
        Assert.assertTrue(f1 >= 10 && f1 <= 100);
        Assert.assertEquals(i + 50, row.getLong(2));
        final TimestampData f3 = row.getTimestamp(3, 3);
        Assert.assertTrue(f3.getMillisecond() >= begin - 5000 && f3.getMillisecond() <= end);
    }
}
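The expected count of 11 follows from the sequence field: f2 runs from 50 to 60 inclusive, which bounds the otherwise unbounded source at exactly 11 rows. Since DescriptorProperties is only a typed builder over flat string keys, the descriptor above is equivalent to a plain option map; the sketch below writes the key spellings out literally (spellings assumed from the datagen connector's documented options).

    Map<String, String> options = new HashMap<>();
    options.put("connector", "datagen");
    options.put("rows-per-second", "100");
    options.put("fields.f0.kind", "random");
    options.put("fields.f0.length", "20");
    options.put("fields.f1.kind", "random");
    options.put("fields.f1.min", "10");
    options.put("fields.f1.max", "100");
    options.put("fields.f2.kind", "sequence");
    options.put("fields.f2.start", "50");
    options.put("fields.f2.end", "60");
    options.put("fields.f3.kind", "random");
    options.put("fields.f3.max-past", "5s");
    // options should now equal descriptor.asMap() from the test above.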
Use of org.apache.flink.table.descriptors.DescriptorProperties in project flink by apache.
The class DataGenTableSourceFactoryTest, method testLackStartForSequence.
@Test
public void testLackStartForSequence() {
    try {
        DescriptorProperties descriptor = new DescriptorProperties();
        descriptor.putString(FactoryUtil.CONNECTOR.key(), "datagen");
        descriptor.putString(
                DataGenConnectorOptionsUtil.FIELDS + ".f0." + DataGenConnectorOptionsUtil.KIND,
                DataGenConnectorOptionsUtil.SEQUENCE);
        descriptor.putLong(
                DataGenConnectorOptionsUtil.FIELDS + ".f0." + DataGenConnectorOptionsUtil.END, 100);
        createTableSource(
                ResolvedSchema.of(Column.physical("f0", DataTypes.BIGINT())), descriptor.asMap());
    } catch (ValidationException e) {
        Throwable cause = e.getCause();
        Assert.assertTrue(cause.toString(), cause instanceof ValidationException);
        Assert.assertTrue(
                cause.getMessage(),
                cause.getMessage()
                        .contains(
                                "Could not find required property 'fields.f0.start' for sequence generator."));
        return;
    }
    Assert.fail("Should have failed with a ValidationException.");
}
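For contrast, supplying both endpoints satisfies the validation. A minimal sketch reusing the test's createTableSource helper, with an assumed start value of 0:

    DescriptorProperties descriptor = new DescriptorProperties();
    descriptor.putString(FactoryUtil.CONNECTOR.key(), "datagen");
    descriptor.putString(
            DataGenConnectorOptionsUtil.FIELDS + ".f0." + DataGenConnectorOptionsUtil.KIND,
            DataGenConnectorOptionsUtil.SEQUENCE);
    // Both start and end are present, so the sequence generator validates.
    descriptor.putLong(
            DataGenConnectorOptionsUtil.FIELDS + ".f0." + DataGenConnectorOptionsUtil.START, 0);
    descriptor.putLong(
            DataGenConnectorOptionsUtil.FIELDS + ".f0." + DataGenConnectorOptionsUtil.END, 100);
    createTableSource(
            ResolvedSchema.of(Column.physical("f0", DataTypes.BIGINT())), descriptor.asMap());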