Example use of org.apache.flink.table.catalog.CatalogTable in the Apache Flink project: the testCreateTableWithConstraints method of the HiveCatalogHiveMetadataTest class.
@Test
public void testCreateTableWithConstraints() throws Exception {
    // Primary-key and NOT NULL constraints are only supported by Hive 3.1.0+.
    Assume.assumeTrue(HiveVersionTestUtil.HIVE_310_OR_LATER);
    HiveCatalog hive = (HiveCatalog) catalog;
    hive.createDatabase(db1, createDb(), false);

    // Build a schema with two NOT NULL columns and a single-column primary key.
    TableSchema.Builder schemaBuilder = TableSchema.builder();
    schemaBuilder.fields(
            new String[] { "x", "y", "z" },
            new DataType[] {
                DataTypes.INT().notNull(),
                DataTypes.TIMESTAMP(9).notNull(),
                DataTypes.BIGINT()
            });
    schemaBuilder.primaryKey("pk_name", new String[] { "x" });
    hive.createTable(
            path1,
            new CatalogTableImpl(schemaBuilder.build(), getBatchTableProperties(), null),
            false);

    // Read the table back and verify the constraints round-tripped through Hive.
    CatalogTable created = (CatalogTable) hive.getTable(path1);
    TableSchema schema = created.getSchema();
    assertTrue("PK not present", schema.getPrimaryKey().isPresent());
    UniqueConstraint pk = schema.getPrimaryKey().get();
    assertEquals("pk_name", pk.getName());
    assertEquals(Collections.singletonList("x"), pk.getColumns());
    // x and y were declared NOT NULL; z must stay nullable.
    assertFalse(schema.getFieldDataTypes()[0].getLogicalType().isNullable());
    assertFalse(schema.getFieldDataTypes()[1].getLogicalType().isNullable());
    assertTrue(schema.getFieldDataTypes()[2].getLogicalType().isNullable());

    hive.dropDatabase(db1, false, true);
}
Example use of org.apache.flink.table.catalog.CatalogTable in the Apache Flink project: the testAlterPartitionColumnStatistics method of the HiveCatalogHiveMetadataTest class.
@Test
public void testAlterPartitionColumnStatistics() throws Exception {
    catalog.createDatabase(db1, createDb(), false);
    CatalogTable catalogTable = createPartitionedTable();
    catalog.createTable(path1, catalogTable, false);

    // Build the partition spec with a plain HashMap rather than double-brace
    // initialization: the anonymous-subclass idiom creates an extra class per
    // use site and captures a hidden reference to the enclosing test instance.
    Map<String, String> partSpec = new HashMap<>();
    partSpec.put("second", "2010-04-21 09:45:00");
    partSpec.put("third", "2000");
    CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(partSpec);
    catalog.createPartition(path1, partitionSpec, createPartition(), true);

    // Attach string column statistics to the partition and verify the
    // round trip through the Hive metastore.
    Map<String, CatalogColumnStatisticsDataBase> columnStatisticsDataBaseMap = new HashMap<>();
    columnStatisticsDataBaseMap.put(
            "first", new CatalogColumnStatisticsDataString(10L, 5.2, 3L, 100L));
    CatalogColumnStatistics catalogColumnStatistics =
            new CatalogColumnStatistics(columnStatisticsDataBaseMap);
    catalog.alterPartitionColumnStatistics(path1, partitionSpec, catalogColumnStatistics, false);

    checkEquals(catalogColumnStatistics, catalog.getPartitionColumnStatistics(path1, partitionSpec));
}
Example use of org.apache.flink.table.catalog.CatalogTable in the Apache Flink project: the checkStatistics helper method of the HiveCatalogHiveMetadataTest class.
/**
 * Creates a fresh Hive table whose stats properties are all set to {@code inputStat}
 * and asserts that the catalog reports {@code expectStat} for each statistic.
 */
private void checkStatistics(int inputStat, int expectStat) throws Exception {
    // Drop any table left over from a previous invocation of this helper.
    catalog.dropTable(path1, true);

    String statValue = String.valueOf(inputStat);
    Map<String, String> tableOptions = new HashMap<>();
    tableOptions.put(FactoryUtil.CONNECTOR.key(), SqlCreateHiveTable.IDENTIFIER);
    tableOptions.put(StatsSetupConst.ROW_COUNT, statValue);
    tableOptions.put(StatsSetupConst.NUM_FILES, statValue);
    tableOptions.put(StatsSetupConst.TOTAL_SIZE, statValue);
    tableOptions.put(StatsSetupConst.RAW_DATA_SIZE, statValue);

    TableSchema schema = TableSchema.builder().field("f0", DataTypes.INT()).build();
    catalog.createTable(path1, new CatalogTableImpl(schema, tableOptions, ""), false);

    // Every statistic should come back as the expected value.
    CatalogTableStatistics stats = catalog.getTableStatistics(path1);
    assertEquals(expectStat, stats.getRowCount());
    assertEquals(expectStat, stats.getFileCount());
    assertEquals(expectStat, stats.getRawDataSize());
    assertEquals(expectStat, stats.getTotalSize());
}
Example use of org.apache.flink.table.catalog.CatalogTable in the Apache Flink project: the testAlterTableColumnStatistics method of the HiveCatalogHiveMetadataTest class.
@Test
public void testAlterTableColumnStatistics() throws Exception {
    // Date column statistics were only added in Hive 1.2.0.
    String hiveVersion = ((HiveCatalog) catalog).getHiveVersion();
    boolean supportDateStats = hiveVersion.compareTo(HiveShimLoader.HIVE_VERSION_V1_2_0) >= 0;

    catalog.createDatabase(db1, createDb(), false);

    // One column per statistics type we want to exercise.
    TableSchema.Builder schemaBuilder =
            TableSchema.builder()
                    .field("first", DataTypes.STRING())
                    .field("second", DataTypes.INT())
                    .field("third", DataTypes.BOOLEAN())
                    .field("fourth", DataTypes.DOUBLE())
                    .field("fifth", DataTypes.BIGINT())
                    .field("sixth", DataTypes.BYTES())
                    .field("seventh", DataTypes.DECIMAL(10, 3))
                    .field("eighth", DataTypes.DECIMAL(30, 3));
    if (supportDateStats) {
        schemaBuilder.field("ninth", DataTypes.DATE());
    }
    TableSchema tableSchema = schemaBuilder.build();
    catalog.createTable(
            path1, new CatalogTableImpl(tableSchema, getBatchTableProperties(), TEST_COMMENT), false);

    // Populate statistics for every column, then verify the metastore round trip.
    Map<String, CatalogColumnStatisticsDataBase> colStats = new HashMap<>();
    colStats.put("first", new CatalogColumnStatisticsDataString(10L, 5.2, 3L, 100L));
    colStats.put("second", new CatalogColumnStatisticsDataLong(0L, 1000L, 3L, 0L));
    colStats.put("third", new CatalogColumnStatisticsDataBoolean(15L, 20L, 3L));
    colStats.put("fourth", new CatalogColumnStatisticsDataDouble(15.02, 20.01, 3L, 10L));
    colStats.put("fifth", new CatalogColumnStatisticsDataLong(0L, 20L, 3L, 2L));
    colStats.put("sixth", new CatalogColumnStatisticsDataBinary(150L, 20D, 3L));
    colStats.put("seventh", new CatalogColumnStatisticsDataDouble(1.23, 99.456, 100L, 0L));
    colStats.put("eighth", new CatalogColumnStatisticsDataDouble(0.123, 123456.789, 5723L, 19L));
    if (supportDateStats) {
        colStats.put("ninth", new CatalogColumnStatisticsDataDate(new Date(71L), new Date(17923L), 132L, 0L));
    }

    CatalogColumnStatistics catalogColumnStatistics = new CatalogColumnStatistics(colStats);
    catalog.alterTableColumnStatistics(path1, catalogColumnStatistics, false);
    checkEquals(catalogColumnStatistics, catalog.getTableColumnStatistics(path1));
}
Example use of org.apache.flink.table.catalog.CatalogTable in the Apache Flink project: the getLookupFunction helper method of the HiveLookupJoinITCase class.
/**
 * Resolves {@code tableName} in the Hive catalog's default database and builds the
 * lookup function a temporal join on its first column would use.
 */
private FileSystemLookupFunction<HiveTablePartition> getLookupFunction(String tableName) throws Exception {
    TableEnvironmentInternal internalEnv = (TableEnvironmentInternal) tableEnv;
    ObjectIdentifier identifier = ObjectIdentifier.of(hiveCatalog.getName(), "default", tableName);
    CatalogTable table = (CatalogTable) hiveCatalog.getTable(identifier.toObjectPath());

    // The Hive catalog is expected to provide its own table factory.
    DynamicTableSourceFactory factory =
            (DynamicTableSourceFactory)
                    hiveCatalog.getFactory().orElseThrow(IllegalStateException::new);
    HiveLookupTableSource source =
            (HiveLookupTableSource)
                    FactoryUtil.createDynamicTableSource(
                            factory,
                            identifier,
                            internalEnv.getCatalogManager().resolveCatalogTable(table),
                            tableEnv.getConfig().getConfiguration(),
                            Thread.currentThread().getContextClassLoader(),
                            false);

    // Look up on the first column only.
    return (FileSystemLookupFunction<HiveTablePartition>)
            source.getLookupFunction(new int[][] { { 0 } });
}
Aggregations