Use of org.apache.flink.table.catalog.CatalogPartitionImpl in project flink by apache.
The class HiveParserDDLSemanticAnalyzer, method convertAlterTableProps:
private Operation convertAlterTableProps(
        CatalogBaseTable oldBaseTable,
        String tableName,
        Map<String, String> partSpec,
        Map<String, String> newProps) {
    ObjectIdentifier tableIdentifier = parseObjectIdentifier(tableName);
    CatalogTable oldTable = (CatalogTable) oldBaseTable;
    CatalogPartitionSpec catalogPartitionSpec =
            partSpec != null ? new CatalogPartitionSpec(partSpec) : null;
    CatalogPartition catalogPartition =
            partSpec != null ? getPartition(tableIdentifier, catalogPartitionSpec) : null;

    Map<String, String> props = new HashMap<>();
    if (catalogPartition != null) {
        // Altering a partition: merge the new properties into the existing partition properties.
        props.putAll(catalogPartition.getProperties());
        props.putAll(newProps);
        return new AlterPartitionPropertiesOperation(
                tableIdentifier,
                catalogPartitionSpec,
                new CatalogPartitionImpl(props, catalogPartition.getComment()));
    } else {
        // Altering the table itself: merge the new properties into the existing table options.
        props.putAll(oldTable.getOptions());
        props.putAll(newProps);
        return new AlterTableOptionsOperation(tableIdentifier, oldTable.copy(props));
    }
}
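For reference, CatalogPartitionImpl is a simple value holder that pairs the partition properties with an optional comment. A minimal standalone sketch, assuming only the constructor and the CatalogPartition getters used above; the property key and comment are made up:

    Map<String, String> partitionProps = new HashMap<>();
    partitionProps.put("numFiles", "3"); // illustrative key, not a Flink-defined constant
    CatalogPartition partition = new CatalogPartitionImpl(partitionProps, "daily load partition");
    // The stored values are read back through the CatalogPartition interface.
    Map<String, String> readBack = partition.getProperties();
    String comment = partition.getComment();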
Use of org.apache.flink.table.catalog.CatalogPartitionImpl in project flink by apache.
The class HiveCatalog, method getPartition:
@Override
public CatalogPartition getPartition(ObjectPath tablePath, CatalogPartitionSpec partitionSpec)
        throws PartitionNotExistException, CatalogException {
    checkNotNull(tablePath, "Table path cannot be null");
    checkNotNull(partitionSpec, "CatalogPartitionSpec cannot be null");

    try {
        Partition hivePartition = getHivePartition(tablePath, partitionSpec);
        Map<String, String> properties = hivePartition.getParameters();
        // Expose the partition location as a property and pull the comment out of the property map.
        properties.put(SqlCreateHiveTable.TABLE_LOCATION_URI, hivePartition.getSd().getLocation());
        String comment = properties.remove(HiveCatalogConfig.COMMENT);
        return new CatalogPartitionImpl(properties, comment);
    } catch (NoSuchObjectException
            | MetaException
            | TableNotExistException
            | PartitionSpecInvalidException e) {
        throw new PartitionNotExistException(getName(), tablePath, partitionSpec, e);
    } catch (TException e) {
        throw new CatalogException(
                String.format("Failed to get partition %s of table %s", partitionSpec, tablePath), e);
    }
}
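A usage sketch, assuming an existing HiveCatalog instance and an already created, partitioned Hive table; the database, table, and partition values are hypothetical:

    static String partitionLocation(HiveCatalog hiveCatalog) throws Exception {
        ObjectPath tablePath = new ObjectPath("default", "orders"); // hypothetical table
        CatalogPartitionSpec spec =
                new CatalogPartitionSpec(Collections.singletonMap("dt", "2021-01-01")); // hypothetical spec
        CatalogPartition partition = hiveCatalog.getPartition(tablePath, spec);
        // getPartition above copied the storage location into the partition properties.
        return partition.getProperties().get(SqlCreateHiveTable.TABLE_LOCATION_URI);
    }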
Use of org.apache.flink.table.catalog.CatalogPartitionImpl in project flink by apache.
The class PushPartitionIntoTableSourceScanRuleTest, method setup:
@Override
public void setup() throws Exception {
    util().buildBatchProgram(FlinkBatchProgram.DEFAULT_REWRITE());
    CalciteConfig calciteConfig = TableConfigUtils.getCalciteConfig(util().tableEnv().getConfig());
    calciteConfig
            .getBatchProgram()
            .get()
            .addLast(
                    "rules",
                    FlinkHepRuleSetProgramBuilder.<BatchOptimizeContext>newBuilder()
                            .setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_SEQUENCE())
                            .setHepMatchOrder(HepMatchOrder.BOTTOM_UP)
                            .add(
                                    RuleSets.ofList(
                                            CoreRules.FILTER_PROJECT_TRANSPOSE,
                                            PushPartitionIntoTableSourceScanRule.INSTANCE))
                            .build());

    // define ddl
    String ddlTemp =
            "CREATE TABLE MyTable (\n"
                    + " id int,\n"
                    + " name string,\n"
                    + " part1 string,\n"
                    + " part2 int)\n"
                    + " partitioned by (part1, part2)\n"
                    + " WITH (\n"
                    + " 'connector' = 'values',\n"
                    + " 'bounded' = 'true',\n"
                    + " 'partition-list' = '%s'"
                    + ")";
    String ddlTempWithVirtualColumn =
            "CREATE TABLE VirtualTable (\n"
                    + " id int,\n"
                    + " name string,\n"
                    + " part1 string,\n"
                    + " part2 int,\n"
                    + " virtualField AS part2 + 1)\n"
                    + " partitioned by (part1, part2)\n"
                    + " WITH (\n"
                    + " 'connector' = 'values',\n"
                    + " 'bounded' = 'true',\n"
                    + " 'partition-list' = '%s'"
                    + ")";

    if (sourceFetchPartitions()) {
        String partitionString = "part1:A,part2:1;part1:A,part2:2;part1:B,part2:3;part1:C,part2:1";
        util().tableEnv().executeSql(String.format(ddlTemp, partitionString));
        util().tableEnv().executeSql(String.format(ddlTempWithVirtualColumn, partitionString));
    } else {
        TestValuesCatalog catalog =
                new TestValuesCatalog("test_catalog", "test_database", useCatalogFilter());
        util().tableEnv().registerCatalog("test_catalog", catalog);
        util().tableEnv().useCatalog("test_catalog");
        // register table without partitions
        util().tableEnv().executeSql(String.format(ddlTemp, ""));
        util().tableEnv().executeSql(String.format(ddlTempWithVirtualColumn, ""));
        ObjectPath mytablePath = ObjectPath.fromString("test_database.MyTable");
        ObjectPath virtualTablePath = ObjectPath.fromString("test_database.VirtualTable");
        // partition map
        List<Map<String, String>> partitions =
                Arrays.asList(
                        new HashMap<String, String>() {
                            {
                                put("part1", "A");
                                put("part2", "1");
                            }
                        },
                        new HashMap<String, String>() {
                            {
                                put("part1", "A");
                                put("part2", "2");
                            }
                        },
                        new HashMap<String, String>() {
                            {
                                put("part1", "B");
                                put("part2", "3");
                            }
                        },
                        new HashMap<String, String>() {
                            {
                                put("part1", "C");
                                put("part2", "1");
                            }
                        });
        for (Map<String, String> partition : partitions) {
            CatalogPartitionSpec catalogPartitionSpec = new CatalogPartitionSpec(partition);
            CatalogPartition catalogPartition = new CatalogPartitionImpl(new HashMap<>(), "");
            catalog.createPartition(mytablePath, catalogPartitionSpec, catalogPartition, true);
            catalog.createPartition(virtualTablePath, catalogPartitionSpec, catalogPartition, true);
        }
    }
}
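The repeated double-brace map initializers above can be written more compactly; a sketch of an equivalent helper producing the same four partition specs (the helper name is made up):

    private static Map<String, String> partSpec(String part1, String part2) {
        Map<String, String> spec = new HashMap<>();
        spec.put("part1", part1);
        spec.put("part2", part2);
        return spec;
    }

    List<Map<String, String>> partitions =
            Arrays.asList(partSpec("A", "1"), partSpec("A", "2"), partSpec("B", "3"), partSpec("C", "1"));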
Use of org.apache.flink.table.catalog.CatalogPartitionImpl in project flink by apache.
The class SqlToOperationConverter, method convertAlterTableOptions:
private Operation convertAlterTableOptions(
        ObjectIdentifier tableIdentifier,
        CatalogTable oldTable,
        SqlAlterTableOptions alterTableOptions) {
    LinkedHashMap<String, String> partitionKVs = alterTableOptions.getPartitionKVs();
    // it's altering partitions
    if (partitionKVs != null) {
        CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(partitionKVs);
        CatalogPartition catalogPartition =
                catalogManager
                        .getPartition(tableIdentifier, partitionSpec)
                        .orElseThrow(
                                () ->
                                        new ValidationException(
                                                String.format(
                                                        "Partition %s of table %s doesn't exist",
                                                        partitionSpec.getPartitionSpec(),
                                                        tableIdentifier)));
        Map<String, String> newProps = new HashMap<>(catalogPartition.getProperties());
        newProps.putAll(OperationConverterUtils.extractProperties(alterTableOptions.getPropertyList()));
        return new AlterPartitionPropertiesOperation(
                tableIdentifier,
                partitionSpec,
                new CatalogPartitionImpl(newProps, catalogPartition.getComment()));
    } else {
        // it's altering a table
        Map<String, String> newOptions = new HashMap<>(oldTable.getOptions());
        newOptions.putAll(OperationConverterUtils.extractProperties(alterTableOptions.getPropertyList()));
        return new AlterTableOptionsOperation(tableIdentifier, oldTable.copy(newOptions));
    }
}
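These operations originate from ALTER TABLE ... SET statements. A hedged sketch of the table branch, assuming a TableEnvironment named tEnv and an existing table MyTable (the option key is made up); when the statement additionally carries a PARTITION spec, the first branch builds a new CatalogPartitionImpl with the merged properties instead:

    // Updates the table options stored in the catalog; handled by the else branch above.
    tEnv.executeSql("ALTER TABLE MyTable SET ('custom.option' = 'v1')"); // hypothetical option key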
Use of org.apache.flink.table.catalog.CatalogPartitionImpl in project flink by apache.
The class CatalogStatisticsTest, method createPartitionStats:
private void createPartitionStats(String part1, int part2, long rowCount) throws Exception {
    ObjectPath path = ObjectPath.fromString("default_database.PartT");
    LinkedHashMap<String, String> partSpecMap = new LinkedHashMap<>();
    partSpecMap.put("part1", part1);
    partSpecMap.put("part2", String.valueOf(part2));
    CatalogPartitionSpec partSpec = new CatalogPartitionSpec(partSpecMap);
    // Register an empty partition, then attach table statistics (row count, file count, sizes) to it.
    catalog.createPartition(path, partSpec, new CatalogPartitionImpl(new HashMap<>(), ""), true);
    catalog.alterPartitionStatistics(
            path, partSpec, new CatalogTableStatistics(rowCount, 10, 1000L, 2000L), true);
}
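A sketch of reading the statistics back inside the same test; the checked exceptions are covered by an enclosing method that declares throws Exception:

    // Assumes the same catalog, path and partSpec as in createPartitionStats above.
    CatalogTableStatistics stats = catalog.getPartitionStatistics(path, partSpec);
    long rows = stats.getRowCount(); // equals the rowCount passed to createPartitionStats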