Use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
The class CatalogStatisticsTest, method createPartitionStats.
private void createPartitionStats(String part1, int part2, long rowCount) throws Exception {
    ObjectPath path = ObjectPath.fromString("default_database.PartT");
    // LinkedHashMap keeps the partition keys in declaration order
    LinkedHashMap<String, String> partSpecMap = new LinkedHashMap<>();
    partSpecMap.put("part1", part1);
    partSpecMap.put("part2", String.valueOf(part2));
    CatalogPartitionSpec partSpec = new CatalogPartitionSpec(partSpecMap);
    catalog.createPartition(path, partSpec, new CatalogPartitionImpl(new HashMap<>(), ""), true);
    // CatalogTableStatistics(rowCount, fileCount, totalSize, rawDataSize)
    catalog.alterPartitionStatistics(path, partSpec, new CatalogTableStatistics(rowCount, 10, 1000L, 2000L), true);
}
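A minimal usage sketch (the partition values and row counts below are assumptions for illustration, not taken from the original test); assuming the enclosing test method declares throws Exception, the helper is simply called once per partition to seed table-level statistics:

createPartitionStats("2010", 1, 1000L);
createPartitionStats("2011", 2, 2000L);
createPartitionStats("2011", 3, 3000L);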
Use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
The class CatalogStatisticsTest, method createPartitionColumnStats.
private void createPartitionColumnStats(String part1, int part2, boolean unknown) throws Exception {
    ObjectPath path = ObjectPath.fromString("default_database.PartT");
    LinkedHashMap<String, String> partSpecMap = new LinkedHashMap<>();
    partSpecMap.put("part1", part1);
    partSpecMap.put("part2", String.valueOf(part2));
    CatalogPartitionSpec partSpec = new CatalogPartitionSpec(partSpecMap);
    // long column stats: min, max, ndv, nullCount
    CatalogColumnStatisticsDataLong longColStats = new CatalogColumnStatisticsDataLong(-123L, 763322L, 23L, 77L);
    // string column stats: maxLength, avgLength, ndv, nullCount
    CatalogColumnStatisticsDataString stringColStats = new CatalogColumnStatisticsDataString(152L, 43.5D, 20L, 0L);
    Map<String, CatalogColumnStatisticsDataBase> colStatsMap = new HashMap<>();
    // "unknown" statistics carry all-null fields, modeling missing column stats
    colStatsMap.put("id", unknown ? new CatalogColumnStatisticsDataLong(null, null, null, null) : longColStats);
    colStatsMap.put("name", unknown ? new CatalogColumnStatisticsDataString(null, null, null, null) : stringColStats);
    catalog.alterPartitionColumnStatistics(path, partSpec, new CatalogColumnStatistics(colStatsMap), true);
}
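A hedged sketch of how the two helpers might be combined so that each partition carries both table-level and column-level statistics (the partition values are assumptions; the boolean toggles between known and unknown column statistics as in the method above):

createPartitionStats("2010", 1, 1000L);
createPartitionColumnStats("2010", 1, false); // known column stats
createPartitionStats("2011", 2, 2000L);
createPartitionColumnStats("2011", 2, true); // unknown (all-null) column stats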
Use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
The class HiveCatalog, method alterTableViaProperties.
private void alterTableViaProperties(AlterTableOp alterOp, Table hiveTable, CatalogTable catalogTable, Map<String, String> oldProps, Map<String, String> newProps, StorageDescriptor sd) {
    switch (alterOp) {
        case CHANGE_TBL_PROPS:
            oldProps.putAll(newProps);
            break;
        case CHANGE_LOCATION:
            HiveTableUtil.extractLocation(sd, newProps);
            break;
        case CHANGE_FILE_FORMAT:
            String newFileFormat = newProps.remove(STORED_AS_FILE_FORMAT);
            HiveTableUtil.setStorageFormat(sd, newFileFormat, hiveConf);
            break;
        case CHANGE_SERDE_PROPS:
            HiveTableUtil.extractRowFormat(sd, newProps);
            break;
        case ALTER_COLUMNS:
            if (hiveTable == null) {
                throw new CatalogException("ALTER COLUMNS cannot be done with ALTER PARTITION");
            }
            HiveTableUtil.alterColumns(hiveTable.getSd(), catalogTable);
            boolean cascade = Boolean.parseBoolean(newProps.remove(ALTER_COL_CASCADE));
            if (cascade) {
                if (!isTablePartitioned(hiveTable)) {
                    throw new CatalogException("ALTER COLUMNS CASCADE for non-partitioned table");
                }
                try {
                    for (CatalogPartitionSpec spec : listPartitions(new ObjectPath(hiveTable.getDbName(), hiveTable.getTableName()))) {
                        Partition partition = getHivePartition(hiveTable, spec);
                        HiveTableUtil.alterColumns(partition.getSd(), catalogTable);
                        client.alter_partition(hiveTable.getDbName(), hiveTable.getTableName(), partition);
                    }
                } catch (Exception e) {
                    throw new CatalogException("Failed to cascade add/replace columns to partitions", e);
                }
            }
            break;
        default:
            throw new CatalogException("Unsupported alter table operation " + alterOp);
    }
}
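For orientation, a hedged sketch of the public Catalog API counterpart of the cascade loop above: listing every CatalogPartitionSpec of a table and reading its column-to-value map. The hiveCatalog instance and the table name are assumptions, and the enclosing method is assumed to declare throws Exception:

ObjectPath tablePath = new ObjectPath("default", "part_table"); // hypothetical database and table
for (CatalogPartitionSpec spec : hiveCatalog.listPartitions(tablePath)) {
    Map<String, String> partitionKeys = spec.getPartitionSpec(); // partition column -> value
    // each spec identifies exactly one partition of the table
}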
Use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
The class HiveCatalog, method listPartitionsByFilter.
@Override
public List<CatalogPartitionSpec> listPartitionsByFilter(ObjectPath tablePath, List<Expression> expressions) throws TableNotExistException, TableNotPartitionedException, CatalogException {
    Table hiveTable = getHiveTable(tablePath);
    ensurePartitionedTable(tablePath, hiveTable);
    List<String> partColNames = getFieldNames(hiveTable.getPartitionKeys());
    Optional<String> filter = HiveTableUtil.makePartitionFilter(HiveTableUtil.getNonPartitionFields(hiveConf, hiveTable, hiveShim).size(), partColNames, expressions, hiveShim);
    if (!filter.isPresent()) {
        throw new UnsupportedOperationException("HiveCatalog is unable to handle the partition filter expressions: " + expressions);
    }
    try {
        PartitionSpecProxy partitionSpec = client.listPartitionSpecsByFilter(tablePath.getDatabaseName(), tablePath.getObjectName(), filter.get(), (short) -1);
        List<CatalogPartitionSpec> res = new ArrayList<>(partitionSpec.size());
        PartitionSpecProxy.PartitionIterator partitions = partitionSpec.getPartitionIterator();
        while (partitions.hasNext()) {
            Partition partition = partitions.next();
            Map<String, String> spec = new HashMap<>();
            for (int i = 0; i < partColNames.size(); i++) {
                spec.put(partColNames.get(i), partition.getValues().get(i));
            }
            res.add(new CatalogPartitionSpec(spec));
        }
        return res;
    } catch (TException e) {
        throw new UnsupportedOperationException("Failed to list partition by filter from HMS, filter expressions: " + expressions, e);
    }
}
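A hedged usage sketch (hiveCatalog, tablePath, the expressions list, and the partition column name "part1" are assumptions): each returned spec keys every partition column by name, so callers can look the values up directly:

List<CatalogPartitionSpec> specs = hiveCatalog.listPartitionsByFilter(tablePath, expressions);
for (CatalogPartitionSpec spec : specs) {
    String part1Value = spec.getPartitionSpec().get("part1"); // value of partition column "part1"
}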
Use of org.apache.flink.table.catalog.CatalogPartitionSpec in project flink by apache.
The class HiveParserDDLSemanticAnalyzer, method convertAlterTableDropParts.
private Operation convertAlterTableDropParts(String[] qualified, HiveParserASTNode ast) {
    boolean ifExists = ast.getFirstChildWithType(HiveASTParser.TOK_IFEXISTS) != null;
    // If the drop has to fail on non-existent partitions, we cannot batch expressions.
    // That is because we actually have to check each separate expression for existence.
    // We could do a small optimization for the case where expr has all columns and all
    // operators are equality, if we assume those would always match one partition (which
    // may not be true with legacy, non-normalized column values). This is probably a
    // popular case but that's kinda hacky. Let's not do it for now.
    Table tab = getTable(new ObjectPath(qualified[0], qualified[1]));
    // hive represents drop partition specs with generic func desc, but what we need is just
    // spec maps
    List<Map<String, String>> partSpecs = new ArrayList<>();
    for (int i = 0; i < ast.getChildCount(); i++) {
        HiveParserASTNode child = (HiveParserASTNode) ast.getChild(i);
        if (child.getType() == HiveASTParser.TOK_PARTSPEC) {
            partSpecs.add(getPartSpec(child));
        }
    }
    validateAlterTableType(tab);
    ObjectIdentifier tableIdentifier = catalogManager.qualifyIdentifier(UnresolvedIdentifier.of(qualified[0], qualified[1]));
    List<CatalogPartitionSpec> specs = partSpecs.stream().map(CatalogPartitionSpec::new).collect(Collectors.toList());
    return new DropPartitionsOperation(tableIdentifier, ifExists, specs);
}
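A hedged sketch of building the same kind of operation by hand for a single partition spec (the column names and values are assumptions; the DropPartitionsOperation constructor shape is taken from the return statement above, and java.util.Collections is assumed to be imported):

Map<String, String> specMap = new HashMap<>();
specMap.put("part1", "a"); // hypothetical partition column values
specMap.put("part2", "1");
DropPartitionsOperation op = new DropPartitionsOperation(
        tableIdentifier, true, Collections.singletonList(new CatalogPartitionSpec(specMap)));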