Use of org.apache.flink.table.catalog.CatalogPartition in project flink by apache.
From the class SqlToOperationConverter, the method convertAlterTableOptions:
private Operation convertAlterTableOptions(
        ObjectIdentifier tableIdentifier,
        CatalogTable oldTable,
        SqlAlterTableOptions alterTableOptions) {
    LinkedHashMap<String, String> partitionKVs = alterTableOptions.getPartitionKVs();
    // it's altering partitions
    if (partitionKVs != null) {
        CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(partitionKVs);
        CatalogPartition catalogPartition =
                catalogManager
                        .getPartition(tableIdentifier, partitionSpec)
                        .orElseThrow(
                                () -> new ValidationException(String.format(
                                        "Partition %s of table %s doesn't exist",
                                        partitionSpec.getPartitionSpec(), tableIdentifier)));
        Map<String, String> newProps = new HashMap<>(catalogPartition.getProperties());
        newProps.putAll(OperationConverterUtils.extractProperties(alterTableOptions.getPropertyList()));
        return new AlterPartitionPropertiesOperation(
                tableIdentifier,
                partitionSpec,
                new CatalogPartitionImpl(newProps, catalogPartition.getComment()));
    } else {
        // it's altering a table
        Map<String, String> newOptions = new HashMap<>(oldTable.getOptions());
        newOptions.putAll(OperationConverterUtils.extractProperties(alterTableOptions.getPropertyList()));
        return new AlterTableOptionsOperation(tableIdentifier, oldTable.copy(newOptions));
    }
}
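For context, CatalogPartitionSpec and CatalogPartitionImpl are plain value classes, so the merge step above is easy to reproduce in isolation. The following sketch (the table, keys, and values are made up for illustration) builds a spec and overlays new options on a copy of an existing partition's properties, as the converter does:

import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.flink.table.catalog.CatalogPartition;
import org.apache.flink.table.catalog.CatalogPartitionImpl;
import org.apache.flink.table.catalog.CatalogPartitionSpec;

public class PartitionPropsMergeSketch {
    public static void main(String[] args) {
        // Spec for: ALTER TABLE orders PARTITION (dt='2021-01-01') SET ('owner'='ops')
        LinkedHashMap<String, String> partitionKVs = new LinkedHashMap<>();
        partitionKVs.put("dt", "2021-01-01");
        CatalogPartitionSpec spec = new CatalogPartitionSpec(partitionKVs);

        // Stand-in for the partition the converter looks up via the catalog manager.
        CatalogPartition existing =
                new CatalogPartitionImpl(new HashMap<>(Map.of("owner", "etl")), "daily partition");

        // Overlay the new options on a copy of the old properties; the comment is kept.
        Map<String, String> newProps = new HashMap<>(existing.getProperties());
        newProps.put("owner", "ops");
        CatalogPartition updated = new CatalogPartitionImpl(newProps, existing.getComment());

        System.out.println(spec.getPartitionSpec() + " -> " + updated.getProperties());
    }
}

Copying into a fresh HashMap keeps the looked-up partition object untouched; only the new CatalogPartitionImpl handed to the operation carries the merged properties.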
Use of org.apache.flink.table.catalog.CatalogPartition in project flink by apache.
From the class HiveCatalog, the method alterPartition:
@Override
public void alterPartition(
        ObjectPath tablePath,
        CatalogPartitionSpec partitionSpec,
        CatalogPartition newPartition,
        boolean ignoreIfNotExists)
        throws PartitionNotExistException, CatalogException {
    checkNotNull(tablePath, "Table path cannot be null");
    checkNotNull(partitionSpec, "CatalogPartitionSpec cannot be null");
    checkNotNull(newPartition, "New partition cannot be null");
    // explicitly check whether the partition exists, because alter_partition() does
    // not throw NoSuchObjectException when the target doesn't exist
    try {
        Table hiveTable = getHiveTable(tablePath);
        boolean isHiveTable = isHiveTable(hiveTable.getParameters());
        if (!isHiveTable) {
            throw new CatalogException("Currently only supports partition for hive tables");
        }
        Partition hivePartition = getHivePartition(hiveTable, partitionSpec);
        if (hivePartition == null) {
            if (ignoreIfNotExists) {
                return;
            }
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec);
        }
        AlterTableOp op = HiveTableUtil.extractAlterTableOp(newPartition.getProperties());
        if (op == null) {
            throw new CatalogException(ALTER_TABLE_OP + " is missing for alter table operation");
        }
        alterTableViaProperties(
                op, null, null,
                hivePartition.getParameters(), newPartition.getProperties(), hivePartition.getSd());
        client.alter_partition(tablePath.getDatabaseName(), tablePath.getObjectName(), hivePartition);
    } catch (NoSuchObjectException e) {
        if (!ignoreIfNotExists) {
            throw new PartitionNotExistException(getName(), tablePath, partitionSpec, e);
        }
    } catch (InvalidOperationException | MetaException | TableNotExistException | PartitionSpecInvalidException e) {
        throw new PartitionNotExistException(getName(), tablePath, partitionSpec, e);
    } catch (TException e) {
        throw new CatalogException(
                String.format(
                        "Failed to alter existing partition with new partition %s of table %s",
                        partitionSpec, tablePath),
                e);
    }
}
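As a usage sketch against this implementation (not taken from the Flink sources): note that the caller must include the alter-operation marker among the new partition's properties, since extractAlterTableOp above otherwise returns null and a CatalogException is thrown. The catalog name, conf dir, table, and the literal key "alter.table.op" (mirroring SqlAlterHiveTable.ALTER_TABLE_OP) and its value are assumptions here:

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.CatalogPartition;
import org.apache.flink.table.catalog.CatalogPartitionImpl;
import org.apache.flink.table.catalog.CatalogPartitionSpec;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.catalog.hive.HiveCatalog;

public class AlterPartitionSketch {
    public static void main(String[] args) throws Exception {
        // Catalog name, default database, and conf dir are placeholders.
        Catalog catalog = new HiveCatalog("myhive", "mydb", "/opt/hive-conf");
        catalog.open();
        try {
            ObjectPath tablePath = new ObjectPath("mydb", "orders");
            CatalogPartitionSpec spec = new CatalogPartitionSpec(Map.of("dt", "2021-01-01"));

            // Read-modify-write: copy the current properties and overlay the change.
            CatalogPartition current = catalog.getPartition(tablePath, spec);
            Map<String, String> props = new HashMap<>(current.getProperties());
            props.put("note", "backfilled");
            // HiveCatalog.alterPartition expects the alter-op marker; the value is
            // assumed to be the AlterTableOp enum name for a property change.
            props.put("alter.table.op", "CHANGE_TBL_PROPS");
            catalog.alterPartition(
                    tablePath, spec,
                    new CatalogPartitionImpl(props, current.getComment()),
                    false);
        } finally {
            catalog.close();
        }
    }
}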
Use of org.apache.flink.table.catalog.CatalogPartition in project flink by apache.
From the class HiveParserDDLSemanticAnalyzer, the method convertAlterTableAddParts:
/**
* Add one or more partitions to a table. Useful when the data has been copied to the right
* location by some other process.
*/
private Operation convertAlterTableAddParts(String[] qualified, CommonTree ast) {
    // ^(TOK_ALTERTABLE_ADDPARTS identifier ifNotExists?
    // alterStatementSuffixAddPartitionsElement+)
    boolean ifNotExists = ast.getChild(0).getType() == HiveASTParser.TOK_IFNOTEXISTS;
    Table tab = getTable(new ObjectPath(qualified[0], qualified[1]));
    boolean isView = tab.isView();
    validateAlterTableType(tab);
    int numCh = ast.getChildCount();
    int start = ifNotExists ? 1 : 0;
    String currentLocation = null;
    Map<String, String> currentPartSpec = null;
    // Parser has done some verification, so the order of tokens doesn't need to be
    // verified here.
    List<CatalogPartitionSpec> specs = new ArrayList<>();
    List<CatalogPartition> partitions = new ArrayList<>();
    for (int num = start; num < numCh; num++) {
        HiveParserASTNode child = (HiveParserASTNode) ast.getChild(num);
        switch (child.getToken().getType()) {
            case HiveASTParser.TOK_PARTSPEC:
                if (currentPartSpec != null) {
                    specs.add(new CatalogPartitionSpec(currentPartSpec));
                    Map<String, String> props = new HashMap<>();
                    if (currentLocation != null) {
                        props.put(TABLE_LOCATION_URI, currentLocation);
                    }
                    partitions.add(new CatalogPartitionImpl(props, null));
                    currentLocation = null;
                }
                currentPartSpec = getPartSpec(child);
                // validate reserved values
                validatePartitionValues(currentPartSpec);
                break;
            case HiveASTParser.TOK_PARTITIONLOCATION:
                // if location specified, set in partition
                if (isView) {
                    throw new ValidationException("LOCATION clause illegal for view partition");
                }
                currentLocation =
                        HiveParserBaseSemanticAnalyzer.unescapeSQLString(child.getChild(0).getText());
                break;
            default:
                throw new ValidationException("Unknown child: " + child);
        }
    }
    // add the last one
    if (currentPartSpec != null) {
        specs.add(new CatalogPartitionSpec(currentPartSpec));
        Map<String, String> props = new HashMap<>();
        if (currentLocation != null) {
            props.put(TABLE_LOCATION_URI, currentLocation);
        }
        partitions.add(new CatalogPartitionImpl(props, null));
    }
    ObjectIdentifier tableIdentifier =
            tab.getDbName() == null
                    ? parseObjectIdentifier(tab.getTableName())
                    : catalogManager.qualifyIdentifier(
                            UnresolvedIdentifier.of(tab.getDbName(), tab.getTableName()));
    return new AddPartitionsOperation(tableIdentifier, ifNotExists, specs, partitions);
}
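To make the loop's result concrete, here is a hand-built equivalent of the lists it collects for a statement such as ALTER TABLE orders ADD IF NOT EXISTS PARTITION (dt='2021-01-01') LOCATION '/data/orders/dt=2021-01-01' PARTITION (dt='2021-01-02'). The catalog/database/table names are illustrative, and the literal "hive.location-uri" is an assumption about the value of the TABLE_LOCATION_URI constant:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.flink.table.catalog.CatalogPartition;
import org.apache.flink.table.catalog.CatalogPartitionImpl;
import org.apache.flink.table.catalog.CatalogPartitionSpec;
import org.apache.flink.table.catalog.ObjectIdentifier;
import org.apache.flink.table.operations.ddl.AddPartitionsOperation;

public class AddPartitionsSketch {
    public static void main(String[] args) {
        // One spec/partition pair per PARTITION clause, in statement order.
        List<CatalogPartitionSpec> specs = new ArrayList<>();
        List<CatalogPartition> partitions = new ArrayList<>();

        // PARTITION (dt='2021-01-01') LOCATION '/data/orders/dt=2021-01-01':
        // the location travels in the partition properties under TABLE_LOCATION_URI.
        Map<String, String> withLocation = new HashMap<>();
        withLocation.put("hive.location-uri", "/data/orders/dt=2021-01-01");
        specs.add(new CatalogPartitionSpec(Map.of("dt", "2021-01-01")));
        partitions.add(new CatalogPartitionImpl(withLocation, null));

        // PARTITION (dt='2021-01-02') without LOCATION: empty properties.
        specs.add(new CatalogPartitionSpec(Map.of("dt", "2021-01-02")));
        partitions.add(new CatalogPartitionImpl(new HashMap<>(), null));

        AddPartitionsOperation op =
                new AddPartitionsOperation(
                        ObjectIdentifier.of("myhive", "mydb", "orders"),
                        true, // IF NOT EXISTS
                        specs,
                        partitions);
        System.out.println(op.asSummaryString());
    }
}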