Use of org.apache.hadoop.hive.metastore.api.RequestPartsSpec in project metacat by Netflix: the drop_partitions_req method of the CatalogThriftHiveMetastore class.
/**
 * {@inheritDoc}
 */
@Override
public DropPartitionsResult drop_partitions_req(final DropPartitionsRequest request) throws TException {
    return requestWrapper("drop_partitions_req", new Object[] {request}, () -> {
        final String databaseName = request.getDbName();
        final String tableName = request.getTblName();
        final boolean ifExists = request.isSetIfExists() && request.isIfExists();
        final boolean needResult = !request.isSetNeedResult() || request.isNeedResult();
        final List<Partition> parts = Lists.newArrayList();
        final List<String> partNames = Lists.newArrayList();
        int minCount = 0;
        final RequestPartsSpec spec = request.getParts();
        if (spec.isSetExprs()) {
            final Table table = get_table(databaseName, tableName);
            // Dropping by expressions.
            for (DropPartitionsExpr expr : spec.getExprs()) {
                // At least one partition per expression, if not ifExists
                ++minCount;
                final PartitionsByExprResult partitionsByExprResult = get_partitions_by_expr(
                    new PartitionsByExprRequest(databaseName, tableName, expr.bufferForExpr()));
                if (partitionsByExprResult.isHasUnknownPartitions()) {
                    // Expr is built by DDLSA, it should only contain part cols and simple ops
                    throw new MetaException("Unexpected unknown partitions to drop");
                }
                parts.addAll(partitionsByExprResult.getPartitions());
            }
            final List<String> colNames = new ArrayList<>(table.getPartitionKeys().size());
            for (FieldSchema col : table.getPartitionKeys()) {
                colNames.add(col.getName());
            }
            if (!colNames.isEmpty()) {
                parts.forEach(partition -> partNames.add(FileUtils.makePartName(colNames, partition.getValues())));
            }
        } else if (spec.isSetNames()) {
            partNames.addAll(spec.getNames());
            minCount = partNames.size();
            parts.addAll(get_partitions_by_names(databaseName, tableName, partNames));
        } else {
            throw new MetaException("Partition spec is not set");
        }
        if ((parts.size() < minCount) && !ifExists) {
            throw new NoSuchObjectException("Some partitions to drop are missing");
        }
        partV1.deletePartitions(catalogName, databaseName, tableName, partNames);
        final DropPartitionsResult result = new DropPartitionsResult();
        if (needResult) {
            result.setPartitions(parts);
        }
        return result;
    });
}
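The handler above accepts either of the two RequestPartsSpec variants: EXPRS (drop by partition-filter expression) or NAMES (drop by partition name). The client snippet below only exercises the NAMES variant, so here is a minimal sketch of building the EXPRS variant. The class and method names are hypothetical, and serializedFilter is an assumption: it stands for a partition filter already serialized by Hive (as the handler's comment notes, such expressions are normally built by the DDL semantic analyzer); producing those bytes is not shown here.

import java.nio.ByteBuffer;
import java.util.Collections;

import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;
import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;

public final class DropByExprSketch {
    /**
     * Builds a DropPartitionsRequest using the EXPRS variant of RequestPartsSpec.
     * serializedFilter is assumed to hold a partition filter already serialized
     * by Hive; this sketch does not cover that serialization.
     */
    static DropPartitionsRequest dropByExprRequest(final String dbName, final String tableName,
                                                   final byte[] serializedFilter) {
        final DropPartitionsExpr expr = new DropPartitionsExpr(ByteBuffer.wrap(serializedFilter));
        final RequestPartsSpec spec =
            new RequestPartsSpec(RequestPartsSpec._Fields.EXPRS, Collections.singletonList(expr));
        final DropPartitionsRequest request = new DropPartitionsRequest(dbName, tableName, spec);
        request.setIfExists(true);    // don't fail if an expression matches no partitions
        request.setNeedResult(false); // the handler then skips returning the dropped Partition objects
        return request;
    }
}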
Use of org.apache.hadoop.hive.metastore.api.RequestPartsSpec in project metacat by Netflix: the dropHivePartitions method of the MetacatHiveClient class.
private void dropHivePartitions(final HiveMetastoreClient client, final String dbName, final String tableName,
                                final List<String> partitionNames) throws TException {
    if (partitionNames != null && !partitionNames.isEmpty()) {
        final DropPartitionsRequest request = new DropPartitionsRequest(dbName, tableName,
            new RequestPartsSpec(RequestPartsSpec._Fields.NAMES, partitionNames));
        request.setDeleteData(false);
        client.drop_partitions_req(request);
    }
}
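Note the setDeleteData(false): the metastore is asked to drop only the partition metadata while leaving the underlying files in place. A hypothetical call site, assuming an open HiveMetastoreClient connection; the database, table, and partition names are illustrative only:

// Hypothetical invocation (assumes java.util.Arrays is imported).
dropHivePartitions(client, "mydb", "mytable",
    Arrays.asList("dateint=20230101/hour=0", "dateint=20230101/hour=1"));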