use of org.apache.hadoop.hive.metastore.PartitionDropOptions in project hive by apache.
the class AlterTableDropPartitionOperation method dropPartitions.
private void dropPartitions(boolean isRepl) throws HiveException {
  // ifExists is currently verified in AlterTableDropPartitionAnalyzer
  TableName tableName = HiveTableName.of(desc.getTableName());

  List<Pair<Integer, byte[]>> partitionExpressions = new ArrayList<>(desc.getPartSpecs().size());
  for (AlterTableDropPartitionDesc.PartitionDesc partSpec : desc.getPartSpecs()) {
    partitionExpressions.add(
        Pair.of(partSpec.getPrefixLength(), SerializationUtilities.serializeExpressionToKryo(partSpec.getPartSpec())));
  }

  PartitionDropOptions options =
      PartitionDropOptions.instance().deleteData(desc.getDeleteData()).ifExists(true).purgeData(desc.getIfPurge());
  List<Partition> droppedPartitions =
      context.getDb().dropPartitions(tableName.getDb(), tableName.getTable(), partitionExpressions, options);

  if (isRepl) {
    LOG.info("Dropped {} partitions for replication.", droppedPartitions.size());
    // When replaying an event, none of the further steps are needed, so we can return here.
    return;
  }

  ProactiveEviction.Request.Builder llapEvictRequestBuilder =
      LlapHiveUtils.isLlapMode(context.getConf()) ? ProactiveEviction.Request.Builder.create() : null;
  for (Partition partition : droppedPartitions) {
    context.getConsole().printInfo("Dropped the partition " + partition.getName());
    // We have already locked the table, don't lock the partitions.
    DDLUtils.addIfAbsentByName(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK), context);
    if (llapEvictRequestBuilder != null) {
      llapEvictRequestBuilder.addPartitionOfATable(tableName.getDb(), tableName.getTable(), partition.getSpec());
    }
  }
  if (llapEvictRequestBuilder != null) {
    ProactiveEviction.evict(context.getConf(), llapEvictRequestBuilder.build());
  }
}
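The options object above is built with a fluent builder: each setter returns the instance, so the whole configuration chains into one expression. The following is a minimal, hypothetical sketch of that pattern; the field names mirror PartitionDropOptions, but this is an illustration, not the metastore class itself.

// Hypothetical simplification of the fluent-options pattern used by
// PartitionDropOptions. Field names mirror the real class; defaults are
// an assumption for illustration.
public class DropOptionsSketch {
  public boolean deleteData = true;  // remove partition data from the filesystem
  public boolean ifExists = false;   // swallow "no such partition" errors
  public boolean purgeData = false;  // bypass the trash when deleting

  public static DropOptionsSketch instance() {
    return new DropOptionsSketch();
  }

  public DropOptionsSketch deleteData(boolean deleteData) {
    this.deleteData = deleteData;
    return this;  // returning 'this' is what enables chaining
  }

  public DropOptionsSketch ifExists(boolean ifExists) {
    this.ifExists = ifExists;
    return this;
  }

  public DropOptionsSketch purgeData(boolean purgeData) {
    this.purgeData = purgeData;
    return this;
  }

  public static void main(String[] args) {
    // One chained expression, as in dropPartitions(...) above.
    DropOptionsSketch options =
        DropOptionsSketch.instance().deleteData(true).ifExists(true).purgeData(false);
    System.out.println(options.deleteData + " " + options.ifExists + " " + options.purgeData);
  }
}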
use of org.apache.hadoop.hive.metastore.PartitionDropOptions in project hive by apache.
the class AlterTableDropPartitionOperation method dropPartitionForReplication.
private void dropPartitionForReplication(Table table, ReplicationSpec replicationSpec) throws HiveException {
  /**
   * ALTER TABLE DROP PARTITION ... FOR REPLICATION(x) behaves as a DROP PARTITION IF OLDER THAN x.
   *
   * So, we check each partition that matches our DropTableDesc.getPartSpecs(), and drop it only
   * if it's older than the event that spawned this replicated request to drop the partition.
   */
  if (table == null) {
    // If the table is missing, its partitions would also have been dropped. Just no-op.
    return;
  }

  Map<String, String> dbParams = context.getDb().getDatabase(table.getDbName()).getParameters();
  for (AlterTableDropPartitionDesc.PartitionDesc partSpec : desc.getPartSpecs()) {
    List<Partition> partitions = new ArrayList<>();
    try {
      context.getDb().getPartitionsByExpr(table, partSpec.getPartSpec(), context.getConf(), partitions);

      // Check if this is a comeback from a checkpoint; if not, call the normal drop partition.
      boolean modifySinglePartition = false;
      for (Partition p : partitions) {
        if (p != null && !replicationSpec.allowEventReplacementInto(dbParams)) {
          modifySinglePartition = true;
          break;
        }
      }

      // Regular drop partition, with all the partitions specified in one go.
      if (!modifySinglePartition) {
        LOG.info("Replication calling normal drop partitions for regular partition drops {}", partitions);
        dropPartitions(true);
      } else {
        for (Partition p : partitions) {
          if (replicationSpec.allowEventReplacementInto(dbParams)) {
            PartitionDropOptions options =
                PartitionDropOptions.instance().deleteData(desc.getDeleteData()).setWriteId(desc.getWriteId());
            context.getDb().dropPartition(table.getDbName(), table.getTableName(), p.getValues(), options);
          }
        }
      }
    } catch (NoSuchObjectException e) {
      // Ignore NSOE because that means there's nothing to drop.
    } catch (Exception e) {
      throw new HiveException(e.getMessage(), e);
    }
  }
}
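The guard above hinges on ReplicationSpec.allowEventReplacementInto(dbParams), which effectively asks whether the event behind this request is newer than the replication state already recorded on the target. A rough sketch of that comparison follows; the real logic lives in ReplicationSpec, and the parameter key and method shape here are assumptions for illustration only.

import java.util.Map;

// Hypothetical sketch of the "drop only if older than the event" check.
// The real comparison is implemented by ReplicationSpec; the key name and
// numeric comparison below are illustrative assumptions.
public class EventReplacementSketch {

  /** Key under which a replicated object records the last applied event id (assumed). */
  static final String REPL_LAST_ID = "repl.last.id";

  /**
   * Returns true when the event that produced this request is newer than the
   * state already applied to the target, i.e. replaying it is safe.
   */
  static boolean allowEventReplacementInto(Map<String, String> params, long eventId) {
    String lastId = (params == null) ? null : params.get(REPL_LAST_ID);
    if (lastId == null) {
      return true;  // no replication state recorded: nothing to protect
    }
    return eventId > Long.parseLong(lastId);  // only newer events may replace
  }

  public static void main(String[] args) {
    Map<String, String> dbParams = Map.of(REPL_LAST_ID, "100");
    System.out.println(allowEventReplacementInto(dbParams, 90));   // false: stale event, skip the drop
    System.out.println(allowEventReplacementInto(dbParams, 150));  // true: newer event, drop proceeds
  }
}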
use of org.apache.hadoop.hive.metastore.PartitionDropOptions in project hive by apache.
the class TestDropPartitions method testDropPartitionNotDeleteData.
// Tests for the dropPartition(String db_name, String tbl_name, List<String> part_vals,
// PartitionDropOptions options) method.
@Test
public void testDropPartitionNotDeleteData() throws Exception {
  PartitionDropOptions partDropOptions = PartitionDropOptions.instance();
  partDropOptions.deleteData(false);
  partDropOptions.purgeData(false);

  client.dropPartition(DB_NAME, TABLE_NAME, PARTITIONS[0].getValues(), partDropOptions);

  List<Partition> droppedPartitions = Lists.newArrayList(PARTITIONS[0]);
  List<Partition> remainingPartitions = Lists.newArrayList(PARTITIONS[1], PARTITIONS[2]);
  checkPartitionsAfterDelete(TABLE_NAME, droppedPartitions, remainingPartitions, false, false);
}
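Since every setter on PartitionDropOptions returns the instance (as the first snippet above shows), the same setup can be written as one chained expression. A small sketch, using the same client, DB_NAME, TABLE_NAME, and PARTITIONS as the test above:

// Equivalent construction to the test above, written as one chained
// expression; 'client' is the metastore client from TestDropPartitions.
PartitionDropOptions options = PartitionDropOptions.instance()
    .deleteData(false)   // keep the partition directory on disk
    .purgeData(false);   // moot here, since no data is deleted anyway

client.dropPartition(DB_NAME, TABLE_NAME, PARTITIONS[0].getValues(), options);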
use of org.apache.hadoop.hive.metastore.PartitionDropOptions in project hive by apache.
the class TestDropPartitions method testDropPartitionPurgeSetInTable.
@Test
public void testDropPartitionPurgeSetInTable() throws Exception {
  PartitionDropOptions partDropOptions = PartitionDropOptions.instance();
  partDropOptions.deleteData(true);
  partDropOptions.purgeData(false);

  String tableName = "purge_test";
  Map<String, String> tableParams = new HashMap<>();
  tableParams.put("skip.trash", "true");
  createTable(tableName, getYearPartCol(), tableParams);
  Partition partition1 = createPartition(tableName, null, Lists.newArrayList("2017"), getYearPartCol(), null);
  Partition partition2 = createPartition(tableName, null, Lists.newArrayList("2018"), getYearPartCol(), null);

  client.dropPartition(DB_NAME, tableName, partition1.getValues(), partDropOptions);

  List<Partition> droppedPartitions = Lists.newArrayList(partition1);
  List<Partition> remainingPartitions = Lists.newArrayList(partition2);
  checkPartitionsAfterDelete(tableName, droppedPartitions, remainingPartitions, true, true);
}
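Note what this test pins down: the drop options request purgeData(false), yet the final checkPartitionsAfterDelete call expects the data to have been purged (both trailing arguments are true). The table was created with "skip.trash" set to "true", so the table-level parameter takes precedence over the per-call purge option and the partition data bypasses the trash.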