Use of org.apache.hadoop.hive.metastore.PartitionDropOptions in project hive by apache.
The class Hive, method dropPartitions.
public List<Partition> dropPartitions(String dbName, String tableName,
    List<Pair<Integer, byte[]>> partitionExpressions,
    PartitionDropOptions dropOptions) throws HiveException {
  try {
    Table table = getTable(dbName, tableName);
    if (!dropOptions.deleteData) {
      // When the data is kept, propagate the table's current write id and the
      // session's transaction id (if any) along with the drop options.
      AcidUtils.TableSnapshot snapshot = AcidUtils.getTableSnapshot(conf, table, true);
      if (snapshot != null) {
        dropOptions.setWriteId(snapshot.getWriteId());
      }
      long txnId = Optional.ofNullable(SessionState.get())
          .map(ss -> ss.getTxnMgr().getCurrentTxnId()).orElse(0L);
      dropOptions.setTxnId(txnId);
    }
    List<org.apache.hadoop.hive.metastore.api.Partition> partitions =
        getMSC().dropPartitions(dbName, tableName, partitionExpressions, dropOptions);
    return convertFromMetastore(table, partitions);
  } catch (NoSuchObjectException e) {
    throw new HiveException("Partition or table doesn't exist.", e);
  } catch (Exception e) {
    throw new HiveException(e.getMessage(), e);
  }
}
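A minimal caller sketch (not part of the Hive source) showing how the method above might be invoked. The DropPartitionsSketch class, the dropByExpression helper, and the "default"/"sales" names are hypothetical; the byte[] payload is assumed to be a partition filter expression already serialized by Hive's DDL layer, and Pair is assumed to be the Apache Commons Lang Pair from the signature above, with the integer key used only as a placeholder index here.

import java.util.Collections;
import java.util.List;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hive.metastore.PartitionDropOptions;
import org.apache.hadoop.hive.ql.metadata.Hive;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;

public class DropPartitionsSketch {
  // Hypothetical helper: drops every partition matching the given
  // pre-serialized filter expression, removing the data and skipping trash.
  public static List<Partition> dropByExpression(Hive db, byte[] serializedExpr)
      throws HiveException {
    PartitionDropOptions options = PartitionDropOptions.instance()
        .deleteData(true)
        .purgeData(true);
    List<Pair<Integer, byte[]>> exprs =
        Collections.singletonList(Pair.of(0, serializedExpr));
    return db.dropPartitions("default", "sales", exprs, options);
  }
}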
Use of org.apache.hadoop.hive.metastore.PartitionDropOptions in project hive by apache.
The class TestDropPartitions, method testDropPartitionDeleteDataNoPurge.
@Test
public void testDropPartitionDeleteDataNoPurge() throws Exception {
  PartitionDropOptions partDropOptions = PartitionDropOptions.instance();
  partDropOptions.deleteData(true);
  partDropOptions.purgeData(false);
  client.dropPartition(DB_NAME, TABLE_NAME, PARTITIONS[0].getValues(), partDropOptions);
  List<Partition> droppedPartitions = Lists.newArrayList(PARTITIONS[0]);
  List<Partition> remainingPartitions = Lists.newArrayList(PARTITIONS[1], PARTITIONS[2]);
  checkPartitionsAfterDelete(TABLE_NAME, droppedPartitions, remainingPartitions, true, false);
}
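Because the setters on PartitionDropOptions return the options object itself, the three statements in the test above can also be written as one chained expression. A minimal sketch, reusing the IMetaStoreClient call exercised by the test; the FluentDropSketch class, the helper name, and the "2017" partition value are hypothetical:

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.PartitionDropOptions;

public class FluentDropSketch {
  // Hypothetical helper: delete the partition's data but skip the purge,
  // so the files go to trash where trash is enabled.
  static void dropToTrash(IMetaStoreClient client, String db, String table) throws Exception {
    client.dropPartition(db, table, Arrays.asList("2017"),
        PartitionDropOptions.instance().deleteData(true).purgeData(false));
  }
}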
Use of org.apache.hadoop.hive.metastore.PartitionDropOptions in project hive by apache.
The class TestDropPartitions, method testDropPartitionDeleteDataAndPurgeExternalTable.
@Test
public void testDropPartitionDeleteDataAndPurgeExternalTable() throws Exception {
  String tableName = "external_table";
  Map<String, String> tableParams = new HashMap<>();
  tableParams.put("EXTERNAL", "true");
  createTable(tableName, getYearPartCol(), tableParams);
  String location = metaStore.getExternalWarehouseRoot() + "/externalTable/year=2017";
  Partition partition = createPartition(tableName, location, Lists.newArrayList("2017"), getYearPartCol(), null);
  PartitionDropOptions partDropOptions = PartitionDropOptions.instance();
  partDropOptions.deleteData(true);
  partDropOptions.purgeData(true);
  client.dropPartition(DB_NAME, tableName, partition.getValues(), partDropOptions);
  List<Partition> partitionsAfterDrop = client.listPartitions(DB_NAME, tableName, MAX);
  Assert.assertTrue(partitionsAfterDrop.isEmpty());
  // For an external table the partition's data is not removed even with
  // deleteData and purgeData set, so the location must still exist.
  Assert.assertTrue("The location '" + location + "' should exist.", metaStore.isPathExists(new Path(location)));
}
Use of org.apache.hadoop.hive.metastore.PartitionDropOptions in project hive by apache.
The class TestDropPartitions, method testDropPartitionNotDeleteDataPurge.
@Test
public void testDropPartitionNotDeleteDataPurge() throws Exception {
  // deleteData is false, so the partition's files must survive the drop
  // even though purgeData is set.
  PartitionDropOptions partDropOptions = PartitionDropOptions.instance();
  partDropOptions.deleteData(false);
  partDropOptions.purgeData(true);
  client.dropPartition(DB_NAME, TABLE_NAME, PARTITIONS[0].getValues(), partDropOptions);
  List<Partition> droppedPartitions = Lists.newArrayList(PARTITIONS[0]);
  List<Partition> remainingPartitions = Lists.newArrayList(PARTITIONS[1], PARTITIONS[2]);
  checkPartitionsAfterDelete(TABLE_NAME, droppedPartitions, remainingPartitions, false, false);
}
Use of org.apache.hadoop.hive.metastore.PartitionDropOptions in project hive by apache.
The class TestDropPartitions, method testDropPartitionDeleteDataAndPurge.
@Test
public void testDropPartitionDeleteDataAndPurge() throws Exception {
  PartitionDropOptions partDropOptions = PartitionDropOptions.instance();
  partDropOptions.deleteData(true);
  partDropOptions.purgeData(true);
  client.dropPartition(DB_NAME, TABLE_NAME, PARTITIONS[0].getValues(), partDropOptions);
  List<Partition> droppedPartitions = Lists.newArrayList(PARTITIONS[0]);
  List<Partition> remainingPartitions = Lists.newArrayList(PARTITIONS[1], PARTITIONS[2]);
  checkPartitionsAfterDelete(TABLE_NAME, droppedPartitions, remainingPartitions, true, true);
}