Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
In class TestDbTxnManager, method testDDLExclusive.
@Test
public void testDDLExclusive() throws Exception {
  // A DDL_EXCLUSIVE write should produce exactly one lock with one lock component.
  WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL_EXCLUSIVE);
  QueryPlan qp = new MockQueryPlan(this);
  txnMgr.acquireLocks(qp, ctx, "fred");
  List<HiveLock> locks = ctx.getHiveLocks();
  Assert.assertEquals(1, locks.size());
  Assert.assertEquals(1,
      TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
  // Unlocking should leave nothing behind in the lock manager.
  txnMgr.getLockManager().unlock(locks.get(0));
  locks = txnMgr.getLockManager().getLocks(false, false);
  Assert.assertEquals(0, locks.size());
}
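The addTableOutput helper is used here but not shown in this listing. Given how addDynamicPartitionedOutput below builds its WriteEntity, a plausible reconstruction looks like this (hypothetical; the real TestDbTxnManager helper may differ in detail):

private WriteEntity addTableOutput(WriteEntity.WriteType writeType) throws Exception {
  // Hypothetical reconstruction of the helper used above: wrap a test table
  // in a WriteEntity and register it so MockQueryPlan picks it up as an output.
  WriteEntity we = new WriteEntity(newTable(false), writeType);
  writeEntities.add(we);
  return we;
}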
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
In class TestDbTxnManager, method testDDLShared.
@Test
public void testDDLShared() throws Exception {
  // A DDL_SHARED write should likewise produce one lock with one component,
  // but with shared rather than exclusive semantics.
  WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL_SHARED);
  QueryPlan qp = new MockQueryPlan(this);
  txnMgr.acquireLocks(qp, ctx, "fred");
  List<HiveLock> locks = ctx.getHiveLocks();
  Assert.assertEquals(1, locks.size());
  Assert.assertEquals(1,
      TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
  txnMgr.getLockManager().unlock(locks.get(0));
  locks = txnMgr.getLockManager().getLocks(false, false);
  Assert.assertEquals(0, locks.size());
}
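testDDLExclusive and testDDLShared drive the identical acquire/count/unlock cycle; what differs is the lock type the transaction manager derives from the WriteType. A minimal sketch of that mapping, assuming the metastore's LockType enum (illustrative only; the real logic lives in DbTxnManager.acquireLocks and covers more WriteTypes):

private static LockType lockTypeFor(WriteEntity.WriteType writeType) {
  // Illustrative mapping only; not the verbatim DbTxnManager code.
  switch (writeType) {
    case DDL_EXCLUSIVE:
      return LockType.EXCLUSIVE;   // conflicts with every other lock holder
    case DDL_SHARED:
      return LockType.SHARED_READ; // coexists with other shared holders
    case DDL_NO_LOCK:
      return null;                 // no lock component is requested at all
    default:
      return LockType.SHARED_READ;
  }
}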
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
In class TestDbTxnManager, method addDynamicPartitionedOutput.
private WriteEntity addDynamicPartitionedOutput(Table t, WriteEntity.WriteType writeType) throws Exception {
  // DummyPartition stands in for a partition whose name is not known yet,
  // as is the case with dynamic partitioning.
  DummyPartition dp = new DummyPartition(t, "no clue what I should call this");
  WriteEntity we = new WriteEntity(dp, writeType, false);
  writeEntities.add(we);
  return we;
}
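A caller-side sketch of this helper, modeled on the surrounding tests (the test name and assertions are assumptions patterned after testSingleWritePartition, not the verbatim Hive test):

@Test
public void testWriteDynamicPartition() throws Exception {
  // Assumed usage: a dynamic-partition INSERT goes through the same
  // open/acquire/commit cycle as the static-partition test below.
  WriteEntity we = addDynamicPartitionedOutput(newTable(true), WriteEntity.WriteType.INSERT);
  QueryPlan qp = new MockQueryPlan(this);
  txnMgr.openTxn(ctx, "fred");
  txnMgr.acquireLocks(qp, ctx, "fred");
  Assert.assertEquals(1, ctx.getHiveLocks().size());
  txnMgr.commitTxn();
  Assert.assertEquals(0, txnMgr.getLockManager().getLocks(false, false).size());
}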
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
In class TestDbTxnManager, method testSingleWritePartition.
@Test
public void testSingleWritePartition() throws Exception {
  // An INSERT into a single partition runs inside a transaction;
  // committing the transaction should release the lock.
  WriteEntity we = addPartitionOutput(newTable(true), WriteEntity.WriteType.INSERT);
  QueryPlan qp = new MockQueryPlan(this);
  txnMgr.openTxn(ctx, "fred");
  txnMgr.acquireLocks(qp, ctx, "fred");
  List<HiveLock> locks = ctx.getHiveLocks();
  Assert.assertEquals(1, locks.size());
  Assert.assertEquals(1,
      TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
  txnMgr.commitTxn();
  locks = txnMgr.getLockManager().getLocks(false, false);
  Assert.assertEquals(0, locks.size());
}
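addPartitionOutput is not shown in this listing; by analogy with addDynamicPartitionedOutput above, a plausible shape is the following (hypothetical reconstruction; the real helper may construct a real Partition rather than a DummyPartition):

private WriteEntity addPartitionOutput(Table t, WriteEntity.WriteType writeType) throws Exception {
  // Hypothetical reconstruction: register one partition of t as a query output.
  DummyPartition p = new DummyPartition(t, "fakePartitionName");
  WriteEntity we = new WriteEntity(p, writeType, false);
  writeEntities.add(we);
  return we;
}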
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
In class DDLTask, method dropPartitions.
private void dropPartitions(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveException {
  ReplicationSpec replicationSpec = dropTbl.getReplicationSpec();
  if (replicationSpec.isInReplicationScope()) {
    // In replication scope, drop only those partitions whose replication state
    // (tracked in the partition's parameter key values) allows replacement by
    // the event being replicated.
    for (DropTableDesc.PartSpec partSpec : dropTbl.getPartSpecs()) {
      try {
        for (Partition p : Iterables.filter(
            db.getPartitionsByFilter(tbl, partSpec.getPartSpec().getExprString()),
            replicationSpec.allowEventReplacementInto())) {
          db.dropPartition(tbl.getDbName(), tbl.getTableName(), p.getValues(), true);
        }
      } catch (NoSuchObjectException e) {
        // Ignore NoSuchObjectException: there is nothing to drop.
      } catch (Exception e) {
        throw new HiveException(e.getMessage(), e);
      }
    }
    return;
  }
  // ifExists is currently verified in DDLSemanticAnalyzer
  List<Partition> droppedParts = db.dropPartitions(dropTbl.getTableName(),
      dropTbl.getPartSpecs(),
      PartitionDropOptions.instance().deleteData(true).ifExists(true).purgeData(dropTbl.getIfPurge()));
  for (Partition partition : droppedParts) {
    console.printInfo("Dropped the partition " + partition.getName());
    // We have already locked the table, don't lock the partitions.
    addIfAbsentByName(new WriteEntity(partition, WriteEntity.WriteType.DDL_NO_LOCK));
  }
}
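addIfAbsentByName deduplicates outputs by entity name so the same partition is not recorded twice. A minimal sketch, assuming the outputs are kept in a Set<WriteEntity> field named outputs (the actual DDLTask helper may differ):

private void addIfAbsentByName(WriteEntity newEntity) {
  // Sketch under the assumption of an "outputs" Set<WriteEntity> field:
  // skip the add if an entity with the same name is already tracked.
  for (WriteEntity existing : outputs) {
    if (existing.getName().equalsIgnoreCase(newEntity.getName())) {
      return;
    }
  }
  outputs.add(newEntity);
}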