Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
The class TestDbTxnManager, method testDelete.
@Test
public void testDelete() throws Exception {
  WriteEntity we = addTableOutput(WriteEntity.WriteType.DELETE);
  QueryPlan qp = new MockQueryPlan(this, HiveOperation.QUERY);
  txnMgr.openTxn(ctx, "fred");
  txnMgr.acquireLocks(qp, ctx, "fred");
  List<HiveLock> locks = ctx.getHiveLocks();
  Assert.assertEquals(1, locks.size());
  Assert.assertEquals(1, TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
  txnMgr.commitTxn();
  locks = txnMgr.getLockManager().getLocks(false, false);
  Assert.assertEquals(0, locks.size());
}
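The tests in this class call an addTableOutput helper that is not part of the excerpt. A minimal sketch of what such a helper might look like, assuming a pre-built Table field named table and a writeEntities collection that MockQueryPlan exposes as the plan's outputs (both field names are assumptions for illustration, not the actual TestDbTxnManager members):

// Hypothetical sketch of the test helper: wrap the test table in a WriteEntity
// with the requested write type and register it as a query output.
// The fields "table" and "writeEntities" are assumed names, not Hive's own.
private WriteEntity addTableOutput(WriteEntity.WriteType writeType) {
  WriteEntity we = new WriteEntity(table, writeType);
  writeEntities.add(we);
  return we;
}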
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
The class TestDbTxnManager, method testDDLNoLock.
@Test
public void testDDLNoLock() throws Exception {
  WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL_NO_LOCK);
  QueryPlan qp = new MockQueryPlan(this, HiveOperation.CREATEDATABASE);
  txnMgr.openTxn(ctx, "fred");
  txnMgr.acquireLocks(qp, ctx, "fred");
  List<HiveLock> locks = ctx.getHiveLocks();
  Assert.assertNull(locks);
  txnMgr.rollbackTxn();
}
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
The class TestDbTxnManager, method testSingleWriteTable.
@Test
public void testSingleWriteTable() throws Exception {
  WriteEntity we = addTableOutput(WriteEntity.WriteType.INSERT);
  QueryPlan qp = new MockQueryPlan(this, HiveOperation.QUERY);
  txnMgr.openTxn(ctx, "fred");
  txnMgr.acquireLocks(qp, ctx, "fred");
  List<HiveLock> locks = ctx.getHiveLocks();
  Assert.assertEquals(1, locks.size());
  Assert.assertEquals(1, TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
  txnMgr.commitTxn();
  locks = txnMgr.getLockManager().getLocks(false, false);
  Assert.assertEquals(0, locks.size());
}
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
The class TestDbTxnManager, method testReadWrite.
@Test
public void testReadWrite() throws Exception {
  Table t = newTable(true);
  addPartitionInput(t);
  addPartitionInput(t);
  addPartitionInput(t);
  WriteEntity we = addTableOutput(WriteEntity.WriteType.INSERT);
  QueryPlan qp = new MockQueryPlan(this, HiveOperation.QUERY);
  txnMgr.openTxn(ctx, "fred");
  txnMgr.acquireLocks(qp, ctx, "fred");
  List<HiveLock> locks = ctx.getHiveLocks();
  Assert.assertEquals(1, locks.size());
  Assert.assertEquals(4, TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
  txnMgr.commitTxn();
  locks = txnMgr.getLockManager().getLocks(false, false);
  Assert.assertEquals(0, locks.size());
}
Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache.
The class ImportSemanticAnalyzer, method createRegularImportTasks.
/**
 * Create tasks for regular import, no repl complexity
 * @param tblDesc
 * @param partitionDescs
 * @param isPartSpecSet
 * @param replicationSpec
 * @param table
 * @param fromURI
 * @param fs
 * @param wh
 * @param x
 */
private static void createRegularImportTasks(ImportTableDesc tblDesc, List<AddPartitionDesc> partitionDescs, boolean isPartSpecSet, ReplicationSpec replicationSpec, Table table, URI fromURI, FileSystem fs, Warehouse wh, EximUtil.SemanticAnalyzerWrapperContext x) throws HiveException, URISyntaxException, IOException, MetaException {
  if (table != null) {
    if (table.isPartitioned()) {
      x.getLOG().debug("table partitioned");
      for (AddPartitionDesc addPartitionDesc : partitionDescs) {
        Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec();
        org.apache.hadoop.hive.ql.metadata.Partition ptn = null;
        if ((ptn = x.getHive().getPartition(table, partSpec, false)) == null) {
          x.getTasks().add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, x));
        } else {
          throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(partSpecToString(partSpec)));
        }
      }
    } else {
      x.getLOG().debug("table non-partitioned");
      // ensure if destination is not empty only for regular import
      Path tgtPath = new Path(table.getDataLocation().toString());
      FileSystem tgtFs = FileSystem.get(tgtPath.toUri(), x.getConf());
      checkTargetLocationEmpty(tgtFs, tgtPath, replicationSpec, x);
      loadTable(fromURI, table, false, tgtPath, replicationSpec, x);
    }
    // Set this to read because we can't overwrite any existing partitions
    x.getOutputs().add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
  } else {
    x.getLOG().debug("table " + tblDesc.getTableName() + " does not exist");
    Task<?> t = createTableTask(tblDesc, x);
    table = new Table(tblDesc.getDatabaseName(), tblDesc.getTableName());
    Database parentDb = x.getHive().getDatabase(tblDesc.getDatabaseName());
    // Since we are going to be creating a new table in a db, we should mark that db as a write entity
    // so that the auth framework can go to work there.
    x.getOutputs().add(new WriteEntity(parentDb, WriteEntity.WriteType.DDL_SHARED));
    if (isPartitioned(tblDesc)) {
      for (AddPartitionDesc addPartitionDesc : partitionDescs) {
        t.addDependentTask(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, x));
      }
    } else {
      x.getLOG().debug("adding dependent CopyWork/MoveWork for table");
      if (tblDesc.isExternal() && (tblDesc.getLocation() == null)) {
        x.getLOG().debug("Importing in place, no emptiness check, no copying/loading");
        Path dataPath = new Path(fromURI.toString(), EximUtil.DATA_PATH_NAME);
        tblDesc.setLocation(dataPath.toString());
      } else {
        Path tablePath = null;
        if (tblDesc.getLocation() != null) {
          tablePath = new Path(tblDesc.getLocation());
        } else {
          tablePath = wh.getTablePath(parentDb, tblDesc.getTableName());
        }
        FileSystem tgtFs = FileSystem.get(tablePath.toUri(), x.getConf());
        checkTargetLocationEmpty(tgtFs, tablePath, replicationSpec, x);
        t.addDependentTask(loadTable(fromURI, table, false, tablePath, replicationSpec, x));
      }
    }
    x.getTasks().add(t);
  }
}
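The WriteEntity objects added to x.getOutputs() above carry a WriteType that downstream consumers (lock manager, authorization hooks) inspect. A simplified, hypothetical helper, not actual Hive code, showing how a consumer could use getWriteType() to decide which outputs would ask for a lock component, consistent with the testDDLNoLock behaviour above where DDL_NO_LOCK acquires no locks:

// Hypothetical helper for illustration only (not part of Hive): count the
// outputs whose write type would request a lock component. DDL_NO_LOCK, as
// used for the existing-table import path above, requests none.
static int countLockableOutputs(java.util.Set<WriteEntity> outputs) {
  int n = 0;
  for (WriteEntity output : outputs) {
    if (output.getWriteType() != WriteEntity.WriteType.DDL_NO_LOCK) {
      n++;
    }
  }
  return n;
}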