
Example 41 with WriteEntity

Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache, in class TestDbTxnManager, method testDelete.

@Test
public void testDelete() throws Exception {
    WriteEntity we = addTableOutput(WriteEntity.WriteType.DELETE);
    QueryPlan qp = new MockQueryPlan(this, HiveOperation.QUERY);
    txnMgr.openTxn(ctx, "fred");
    txnMgr.acquireLocks(qp, ctx, "fred");
    List<HiveLock> locks = ctx.getHiveLocks();
    // a single lock object wrapping one lock component for the DELETE on the table
    Assert.assertEquals(1, locks.size());
    Assert.assertEquals(1, TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
    txnMgr.commitTxn();
    locks = txnMgr.getLockManager().getLocks(false, false);
    Assert.assertEquals(0, locks.size());
}
Also used: QueryPlan (org.apache.hadoop.hive.ql.QueryPlan), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity), Test (org.junit.Test)
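
All of the TestDbTxnManager examples on this page rely on an addTableOutput fixture helper that the page does not show. A plausible sketch of it, assuming a newTable fixture method and a writeEntities set that MockQueryPlan exposes as the plan's outputs (these names are assumptions; the real helper may differ):

// Hypothetical sketch of the test fixture helper; the actual
// TestDbTxnManager implementation may differ in its details.
private WriteEntity addTableOutput(WriteEntity.WriteType writeType) throws Exception {
    Table t = newTable(false); // assumed fixture method: creates an unpartitioned test table
    WriteEntity we = new WriteEntity(t, writeType);
    writeEntities.add(we); // assumed set that MockQueryPlan reports as the plan's outputs
    return we;
}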

Example 42 with WriteEntity

Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache, in class TestDbTxnManager, method testDDLNoLock.

@Test
public void testDDLNoLock() throws Exception {
    WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL_NO_LOCK);
    QueryPlan qp = new MockQueryPlan(this, HiveOperation.CREATEDATABASE);
    txnMgr.openTxn(ctx, "fred");
    txnMgr.acquireLocks(qp, ctx, "fred");
    List<HiveLock> locks = ctx.getHiveLocks();
    Assert.assertNull(locks); // DDL_NO_LOCK acquires nothing, so no lock list is ever created
    txnMgr.rollbackTxn();
}
Also used: QueryPlan (org.apache.hadoop.hive.ql.QueryPlan), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity), Test (org.junit.Test)
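
The null lock list follows from how the transaction manager treats the DDL_NO_LOCK write type. The snippet below is an illustrative sketch of that behavior only, an assumption drawn from what the test asserts, not the actual DbTxnManager source:

// Illustrative sketch, not the real DbTxnManager code: a DDL_NO_LOCK output
// contributes no lock component, so acquireLocks never builds a lock list.
static boolean needsLockComponent(WriteEntity.WriteType writeType) {
    switch (writeType) {
        case DDL_NO_LOCK:
            return false; // explicitly opts out of locking
        default:
            return true;  // INSERT, DELETE, DDL_SHARED, ... each add a component
    }
}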

Example 43 with WriteEntity

Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache, in class TestDbTxnManager, method testSingleWriteTable.

@Test
public void testSingleWriteTable() throws Exception {
    WriteEntity we = addTableOutput(WriteEntity.WriteType.INSERT);
    QueryPlan qp = new MockQueryPlan(this, HiveOperation.QUERY);
    txnMgr.openTxn(ctx, "fred");
    txnMgr.acquireLocks(qp, ctx, "fred");
    List<HiveLock> locks = ctx.getHiveLocks();
    Assert.assertEquals(1, locks.size());
    Assert.assertEquals(1, TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
    txnMgr.commitTxn();
    locks = txnMgr.getLockManager().getLocks(false, false);
    Assert.assertEquals(0, locks.size());
}
Also used: QueryPlan (org.apache.hadoop.hive.ql.QueryPlan), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity), Test (org.junit.Test)

Example 44 with WriteEntity

Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache, in class TestDbTxnManager, method testReadWrite.

@Test
public void testReadWrite() throws Exception {
    Table t = newTable(true);
    addPartitionInput(t);
    addPartitionInput(t);
    addPartitionInput(t);
    WriteEntity we = addTableOutput(WriteEntity.WriteType.INSERT);
    QueryPlan qp = new MockQueryPlan(this, HiveOperation.QUERY);
    txnMgr.openTxn(ctx, "fred");
    txnMgr.acquireLocks(qp, ctx, "fred");
    List<HiveLock> locks = ctx.getHiveLocks();
    Assert.assertEquals(1, locks.size());
    // 4 lock components: three partition reads plus one table write
    Assert.assertEquals(4, TxnDbUtil.countLockComponents(conf, ((DbLockManager.DbHiveLock) locks.get(0)).lockId));
    txnMgr.commitTxn();
    locks = txnMgr.getLockManager().getLocks(false, false);
    Assert.assertEquals(0, locks.size());
}
Also used: Table (org.apache.hadoop.hive.ql.metadata.Table), QueryPlan (org.apache.hadoop.hive.ql.QueryPlan), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity), Test (org.junit.Test)
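
The addPartitionInput fixture helper is likewise not shown on this page. A plausible sketch, mirroring the addTableOutput sketch above (the counter field, partition column name, and readEntities set are assumptions; the real TestDbTxnManager may differ):

// Hypothetical fixture helper. Each call registers one partition ReadEntity,
// so the three calls plus the table output account for the 4 lock components
// asserted in testReadWrite.
private int nextInput = 0; // assumed counter so each call names a distinct partition

private ReadEntity addPartitionInput(Table t) throws Exception {
    Map<String, String> partSpec = new HashMap<String, String>();
    partSpec.put("ds", "day" + nextInput++); // assumed partition column
    Partition p = new Partition(t, partSpec, null); // location not needed for the mock plan
    ReadEntity re = new ReadEntity(p);
    readEntities.add(re); // assumed set that MockQueryPlan reports as the plan's inputs
    return re;
}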

Example 45 with WriteEntity

Use of org.apache.hadoop.hive.ql.hooks.WriteEntity in project hive by apache, in class ImportSemanticAnalyzer, method createRegularImportTasks.

/**
   * Create tasks for a regular import, with no replication (repl) complexity.
   * @param tblDesc
   * @param partitionDescs
   * @param isPartSpecSet
   * @param replicationSpec
   * @param table
   * @param fromURI
   * @param fs
   * @param wh
   */
private static void createRegularImportTasks(ImportTableDesc tblDesc, List<AddPartitionDesc> partitionDescs, boolean isPartSpecSet, ReplicationSpec replicationSpec, Table table, URI fromURI, FileSystem fs, Warehouse wh, EximUtil.SemanticAnalyzerWrapperContext x) throws HiveException, URISyntaxException, IOException, MetaException {
    if (table != null) {
        if (table.isPartitioned()) {
            x.getLOG().debug("table partitioned");
            for (AddPartitionDesc addPartitionDesc : partitionDescs) {
                Map<String, String> partSpec = addPartitionDesc.getPartition(0).getPartSpec();
                org.apache.hadoop.hive.ql.metadata.Partition ptn = x.getHive().getPartition(table, partSpec, false);
                if (ptn == null) {
                    x.getTasks().add(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, x));
                } else {
                    throw new SemanticException(ErrorMsg.PARTITION_EXISTS.getMsg(partSpecToString(partSpec)));
                }
            }
        } else {
            x.getLOG().debug("table non-partitioned");
            // ensure the destination is empty; this check applies only to regular import
            Path tgtPath = new Path(table.getDataLocation().toString());
            FileSystem tgtFs = FileSystem.get(tgtPath.toUri(), x.getConf());
            checkTargetLocationEmpty(tgtFs, tgtPath, replicationSpec, x);
            loadTable(fromURI, table, false, tgtPath, replicationSpec, x);
        }
        // Add the table as an output with the read-like DDL_NO_LOCK write type, since we can't overwrite any existing partitions
        x.getOutputs().add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
    } else {
        x.getLOG().debug("table " + tblDesc.getTableName() + " does not exist");
        Task<?> t = createTableTask(tblDesc, x);
        table = new Table(tblDesc.getDatabaseName(), tblDesc.getTableName());
        Database parentDb = x.getHive().getDatabase(tblDesc.getDatabaseName());
        // Since we are going to be creating a new table in a db, we should mark that db as a write entity
        // so that the auth framework can go to work there.
        x.getOutputs().add(new WriteEntity(parentDb, WriteEntity.WriteType.DDL_SHARED));
        if (isPartitioned(tblDesc)) {
            for (AddPartitionDesc addPartitionDesc : partitionDescs) {
                t.addDependentTask(addSinglePartition(fromURI, fs, tblDesc, table, wh, addPartitionDesc, replicationSpec, x));
            }
        } else {
            x.getLOG().debug("adding dependent CopyWork/MoveWork for table");
            if (tblDesc.isExternal() && (tblDesc.getLocation() == null)) {
                x.getLOG().debug("Importing in place, no emptiness check, no copying/loading");
                Path dataPath = new Path(fromURI.toString(), EximUtil.DATA_PATH_NAME);
                tblDesc.setLocation(dataPath.toString());
            } else {
                Path tablePath = null;
                if (tblDesc.getLocation() != null) {
                    tablePath = new Path(tblDesc.getLocation());
                } else {
                    tablePath = wh.getTablePath(parentDb, tblDesc.getTableName());
                }
                FileSystem tgtFs = FileSystem.get(tablePath.toUri(), x.getConf());
                checkTargetLocationEmpty(tgtFs, tablePath, replicationSpec, x);
                t.addDependentTask(loadTable(fromURI, table, false, tablePath, replicationSpec, x));
            }
        }
        x.getTasks().add(t);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Table (org.apache.hadoop.hive.ql.metadata.Table), FileSystem (org.apache.hadoop.fs.FileSystem), AddPartitionDesc (org.apache.hadoop.hive.ql.plan.AddPartitionDesc), Database (org.apache.hadoop.hive.metastore.api.Database), WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity)
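
The WriteEntity objects registered here on x.getOutputs() are what the hooks framework later hands to post-execution hooks. A minimal sketch of such a consumer, using the real ExecuteWithHookContext and HookContext APIs but with a made-up hook class name:

// Minimal sketch of a post-execution hook that consumes the WriteEntity
// outputs registered by analyzers such as ImportSemanticAnalyzer above.
// The class name is hypothetical; the interfaces are the real Hive APIs.
import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
import org.apache.hadoop.hive.ql.hooks.HookContext;
import org.apache.hadoop.hive.ql.hooks.WriteEntity;

public class LogOutputsHook implements ExecuteWithHookContext {
    @Override
    public void run(HookContext hookContext) throws Exception {
        for (WriteEntity we : hookContext.getOutputs()) {
            // Both the entity type (TABLE, PARTITION, DATABASE, ...) and the
            // write type (INSERT, DDL_NO_LOCK, DDL_SHARED, ...) are available.
            System.out.println(we.getType() + " " + we.getName() + " " + we.getWriteType());
        }
    }
}

A hook like this would typically be wired in through the hive.exec.post.hooks configuration property.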

Aggregations

WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 88 usages
Table (org.apache.hadoop.hive.ql.metadata.Table): 39 usages
ReadEntity (org.apache.hadoop.hive.ql.hooks.ReadEntity): 35 usages
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 24 usages
Partition (org.apache.hadoop.hive.ql.metadata.Partition): 24 usages
ArrayList (java.util.ArrayList): 18 usages
DDLWork (org.apache.hadoop.hive.ql.plan.DDLWork): 14 usages
Path (org.apache.hadoop.fs.Path): 13 usages
AlterTableExchangePartition (org.apache.hadoop.hive.ql.plan.AlterTableExchangePartition): 13 usages
Referenceable (org.apache.atlas.typesystem.Referenceable): 11 usages
Database (org.apache.hadoop.hive.metastore.api.Database): 11 usages
Test (org.junit.Test): 11 usages
QueryPlan (org.apache.hadoop.hive.ql.QueryPlan): 10 usages
HashMap (java.util.HashMap): 9 usages
LinkedHashMap (java.util.LinkedHashMap): 9 usages
Test (org.testng.annotations.Test): 9 usages
SQLCheckConstraint (org.apache.hadoop.hive.metastore.api.SQLCheckConstraint): 8 usages
SQLDefaultConstraint (org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint): 8 usages
SQLNotNullConstraint (org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint): 8 usages
SQLUniqueConstraint (org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint): 8 usages