
Example 31 with QueryPlan

Use of org.apache.hadoop.hive.ql.QueryPlan in project hive by apache.

In class TestDbTxnManager, method testDDLShared.

@Test
public void testDDLShared() throws Exception {
    // A shared DDL write should translate into exactly one shared lock component.
    WriteEntity we = addTableOutput(WriteEntity.WriteType.DDL_SHARED);
    QueryPlan qp = new MockQueryPlan(this);
    txnMgr.acquireLocks(qp, ctx, "fred");
    List<HiveLock> locks = ctx.getHiveLocks();
    Assert.assertEquals(1, locks.size());
    Assert.assertEquals(1, TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
    txnMgr.getLockManager().unlock(locks.get(0));
    locks = txnMgr.getLockManager().getLocks(false, false);
    Assert.assertEquals(0, locks.size());
}
Also used: QueryPlan (org.apache.hadoop.hive.ql.QueryPlan) WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity) Test (org.junit.Test)
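
For context, the txnMgr exercised above is normally obtained through TxnManagerFactory rather than constructed directly. A minimal sketch, assuming hive-exec is on the classpath; setting hive.txn.manager programmatically here is an illustrative shortcut for what would normally live in hive-site.xml:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory;

public class TxnManagerSetup {
    public static void main(String[] args) throws Exception {
        // Point Hive at the metastore-backed transaction manager so that
        // acquireLocks() goes through DbLockManager, as in the test above.
        HiveConf conf = new HiveConf();
        conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER,
                "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
        HiveTxnManager txnMgr =
                TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
        System.out.println("Using txn manager: " + txnMgr.getClass().getName());
    }
}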

Example 32 with QueryPlan

Use of org.apache.hadoop.hive.ql.QueryPlan in project hive by apache.

In class TestDbTxnManager, method testHeartbeater.

@Test
public void testHeartbeater() throws Exception {
    Assert.assertTrue(txnMgr instanceof DbTxnManager);
    addTableInput();
    LockException exception = null;
    QueryPlan qp = new MockQueryPlan(this);
    // Case 1: If there's no delay for the heartbeat, the txn should be able to commit.
    txnMgr.openTxn(ctx, "fred");
    // Heartbeat started.
    txnMgr.acquireLocks(qp, ctx, "fred");
    runReaper();
    try {
        txnMgr.commitTxn();
    } catch (LockException e) {
        exception = e;
    }
    Assert.assertNull("Txn commit should be successful", exception);
    exception = null;
    // Case 2: If there's a delay for the heartbeat, but the delay is within the reaper's tolerance,
    //         then the txn should still be able to commit.
    // Start the heartbeat after a delay that is shorter than HIVE_TXN_TIMEOUT.
    ((DbTxnManager) txnMgr).openTxn(ctx, "tom", HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS) / 2);
    txnMgr.acquireLocks(qp, ctx, "tom");
    runReaper();
    try {
        txnMgr.commitTxn();
    } catch (LockException e) {
        exception = e;
    }
    Assert.assertNull("Txn commit should also be successful", exception);
    exception = null;
    // Case 3: If the heartbeat is delayed long enough to trigger the reaper,
    //         then the txn will time out and be aborted.
    // Start the heartbeat after a delay that exceeds HIVE_TXN_TIMEOUT, so no
    // heartbeat arrives before the reaper runs.
    ((DbTxnManager) txnMgr).openTxn(ctx, "jerry", HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS) * 2);
    txnMgr.acquireLocks(qp, ctx, "jerry");
    Thread.sleep(HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS));
    runReaper();
    try {
        txnMgr.commitTxn();
    } catch (LockException e) {
        exception = e;
    }
    Assert.assertNotNull("Txn should have been aborted", exception);
    Assert.assertEquals(ErrorMsg.TXN_ABORTED, exception.getCanonicalErrorMsg());
}
Also used: QueryPlan (org.apache.hadoop.hive.ql.QueryPlan) Test (org.junit.Test)
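
The three cases above hinge on how the heartbeat start delay compares to HIVE_TXN_TIMEOUT. A standalone sketch of that arithmetic, with an illustrative timeout value standing in for the real HiveConf lookup:

import java.util.concurrent.TimeUnit;

public class HeartbeatTiming {
    public static void main(String[] args) {
        // Illustrative stand-in for HiveConf.getTimeVar(conf,
        // HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS).
        long txnTimeoutMs = TimeUnit.SECONDS.toMillis(300);

        long tolerableDelay = txnTimeoutMs / 2; // Case 2: reaper still sees a live txn, commit succeeds
        long fatalDelay = txnTimeoutMs * 2;     // Case 3: reaper fires first, txn is aborted

        System.out.printf("timeout=%dms, tolerable=%dms, fatal=%dms%n",
                txnTimeoutMs, tolerableDelay, fatalDelay);
    }
}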

Example 33 with QueryPlan

Use of org.apache.hadoop.hive.ql.QueryPlan in project hive by apache.

In class TestDbTxnManager, method testSingleWritePartition.

@Test
public void testSingleWritePartition() throws Exception {
    // A single-partition INSERT should need exactly one lock component.
    WriteEntity we = addPartitionOutput(newTable(true), WriteEntity.WriteType.INSERT);
    QueryPlan qp = new MockQueryPlan(this);
    txnMgr.openTxn(ctx, "fred");
    txnMgr.acquireLocks(qp, ctx, "fred");
    List<HiveLock> locks = ctx.getHiveLocks();
    Assert.assertEquals(1, locks.size());
    Assert.assertEquals(1, TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId));
    txnMgr.commitTxn();
    locks = txnMgr.getLockManager().getLocks(false, false);
    Assert.assertEquals(0, locks.size());
}
Also used: QueryPlan (org.apache.hadoop.hive.ql.QueryPlan) WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity) Test (org.junit.Test)
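
Outside of tests, the open/acquire/commit sequence above is typically wrapped so that a failure rolls the transaction back. A sketch of that caller-side pattern; the rollback-on-LockException handling is an assumption about usage, not something the test asserts:

import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.QueryPlan;
import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
import org.apache.hadoop.hive.ql.lockmgr.LockException;

public class TxnLifecycle {
    // Open a txn, lock, run the work, and commit; roll back if anything throws.
    static void runInTxn(HiveTxnManager txnMgr, QueryPlan qp, Context ctx,
            String user) throws LockException {
        txnMgr.openTxn(ctx, user);
        try {
            txnMgr.acquireLocks(qp, ctx, user);
            // ... execute the query here ...
            txnMgr.commitTxn();
        } catch (LockException e) {
            txnMgr.rollbackTxn();
            throw e;
        }
    }
}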

Example 34 with QueryPlan

Use of org.apache.hadoop.hive.ql.QueryPlan in project hive by apache.

In class ATSHook, method run.

@Override
public void run(final HookContext hookContext) throws Exception {
    final long currentTime = System.currentTimeMillis();
    final HiveConf conf = new HiveConf(hookContext.getConf());
    final QueryState queryState = hookContext.getQueryState();
    final String queryId = queryState.getQueryId();
    final Map<String, Long> durations = new HashMap<String, Long>();
    for (String key : hookContext.getPerfLogger().getEndTimes().keySet()) {
        durations.put(key, hookContext.getPerfLogger().getDuration(key));
    }
    try {
        setupAtsExecutor(conf);
        final String domainId = createOrGetDomain(hookContext);
        executor.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    QueryPlan plan = hookContext.getQueryPlan();
                    if (plan == null) {
                        return;
                    }
                    String queryId = plan.getQueryId();
                    String opId = hookContext.getOperationId();
                    long queryStartTime = plan.getQueryStartTime();
                    String user = hookContext.getUgi().getShortUserName();
                    String requestuser = hookContext.getUserName();
                    if (hookContext.getUserName() == null) {
                        requestuser = hookContext.getUgi().getUserName();
                    }
                    int numMrJobs = Utilities.getMRTasks(plan.getRootTasks()).size();
                    int numTezJobs = Utilities.getTezTasks(plan.getRootTasks()).size();
                    if (numMrJobs + numTezJobs <= 0) {
                        // ignore client only queries
                        return;
                    }
                    switch(hookContext.getHookType()) {
                        case PRE_EXEC_HOOK:
                            ExplainConfiguration config = new ExplainConfiguration();
                            config.setFormatted(true);
                            ExplainWork work = new ExplainWork(
                                    null,                 // resFile
                                    null,                 // pCtx
                                    plan.getRootTasks(),  // rootTasks
                                    plan.getFetchTask(),  // fetchTask
                                    null,                 // analyzer
                                    config,               // explainConfig
                                    null);                // cboInfo
                            @SuppressWarnings("unchecked") ExplainTask explain = (ExplainTask) TaskFactory.get(work, conf);
                            explain.initialize(queryState, plan, null, null);
                            String query = plan.getQueryStr();
                            JSONObject explainPlan = explain.getJSONPlan(null, work);
                            String logID = conf.getLogIdVar(hookContext.getSessionId());
                            List<String> tablesRead = getTablesFromEntitySet(hookContext.getInputs());
                            List<String> tablesWritten = getTablesFromEntitySet(hookContext.getOutputs());
                            String executionMode = getExecutionMode(plan).name();
                            String hiveInstanceAddress = hookContext.getHiveInstanceAddress();
                            if (hiveInstanceAddress == null) {
                                hiveInstanceAddress = InetAddress.getLocalHost().getHostAddress();
                            }
                            String hiveInstanceType = hookContext.isHiveServerQuery() ? "HS2" : "CLI";
                            ApplicationId llapId = determineLlapId(conf, plan);
                            fireAndForget(createPreHookEvent(queryId, query, explainPlan, queryStartTime, user, requestuser, numMrJobs, numTezJobs, opId, hookContext.getIpAddress(), hiveInstanceAddress, hiveInstanceType, hookContext.getSessionId(), logID, hookContext.getThreadId(), executionMode, tablesRead, tablesWritten, conf, llapId, domainId));
                            break;
                        case POST_EXEC_HOOK:
                            fireAndForget(createPostHookEvent(queryId, currentTime, user, requestuser, true, opId, durations, domainId));
                            break;
                        case ON_FAILURE_HOOK:
                            fireAndForget(createPostHookEvent(queryId, currentTime, user, requestuser, false, opId, durations, domainId));
                            break;
                        default:
                            //ignore
                            break;
                    }
                } catch (Exception e) {
                    LOG.warn("Failed to submit plan to ATS for " + queryId, e);
                }
            }
        });
    } catch (Exception e) {
        LOG.warn("Failed to submit to ATS for " + queryId, e);
    }
}
Also used: ExplainConfiguration (org.apache.hadoop.hive.ql.parse.ExplainConfiguration) ExplainTask (org.apache.hadoop.hive.ql.exec.ExplainTask) HashMap (java.util.HashMap) LinkedHashMap (java.util.LinkedHashMap) ExplainWork (org.apache.hadoop.hive.ql.plan.ExplainWork) QueryState (org.apache.hadoop.hive.ql.QueryState) QueryPlan (org.apache.hadoop.hive.ql.QueryPlan) IOException (java.io.IOException) JSONObject (org.json.JSONObject) HiveConf (org.apache.hadoop.hive.conf.HiveConf) ArrayList (java.util.ArrayList) List (java.util.List) ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)
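
ATSHook only runs if it is registered on the execution hook chains. A minimal sketch of registering it programmatically; in practice these properties (hive.exec.pre.hooks and friends) are usually set in hive-site.xml rather than in code:

import org.apache.hadoop.hive.conf.HiveConf;

public class RegisterAtsHook {
    public static void main(String[] args) {
        // ATSHook implements ExecuteWithHookContext, so the same class can be
        // registered on the pre-, post-, and failure-hook chains.
        HiveConf conf = new HiveConf();
        conf.setVar(HiveConf.ConfVars.PREEXECHOOKS,
                "org.apache.hadoop.hive.ql.hooks.ATSHook");
        conf.setVar(HiveConf.ConfVars.POSTEXECHOOKS,
                "org.apache.hadoop.hive.ql.hooks.ATSHook");
        conf.setVar(HiveConf.ConfVars.ONFAILUREHOOKS,
                "org.apache.hadoop.hive.ql.hooks.ATSHook");
        System.out.println("pre hooks: " + conf.getVar(HiveConf.ConfVars.PREEXECHOOKS));
    }
}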

Aggregations

QueryPlan (org.apache.hadoop.hive.ql.QueryPlan): 34 usages
Test (org.junit.Test): 21 usages
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 11 usages
WriteEntity (org.apache.hadoop.hive.ql.hooks.WriteEntity): 10 usages
List (java.util.List): 7 usages
Driver (org.apache.hadoop.hive.ql.Driver): 6 usages
IOException (java.io.IOException): 4 usages
LinkedHashMap (java.util.LinkedHashMap): 4 usages
Table (org.apache.hadoop.hive.ql.metadata.Table): 4 usages
SessionState (org.apache.hadoop.hive.ql.session.SessionState): 4 usages
LogHelper (org.apache.hadoop.hive.ql.session.SessionState.LogHelper): 4 usages
FileSystem (org.apache.hadoop.fs.FileSystem): 3 usages
Path (org.apache.hadoop.fs.Path): 3 usages
Context (org.apache.hadoop.hive.ql.Context): 3 usages
TezTask (org.apache.hadoop.hive.ql.exec.tez.TezTask): 3 usages
ArrayList (java.util.ArrayList): 2 usages
HashMap (java.util.HashMap): 2 usages
Map (java.util.Map): 2 usages
LlapIOCounters (org.apache.hadoop.hive.llap.counters.LlapIOCounters): 2 usages
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 2 usages