
Example 56 with CommandProcessorResponse

use of org.apache.hadoop.hive.ql.processors.CommandProcessorResponse in project hive by apache.

From the class GenericUDTFGetSplits, the method createPlanFragment.

public PlanFragment createPlanFragment(String query, int num, ApplicationId splitsAppId) throws HiveException {
    HiveConf conf = new HiveConf(SessionState.get().getConf());
    HiveConf.setVar(conf, ConfVars.HIVEFETCHTASKCONVERSION, "none");
    HiveConf.setVar(conf, ConfVars.HIVEQUERYRESULTFILEFORMAT, PlanUtils.LLAP_OUTPUT_FORMAT_KEY);
    String originalMode = HiveConf.getVar(conf, ConfVars.HIVE_EXECUTION_MODE);
    HiveConf.setVar(conf, ConfVars.HIVE_EXECUTION_MODE, "llap");
    HiveConf.setBoolVar(conf, ConfVars.HIVE_TEZ_GENERATE_CONSISTENT_SPLITS, true);
    HiveConf.setBoolVar(conf, ConfVars.LLAP_CLIENT_CONSISTENT_SPLITS, true);
    conf.setBoolean(TezSplitGrouper.TEZ_GROUPING_NODE_LOCAL_ONLY, true);
    // Tez/LLAP requires RPC query plan
    HiveConf.setBoolVar(conf, ConfVars.HIVE_RPC_QUERY_PLAN, true);
    try {
        jc = DagUtils.getInstance().createConfiguration(conf);
    } catch (IOException e) {
        throw new HiveException(e);
    }
    // Instantiate Driver to compile the query passed in.
    // This UDF is running as part of an existing query, which may already be using the
    // SessionState TxnManager. If this new Driver also tries to use the same TxnManager
    // then this may mess up the existing state of the TxnManager.
    // So initialize the new Driver with a new TxnManager so that it does not use the
    // Session TxnManager that is already in use.
    HiveTxnManager txnManager = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    Driver driver = new Driver(new QueryState.Builder().withHiveConf(conf).nonIsolated().build(), null, null, txnManager);
    DriverCleanup driverCleanup = new DriverCleanup(driver, txnManager, splitsAppId.toString());
    boolean needsCleanup = true;
    try {
        CommandProcessorResponse cpr = driver.compileAndRespond(query);
        if (cpr.getResponseCode() != 0) {
            throw new HiveException("Failed to compile query: " + cpr.getException());
        }
        QueryPlan plan = driver.getPlan();
        List<Task<?>> roots = plan.getRootTasks();
        Schema schema = convertSchema(plan.getResultSchema());
        if (roots == null || roots.size() != 1 || !(roots.get(0) instanceof TezTask)) {
            throw new HiveException("Was expecting a single TezTask.");
        }
        TezWork tezWork = ((TezTask) roots.get(0)).getWork();
        if (tezWork.getAllWork().size() != 1) {
            String tableName = "table_" + UUID.randomUUID().toString().replaceAll("[^A-Za-z0-9 ]", "");
            String ctas = "create temporary table " + tableName + " as " + query;
            LOG.info("Materializing the query for LLAPIF; CTAS: " + ctas);
            driver.releaseResources();
            HiveConf.setVar(conf, ConfVars.HIVE_EXECUTION_MODE, originalMode);
            cpr = driver.run(ctas, false);
            if (cpr.getResponseCode() != 0) {
                throw new HiveException("Failed to create temp table: " + cpr.getException());
            }
            HiveConf.setVar(conf, ConfVars.HIVE_EXECUTION_MODE, "llap");
            query = "select * from " + tableName;
            cpr = driver.compileAndRespond(query);
            if (cpr.getResponseCode() != 0) {
                throw new HiveException("Failed to create temp table: " + cpr.getException());
            }
            plan = driver.getPlan();
            roots = plan.getRootTasks();
            schema = convertSchema(plan.getResultSchema());
            if (roots == null || roots.size() != 1 || !(roots.get(0) instanceof TezTask)) {
                throw new HiveException("Was expecting a single TezTask.");
            }
            tezWork = ((TezTask) roots.get(0)).getWork();
        } else {
            // The read will have READ_COMMITTED level semantics.
            try {
                driver.lockAndRespond();
            } catch (CommandProcessorResponse cpr1) {
                throw new HiveException("Failed to acquire locks", cpr1);
            }
            // Attach the resources to the session cleanup.
            SessionState.get().addCleanupItem(driverCleanup);
            needsCleanup = false;
        }
        // Pass the ValidTxnList and ValidTxnWriteIdList snapshot configurations corresponding to the input query
        HiveConf driverConf = driver.getConf();
        String validTxnString = driverConf.get(ValidTxnList.VALID_TXNS_KEY);
        if (validTxnString != null) {
            jc.set(ValidTxnList.VALID_TXNS_KEY, validTxnString);
        }
        String validWriteIdString = driverConf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY);
        if (validWriteIdString != null) {
            jc.set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, validWriteIdString);
        }
        return new PlanFragment(tezWork, schema, jc);
    } finally {
        if (needsCleanup) {
            if (driverCleanup != null) {
                try {
                    driverCleanup.close();
                } catch (IOException err) {
                    throw new HiveException(err);
                }
            } else if (driver != null) {
                driver.close();
                driver.destroy();
            }
        }
    }
}
Also used : TezTask(org.apache.hadoop.hive.ql.exec.tez.TezTask) Task(org.apache.hadoop.hive.ql.exec.Task) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) TaskSpecBuilder(org.apache.tez.dag.api.TaskSpecBuilder) CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) FieldSchema(org.apache.hadoop.hive.metastore.api.FieldSchema) Schema(org.apache.hadoop.hive.llap.Schema) Driver(org.apache.hadoop.hive.ql.Driver) IOException(java.io.IOException) QueryPlan(org.apache.hadoop.hive.ql.QueryPlan) TezTask(org.apache.hadoop.hive.ql.exec.tez.TezTask) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) HiveConf(org.apache.hadoop.hive.conf.HiveConf) TezWork(org.apache.hadoop.hive.ql.plan.TezWork)
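
A minimal caller-side sketch (not taken from the excerpt above): GenericUDTFGetSplits is the UDTF behind the split-generation function that LLAP external clients invoke, commonly registered as get_splits(query, numSplits). Assuming a Driver set up like the ones in these examples, invoking it and checking the resulting CommandProcessorResponse could look roughly like this; the table name and the exact function registration are assumptions, not taken from the excerpt.

// hypothetical caller; mirrors the response-code checks used inside createPlanFragment above
CommandProcessorResponse cpr = driver.run("select get_splits('select * from some_table', 5)");
if (cpr.getResponseCode() != 0) {
    throw new HiveException("get_splits failed: " + cpr.getErrorMessage(), cpr.getException());
}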

Example 57 with CommandProcessorResponse

use of org.apache.hadoop.hive.ql.processors.CommandProcessorResponse in project hive by apache.

From the class Driver, the method compileInternal.

private void compileInternal(String command, boolean deferClose) throws CommandProcessorResponse {
    Metrics metrics = MetricsFactory.getInstance();
    if (metrics != null) {
        metrics.incrementCounter(MetricsConstant.WAITING_COMPILE_OPS, 1);
    }
    PerfLogger perfLogger = SessionState.getPerfLogger();
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.WAIT_COMPILE);
    final ReentrantLock compileLock = tryAcquireCompileLock(isParallelEnabled, command);
    perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.WAIT_COMPILE);
    if (metrics != null) {
        metrics.decrementCounter(MetricsConstant.WAITING_COMPILE_OPS, 1);
    }
    if (compileLock == null) {
        throw createProcessorResponse(ErrorMsg.COMPILE_LOCK_TIMED_OUT.getErrorCode());
    }
    try {
        compile(command, true, deferClose);
    } catch (CommandProcessorResponse cpr) {
        try {
            releaseLocksAndCommitOrRollback(false);
        } catch (LockException e) {
            LOG.warn("Exception in releasing locks. " + org.apache.hadoop.util.StringUtils.stringifyException(e));
        }
        throw cpr;
    } finally {
        compileLock.unlock();
    }
    // Save compile-time PerfLogging for WebUI.
    // Execution-time Perf logs are done by either another thread's PerfLogger
    // or a reset PerfLogger.
    queryDisplay.setPerfLogStarts(QueryDisplay.Phase.COMPILATION, perfLogger.getStartTimes());
    queryDisplay.setPerfLogEnds(QueryDisplay.Phase.COMPILATION, perfLogger.getEndTimes());
}
Also used : ReentrantLock(java.util.concurrent.locks.ReentrantLock) Metrics(org.apache.hadoop.hive.common.metrics.common.Metrics) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger)
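
The example above shows why compileInternal declares throws CommandProcessorResponse: in this Hive version the response class doubles as a checked exception, and compile failures are turned back into response codes by the public entry points. A session-level caller of compileAndRespond therefore usually only inspects the returned object. The sketch below is a hedged approximation of that pattern; the HiveSQLException mapping is an assumption modeled on how HiveServer2 reports compile errors, not code from this excerpt.

CommandProcessorResponse cpr = driver.compileAndRespond(statement);
if (cpr.getResponseCode() != 0) {
    // propagate the SQLState and vendor code that compileInternal() recorded on failure
    throw new HiveSQLException("Error while compiling statement: " + cpr.getErrorMessage(),
            cpr.getSQLState(), cpr.getResponseCode());
}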

Example 58 with CommandProcessorResponse

use of org.apache.hadoop.hive.ql.processors.CommandProcessorResponse in project hive by apache.

From the class Driver, the method compile.

// deferClose indicates whether the close/destroy should be deferred when the process has been
// interrupted; it should be set to true if compile is called from within another method, like
// runInternal, which defers the close and handles it itself.
private void compile(String command, boolean resetTaskIds, boolean deferClose) throws CommandProcessorResponse {
    PerfLogger perfLogger = SessionState.getPerfLogger(true);
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DRIVER_RUN);
    perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.COMPILE);
    lDrvState.stateLock.lock();
    try {
        lDrvState.driverState = DriverState.COMPILING;
    } finally {
        lDrvState.stateLock.unlock();
    }
    command = new VariableSubstitution(new HiveVariableSource() {

        @Override
        public Map<String, String> getHiveVariable() {
            return SessionState.get().getHiveVariables();
        }
    }).substitute(conf, command);
    String queryStr = command;
    try {
        // the command should be redacted to avoid logging sensitive data
        queryStr = HookUtils.redactLogString(conf, command);
    } catch (Exception e) {
        LOG.warn("WARNING! Query command could not be redacted.", e);
    }
    checkInterrupted("at beginning of compilation.", null, null);
    if (ctx != null && ctx.getExplainAnalyze() != AnalyzeState.RUNNING) {
        // close the existing ctx, etc. before compiling a new query, but do not destroy the driver
        closeInProcess(false);
    }
    if (resetTaskIds) {
        TaskFactory.resetId();
    }
    LockedDriverState.setLockedDriverState(lDrvState);
    String queryId = queryState.getQueryId();
    if (ctx != null) {
        setTriggerContext(queryId);
    }
    // save some info for webUI for use after plan is freed
    this.queryDisplay.setQueryStr(queryStr);
    this.queryDisplay.setQueryId(queryId);
    LOG.info("Compiling command(queryId=" + queryId + "): " + queryStr);
    conf.setQueryString(queryStr);
    // FIXME: side effect will leave the last query set at the session level
    SessionState.get().getConf().setQueryString(queryStr);
    SessionState.get().setupQueryCurrentTimestamp();
    // Whether any error occurred during query compilation. Used for query lifetime hook.
    boolean compileError = false;
    boolean parseError = false;
    try {
        // Initialize the transaction manager.  This must be done before analyze is called.
        if (initTxnMgr != null) {
            queryTxnMgr = initTxnMgr;
        } else {
            queryTxnMgr = SessionState.get().initTxnMgr(conf);
        }
        if (queryTxnMgr instanceof Configurable) {
            ((Configurable) queryTxnMgr).setConf(conf);
        }
        queryState.setTxnManager(queryTxnMgr);
        // In case the user hits Ctrl-C twice to kill the Hive CLI JVM, we want to release locks.
        // If compile is called multiple times, clear the old shutdown hook first.
        ShutdownHookManager.removeShutdownHook(shutdownRunner);
        final HiveTxnManager txnMgr = queryTxnMgr;
        shutdownRunner = new Runnable() {

            @Override
            public void run() {
                try {
                    releaseLocksAndCommitOrRollback(false, txnMgr);
                } catch (LockException e) {
                    LOG.warn("Exception when releasing locks in ShutdownHook for Driver: " + e.getMessage());
                }
            }
        };
        ShutdownHookManager.addShutdownHook(shutdownRunner, SHUTDOWN_HOOK_PRIORITY);
        checkInterrupted("before parsing and analysing the query", null, null);
        if (ctx == null) {
            ctx = new Context(conf);
            setTriggerContext(queryId);
        }
        ctx.setRuntimeStatsSource(runtimeStatsSource);
        ctx.setCmd(command);
        ctx.setHDFSCleanup(true);
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.PARSE);
        // Trigger query hook before compilation
        hookRunner.runBeforeParseHook(command);
        ASTNode tree;
        try {
            tree = ParseUtils.parse(command, ctx);
        } catch (ParseException e) {
            parseError = true;
            throw e;
        } finally {
            hookRunner.runAfterParseHook(command, parseError);
        }
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.PARSE);
        hookRunner.runBeforeCompileHook(command);
        perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.ANALYZE);
        // Flush the metastore cache.  This assures that we don't pick up objects from a previous
        // query running in this same thread.  This has to be done after we get our semantic
        // analyzer (this is when the connection to the metastore is made) but before we analyze,
        // because at that point we need access to the objects.
        Hive.get().getMSC().flushCache();
        BaseSemanticAnalyzer sem;
        // Do semantic analysis and plan generation
        if (hookRunner.hasPreAnalyzeHooks()) {
            HiveSemanticAnalyzerHookContext hookCtx = new HiveSemanticAnalyzerHookContextImpl();
            hookCtx.setConf(conf);
            hookCtx.setUserName(userName);
            hookCtx.setIpAddress(SessionState.get().getUserIpAddress());
            hookCtx.setCommand(command);
            hookCtx.setHiveOperation(queryState.getHiveOperation());
            tree = hookRunner.runPreAnalyzeHooks(hookCtx, tree);
            sem = SemanticAnalyzerFactory.get(queryState, tree);
            openTransaction();
            sem.analyze(tree, ctx);
            hookCtx.update(sem);
            hookRunner.runPostAnalyzeHooks(hookCtx, sem.getAllRootTasks());
        } else {
            sem = SemanticAnalyzerFactory.get(queryState, tree);
            openTransaction();
            sem.analyze(tree, ctx);
        }
        LOG.info("Semantic Analysis Completed");
        // Retrieve information about cache usage for the query.
        if (conf.getBoolVar(HiveConf.ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED)) {
            cacheUsage = sem.getCacheUsage();
        }
        // validate the plan
        sem.validate();
        perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.ANALYZE);
        checkInterrupted("after analyzing query.", null, null);
        // get the output schema
        schema = getSchema(sem, conf);
        plan = new QueryPlan(queryStr, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId, queryState.getHiveOperation(), schema);
        conf.set("mapreduce.workflow.id", "hive_" + queryId);
        conf.set("mapreduce.workflow.name", queryStr);
        // initialize FetchTask right here
        if (plan.getFetchTask() != null) {
            plan.getFetchTask().initialize(queryState, plan, null, ctx.getOpContext());
        }
        // do the authorization check
        if (!sem.skipAuthorization() && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
            try {
                perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
                doAuthorization(queryState.getHiveOperation(), sem, command);
            } catch (AuthorizationException authExp) {
                console.printError("Authorization failed:" + authExp.getMessage() + ". Use SHOW GRANT to get more details.");
                errorMessage = authExp.getMessage();
                SQLState = "42000";
                throw createProcessorResponse(403);
            } finally {
                perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DO_AUTHORIZATION);
            }
        }
        if (conf.getBoolVar(ConfVars.HIVE_LOG_EXPLAIN_OUTPUT)) {
            String explainOutput = getExplainOutput(sem, plan, tree);
            if (explainOutput != null) {
                LOG.info("EXPLAIN output for queryid " + queryId + " : " + explainOutput);
                if (conf.isWebUiQueryInfoCacheEnabled()) {
                    queryDisplay.setExplainPlan(explainOutput);
                }
            }
        }
    } catch (CommandProcessorResponse cpr) {
        throw cpr;
    } catch (Exception e) {
        checkInterrupted("during query compilation: " + e.getMessage(), null, null);
        compileError = true;
        ErrorMsg error = ErrorMsg.getErrorMsg(e.getMessage());
        errorMessage = "FAILED: " + e.getClass().getSimpleName();
        if (error != ErrorMsg.GENERIC_ERROR) {
            errorMessage += " [Error " + error.getErrorCode() + "]:";
        }
        // HIVE-4889
        if ((e instanceof IllegalArgumentException) && e.getMessage() == null && e.getCause() != null) {
            errorMessage += " " + e.getCause().getMessage();
        } else {
            errorMessage += " " + e.getMessage();
        }
        if (error == ErrorMsg.TXNMGR_NOT_ACID) {
            errorMessage += ". Failed command: " + queryStr;
        }
        SQLState = error.getSQLState();
        downstreamError = e;
        console.printError(errorMessage, "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
        throw createProcessorResponse(error.getErrorCode());
    } finally {
        // Trigger the after-compilation hook. Note that if compilation fails here, the
        // before/after execution hooks will never be executed.
        if (!parseError) {
            try {
                hookRunner.runAfterCompilationHook(command, compileError);
            } catch (Exception e) {
                LOG.warn("Failed when invoking query after-compilation hook.", e);
            }
        }
        double duration = perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.COMPILE) / 1000.00;
        ImmutableMap<String, Long> compileHMSTimings = dumpMetaCallTimingWithoutEx("compilation");
        queryDisplay.setHmsTimings(QueryDisplay.Phase.COMPILATION, compileHMSTimings);
        boolean isInterrupted = lDrvState.isAborted();
        if (isInterrupted && !deferClose) {
            closeInProcess(true);
        }
        lDrvState.stateLock.lock();
        try {
            if (isInterrupted) {
                lDrvState.driverState = deferClose ? DriverState.EXECUTING : DriverState.ERROR;
            } else {
                lDrvState.driverState = compileError ? DriverState.ERROR : DriverState.COMPILED;
            }
        } finally {
            lDrvState.stateLock.unlock();
        }
        if (isInterrupted) {
            LOG.info("Compiling command(queryId=" + queryId + ") has been interrupted after " + duration + " seconds");
        } else {
            LOG.info("Completed compiling command(queryId=" + queryId + "); Time taken: " + duration + " seconds");
        }
    }
}
Also used : HiveSemanticAnalyzerHookContext(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext) BaseSemanticAnalyzer(org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer) AuthorizationException(org.apache.hadoop.hive.ql.metadata.AuthorizationException) HiveVariableSource(org.apache.hadoop.hive.conf.HiveVariableSource) CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) PerfLogger(org.apache.hadoop.hive.ql.log.PerfLogger) Configurable(org.apache.hadoop.conf.Configurable) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) ASTNode(org.apache.hadoop.hive.ql.parse.ASTNode) PrivateHookContext(org.apache.hadoop.hive.ql.hooks.PrivateHookContext) ParseContext(org.apache.hadoop.hive.ql.parse.ParseContext) HiveAuthzContext(org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext) WmContext(org.apache.hadoop.hive.ql.wm.WmContext) HookContext(org.apache.hadoop.hive.ql.hooks.HookContext) HiveSemanticAnalyzerHookContext(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContext) VariableSubstitution(org.apache.hadoop.hive.conf.VariableSubstitution) LockException(org.apache.hadoop.hive.ql.lockmgr.LockException) IOException(java.io.IOException) ParseException(org.apache.hadoop.hive.ql.parse.ParseException) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) AuthorizationException(org.apache.hadoop.hive.ql.metadata.AuthorizationException) HiveSemanticAnalyzerHookContextImpl(org.apache.hadoop.hive.ql.parse.HiveSemanticAnalyzerHookContextImpl) HiveTxnManager(org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager) ParseException(org.apache.hadoop.hive.ql.parse.ParseException) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) ImmutableMap(com.google.common.collect.ImmutableMap) HashMap(java.util.HashMap)
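
The pre/post analyze hooks invoked in compile (hookRunner.runPreAnalyzeHooks / runPostAnalyzeHooks) are user-supplied implementations of HiveSemanticAnalyzerHook, typically wired in through the hive.semantic.analyzer.hook setting. A minimal, purely illustrative hook might look like the sketch below; the class name and logging are made up, and it deliberately returns the AST unchanged so compilation is unaffected.

public class LoggingSemanticAnalyzerHook extends AbstractSemanticAnalyzerHook {

    private static final Logger LOG = LoggerFactory.getLogger(LoggingSemanticAnalyzerHook.class);

    @Override
    public ASTNode preAnalyze(HiveSemanticAnalyzerHookContext context, ASTNode ast) throws SemanticException {
        // invoked where compile() calls hookRunner.runPreAnalyzeHooks(hookCtx, tree)
        LOG.info("About to analyze command: {}", context.getCommand());
        return ast;
    }

    @Override
    public void postAnalyze(HiveSemanticAnalyzerHookContext context,
            List<Task<? extends Serializable>> rootTasks) throws SemanticException {
        // invoked where compile() calls hookRunner.runPostAnalyzeHooks(hookCtx, sem.getAllRootTasks())
        LOG.info("Generated {} root task(s) for user {}", rootTasks.size(), context.getUserName());
    }
}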

Example 59 with CommandProcessorResponse

use of org.apache.hadoop.hive.ql.processors.CommandProcessorResponse in project hive by apache.

From the class TestDbTxnManager2, the method testValidWriteIdListSnapshot.

@Test
public void testValidWriteIdListSnapshot() throws Exception {
    // Create a transactional table
    dropTable(new String[] { "temp.T7" });
    CommandProcessorResponse cpr = driver.run("create database if not exists temp");
    checkCmdOnDriver(cpr);
    cpr = driver.run("create table if not exists temp.T7(a int, b int) clustered by(b) into 2 buckets stored as orc " + "TBLPROPERTIES ('transactional'='true')");
    checkCmdOnDriver(cpr);
    // Open a base txn which allocates a write ID and is then committed.
    long baseTxnId = txnMgr.openTxn(ctx, "u0");
    long baseWriteId = txnMgr.getTableWriteId("temp", "T7");
    Assert.assertEquals(1, baseWriteId);
    // committed baseTxnId
    txnMgr.commitTxn();
    // Open a txn with no writes.
    HiveTxnManager txnMgr1 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    long underHwmOpenTxnId = txnMgr1.openTxn(ctx, "u1");
    Assert.assertTrue("Invalid txn ID", underHwmOpenTxnId > baseTxnId);
    // Open a txn to be tested for ValidWriteIdList. Get the ValidTxnList during open itself.
    // Verify the ValidWriteIdList with no open/aborted write txns on this table.
    // Write ID of committed txn should be valid.
    HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    long testTxnId = txnMgr2.openTxn(ctx, "u2");
    Assert.assertTrue("Invalid txn ID", testTxnId > underHwmOpenTxnId);
    String testValidTxns = txnMgr2.getValidTxns().toString();
    ValidWriteIdList testValidWriteIds = txnMgr2.getValidWriteIds(Collections.singletonList("temp.t7"), testValidTxns).getTableValidWriteIdList("temp.t7");
    Assert.assertEquals(baseWriteId, testValidWriteIds.getHighWatermark());
    Assert.assertTrue("Invalid write ID list", testValidWriteIds.isWriteIdValid(baseWriteId));
    // Open a txn which allocates a write ID and remains in the open state.
    HiveTxnManager txnMgr3 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    long aboveHwmOpenTxnId = txnMgr3.openTxn(ctx, "u3");
    Assert.assertTrue("Invalid txn ID", aboveHwmOpenTxnId > testTxnId);
    long aboveHwmOpenWriteId = txnMgr3.getTableWriteId("temp", "T7");
    Assert.assertEquals(2, aboveHwmOpenWriteId);
    // Allocate a writeId to the txn under the HWM. It will get an ID greater than that of the txn above the HWM.
    long underHwmOpenWriteId = txnMgr1.getTableWriteId("temp", "T7");
    Assert.assertEquals(3, underHwmOpenWriteId);
    // Verify the ValidWriteIdList with one open txn on this table. Write ID of open txn should be invalid.
    testValidWriteIds = txnMgr2.getValidWriteIds(Collections.singletonList("temp.t7"), testValidTxns).getTableValidWriteIdList("temp.t7");
    Assert.assertEquals(underHwmOpenWriteId, testValidWriteIds.getHighWatermark());
    Assert.assertTrue("Invalid write ID list", testValidWriteIds.isWriteIdValid(baseWriteId));
    Assert.assertFalse("Invalid write ID list", testValidWriteIds.isWriteIdValid(underHwmOpenWriteId));
    Assert.assertFalse("Invalid write ID list", testValidWriteIds.isWriteIdValid(aboveHwmOpenWriteId));
    // Commit the txn under HWM.
    // Verify the writeId of this committed txn should be invalid for test txn.
    txnMgr1.commitTxn();
    testValidWriteIds = txnMgr2.getValidWriteIds(Collections.singletonList("temp.t7"), testValidTxns).getTableValidWriteIdList("temp.t7");
    Assert.assertEquals(underHwmOpenWriteId, testValidWriteIds.getHighWatermark());
    Assert.assertTrue("Invalid write ID list", testValidWriteIds.isWriteIdValid(baseWriteId));
    Assert.assertFalse("Invalid write ID list", testValidWriteIds.isWriteIdValid(underHwmOpenWriteId));
    Assert.assertFalse("Invalid write ID list", testValidWriteIds.isWriteIdValid(aboveHwmOpenWriteId));
    // Allocate writeId from test txn and then verify ValidWriteIdList.
    // Write Ids of committed and self test txn should be valid but writeId of open txn should be invalid.
    // WriteId of recently committed txn which was open when get ValidTxnList snapshot should be invalid as well.
    long testWriteId = txnMgr2.getTableWriteId("temp", "T7");
    Assert.assertEquals(4, testWriteId);
    testValidWriteIds = txnMgr2.getValidWriteIds(Collections.singletonList("temp.t7"), testValidTxns).getTableValidWriteIdList("temp.t7");
    Assert.assertEquals(testWriteId, testValidWriteIds.getHighWatermark());
    Assert.assertTrue("Invalid write ID list", testValidWriteIds.isWriteIdValid(baseWriteId));
    Assert.assertTrue("Invalid write ID list", testValidWriteIds.isWriteIdValid(testWriteId));
    Assert.assertFalse("Invalid write ID list", testValidWriteIds.isWriteIdValid(underHwmOpenWriteId));
    Assert.assertFalse("Invalid write ID list", testValidWriteIds.isWriteIdValid(aboveHwmOpenWriteId));
    txnMgr2.commitTxn();
    txnMgr3.commitTxn();
    cpr = driver.run("drop database if exists temp cascade");
    checkCmdOnDriver(cpr);
}
Also used : ValidWriteIdList(org.apache.hadoop.hive.common.ValidWriteIdList) CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) Test(org.junit.Test)
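
checkCmdOnDriver is a small test helper that is not part of this excerpt; it simply asserts that the command succeeded. A plausible shape, using only the CommandProcessorResponse accessors shown in these examples, is:

private static void checkCmdOnDriver(CommandProcessorResponse cpr) {
    // a response code of 0 means success; cpr.toString() keeps the error message visible if the assert fails
    Assert.assertTrue(cpr.toString(), cpr.getResponseCode() == 0);
}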

Example 60 with CommandProcessorResponse

use of org.apache.hadoop.hive.ql.processors.CommandProcessorResponse in project hive by apache.

From the class TestDbTxnManager2, the method testFairness.

@Test
public void testFairness() throws Exception {
    dropTable(new String[] { "T6" });
    CommandProcessorResponse cpr = driver.run("create table if not exists T6(a int)");
    checkCmdOnDriver(cpr);
    cpr = driver.compileAndRespond("select a from T6");
    checkCmdOnDriver(cpr);
    // gets S lock on T6
    txnMgr.acquireLocks(driver.getPlan(), ctx, "Fifer");
    HiveTxnManager txnMgr2 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    swapTxnManager(txnMgr2);
    cpr = driver.compileAndRespond("drop table if exists T6");
    checkCmdOnDriver(cpr);
    // tries to get X lock on T6 and gets Waiting state
    LockState lockState = ((DbTxnManager) txnMgr2).acquireLocks(driver.getPlan(), ctx, "Fiddler", false);
    List<ShowLocksResponseElement> locks = getLocks();
    Assert.assertEquals("Unexpected lock count", 2, locks.size());
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T6", null, locks);
    checkLock(LockType.EXCLUSIVE, LockState.WAITING, "default", "T6", null, locks);
    HiveTxnManager txnMgr3 = TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
    swapTxnManager(txnMgr3);
    // this should block behind the X lock on T6
    // this is a contrived example; in practice this query would of course fail after the drop table
    cpr = driver.compileAndRespond("select a from T6");
    checkCmdOnDriver(cpr);
    // gets S lock on T6
    ((DbTxnManager) txnMgr3).acquireLocks(driver.getPlan(), ctx, "Fifer", false);
    locks = getLocks();
    Assert.assertEquals("Unexpected lock count", 3, locks.size());
    checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "T6", null, locks);
    checkLock(LockType.SHARED_READ, LockState.WAITING, "default", "T6", null, locks);
    checkLock(LockType.EXCLUSIVE, LockState.WAITING, "default", "T6", null, locks);
}
Also used : CommandProcessorResponse(org.apache.hadoop.hive.ql.processors.CommandProcessorResponse) LockState(org.apache.hadoop.hive.metastore.api.LockState) ShowLocksResponseElement(org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement) Test(org.junit.Test)
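
getLocks and checkLock are also test helpers that are not shown here: getLocks fetches the current lock list (a SHOW LOCKS style call against the metastore) and checkLock asserts that a matching entry exists. A hedged sketch of checkLock, using the standard ShowLocksResponseElement getters, could look like this; the exact matching rules of the real helper are assumptions.

private static void checkLock(LockType expectedType, LockState expectedState, String db, String table,
        String partition, List<ShowLocksResponseElement> locks) {
    for (ShowLocksResponseElement lock : locks) {
        // db and table names come back lower-cased from the metastore, so compare case-insensitively
        if (lock.getType() == expectedType && lock.getState() == expectedState
                && db.equalsIgnoreCase(lock.getDbname())
                && table.equalsIgnoreCase(lock.getTablename())
                && (partition == null ? lock.getPartname() == null
                        : partition.equalsIgnoreCase(lock.getPartname()))) {
            return;
        }
    }
    Assert.fail("No " + expectedType + "/" + expectedState + " lock found on " + db + "." + table);
}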

Aggregations

CommandProcessorResponse (org.apache.hadoop.hive.ql.processors.CommandProcessorResponse): 145
Test (org.junit.Test): 92
ShowLocksResponseElement (org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement): 24
HCatBaseTest (org.apache.hive.hcatalog.mapreduce.HCatBaseTest): 19
IOException (java.io.IOException): 18
ArrayList (java.util.ArrayList): 17
AddDynamicPartitions (org.apache.hadoop.hive.metastore.api.AddDynamicPartitions): 8
Database (org.apache.hadoop.hive.metastore.api.Database): 8
HiveConf (org.apache.hadoop.hive.conf.HiveConf): 7
Table (org.apache.hadoop.hive.metastore.api.Table): 7
Path (org.apache.hadoop.fs.Path): 6
HiveException (org.apache.hadoop.hive.ql.metadata.HiveException): 6
PigServer (org.apache.pig.PigServer): 5
FieldSchema (org.apache.hadoop.hive.metastore.api.FieldSchema): 4
Driver (org.apache.hadoop.hive.ql.Driver): 4
IDriver (org.apache.hadoop.hive.ql.IDriver): 4
LockException (org.apache.hadoop.hive.ql.lockmgr.LockException): 4
PerfLogger (org.apache.hadoop.hive.ql.log.PerfLogger): 4
ParseException (org.apache.hadoop.hive.ql.parse.ParseException): 4
HashMap (java.util.HashMap): 3