
Example 1 with KillQuery

Use of org.apache.hadoop.hive.ql.session.KillQuery in project hive by apache.

From the class WorkloadManager, method scheduleWork; a minimal sketch of the KillQuery contract it relies on follows the example.

private void scheduleWork(WmThreadSyncWork context) {
    // 1. Kill queries.
    for (KillQueryContext killCtx : context.toKillQuery.values()) {
        final WmTezSession toKill = killCtx.session;
        final String reason = killCtx.reason;
        LOG.info("Killing query for {}", toKill);
        workPool.submit(() -> {
            // Note: we get query ID here, rather than in the caller, where it would be more correct
            // because we know which exact query we intend to kill. This is valid because we
            // are not expecting query ID to change - we never reuse the session for which a
            // query is being killed until both the kill, and the user, return it.
            String queryId = toKill.getQueryId();
            KillQuery kq = toKill.getKillQuery();
            try {
                if (kq != null && queryId != null) {
                    WmEvent wmEvent = new WmEvent(WmEvent.EventType.KILL);
                    LOG.info("Invoking KillQuery for " + queryId + ": " + reason);
                    try {
                        kq.killQuery(queryId, reason);
                        addKillQueryResult(toKill, true);
                        killCtx.killSessionFuture.set(true);
                        wmEvent.endEvent(toKill);
                        LOG.debug("Killed " + queryId);
                        return;
                    } catch (HiveException ex) {
                        LOG.error("Failed to kill " + queryId + "; will try to restart AM instead", ex);
                    }
                } else {
                    LOG.info("Will queue restart for {}; queryId {}, killQuery {}", toKill, queryId, kq);
                }
            } finally {
                toKill.setQueryId(null);
            }
            // We cannot restart in place because the user might receive a failure and return the
            // session to the master thread without the "irrelevant" flag set. In fact, the query might
            // have succeeded in the gap and the session might already be returned. Queue restart thru
            // the master thread.
            addKillQueryResult(toKill, false);
        });
    }
    context.toKillQuery.clear();
    // 2. Restart pool sessions.
    for (final WmTezSession toRestart : context.toRestartInUse) {
        LOG.info("Replacing {} with a new session", toRestart);
        toRestart.setQueryId(null);
        workPool.submit(() -> {
            try {
                WmEvent wmEvent = new WmEvent(WmEvent.EventType.RESTART);
                // Note: sessions in toRestart are always in use, so they cannot expire in parallel.
                tezAmPool.replaceSession(toRestart);
                wmEvent.endEvent(toRestart);
            } catch (Exception ex) {
                LOG.error("Failed to restart an old session; ignoring", ex);
            }
        });
    }
    context.toRestartInUse.clear();
    // 3. Destroy the sessions that we don't need anymore.
    for (final WmTezSession toDestroy : context.toDestroyNoRestart) {
        LOG.info("Closing {} without restart", toDestroy);
        workPool.submit(() -> {
            try {
                WmEvent wmEvent = new WmEvent(WmEvent.EventType.DESTROY);
                toDestroy.close(false);
                wmEvent.endEvent(toDestroy);
            } catch (Exception ex) {
                LOG.error("Failed to close an old session; ignoring " + ex.getMessage());
            }
        });
    }
    context.toDestroyNoRestart.clear();
    // 4. Delete unneeded directories that were replaced by other ones via reopen.
    for (final Path path : context.pathsToDelete) {
        LOG.info("Deleting {}", path);
        workPool.submit(() -> {
            try {
                path.getFileSystem(conf).delete(path, true);
            } catch (Exception ex) {
                LOG.error("Failed to delete an old path; ignoring " + ex.getMessage());
            }
        });
    }
    context.pathsToDelete.clear();
}
Also used : Path(org.apache.hadoop.fs.Path) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) KillQuery(org.apache.hadoop.hive.ql.session.KillQuery) ExecutionException(java.util.concurrent.ExecutionException)
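
Both call sites in this page invoke killQuery(queryId, reason) and catch HiveException, so a conforming implementation only needs that two-argument method. The sketch below is a minimal, hypothetical implementation (LoggingKillQuery is not a class in Hive) that merely records the request; it assumes the two-argument signature used in the examples, and a real implementation would locate the running operation for the query id and cancel it.

import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.session.KillQuery;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical, minimal KillQuery implementation; assumes the two-argument
// killQuery(queryId, errMsg) contract used by scheduleWork above.
public class LoggingKillQuery implements KillQuery {

    private static final Logger LOG = LoggerFactory.getLogger(LoggingKillQuery.class);

    @Override
    public void killQuery(String queryId, String errMsg) throws HiveException {
        // A real implementation would look up the running operation for queryId
        // and cancel it, throwing HiveException if the cancellation fails.
        LOG.info("Kill requested for query {}: {}", queryId, errMsg);
    }
}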

Example 2 with KillQuery

Use of org.apache.hadoop.hive.ql.session.KillQuery in project hive by apache.

From the class KillTriggerActionHandler, method applyAction; a sketch of the null-guard pattern it shares with the previous example follows the code.

@Override
public void applyAction(final Map<TezSessionState, Trigger> queriesViolated) {
    for (Map.Entry<TezSessionState, Trigger> entry : queriesViolated.entrySet()) {
        switch(entry.getValue().getAction().getType()) {
            case KILL_QUERY:
                TezSessionState sessionState = entry.getKey();
                String queryId = sessionState.getWmContext().getQueryId();
                try {
                    KillQuery killQuery = sessionState.getKillQuery();
                    // if kill query is null then session might have been released to pool or closed already
                    if (killQuery != null) {
                        killQuery.killQuery(queryId, entry.getValue().getViolationMsg());
                    }
                } catch (HiveException e) {
                    LOG.warn("Unable to kill query {} for trigger violation");
                }
                break;
            default:
                throw new RuntimeException("Unsupported action: " + entry.getValue());
        }
    }
}
Also used : Trigger(org.apache.hadoop.hive.ql.wm.Trigger) HiveException(org.apache.hadoop.hive.ql.metadata.HiveException) KillQuery(org.apache.hadoop.hive.ql.session.KillQuery) Map(java.util.Map)
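
Both examples guard against a null KillQuery (and scheduleWork also against a null query id), because the session may already have been returned to the pool or closed by the time the kill runs. The helper below is a hypothetical sketch (KillQueryHelper is not part of Hive) that factors out that guard, again assuming the two-argument killQuery signature shown above.

import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.session.KillQuery;

// Hypothetical helper, not part of Hive: the null-guarded kill pattern shared
// by scheduleWork and applyAction above.
public final class KillQueryHelper {

    private KillQueryHelper() {
    }

    /**
     * Attempts to kill the given query. Returns true if the kill call was made
     * and completed without error, false if there was nothing to kill or the
     * kill failed.
     */
    public static boolean tryKill(KillQuery killQuery, String queryId, String reason) {
        if (killQuery == null || queryId == null) {
            // Nothing to kill: the query may have finished, or the session may
            // have been released back to the pool already.
            return false;
        }
        try {
            killQuery.killQuery(queryId, reason);
            return true;
        } catch (HiveException ex) {
            // Let the caller decide how to recover, e.g. by restarting the AM
            // as scheduleWork does when a kill fails.
            return false;
        }
    }
}

With such a helper, the body of the KILL_QUERY case above would reduce to a single call like KillQueryHelper.tryKill(sessionState.getKillQuery(), queryId, entry.getValue().getViolationMsg()).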

Aggregations

HiveException (org.apache.hadoop.hive.ql.metadata.HiveException) 2
KillQuery (org.apache.hadoop.hive.ql.session.KillQuery) 2
Map (java.util.Map) 1
ExecutionException (java.util.concurrent.ExecutionException) 1
Path (org.apache.hadoop.fs.Path) 1
Trigger (org.apache.hadoop.hive.ql.wm.Trigger) 1