
Example 11 with LlapNodeId

use of org.apache.hadoop.hive.llap.LlapNodeId in project hive by apache.

the class AMReporter method taskKilled.

public void taskKilled(String amLocation, int port, String umbilicalUser, Token<JobTokenIdentifier> jobToken, final QueryIdentifier queryIdentifier, final TezTaskAttemptID taskAttemptId) {
    LlapNodeId amNodeId = LlapNodeId.getInstance(amLocation, port);
    AMNodeInfo amNodeInfo;
    synchronized (knownAppMasters) {
        amNodeInfo = knownAppMasters.get(queryIdentifier);
        if (amNodeInfo == null) {
            amNodeInfo = new AMNodeInfo(amNodeId, umbilicalUser, jobToken, queryIdentifier, retryPolicy, retryTimeout, socketFactory, conf);
        }
    }
    // Even if the service hasn't started up, it's OK to make this invocation, since it will
    // only happen after the AtomicReference address has been populated. Not adding an additional check.
    ListenableFuture<Void> future = executor.submit(new KillTaskCallable(taskAttemptId, amNodeInfo));
    Futures.addCallback(future, new FutureCallback<Void>() {

        @Override
        public void onSuccess(Void result) {
            LOG.info("Sent taskKilled for {}", taskAttemptId);
        }

        @Override
        public void onFailure(Throwable t) {
            LOG.warn("Failed to send taskKilled for {}. The attempt will likely time out.", taskAttemptId);
        }
    });
}
Also used : LlapNodeId(org.apache.hadoop.hive.llap.LlapNodeId)
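
The method above resolves an LlapNodeId for the AM, submits the kill message asynchronously, and reports the outcome through a Guava FutureCallback. Below is a minimal, self-contained sketch of that submit-and-callback pattern; the class name, executor, and node string are illustrative stand-ins (a plain host:port string replaces LlapNodeId so the sketch has no Hive dependency), and the three-argument Futures.addCallback overload with an explicit executor is used, which newer Guava versions require.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Executors;

public class NodeIdCallbackSketch {
    public static void main(String[] args) {
        ListeningExecutorService executor =
            MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());

        // LlapNodeId.getInstance(host, port) would normally provide this identity;
        // a plain host:port string stands in for the sketch.
        String nodeId = "am-host.example.com:30002";

        ListenableFuture<Void> future = executor.submit(() -> {
            // Placeholder for the RPC that KillTaskCallable performs against the AM.
            System.out.println("sending kill to " + nodeId);
            return null;
        });

        Futures.addCallback(future, new FutureCallback<Void>() {
            @Override
            public void onSuccess(Void result) {
                System.out.println("kill delivered");
            }

            @Override
            public void onFailure(Throwable t) {
                System.out.println("kill failed: " + t);
            }
        }, MoreExecutors.directExecutor());

        executor.shutdown();
    }
}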

Example 12 with LlapNodeId

use of org.apache.hadoop.hive.llap.LlapNodeId in project hive by apache.

the class ContainerRunnerImpl method submitWork.

@Override
public SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws IOException {
    LlapTokenInfo tokenInfo = null;
    try {
        tokenInfo = LlapTokenChecker.getTokenInfo(clusterId);
    } catch (SecurityException ex) {
        logSecurityErrorRarely(null);
        throw ex;
    }
    SignableVertexSpec vertex = extractVertexSpec(request, tokenInfo);
    TezEvent initialEvent = extractInitialEvent(request, tokenInfo);
    TezTaskAttemptID attemptId = Converters.createTaskAttemptId(vertex.getQueryIdentifier(), vertex.getVertexIndex(), request.getFragmentNumber(), request.getAttemptNumber());
    String fragmentIdString = attemptId.toString();
    if (LOG.isInfoEnabled()) {
        LOG.info("Queueing container for execution: fragmentId={}, {}", fragmentIdString, stringifySubmitRequest(request, vertex));
    }
    QueryIdentifierProto qIdProto = vertex.getQueryIdentifier();
    HistoryLogger.logFragmentStart(qIdProto.getApplicationIdString(), request.getContainerIdString(), localAddress.get().getHostName(), constructUniqueQueryId(vertex.getHiveQueryId(), qIdProto.getDagIndex()), qIdProto.getDagIndex(), vertex.getVertexName(), request.getFragmentNumber(), request.getAttemptNumber());
    // This is the start of container-annotated logging.
    final String dagId = attemptId.getTaskID().getVertexID().getDAGId().toString();
    final String queryId = vertex.getHiveQueryId();
    final String fragId = LlapTezUtils.stripAttemptPrefix(fragmentIdString);
    MDC.put("dagId", dagId);
    MDC.put("queryId", queryId);
    MDC.put("fragmentId", fragId);
    // TODO: Ideally we want tez to use CallableWithMdc that retains the MDC for threads created in
    // thread pool. For now, we will push both dagId and queryId into NDC and the custom thread
    // pool that we use for task execution and llap io (StatsRecordingThreadPool) will pop them
    // using reflection and update the MDC.
    NDC.push(dagId);
    NDC.push(queryId);
    NDC.push(fragId);
    Scheduler.SubmissionState submissionState;
    SubmitWorkResponseProto.Builder responseBuilder = SubmitWorkResponseProto.newBuilder();
    try {
        Map<String, String> env = new HashMap<>();
        // TODO What else is required in this environment map.
        env.putAll(localEnv);
        env.put(ApplicationConstants.Environment.USER.name(), vertex.getUser());
        TezTaskAttemptID taskAttemptId = TezTaskAttemptID.fromString(fragmentIdString);
        int dagIdentifier = taskAttemptId.getTaskID().getVertexID().getDAGId().getId();
        QueryIdentifier queryIdentifier = new QueryIdentifier(qIdProto.getApplicationIdString(), dagIdentifier);
        Credentials credentials = new Credentials();
        DataInputBuffer dib = new DataInputBuffer();
        byte[] tokenBytes = request.getCredentialsBinary().toByteArray();
        dib.reset(tokenBytes, tokenBytes.length);
        credentials.readTokenStorageStream(dib);
        Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);
        LlapNodeId amNodeId = LlapNodeId.getInstance(request.getAmHost(), request.getAmPort());
        QueryFragmentInfo fragmentInfo = queryTracker.registerFragment(queryIdentifier, qIdProto.getApplicationIdString(), dagId, vertex.getDagName(), vertex.getHiveQueryId(), dagIdentifier, vertex.getVertexName(), request.getFragmentNumber(), request.getAttemptNumber(), vertex.getUser(), vertex, jobToken, fragmentIdString, tokenInfo, amNodeId);
        String[] localDirs = fragmentInfo.getLocalDirs();
        Preconditions.checkNotNull(localDirs);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Dirs are: " + Arrays.toString(localDirs));
        }
        // May need to setup localDir for re-localization, which is usually setup as Environment.PWD.
        // Used for re-localization, to add the user specified configuration (conf_pb_binary_stream)
        Configuration callableConf = new Configuration(getConfig());
        UserGroupInformation fsTaskUgi = fsUgiFactory == null ? null : fsUgiFactory.createUgi();
        TaskRunnerCallable callable = new TaskRunnerCallable(request, fragmentInfo, callableConf, new ExecutionContextImpl(localAddress.get().getHostName()), env, credentials, memoryPerExecutor, amReporter, confParams, metrics, killedTaskHandler, this, tezHadoopShim, attemptId, vertex, initialEvent, fsTaskUgi, completionListener, socketFactory);
        submissionState = executorService.schedule(callable);
        if (LOG.isInfoEnabled()) {
            LOG.info("SubmissionState for {} : {} ", fragmentIdString, submissionState);
        }
        if (submissionState.equals(Scheduler.SubmissionState.REJECTED)) {
            // Stop tracking the fragment and re-throw the error.
            fragmentComplete(fragmentInfo);
            return responseBuilder.setSubmissionState(SubmissionStateProto.valueOf(submissionState.name())).build();
        }
        if (metrics != null) {
            metrics.incrExecutorTotalRequestsHandled();
        }
    } finally {
        MDC.clear();
        NDC.clear();
    }
    return responseBuilder.setUniqueNodeId(daemonId.getUniqueNodeIdInCluster()).setSubmissionState(SubmissionStateProto.valueOf(submissionState.name())).build();
}
Also used :
LlapTokenInfo(org.apache.hadoop.hive.llap.daemon.impl.LlapTokenChecker.LlapTokenInfo)
Configuration(org.apache.hadoop.conf.Configuration)
TezConfiguration(org.apache.tez.dag.api.TezConfiguration)
HashMap(java.util.HashMap)
ByteString(com.google.protobuf.ByteString)
UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
ExecutionContextImpl(org.apache.tez.runtime.api.impl.ExecutionContextImpl)
JobTokenIdentifier(org.apache.tez.common.security.JobTokenIdentifier)
LlapNodeId(org.apache.hadoop.hive.llap.LlapNodeId)
DataInputBuffer(org.apache.hadoop.io.DataInputBuffer)
SignableVertexSpec(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec)
QueryIdentifierProto(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto)
SubmitWorkResponseProto(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto)
NotTezEvent(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent)
TezEvent(org.apache.tez.runtime.api.impl.TezEvent)
Credentials(org.apache.hadoop.security.Credentials)
TezTaskAttemptID(org.apache.tez.dag.records.TezTaskAttemptID)
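
submitWork above rebuilds a Credentials object from the request's binary payload (DataInputBuffer plus readTokenStorageStream) and then pulls the Tez session token out of it. The sketch below shows just that serialization round trip using plain Hadoop classes; the token alias and the empty Token are made up for illustration, and the real code obtains the session token via Tez's TokenCache rather than inspecting the restored Credentials directly.

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

import java.io.IOException;

public class CredentialsRoundTripSketch {
    public static void main(String[] args) throws IOException {
        Credentials original = new Credentials();
        // An empty Token<?> stands in for the real Tez session token.
        original.addToken(new Text("sessionToken"), new Token<>());

        // Serialize, as the AM would before shipping credentials in the request.
        DataOutputBuffer out = new DataOutputBuffer();
        original.writeTokenStorageToStream(out);
        byte[] tokenBytes = new byte[out.getLength()];
        System.arraycopy(out.getData(), 0, tokenBytes, 0, out.getLength());

        // Deserialize on the daemon side, mirroring the submitWork code above.
        DataInputBuffer dib = new DataInputBuffer();
        dib.reset(tokenBytes, tokenBytes.length);
        Credentials restored = new Credentials();
        restored.readTokenStorageStream(dib);

        System.out.println("restored tokens: " + restored.numberOfTokens());
    }
}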

Example 13 with LlapNodeId

use of org.apache.hadoop.hive.llap.LlapNodeId in project hive by apache.

the class LlapProtocolClientProxy method sendTerminateFragment.

public void sendTerminateFragment(final TerminateFragmentRequestProto request, final String host, final int port, final ExecuteRequestCallback<TerminateFragmentResponseProto> callback) {
    LlapNodeId nodeId = LlapNodeId.getInstance(host, port);
    requestManager.queueRequest(new SendTerminateFragmentCallable(nodeId, request, callback));
}
Also used : LlapNodeId(org.apache.hadoop.hive.llap.LlapNodeId)
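
sendTerminateFragment keys the outgoing request by LlapNodeId and hands it to an internal request manager together with a callback. The following sketch mimics that shape with hypothetical stand-ins: the ResponseCallback interface, the queue, and the method names are not the Hive API; they only illustrate queueing work per host:port and reporting the result back asynchronously.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class ProxyCallbackSketch {

    // Modeled loosely on the ExecuteRequestCallback used by LlapProtocolClientProxy;
    // the method names here are invented for the sketch.
    interface ResponseCallback<T> {
        void onResponse(T response);
        void onError(Throwable t);
    }

    // A trivial "request manager": callables are queued here and drained elsewhere.
    static final BlockingQueue<Runnable> requestQueue = new LinkedBlockingQueue<>();

    static void sendTerminateFragment(String host, int port, String fragmentId,
                                      ResponseCallback<String> callback) {
        String nodeKey = host + ":" + port; // stands in for LlapNodeId.getInstance(host, port)
        requestQueue.add(() -> {
            // Placeholder for the RPC; report a canned response back to the caller.
            callback.onResponse("terminated " + fragmentId + " on " + nodeKey);
        });
    }

    public static void main(String[] args) throws InterruptedException {
        sendTerminateFragment("llap-daemon-1", 15001, "attempt_000_1", new ResponseCallback<String>() {
            @Override public void onResponse(String response) { System.out.println(response); }
            @Override public void onError(Throwable t) { t.printStackTrace(); }
        });
        // Drain the queue inline for the sketch.
        requestQueue.take().run();
    }
}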

Example 14 with LlapNodeId

use of org.apache.hadoop.hive.llap.LlapNodeId in project hive by apache.

the class LlapProtocolClientProxy method sendQueryComplete.

public void sendQueryComplete(final QueryCompleteRequestProto request, final String host, final int port, final ExecuteRequestCallback<QueryCompleteResponseProto> callback) {
    LlapNodeId nodeId = LlapNodeId.getInstance(host, port);
    requestManager.queueRequest(new SendQueryCompleteCallable(nodeId, request, callback));
}
Also used : LlapNodeId(org.apache.hadoop.hive.llap.LlapNodeId)
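
All of these examples obtain node identities through LlapNodeId.getInstance(host, port) and use them as lookup keys. Below is a simplified, hypothetical stand-in showing why such a value object is convenient: value equality for use as a map key, plus a crude instance cache. The real Hive class manages its cache more carefully and is not reproduced here.

import java.util.Map;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;

public final class NodeIdSketch {
    private static final Map<String, NodeIdSketch> CACHE = new ConcurrentHashMap<>();

    private final String hostname;
    private final int port;

    private NodeIdSketch(String hostname, int port) {
        this.hostname = hostname;
        this.port = port;
    }

    // Returns a cached instance per host:port, loosely mirroring LlapNodeId.getInstance.
    public static NodeIdSketch getInstance(String hostname, int port) {
        return CACHE.computeIfAbsent(hostname + ":" + port, k -> new NodeIdSketch(hostname, port));
    }

    public String getHostname() { return hostname; }
    public int getPort() { return port; }

    @Override public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof NodeIdSketch)) return false;
        NodeIdSketch other = (NodeIdSketch) o;
        return port == other.port && hostname.equals(other.hostname);
    }

    @Override public int hashCode() { return Objects.hash(hostname, port); }

    @Override public String toString() { return hostname + ":" + port; }

    public static void main(String[] args) {
        NodeIdSketch a = NodeIdSketch.getInstance("daemon-1", 15001);
        NodeIdSketch b = NodeIdSketch.getInstance("daemon-1", 15001);
        System.out.println(a == b);       // true: cached instance reused
        System.out.println(a.equals(b));  // true: value equality
    }
}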

Aggregations

LlapNodeId (org.apache.hadoop.hive.llap.LlapNodeId) 14
ByteString (com.google.protobuf.ByteString) 3
LlapProtocolClientProxy (org.apache.hadoop.hive.llap.tez.LlapProtocolClientProxy) 3
Message (com.google.protobuf.Message) 2
QueryIdentifierProto (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto) 2
SubmitWorkResponseProto (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto) 2
VertexState (org.apache.tez.dag.api.event.VertexState) 2
TezTaskAttemptID (org.apache.tez.dag.records.TezTaskAttemptID) 2
Test (org.junit.Test) 2
BiMap (com.google.common.collect.BiMap) 1
HashBiMap (com.google.common.collect.HashBiMap) 1
ServiceException (com.google.protobuf.ServiceException) 1
IOException (java.io.IOException) 1
HashMap (java.util.HashMap) 1
HashSet (java.util.HashSet) 1
Map (java.util.Map) 1
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap) 1
ConcurrentMap (java.util.concurrent.ConcurrentMap) 1
Configuration (org.apache.hadoop.conf.Configuration) 1
LlapTokenInfo (org.apache.hadoop.hive.llap.daemon.impl.LlapTokenChecker.LlapTokenInfo) 1