Use of org.apache.hadoop.hive.llap.LlapNodeId in project hive by apache.
The class AMReporter, method taskKilled.
public void taskKilled(String amLocation, int port, String umbilicalUser,
    Token<JobTokenIdentifier> jobToken, final QueryIdentifier queryIdentifier,
    final TezTaskAttemptID taskAttemptId) {
  LlapNodeId amNodeId = LlapNodeId.getInstance(amLocation, port);
  AMNodeInfo amNodeInfo;
  synchronized (knownAppMasters) {
    amNodeInfo = knownAppMasters.get(queryIdentifier);
    if (amNodeInfo == null) {
      amNodeInfo = new AMNodeInfo(amNodeId, umbilicalUser, jobToken, queryIdentifier,
          retryPolicy, retryTimeout, socketFactory, conf);
    }
  }
  // It's OK to make this invocation even if the service hasn't started up, since this
  // will only happen after the AtomicReference address has been populated.
  // Not adding an additional check.
  ListenableFuture<Void> future = executor.submit(new KillTaskCallable(taskAttemptId, amNodeInfo));
  Futures.addCallback(future, new FutureCallback<Void>() {
    @Override
    public void onSuccess(Void result) {
      LOG.info("Sent taskKilled for {}", taskAttemptId);
    }

    @Override
    public void onFailure(Throwable t) {
      LOG.warn("Failed to send taskKilled for {}. The attempt will likely time out.", taskAttemptId);
    }
  });
}
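The kill notification is fire-and-forget from the caller's point of view: the RPC runs on the proxy's executor and the outcome is only logged. Below is a minimal, self-contained sketch of the same Guava callback pattern. The class name, the executor wiring, and the no-op callable are stand-ins for AMReporter's internals, and it uses the three-argument addCallback overload with an explicit executor (the two-argument form above defaults to a direct executor in older Guava versions).

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class KillCallbackSketch {
  public static void main(String[] args) {
    ListeningExecutorService executor =
        MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
    // Stand-in for KillTaskCallable: the real callable opens an umbilical
    // connection to the AM identified by LlapNodeId and sends taskKilled.
    ListenableFuture<Void> future = executor.submit((Callable<Void>) () -> null);
    Futures.addCallback(future, new FutureCallback<Void>() {
      @Override
      public void onSuccess(Void result) {
        System.out.println("Sent taskKilled");
      }

      @Override
      public void onFailure(Throwable t) {
        System.out.println("Failed to send taskKilled: " + t);
      }
    }, MoreExecutors.directExecutor());
    executor.shutdown();
  }
}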
Use of org.apache.hadoop.hive.llap.LlapNodeId in project hive by apache.
The class ContainerRunnerImpl, method submitWork.
@Override
public SubmitWorkResponseProto submitWork(SubmitWorkRequestProto request) throws IOException {
  LlapTokenInfo tokenInfo = null;
  try {
    tokenInfo = LlapTokenChecker.getTokenInfo(clusterId);
  } catch (SecurityException ex) {
    logSecurityErrorRarely(null);
    throw ex;
  }
  SignableVertexSpec vertex = extractVertexSpec(request, tokenInfo);
  TezEvent initialEvent = extractInitialEvent(request, tokenInfo);
  TezTaskAttemptID attemptId = Converters.createTaskAttemptId(vertex.getQueryIdentifier(),
      vertex.getVertexIndex(), request.getFragmentNumber(), request.getAttemptNumber());
  String fragmentIdString = attemptId.toString();
  if (LOG.isInfoEnabled()) {
    LOG.info("Queueing container for execution: fragmentId={}, {}",
        fragmentIdString, stringifySubmitRequest(request, vertex));
  }
  QueryIdentifierProto qIdProto = vertex.getQueryIdentifier();
  HistoryLogger.logFragmentStart(qIdProto.getApplicationIdString(),
      request.getContainerIdString(), localAddress.get().getHostName(),
      constructUniqueQueryId(vertex.getHiveQueryId(), qIdProto.getDagIndex()),
      qIdProto.getDagIndex(), vertex.getVertexName(), request.getFragmentNumber(),
      request.getAttemptNumber());
  // This is the start of container-annotated logging.
  final String dagId = attemptId.getTaskID().getVertexID().getDAGId().toString();
  final String queryId = vertex.getHiveQueryId();
  final String fragId = LlapTezUtils.stripAttemptPrefix(fragmentIdString);
  MDC.put("dagId", dagId);
  MDC.put("queryId", queryId);
  MDC.put("fragmentId", fragId);
  // TODO: Ideally we want tez to use CallableWithMdc, which retains the MDC for threads created
  // in the thread pool. For now, we push dagId, queryId and fragId into the NDC; the custom
  // thread pool used for task execution and LLAP IO (StatsRecordingThreadPool) pops them
  // via reflection and updates the MDC.
  NDC.push(dagId);
  NDC.push(queryId);
  NDC.push(fragId);
  Scheduler.SubmissionState submissionState;
  SubmitWorkResponseProto.Builder responseBuilder = SubmitWorkResponseProto.newBuilder();
  try {
    Map<String, String> env = new HashMap<>();
    // TODO: What else is required in this environment map?
    env.putAll(localEnv);
    env.put(ApplicationConstants.Environment.USER.name(), vertex.getUser());
    TezTaskAttemptID taskAttemptId = TezTaskAttemptID.fromString(fragmentIdString);
    int dagIdentifier = taskAttemptId.getTaskID().getVertexID().getDAGId().getId();
    QueryIdentifier queryIdentifier =
        new QueryIdentifier(qIdProto.getApplicationIdString(), dagIdentifier);
    Credentials credentials = new Credentials();
    DataInputBuffer dib = new DataInputBuffer();
    byte[] tokenBytes = request.getCredentialsBinary().toByteArray();
    dib.reset(tokenBytes, tokenBytes.length);
    credentials.readTokenStorageStream(dib);
    Token<JobTokenIdentifier> jobToken = TokenCache.getSessionToken(credentials);
    LlapNodeId amNodeId = LlapNodeId.getInstance(request.getAmHost(), request.getAmPort());
    QueryFragmentInfo fragmentInfo = queryTracker.registerFragment(queryIdentifier,
        qIdProto.getApplicationIdString(), dagId, vertex.getDagName(), vertex.getHiveQueryId(),
        dagIdentifier, vertex.getVertexName(), request.getFragmentNumber(),
        request.getAttemptNumber(), vertex.getUser(), vertex, jobToken, fragmentIdString,
        tokenInfo, amNodeId);
    String[] localDirs = fragmentInfo.getLocalDirs();
    Preconditions.checkNotNull(localDirs);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Dirs are: " + Arrays.toString(localDirs));
    }
    // May need to set up localDir for re-localization, which is usually set up as Environment.PWD.
    // Used for re-localization, to add the user-specified configuration (conf_pb_binary_stream).
    Configuration callableConf = new Configuration(getConfig());
    UserGroupInformation fsTaskUgi = fsUgiFactory == null ? null : fsUgiFactory.createUgi();
    TaskRunnerCallable callable = new TaskRunnerCallable(request, fragmentInfo, callableConf,
        new ExecutionContextImpl(localAddress.get().getHostName()), env, credentials,
        memoryPerExecutor, amReporter, confParams, metrics, killedTaskHandler, this,
        tezHadoopShim, attemptId, vertex, initialEvent, fsTaskUgi, completionListener,
        socketFactory);
    submissionState = executorService.schedule(callable);
    if (LOG.isInfoEnabled()) {
      LOG.info("SubmissionState for {} : {}", fragmentIdString, submissionState);
    }
    if (submissionState.equals(Scheduler.SubmissionState.REJECTED)) {
      // Stop tracking the fragment and return the rejected state to the caller.
      fragmentComplete(fragmentInfo);
      return responseBuilder
          .setSubmissionState(SubmissionStateProto.valueOf(submissionState.name()))
          .build();
    }
    if (metrics != null) {
      metrics.incrExecutorTotalRequestsHandled();
    }
  } finally {
    MDC.clear();
    NDC.clear();
  }
  return responseBuilder
      .setUniqueNodeId(daemonId.getUniqueNodeIdInCluster())
      .setSubmissionState(SubmissionStateProto.valueOf(submissionState.name()))
      .build();
}
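The credential handling in the middle of submitWork is the standard Hadoop token-storage round trip: the AM serializes a Credentials object into the request's credentials binary, and the daemon rebuilds it with readTokenStorageStream before extracting the session token. A self-contained sketch of both halves follows; the class name, the dummy token kind/service, and the "jobToken" alias are illustrative, not the values Hive actually uses.

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class CredentialsRoundTrip {
  public static void main(String[] args) throws IOException {
    // Writer side (the AM): put a token into Credentials and serialize it
    // into the byte form that travels in the request's credentials field.
    Credentials outgoing = new Credentials();
    Token<?> token = new Token<>(new byte[0], new byte[0],
        new Text("testKind"), new Text("testService")); // dummy token
    outgoing.addToken(new Text("jobToken"), token);     // alias is illustrative
    DataOutputBuffer dob = new DataOutputBuffer();
    outgoing.writeTokenStorageToStream(dob);
    byte[] tokenBytes = Arrays.copyOf(dob.getData(), dob.getLength());

    // Reader side (the daemon), mirroring the submitWork code above.
    Credentials incoming = new Credentials();
    DataInputBuffer dib = new DataInputBuffer();
    dib.reset(tokenBytes, tokenBytes.length);
    incoming.readTokenStorageStream(dib);
    System.out.println("Tokens read back: " + incoming.numberOfTokens());
  }
}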
Use of org.apache.hadoop.hive.llap.LlapNodeId in project hive by apache.
The class LlapProtocolClientProxy, method sendTerminateFragment.
public void sendTerminateFragment(final TerminateFragmentRequestProto request, final String host,
    final int port, final ExecuteRequestCallback<TerminateFragmentResponseProto> callback) {
  LlapNodeId nodeId = LlapNodeId.getInstance(host, port);
  requestManager.queueRequest(new SendTerminateFragmentCallable(nodeId, request, callback));
}
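The proxy keys its per-node request queues off LlapNodeId.getInstance(host, port), so the type must behave as a value object with host/port-based equality. The tiny sketch below states that expectation; the host and port are placeholders, and the caching remark is an assumption about the implementation rather than documented behavior.

LlapNodeId a = LlapNodeId.getInstance("llap-node-1", 15001);
LlapNodeId b = LlapNodeId.getInstance("llap-node-1", 15001);
// The two handles must compare (and hash) equal for maps and queues keyed
// by node to work; getInstance is presumably backed by a cache, so repeated
// lookups for the same host:port are cheap.
assert a.equals(b) && a.hashCode() == b.hashCode();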
Use of org.apache.hadoop.hive.llap.LlapNodeId in project hive by apache.
The class LlapProtocolClientProxy, method sendQueryComplete.
public void sendQueryComplete(final QueryCompleteRequestProto request, final String host,
    final int port, final ExecuteRequestCallback<QueryCompleteResponseProto> callback) {
  LlapNodeId nodeId = LlapNodeId.getInstance(host, port);
  requestManager.queueRequest(new SendQueryCompleteCallable(nodeId, request, callback));
}
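Callers of both proxy methods supply an ExecuteRequestCallback, which is invoked asynchronously once the queued request finishes. A hedged usage sketch, assuming the callback is a nested interface of LlapProtocolClientProxy exposing setResponse and indicateError (as in Hive sources of this era); notifyQueryComplete is a hypothetical helper, and the request is left empty for brevity.

// Hypothetical helper: notify one daemon that a query has completed.
static void notifyQueryComplete(LlapProtocolClientProxy proxy,
    QueryCompleteRequestProto request, String host, int port) {
  proxy.sendQueryComplete(request, host, port,
      new LlapProtocolClientProxy.ExecuteRequestCallback<QueryCompleteResponseProto>() {
        @Override
        public void setResponse(QueryCompleteResponseProto response) {
          // Query cleanup acknowledged by this daemon.
        }

        @Override
        public void indicateError(Throwable t) {
          // The node may be unreachable; the caller decides whether to retry.
        }
      });
}

A sendTerminateFragment call takes exactly the same shape, with the TerminateFragment request and response protos substituted.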