Use of org.apache.tez.dag.history.DAGHistoryEvent in project tez by apache.
The class TaskImpl, method logJobHistoryTaskFinishedEvent:
protected void logJobHistoryTaskFinishedEvent() {
  // FIXME need to handle getting finish time as this function
  // is called from within a transition
  this.finishTime = clock.getTime();
  TaskFinishedEvent finishEvt = new TaskFinishedEvent(taskId, getVertex().getName(),
      getLaunchTime(), this.finishTime, successfulAttempt, TaskState.SUCCEEDED, "",
      getCounters(), failedAttempts);
  this.appContext.getHistoryHandler().handle(
      new DAGHistoryEvent(taskId.getVertexID().getDAGId(), finishEvt));
}
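Each of these call sites follows the same pattern: a DAG-scoped HistoryEvent is paired with the owning TezDAGID inside a DAGHistoryEvent and handed to the AppContext's history handler. A minimal sketch of that pattern, assuming the standard Tez package locations for AppContext, HistoryEvent, and TezDAGID; the helper class and method below are hypothetical and exist only to illustrate the wrapping:

import org.apache.tez.dag.app.AppContext;
import org.apache.tez.dag.history.DAGHistoryEvent;
import org.apache.tez.dag.history.HistoryEvent;
import org.apache.tez.dag.records.TezDAGID;

final class HistoryLoggingSketch {

  // Hypothetical helper, not part of Tez: wraps a per-DAG event with its DAG id
  // and routes it to the history handler, mirroring the call sites above.
  static void logToHistory(AppContext appContext, TezDAGID dagId, HistoryEvent event) {
    appContext.getHistoryHandler().handle(new DAGHistoryEvent(dagId, event));
  }
}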
Use of org.apache.tez.dag.history.DAGHistoryEvent in project tez by apache.
The class TaskImpl, method logJobHistoryTaskFailedEvent:
protected void logJobHistoryTaskFailedEvent(TaskState finalState) {
  this.finishTime = clock.getTime();
  TaskFinishedEvent finishEvt = new TaskFinishedEvent(taskId, getVertex().getName(),
      getLaunchTime(), this.finishTime, null, finalState,
      StringUtils.join(getDiagnostics(), LINE_SEPARATOR), getCounters(), failedAttempts);
  this.appContext.getHistoryHandler().handle(
      new DAGHistoryEvent(taskId.getVertexID().getDAGId(), finishEvt));
}
Use of org.apache.tez.dag.history.DAGHistoryEvent in project tez by apache.
The class VertexImpl, method commitOrFinish:
private static VertexState commitOrFinish(final VertexImpl vertex) {
  // commit only once. Dont commit shared outputs
  if (vertex.outputCommitters != null && !vertex.outputCommitters.isEmpty()) {
    if (vertex.recoveryData != null && vertex.recoveryData.isVertexCommitted()) {
      LOG.info("Vertex was already committed as per recovery"
          + " data, vertex=" + vertex.logIdentifier);
      return vertex.finished(VertexState.SUCCEEDED);
    }
    boolean firstCommit = true;
    for (Entry<String, OutputCommitter> entry : vertex.outputCommitters.entrySet()) {
      final OutputCommitter committer = entry.getValue();
      final String outputName = entry.getKey();
      if (vertex.sharedOutputs.contains(outputName)) {
        // dont commit shared committers. Will be committed by the DAG
        continue;
      }
      if (firstCommit) {
        LOG.info("Invoking committer commit for vertex, vertexId=" + vertex.logIdentifier);
        // Log commit start event on first actual commit
        try {
          vertex.appContext.getHistoryHandler().handleCriticalEvent(
              new DAGHistoryEvent(vertex.getDAGId(),
                  new VertexCommitStartedEvent(vertex.vertexId, vertex.clock.getTime())));
        } catch (IOException e) {
          LOG.error("Failed to persist commit start event to recovery, vertex="
              + vertex.logIdentifier, e);
          vertex.trySetTerminationCause(VertexTerminationCause.RECOVERY_ERROR);
          return vertex.finished(VertexState.FAILED);
        }
        firstCommit = false;
      }
      VertexCommitCallback commitCallback = new VertexCommitCallback(vertex, outputName);
      CallableEvent commitCallableEvent = new CallableEvent(commitCallback) {

        @Override
        public Void call() throws Exception {
          try {
            TezUtilsInternal.setHadoopCallerContext(vertex.appContext.getHadoopShim(),
                vertex.vertexId);
            vertex.dagUgi.doAs(new PrivilegedExceptionAction<Void>() {

              @Override
              public Void run() throws Exception {
                LOG.info("Invoking committer commit for output=" + outputName
                    + ", vertexId=" + vertex.logIdentifier);
                committer.commitOutput();
                return null;
              }
            });
          } finally {
            vertex.appContext.getHadoopShim().clearHadoopCallerContext();
          }
          return null;
        }
      };
      ListenableFuture<Void> commitFuture =
          vertex.getAppContext().getExecService().submit(commitCallableEvent);
      Futures.addCallback(commitFuture, commitCallableEvent.getCallback());
      vertex.commitFutures.put(outputName, commitFuture);
    }
  }
  if (vertex.commitFutures.isEmpty()) {
    return vertex.finished(VertexState.SUCCEEDED);
  } else {
    return VertexState.COMMITTING;
  }
}
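The snippet above distinguishes handle(...) from handleCriticalEvent(...): the VertexCommitStartedEvent must be recorded for recovery before any committer runs, so the critical variant is allowed to fail with an IOException, in which case the vertex is failed with RECOVERY_ERROR instead of committing. A condensed sketch of that guard, with the VertexImpl fields replaced by hypothetical parameters (CommitStartGuardSketch and persistCommitStart are not part of Tez):

import java.io.IOException;

import org.apache.hadoop.yarn.util.Clock;
import org.apache.tez.dag.app.AppContext;
import org.apache.tez.dag.history.DAGHistoryEvent;
import org.apache.tez.dag.history.events.VertexCommitStartedEvent;
import org.apache.tez.dag.records.TezDAGID;
import org.apache.tez.dag.records.TezVertexID;

final class CommitStartGuardSketch {

  // Returns true if the commit-start event was persisted and committing may
  // proceed, false if the caller should fail the vertex (RECOVERY_ERROR above).
  static boolean persistCommitStart(AppContext appContext, TezDAGID dagId,
      TezVertexID vertexId, Clock clock) {
    try {
      // Unlike handle(...), handleCriticalEvent(...) declares IOException, so a
      // failure to persist the event for recovery is visible to the caller.
      appContext.getHistoryHandler().handleCriticalEvent(
          new DAGHistoryEvent(dagId,
              new VertexCommitStartedEvent(vertexId, clock.getTime())));
      return true;
    } catch (IOException e) {
      return false;
    }
  }
}

Only after this event is persisted does the method submit each non-shared committer's commitOutput() asynchronously and move the vertex to COMMITTING until the commit futures complete.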
Use of org.apache.tez.dag.history.DAGHistoryEvent in project tez by apache.
The class VertexImpl, method logVertexConfigurationDoneEvent:
void logVertexConfigurationDoneEvent() {
  if (recoveryData == null || !recoveryData.shouldSkipInit()) {
    Map<String, EdgeProperty> sourceEdgeProperties = new HashMap<String, EdgeProperty>();
    for (Map.Entry<Vertex, Edge> entry : this.sourceVertices.entrySet()) {
      sourceEdgeProperties.put(entry.getKey().getName(), entry.getValue().getEdgeProperty());
    }
    VertexConfigurationDoneEvent reconfigureDoneEvent = new VertexConfigurationDoneEvent(
        vertexId, clock.getTime(), numTasks,
        taskLocationHints == null ? null
            : VertexLocationHint.create(Lists.newArrayList(taskLocationHints)),
        sourceEdgeProperties, rootInputSpecs, setParallelismCalledFlag);
    this.appContext.getHistoryHandler().handle(
        new DAGHistoryEvent(getDAGId(), reconfigureDoneEvent));
  }
}
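Here the recoveryData guard keeps the VertexConfigurationDoneEvent from being written a second time: when shouldSkipInit() reports that initialization was already recorded in a previous attempt, the event is not re-logged.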
Use of org.apache.tez.dag.history.DAGHistoryEvent in project tez by apache.
The class TaskAttemptImpl, method logJobHistoryAttemptUnsuccesfulCompletion:
protected void logJobHistoryAttemptUnsuccesfulCompletion(TaskAttemptState state,
    TaskFailureType taskFailureType) {
  Preconditions.checkArgument(
      recoveryData == null || recoveryData.getTaskAttemptFinishedEvent() == null,
      "log TaskAttemptFinishedEvent again in recovery when there's already another TaskAtttemptFinishedEvent");
  if (state == TaskAttemptState.FAILED && taskFailureType == null) {
    throw new IllegalStateException("FAILED state must be accompanied by a FailureType");
  }
  long finishTime = getFinishTime();
  ContainerId unsuccessfulContainerId = null;
  NodeId unsuccessfulContainerNodeId = null;
  String inProgressLogsUrl = null;
  String completedLogsUrl = null;
  if (finishTime <= 0) {
    // comes here in case it was terminated before launch
    finishTime = clock.getTime();
    unsuccessfulContainerId = containerId;
    unsuccessfulContainerNodeId = containerNodeId;
    inProgressLogsUrl = getInProgressLogsUrl();
    completedLogsUrl = getCompletedLogsUrl();
  }
  TaskAttemptFinishedEvent finishEvt = new TaskAttemptFinishedEvent(attemptId,
      getVertex().getName(), getLaunchTime(), finishTime, state, taskFailureType,
      terminationCause, StringUtils.join(getDiagnostics(), LINE_SEPARATOR), getCounters(),
      lastDataEvents, taGeneratedEvents, creationTime, creationCausalTA, allocationTime,
      unsuccessfulContainerId, unsuccessfulContainerNodeId, inProgressLogsUrl,
      completedLogsUrl, nodeHttpAddress);
  // FIXME how do we store information regd completion events
  this.appContext.getHistoryHandler().handle(new DAGHistoryEvent(getDAGID(), finishEvt));
}
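The Preconditions check plays the same role as the recovery guards above: if a TaskAttemptFinishedEvent for this attempt was already recovered, logging another would duplicate history. Note also that the container, node, and log-URL fields are populated only when finishTime <= 0, i.e. when the attempt was terminated before it ever launched (see the comment in the snippet).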