Use of org.apache.tez.dag.history.events.DAGCommitStartedEvent in project tez by apache.
In the class TestHistoryEventJsonConversion, the method testHandlerExists:
@Test(timeout = 5000)
public void testHandlerExists() throws JSONException {
  for (HistoryEventType eventType : HistoryEventType.values()) {
    HistoryEvent event = null;
    switch (eventType) {
      case APP_LAUNCHED:
        event = new AppLaunchedEvent(applicationId, random.nextInt(), random.nextInt(), user, new Configuration(false), null);
        break;
      case AM_LAUNCHED:
        event = new AMLaunchedEvent(applicationAttemptId, random.nextInt(), random.nextInt(), user);
        break;
      case AM_STARTED:
        event = new AMStartedEvent(applicationAttemptId, random.nextInt(), user);
        break;
      case DAG_SUBMITTED:
        event = new DAGSubmittedEvent(tezDAGID, random.nextInt(), dagPlan, applicationAttemptId, null, user, null, null, "Q_" + eventType.name());
        break;
      case DAG_INITIALIZED:
        event = new DAGInitializedEvent(tezDAGID, random.nextInt(), user, dagPlan.getName(), null);
        break;
      case DAG_STARTED:
        event = new DAGStartedEvent(tezDAGID, random.nextInt(), user, dagPlan.getName());
        break;
      case DAG_FINISHED:
        event = new DAGFinishedEvent(tezDAGID, random.nextInt(), random.nextInt(), DAGState.ERROR, null, null, user, dagPlan.getName(), null, applicationAttemptId, dagPlan);
        break;
      case VERTEX_INITIALIZED:
        event = new VertexInitializedEvent(tezVertexID, "v1", random.nextInt(), random.nextInt(), random.nextInt(), "proc", null, null, null);
        break;
      case VERTEX_STARTED:
        event = new VertexStartedEvent(tezVertexID, random.nextInt(), random.nextInt());
        break;
      case VERTEX_CONFIGURE_DONE:
        event = new VertexConfigurationDoneEvent(tezVertexID, 0L, 1, null, null, null, true);
        break;
      case VERTEX_FINISHED:
        event = new VertexFinishedEvent(tezVertexID, "v1", 1, random.nextInt(), random.nextInt(), random.nextInt(), random.nextInt(), random.nextInt(), VertexState.ERROR, null, null, null, null, null);
        break;
      case TASK_STARTED:
        event = new TaskStartedEvent(tezTaskID, "v1", random.nextInt(), random.nextInt());
        break;
      case TASK_FINISHED:
        event = new TaskFinishedEvent(tezTaskID, "v1", random.nextInt(), random.nextInt(), tezTaskAttemptID, TaskState.FAILED, null, null, 0);
        break;
      case TASK_ATTEMPT_STARTED:
        event = new TaskAttemptStartedEvent(tezTaskAttemptID, "v1", random.nextInt(), containerId, nodeId, null, null, "nodeHttpAddress");
        break;
      case TASK_ATTEMPT_FINISHED:
        event = new TaskAttemptFinishedEvent(tezTaskAttemptID, "v1", random.nextInt(), random.nextInt(), TaskAttemptState.KILLED, null, TaskAttemptTerminationCause.TERMINATED_BY_CLIENT, null, null, null, null, 0, null, 0, containerId, nodeId, null, null, "nodeHttpAddress");
        break;
      case CONTAINER_LAUNCHED:
        event = new ContainerLaunchedEvent(containerId, random.nextInt(), applicationAttemptId);
        break;
      case CONTAINER_STOPPED:
        event = new ContainerStoppedEvent(containerId, random.nextInt(), -1, applicationAttemptId);
        break;
      case DAG_COMMIT_STARTED:
        event = new DAGCommitStartedEvent();
        break;
      case VERTEX_COMMIT_STARTED:
        event = new VertexCommitStartedEvent();
        break;
      case VERTEX_GROUP_COMMIT_STARTED:
        event = new VertexGroupCommitStartedEvent();
        break;
      case VERTEX_GROUP_COMMIT_FINISHED:
        event = new VertexGroupCommitFinishedEvent();
        break;
      case DAG_RECOVERED:
        event = new DAGRecoveredEvent(applicationAttemptId, tezDAGID, dagPlan.getName(), user, 1L, null);
        break;
      case DAG_KILL_REQUEST:
        event = new DAGKillRequestEvent();
        break;
      default:
        Assert.fail("Unhandled event type " + eventType);
    }
    if (event == null || !event.isHistoryEvent()) {
      continue;
    }
    JSONObject json = HistoryEventJsonConversion.convertToJson(event);
    if (eventType == HistoryEventType.DAG_SUBMITTED) {
      try {
        Assert.assertEquals("Q_" + eventType.name(), json.getJSONObject(ATSConstants.OTHER_INFO).getString(ATSConstants.DAG_QUEUE_NAME));
        Assert.assertEquals("Q_" + eventType.name(), json.getJSONObject(ATSConstants.PRIMARY_FILTERS).getString(ATSConstants.DAG_QUEUE_NAME));
      } catch (JSONException ex) {
        Assert.fail("Exception: " + ex.getMessage() + " for type: " + eventType);
      }
    }
  }
}
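In this test, DAGCommitStartedEvent (like the other commit-related events) is built with its no-arg constructor, and the isHistoryEvent() guard above skips JSON conversion for any event that does not report itself as a timeline history event. For contrast, the fully populated two-argument constructor used by DAGImpl further down can be sketched as follows; the ApplicationId and TezDAGID setup here is a hypothetical illustration, not part of the test fixture:

// Minimal sketch (hypothetical IDs): the event captures which DAG started
// committing and the wall-clock time at which the commit began.
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
TezDAGID dagId = TezDAGID.getInstance(appId, 1);
DAGCommitStartedEvent commitStarted = new DAGCommitStartedEvent(dagId, System.currentTimeMillis());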
Use of org.apache.tez.dag.history.events.DAGCommitStartedEvent in project tez by apache.
In the class DAGImpl, the method commitOrFinish:
// either commit when all vertices are completed or just finish if there's no committer
private synchronized DAGState commitOrFinish() {
  // commit all other outputs
  // we come here for successful dag completion and when outputs need to be
  // committed at the end for all or none visibility
  Map<OutputKey, CallableEvent> commitEvents = new HashMap<OutputKey, CallableEvent>();
  // commit all shared outputs
  for (final VertexGroupInfo groupInfo : vertexGroups.values()) {
    if (!groupInfo.outputs.isEmpty()) {
      groupInfo.commitStarted = true;
      final Vertex v = getVertex(groupInfo.groupMembers.iterator().next());
      try {
        TezUtilsInternal.setHadoopCallerContext(appContext.getHadoopShim(), v.getVertexId());
        for (final String outputName : groupInfo.outputs) {
          final OutputKey outputKey = new OutputKey(outputName, groupInfo.groupName, true);
          CommitCallback groupCommitCallback = new CommitCallback(outputKey);
          CallableEvent groupCommitCallableEvent = new CallableEvent(groupCommitCallback) {
            @Override
            public Void call() throws Exception {
              OutputCommitter committer = v.getOutputCommitters().get(outputName);
              LOG.info("Committing output: " + outputKey);
              commitOutput(committer);
              return null;
            }
          };
          commitEvents.put(outputKey, groupCommitCallableEvent);
        }
      } finally {
        appContext.getHadoopShim().clearHadoopCallerContext();
      }
    }
  }
  for (final Vertex vertex : vertices.values()) {
    if (vertex.getOutputCommitters() == null) {
      LOG.info("No output committers for vertex: " + vertex.getLogIdentifier());
      continue;
    }
    Map<String, OutputCommitter> outputCommitters = new HashMap<String, OutputCommitter>(vertex.getOutputCommitters());
    Set<String> sharedOutputs = vertex.getSharedOutputs();
    // remove shared outputs
    if (sharedOutputs != null) {
      Iterator<Map.Entry<String, OutputCommitter>> iter = outputCommitters.entrySet().iterator();
      while (iter.hasNext()) {
        if (sharedOutputs.contains(iter.next().getKey())) {
          iter.remove();
        }
      }
    }
    if (outputCommitters.isEmpty()) {
      LOG.info("No exclusive output committers for vertex: " + vertex.getLogIdentifier());
      continue;
    }
    try {
      TezUtilsInternal.setHadoopCallerContext(appContext.getHadoopShim(), vertex.getVertexId());
      for (final Map.Entry<String, OutputCommitter> entry : outputCommitters.entrySet()) {
        if (vertex.getState() != VertexState.SUCCEEDED) {
          throw new TezUncheckedException("Vertex: " + vertex.getLogIdentifier() + " not in SUCCEEDED state. State= " + vertex.getState());
        }
        OutputKey outputKey = new OutputKey(entry.getKey(), vertex.getName(), false);
        CommitCallback commitCallback = new CommitCallback(outputKey);
        CallableEvent commitCallableEvent = new CallableEvent(commitCallback) {
          @Override
          public Void call() throws Exception {
            LOG.info("Committing output: " + entry.getKey() + " for vertex: " + vertex.getLogIdentifier() + ", outputName: " + entry.getKey());
            commitOutput(entry.getValue());
            return null;
          }
        };
        commitEvents.put(outputKey, commitCallableEvent);
      }
    } finally {
      appContext.getHadoopShim().clearHadoopCallerContext();
    }
  }
  if (!commitEvents.isEmpty()) {
    try {
      LOG.info("Start writing dag commit event, " + getID());
      appContext.getHistoryHandler().handleCriticalEvent(new DAGHistoryEvent(getID(), new DAGCommitStartedEvent(getID(), clock.getTime())));
    } catch (IOException e) {
      LOG.error("Failed to send commit event to history/recovery handler", e);
      trySetTerminationCause(DAGTerminationCause.RECOVERY_FAILURE);
      return finished(DAGState.FAILED);
    }
    for (Map.Entry<OutputKey, CallableEvent> entry : commitEvents.entrySet()) {
      ListenableFuture<Void> commitFuture = appContext.getExecService().submit(entry.getValue());
      Futures.addCallback(commitFuture, entry.getValue().getCallback());
      commitFutures.put(entry.getKey(), commitFuture);
    }
  }
  if (commitFutures.isEmpty()) {
    // no commit needs to be done
    return finished(DAGState.SUCCEEDED);
  } else {
    return DAGState.COMMITTING;
  }
}
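Note the ordering: the DAGCommitStartedEvent is written to the history/recovery handler before any committer is actually submitted, so a restarted AM can tell an in-flight commit apart from a DAG that never began committing. The "all or none visibility" mentioned in the method's comment corresponds to deferring every output commit to DAG completion; a minimal sketch of enabling that behaviour, assuming the standard TezConfiguration constant TEZ_AM_COMMIT_ALL_OUTPUTS_ON_DAG_SUCCESS (an assumption, not taken from the snippet above):

// Hedged sketch: ask the AM to commit all outputs only once the whole DAG
// succeeds (all-or-none visibility) instead of per vertex as each finishes.
TezConfiguration tezConf = new TezConfiguration();
tezConf.setBoolean(TezConfiguration.TEZ_AM_COMMIT_ALL_OUTPUTS_ON_DAG_SUCCESS, true);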
Use of org.apache.tez.dag.history.events.DAGCommitStartedEvent in project tez by apache.
In the class RecoveryParser, the method getNextEvent:
private static HistoryEvent getNextEvent(FSDataInputStream inputStream) throws IOException {
  int eventTypeOrdinal = -1;
  try {
    eventTypeOrdinal = inputStream.readInt();
  } catch (EOFException eof) {
    return null;
  }
  if (eventTypeOrdinal < 0 || eventTypeOrdinal >= HistoryEventType.values().length) {
    // not a valid event type ordinal - the recovery stream is corrupt
    throw new IOException("Corrupt data found when trying to read next event type" + ", eventTypeOrdinal=" + eventTypeOrdinal);
  }
  HistoryEventType eventType = HistoryEventType.values()[eventTypeOrdinal];
  HistoryEvent event;
  switch (eventType) {
    case AM_LAUNCHED:
      event = new AMLaunchedEvent();
      break;
    case AM_STARTED:
      event = new AMStartedEvent();
      break;
    case DAG_SUBMITTED:
      event = new DAGSubmittedEvent();
      break;
    case DAG_INITIALIZED:
      event = new DAGInitializedEvent();
      break;
    case DAG_STARTED:
      event = new DAGStartedEvent();
      break;
    case DAG_COMMIT_STARTED:
      event = new DAGCommitStartedEvent();
      break;
    case DAG_FINISHED:
      event = new DAGFinishedEvent();
      break;
    case DAG_KILL_REQUEST:
      event = new DAGKillRequestEvent();
      break;
    case CONTAINER_LAUNCHED:
      event = new ContainerLaunchedEvent();
      break;
    case CONTAINER_STOPPED:
      event = new ContainerStoppedEvent();
      break;
    case VERTEX_INITIALIZED:
      event = new VertexInitializedEvent();
      break;
    case VERTEX_CONFIGURE_DONE:
      event = new VertexConfigurationDoneEvent();
      break;
    case VERTEX_STARTED:
      event = new VertexStartedEvent();
      break;
    case VERTEX_COMMIT_STARTED:
      event = new VertexCommitStartedEvent();
      break;
    case VERTEX_GROUP_COMMIT_STARTED:
      event = new VertexGroupCommitStartedEvent();
      break;
    case VERTEX_GROUP_COMMIT_FINISHED:
      event = new VertexGroupCommitFinishedEvent();
      break;
    case VERTEX_FINISHED:
      event = new VertexFinishedEvent();
      break;
    case TASK_STARTED:
      event = new TaskStartedEvent();
      break;
    case TASK_FINISHED:
      event = new TaskFinishedEvent();
      break;
    case TASK_ATTEMPT_STARTED:
      event = new TaskAttemptStartedEvent();
      break;
    case TASK_ATTEMPT_FINISHED:
      event = new TaskAttemptFinishedEvent();
      break;
    default:
      throw new IOException("Invalid data found, unknown event type " + eventType);
  }
  try {
    event.fromProtoStream(inputStream);
  } catch (EOFException eof) {
    return null;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Parsed event from input stream" + ", eventType=" + eventType + ", event=" + event.toString());
  }
  return event;
}
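Because getNextEvent() returns null on EOF, a recovery log can be replayed with a simple read loop; a DAG_COMMIT_STARTED event with no later DAG_FINISHED indicates a commit whose outcome is unknown to the restarted AM. A hypothetical driver sketch (it would have to live inside RecoveryParser since getNextEvent is private; the FileSystem instance and recovery file path are assumptions, and the real parser tracks far richer per-DAG state):

// Hypothetical replay loop over a recovery log: read events until EOF (null).
FSDataInputStream in = fs.open(recoveryFilePath); // fs and recoveryFilePath assumed to exist
HistoryEvent ev;
boolean commitInFlight = false;
while ((ev = getNextEvent(in)) != null) {
  if (ev.getEventType() == HistoryEventType.DAG_COMMIT_STARTED) {
    // a commit had begun; without a later DAG_FINISHED its outcome is unknown
    commitInFlight = true;
  } else if (ev.getEventType() == HistoryEventType.DAG_FINISHED) {
    commitInFlight = false;
  }
}
in.close();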