use of org.apache.tez.dag.history.HistoryEvent in project tez by apache.
the class TestAMRecovery method testVertexPartiallyFinished_Broadcast.
/**
 * Fine-grained recovery at the task level. In a vertex (v1), task 0 is done and task 1
 * is not started when the history flush happens and the AM dies. Once the AM is recovered,
 * task 0 is not re-run, while task 1 is re-run. (Broadcast edge)
 *
 * @throws Exception
 */
@Test(timeout = 120000)
public void testVertexPartiallyFinished_Broadcast() throws Exception {
  DAG dag = createDAG("VertexPartiallyFinished_Broadcast",
      ControlledImmediateStartVertexManager.class, DataMovementType.BROADCAST, true);
  TezCounters counters = runDAGAndVerify(dag, DAGStatus.State.SUCCEEDED);
  assertEquals(4, counters.findCounter(DAGCounter.NUM_SUCCEEDED_TASKS).getValue());
  assertEquals(2, counters.findCounter(TestCounter.Counter_1).getValue());
  List<HistoryEvent> historyEvents1 = readRecoveryLog(1);
  List<HistoryEvent> historyEvents2 = readRecoveryLog(2);
  printHistoryEvents(historyEvents1, 1);
  printHistoryEvents(historyEvents2, 2);
  // task_0 of v1 is finished in attempt 1, task_1 of v1 is not finished in
  // attempt 1
  assertEquals(1, findTaskAttemptFinishedEvent(historyEvents1, 0, 0).size());
  assertEquals(0, findTaskAttemptFinishedEvent(historyEvents1, 0, 1).size());
  // task_0 of v1 is finished in attempt 1 and not rerun, task_1 of v1 is
  // finished in attempt 2
  assertEquals(1, findTaskAttemptFinishedEvent(historyEvents2, 0, 0).size());
  assertEquals(1, findTaskAttemptFinishedEvent(historyEvents2, 0, 1).size());
}
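The helper findTaskAttemptFinishedEvent is referenced by the test but not shown on this page. A minimal sketch of how such a filter over the recovered HistoryEvent list might look, assuming the standard TaskAttemptFinishedEvent and TezTaskAttemptID accessors (the actual helper in TestAMRecovery may differ, for example by also skipping KILLED attempts):

// Sketch only: collects TASK_ATTEMPT_FINISHED events that belong to the given
// vertex index and task index within the parsed recovery log.
private List<TaskAttemptFinishedEvent> findTaskAttemptFinishedEvent(
    List<HistoryEvent> historyEvents, int vertexId, int taskId) {
  List<TaskAttemptFinishedEvent> resultEvents = new ArrayList<>();
  for (HistoryEvent historyEvent : historyEvents) {
    if (historyEvent.getEventType() == HistoryEventType.TASK_ATTEMPT_FINISHED) {
      TaskAttemptFinishedEvent finishedEvent = (TaskAttemptFinishedEvent) historyEvent;
      TezTaskAttemptID attemptId = finishedEvent.getTaskAttemptID();
      if (attemptId.getTaskID().getVertexID().getId() == vertexId
          && attemptId.getTaskID().getId() == taskId) {
        resultEvents.add(finishedEvent);
      }
    }
  }
  return resultEvents;
}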
use of org.apache.tez.dag.history.HistoryEvent in project tez by apache.
the class TestAMRecovery method printHistoryEvents.
private void printHistoryEvents(List<HistoryEvent> historyEvents, int attemptId) {
  LOG.info("RecoveryLogs from attempt:" + attemptId);
  for (HistoryEvent historyEvent : historyEvents) {
    LOG.info("Parsed event from recovery stream" + ", eventType=" + historyEvent.getEventType()
        + ", event=" + historyEvent);
  }
  LOG.info("");
}
use of org.apache.tez.dag.history.HistoryEvent in project tez by apache.
the class TestATSHistoryV15 method testGetGroupId.
@Test
public void testGetGroupId() throws Exception {
  ApplicationId appId = ApplicationId.newInstance(1000L, 1);
  TezDAGID dagid = TezDAGID.getInstance(appId, 1);
  for (final HistoryEventType eventType : HistoryEventType.values()) {
    HistoryEvent historyEvent = new HistoryEvent() {

      @Override
      public HistoryEventType getEventType() {
        return eventType;
      }

      @Override
      public boolean isRecoveryEvent() {
        return false;
      }

      @Override
      public boolean isHistoryEvent() {
        return false;
      }

      @Override
      public void toProtoStream(OutputStream outputStream) throws IOException {
      }

      @Override
      public void fromProtoStream(InputStream inputStream) throws IOException {
      }
    };
    DAGHistoryEvent event = new DAGHistoryEvent(dagid, historyEvent);
    ATSV15HistoryLoggingService service = new ATSV15HistoryLoggingService();
    AppContext appContext = mock(AppContext.class);
    when(appContext.getApplicationID()).thenReturn(appId);
    when(appContext.getHadoopShim()).thenReturn(new HadoopShim() {
    });
    service.setAppContext(appContext);
    TimelineEntityGroupId grpId = service.getGroupId(event);
    Assert.assertNotNull(grpId);
    Assert.assertEquals(appId, grpId.getApplicationId());
    switch (eventType) {
      case AM_LAUNCHED:
      case APP_LAUNCHED:
      case AM_STARTED:
      case CONTAINER_LAUNCHED:
      case CONTAINER_STOPPED:
        Assert.assertEquals(appId.toString(), grpId.getTimelineEntityGroupId());
        break;
      default:
        Assert.assertEquals(dagid.toString(), grpId.getTimelineEntityGroupId());
    }
    service.close();
  }
}
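The test above pins down the routing behaviour that ATSV15HistoryLoggingService.getGroupId is expected to have: application-lifecycle events are grouped under the application id, while all DAG-scoped events are grouped under the DAG id. A minimal sketch of that decision, as a hypothetical helper mirroring what the test asserts (not the service's actual implementation, which may also handle sharding and null DAG ids):

// Sketch only: chooses the timeline entity group the way the assertions above expect.
private TimelineEntityGroupId groupIdFor(HistoryEventType eventType, ApplicationId appId,
    TezDAGID dagId) {
  switch (eventType) {
    case AM_LAUNCHED:
    case APP_LAUNCHED:
    case AM_STARTED:
    case CONTAINER_LAUNCHED:
    case CONTAINER_STOPPED:
      // Application-level events are grouped under the application itself.
      return TimelineEntityGroupId.newTimelineEntityGroupId(appId, appId.toString());
    default:
      // Vertex, task and task-attempt events are grouped under their DAG.
      return TimelineEntityGroupId.newTimelineEntityGroupId(appId, dagId.toString());
  }
}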