Example 1 with RecoveryService

Use of org.apache.tez.dag.history.recovery.RecoveryService in project tez by apache.

From class TestRecoveryParser, method testRecoverableSummary_VertexGroupInCommitting:

@Test(timeout = 5000)
public void testRecoverableSummary_VertexGroupInCommitting() throws IOException {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    TezDAGID dagID = TezDAGID.getInstance(appId, 1);
    AppContext appContext = mock(AppContext.class);
    when(appContext.getCurrentRecoveryDir()).thenReturn(new Path(recoveryPath + "/1"));
    when(appContext.getClock()).thenReturn(new SystemClock());
    when(mockDAGImpl.getID()).thenReturn(dagID);
    when(appContext.getHadoopShim()).thenReturn(new DefaultHadoopShim());
    when(appContext.getApplicationID()).thenReturn(appId);
    RecoveryService rService = new RecoveryService(appContext);
    Configuration conf = new Configuration();
    conf.setBoolean(RecoveryService.TEZ_TEST_RECOVERY_DRAIN_EVENTS_WHEN_STOPPED, true);
    rService.init(conf);
    rService.start();
    DAGPlan dagPlan = TestDAGImpl.createTestDAGPlan();
    // write a DAGSubmittedEvent first to initialize summaryStream
    rService.handle(new DAGHistoryEvent(dagID, new DAGSubmittedEvent(dagID, 1L, dagPlan, ApplicationAttemptId.newInstance(appId, 1), null, "user", new Configuration(), null, null)));
    // It should be fine to skip other events, just for testing.
    rService.handle(new DAGHistoryEvent(dagID, new VertexGroupCommitStartedEvent(dagID, "group_1", Lists.newArrayList(TezVertexID.getInstance(dagID, 0), TezVertexID.getInstance(dagID, 1)), 0L)));
    rService.stop();
    DAGRecoveryData dagData = parser.parseRecoveryData();
    assertEquals(dagID, dagData.recoveredDagID);
    assertTrue(dagData.nonRecoverable);
    assertTrue(dagData.reason.contains("Vertex Group Commit was in progress"));
}
Also used: Path (org.apache.hadoop.fs.Path), RecoveryService (org.apache.tez.dag.history.recovery.RecoveryService), SystemClock (org.apache.hadoop.yarn.util.SystemClock), Configuration (org.apache.hadoop.conf.Configuration), VertexGroupCommitStartedEvent (org.apache.tez.dag.history.events.VertexGroupCommitStartedEvent), DAGHistoryEvent (org.apache.tez.dag.history.DAGHistoryEvent), DefaultHadoopShim (org.apache.tez.hadoop.shim.DefaultHadoopShim), DAGPlan (org.apache.tez.dag.api.records.DAGProtos.DAGPlan), TezDAGID (org.apache.tez.dag.records.TezDAGID), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), DAGRecoveryData (org.apache.tez.dag.app.RecoveryParser.DAGRecoveryData), DAGSubmittedEvent (org.apache.tez.dag.history.events.DAGSubmittedEvent)
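
All five snippets on this page reference shared fixtures of TestRecoveryParser (recoveryPath, parser, mockDAGImpl, mockAppMaster) that the aggregated view does not show. Below is a minimal sketch of what such a setup could look like, inferred from how those fields are used above; the RecoveryParser constructor arguments and the mock wiring are assumptions rather than code copied from the Tez source.

    private Path recoveryPath;          // base directory holding per-attempt recovery data
    private FileSystem localFS;         // assumed local FileSystem backing the recovery dir
    private DAGAppMaster mockAppMaster; // mock whose createDAG(...) call is verified in the tests
    private DAGImpl mockDAGImpl;        // mock DAG returned by createDAG; supplies getID()
    private RecoveryParser parser;      // object under test, reads data from recoveryPath

    @Before
    public void setUp() throws IOException {
        Configuration conf = new Configuration();
        localFS = FileSystem.getLocal(conf);
        recoveryPath = new Path(System.getProperty("test.build.data", "/tmp"), "recovery");
        localFS.delete(recoveryPath, true);
        mockAppMaster = mock(DAGAppMaster.class);
        mockDAGImpl = mock(DAGImpl.class);
        doReturn(mockDAGImpl).when(mockAppMaster).createDAG(any(DAGPlan.class), any(TezDAGID.class));
        // Constructor shape assumed: (app master, filesystem, recovery dir, current attempt id).
        parser = new RecoveryParser(mockAppMaster, localFS, recoveryPath, 3);
    }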

Example 2 with RecoveryService

Use of org.apache.tez.dag.history.recovery.RecoveryService in project tez by apache.

From class TestRecoveryParser, method testRecoverableSummary_DAGInCommitting:

@Test(timeout = 5000)
public void testRecoverableSummary_DAGInCommitting() throws IOException {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    TezDAGID dagID = TezDAGID.getInstance(appId, 1);
    AppContext appContext = mock(AppContext.class);
    when(appContext.getCurrentRecoveryDir()).thenReturn(new Path(recoveryPath + "/1"));
    when(appContext.getClock()).thenReturn(new SystemClock());
    when(mockDAGImpl.getID()).thenReturn(dagID);
    RecoveryService rService = new RecoveryService(appContext);
    Configuration conf = new Configuration();
    conf.setBoolean(RecoveryService.TEZ_TEST_RECOVERY_DRAIN_EVENTS_WHEN_STOPPED, true);
    rService.init(conf);
    rService.start();
    DAGPlan dagPlan = TestDAGImpl.createTestDAGPlan();
    // write a DAGSubmittedEvent first to initialize summaryStream
    rService.handle(new DAGHistoryEvent(dagID, new DAGSubmittedEvent(dagID, 1L, dagPlan, ApplicationAttemptId.newInstance(appId, 1), null, "user", new Configuration(), null, null)));
    // It should be fine to skip other events, just for testing.
    rService.handle(new DAGHistoryEvent(dagID, new DAGCommitStartedEvent(dagID, 0L)));
    rService.stop();
    DAGRecoveryData dagData = parser.parseRecoveryData();
    assertEquals(dagID, dagData.recoveredDagID);
    assertTrue(dagData.nonRecoverable);
    assertTrue(dagData.reason.contains("DAG Commit was in progress"));
}
Also used: Path (org.apache.hadoop.fs.Path), DAGCommitStartedEvent (org.apache.tez.dag.history.events.DAGCommitStartedEvent), RecoveryService (org.apache.tez.dag.history.recovery.RecoveryService), DAGPlan (org.apache.tez.dag.api.records.DAGProtos.DAGPlan), SystemClock (org.apache.hadoop.yarn.util.SystemClock), Configuration (org.apache.hadoop.conf.Configuration), TezDAGID (org.apache.tez.dag.records.TezDAGID), DAGHistoryEvent (org.apache.tez.dag.history.DAGHistoryEvent), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), DAGRecoveryData (org.apache.tez.dag.app.RecoveryParser.DAGRecoveryData), DAGSubmittedEvent (org.apache.tez.dag.history.events.DAGSubmittedEvent)
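
For contrast, a hedged variation on the same setup: if the summary stream also records that the DAG commit finished, the parsed data should no longer be flagged non-recoverable. DAGCommitFinishedEvent and its (dagID, finishTime) constructor are assumed here from the naming pattern of the other events; verify them against the Tez source.

    rService.handle(new DAGHistoryEvent(dagID, new DAGCommitStartedEvent(dagID, 0L)));
    // Assumed event and constructor: marks the DAG commit as completed in the summary stream.
    rService.handle(new DAGHistoryEvent(dagID, new DAGCommitFinishedEvent(dagID, 1L)));
    rService.stop();
    DAGRecoveryData dagData = parser.parseRecoveryData();
    assertEquals(dagID, dagData.recoveredDagID);
    assertFalse(dagData.nonRecoverable);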

Example 3 with RecoveryService

Use of org.apache.tez.dag.history.recovery.RecoveryService in project tez by apache.

From class TestRecoveryParser, method testSkipAllOtherEvents_1:

// skipAllOtherEvents due to non-recoverable (in the middle of commit)
@Test(timeout = 5000)
public void testSkipAllOtherEvents_1() throws IOException {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    TezDAGID dagID = TezDAGID.getInstance(appId, 1);
    AppContext appContext = mock(AppContext.class);
    when(appContext.getCurrentRecoveryDir()).thenReturn(new Path(recoveryPath + "/1"));
    when(appContext.getClock()).thenReturn(new SystemClock());
    DAGPlan dagPlan = TestDAGImpl.createTestDAGPlan();
    // write data in attempt_1
    RecoveryService rService = new RecoveryService(appContext);
    Configuration conf = new Configuration();
    conf.setBoolean(RecoveryService.TEZ_TEST_RECOVERY_DRAIN_EVENTS_WHEN_STOPPED, true);
    rService.init(conf);
    rService.start();
    rService.handle(new DAGHistoryEvent(dagID, new DAGSubmittedEvent(dagID, 1L, dagPlan, ApplicationAttemptId.newInstance(appId, 1), null, "user", new Configuration(), null, null)));
    rService.handle(new DAGHistoryEvent(dagID, new DAGInitializedEvent(dagID, 1L, "user", dagPlan.getName(), null)));
    // only for testing, DAGCommitStartedEvent is not supposed to happen at this time.
    rService.handle(new DAGHistoryEvent(dagID, new DAGCommitStartedEvent(dagID, System.currentTimeMillis())));
    rService.stop();
    // write data in attempt_2
    when(appContext.getCurrentRecoveryDir()).thenReturn(new Path(recoveryPath + "/2"));
    rService = new RecoveryService(appContext);
    rService.init(conf);
    rService.start();
    // only for testing, DAGStartedEvent is not supposed to happen at this time.
    rService.handle(new DAGHistoryEvent(dagID, new DAGStartedEvent(dagID, 1L, "user", "dag1")));
    rService.stop();
    DAGRecoveryData dagData = parser.parseRecoveryData();
    assertTrue(dagData.nonRecoverable);
    assertTrue(dagData.reason.contains("DAG Commit was in progress, not recoverable,"));
    // DAGSubmittedEvent is handled, but DAGInitializedEvent and the DAGStartedEvent in the next attempt
    // are both skipped because the DAG is not recoverable.
    verify(mockAppMaster).createDAG(any(DAGPlan.class), any(TezDAGID.class));
    assertNull(dagData.getDAGInitializedEvent());
    assertNull(dagData.getDAGStartedEvent());
}
Also used: Path (org.apache.hadoop.fs.Path), DAGCommitStartedEvent (org.apache.tez.dag.history.events.DAGCommitStartedEvent), RecoveryService (org.apache.tez.dag.history.recovery.RecoveryService), SystemClock (org.apache.hadoop.yarn.util.SystemClock), Configuration (org.apache.hadoop.conf.Configuration), DAGHistoryEvent (org.apache.tez.dag.history.DAGHistoryEvent), DAGInitializedEvent (org.apache.tez.dag.history.events.DAGInitializedEvent), DAGPlan (org.apache.tez.dag.api.records.DAGProtos.DAGPlan), TezDAGID (org.apache.tez.dag.records.TezDAGID), DAGStartedEvent (org.apache.tez.dag.history.events.DAGStartedEvent), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), DAGRecoveryData (org.apache.tez.dag.app.RecoveryParser.DAGRecoveryData), DAGSubmittedEvent (org.apache.tez.dag.history.events.DAGSubmittedEvent)
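
Outside the test, the nonRecoverable flag and reason are what a caller acts on. A rough sketch of that decision, using only fields shown in the snippets (the logger and the surrounding control flow are illustrative assumptions, not the actual DAGAppMaster recovery path):

    DAGRecoveryData recoveredData = parser.parseRecoveryData();
    if (recoveredData != null && recoveredData.nonRecoverable) {
        // A commit was in flight when the previous attempt died, so re-running the DAG could
        // duplicate committed output; surface the parser's reason instead of resuming.
        LOG.warn("DAG " + recoveredData.recoveredDagID + " cannot be recovered: "
            + recoveredData.reason);
    }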

Example 4 with RecoveryService

Use of org.apache.tez.dag.history.recovery.RecoveryService in project tez by apache.

From class TestRecoveryParser, method testRecoverableSummary_VertexFinishCommitting:

@Test(timeout = 5000)
public void testRecoverableSummary_VertexFinishCommitting() throws IOException {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    TezDAGID dagID = TezDAGID.getInstance(appId, 1);
    AppContext appContext = mock(AppContext.class);
    when(appContext.getCurrentRecoveryDir()).thenReturn(new Path(recoveryPath + "/1"));
    when(appContext.getClock()).thenReturn(new SystemClock());
    when(mockDAGImpl.getID()).thenReturn(dagID);
    when(appContext.getHadoopShim()).thenReturn(new DefaultHadoopShim());
    when(appContext.getApplicationID()).thenReturn(appId);
    RecoveryService rService = new RecoveryService(appContext);
    Configuration conf = new Configuration();
    conf.setBoolean(RecoveryService.TEZ_TEST_RECOVERY_DRAIN_EVENTS_WHEN_STOPPED, true);
    rService.init(conf);
    rService.start();
    DAGPlan dagPlan = TestDAGImpl.createTestDAGPlan();
    // write a DAGSubmittedEvent first to initialize summaryStream
    rService.handle(new DAGHistoryEvent(dagID, new DAGSubmittedEvent(dagID, 1L, dagPlan, ApplicationAttemptId.newInstance(appId, 1), null, "user", new Configuration(), null, null)));
    // It should be fine to skip other events, just for testing.
    TezVertexID vertexId = TezVertexID.getInstance(dagID, 0);
    rService.handle(new DAGHistoryEvent(dagID, new VertexCommitStartedEvent(vertexId, 0L)));
    rService.handle(new DAGHistoryEvent(dagID, new VertexFinishedEvent(vertexId, "v1", 10, 0L, 0L, 0L, 0L, 0L, VertexState.SUCCEEDED, "", null, null, null, null)));
    rService.stop();
    DAGRecoveryData dagData = parser.parseRecoveryData();
    assertEquals(dagID, dagData.recoveredDagID);
    assertFalse(dagData.nonRecoverable);
}
Also used: Path (org.apache.hadoop.fs.Path), RecoveryService (org.apache.tez.dag.history.recovery.RecoveryService), SystemClock (org.apache.hadoop.yarn.util.SystemClock), Configuration (org.apache.hadoop.conf.Configuration), DAGHistoryEvent (org.apache.tez.dag.history.DAGHistoryEvent), DefaultHadoopShim (org.apache.tez.hadoop.shim.DefaultHadoopShim), DAGPlan (org.apache.tez.dag.api.records.DAGProtos.DAGPlan), TezDAGID (org.apache.tez.dag.records.TezDAGID), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), DAGRecoveryData (org.apache.tez.dag.app.RecoveryParser.DAGRecoveryData), TezVertexID (org.apache.tez.dag.records.TezVertexID), VertexFinishedEvent (org.apache.tez.dag.history.events.VertexFinishedEvent), DAGSubmittedEvent (org.apache.tez.dag.history.events.DAGSubmittedEvent), VertexCommitStartedEvent (org.apache.tez.dag.history.events.VertexCommitStartedEvent)
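
As a hedged counterpart to the test above: if only the VertexCommitStartedEvent reaches the summary and the vertex never reports finished, the parse should be flagged non-recoverable, mirroring the vertex-group case in Example 1. The exact reason string checked below is an assumption made by analogy.

    rService.handle(new DAGHistoryEvent(dagID,
        new VertexCommitStartedEvent(TezVertexID.getInstance(dagID, 0), 0L)));
    rService.stop();
    DAGRecoveryData dagData = parser.parseRecoveryData();
    assertTrue(dagData.nonRecoverable);
    // Assumed message format, by analogy with the group-commit assertion in Example 1.
    assertTrue(dagData.reason.contains("Vertex Commit was in progress"));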

Example 5 with RecoveryService

Use of org.apache.tez.dag.history.recovery.RecoveryService in project tez by apache.

From class TestRecoveryParser, method testSkipAllOtherEvents_2:

// skipAllOtherEvents due to dag finished
@Test(timeout = 5000)
public void testSkipAllOtherEvents_2() throws IOException {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    TezDAGID dagID = TezDAGID.getInstance(appId, 1);
    AppContext appContext = mock(AppContext.class);
    when(appContext.getCurrentRecoveryDir()).thenReturn(new Path(recoveryPath + "/1"));
    when(appContext.getClock()).thenReturn(new SystemClock());
    DAGPlan dagPlan = TestDAGImpl.createTestDAGPlan();
    // write data in attempt_1
    RecoveryService rService = new RecoveryService(appContext);
    Configuration conf = new Configuration();
    conf.setBoolean(RecoveryService.TEZ_TEST_RECOVERY_DRAIN_EVENTS_WHEN_STOPPED, true);
    rService.init(conf);
    rService.start();
    rService.handle(new DAGHistoryEvent(dagID, new DAGSubmittedEvent(dagID, 1L, dagPlan, ApplicationAttemptId.newInstance(appId, 1), null, "user", new Configuration(), null, null)));
    rService.handle(new DAGHistoryEvent(dagID, new DAGInitializedEvent(dagID, 1L, "user", dagPlan.getName(), null)));
    rService.handle(new DAGHistoryEvent(dagID, new DAGFinishedEvent(dagID, 1L, 2L, DAGState.FAILED, "diag", null, "user", "dag1", null, appAttemptId, dagPlan)));
    rService.handle(new DAGHistoryEvent(dagID, new DAGStartedEvent(dagID, 1L, "user", "dag1")));
    rService.stop();
    // write data in attempt_2
    when(appContext.getCurrentRecoveryDir()).thenReturn(new Path(recoveryPath + "/2"));
    rService = new RecoveryService(appContext);
    rService.init(conf);
    rService.start();
    rService.handle(new DAGHistoryEvent(dagID, new DAGStartedEvent(dagID, 1L, "user", "dag1")));
    rService.stop();
    DAGRecoveryData dagData = parser.parseRecoveryData();
    assertFalse(dagData.nonRecoverable);
    assertEquals(DAGState.FAILED, dagData.dagState);
    assertTrue(dagData.isCompleted);
    // DAGSubmittedEvent, DAGInitializedEvent and DAGFinishedEvent are handled
    verify(mockAppMaster).createDAG(any(DAGPlan.class), any(TezDAGID.class));
    // DAGInitializedEvent may not have been handled before DAGFinishedEvent,
    // because DAGFinishedEvent's writeToRecoveryImmediately is true
    assertNotNull(dagData.getDAGFinishedEvent());
    assertNull(dagData.getDAGStartedEvent());
}
Also used: Path (org.apache.hadoop.fs.Path), RecoveryService (org.apache.tez.dag.history.recovery.RecoveryService), SystemClock (org.apache.hadoop.yarn.util.SystemClock), Configuration (org.apache.hadoop.conf.Configuration), DAGHistoryEvent (org.apache.tez.dag.history.DAGHistoryEvent), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), DAGInitializedEvent (org.apache.tez.dag.history.events.DAGInitializedEvent), DAGPlan (org.apache.tez.dag.api.records.DAGProtos.DAGPlan), TezDAGID (org.apache.tez.dag.records.TezDAGID), DAGStartedEvent (org.apache.tez.dag.history.events.DAGStartedEvent), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), DAGFinishedEvent (org.apache.tez.dag.history.events.DAGFinishedEvent), DAGRecoveryData (org.apache.tez.dag.app.RecoveryParser.DAGRecoveryData), DAGSubmittedEvent (org.apache.tez.dag.history.events.DAGSubmittedEvent)
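
The comment about writeToRecoveryImmediately refers to the summary-event contract: DAGFinishedEvent is flushed to the recovery summary right away, so it can be persisted before queued non-summary events such as DAGInitializedEvent. A minimal probe of that contract, assuming the SummaryEvent interface exposes writeToRecoveryImmediately() as in the Tez source (treat the cast and method name as assumptions):

    DAGFinishedEvent finishedEvent = new DAGFinishedEvent(dagID, 1L, 2L, DAGState.FAILED,
        "diag", null, "user", "dag1", null, appAttemptId, dagPlan);
    // Assumed API: summary events returning true here are written out immediately by
    // RecoveryService instead of waiting in the event queue.
    assertTrue(((SummaryEvent) finishedEvent).writeToRecoveryImmediately());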

Aggregations

Configuration (org.apache.hadoop.conf.Configuration): 11 uses
Path (org.apache.hadoop.fs.Path): 11 uses
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 11 uses
SystemClock (org.apache.hadoop.yarn.util.SystemClock): 11 uses
DAGPlan (org.apache.tez.dag.api.records.DAGProtos.DAGPlan): 11 uses
DAGHistoryEvent (org.apache.tez.dag.history.DAGHistoryEvent): 11 uses
DAGSubmittedEvent (org.apache.tez.dag.history.events.DAGSubmittedEvent): 11 uses
RecoveryService (org.apache.tez.dag.history.recovery.RecoveryService): 11 uses
TezDAGID (org.apache.tez.dag.records.TezDAGID): 11 uses
DAGRecoveryData (org.apache.tez.dag.app.RecoveryParser.DAGRecoveryData): 10 uses
DefaultHadoopShim (org.apache.tez.hadoop.shim.DefaultHadoopShim): 5 uses
DAGInitializedEvent (org.apache.tez.dag.history.events.DAGInitializedEvent): 4 uses
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 3 uses
DAGCommitStartedEvent (org.apache.tez.dag.history.events.DAGCommitStartedEvent): 3 uses
DAGStartedEvent (org.apache.tez.dag.history.events.DAGStartedEvent): 3 uses
VertexFinishedEvent (org.apache.tez.dag.history.events.VertexFinishedEvent): 3 uses
TezVertexID (org.apache.tez.dag.records.TezVertexID): 3 uses
DAGFinishedEvent (org.apache.tez.dag.history.events.DAGFinishedEvent): 2 uses
VertexCommitStartedEvent (org.apache.tez.dag.history.events.VertexCommitStartedEvent): 2 uses
VertexGroupCommitStartedEvent (org.apache.tez.dag.history.events.VertexGroupCommitStartedEvent): 2 uses