Example 1 with DefaultHadoopShim

Use of org.apache.tez.hadoop.shim.DefaultHadoopShim in project tez by apache.

From the class TestOnFileUnorderedKVOutput, method createOutputContext:

private OutputContext createOutputContext(Configuration conf, TezSharedExecutor sharedExecutor) throws IOException {
    int appAttemptNumber = 1;
    TezUmbilical tezUmbilical = mock(TezUmbilical.class);
    String dagName = "currentDAG";
    String taskVertexName = "currentVertex";
    String destinationVertexName = "destinationVertex";
    TezDAGID dagID = TezDAGID.getInstance("2000", 1, 1);
    TezVertexID vertexID = TezVertexID.getInstance(dagID, 1);
    TezTaskID taskID = TezTaskID.getInstance(vertexID, 1);
    TezTaskAttemptID taskAttemptID = TezTaskAttemptID.getInstance(taskID, 1);
    UserPayload userPayload = TezUtils.createUserPayloadFromConf(conf);
    TaskSpec mockSpec = mock(TaskSpec.class);
    when(mockSpec.getInputs()).thenReturn(Collections.singletonList(mock(InputSpec.class)));
    when(mockSpec.getOutputs()).thenReturn(Collections.singletonList(mock(OutputSpec.class)));
    task = new LogicalIOProcessorRuntimeTask(mockSpec, appAttemptNumber, new Configuration(), new String[] { "/" }, tezUmbilical, null, null, null, null, "", null, 1024, false, new DefaultHadoopShim(), sharedExecutor);
    LogicalIOProcessorRuntimeTask runtimeTask = spy(task);
    Map<String, String> auxEnv = new HashMap<String, String>();
    ByteBuffer bb = ByteBuffer.allocate(4);
    bb.putInt(shufflePort);
    bb.position(0);
    AuxiliaryServiceHelper.setServiceDataIntoEnv(conf.get(TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID, TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID_DEFAULT), bb, auxEnv);
    OutputDescriptor outputDescriptor = mock(OutputDescriptor.class);
    when(outputDescriptor.getClassName()).thenReturn("OutputDescriptor");
    OutputContext realOutputContext = new TezOutputContextImpl(conf, new String[] { workDir.toString() }, appAttemptNumber, tezUmbilical, dagName, taskVertexName, destinationVertexName, -1, taskAttemptID, 0, userPayload, runtimeTask, null, auxEnv, new MemoryDistributor(1, 1, conf), outputDescriptor, null, new ExecutionContextImpl("localhost"), 2048, new TezSharedExecutor(defaultConf));
    verify(runtimeTask, times(1)).addAndGetTezCounter(destinationVertexName);
    verify(runtimeTask, times(1)).getTaskStatistics();
    // verify output stats object got created
    Assert.assertTrue(task.getTaskStatistics().getIOStatistics().containsKey(destinationVertexName));
    OutputContext outputContext = spy(realOutputContext);
    doAnswer(new Answer() {

        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            long requestedSize = (Long) invocation.getArguments()[0];
            MemoryUpdateCallbackHandler callback = (MemoryUpdateCallbackHandler) invocation.getArguments()[1];
            callback.memoryAssigned(requestedSize);
            return null;
        }
    }).when(outputContext).requestInitialMemory(anyLong(), any(MemoryUpdateCallback.class));
    return outputContext;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) TezConfiguration(org.apache.tez.dag.api.TezConfiguration) TezRuntimeConfiguration(org.apache.tez.runtime.library.api.TezRuntimeConfiguration) HashMap(java.util.HashMap) ByteString(com.google.protobuf.ByteString) DefaultHadoopShim(org.apache.tez.hadoop.shim.DefaultHadoopShim) OutputDescriptor(org.apache.tez.dag.api.OutputDescriptor) TezDAGID(org.apache.tez.dag.records.TezDAGID) MemoryUpdateCallbackHandler(org.apache.tez.runtime.library.common.MemoryUpdateCallbackHandler) MemoryUpdateCallback(org.apache.tez.runtime.api.MemoryUpdateCallback) TezVertexID(org.apache.tez.dag.records.TezVertexID) LogicalIOProcessorRuntimeTask(org.apache.tez.runtime.LogicalIOProcessorRuntimeTask) TezOutputContextImpl(org.apache.tez.runtime.api.impl.TezOutputContextImpl) UserPayload(org.apache.tez.dag.api.UserPayload) ExecutionContextImpl(org.apache.tez.runtime.api.impl.ExecutionContextImpl) TaskSpec(org.apache.tez.runtime.api.impl.TaskSpec) ByteBuffer(java.nio.ByteBuffer) TezTaskID(org.apache.tez.dag.records.TezTaskID) OutputContext(org.apache.tez.runtime.api.OutputContext) Mockito.doAnswer(org.mockito.Mockito.doAnswer) Answer(org.mockito.stubbing.Answer) InvocationOnMock(org.mockito.invocation.InvocationOnMock) TezSharedExecutor(org.apache.tez.common.TezSharedExecutor) TezUmbilical(org.apache.tez.runtime.api.impl.TezUmbilical) MemoryDistributor(org.apache.tez.runtime.common.resources.MemoryDistributor) TezTaskAttemptID(org.apache.tez.dag.records.TezTaskAttemptID)
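
The key piece of this helper is the doAnswer stub at the end: any Output that calls requestInitialMemory on the returned context is granted the full requested size immediately, instead of going through the real MemoryDistributor. A minimal sketch of what a caller then observes, assuming MemoryUpdateCallbackHandler exposes the granted size via getMemoryAssigned() (the call site below is illustrative, not part of the test):

// Illustrative only: exercising the stubbed requestInitialMemory.
// Assumes org.apache.tez.runtime.library.common.MemoryUpdateCallbackHandler#getMemoryAssigned().
MemoryUpdateCallbackHandler handler = new MemoryUpdateCallbackHandler();
outputContext.requestInitialMemory(512 * 1024, handler);
long granted = handler.getMemoryAssigned();  // 524288: the stub grants exactly what was requested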

Example 2 with DefaultHadoopShim

Use of org.apache.tez.hadoop.shim.DefaultHadoopShim in project tez by apache.

From the class TestVertexImpl, method setupPostDagCreation:

@SuppressWarnings({ "unchecked", "rawtypes" })
public void setupPostDagCreation() throws TezException {
    String dagName = "dag0";
    // dispatcher may be created multiple times (setupPostDagCreation may be called multiple times)
    if (dispatcher != null) {
        dispatcher.stop();
    }
    dispatcher = new DrainDispatcher();
    appContext = mock(AppContext.class);
    when(appContext.getHadoopShim()).thenReturn(new DefaultHadoopShim());
    when(appContext.getContainerLauncherName(anyInt())).thenReturn(TezConstants.getTezYarnServicePluginName());
    thh = mock(TaskHeartbeatHandler.class);
    historyEventHandler = mock(HistoryEventHandler.class);
    TaskSchedulerManager taskScheduler = mock(TaskSchedulerManager.class);
    UserGroupInformation ugi;
    try {
        ugi = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    DAG dag = mock(DAG.class);
    doReturn(ugi).when(dag).getDagUGI();
    doReturn(dagName).when(dag).getName();
    Map<String, LocalResource> localResources = new HashMap<>();
    for (PlanLocalResource planLR : dagPlan.getLocalResourceList()) {
        localResources.put(planLR.getName(), DagTypeConverters.convertPlanLocalResourceToLocalResource(planLR));
    }
    when(dag.getLocalResources()).thenReturn(localResources);
    doReturn(appAttemptId).when(appContext).getApplicationAttemptId();
    doReturn(appAttemptId.getApplicationId()).when(appContext).getApplicationID();
    doReturn(dag).when(appContext).getCurrentDAG();
    execService = mock(ListeningExecutorService.class);
    final ListenableFuture<Void> mockFuture = mock(ListenableFuture.class);
    Mockito.doAnswer(new Answer() {

        public ListenableFuture<Void> answer(InvocationOnMock invocation) {
            Object[] args = invocation.getArguments();
            CallableEvent e = (CallableEvent) args[0];
            dispatcher.getEventHandler().handle(e);
            return mockFuture;
        }
    }).when(execService).submit((Callable<Void>) any());
    MockClock clock = new MockClock();
    doReturn(execService).when(appContext).getExecService();
    doReturn(conf).when(appContext).getAMConf();
    doReturn(new Credentials()).when(dag).getCredentials();
    doReturn(DAGPlan.getDefaultInstance()).when(dag).getJobPlan();
    doReturn(dagId).when(appContext).getCurrentDAGID();
    doReturn(dagId).when(dag).getID();
    doReturn(taskScheduler).when(appContext).getTaskScheduler();
    doReturn(Resource.newInstance(102400, 60)).when(taskScheduler).getTotalResources(0);
    doReturn(historyEventHandler).when(appContext).getHistoryHandler();
    doReturn(dispatcher.getEventHandler()).when(appContext).getEventHandler();
    doReturn(clock).when(appContext).getClock();
    vertexGroups = Maps.newHashMap();
    for (PlanVertexGroupInfo groupInfo : dagPlan.getVertexGroupsList()) {
        vertexGroups.put(groupInfo.getGroupName(), new VertexGroupInfo(groupInfo));
    }
    // updateTracker may be created multiple times (setupPostDagCreation may be called multiple times)
    if (updateTracker != null) {
        updateTracker.stop();
    }
    updateTracker = new StateChangeNotifierForTest(appContext.getCurrentDAG());
    setupVertices();
    when(dag.getVertex(any(TezVertexID.class))).thenAnswer(new Answer<Vertex>() {

        @Override
        public Vertex answer(InvocationOnMock invocation) throws Throwable {
            Object[] args = invocation.getArguments();
            if (args.length != 1) {
                return null;
            }
            TezVertexID vId = (TezVertexID) args[0];
            return vertexIdMap.get(vId);
        }
    });
    when(dag.getVertex(any(String.class))).thenAnswer(new Answer<Vertex>() {

        @Override
        public Vertex answer(InvocationOnMock invocation) throws Throwable {
            Object[] args = invocation.getArguments();
            if (args.length != 1) {
                return null;
            }
            String vId = (String) args[0];
            return vertices.get(vId);
        }
    });
    // TODO - this test logic is tightly coupled to the DAGImpl implementation.
    edges = new HashMap<String, Edge>();
    for (EdgePlan edgePlan : dagPlan.getEdgeList()) {
        EdgeProperty edgeProperty = DagTypeConverters.createEdgePropertyMapFromDAGPlan(edgePlan);
        edges.put(edgePlan.getId(), new Edge(edgeProperty, dispatcher.getEventHandler(), conf));
    }
    parseVertexEdges();
    for (Edge edge : edges.values()) {
        edge.initialize();
    }
    dispatcher.register(CallableEventType.class, new CallableEventDispatcher());
    taskAttemptEventDispatcher = new TaskAttemptEventDispatcher();
    dispatcher.register(TaskAttemptEventType.class, taskAttemptEventDispatcher);
    taskEventDispatcher = new TaskEventDispatcher();
    dispatcher.register(TaskEventType.class, taskEventDispatcher);
    vertexEventDispatcher = new VertexEventDispatcher();
    dispatcher.register(VertexEventType.class, vertexEventDispatcher);
    dagEventDispatcher = new DagEventDispatcher();
    dispatcher.register(DAGEventType.class, dagEventDispatcher);
    amSchedulerEventDispatcher = new AMSchedulerEventDispatcher();
    dispatcher.register(AMSchedulerEventType.class, amSchedulerEventDispatcher);
    dispatcher.init(conf);
    dispatcher.start();
}
Also used : DrainDispatcher(org.apache.tez.common.DrainDispatcher) Vertex(org.apache.tez.dag.app.dag.Vertex) HashMap(java.util.HashMap) ByteString(com.google.protobuf.ByteString) DefaultHadoopShim(org.apache.tez.hadoop.shim.DefaultHadoopShim) HistoryEventHandler(org.apache.tez.dag.history.HistoryEventHandler) CallableEvent(org.apache.tez.dag.app.dag.event.CallableEvent) PlanVertexGroupInfo(org.apache.tez.dag.api.records.DAGProtos.PlanVertexGroupInfo) StateChangeNotifierForTest(org.apache.tez.dag.app.dag.TestStateChangeNotifier.StateChangeNotifierForTest) PlanVertexGroupInfo(org.apache.tez.dag.api.records.DAGProtos.PlanVertexGroupInfo) VertexGroupInfo(org.apache.tez.dag.app.dag.impl.DAGImpl.VertexGroupInfo) TaskHeartbeatHandler(org.apache.tez.dag.app.TaskHeartbeatHandler) TezVertexID(org.apache.tez.dag.records.TezVertexID) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation) AppContext(org.apache.tez.dag.app.AppContext) IOException(java.io.IOException) DAG(org.apache.tez.dag.app.dag.DAG) PlanLocalResource(org.apache.tez.dag.api.records.DAGProtos.PlanLocalResource) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) Answer(org.mockito.stubbing.Answer) PlanLocalResource(org.apache.tez.dag.api.records.DAGProtos.PlanLocalResource) InvocationOnMock(org.mockito.invocation.InvocationOnMock) TaskSchedulerManager(org.apache.tez.dag.app.rm.TaskSchedulerManager) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) EdgeProperty(org.apache.tez.dag.api.EdgeProperty) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) MockClock(org.apache.tez.dag.app.MockClock) Credentials(org.apache.hadoop.security.Credentials) EdgePlan(org.apache.tez.dag.api.records.DAGProtos.EdgePlan)
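
A notable trick in this setup is the execService stub: anything submitted to the mocked ListeningExecutorService is pushed straight back through the DrainDispatcher, so work that would normally run asynchronously executes deterministically inside the test. The same pattern in generic form, using plain JDK executor types rather than Tez classes (a sketch assuming the usual Mockito static imports; not taken from the Tez sources):

// Generic "run submitted work inline" stub; the same Mockito doAnswer pattern as above,
// but with java.util.concurrent types instead of ListeningExecutorService and CallableEvent.
ExecutorService exec = mock(ExecutorService.class);
doAnswer(new Answer<Object>() {

    @Override
    public Object answer(InvocationOnMock invocation) {
        Runnable work = (Runnable) invocation.getArguments()[0];
        // execute on the calling thread, no separate pool
        work.run();
        // callers only need a non-null Future back
        return mock(Future.class);
    }
}).when(exec).submit(any(Runnable.class));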

Example 3 with DefaultHadoopShim

Use of org.apache.tez.hadoop.shim.DefaultHadoopShim in project tez by apache.

From the class TestRecoveryParser, method testRecoverableSummary_VertexGroupInCommitting:

@Test(timeout = 5000)
public void testRecoverableSummary_VertexGroupInCommitting() throws IOException {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    TezDAGID dagID = TezDAGID.getInstance(appId, 1);
    AppContext appContext = mock(AppContext.class);
    when(appContext.getCurrentRecoveryDir()).thenReturn(new Path(recoveryPath + "/1"));
    when(appContext.getClock()).thenReturn(new SystemClock());
    when(mockDAGImpl.getID()).thenReturn(dagID);
    when(appContext.getHadoopShim()).thenReturn(new DefaultHadoopShim());
    when(appContext.getApplicationID()).thenReturn(appId);
    RecoveryService rService = new RecoveryService(appContext);
    Configuration conf = new Configuration();
    conf.setBoolean(RecoveryService.TEZ_TEST_RECOVERY_DRAIN_EVENTS_WHEN_STOPPED, true);
    rService.init(conf);
    rService.start();
    DAGPlan dagPlan = TestDAGImpl.createTestDAGPlan();
    // write a DAGSubmittedEvent first to initialize summaryStream
    rService.handle(new DAGHistoryEvent(dagID, new DAGSubmittedEvent(dagID, 1L, dagPlan, ApplicationAttemptId.newInstance(appId, 1), null, "user", new Configuration(), null, null)));
    // It should be fine to skip other events, just for testing.
    rService.handle(new DAGHistoryEvent(dagID, new VertexGroupCommitStartedEvent(dagID, "group_1", Lists.newArrayList(TezVertexID.getInstance(dagID, 0), TezVertexID.getInstance(dagID, 1)), 0L)));
    rService.stop();
    DAGRecoveryData dagData = parser.parseRecoveryData();
    assertEquals(dagID, dagData.recoveredDagID);
    assertTrue(dagData.nonRecoverable);
    assertTrue(dagData.reason.contains("Vertex Group Commit was in progress"));
}
Also used : Path(org.apache.hadoop.fs.Path) RecoveryService(org.apache.tez.dag.history.recovery.RecoveryService) SystemClock(org.apache.hadoop.yarn.util.SystemClock) Configuration(org.apache.hadoop.conf.Configuration) VertexGroupCommitStartedEvent(org.apache.tez.dag.history.events.VertexGroupCommitStartedEvent) DAGHistoryEvent(org.apache.tez.dag.history.DAGHistoryEvent) DefaultHadoopShim(org.apache.tez.hadoop.shim.DefaultHadoopShim) DAGPlan(org.apache.tez.dag.api.records.DAGProtos.DAGPlan) TezDAGID(org.apache.tez.dag.records.TezDAGID) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) DAGRecoveryData(org.apache.tez.dag.app.RecoveryParser.DAGRecoveryData) DAGSubmittedEvent(org.apache.tez.dag.history.events.DAGSubmittedEvent)
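
This example and the two that follow (Examples 4 and 5) share the same AppContext scaffold and differ mainly in which recovery service variant they drive and which history events they replay before asserting on DAGRecoveryData. For reference, the common portion (copied from the tests above; recoveryPath, mockDAGImpl, and parser are fields of TestRecoveryParser set up elsewhere) is:

ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
TezDAGID dagID = TezDAGID.getInstance(appId, 1);
AppContext appContext = mock(AppContext.class);
when(appContext.getCurrentRecoveryDir()).thenReturn(new Path(recoveryPath + "/1"));
when(appContext.getClock()).thenReturn(new SystemClock());
when(mockDAGImpl.getID()).thenReturn(dagID);
when(appContext.getHadoopShim()).thenReturn(new DefaultHadoopShim());
when(appContext.getApplicationID()).thenReturn(appId);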

Example 4 with DefaultHadoopShim

Use of org.apache.tez.hadoop.shim.DefaultHadoopShim in project tez by apache.

From the class TestRecoveryParser, method testRecoverableNonSummary1:

@Test(timeout = 5000)
public void testRecoverableNonSummary1() throws IOException {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    TezDAGID dagID = TezDAGID.getInstance(appId, 1);
    AppContext appContext = mock(AppContext.class);
    when(appContext.getCurrentRecoveryDir()).thenReturn(new Path(recoveryPath + "/1"));
    when(appContext.getClock()).thenReturn(new SystemClock());
    when(mockDAGImpl.getID()).thenReturn(dagID);
    when(appContext.getHadoopShim()).thenReturn(new DefaultHadoopShim());
    when(appContext.getApplicationID()).thenReturn(appId);
    // MockRecoveryService will skip the non-summary event
    MockRecoveryService rService = new MockRecoveryService(appContext);
    Configuration conf = new Configuration();
    conf.setBoolean(RecoveryService.TEZ_TEST_RECOVERY_DRAIN_EVENTS_WHEN_STOPPED, true);
    rService.init(conf);
    rService.start();
    DAGPlan dagPlan = TestDAGImpl.createTestDAGPlan();
    // write a DAGSubmittedEvent first to initialize summaryStream
    rService.handle(new DAGHistoryEvent(dagID, new DAGSubmittedEvent(dagID, 1L, dagPlan, ApplicationAttemptId.newInstance(appId, 1), null, "user", new Configuration(), null, null)));
    // It should be fine to skip other events, just for testing.
    TezVertexID vertexId = TezVertexID.getInstance(dagID, 0);
    rService.handle(new DAGHistoryEvent(dagID, new VertexCommitStartedEvent(vertexId, 0L)));
    rService.handle(new DAGHistoryEvent(dagID, new VertexFinishedEvent(vertexId, "v1", 10, 0L, 0L, 0L, 0L, 0L, VertexState.SUCCEEDED, "", null, null, null, null)));
    rService.stop();
    DAGRecoveryData dagData = parser.parseRecoveryData();
    assertTrue(dagData.nonRecoverable);
    assertTrue(dagData.reason.contains("Vertex has been committed, but its full recovery events are not seen"));
}
Also used : Path(org.apache.hadoop.fs.Path) SystemClock(org.apache.hadoop.yarn.util.SystemClock) Configuration(org.apache.hadoop.conf.Configuration) DAGHistoryEvent(org.apache.tez.dag.history.DAGHistoryEvent) DefaultHadoopShim(org.apache.tez.hadoop.shim.DefaultHadoopShim) DAGPlan(org.apache.tez.dag.api.records.DAGProtos.DAGPlan) TezDAGID(org.apache.tez.dag.records.TezDAGID) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) DAGRecoveryData(org.apache.tez.dag.app.RecoveryParser.DAGRecoveryData) TezVertexID(org.apache.tez.dag.records.TezVertexID) VertexFinishedEvent(org.apache.tez.dag.history.events.VertexFinishedEvent) DAGSubmittedEvent(org.apache.tez.dag.history.events.DAGSubmittedEvent) VertexCommitStartedEvent(org.apache.tez.dag.history.events.VertexCommitStartedEvent)

Example 5 with DefaultHadoopShim

Use of org.apache.tez.hadoop.shim.DefaultHadoopShim in project tez by apache.

From the class TestRecoveryParser, method testRecoverableSummary_VertexFinishCommitting:

@Test(timeout = 5000)
public void testRecoverableSummary_VertexFinishCommitting() throws IOException {
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    TezDAGID dagID = TezDAGID.getInstance(appId, 1);
    AppContext appContext = mock(AppContext.class);
    when(appContext.getCurrentRecoveryDir()).thenReturn(new Path(recoveryPath + "/1"));
    when(appContext.getClock()).thenReturn(new SystemClock());
    when(mockDAGImpl.getID()).thenReturn(dagID);
    when(appContext.getHadoopShim()).thenReturn(new DefaultHadoopShim());
    when(appContext.getApplicationID()).thenReturn(appId);
    RecoveryService rService = new RecoveryService(appContext);
    Configuration conf = new Configuration();
    conf.setBoolean(RecoveryService.TEZ_TEST_RECOVERY_DRAIN_EVENTS_WHEN_STOPPED, true);
    rService.init(conf);
    rService.start();
    DAGPlan dagPlan = TestDAGImpl.createTestDAGPlan();
    // write a DAGSubmittedEvent first to initialize summaryStream
    rService.handle(new DAGHistoryEvent(dagID, new DAGSubmittedEvent(dagID, 1L, dagPlan, ApplicationAttemptId.newInstance(appId, 1), null, "user", new Configuration(), null, null)));
    // It should be fine to skip other events, just for testing.
    TezVertexID vertexId = TezVertexID.getInstance(dagID, 0);
    rService.handle(new DAGHistoryEvent(dagID, new VertexCommitStartedEvent(vertexId, 0L)));
    rService.handle(new DAGHistoryEvent(dagID, new VertexFinishedEvent(vertexId, "v1", 10, 0L, 0L, 0L, 0L, 0L, VertexState.SUCCEEDED, "", null, null, null, null)));
    rService.stop();
    DAGRecoveryData dagData = parser.parseRecoveryData();
    assertEquals(dagID, dagData.recoveredDagID);
    assertFalse(dagData.nonRecoverable);
}
Also used : Path(org.apache.hadoop.fs.Path) RecoveryService(org.apache.tez.dag.history.recovery.RecoveryService) SystemClock(org.apache.hadoop.yarn.util.SystemClock) Configuration(org.apache.hadoop.conf.Configuration) DAGHistoryEvent(org.apache.tez.dag.history.DAGHistoryEvent) DefaultHadoopShim(org.apache.tez.hadoop.shim.DefaultHadoopShim) DAGPlan(org.apache.tez.dag.api.records.DAGProtos.DAGPlan) TezDAGID(org.apache.tez.dag.records.TezDAGID) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) DAGRecoveryData(org.apache.tez.dag.app.RecoveryParser.DAGRecoveryData) TezVertexID(org.apache.tez.dag.records.TezVertexID) VertexFinishedEvent(org.apache.tez.dag.history.events.VertexFinishedEvent) DAGSubmittedEvent(org.apache.tez.dag.history.events.DAGSubmittedEvent) VertexCommitStartedEvent(org.apache.tez.dag.history.events.VertexCommitStartedEvent)

Aggregations

DefaultHadoopShim (org.apache.tez.hadoop.shim.DefaultHadoopShim): 19 usages
Configuration (org.apache.hadoop.conf.Configuration): 11 usages
Path (org.apache.hadoop.fs.Path): 10 usages
TezDAGID (org.apache.tez.dag.records.TezDAGID): 10 usages
TezVertexID (org.apache.tez.dag.records.TezVertexID): 10 usages
SystemClock (org.apache.hadoop.yarn.util.SystemClock): 8 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 7 usages
TaskSpec (org.apache.tez.runtime.api.impl.TaskSpec): 7 usages
HashMap (java.util.HashMap): 6 usages
TezSharedExecutor (org.apache.tez.common.TezSharedExecutor): 6 usages
ProcessorDescriptor (org.apache.tez.dag.api.ProcessorDescriptor): 6 usages
DAGPlan (org.apache.tez.dag.api.records.DAGProtos.DAGPlan): 6 usages
DAGRecoveryData (org.apache.tez.dag.app.RecoveryParser.DAGRecoveryData): 6 usages
ByteBuffer (java.nio.ByteBuffer): 5 usages
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation): 5 usages
TezConfiguration (org.apache.tez.dag.api.TezConfiguration): 5 usages
DAGHistoryEvent (org.apache.tez.dag.history.DAGHistoryEvent): 5 usages
DAGSubmittedEvent (org.apache.tez.dag.history.events.DAGSubmittedEvent): 5 usages
LogicalIOProcessorRuntimeTask (org.apache.tez.runtime.LogicalIOProcessorRuntimeTask): 5 usages
ExecutionContextImpl (org.apache.tez.runtime.api.impl.ExecutionContextImpl): 5 usages