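The examples below, all taken from the Apache Tez test suite, share one lifecycle: a TezSharedExecutor is constructed from a Configuration, handed to the runtime component under test (a LogicalIOProcessorRuntimeTask or an OutputContext), and explicitly shut down once the test is finished with it. As the examples suggest, TezSharedExecutor acts as a shared executor that Tez runtime pieces reuse instead of each creating its own thread pool. The snippet that follows is a minimal sketch of that pattern, not Tez API documentation; only the TezSharedExecutor(Configuration) constructor and shutdownNow() calls are taken from the examples, while the class name, the main method, and the try/finally arrangement are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.tez.common.TezSharedExecutor;

// Hypothetical harness class; the name and main() are illustrative only.
public class TezSharedExecutorLifecycle {
    public static void main(String[] args) {
        Configuration conf = new Configuration();
        // One shared executor per test; runtime components borrow threads from it.
        TezSharedExecutor sharedExecutor = new TezSharedExecutor(conf);
        try {
            // ... pass sharedExecutor to a LogicalIOProcessorRuntimeTask, OutputContext, etc.
        } finally {
            // Every example ends the same way: the owner shuts the executor down.
            sharedExecutor.shutdownNow();
        }
    }
}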

Example 1 with TezSharedExecutor

Use of org.apache.tez.common.TezSharedExecutor in project tez by apache: class TestOnFileUnorderedKVOutput, method testWithPipelinedShuffle.

@Test(timeout = 30000)
@SuppressWarnings("unchecked")
public void testWithPipelinedShuffle() throws Exception {
    Configuration conf = new Configuration();
    conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_KEY_CLASS, Text.class.getName());
    conf.set(TezRuntimeConfiguration.TEZ_RUNTIME_VALUE_CLASS, IntWritable.class.getName());
    conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_PIPELINED_SHUFFLE_ENABLED, true);
    conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_ENABLE_FINAL_MERGE_IN_OUTPUT, false);
    conf.setInt(TezRuntimeConfiguration.TEZ_RUNTIME_UNORDERED_OUTPUT_BUFFER_SIZE_MB, 1);
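    // Create one shared executor from the test configuration; it is passed into the output context and shut down at the end of the test.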
    TezSharedExecutor sharedExecutor = new TezSharedExecutor(conf);
    OutputContext outputContext = createOutputContext(conf, sharedExecutor);
    UnorderedKVOutput kvOutput = new UnorderedKVOutput(outputContext, 1);
    List<Event> events = null;
    events = kvOutput.initialize();
    kvOutput.start();
    assertTrue(events != null && events.size() == 0);
    KeyValueWriter kvWriter = kvOutput.getWriter();
    for (int i = 0; i < 500; i++) {
        kvWriter.write(new Text(RandomStringUtils.randomAscii(10000)), new IntWritable(i));
    }
    events = kvOutput.close();
    // When pipelining is on, all events are sent out within writer itself.
    assertTrue(events != null && events.size() == 0);
    // ensure that data is sent via outputContext.
    ArgumentCaptor<List> eventsCaptor = ArgumentCaptor.forClass(List.class);
    verify(outputContext, atLeast(1)).sendEvents(eventsCaptor.capture());
    events = eventsCaptor.getValue();
    CompositeDataMovementEvent dmEvent = (CompositeDataMovementEvent) events.get(1);
    assertEquals("Invalid source index", 0, dmEvent.getSourceIndexStart());
    DataMovementEventPayloadProto shufflePayload = DataMovementEventPayloadProto.parseFrom(ByteString.copyFrom(dmEvent.getUserPayload()));
    assertTrue(shufflePayload.hasLastEvent());
    assertFalse(shufflePayload.hasEmptyPartitions());
    assertEquals(shufflePort, shufflePayload.getPort());
    assertEquals("localhost", shufflePayload.getHost());
    sharedExecutor.shutdownNow();
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) TezConfiguration(org.apache.tez.dag.api.TezConfiguration) TezRuntimeConfiguration(org.apache.tez.runtime.library.api.TezRuntimeConfiguration) Text(org.apache.hadoop.io.Text) OutputContext(org.apache.tez.runtime.api.OutputContext) KeyValueWriter(org.apache.tez.runtime.library.api.KeyValueWriter) CompositeDataMovementEvent(org.apache.tez.runtime.api.events.CompositeDataMovementEvent) TezSharedExecutor(org.apache.tez.common.TezSharedExecutor) Event(org.apache.tez.runtime.api.Event) List(java.util.List) DataMovementEventPayloadProto(org.apache.tez.runtime.library.shuffle.impl.ShuffleUserPayloads.DataMovementEventPayloadProto) IntWritable(org.apache.hadoop.io.IntWritable) Test(org.junit.Test)

Example 2 with TezSharedExecutor

Use of org.apache.tez.common.TezSharedExecutor in project tez by apache: class TestOnFileUnorderedKVOutput, method createOutputContext.

private OutputContext createOutputContext(Configuration conf, TezSharedExecutor sharedExecutor) throws IOException {
    int appAttemptNumber = 1;
    TezUmbilical tezUmbilical = mock(TezUmbilical.class);
    String dagName = "currentDAG";
    String taskVertexName = "currentVertex";
    String destinationVertexName = "destinationVertex";
    TezDAGID dagID = TezDAGID.getInstance("2000", 1, 1);
    TezVertexID vertexID = TezVertexID.getInstance(dagID, 1);
    TezTaskID taskID = TezTaskID.getInstance(vertexID, 1);
    TezTaskAttemptID taskAttemptID = TezTaskAttemptID.getInstance(taskID, 1);
    UserPayload userPayload = TezUtils.createUserPayloadFromConf(conf);
    TaskSpec mockSpec = mock(TaskSpec.class);
    when(mockSpec.getInputs()).thenReturn(Collections.singletonList(mock(InputSpec.class)));
    when(mockSpec.getOutputs()).thenReturn(Collections.singletonList(mock(OutputSpec.class)));
    task = new LogicalIOProcessorRuntimeTask(mockSpec, appAttemptNumber, new Configuration(), new String[] { "/" }, tezUmbilical, null, null, null, null, "", null, 1024, false, new DefaultHadoopShim(), sharedExecutor);
    LogicalIOProcessorRuntimeTask runtimeTask = spy(task);
    Map<String, String> auxEnv = new HashMap<String, String>();
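    // Publish the shuffle port into the auxiliary-service environment so the output can discover it.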
    ByteBuffer bb = ByteBuffer.allocate(4);
    bb.putInt(shufflePort);
    bb.position(0);
    AuxiliaryServiceHelper.setServiceDataIntoEnv(conf.get(TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID, TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID_DEFAULT), bb, auxEnv);
    OutputDescriptor outputDescriptor = mock(OutputDescriptor.class);
    when(outputDescriptor.getClassName()).thenReturn("OutputDescriptor");
    OutputContext realOutputContext = new TezOutputContextImpl(conf, new String[] { workDir.toString() },
        appAttemptNumber, tezUmbilical, dagName, taskVertexName, destinationVertexName, -1,
        taskAttemptID, 0, userPayload, runtimeTask, null, auxEnv, new MemoryDistributor(1, 1, conf),
        outputDescriptor, null, new ExecutionContextImpl("localhost"), 2048,
        new TezSharedExecutor(defaultConf));
    verify(runtimeTask, times(1)).addAndGetTezCounter(destinationVertexName);
    verify(runtimeTask, times(1)).getTaskStatistics();
    // verify output stats object got created
    Assert.assertTrue(task.getTaskStatistics().getIOStatistics().containsKey(destinationVertexName));
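    // Spy the real context so that requestInitialMemory() can be stubbed to grant the full requested amount immediately.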
    OutputContext outputContext = spy(realOutputContext);
    doAnswer(new Answer() {

        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            long requestedSize = (Long) invocation.getArguments()[0];
            MemoryUpdateCallbackHandler callback = (MemoryUpdateCallbackHandler) invocation.getArguments()[1];
            callback.memoryAssigned(requestedSize);
            return null;
        }
    }).when(outputContext).requestInitialMemory(anyLong(), any(MemoryUpdateCallback.class));
    return outputContext;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) TezConfiguration(org.apache.tez.dag.api.TezConfiguration) TezRuntimeConfiguration(org.apache.tez.runtime.library.api.TezRuntimeConfiguration) HashMap(java.util.HashMap) ByteString(com.google.protobuf.ByteString) DefaultHadoopShim(org.apache.tez.hadoop.shim.DefaultHadoopShim) OutputDescriptor(org.apache.tez.dag.api.OutputDescriptor) TezDAGID(org.apache.tez.dag.records.TezDAGID) MemoryUpdateCallbackHandler(org.apache.tez.runtime.library.common.MemoryUpdateCallbackHandler) MemoryUpdateCallback(org.apache.tez.runtime.api.MemoryUpdateCallback) TezVertexID(org.apache.tez.dag.records.TezVertexID) LogicalIOProcessorRuntimeTask(org.apache.tez.runtime.LogicalIOProcessorRuntimeTask) TezOutputContextImpl(org.apache.tez.runtime.api.impl.TezOutputContextImpl) UserPayload(org.apache.tez.dag.api.UserPayload) ExecutionContextImpl(org.apache.tez.runtime.api.impl.ExecutionContextImpl) TaskSpec(org.apache.tez.runtime.api.impl.TaskSpec) ByteBuffer(java.nio.ByteBuffer) TezTaskID(org.apache.tez.dag.records.TezTaskID) OutputContext(org.apache.tez.runtime.api.OutputContext) Mockito.doAnswer(org.mockito.Mockito.doAnswer) Answer(org.mockito.stubbing.Answer) InvocationOnMock(org.mockito.invocation.InvocationOnMock) TezSharedExecutor(org.apache.tez.common.TezSharedExecutor) TezUmbilical(org.apache.tez.runtime.api.impl.TezUmbilical) MemoryDistributor(org.apache.tez.runtime.common.resources.MemoryDistributor) TezTaskAttemptID(org.apache.tez.dag.records.TezTaskAttemptID)

Example 3 with TezSharedExecutor

Use of org.apache.tez.common.TezSharedExecutor in project tez by apache: class TestMapProcessor, method testMapProcessor.

@Test(timeout = 5000)
public void testMapProcessor() throws Exception {
    String dagName = "mrdag0";
    String vertexName = MultiStageMRConfigUtil.getInitialMapVertexName();
    JobConf jobConf = new JobConf(defaultConf);
    setUpJobConf(jobConf);
    MRHelpers.translateMRConfToTez(jobConf);
    jobConf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 0);
    jobConf.setBoolean(MRJobConfig.MR_TEZ_SPLITS_VIA_EVENTS, false);
    jobConf.set(MRFrameworkConfigs.TASK_LOCAL_RESOURCE_DIR, new Path(workDir, "localized-resources").toUri().toString());
    Path mapInput = new Path(workDir, "map0");
    MapUtils.generateInputSplit(localFs, workDir, jobConf, mapInput, 10);
    InputSpec mapInputSpec = new InputSpec("NullSrcVertex",
        InputDescriptor.create(MRInputLegacy.class.getName()).setUserPayload(
            UserPayload.create(ByteBuffer.wrap(
                MRRuntimeProtos.MRInputUserPayloadProto.newBuilder()
                    .setConfigurationBytes(TezUtils.createByteStringFromConf(jobConf))
                    .build().toByteArray()))),
        1);
    OutputSpec mapOutputSpec = new OutputSpec("NullDestVertex", OutputDescriptor.create(OrderedPartitionedKVOutput.class.getName()).setUserPayload(TezUtils.createUserPayloadFromConf(jobConf)), 1);
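    // The shared executor is handed to the logical task and shut down once the task has run and closed.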
    TezSharedExecutor sharedExecutor = new TezSharedExecutor(jobConf);
    LogicalIOProcessorRuntimeTask task = MapUtils.createLogicalTask(localFs, workDir, jobConf, 0,
        new Path(workDir, "map0"), new TestUmbilical(), dagName, vertexName,
        Collections.singletonList(mapInputSpec), Collections.singletonList(mapOutputSpec),
        sharedExecutor);
    task.initialize();
    task.run();
    task.close();
    sharedExecutor.shutdownNow();
    OutputContext outputContext = task.getOutputContexts().iterator().next();
    TezTaskOutput mapOutputs = new TezTaskOutputFiles(jobConf, outputContext.getUniqueIdentifier(), outputContext.getDagIdentifier());
    // TODO NEWTEZ FIXME OutputCommitter verification
    // MRTask mrTask = (MRTask)t.getProcessor();
    // Assert.assertEquals(TezNullOutputCommitter.class.getName(), mrTask
    // .getCommitter().getClass().getName());
    // t.close();
    Path mapOutputFile = getMapOutputFile(jobConf, outputContext);
    LOG.info("mapOutputFile = " + mapOutputFile);
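    // Read the map output back through an IFile reader and verify that keys come out in non-decreasing order.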
    IFile.Reader reader = new IFile.Reader(localFs, mapOutputFile, null, null, null, false, 0, -1);
    LongWritable key = new LongWritable();
    Text value = new Text();
    DataInputBuffer keyBuf = new DataInputBuffer();
    DataInputBuffer valueBuf = new DataInputBuffer();
    long prev = Long.MIN_VALUE;
    while (reader.nextRawKey(keyBuf)) {
        reader.nextRawValue(valueBuf);
        key.readFields(keyBuf);
        value.readFields(valueBuf);
        if (prev != Long.MIN_VALUE) {
            assert (prev <= key.get());
        }
        prev = key.get();
        LOG.info("key = " + key.get() + "; value = " + value);
    }
    reader.close();
}
Also used : Path(org.apache.hadoop.fs.Path) LogicalIOProcessorRuntimeTask(org.apache.tez.runtime.LogicalIOProcessorRuntimeTask) TestUmbilical(org.apache.tez.mapreduce.TestUmbilical) TezTaskOutputFiles(org.apache.tez.runtime.library.common.task.local.output.TezTaskOutputFiles) IFile(org.apache.tez.runtime.library.common.sort.impl.IFile) InputSpec(org.apache.tez.runtime.api.impl.InputSpec) OrderedPartitionedKVOutput(org.apache.tez.runtime.library.output.OrderedPartitionedKVOutput) Text(org.apache.hadoop.io.Text) OutputContext(org.apache.tez.runtime.api.OutputContext) DataInputBuffer(org.apache.hadoop.io.DataInputBuffer) TezSharedExecutor(org.apache.tez.common.TezSharedExecutor) LongWritable(org.apache.hadoop.io.LongWritable) MRInputLegacy(org.apache.tez.mapreduce.input.MRInputLegacy) JobConf(org.apache.hadoop.mapred.JobConf) OutputSpec(org.apache.tez.runtime.api.impl.OutputSpec) TezTaskOutput(org.apache.tez.runtime.library.common.task.local.output.TezTaskOutput) Test(org.junit.Test)

Example 4 with TezSharedExecutor

Use of org.apache.tez.common.TezSharedExecutor in project tez by apache: class TestMROutput, method testPerf.

@Ignore
@Test
public void testPerf() throws Exception {
    Configuration conf = new Configuration();
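    // Same lifecycle as the other examples: create the shared executor, run the task, shut the executor down.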
    TezSharedExecutor sharedExecutor = new TezSharedExecutor(conf);
    LogicalIOProcessorRuntimeTask task = createLogicalTask(conf, new TestUmbilical(), "dag", "vertex", sharedExecutor);
    task.initialize();
    task.run();
    task.close();
    sharedExecutor.shutdownNow();
}
Also used : LogicalIOProcessorRuntimeTask(org.apache.tez.runtime.LogicalIOProcessorRuntimeTask) TestUmbilical(org.apache.tez.mapreduce.TestUmbilical) Configuration(org.apache.hadoop.conf.Configuration) TezSharedExecutor(org.apache.tez.common.TezSharedExecutor) Ignore(org.junit.Ignore) Test(org.junit.Test)

Example 5 with TezSharedExecutor

Use of org.apache.tez.common.TezSharedExecutor in project tez by apache: class TestReduceProcessor, method testReduceProcessor.

@Test(timeout = 5000)
public void testReduceProcessor() throws Exception {
    final String dagName = "mrdag0";
    String mapVertexName = MultiStageMRConfigUtil.getInitialMapVertexName();
    String reduceVertexName = MultiStageMRConfigUtil.getFinalReduceVertexName();
    JobConf jobConf = new JobConf(defaultConf);
    setUpJobConf(jobConf);
    MRHelpers.translateMRConfToTez(jobConf);
    jobConf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, 0);
    jobConf.set(MRFrameworkConfigs.TASK_LOCAL_RESOURCE_DIR, new Path(workDir, "localized-resources").toUri().toString());
    jobConf.setBoolean(MRJobConfig.MR_TEZ_SPLITS_VIA_EVENTS, false);
    Path mapInput = new Path(workDir, "map0");
    MapUtils.generateInputSplit(localFs, workDir, jobConf, mapInput, 10);
    InputSpec mapInputSpec = new InputSpec("NullSrcVertex",
        InputDescriptor.create(MRInputLegacy.class.getName()).setUserPayload(
            UserPayload.create(ByteBuffer.wrap(
                MRRuntimeProtos.MRInputUserPayloadProto.newBuilder()
                    .setConfigurationBytes(TezUtils.createByteStringFromConf(jobConf))
                    .build().toByteArray()))),
        1);
    OutputSpec mapOutputSpec = new OutputSpec("NullDestVertex", OutputDescriptor.create(OrderedPartitionedKVOutput.class.getName()).setUserPayload(TezUtils.createUserPayloadFromConf(jobConf)), 1);
    // Run a map
    TestUmbilical testUmbilical = new TestUmbilical();
    TezSharedExecutor sharedExecutor = new TezSharedExecutor(jobConf);
    LogicalIOProcessorRuntimeTask mapTask = MapUtils.createLogicalTask(localFs, workDir, jobConf, 0,
        mapInput, testUmbilical, dagName, mapVertexName,
        Collections.singletonList(mapInputSpec), Collections.singletonList(mapOutputSpec),
        sharedExecutor);
    mapTask.initialize();
    mapTask.run();
    mapTask.close();
    // One VME, One DME
    Assert.assertEquals(2, testUmbilical.getEvents().size());
    Assert.assertEquals(EventType.VERTEX_MANAGER_EVENT, testUmbilical.getEvents().get(0).getEventType());
    Assert.assertEquals(EventType.COMPOSITE_DATA_MOVEMENT_EVENT, testUmbilical.getEvents().get(1).getEventType());
    CompositeDataMovementEvent cdmEvent = (CompositeDataMovementEvent) testUmbilical.getEvents().get(1).getEvent();
    Assert.assertEquals(1, cdmEvent.getCount());
    DataMovementEvent dme = cdmEvent.getEvents().iterator().next();
    dme.setTargetIndex(0);
    LOG.info("Starting reduce...");
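    // Build a shuffle token and configure local fetch so the reduce side can read the map output directly from local disk.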
    JobTokenIdentifier identifier = new JobTokenIdentifier(new Text(dagName));
    JobTokenSecretManager jobTokenSecretManager = new JobTokenSecretManager();
    Token<JobTokenIdentifier> shuffleToken = new Token<JobTokenIdentifier>(identifier, jobTokenSecretManager);
    shuffleToken.setService(identifier.getJobId());
    jobConf.setOutputFormat(SequenceFileOutputFormat.class);
    jobConf.set(MRFrameworkConfigs.TASK_LOCAL_RESOURCE_DIR, new Path(workDir, "localized-resources").toUri().toString());
    jobConf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH, true);
    FileOutputFormat.setOutputPath(jobConf, new Path(workDir, "output"));
    ProcessorDescriptor reduceProcessorDesc = ProcessorDescriptor.create(ReduceProcessor.class.getName()).setUserPayload(TezUtils.createUserPayloadFromConf(jobConf));
    InputSpec reduceInputSpec = new InputSpec(mapVertexName, InputDescriptor.create(OrderedGroupedInputLegacy.class.getName()).setUserPayload(TezUtils.createUserPayloadFromConf(jobConf)), 1);
    OutputSpec reduceOutputSpec = new OutputSpec("NullDestinationVertex", OutputDescriptor.create(MROutputLegacy.class.getName()).setUserPayload(TezUtils.createUserPayloadFromConf(jobConf)), 1);
    // Now run a reduce
    TaskSpec taskSpec = new TaskSpec(TezTestUtils.getMockTaskAttemptId(0, 1, 0, 0), dagName, reduceVertexName, -1, reduceProcessorDesc, Collections.singletonList(reduceInputSpec), Collections.singletonList(reduceOutputSpec), null, null);
    Map<String, ByteBuffer> serviceConsumerMetadata = new HashMap<String, ByteBuffer>();
    String auxiliaryService = jobConf.get(TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID, TezConfiguration.TEZ_AM_SHUFFLE_AUXILIARY_SERVICE_ID_DEFAULT);
    serviceConsumerMetadata.put(auxiliaryService, ShuffleUtils.convertJobTokenToBytes(shuffleToken));
    Map<String, String> serviceProviderEnvMap = new HashMap<String, String>();
    ByteBuffer shufflePortBb = ByteBuffer.allocate(4).putInt(0, 8000);
    AuxiliaryServiceHelper.setServiceDataIntoEnv(auxiliaryService, shufflePortBb, serviceProviderEnvMap);
    LogicalIOProcessorRuntimeTask task = new LogicalIOProcessorRuntimeTask(taskSpec, 0, jobConf,
        new String[] { workDir.toString() }, new TestUmbilical(), serviceConsumerMetadata,
        serviceProviderEnvMap, HashMultimap.<String, String>create(), null, "",
        new ExecutionContextImpl("localhost"), Runtime.getRuntime().maxMemory(), true,
        new DefaultHadoopShim(), sharedExecutor);
    List<Event> destEvents = new LinkedList<Event>();
    destEvents.add(dme);
    task.initialize();
    OrderedGroupedInputLegacy sortedOut = (OrderedGroupedInputLegacy) task.getInputs().values().iterator().next();
    sortedOut.handleEvents(destEvents);
    task.run();
    task.close();
    sharedExecutor.shutdownNow();
    // MRTask mrTask = (MRTask)t.getProcessor();
    // TODO NEWTEZ Verify the partitioner has not been created
    // Likely not applicable anymore.
    // Assert.assertNull(mrTask.getPartitioner());
    // Only a task commit happens, hence the data is still in the temporary directory.
    Path reduceOutputDir = new Path(new Path(workDir, "output"), "_temporary/0/" + IDConverter.toMRTaskIdForOutput(TezTestUtils.getMockTaskId(0, 1, 0)));
    Path reduceOutputFile = new Path(reduceOutputDir, "part-v001-o000-00000");
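    // Read the reduce output back and verify the keys are strictly increasing.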
    SequenceFile.Reader reader = new SequenceFile.Reader(localFs, reduceOutputFile, jobConf);
    LongWritable key = new LongWritable();
    Text value = new Text();
    long prev = Long.MIN_VALUE;
    while (reader.next(key, value)) {
        if (prev != Long.MIN_VALUE) {
            Assert.assertTrue(prev < key.get());
        }
        prev = key.get();
    }
    reader.close();
}
Also used : OrderedGroupedInputLegacy(org.apache.tez.runtime.library.input.OrderedGroupedInputLegacy) TestUmbilical(org.apache.tez.mapreduce.TestUmbilical) HashMap(java.util.HashMap) MROutputLegacy(org.apache.tez.mapreduce.output.MROutputLegacy) Token(org.apache.hadoop.security.token.Token) DataMovementEvent(org.apache.tez.runtime.api.events.DataMovementEvent) CompositeDataMovementEvent(org.apache.tez.runtime.api.events.CompositeDataMovementEvent) DefaultHadoopShim(org.apache.tez.hadoop.shim.DefaultHadoopShim) SequenceFile(org.apache.hadoop.io.SequenceFile) JobTokenSecretManager(org.apache.tez.common.security.JobTokenSecretManager) LongWritable(org.apache.hadoop.io.LongWritable) JobConf(org.apache.hadoop.mapred.JobConf) OutputSpec(org.apache.tez.runtime.api.impl.OutputSpec) Path(org.apache.hadoop.fs.Path) LogicalIOProcessorRuntimeTask(org.apache.tez.runtime.LogicalIOProcessorRuntimeTask) ExecutionContextImpl(org.apache.tez.runtime.api.impl.ExecutionContextImpl) TaskSpec(org.apache.tez.runtime.api.impl.TaskSpec) JobTokenIdentifier(org.apache.tez.common.security.JobTokenIdentifier) ProcessorDescriptor(org.apache.tez.dag.api.ProcessorDescriptor) InputSpec(org.apache.tez.runtime.api.impl.InputSpec) OrderedPartitionedKVOutput(org.apache.tez.runtime.library.output.OrderedPartitionedKVOutput) Text(org.apache.hadoop.io.Text) ByteBuffer(java.nio.ByteBuffer) LinkedList(java.util.LinkedList) TezSharedExecutor(org.apache.tez.common.TezSharedExecutor) Event(org.apache.tez.runtime.api.Event) MRInputLegacy(org.apache.tez.mapreduce.input.MRInputLegacy) Test(org.junit.Test)

Aggregations

TezSharedExecutor (org.apache.tez.common.TezSharedExecutor): 12
Test (org.junit.Test): 9
Configuration (org.apache.hadoop.conf.Configuration): 7
DefaultHadoopShim (org.apache.tez.hadoop.shim.DefaultHadoopShim): 6
LogicalIOProcessorRuntimeTask (org.apache.tez.runtime.LogicalIOProcessorRuntimeTask): 6
TezConfiguration (org.apache.tez.dag.api.TezConfiguration): 5
InputSpec (org.apache.tez.runtime.api.impl.InputSpec): 5
OutputSpec (org.apache.tez.runtime.api.impl.OutputSpec): 5
TaskSpec (org.apache.tez.runtime.api.impl.TaskSpec): 5
ByteBuffer (java.nio.ByteBuffer): 4
HashMap (java.util.HashMap): 4
Path (org.apache.hadoop.fs.Path): 4
Text (org.apache.hadoop.io.Text): 4
ProcessorDescriptor (org.apache.tez.dag.api.ProcessorDescriptor): 4
TezDAGID (org.apache.tez.dag.records.TezDAGID): 4
TezTaskAttemptID (org.apache.tez.dag.records.TezTaskAttemptID): 4
TezVertexID (org.apache.tez.dag.records.TezVertexID): 4
TestUmbilical (org.apache.tez.mapreduce.TestUmbilical): 4
OutputContext (org.apache.tez.runtime.api.OutputContext): 4
ExecutionContextImpl (org.apache.tez.runtime.api.impl.ExecutionContextImpl): 4