
Example 6 with OutputDescriptor

Use of org.apache.tez.dag.api.OutputDescriptor in the Apache Tez project.

From the class TestCommit, method createDAGPlan.

// v1->v3
// v2->v3
// vertex_group (v1, v2)
private DAGPlan createDAGPlan(boolean vertexGroupCommitSucceeded, boolean v3CommitSucceeded) throws Exception {
    LOG.info("Setting up group dag plan");
    int dummyTaskCount = 1;
    Resource dummyTaskResource = Resource.newInstance(1, 1);
    org.apache.tez.dag.api.Vertex v1 = org.apache.tez.dag.api.Vertex.create("vertex1", ProcessorDescriptor.create("Processor"), dummyTaskCount, dummyTaskResource);
    org.apache.tez.dag.api.Vertex v2 = org.apache.tez.dag.api.Vertex.create("vertex2", ProcessorDescriptor.create("Processor"), dummyTaskCount, dummyTaskResource);
    org.apache.tez.dag.api.Vertex v3 = org.apache.tez.dag.api.Vertex.create("vertex3", ProcessorDescriptor.create("Processor"), dummyTaskCount, dummyTaskResource);
    DAG dag = DAG.create("testDag");
    String groupName1 = "uv12";
    // Committers are configured to fail the commit whenever the corresponding *CommitSucceeded flag is false.
    OutputCommitterDescriptor ocd1 = OutputCommitterDescriptor.create(CountingOutputCommitter.class.getName())
        .setUserPayload(UserPayload.create(ByteBuffer.wrap(
            new CountingOutputCommitter.CountingOutputCommitterConfig(!vertexGroupCommitSucceeded, true).toUserPayload())));
    OutputCommitterDescriptor ocd2 = OutputCommitterDescriptor.create(CountingOutputCommitter.class.getName())
        .setUserPayload(UserPayload.create(ByteBuffer.wrap(
            new CountingOutputCommitter.CountingOutputCommitterConfig(!v3CommitSucceeded, true).toUserPayload())));
    org.apache.tez.dag.api.VertexGroup uv12 = dag.createVertexGroup(groupName1, v1, v2);
    OutputDescriptor outDesc = OutputDescriptor.create("output.class");
    uv12.addDataSink("v12Out", DataSinkDescriptor.create(outDesc, ocd1, null));
    v3.addDataSink("v3Out", DataSinkDescriptor.create(outDesc, ocd2, null));
    GroupInputEdge e1 = GroupInputEdge.create(uv12, v3,
        EdgeProperty.create(DataMovementType.SCATTER_GATHER, DataSourceType.PERSISTED, SchedulingType.SEQUENTIAL,
            OutputDescriptor.create("dummy output class"), InputDescriptor.create("dummy input class")),
        InputDescriptor.create("merge.class"));
    dag.addVertex(v1);
    dag.addVertex(v2);
    dag.addVertex(v3);
    dag.addEdge(e1);
    return dag.createDag(conf, null, null, null, true);
}
Also used: OutputCommitterDescriptor (org.apache.tez.dag.api.OutputCommitterDescriptor), Resource (org.apache.hadoop.yarn.api.records.Resource), DAG (org.apache.tez.dag.api.DAG), OutputDescriptor (org.apache.tez.dag.api.OutputDescriptor), GroupInputEdge (org.apache.tez.dag.api.GroupInputEdge)
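
To make the pattern easier to see, here is a condensed, hedged sketch of the same vertex-group wiring with the committer-failure payloads stripped out. It uses only APIs that appear in the example above (createVertexGroup, addDataSink, GroupInputEdge.create); the descriptor class names ("output.class", "committer.class", and so on) are placeholders, not real classes.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.tez.dag.api.DAG;
import org.apache.tez.dag.api.DataSinkDescriptor;
import org.apache.tez.dag.api.EdgeProperty;
import org.apache.tez.dag.api.EdgeProperty.DataMovementType;
import org.apache.tez.dag.api.EdgeProperty.DataSourceType;
import org.apache.tez.dag.api.EdgeProperty.SchedulingType;
import org.apache.tez.dag.api.GroupInputEdge;
import org.apache.tez.dag.api.InputDescriptor;
import org.apache.tez.dag.api.OutputCommitterDescriptor;
import org.apache.tez.dag.api.OutputDescriptor;
import org.apache.tez.dag.api.ProcessorDescriptor;
import org.apache.tez.dag.api.Vertex;
import org.apache.tez.dag.api.VertexGroup;

public class VertexGroupSinkSketch {
    static DAG buildSketchDag() {
        Resource res = Resource.newInstance(1, 1);
        Vertex v1 = Vertex.create("v1", ProcessorDescriptor.create("Processor"), 1, res);
        Vertex v2 = Vertex.create("v2", ProcessorDescriptor.create("Processor"), 1, res);
        Vertex v3 = Vertex.create("v3", ProcessorDescriptor.create("Processor"), 1, res);
        DAG dag = DAG.create("sketchDag");
        // Group v1 and v2; the sink registered on the group is committed once for the whole group.
        VertexGroup uv12 = dag.createVertexGroup("uv12", v1, v2);
        uv12.addDataSink("groupOut", DataSinkDescriptor.create(
            OutputDescriptor.create("output.class"),
            OutputCommitterDescriptor.create("committer.class"), null));
        // The group feeds v3, which reads the grouped outputs through one merged input.
        GroupInputEdge e = GroupInputEdge.create(uv12, v3,
            EdgeProperty.create(DataMovementType.SCATTER_GATHER, DataSourceType.PERSISTED,
                SchedulingType.SEQUENTIAL,
                OutputDescriptor.create("edge.output.class"), InputDescriptor.create("edge.input.class")),
            InputDescriptor.create("merge.class"));
        dag.addVertex(v1);
        dag.addVertex(v2);
        dag.addVertex(v3);
        dag.addEdge(e);
        return dag;
    }
}

The point the tests above exercise is that this group-level sink is committed once per group rather than once per member vertex, which is what the vertexGroupCommitSucceeded flags control.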

Example 7 with OutputDescriptor

Use of org.apache.tez.dag.api.OutputDescriptor in the Apache Tez project.

From the class TestCommit, method createDAGPlanWith2VertexGroupOutputs.

// v1->v3
// v2->v3
// vertex_group (v1, v2) has 2 shared outputs
private DAGPlan createDAGPlanWith2VertexGroupOutputs(boolean vertexGroupCommitSucceeded1, boolean vertexGroupCommitSucceeded2, boolean v3CommitSucceeded) throws Exception {
    LOG.info("Setting up group dag plan");
    int dummyTaskCount = 1;
    Resource dummyTaskResource = Resource.newInstance(1, 1);
    org.apache.tez.dag.api.Vertex v1 = org.apache.tez.dag.api.Vertex.create("vertex1", ProcessorDescriptor.create("Processor"), dummyTaskCount, dummyTaskResource);
    org.apache.tez.dag.api.Vertex v2 = org.apache.tez.dag.api.Vertex.create("vertex2", ProcessorDescriptor.create("Processor"), dummyTaskCount, dummyTaskResource);
    org.apache.tez.dag.api.Vertex v3 = org.apache.tez.dag.api.Vertex.create("vertex3", ProcessorDescriptor.create("Processor"), dummyTaskCount, dummyTaskResource);
    DAG dag = DAG.create("testDag");
    String groupName1 = "uv12";
    // Committers are configured to fail the commit whenever the corresponding *CommitSucceeded flag is false.
    OutputCommitterDescriptor ocd1 = OutputCommitterDescriptor.create(CountingOutputCommitter.class.getName())
        .setUserPayload(UserPayload.create(ByteBuffer.wrap(
            new CountingOutputCommitter.CountingOutputCommitterConfig(!vertexGroupCommitSucceeded1, true).toUserPayload())));
    OutputCommitterDescriptor ocd2 = OutputCommitterDescriptor.create(CountingOutputCommitter.class.getName())
        .setUserPayload(UserPayload.create(ByteBuffer.wrap(
            new CountingOutputCommitter.CountingOutputCommitterConfig(!vertexGroupCommitSucceeded2, true).toUserPayload())));
    OutputCommitterDescriptor ocd3 = OutputCommitterDescriptor.create(CountingOutputCommitter.class.getName())
        .setUserPayload(UserPayload.create(ByteBuffer.wrap(
            new CountingOutputCommitter.CountingOutputCommitterConfig(!v3CommitSucceeded, true).toUserPayload())));
    org.apache.tez.dag.api.VertexGroup uv12 = dag.createVertexGroup(groupName1, v1, v2);
    OutputDescriptor outDesc = OutputDescriptor.create("output.class");
    uv12.addDataSink("v12Out1", DataSinkDescriptor.create(outDesc, ocd1, null));
    uv12.addDataSink("v12Out2", DataSinkDescriptor.create(outDesc, ocd2, null));
    v3.addDataSink("v3Out", DataSinkDescriptor.create(outDesc, ocd3, null));
    GroupInputEdge e1 = GroupInputEdge.create(uv12, v3,
        EdgeProperty.create(DataMovementType.SCATTER_GATHER, DataSourceType.PERSISTED, SchedulingType.SEQUENTIAL,
            OutputDescriptor.create("dummy output class"), InputDescriptor.create("dummy input class")),
        InputDescriptor.create("merge.class"));
    dag.addVertex(v1);
    dag.addVertex(v2);
    dag.addVertex(v3);
    dag.addEdge(e1);
    return dag.createDag(conf, null, null, null, true);
}
Also used: OutputCommitterDescriptor (org.apache.tez.dag.api.OutputCommitterDescriptor), Resource (org.apache.hadoop.yarn.api.records.Resource), DAG (org.apache.tez.dag.api.DAG), OutputDescriptor (org.apache.tez.dag.api.OutputDescriptor), GroupInputEdge (org.apache.tez.dag.api.GroupInputEdge)

Example 8 with OutputDescriptor

Use of org.apache.tez.dag.api.OutputDescriptor in the Apache Tez project.

From the class TestMROutputLegacy, method testNewAPI_MR.

// Simulates translating an MR job to a Tez DAG using the new MR API
@Test(timeout = 5000)
public void testNewAPI_MR() throws Exception {
    String outputPath = "/tmp/output";
    Job job = Job.getInstance();
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(Text.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    SequenceFileOutputFormat.setOutputPath(job, new Path(outputPath));
    job.getConfiguration().setBoolean("mapred.reducer.new-api", true);
    // the output is attached to the reducer
    job.getConfiguration().setBoolean(MRConfig.IS_MAP_PROCESSOR, false);
    UserPayload vertexPayload = TezUtils.createUserPayloadFromConf(job.getConfiguration());
    OutputDescriptor od = OutputDescriptor.create(MROutputLegacy.class.getName()).setUserPayload(vertexPayload);
    DataSinkDescriptor sink = DataSinkDescriptor.create(od, OutputCommitterDescriptor.create(MROutputCommitter.class.getName()), null);
    OutputContext outputContext = createMockOutputContext(sink.getOutputDescriptor().getUserPayload());
    MROutputLegacy output = new MROutputLegacy(outputContext, 2);
    output.initialize();
    assertEquals(true, output.useNewApi);
    assertEquals(SequenceFileOutputFormat.class, output.newOutputFormat.getClass());
    assertNull(output.oldOutputFormat);
    assertEquals(NullWritable.class, output.newApiTaskAttemptContext.getOutputKeyClass());
    assertEquals(Text.class, output.newApiTaskAttemptContext.getOutputValueClass());
    assertNull(output.oldApiTaskAttemptContext);
    assertNotNull(output.newRecordWriter);
    assertNull(output.oldRecordWriter);
    assertEquals(FileOutputCommitter.class, output.committer.getClass());
}
Also used: Path (org.apache.hadoop.fs.Path), UserPayload (org.apache.tez.dag.api.UserPayload), OutputDescriptor (org.apache.tez.dag.api.OutputDescriptor), Job (org.apache.hadoop.mapreduce.Job), DataSinkDescriptor (org.apache.tez.dag.api.DataSinkDescriptor), OutputContext (org.apache.tez.runtime.api.OutputContext), Test (org.junit.Test)
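
The test drives the sink through a mocked OutputContext; in a real job the same descriptor would simply be attached to the vertex that produces the final output. A minimal sketch of that wiring, assuming a placeholder processor class name ("reduce.processor.class") and an illustrative sink name ("MROutput"):

import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.tez.common.TezUtils;
import org.apache.tez.dag.api.DAG;
import org.apache.tez.dag.api.DataSinkDescriptor;
import org.apache.tez.dag.api.OutputCommitterDescriptor;
import org.apache.tez.dag.api.OutputDescriptor;
import org.apache.tez.dag.api.ProcessorDescriptor;
import org.apache.tez.dag.api.UserPayload;
import org.apache.tez.dag.api.Vertex;
import org.apache.tez.mapreduce.committer.MROutputCommitter;
import org.apache.tez.mapreduce.output.MROutputLegacy;

public class MRSinkWiringSketch {
    static DAG buildDag(Job job) throws Exception {
        // Same descriptor construction as in the test above.
        UserPayload payload = TezUtils.createUserPayloadFromConf(job.getConfiguration());
        OutputDescriptor od = OutputDescriptor.create(MROutputLegacy.class.getName()).setUserPayload(payload);
        DataSinkDescriptor sink = DataSinkDescriptor.create(od,
            OutputCommitterDescriptor.create(MROutputCommitter.class.getName()), null);
        // Attach the sink to the vertex that writes the job's output.
        // "reduce.processor.class" stands in for whatever processor the job actually runs.
        Vertex reducer = Vertex.create("reducer", ProcessorDescriptor.create("reduce.processor.class"),
            1, Resource.newInstance(1024, 1));
        reducer.addDataSink("MROutput", sink);
        DAG dag = DAG.create("mrJobDag");
        dag.addVertex(reducer);
        return dag;
    }
}

In the test, that last step is replaced by createMockOutputContext carrying the same user payload, so MROutputLegacy can be initialized without running a DAG.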

Example 9 with OutputDescriptor

Use of org.apache.tez.dag.api.OutputDescriptor in the Apache Tez project.

From the class TestMROutputLegacy, method testNewAPI_MapperOnly.

// Simulates translating a map-only MR job to a Tez DAG using the new MR API
@Test(timeout = 5000)
public void testNewAPI_MapperOnly() throws Exception {
    String outputPath = "/tmp/output";
    Job job = Job.getInstance();
    job.setOutputKeyClass(NullWritable.class);
    job.setOutputValueClass(Text.class);
    job.setOutputFormatClass(SequenceFileOutputFormat.class);
    SequenceFileOutputFormat.setOutputPath(job, new Path(outputPath));
    job.getConfiguration().setBoolean("mapred.mapper.new-api", true);
    // the output is attached to the mapper
    job.getConfiguration().setBoolean(MRConfig.IS_MAP_PROCESSOR, true);
    UserPayload vertexPayload = TezUtils.createUserPayloadFromConf(job.getConfiguration());
    OutputDescriptor od = OutputDescriptor.create(MROutputLegacy.class.getName()).setUserPayload(vertexPayload);
    DataSinkDescriptor sink = DataSinkDescriptor.create(od, OutputCommitterDescriptor.create(MROutputCommitter.class.getName()), null);
    OutputContext outputContext = createMockOutputContext(sink.getOutputDescriptor().getUserPayload());
    MROutputLegacy output = new MROutputLegacy(outputContext, 2);
    output.initialize();
    assertEquals(true, output.useNewApi);
    assertEquals(SequenceFileOutputFormat.class, output.newOutputFormat.getClass());
    assertNull(output.oldOutputFormat);
    assertEquals(NullWritable.class, output.newApiTaskAttemptContext.getOutputKeyClass());
    assertEquals(Text.class, output.newApiTaskAttemptContext.getOutputValueClass());
    assertNull(output.oldApiTaskAttemptContext);
    assertNotNull(output.newRecordWriter);
    assertNull(output.oldRecordWriter);
    assertEquals(FileOutputCommitter.class, output.committer.getClass());
}
Also used: Path (org.apache.hadoop.fs.Path), UserPayload (org.apache.tez.dag.api.UserPayload), OutputDescriptor (org.apache.tez.dag.api.OutputDescriptor), Job (org.apache.hadoop.mapreduce.Job), DataSinkDescriptor (org.apache.tez.dag.api.DataSinkDescriptor), OutputContext (org.apache.tez.runtime.api.OutputContext), Test (org.junit.Test)

Example 10 with OutputDescriptor

Use of org.apache.tez.dag.api.OutputDescriptor in the Apache Tez project.

From the class TestMemoryDistributor, method testScalingProcessor.

@Test(timeout = 5000)
public void testScalingProcessor() throws TezException {
    MemoryDistributor dist = new MemoryDistributor(2, 1, conf);
    dist.setJvmMemory(10000L);
    // First request
    MemoryUpdateCallbackForTest e1Callback = new MemoryUpdateCallbackForTest();
    InputContext e1InputContext1 = createTestInputContext();
    InputDescriptor e1InDesc1 = createTestInputDescriptor();
    dist.requestMemory(10000, e1Callback, e1InputContext1, e1InDesc1);
    // Second request
    MemoryUpdateCallbackForTest e2Callback = new MemoryUpdateCallbackForTest();
    InputContext e2InputContext2 = createTestInputContext();
    InputDescriptor e2InDesc2 = createTestInputDescriptor();
    dist.requestMemory(10000, e2Callback, e2InputContext2, e2InDesc2);
    // Third request - output
    MemoryUpdateCallbackForTest e3Callback = new MemoryUpdateCallbackForTest();
    OutputContext e3OutputContext1 = createTestOutputContext();
    OutputDescriptor e3OutDesc1 = createTestOutputDescriptor();
    dist.requestMemory(5000, e3Callback, e3OutputContext1, e3OutDesc1);
    // Fourth request - processor
    MemoryUpdateCallbackForTest e4Callback = new MemoryUpdateCallbackForTest();
    ProcessorContext e4ProcessorContext1 = createTestProcessortContext();
    ProcessorDescriptor e4ProcessorDesc1 = createTestProcessorDescriptor();
    dist.requestMemory(5000, e4Callback, e4ProcessorContext1, e4ProcessorDesc1);
    dist.makeInitialAllocations();
    // Total available: 70% of 10K = 7000
    // 4 requests - 10K, 10K, 5K, 5K
    // Scale down to - 2333.33, 2333.33, 1166.66, 1166.66
    assertTrue(e1Callback.assigned >= 2333 && e1Callback.assigned <= 2334);
    assertTrue(e2Callback.assigned >= 2333 && e2Callback.assigned <= 2334);
    assertTrue(e3Callback.assigned >= 1166 && e3Callback.assigned <= 1167);
    assertTrue(e4Callback.assigned >= 1166 && e4Callback.assigned <= 1167);
}
Also used: InputDescriptor (org.apache.tez.dag.api.InputDescriptor), OutputDescriptor (org.apache.tez.dag.api.OutputDescriptor), InputContext (org.apache.tez.runtime.api.InputContext), ProcessorDescriptor (org.apache.tez.dag.api.ProcessorDescriptor), OutputContext (org.apache.tez.runtime.api.OutputContext), ProcessorContext (org.apache.tez.runtime.api.ProcessorContext), Test (org.junit.Test)
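
The asserted ranges follow from straightforward proportional scale-down: about 70% of the 10,000-byte JVM heap is distributable (per the comment in the test), and each requester receives a share of that pool in proportion to what it asked for. A small sketch of that arithmetic, using only the numbers from the test:

// Sketch of the proportional scaling the assertions above check.
public class ScalingArithmeticSketch {
    public static void main(String[] args) {
        long jvmMemory = 10000L;
        double availableFraction = 0.7;                     // "70% of 10K = 7000" per the test comment
        double available = jvmMemory * availableFraction;   // 7000
        long totalRequested = 10000 + 10000 + 5000 + 5000;  // 30000
        double scale = available / totalRequested;          // ~0.2333
        System.out.println(10000 * scale);                  // ~2333.3 -> matches the 2333..2334 range
        System.out.println(5000 * scale);                   // ~1166.6 -> matches the 1166..1167 range
    }
}

With a different reserve fraction the absolute numbers change, but the 2:2:1:1 ratio between the four requests stays the same.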

Aggregations

OutputDescriptor (org.apache.tez.dag.api.OutputDescriptor): 32
Test (org.junit.Test): 14
OutputContext (org.apache.tez.runtime.api.OutputContext): 13
InputDescriptor (org.apache.tez.dag.api.InputDescriptor): 10
UserPayload (org.apache.tez.dag.api.UserPayload): 10
Configuration (org.apache.hadoop.conf.Configuration): 9
OutputCommitterDescriptor (org.apache.tez.dag.api.OutputCommitterDescriptor): 9
TezConfiguration (org.apache.tez.dag.api.TezConfiguration): 8
InputContext (org.apache.tez.runtime.api.InputContext): 8
DAG (org.apache.tez.dag.api.DAG): 7
Resource (org.apache.hadoop.yarn.api.records.Resource): 6
Path (org.apache.hadoop.fs.Path): 5
DataSinkDescriptor (org.apache.tez.dag.api.DataSinkDescriptor): 5
WeightedScalingMemoryDistributor (org.apache.tez.runtime.library.resources.WeightedScalingMemoryDistributor): 5
GroupInputEdge (org.apache.tez.dag.api.GroupInputEdge): 4
ProcessorDescriptor (org.apache.tez.dag.api.ProcessorDescriptor): 4
Vertex (org.apache.tez.dag.api.Vertex): 4
OutputSpec (org.apache.tez.runtime.api.impl.OutputSpec): 4
ByteString (com.google.protobuf.ByteString): 3
JobConf (org.apache.hadoop.mapred.JobConf): 3