
Example 6 with PTOutput

Use of com.datatorrent.stram.plan.physical.PTOperator.PTOutput in project apex-core by apache.

Class StreamMapping, method setSources.

public void setSources(Collection<PTOperator> partitions) {
    upstream.clear();
    // add existing inputs
    for (PTOperator uoper : partitions) {
        for (PTOutput source : uoper.outputs) {
            if (source.logicalStream == streamMeta) {
                upstream.add(source);
            }
        }
    }
    redoMapping();
}
Also used : PTOutput(com.datatorrent.stram.plan.physical.PTOperator.PTOutput)

Example 7 with PTOutput

Use of com.datatorrent.stram.plan.physical.PTOperator.PTOutput in project apex-core by apache.

Class PhysicalPlanTest, method testDefaultPartitionerWithParallel.

@Test
public void testDefaultPartitionerWithParallel() throws InterruptedException {
    final MutableInt loadInd = new MutableInt();
    StatsListener listener = new StatsListener() {

        @Override
        public Response processStats(BatchedOperatorStats stats) {
            Response response = new Response();
            response.repartitionRequired = true;
            response.loadIndicator = loadInd.intValue();
            return response;
        }
    };
    LogicalPlan dag = new LogicalPlan();
    GenericTestOperator nodeX = dag.addOperator("X", GenericTestOperator.class);
    dag.setOperatorAttribute(nodeX, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
    dag.setOperatorAttribute(nodeX, Context.OperatorContext.STATS_LISTENERS, Lists.newArrayList(listener));
    GenericTestOperator nodeY = dag.addOperator("Y", GenericTestOperator.class);
    dag.setOperatorAttribute(nodeY, Context.OperatorContext.PARTITIONER, new TestPartitioner<GenericTestOperator>());
    GenericTestOperator nodeZ = dag.addOperator("Z", GenericTestOperator.class);
    dag.addStream("Stream1", nodeX.outport1, nodeY.inport1, nodeZ.inport1);
    dag.addStream("Stream2", nodeX.outport2, nodeY.inport2, nodeZ.inport2);
    dag.setInputPortAttribute(nodeY.inport1, Context.PortContext.PARTITION_PARALLEL, true);
    dag.setInputPortAttribute(nodeY.inport2, Context.PortContext.PARTITION_PARALLEL, true);
    dag.setInputPortAttribute(nodeZ.inport1, Context.PortContext.PARTITION_PARALLEL, true);
    dag.setInputPortAttribute(nodeZ.inport2, Context.PortContext.PARTITION_PARALLEL, true);
    StramTestSupport.MemoryStorageAgent msa = new StramTestSupport.MemoryStorageAgent();
    dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, msa);
    TestPlanContext ctx = new TestPlanContext();
    PhysicalPlan plan = new PhysicalPlan(dag, ctx);
    LogicalPlan.OperatorMeta metaOfX = dag.getMeta(nodeX);
    LogicalPlan.OperatorMeta metaOfY = dag.getMeta(nodeY);
    Assert.assertEquals("number operators " + metaOfX.getName(), 2, plan.getOperators(metaOfX).size());
    Assert.assertEquals("number operators " + metaOfY.getName(), 2, plan.getOperators(metaOfY).size());
    List<PTOperator> ptOfX = plan.getOperators(metaOfX);
    for (PTOperator physicalX : ptOfX) {
        Assert.assertEquals("2 streams " + physicalX.getOutputs(), 2, physicalX.getOutputs().size());
        for (PTOutput outputPort : physicalX.getOutputs()) {
            Set<PTOperator> dopers = Sets.newHashSet();
            Assert.assertEquals("sink of " + metaOfX.getName() + " id " + physicalX.id + " port " + outputPort.portName, 2, outputPort.sinks.size());
            for (PTInput inputPort : outputPort.sinks) {
                dopers.add(inputPort.target);
            }
            Assert.assertEquals(2, dopers.size());
        }
    }
    // Invoke redo-partition of the PhysicalPlan; no partition change is expected
    loadInd.setValue(0);
    for (PTOperator ptOperator : ptOfX) {
        plan.onStatusUpdate(ptOperator);
    }
    ctx.events.remove(0).run();
    for (PTOperator physicalX : ptOfX) {
        Assert.assertEquals("2 streams " + physicalX.getOutputs(), 2, physicalX.getOutputs().size());
        for (PTOutput outputPort : physicalX.getOutputs()) {
            Set<PTOperator> dopers = Sets.newHashSet();
            Assert.assertEquals("sink of " + metaOfX.getName() + " id " + physicalX.id + " port " + outputPort.portName, 2, outputPort.sinks.size());
            for (PTInput inputPort : outputPort.sinks) {
                dopers.add(inputPort.target);
            }
            Assert.assertEquals(2, dopers.size());
        }
    }
    // Scale up by splitting the first partition
    loadInd.setValue(1);
    plan.onStatusUpdate(ptOfX.get(0));
    ctx.events.get(0).run();
    List<PTOperator> ptOfXScaleUp = plan.getOperators(metaOfX);
    Assert.assertEquals("3 partitons " + ptOfXScaleUp, 3, ptOfXScaleUp.size());
    for (PTOperator physicalX : ptOfXScaleUp) {
        Assert.assertEquals("2 streams " + physicalX.getOutputs(), 2, physicalX.getOutputs().size());
        for (PTOutput outputPort : physicalX.getOutputs()) {
            Set<PTOperator> dopers = Sets.newHashSet();
            Assert.assertEquals("sink of " + metaOfX.getName() + " id " + physicalX.id + " port " + outputPort.portName, 2, outputPort.sinks.size());
            for (PTInput inputPort : outputPort.sinks) {
                dopers.add(inputPort.target);
            }
            Assert.assertEquals(2, dopers.size());
        }
    }
}
Also used : PTInput(com.datatorrent.stram.plan.physical.PTOperator.PTInput) StatsListener(com.datatorrent.api.StatsListener) GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) MutableInt(org.apache.commons.lang3.mutable.MutableInt) TestPlanContext(com.datatorrent.stram.plan.TestPlanContext) StramTestSupport(com.datatorrent.stram.support.StramTestSupport) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) PTOutput(com.datatorrent.stram.plan.physical.PTOperator.PTOutput) OperatorMeta(com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta) Test(org.junit.Test) PartitioningTest(com.datatorrent.stram.PartitioningTest)
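
The anonymous listener above always requests repartitioning and simply forwards whatever value loadInd currently holds, so the test steers the physical plan by mutating loadInd between calls to plan.onStatusUpdate(...). A minimal named variant of the same idea is sketched below; the class name FixedLoadStatsListener is invented for illustration and is not part of apex-core, but the Response fields match the ones the test uses: a positive loadIndicator asks the plan to scale up, a negative one to scale down, and zero leaves the partition count unchanged.

import com.datatorrent.api.StatsListener;

// Illustrative sketch, not apex-core code: a reusable StatsListener with the
// same shape as the anonymous listener in the test above.
public class FixedLoadStatsListener implements StatsListener {

    private final int loadIndicator;

    public FixedLoadStatsListener(int loadIndicator) {
        this.loadIndicator = loadIndicator;
    }

    @Override
    public Response processStats(BatchedOperatorStats stats) {
        Response response = new Response();
        // Only request a repartition when the load indicator is non-zero;
        // the sign decides the direction (scale up for +1, scale down for -1).
        response.repartitionRequired = loadIndicator != 0;
        response.loadIndicator = loadIndicator;
        return response;
    }
}

Such a listener could be attached through the STATS_LISTENERS attribute exactly as the anonymous listener is in the test above.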

Example 8 with PTOutput

Use of com.datatorrent.stram.plan.physical.PTOperator.PTOutput in project apex-core by apache.

Class StreamingContainerManager, method calculateLatency.

private long calculateLatency(PTOperator operator) {
    long latency = operator.stats.latencyMA.getAvg();
    long maxUnifierLatency = 0;
    for (PTOutput output : operator.getOutputs()) {
        for (PTInput input : output.sinks) {
            if (input.target.isUnifier()) {
                long thisUnifierLatency = calculateLatency(input.target);
                if (maxUnifierLatency < thisUnifierLatency) {
                    maxUnifierLatency = thisUnifierLatency;
                }
            }
        }
    }
    return latency + maxUnifierLatency;
}
Also used : PTInput(com.datatorrent.stram.plan.physical.PTOperator.PTInput) PTOutput(com.datatorrent.stram.plan.physical.PTOperator.PTOutput)
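
Examples 7 and 8 rely on the same traversal idiom: a PTOperator exposes its output ports as PTOutput objects, each PTOutput lists its downstream connections in its sinks field, and each PTInput names the receiving physical operator in its target field. A minimal sketch of that walk is shown below; the helper class DownstreamCollector is invented for illustration and is not part of apex-core, but it uses only the accessors exercised in the examples above.

import java.util.HashSet;
import java.util.Set;

import com.datatorrent.stram.plan.physical.PTOperator;
import com.datatorrent.stram.plan.physical.PTOperator.PTInput;
import com.datatorrent.stram.plan.physical.PTOperator.PTOutput;

// Illustrative sketch, not apex-core code.
public class DownstreamCollector {

    // Collects the distinct physical operators directly downstream of the given
    // operator, using the same outputs -> sinks -> target walk as Examples 7 and 8.
    public static Set<PTOperator> collectDownstream(PTOperator operator) {
        Set<PTOperator> downstream = new HashSet<>();
        for (PTOutput output : operator.getOutputs()) {
            for (PTInput input : output.sinks) {
                downstream.add(input.target);
            }
        }
        return downstream;
    }
}

calculateLatency above performs the same walk but recurses into input.target only when the target is a unifier, so the returned value is the operator's own latency plus that of its slowest unifier chain.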

Example 9 with PTOutput

Use of com.datatorrent.stram.plan.physical.PTOperator.PTOutput in project apex-core by apache.

Class PhysicalPlanTest, method testSingleFinalMxNPartitioning.

/**
   * MxN partitioning with a single final unifier. When source and sink of a
   * stream are partitioned and the stream's output port sets
   * UNIFIER_SINGLE_FINAL, a single unifier merges all upstream partitions and
   * feeds every downstream partition, instead of a container-local unifier per
   * downstream partition.
   */
@Test
public void testSingleFinalMxNPartitioning() {
    LogicalPlan dag = new LogicalPlan();
    TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
    dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<TestGeneratorInputOperator>(2));
    dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) new PartitioningTest.PartitionLoadWatch()));
    dag.setOutputPortAttribute(o1.outport, PortContext.UNIFIER_SINGLE_FINAL, true);
    OperatorMeta o1Meta = dag.getMeta(o1);
    GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
    dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<TestGeneratorInputOperator>(3));
    dag.setOperatorAttribute(o2, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitioningTest.PartitionLoadWatch() }));
    OperatorMeta o2Meta = dag.getMeta(o2);
    dag.addStream("o1.outport1", o1.outport, o2.inport1);
    int maxContainers = 10;
    dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, maxContainers);
    TestPlanContext ctx = new TestPlanContext();
    dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
    PhysicalPlan plan = new PhysicalPlan(dag, ctx);
    Assert.assertEquals("number of containers", 6, plan.getContainers().size());
    List<PTOperator> inputOperators = new ArrayList<>();
    for (int i = 0; i < 2; i++) {
        PTContainer container = plan.getContainers().get(i);
        Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
        Assert.assertEquals("operators " + container, o1Meta.getName(), container.getOperators().get(0).getOperatorMeta().getName());
        inputOperators.add(container.getOperators().get(0));
    }
    PTOperator inputUnifier = null;
    {
        PTContainer container = plan.getContainers().get(2);
        Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
        PTOperator pUnifier = container.getOperators().get(0);
        Assert.assertEquals("operators " + container, o1Meta.getMeta(o1.outport).getUnifierMeta().getName(), pUnifier.getOperatorMeta().getName());
        Assert.assertTrue("single unifier " + pUnifier, pUnifier.isUnifier());
        Assert.assertEquals("" + pUnifier, 2, pUnifier.getInputs().size());
        for (int inputIndex = 0; inputIndex < pUnifier.getInputs().size(); inputIndex++) {
            PTInput input = pUnifier.getInputs().get(inputIndex);
            Assert.assertEquals("source port name " + pUnifier, "outport", input.source.portName);
            Assert.assertEquals("" + pUnifier, inputOperators.get(inputIndex), input.source.source);
            Assert.assertEquals("partition keys " + input.partitions, null, input.partitions);
        }
        Assert.assertEquals("number outputs " + pUnifier, 1, pUnifier.getOutputs().size());
        PTOutput output = pUnifier.getOutputs().get(0);
        Assert.assertEquals("number inputs " + output, 3, output.sinks.size());
        for (int inputIndex = 0; inputIndex < output.sinks.size(); ++inputIndex) {
            Assert.assertEquals("output sink " + output, o2Meta.getName(), output.sinks.get(inputIndex).target.getName());
            Assert.assertEquals("destination port name " + output, GenericTestOperator.IPORT1, output.sinks.get(inputIndex).portName);
        }
        inputUnifier = pUnifier;
    }
    List<Integer> partitionKeySizes = new ArrayList<>();
    for (int i = 3; i < 6; i++) {
        PTContainer container = plan.getContainers().get(i);
        Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
        Assert.assertEquals("operators " + container, o2Meta.getName(), container.getOperators().get(0).getOperatorMeta().getName());
        PTOperator operator = container.getOperators().get(0);
        Assert.assertEquals("operators " + container, o2Meta.getName(), operator.getOperatorMeta().getName());
        Assert.assertEquals("number inputs " + operator, 1, operator.getInputs().size());
        PTInput input = operator.getInputs().get(0);
        Assert.assertEquals("" + operator, inputUnifier, input.source.source);
        Assert.assertNotNull("input partitions " + operator, input.partitions);
        partitionKeySizes.add(input.partitions.partitions.size());
    }
    Assert.assertEquals("input partition sizes count", 3, partitionKeySizes.size());
    Collections.sort(partitionKeySizes);
    Assert.assertEquals("input partition sizes", Arrays.asList(1, 1, 2), partitionKeySizes);
    // scale down N from 3 to 2 and then from 2 to 1
    for (int i = 0; i < 2; i++) {
        List<PTOperator> ptos = plan.getOperators(o2Meta);
        Set<PTOperator> expUndeploy = Sets.newHashSet(ptos);
        for (PTOperator ptOperator : ptos) {
            //expUndeploy.addAll(ptOperator.upstreamMerge.values());
            expUndeploy.add(ptOperator);
            PartitioningTest.PartitionLoadWatch.put(ptOperator, -1);
            plan.onStatusUpdate(ptOperator);
        }
        ctx.backupRequests = 0;
        ctx.events.remove(0).run();
        Assert.assertEquals("single unifier ", 1, plan.getMergeOperators(o1Meta).size());
        Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(o2Meta));
        // The unifier and o2 operators are expected to be deployed because of partition key changes
        for (PTOperator ptOperator : plan.getOperators(o2Meta)) {
            expDeploy.add(ptOperator);
        }
        // Scaling from 3 to 2 partitions leaves 5 containers and from 2 to 1 leaves 4; the single final unifier is not deployed inline with the lone remaining partition, so it keeps its own container
        Assert.assertEquals("number of containers", 5 - i, plan.getContainers().size());
        Assert.assertEquals("number of operators", 2 - i, plan.getOperators(o2Meta).size());
        Assert.assertEquals("undeployed operators " + ctx.undeploy, expUndeploy, ctx.undeploy);
        Assert.assertEquals("deployed operators " + ctx.deploy, expDeploy, ctx.deploy);
    }
    // scale up N from 1 to 2 and then from 2 to 3
    for (int i = 0; i < 2; i++) {
        List<PTOperator> unChangedOps = new LinkedList<>(plan.getOperators(o2Meta));
        PTOperator o2p1 = unChangedOps.remove(0);
        Set<PTOperator> expUndeploy = Sets.newHashSet(o2p1);
        PartitioningTest.PartitionLoadWatch.put(o2p1, 1);
        plan.onStatusUpdate(o2p1);
        Assert.assertEquals("repartition event", 1, ctx.events.size());
        ctx.backupRequests = 0;
        ctx.events.remove(0).run();
        Assert.assertEquals("single unifier ", 1, plan.getMergeOperators(o1Meta).size());
        Assert.assertEquals("N partitions after scale up " + o2Meta, 2 + i, plan.getOperators(o2Meta).size());
        for (PTOperator o : plan.getOperators(o2Meta)) {
            Assert.assertNotNull(o.container);
            Assert.assertEquals("number operators ", 1, o.container.getOperators().size());
        }
        Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(o2Meta));
        expDeploy.removeAll(unChangedOps);
        Assert.assertEquals("number of containers", 5 + i, plan.getContainers().size());
        Assert.assertEquals("undeployed operators" + ctx.undeploy, expUndeploy, ctx.undeploy);
        Assert.assertEquals("deployed operators" + ctx.deploy, expDeploy, ctx.deploy);
    }
    // scale down M to 1
    {
        Set<PTOperator> expUndeploy = Sets.newHashSet();
        Set<PTOperator> expDeploy = Sets.newHashSet();
        expUndeploy.addAll(plan.getMergeOperators(o1Meta));
        for (PTOperator o2p : plan.getOperators(o2Meta)) {
            expUndeploy.add(o2p);
            expDeploy.add(o2p);
        }
        for (PTOperator o1p : plan.getOperators(o1Meta)) {
            expUndeploy.add(o1p);
            PartitioningTest.PartitionLoadWatch.put(o1p, -1);
            plan.onStatusUpdate(o1p);
        }
        Assert.assertEquals("repartition event", 1, ctx.events.size());
        ctx.events.remove(0).run();
        Assert.assertEquals("M partitions after scale down " + o1Meta, 1, plan.getOperators(o1Meta).size());
        expUndeploy.removeAll(plan.getOperators(o1Meta));
        Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
        Assert.assertEquals("deploy", expDeploy, ctx.deploy);
    }
    // scale up M to 2
    Assert.assertEquals("M partitions " + o1Meta, 1, plan.getOperators(o1Meta).size());
    {
        Set<PTOperator> expUndeploy = Sets.newHashSet();
        Set<PTOperator> expDeploy = Sets.newHashSet();
        for (PTOperator o1p : plan.getOperators(o1Meta)) {
            expUndeploy.add(o1p);
            PartitioningTest.PartitionLoadWatch.put(o1p, 1);
            plan.onStatusUpdate(o1p);
        }
        Assert.assertEquals("repartition event", 1, ctx.events.size());
        ctx.events.remove(0).run();
        Assert.assertEquals("M partitions after scale up " + o1Meta, 2, plan.getOperators(o1Meta).size());
        expDeploy.addAll(plan.getOperators(o1Meta));
        expDeploy.addAll(plan.getMergeOperators(o1Meta));
        for (PTOperator o2p : plan.getOperators(o2Meta)) {
            expUndeploy.add(o2p);
            expDeploy.add(o2p);
            Assert.assertNotNull(o2p.container);
            Assert.assertEquals("number operators ", 1, o2p.container.getOperators().size());
        }
        Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
        Assert.assertEquals("deploy", expDeploy, ctx.deploy);
    }
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) OperatorMeta(com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta) PTInput(com.datatorrent.stram.plan.physical.PTOperator.PTInput) ArrayList(java.util.ArrayList) TestGeneratorInputOperator(com.datatorrent.stram.engine.TestGeneratorInputOperator) StatsListener(com.datatorrent.api.StatsListener) Checkpoint(com.datatorrent.stram.api.Checkpoint) LinkedList(java.util.LinkedList) PartitioningTest(com.datatorrent.stram.PartitioningTest) GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) TestPlanContext(com.datatorrent.stram.plan.TestPlanContext) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) PTOutput(com.datatorrent.stram.plan.physical.PTOperator.PTOutput) Test(org.junit.Test) PartitioningTest(com.datatorrent.stram.PartitioningTest)

Example 10 with PTOutput

Use of com.datatorrent.stram.plan.physical.PTOperator.PTOutput in project apex-core by apache.

Class PhysicalPlanTest, method testSingleFinalCascadingUnifier.

@Test
public void testSingleFinalCascadingUnifier() {
    LogicalPlan dag = new LogicalPlan();
    //TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
    PartitioningTestOperator o1 = dag.addOperator("o1", PartitioningTestOperator.class);
    o1.partitionKeys = new Integer[] { 0, 1, 2, 3 };
    o1.setPartitionCount(3);
    dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitioningTest.PartitionLoadWatch() }));
    dag.setOutputPortAttribute(o1.outport1, PortContext.UNIFIER_LIMIT, 2);
    dag.setOutputPortAttribute(o1.outport1, PortContext.UNIFIER_SINGLE_FINAL, true);
    OperatorMeta o1Meta = dag.getMeta(o1);
    GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
    dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
    OperatorMeta o2Meta = dag.getMeta(o2);
    dag.addStream("o1.outport1", o1.outport1, o2.inport1);
    dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 12);
    TestPlanContext ctx = new TestPlanContext();
    dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
    PhysicalPlan plan = new PhysicalPlan(dag, ctx);
    Assert.assertEquals("number of containers", 10, plan.getContainers().size());
    List<PTOperator> o1Partitions = plan.getOperators(o1Meta);
    Assert.assertEquals("partitions " + o1Meta, 4, o1Partitions.size());
    Assert.assertEquals("partitioned map " + o1.partitions, 4, o1.partitions.size());
    List<PTOperator> o2Partitions = plan.getOperators(o2Meta);
    Assert.assertEquals("partitions " + o1Meta, 3, o2Partitions.size());
    for (PTOperator o : o1Partitions) {
        Assert.assertEquals("outputs " + o, 1, o.getOutputs().size());
        for (PTOutput out : o.getOutputs()) {
            Assert.assertEquals("sinks " + out, 1, out.sinks.size());
        }
        Assert.assertNotNull("container " + o, o.getContainer());
    }
    List<PTOperator> o1Unifiers = plan.getMergeOperators(o1Meta);
    // 2 cascading unifiers plus the single final unifier feeding the downstream partitions
    Assert.assertEquals("o1Unifiers " + o1Meta, 3, o1Unifiers.size());
    List<PTOperator> finalUnifiers = new ArrayList<>();
    for (PTOperator o : o1Unifiers) {
        Assert.assertEquals("inputs " + o, 2, o.getInputs().size());
        Assert.assertEquals("outputs " + o, 1, o.getOutputs().size());
        List<PTInput> sinks = o.getOutputs().get(0).sinks;
        boolean finalUnifier = sinks.size() > 0 ? (sinks.get(0).target.getOperatorMeta() == o2Meta) : false;
        if (!finalUnifier) {
            for (PTOutput out : o.getOutputs()) {
                Assert.assertEquals("sinks " + out, 1, out.sinks.size());
                Assert.assertTrue(out.sinks.get(0).target.isUnifier());
            }
        } else {
            for (PTOutput out : o.getOutputs()) {
                Assert.assertEquals("sinks " + out, 3, out.sinks.size());
                for (PTInput in : out.sinks) {
                    Assert.assertFalse(in.target.isUnifier());
                }
            }
            finalUnifiers.add(o);
        }
        Assert.assertNotNull("container " + o, o.getContainer());
    }
    Assert.assertEquals("o1 final unifiers", 1, finalUnifiers.size());
    for (int i = 0; i < 4; i++) {
        PTContainer container = plan.getContainers().get(i);
        Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
        Assert.assertTrue(o1Partitions.contains(container.getOperators().get(0)));
    }
    for (int i = 4; i < 7; i++) {
        PTContainer container = plan.getContainers().get(i);
        Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
        Assert.assertTrue(o1Unifiers.contains(container.getOperators().get(0)));
    }
    for (int i = 7; i < 10; i++) {
        PTContainer container = plan.getContainers().get(i);
        Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
        Assert.assertTrue(o2Partitions.contains(container.getOperators().get(0)));
    }
    PTOperator p1 = o1Partitions.get(0);
    StatsListener l = p1.statsListeners.get(0);
    Assert.assertTrue("stats handlers " + p1.statsListeners, l instanceof PartitioningTest.PartitionLoadWatch);
    PartitioningTest.PartitionLoadWatch.put(p1, 1);
    plan.onStatusUpdate(p1);
    Assert.assertEquals("partition scaling triggered", 1, ctx.events.size());
    o1.partitionKeys = new Integer[] { 0, 1, 2, 3, 4 };
    ctx.events.remove(0).run();
    o1Partitions = plan.getOperators(o1Meta);
    Assert.assertEquals("partitions " + o1Meta, 5, o1Partitions.size());
    Assert.assertEquals("partitioned map " + o1.partitions, 5, o1.partitions.size());
    o1Unifiers = plan.getMergeOperators(o1Meta);
    // After repartitioning: 3 first-level cascading unifiers plus the single final unifier
    Assert.assertEquals("o1Unifiers " + o1Meta, 4, o1Unifiers.size());
    for (PTOperator o : o1Unifiers) {
        Assert.assertNotNull("container null: " + o, o.getContainer());
    }
}
Also used : OperatorMeta(com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta) PTInput(com.datatorrent.stram.plan.physical.PTOperator.PTInput) ArrayList(java.util.ArrayList) StatsListener(com.datatorrent.api.StatsListener) Checkpoint(com.datatorrent.stram.api.Checkpoint) GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) PartitioningTest(com.datatorrent.stram.PartitioningTest) TestPlanContext(com.datatorrent.stram.plan.TestPlanContext) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) PTOutput(com.datatorrent.stram.plan.physical.PTOperator.PTOutput) Test(org.junit.Test) PartitioningTest(com.datatorrent.stram.PartitioningTest)
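
Example 10 tells cascading unifiers (whose sinks are other unifiers) apart from the single final unifier (whose sinks are the o2 partitions) with an inline loop. The same check can be factored into a small helper, sketched below; the class UnifierInspector is invented for illustration and is not part of apex-core, it uses only accessors exercised in Examples 9 and 10 (getMergeOperators, getOutputs, sinks, target, isUnifier), and it assumes those members are visible outside the plan's own package.

import java.util.ArrayList;
import java.util.List;

import com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta;
import com.datatorrent.stram.plan.physical.PTOperator;
import com.datatorrent.stram.plan.physical.PTOperator.PTInput;
import com.datatorrent.stram.plan.physical.PTOperator.PTOutput;
import com.datatorrent.stram.plan.physical.PhysicalPlan;

// Illustrative sketch, not apex-core code.
public class UnifierInspector {

    // Returns the unifiers of the given upstream operator whose outputs feed
    // non-unifier operators, i.e. the final unifier(s) at the end of a cascade.
    public static List<PTOperator> findFinalUnifiers(PhysicalPlan plan, OperatorMeta upstreamMeta) {
        List<PTOperator> finalUnifiers = new ArrayList<>();
        for (PTOperator unifier : plan.getMergeOperators(upstreamMeta)) {
            boolean feedsOperator = false;
            for (PTOutput out : unifier.getOutputs()) {
                for (PTInput in : out.sinks) {
                    if (!in.target.isUnifier()) {
                        feedsOperator = true;
                    }
                }
            }
            if (feedsOperator) {
                finalUnifiers.add(unifier);
            }
        }
        return finalUnifiers;
    }
}

With UNIFIER_SINGLE_FINAL set, the test expects exactly one such final unifier, which is what the assertion on finalUnifiers.size() verifies before the repartition.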

Aggregations

PTOutput (com.datatorrent.stram.plan.physical.PTOperator.PTOutput): 20
PTInput (com.datatorrent.stram.plan.physical.PTOperator.PTInput): 13
OperatorMeta (com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta): 9
Checkpoint (com.datatorrent.stram.api.Checkpoint): 6
LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan): 6
PartitioningTest (com.datatorrent.stram.PartitioningTest): 5
GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator): 5
TestPlanContext (com.datatorrent.stram.plan.TestPlanContext): 5
Test (org.junit.Test): 5
StatsListener (com.datatorrent.api.StatsListener): 4
InputPortMeta (com.datatorrent.stram.plan.logical.LogicalPlan.InputPortMeta): 4
ArrayList (java.util.ArrayList): 3
Operator (com.datatorrent.api.Operator): 2
PortMappingDescriptor (com.datatorrent.stram.plan.logical.Operators.PortMappingDescriptor): 2
HashSet (java.util.HashSet): 2
InputPort (com.datatorrent.api.Operator.InputPort): 1
OutputPort (com.datatorrent.api.Operator.OutputPort): 1
PartitionKeys (com.datatorrent.api.Partitioner.PartitionKeys): 1
StreamCodec (com.datatorrent.api.StreamCodec): 1
Pair (com.datatorrent.common.util.Pair): 1