Example 21 with TestGeneratorInputOperator

Use of com.datatorrent.stram.engine.TestGeneratorInputOperator in project apex-core by apache.

From the class PhysicalPlanTest, method testMxNPartitioning.

/**
 * MxN partitioning. When the source and the sink of a stream are both partitioned, a
 * separate unifier is created container-local with each downstream partition.
 */
@Test
public void testMxNPartitioning() {
    LogicalPlan dag = new LogicalPlan();
    TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
    TestPartitioner<TestGeneratorInputOperator> o1Partitioner = new TestPartitioner<>();
    o1Partitioner.setPartitionCount(2);
    dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, o1Partitioner);
    dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) new PartitioningTest.PartitionLoadWatch()));
    OperatorMeta o1Meta = dag.getMeta(o1);
    GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
    dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<TestGeneratorInputOperator>(3));
    dag.setOperatorAttribute(o2, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitioningTest.PartitionLoadWatch() }));
    OperatorMeta o2Meta = dag.getMeta(o2);
    dag.addStream("o1.outport1", o1.outport, o2.inport1);
    TestPlanContext ctx = new TestPlanContext();
    dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
    PhysicalPlan plan = new PhysicalPlan(dag, ctx);
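    // expect 5 containers: 2 for the o1 partitions and 3 each hosting a container-local (unifier, o2) pair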
    Assert.assertEquals("number of containers", 5, plan.getContainers().size());
    List<PTOperator> inputOperators = new ArrayList<>();
    for (int i = 0; i < 2; i++) {
        PTContainer container = plan.getContainers().get(i);
        Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
        Assert.assertEquals("operators " + container, o1Meta.getName(), container.getOperators().get(0).getOperatorMeta().getName());
        inputOperators.add(container.getOperators().get(0));
    }
    for (int i = 2; i < 5; i++) {
        PTContainer container = plan.getContainers().get(i);
        Assert.assertEquals("number operators " + container, 2, container.getOperators().size());
        Assert.assertEquals("operators " + container, o2Meta.getName(), container.getOperators().get(0).getOperatorMeta().getName());
        Set<String> expectedLogicalNames = Sets.newHashSet(o1Meta.getMeta(o1.outport).getUnifierMeta().getName(), o2Meta.getName());
        Map<String, PTOperator> actualOperators = new HashMap<>();
        for (PTOperator p : container.getOperators()) {
            actualOperators.put(p.getOperatorMeta().getName(), p);
        }
        Assert.assertEquals("", expectedLogicalNames, actualOperators.keySet());
        PTOperator pUnifier = actualOperators.get(o1Meta.getMeta(o1.outport).getUnifierMeta().getName());
        Assert.assertNotNull("" + pUnifier, pUnifier.getContainer());
        Assert.assertTrue("" + pUnifier, pUnifier.isUnifier());
        // input from each upstream partition
        Assert.assertEquals("" + pUnifier, 2, pUnifier.getInputs().size());
        int numberPartitionKeys = (i == 2) ? 2 : 1;
        for (int inputIndex = 0; inputIndex < pUnifier.getInputs().size(); inputIndex++) {
            PTInput input = pUnifier.getInputs().get(inputIndex);
            Assert.assertEquals("" + pUnifier, "outport", input.source.portName);
            Assert.assertEquals("" + pUnifier, inputOperators.get(inputIndex), input.source.source);
            Assert.assertEquals("partition keys " + input.partitions, numberPartitionKeys, input.partitions.partitions.size());
        }
        // output to single downstream partition
        Assert.assertEquals("" + pUnifier, 1, pUnifier.getOutputs().size());
        Assert.assertTrue("" + actualOperators.get(o2Meta.getName()).getOperatorMeta().getOperator(), actualOperators.get(o2Meta.getName()).getOperatorMeta().getOperator() instanceof GenericTestOperator);
        PTOperator p = actualOperators.get(o2Meta.getName());
        Assert.assertEquals("partition inputs " + p.getInputs(), 1, p.getInputs().size());
        Assert.assertEquals("partition inputs " + p.getInputs(), pUnifier, p.getInputs().get(0).source.source);
        Assert.assertEquals("input partition keys " + p.getInputs(), null, p.getInputs().get(0).partitions);
        Assert.assertTrue("partitioned unifier container local " + p.getInputs().get(0).source, p.getInputs().get(0).source.isDownStreamInline());
    }
    // scale down N from 3 to 2 and then from 2 to 1
    for (int i = 0; i < 2; i++) {
        List<PTOperator> ptos = plan.getOperators(o2Meta);
        Set<PTOperator> expUndeploy = Sets.newHashSet(ptos);
        for (PTOperator ptOperator : ptos) {
            expUndeploy.addAll(ptOperator.upstreamMerge.values());
            PartitioningTest.PartitionLoadWatch.put(ptOperator, -1);
            plan.onStatusUpdate(ptOperator);
        }
        ctx.backupRequests = 0;
        ctx.events.remove(0).run();
        Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(o2Meta));
        // Either the unifiers for each partition or the single unifier for the single partition are expected to be deployed
        expDeploy.addAll(plan.getMergeOperators(o1Meta));
        for (PTOperator ptOperator : plan.getOperators(o2Meta)) {
            expDeploy.addAll(ptOperator.upstreamMerge.values());
        }
        Assert.assertEquals("number of containers", 4 - i, plan.getContainers().size());
        Assert.assertEquals("number of operators", 2 - i, plan.getOperators(o2Meta).size());
        Assert.assertEquals("undeployed operators " + ctx.undeploy, expUndeploy, ctx.undeploy);
        Assert.assertEquals("deployed operators " + ctx.deploy, expDeploy, ctx.deploy);
    }
    // scale up N from 1 to 2 and then from 2 to 3
    for (int i = 0; i < 2; i++) {
        List<PTOperator> unChangedOps = new LinkedList<>(plan.getOperators(o2Meta));
        PTOperator o2p1 = unChangedOps.remove(0);
        Set<PTOperator> expUndeploy = Sets.newHashSet(o2p1);
        // Either the single unifier for the one partition or the merged unifiers for each partition are expected to be undeployed
        expUndeploy.addAll(plan.getMergeOperators(o1Meta));
        expUndeploy.addAll(o2p1.upstreamMerge.values());
        List<PTOperator> nOps = new LinkedList<>();
        for (Iterator<PTOperator> iterator = unChangedOps.iterator(); iterator.hasNext(); ) {
            PTOperator ptOperator = iterator.next();
            nOps.addAll(ptOperator.upstreamMerge.values());
        }
        unChangedOps.addAll(nOps);
        PartitioningTest.PartitionLoadWatch.put(o2p1, 1);
        plan.onStatusUpdate(o2p1);
        Assert.assertEquals("repartition event", 1, ctx.events.size());
        ctx.backupRequests = 0;
        ctx.events.remove(0).run();
        Assert.assertEquals("N partitions after scale up " + o2Meta, 2 + i, plan.getOperators(o2Meta).size());
        Assert.assertTrue("no unifiers", plan.getMergeOperators(o1Meta).isEmpty());
        for (PTOperator o : plan.getOperators(o2Meta)) {
            Assert.assertNotNull(o.container);
            PTOperator unifier = o.upstreamMerge.values().iterator().next();
            Assert.assertNotNull(unifier.container);
            Assert.assertSame("unifier in same container", o.container, unifier.container);
            Assert.assertEquals("container operators " + o.container, Sets.newHashSet(o.container.getOperators()), Sets.newHashSet(o, unifier));
        }
        Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(o2Meta));
        for (PTOperator ptOperator : plan.getOperators(o2Meta)) {
            expDeploy.addAll(ptOperator.upstreamMerge.values());
        }
        expDeploy.removeAll(unChangedOps);
        Assert.assertEquals("number of containers", 4 + i, plan.getContainers().size());
        Assert.assertEquals("undeployed operators" + ctx.undeploy, expUndeploy, ctx.undeploy);
        Assert.assertEquals("deployed operators" + ctx.deploy, expDeploy, ctx.deploy);
    }
    // scale down M to 1
    {
        Set<PTOperator> expUndeploy = Sets.newHashSet();
        Set<PTOperator> expDeploy = Sets.newHashSet();
        for (PTOperator o2p : plan.getOperators(o2Meta)) {
            expUndeploy.addAll(o2p.upstreamMerge.values());
            expUndeploy.add(o2p);
            expDeploy.add(o2p);
        }
        for (PTOperator o1p : plan.getOperators(o1Meta)) {
            expUndeploy.add(o1p);
            PartitioningTest.PartitionLoadWatch.put(o1p, -1);
            plan.onStatusUpdate(o1p);
        }
        Assert.assertEquals("repartition event", 1, ctx.events.size());
        ctx.events.remove(0).run();
        Assert.assertEquals("M partitions after scale down " + o1Meta, 1, plan.getOperators(o1Meta).size());
        expUndeploy.removeAll(plan.getOperators(o1Meta));
        for (PTOperator o2p : plan.getOperators(o2Meta)) {
            Assert.assertTrue("merge unifier " + o2p + " " + o2p.upstreamMerge, o2p.upstreamMerge.isEmpty());
        }
        Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
        Assert.assertEquals("deploy", expDeploy, ctx.deploy);
    }
    // scale up M to 2
    Assert.assertEquals("M partitions " + o1Meta, 1, plan.getOperators(o1Meta).size());
    {
        Set<PTOperator> expUndeploy = Sets.newHashSet();
        Set<PTOperator> expDeploy = Sets.newHashSet();
        for (PTOperator o1p : plan.getOperators(o1Meta)) {
            PartitioningTest.PartitionLoadWatch.put(o1p, 1);
            plan.onStatusUpdate(o1p);
        }
        Assert.assertEquals("repartition event", 1, ctx.events.size());
        o1Partitioner.extraPartitions.add(new DefaultPartition<>(o1));
        ctx.events.remove(0).run();
        o1Partitioner.extraPartitions.clear();
        List<PTOperator> o1Partitions = plan.getOperators(o1Meta);
        List<PTOperator> o2Partitions = plan.getOperators(o2Meta);
        Assert.assertEquals("M partitions after scale up " + o1Meta, 2, o1Partitions.size());
        // previous partition unchanged
        expDeploy.add(o1Partitions.get(1));
        for (PTOperator o1p : o1Partitions) {
            Assert.assertEquals("outputs " + o1p, 1, o1p.getOutputs().size());
            Assert.assertEquals("sinks " + o1p, o2Partitions.size(), o1p.getOutputs().get(0).sinks.size());
        }
        for (PTOperator o2p : plan.getOperators(o2Meta)) {
            expUndeploy.add(o2p);
            expDeploy.add(o2p);
            Assert.assertEquals("merge unifier " + o2p + " " + o2p.upstreamMerge, 1, o2p.upstreamMerge.size());
            expDeploy.addAll(o2p.upstreamMerge.values());
        }
        Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
        Assert.assertEquals("deploy", expDeploy, ctx.deploy);
    }
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) List(java.util.List) LinkedList(java.util.LinkedList) PartitioningTest(com.datatorrent.stram.PartitioningTest) GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) TestPlanContext(com.datatorrent.stram.plan.TestPlanContext) OperatorMeta(com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta) PTInput(com.datatorrent.stram.plan.physical.PTOperator.PTInput) TestGeneratorInputOperator(com.datatorrent.stram.engine.TestGeneratorInputOperator) StatsListener(com.datatorrent.api.StatsListener) Checkpoint(com.datatorrent.stram.api.Checkpoint) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) DefaultPartition(com.datatorrent.api.DefaultPartition) GenericNodeTest(com.datatorrent.stram.engine.GenericNodeTest) Test(org.junit.Test)
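
The layout described in the Javadoc above can be reduced to a short, hedged sketch. This is not code from apex-core: the method name is illustrative, StatelessPartitioner stands in for the TestPartitioner helper used in the test, and the expected counts simply mirror the assertions of testMxNPartitioning.

@Test
public void sketchMxNPartitioning() {
    LogicalPlan dag = new LogicalPlan();
    TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
    // M = 2 upstream partitions
    dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<TestGeneratorInputOperator>(2));
    GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
    // N = 3 downstream partitions
    dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<TestGeneratorInputOperator>(3));
    dag.addStream("o1.outport1", o1.outport, o2.inport1);
    TestPlanContext ctx = new TestPlanContext();
    dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
    PhysicalPlan plan = new PhysicalPlan(dag, ctx);
    // 2 containers hold the o1 partitions; each of the remaining 3 containers
    // hosts one unifier container-local with one o2 partition
    Assert.assertEquals("number of containers", 5, plan.getContainers().size());
    for (PTContainer container : plan.getContainers().subList(2, 5)) {
        Assert.assertEquals("operators " + container, 2, container.getOperators().size());
    }
}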

Example 22 with TestGeneratorInputOperator

Use of com.datatorrent.stram.engine.TestGeneratorInputOperator in project apex-core by apache.

From the class PhysicalPlanTest, method testAugmentedDynamicPartitioning.

/**
 * Test covering the scenario where only new partitions are added during dynamic partitioning and
 * there are no changes to the existing partitions or the partition mapping.
 */
@Test
public void testAugmentedDynamicPartitioning() {
    LogicalPlan dag = new LogicalPlan();
    TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
    dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new TestAugmentingPartitioner<TestGeneratorInputOperator>(3));
    dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) new PartitioningTest.PartitionLoadWatch()));
    OperatorMeta o1Meta = dag.getMeta(o1);
    GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
    OperatorMeta o2Meta = dag.getMeta(o2);
    dag.addStream("o1.outport1", o1.outport, o2.inport1);
    int maxContainers = 10;
    dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, maxContainers);
    TestPlanContext ctx = new TestPlanContext();
    dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
    PhysicalPlan plan = new PhysicalPlan(dag, ctx);
    Assert.assertEquals("number of containers", 4, plan.getContainers().size());
    List<PTOperator> o1ops = plan.getOperators(o1Meta);
    Assert.assertEquals("number of o1 operators", 3, o1ops.size());
    List<PTOperator> o2ops = plan.getOperators(o2Meta);
    Assert.assertEquals("number of o2 operators", 1, o2ops.size());
    Set<PTOperator> expUndeploy = Sets.newLinkedHashSet();
    expUndeploy.addAll(plan.getOperators(o2Meta));
    expUndeploy.add(plan.getOperators(o2Meta).get(0).upstreamMerge.values().iterator().next());
    for (int i = 0; i < 2; ++i) {
        PartitioningTest.PartitionLoadWatch.put(o1ops.get(i), 1);
        plan.onStatusUpdate(o1ops.get(i));
    }
    ctx.backupRequests = 0;
    ctx.events.remove(0).run();
    Assert.assertEquals("number of containers", 6, plan.getContainers().size());
    Assert.assertEquals("undeployed opertors", expUndeploy, ctx.undeploy);
}
Also used : OperatorMeta(com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta) TestGeneratorInputOperator(com.datatorrent.stram.engine.TestGeneratorInputOperator) StatsListener(com.datatorrent.api.StatsListener) Checkpoint(com.datatorrent.stram.api.Checkpoint) PartitioningTest(com.datatorrent.stram.PartitioningTest) GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) TestPlanContext(com.datatorrent.stram.plan.TestPlanContext) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) GenericNodeTest(com.datatorrent.stram.engine.GenericNodeTest) Test(org.junit.Test)
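
Every dynamic-scaling step in these examples is driven through the same stats-listener path. A hedged sketch of that trigger pattern follows; it is not project code, plan, ctx and o1Meta refer to the objects built in the tests above, and the overloaded variable is just an illustrative pick of one physical partition.

// A positive load hint asks the plan to scale the operator up, a negative hint to scale it down.
PTOperator overloaded = plan.getOperators(o1Meta).get(0);
PartitioningTest.PartitionLoadWatch.put(overloaded, 1);
plan.onStatusUpdate(overloaded);
// The plan reacts by queueing a repartition event on the plan context ...
Assert.assertEquals("repartition event", 1, ctx.events.size());
// ... which the tests run synchronously to apply the new physical plan.
ctx.events.remove(0).run();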

Example 23 with TestGeneratorInputOperator

Use of com.datatorrent.stram.engine.TestGeneratorInputOperator in project apex-core by apache.

From the class PhysicalPlanTest, method testSingleFinalMxNPartitioning.

/**
 * MxN partitioning with a single final unifier. The source and the sink of the stream are both
 * partitioned, and the UNIFIER_SINGLE_FINAL output port attribute replaces the per-partition,
 * container-local unifiers with one final unifier.
 */
@Test
public void testSingleFinalMxNPartitioning() {
    LogicalPlan dag = new LogicalPlan();
    TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
    dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<TestGeneratorInputOperator>(2));
    dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) new PartitioningTest.PartitionLoadWatch()));
    dag.setOutputPortAttribute(o1.outport, PortContext.UNIFIER_SINGLE_FINAL, true);
    OperatorMeta o1Meta = dag.getMeta(o1);
    GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
    dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<TestGeneratorInputOperator>(3));
    dag.setOperatorAttribute(o2, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitioningTest.PartitionLoadWatch() }));
    OperatorMeta o2Meta = dag.getMeta(o2);
    dag.addStream("o1.outport1", o1.outport, o2.inport1);
    int maxContainers = 10;
    dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, maxContainers);
    TestPlanContext ctx = new TestPlanContext();
    dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
    PhysicalPlan plan = new PhysicalPlan(dag, ctx);
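    // expect 6 containers: 2 o1 partitions, 1 dedicated single final unifier, 3 o2 partitions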
    Assert.assertEquals("number of containers", 6, plan.getContainers().size());
    List<PTOperator> inputOperators = new ArrayList<>();
    for (int i = 0; i < 2; i++) {
        PTContainer container = plan.getContainers().get(i);
        Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
        Assert.assertEquals("operators " + container, o1Meta.getName(), container.getOperators().get(0).getOperatorMeta().getName());
        inputOperators.add(container.getOperators().get(0));
    }
    PTOperator inputUnifier = null;
    {
        PTContainer container = plan.getContainers().get(2);
        Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
        PTOperator pUnifier = container.getOperators().get(0);
        Assert.assertEquals("operators " + container, o1Meta.getMeta(o1.outport).getUnifierMeta().getName(), pUnifier.getOperatorMeta().getName());
        Assert.assertTrue("single unifier " + pUnifier, pUnifier.isUnifier());
        Assert.assertEquals("" + pUnifier, 2, pUnifier.getInputs().size());
        for (int inputIndex = 0; inputIndex < pUnifier.getInputs().size(); inputIndex++) {
            PTInput input = pUnifier.getInputs().get(inputIndex);
            Assert.assertEquals("source port name " + pUnifier, "outport", input.source.portName);
            Assert.assertEquals("" + pUnifier, inputOperators.get(inputIndex), input.source.source);
            Assert.assertEquals("partition keys " + input.partitions, null, input.partitions);
        }
        Assert.assertEquals("number outputs " + pUnifier, 1, pUnifier.getOutputs().size());
        PTOutput output = pUnifier.getOutputs().get(0);
        Assert.assertEquals("number inputs " + output, 3, output.sinks.size());
        for (int inputIndex = 0; inputIndex < output.sinks.size(); ++inputIndex) {
            Assert.assertEquals("output sink " + output, o2Meta.getName(), output.sinks.get(inputIndex).target.getName());
            Assert.assertEquals("destination port name " + output, GenericTestOperator.IPORT1, output.sinks.get(inputIndex).portName);
        }
        inputUnifier = pUnifier;
    }
    List<Integer> partitionKeySizes = new ArrayList<>();
    for (int i = 3; i < 6; i++) {
        PTContainer container = plan.getContainers().get(i);
        Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
        Assert.assertEquals("operators " + container, o2Meta.getName(), container.getOperators().get(0).getOperatorMeta().getName());
        PTOperator operator = container.getOperators().get(0);
        Assert.assertEquals("operators " + container, o2Meta.getName(), operator.getOperatorMeta().getName());
        Assert.assertEquals("number inputs " + operator, 1, operator.getInputs().size());
        PTInput input = operator.getInputs().get(0);
        Assert.assertEquals("" + operator, inputUnifier, input.source.source);
        Assert.assertNotNull("input partitions " + operator, input.partitions);
        partitionKeySizes.add(input.partitions.partitions.size());
    }
    Assert.assertEquals("input partition sizes count", 3, partitionKeySizes.size());
    Collections.sort(partitionKeySizes);
    Assert.assertEquals("input partition sizes", Arrays.asList(1, 1, 2), partitionKeySizes);
    // scale down N from 3 to 2 and then from 2 to 1
    for (int i = 0; i < 2; i++) {
        List<PTOperator> ptos = plan.getOperators(o2Meta);
        Set<PTOperator> expUndeploy = Sets.newHashSet(ptos);
        for (PTOperator ptOperator : ptos) {
            // expUndeploy.addAll(ptOperator.upstreamMerge.values());
            expUndeploy.add(ptOperator);
            PartitioningTest.PartitionLoadWatch.put(ptOperator, -1);
            plan.onStatusUpdate(ptOperator);
        }
        ctx.backupRequests = 0;
        ctx.events.remove(0).run();
        Assert.assertEquals("single unifier ", 1, plan.getMergeOperators(o1Meta).size());
        Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(o2Meta));
        // The unifier and o2 operators are expected to be deployed because of partition key changes
        for (PTOperator ptOperator : plan.getOperators(o2Meta)) {
            expDeploy.add(ptOperator);
        }
        // Scaling from 3 to 2 drops the containers from 5 to 4, but scaling from 2 to 1 leaves the container count unchanged because the single unifier is not inline with the single operator partition
        Assert.assertEquals("number of containers", 5 - i, plan.getContainers().size());
        Assert.assertEquals("number of operators", 2 - i, plan.getOperators(o2Meta).size());
        Assert.assertEquals("undeployed operators " + ctx.undeploy, expUndeploy, ctx.undeploy);
        Assert.assertEquals("deployed operators " + ctx.deploy, expDeploy, ctx.deploy);
    }
    // scale up N from 1 to 2 and then from 2 to 3
    for (int i = 0; i < 2; i++) {
        List<PTOperator> unChangedOps = new LinkedList<>(plan.getOperators(o2Meta));
        PTOperator o2p1 = unChangedOps.remove(0);
        Set<PTOperator> expUndeploy = Sets.newHashSet(o2p1);
        PartitioningTest.PartitionLoadWatch.put(o2p1, 1);
        plan.onStatusUpdate(o2p1);
        Assert.assertEquals("repartition event", 1, ctx.events.size());
        ctx.backupRequests = 0;
        ctx.events.remove(0).run();
        Assert.assertEquals("single unifier ", 1, plan.getMergeOperators(o1Meta).size());
        Assert.assertEquals("N partitions after scale up " + o2Meta, 2 + i, plan.getOperators(o2Meta).size());
        for (PTOperator o : plan.getOperators(o2Meta)) {
            Assert.assertNotNull(o.container);
            Assert.assertEquals("number operators ", 1, o.container.getOperators().size());
        }
        Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(o2Meta));
        expDeploy.removeAll(unChangedOps);
        Assert.assertEquals("number of containers", 5 + i, plan.getContainers().size());
        Assert.assertEquals("undeployed operators" + ctx.undeploy, expUndeploy, ctx.undeploy);
        Assert.assertEquals("deployed operators" + ctx.deploy, expDeploy, ctx.deploy);
    }
    // scale down M to 1
    {
        Set<PTOperator> expUndeploy = Sets.newHashSet();
        Set<PTOperator> expDeploy = Sets.newHashSet();
        expUndeploy.addAll(plan.getMergeOperators(o1Meta));
        for (PTOperator o2p : plan.getOperators(o2Meta)) {
            expUndeploy.add(o2p);
            expDeploy.add(o2p);
        }
        for (PTOperator o1p : plan.getOperators(o1Meta)) {
            expUndeploy.add(o1p);
            PartitioningTest.PartitionLoadWatch.put(o1p, -1);
            plan.onStatusUpdate(o1p);
        }
        Assert.assertEquals("repartition event", 1, ctx.events.size());
        ctx.events.remove(0).run();
        Assert.assertEquals("M partitions after scale down " + o1Meta, 1, plan.getOperators(o1Meta).size());
        expUndeploy.removeAll(plan.getOperators(o1Meta));
        Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
        Assert.assertEquals("deploy", expDeploy, ctx.deploy);
    }
    // scale up M to 2
    Assert.assertEquals("M partitions " + o1Meta, 1, plan.getOperators(o1Meta).size());
    {
        Set<PTOperator> expUndeploy = Sets.newHashSet();
        Set<PTOperator> expDeploy = Sets.newHashSet();
        for (PTOperator o1p : plan.getOperators(o1Meta)) {
            expUndeploy.add(o1p);
            PartitioningTest.PartitionLoadWatch.put(o1p, 1);
            plan.onStatusUpdate(o1p);
        }
        Assert.assertEquals("repartition event", 1, ctx.events.size());
        ctx.events.remove(0).run();
        Assert.assertEquals("M partitions after scale up " + o1Meta, 2, plan.getOperators(o1Meta).size());
        expDeploy.addAll(plan.getOperators(o1Meta));
        expDeploy.addAll(plan.getMergeOperators(o1Meta));
        for (PTOperator o2p : plan.getOperators(o2Meta)) {
            expUndeploy.add(o2p);
            expDeploy.add(o2p);
            Assert.assertNotNull(o2p.container);
            Assert.assertEquals("number operators ", 1, o2p.container.getOperators().size());
        }
        Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
        Assert.assertEquals("deploy", expDeploy, ctx.deploy);
    }
}
Also used : Set(java.util.Set) HashSet(java.util.HashSet) OperatorMeta(com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta) PTInput(com.datatorrent.stram.plan.physical.PTOperator.PTInput) ArrayList(java.util.ArrayList) TestGeneratorInputOperator(com.datatorrent.stram.engine.TestGeneratorInputOperator) StatsListener(com.datatorrent.api.StatsListener) Checkpoint(com.datatorrent.stram.api.Checkpoint) LinkedList(java.util.LinkedList) PartitioningTest(com.datatorrent.stram.PartitioningTest) GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) TestPlanContext(com.datatorrent.stram.plan.TestPlanContext) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) PTOutput(com.datatorrent.stram.plan.physical.PTOperator.PTOutput) GenericNodeTest(com.datatorrent.stram.engine.GenericNodeTest) Test(org.junit.Test)
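
Compared with the container-local MxN layout of Example 21, the only wiring difference in this test is one output port attribute. The hedged sketch below is not project code and assumes the dag, o1, o1Meta and ctx objects from the sketch after Example 21.

// Request a single final unifier for o1's output instead of one
// container-local unifier per downstream o2 partition.
dag.setOutputPortAttribute(o1.outport, PortContext.UNIFIER_SINGLE_FINAL, true);
PhysicalPlan plan = new PhysicalPlan(dag, ctx);
// Exactly one merge unifier now exists for o1; it runs in its own container
// and feeds all three o2 partitions.
Assert.assertEquals("single unifier", 1, plan.getMergeOperators(o1Meta).size());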

Example 24 with TestGeneratorInputOperator

Use of com.datatorrent.stram.engine.TestGeneratorInputOperator in project apex-core by apache.

From the class StramLocalClusterTest, method testAppPath.

@Test
public void testAppPath() throws Exception {
    // add operator for initial checkpoint
    TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
    o1.setMaxTuples(1);
    File relPath = new File(dag.getAttributes().get(DAGContext.APPLICATION_PATH));
    String uriPath = relPath.toURI().toString();
    dag.setAttribute(DAGContext.APPLICATION_PATH, uriPath);
    StramLocalCluster cluster = new StramLocalCluster(dag);
    // no need for run(), just need the initial checkpoint
    Assert.assertFalse(cluster.isFinished());
    Assert.assertTrue("app path exists", relPath.exists() && relPath.isDirectory());
    File checkPointDir = new File(relPath, LogicalPlan.SUBDIR_CHECKPOINTS);
    Assert.assertTrue("checkpoint path exists", checkPointDir.exists() && checkPointDir.isDirectory());
}
Also used : TestGeneratorInputOperator(com.datatorrent.stram.engine.TestGeneratorInputOperator) File(java.io.File) Test(org.junit.Test)

Example 25 with TestGeneratorInputOperator

Use of com.datatorrent.stram.engine.TestGeneratorInputOperator in project apex-core by apache.

From the class StramLocalClusterTest, method testLocalClusterInitShutdown.

/**
 * Verify that the test configuration launches and stops after the input terminates.
 * The test validates the expected output end to end.
 *
 * @throws Exception
 */
@Test
public void testLocalClusterInitShutdown() throws Exception {
    TestGeneratorInputOperator genNode = dag.addOperator("genNode", TestGeneratorInputOperator.class);
    genNode.setMaxTuples(2);
    GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
    node1.setEmitFormat("%s >> node1");
    File outFile = new File("./target/" + StramLocalClusterTest.class.getName() + "-testLocalClusterInitShutdown.out");
    outFile.delete();
    TestOutputOperator outNode = dag.addOperator("outNode", TestOutputOperator.class);
    outNode.pathSpec = outFile.toURI().toString();
    dag.addStream("fromGenNode", genNode.outport, node1.inport1);
    dag.addStream("fromNode1", node1.outport1, outNode.inport);
    dag.getAttributes().put(LogicalPlan.CONTAINERS_MAX_COUNT, 2);
    StramLocalCluster localCluster = new StramLocalCluster(dag);
    localCluster.setHeartbeatMonitoringEnabled(false);
    localCluster.run();
    Assert.assertTrue(outFile + " exists", outFile.exists());
    LineNumberReader lnr = new LineNumberReader(new FileReader(outFile));
    String line;
    while ((line = lnr.readLine()) != null) {
        Assert.assertTrue("line match " + line, line.matches("" + lnr.getLineNumber() + " >> node1"));
    }
    Assert.assertEquals("number lines", 2, lnr.getLineNumber());
    lnr.close();
}
Also used : GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) TestGeneratorInputOperator(com.datatorrent.stram.engine.TestGeneratorInputOperator) TestOutputOperator(com.datatorrent.stram.engine.TestOutputOperator) FileReader(java.io.FileReader) File(java.io.File) LineNumberReader(java.io.LineNumberReader) Test(org.junit.Test)
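
A hedged, stripped-down sketch of the run pattern this test exercises; it is not project code and assumes the dag wiring built above, including the input operator capped with setMaxTuples.

// Build the local cluster, disable heartbeat monitoring for the test, and run
// to completion; run() returns once the capped input operator finishes and the
// DAG shuts down, after which the output file can be verified.
StramLocalCluster localCluster = new StramLocalCluster(dag);
localCluster.setHeartbeatMonitoringEnabled(false);
localCluster.run();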

Aggregations

TestGeneratorInputOperator (com.datatorrent.stram.engine.TestGeneratorInputOperator)45 Test (org.junit.Test)43 GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator)34 PartitioningTest (com.datatorrent.stram.PartitioningTest)11 Checkpoint (com.datatorrent.stram.api.Checkpoint)10 OperatorMeta (com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta)9 PTOperator (com.datatorrent.stram.plan.physical.PTOperator)8 ValidationException (javax.validation.ValidationException)8 StreamMeta (com.datatorrent.api.DAG.StreamMeta)7 StreamingContainerManagerTest (com.datatorrent.stram.StreamingContainerManagerTest)7 LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan)6 PhysicalPlanTest (com.datatorrent.stram.plan.physical.PhysicalPlanTest)6 MemoryStorageAgent (com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent)6 DefaultDelayOperator (com.datatorrent.common.util.DefaultDelayOperator)5 TestPlanContext (com.datatorrent.stram.plan.TestPlanContext)5 PhysicalPlan (com.datatorrent.stram.plan.physical.PhysicalPlan)5 ArrayList (java.util.ArrayList)5 OperatorMeta (com.datatorrent.api.DAG.OperatorMeta)4 StatsListener (com.datatorrent.api.StatsListener)4 StramLocalCluster (com.datatorrent.stram.StramLocalCluster)4