Use of com.datatorrent.stram.plan.physical.PTOperator.PTInput in project apex-core by apache.
From class PhysicalPlanTest, method testSingleFinalMxNPartitioning.
/**
 * MxN partitioning with a single final unifier. When UNIFIER_SINGLE_FINAL is set
 * on the upstream output port, all upstream partitions are merged by one unifier
 * in its own container, instead of a container-local unifier per downstream
 * partition.
 */
@Test
public void testSingleFinalMxNPartitioning() {
LogicalPlan dag = new LogicalPlan();
TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, new StatelessPartitioner<TestGeneratorInputOperator>(2));
dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) new PartitioningTest.PartitionLoadWatch()));
dag.setOutputPortAttribute(o1.outport, PortContext.UNIFIER_SINGLE_FINAL, true);
OperatorMeta o1Meta = dag.getMeta(o1);
GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
dag.setOperatorAttribute(o2, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitioningTest.PartitionLoadWatch() }));
OperatorMeta o2Meta = dag.getMeta(o2);
dag.addStream("o1.outport1", o1.outport, o2.inport1);
int maxContainers = 10;
dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, maxContainers);
TestPlanContext ctx = new TestPlanContext();
dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
PhysicalPlan plan = new PhysicalPlan(dag, ctx);
Assert.assertEquals("number of containers", 6, plan.getContainers().size());
List<PTOperator> inputOperators = new ArrayList<>();
for (int i = 0; i < 2; i++) {
PTContainer container = plan.getContainers().get(i);
Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
Assert.assertEquals("operators " + container, o1Meta.getName(), container.getOperators().get(0).getOperatorMeta().getName());
inputOperators.add(container.getOperators().get(0));
}
PTOperator inputUnifier = null;
{
PTContainer container = plan.getContainers().get(2);
Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
PTOperator pUnifier = container.getOperators().get(0);
Assert.assertEquals("operators " + container, o1Meta.getMeta(o1.outport).getUnifierMeta().getName(), pUnifier.getOperatorMeta().getName());
Assert.assertTrue("single unifier " + pUnifier, pUnifier.isUnifier());
Assert.assertEquals("" + pUnifier, 2, pUnifier.getInputs().size());
for (int inputIndex = 0; inputIndex < pUnifier.getInputs().size(); inputIndex++) {
PTInput input = pUnifier.getInputs().get(inputIndex);
Assert.assertEquals("source port name " + pUnifier, "outport", input.source.portName);
Assert.assertEquals("" + pUnifier, inputOperators.get(inputIndex), input.source.source);
Assert.assertEquals("partition keys " + input.partitions, null, input.partitions);
}
Assert.assertEquals("number outputs " + pUnifier, 1, pUnifier.getOutputs().size());
PTOutput output = pUnifier.getOutputs().get(0);
Assert.assertEquals("number inputs " + output, 3, output.sinks.size());
for (int inputIndex = 0; inputIndex < output.sinks.size(); ++inputIndex) {
Assert.assertEquals("output sink " + output, o2Meta.getName(), output.sinks.get(inputIndex).target.getName());
Assert.assertEquals("destination port name " + output, GenericTestOperator.IPORT1, output.sinks.get(inputIndex).portName);
}
inputUnifier = pUnifier;
}
List<Integer> partitionKeySizes = new ArrayList<>();
for (int i = 3; i < 6; i++) {
PTContainer container = plan.getContainers().get(i);
Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
Assert.assertEquals("operators " + container, o2Meta.getName(), container.getOperators().get(0).getOperatorMeta().getName());
PTOperator operator = container.getOperators().get(0);
Assert.assertEquals("operators " + container, o2Meta.getName(), operator.getOperatorMeta().getName());
Assert.assertEquals("number inputs " + operator, 1, operator.getInputs().size());
PTInput input = operator.getInputs().get(0);
Assert.assertEquals("" + operator, inputUnifier, input.source.source);
Assert.assertNotNull("input partitions " + operator, input.partitions);
partitionKeySizes.add(input.partitions.partitions.size());
}
Assert.assertEquals("input partition sizes count", 3, partitionKeySizes.size());
Collections.sort(partitionKeySizes);
Assert.assertEquals("input partition sizes", Arrays.asList(1, 1, 2), partitionKeySizes);
// scale down N from 3 to 2 and then from 2 to 1
for (int i = 0; i < 2; i++) {
List<PTOperator> ptos = plan.getOperators(o2Meta);
Set<PTOperator> expUndeploy = Sets.newHashSet(ptos);
for (PTOperator ptOperator : ptos) {
//expUndeploy.addAll(ptOperator.upstreamMerge.values());
expUndeploy.add(ptOperator);
PartitioningTest.PartitionLoadWatch.put(ptOperator, -1);
plan.onStatusUpdate(ptOperator);
}
ctx.backupRequests = 0;
ctx.events.remove(0).run();
Assert.assertEquals("single unifier ", 1, plan.getMergeOperators(o1Meta).size());
Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(o2Meta));
// The unifier and o2 operators are expected to be deployed because of partition key changes
for (PTOperator ptOperator : plan.getOperators(o2Meta)) {
expDeploy.add(ptOperator);
}
// each scale-down of N frees one o2 container (6 -> 5 -> 4); the unifier keeps its own container throughout because the single final unifier is not inline with a single operator partition
Assert.assertEquals("number of containers", 5 - i, plan.getContainers().size());
Assert.assertEquals("number of operators", 2 - i, plan.getOperators(o2Meta).size());
Assert.assertEquals("undeployed operators " + ctx.undeploy, expUndeploy, ctx.undeploy);
Assert.assertEquals("deployed operators " + ctx.deploy, expDeploy, ctx.deploy);
}
// scale up N from 1 to 2 and then from 2 to 3
for (int i = 0; i < 2; i++) {
List<PTOperator> unChangedOps = new LinkedList<>(plan.getOperators(o2Meta));
PTOperator o2p1 = unChangedOps.remove(0);
Set<PTOperator> expUndeploy = Sets.newHashSet(o2p1);
PartitioningTest.PartitionLoadWatch.put(o2p1, 1);
plan.onStatusUpdate(o2p1);
Assert.assertEquals("repartition event", 1, ctx.events.size());
ctx.backupRequests = 0;
ctx.events.remove(0).run();
Assert.assertEquals("single unifier ", 1, plan.getMergeOperators(o1Meta).size());
Assert.assertEquals("N partitions after scale up " + o2Meta, 2 + i, plan.getOperators(o2Meta).size());
for (PTOperator o : plan.getOperators(o2Meta)) {
Assert.assertNotNull(o.container);
Assert.assertEquals("number operators ", 1, o.container.getOperators().size());
}
Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(o2Meta));
expDeploy.removeAll(unChangedOps);
Assert.assertEquals("number of containers", 5 + i, plan.getContainers().size());
Assert.assertEquals("undeployed operators" + ctx.undeploy, expUndeploy, ctx.undeploy);
Assert.assertEquals("deployed operators" + ctx.deploy, expDeploy, ctx.deploy);
}
// scale down M to 1
{
Set<PTOperator> expUndeploy = Sets.newHashSet();
Set<PTOperator> expDeploy = Sets.newHashSet();
expUndeploy.addAll(plan.getMergeOperators(o1Meta));
for (PTOperator o2p : plan.getOperators(o2Meta)) {
expUndeploy.add(o2p);
expDeploy.add(o2p);
}
for (PTOperator o1p : plan.getOperators(o1Meta)) {
expUndeploy.add(o1p);
PartitioningTest.PartitionLoadWatch.put(o1p, -1);
plan.onStatusUpdate(o1p);
}
Assert.assertEquals("repartition event", 1, ctx.events.size());
ctx.events.remove(0).run();
Assert.assertEquals("M partitions after scale down " + o1Meta, 1, plan.getOperators(o1Meta).size());
expUndeploy.removeAll(plan.getOperators(o1Meta));
Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
Assert.assertEquals("deploy", expDeploy, ctx.deploy);
}
// scale up M to 2
Assert.assertEquals("M partitions " + o1Meta, 1, plan.getOperators(o1Meta).size());
{
Set<PTOperator> expUndeploy = Sets.newHashSet();
Set<PTOperator> expDeploy = Sets.newHashSet();
for (PTOperator o1p : plan.getOperators(o1Meta)) {
expUndeploy.add(o1p);
PartitioningTest.PartitionLoadWatch.put(o1p, 1);
plan.onStatusUpdate(o1p);
}
Assert.assertEquals("repartition event", 1, ctx.events.size());
ctx.events.remove(0).run();
Assert.assertEquals("M partitions after scale up " + o1Meta, 2, plan.getOperators(o1Meta).size());
expDeploy.addAll(plan.getOperators(o1Meta));
expDeploy.addAll(plan.getMergeOperators(o1Meta));
for (PTOperator o2p : plan.getOperators(o2Meta)) {
expUndeploy.add(o2p);
expDeploy.add(o2p);
Assert.assertNotNull(o2p.container);
Assert.assertEquals("number operators ", 1, o2p.container.getOperators().size());
}
Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
Assert.assertEquals("deploy", expDeploy, ctx.deploy);
}
}
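For reference, a minimal sketch (a setup-method body plus the imports it relies on) of the DAG wiring this test exercises, reusing the apex-core test operators from the snippet above; it is a reduced illustration of the attribute combination, not the test setup itself:

import com.datatorrent.api.Context.OperatorContext;
import com.datatorrent.api.Context.PortContext;
import com.datatorrent.common.partitioner.StatelessPartitioner;
import com.datatorrent.stram.plan.logical.LogicalPlan;

LogicalPlan dag = new LogicalPlan();
TestGeneratorInputOperator source = dag.addOperator("source", TestGeneratorInputOperator.class);
GenericTestOperator sink = dag.addOperator("sink", GenericTestOperator.class);
// partition both ends of the stream (M=2 upstream, N=3 downstream)
dag.setOperatorAttribute(source, OperatorContext.PARTITIONER, new StatelessPartitioner<TestGeneratorInputOperator>(2));
dag.setOperatorAttribute(sink, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
// request one shared final unifier instead of a container-local unifier per downstream partition
dag.setOutputPortAttribute(source.outport, PortContext.UNIFIER_SINGLE_FINAL, true);
dag.addStream("source.out", source.outport, sink.inport1);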
Use of com.datatorrent.stram.plan.physical.PTOperator.PTInput in project apex-core by apache.
From class PhysicalPlanTest, method testMxNPartitioning.
/**
 * MxN partitioning. When source and sink of a stream are partitioned, a
 * separate unifier is created container-local with each downstream partition.
 */
@Test
public void testMxNPartitioning() {
LogicalPlan dag = new LogicalPlan();
TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
TestPartitioner<TestGeneratorInputOperator> o1Partitioner = new TestPartitioner<>();
o1Partitioner.setPartitionCount(2);
dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, o1Partitioner);
dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) new PartitioningTest.PartitionLoadWatch()));
OperatorMeta o1Meta = dag.getMeta(o1);
GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
dag.setOperatorAttribute(o2, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitioningTest.PartitionLoadWatch() }));
OperatorMeta o2Meta = dag.getMeta(o2);
dag.addStream("o1.outport1", o1.outport, o2.inport1);
TestPlanContext ctx = new TestPlanContext();
dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
PhysicalPlan plan = new PhysicalPlan(dag, ctx);
Assert.assertEquals("number of containers", 5, plan.getContainers().size());
List<PTOperator> inputOperators = new ArrayList<>();
for (int i = 0; i < 2; i++) {
PTContainer container = plan.getContainers().get(i);
Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
Assert.assertEquals("operators " + container, o1Meta.getName(), container.getOperators().get(0).getOperatorMeta().getName());
inputOperators.add(container.getOperators().get(0));
}
for (int i = 2; i < 5; i++) {
PTContainer container = plan.getContainers().get(i);
Assert.assertEquals("number operators " + container, 2, container.getOperators().size());
Assert.assertEquals("operators " + container, o2Meta.getName(), container.getOperators().get(0).getOperatorMeta().getName());
Set<String> expectedLogicalNames = Sets.newHashSet(o1Meta.getMeta(o1.outport).getUnifierMeta().getName(), o2Meta.getName());
Map<String, PTOperator> actualOperators = new HashMap<>();
for (PTOperator p : container.getOperators()) {
actualOperators.put(p.getOperatorMeta().getName(), p);
}
Assert.assertEquals("", expectedLogicalNames, actualOperators.keySet());
PTOperator pUnifier = actualOperators.get(o1Meta.getMeta(o1.outport).getUnifierMeta().getName());
Assert.assertNotNull("" + pUnifier, pUnifier.getContainer());
Assert.assertTrue("" + pUnifier, pUnifier.isUnifier());
// input from each upstream partition
Assert.assertEquals("" + pUnifier, 2, pUnifier.getInputs().size());
int numberPartitionKeys = (i == 2) ? 2 : 1;
for (int inputIndex = 0; inputIndex < pUnifier.getInputs().size(); inputIndex++) {
PTInput input = pUnifier.getInputs().get(inputIndex);
Assert.assertEquals("" + pUnifier, "outport", input.source.portName);
Assert.assertEquals("" + pUnifier, inputOperators.get(inputIndex), input.source.source);
Assert.assertEquals("partition keys " + input.partitions, numberPartitionKeys, input.partitions.partitions.size());
}
// output to single downstream partition
Assert.assertEquals("" + pUnifier, 1, pUnifier.getOutputs().size());
Assert.assertTrue("" + actualOperators.get(o2Meta.getName()).getOperatorMeta().getOperator(), actualOperators.get(o2Meta.getName()).getOperatorMeta().getOperator() instanceof GenericTestOperator);
PTOperator p = actualOperators.get(o2Meta.getName());
Assert.assertEquals("partition inputs " + p.getInputs(), 1, p.getInputs().size());
Assert.assertEquals("partition inputs " + p.getInputs(), pUnifier, p.getInputs().get(0).source.source);
Assert.assertEquals("input partition keys " + p.getInputs(), null, p.getInputs().get(0).partitions);
Assert.assertTrue("partitioned unifier container local " + p.getInputs().get(0).source, p.getInputs().get(0).source.isDownStreamInline());
}
// scale down N from 3 to 2 and then from 2 to 1
for (int i = 0; i < 2; i++) {
List<PTOperator> ptos = plan.getOperators(o2Meta);
Set<PTOperator> expUndeploy = Sets.newHashSet(ptos);
for (PTOperator ptOperator : ptos) {
expUndeploy.addAll(ptOperator.upstreamMerge.values());
PartitioningTest.PartitionLoadWatch.put(ptOperator, -1);
plan.onStatusUpdate(ptOperator);
}
ctx.backupRequests = 0;
ctx.events.remove(0).run();
Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(o2Meta));
// either the per-partition unifiers or the single unifier for the single remaining partition are expected to be deployed
expDeploy.addAll(plan.getMergeOperators(o1Meta));
for (PTOperator ptOperator : plan.getOperators(o2Meta)) {
expDeploy.addAll(ptOperator.upstreamMerge.values());
}
Assert.assertEquals("number of containers", 4 - i, plan.getContainers().size());
Assert.assertEquals("number of operators", 2 - i, plan.getOperators(o2Meta).size());
Assert.assertEquals("undeployed operators " + ctx.undeploy, expUndeploy, ctx.undeploy);
Assert.assertEquals("deployed operators " + ctx.deploy, expDeploy, ctx.deploy);
}
// scale up N from 1 to 2 and then from 2 to 3
for (int i = 0; i < 2; i++) {
List<PTOperator> unChangedOps = new LinkedList<>(plan.getOperators(o2Meta));
PTOperator o2p1 = unChangedOps.remove(0);
Set<PTOperator> expUndeploy = Sets.newHashSet(o2p1);
// either the single unifier for the one partition or the merge unifiers of each partition are expected to be undeployed
expUndeploy.addAll(plan.getMergeOperators(o1Meta));
expUndeploy.addAll(o2p1.upstreamMerge.values());
List<PTOperator> nOps = new LinkedList<>();
for (PTOperator ptOperator : unChangedOps) {
nOps.addAll(ptOperator.upstreamMerge.values());
}
unChangedOps.addAll(nOps);
PartitioningTest.PartitionLoadWatch.put(o2p1, 1);
plan.onStatusUpdate(o2p1);
Assert.assertEquals("repartition event", 1, ctx.events.size());
ctx.backupRequests = 0;
ctx.events.remove(0).run();
Assert.assertEquals("N partitions after scale up " + o2Meta, 2 + i, plan.getOperators(o2Meta).size());
Assert.assertTrue("no unifiers", plan.getMergeOperators(o1Meta).isEmpty());
for (PTOperator o : plan.getOperators(o2Meta)) {
Assert.assertNotNull(o.container);
PTOperator unifier = o.upstreamMerge.values().iterator().next();
Assert.assertNotNull(unifier.container);
Assert.assertSame("unifier in same container", o.container, unifier.container);
Assert.assertEquals("container operators " + o.container, Sets.newHashSet(o.container.getOperators()), Sets.newHashSet(o, unifier));
}
Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(o2Meta));
for (PTOperator ptOperator : plan.getOperators(o2Meta)) {
expDeploy.addAll(ptOperator.upstreamMerge.values());
}
expDeploy.removeAll(unChangedOps);
Assert.assertEquals("number of containers", 4 + i, plan.getContainers().size());
Assert.assertEquals("undeployed operators" + ctx.undeploy, expUndeploy, ctx.undeploy);
Assert.assertEquals("deployed operators" + ctx.deploy, expDeploy, ctx.deploy);
}
// scale down M to 1
{
Set<PTOperator> expUndeploy = Sets.newHashSet();
Set<PTOperator> expDeploy = Sets.newHashSet();
for (PTOperator o2p : plan.getOperators(o2Meta)) {
expUndeploy.addAll(o2p.upstreamMerge.values());
expUndeploy.add(o2p);
expDeploy.add(o2p);
}
for (PTOperator o1p : plan.getOperators(o1Meta)) {
expUndeploy.add(o1p);
PartitioningTest.PartitionLoadWatch.put(o1p, -1);
plan.onStatusUpdate(o1p);
}
Assert.assertEquals("repartition event", 1, ctx.events.size());
ctx.events.remove(0).run();
Assert.assertEquals("M partitions after scale down " + o1Meta, 1, plan.getOperators(o1Meta).size());
expUndeploy.removeAll(plan.getOperators(o1Meta));
for (PTOperator o2p : plan.getOperators(o2Meta)) {
Assert.assertTrue("merge unifier " + o2p + " " + o2p.upstreamMerge, o2p.upstreamMerge.isEmpty());
}
Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
Assert.assertEquals("deploy", expDeploy, ctx.deploy);
}
// scale up M to 2
Assert.assertEquals("M partitions " + o1Meta, 1, plan.getOperators(o1Meta).size());
{
Set<PTOperator> expUndeploy = Sets.newHashSet();
Set<PTOperator> expDeploy = Sets.newHashSet();
for (PTOperator o1p : plan.getOperators(o1Meta)) {
PartitioningTest.PartitionLoadWatch.put(o1p, 1);
plan.onStatusUpdate(o1p);
}
Assert.assertEquals("repartition event", 1, ctx.events.size());
o1Partitioner.extraPartitions.add(new DefaultPartition<>(o1));
ctx.events.remove(0).run();
o1Partitioner.extraPartitions.clear();
List<PTOperator> o1Partitions = plan.getOperators(o1Meta);
List<PTOperator> o2Partitions = plan.getOperators(o2Meta);
Assert.assertEquals("M partitions after scale up " + o1Meta, 2, o1Partitions.size());
// previous partition unchanged
expDeploy.add(o1Partitions.get(1));
for (PTOperator o1p : o1Partitions) {
Assert.assertEquals("outputs " + o1p, 1, o1p.getOutputs().size());
Assert.assertEquals("sinks " + o1p, o2Partitions.size(), o1p.getOutputs().get(0).sinks.size());
}
for (PTOperator o2p : plan.getOperators(o2Meta)) {
expUndeploy.add(o2p);
expDeploy.add(o2p);
Assert.assertEquals("merge unifier " + o2p + " " + o2p.upstreamMerge, 1, o2p.upstreamMerge.size());
expDeploy.addAll(o2p.upstreamMerge.values());
}
Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
Assert.assertEquals("deploy", expDeploy, ctx.deploy);
}
}
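The scale events in this test are driven by PartitioningTest.PartitionLoadWatch, which turns recorded load hints into repartition requests. Below is a minimal sketch of a custom listener in the same spirit, written against the com.datatorrent.api.StatsListener contract; the zero-throughput policy and the class name are illustrative, not the PartitionLoadWatch logic:

import java.io.Serializable;

import com.datatorrent.api.StatsListener;

public class ThroughputWatch implements StatsListener, Serializable
{
  @Override
  public Response processStats(BatchedOperatorStats stats)
  {
    Response response = new Response();
    // illustrative policy: ask the plan to repartition when the
    // processed-tuples moving average drops to zero
    response.repartitionRequired = stats.getTuplesProcessedPSMA() == 0;
    return response;
  }
}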
Use of com.datatorrent.stram.plan.physical.PTOperator.PTInput in project apex-core by apache.
From class PhysicalPlanTest, method testStaticPartitioning.
@Test
public void testStaticPartitioning() {
LogicalPlan dag = new LogicalPlan();
dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
TestGeneratorInputOperator node0 = dag.addOperator("node0", TestGeneratorInputOperator.class);
GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
PartitioningTestOperator partitioned = dag.addOperator("partitioned", PartitioningTestOperator.class);
partitioned.setPartitionCount(partitioned.partitionKeys.length);
GenericTestOperator singleton1 = dag.addOperator("singleton1", GenericTestOperator.class);
GenericTestOperator singleton2 = dag.addOperator("singleton2", GenericTestOperator.class);
dag.addStream("n0.inport1", node0.outport, node1.inport1);
dag.addStream("n1.outport1", node1.outport1, partitioned.inport1, partitioned.inportWithCodec);
dag.addStream("mergeStream", partitioned.outport1, singleton1.inport1, singleton2.inport1);
dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 2);
OperatorMeta partitionedMeta = dag.getMeta(partitioned);
dag.validate();
PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
Assert.assertEquals("number of containers", 2, plan.getContainers().size());
Assert.assertNotNull("partition map", partitioned.partitions);
Assert.assertEquals("partition map " + partitioned.partitions, 3, partitioned.partitions.size());
List<PTOperator> n2Instances = plan.getOperators(partitionedMeta);
Assert.assertEquals("partition instances " + n2Instances, partitioned.partitionKeys.length, n2Instances.size());
for (int i = 0; i < n2Instances.size(); i++) {
PTOperator po = n2Instances.get(i);
Map<String, PTInput> inputsMap = new HashMap<>();
for (PTInput input : po.getInputs()) {
inputsMap.put(input.portName, input);
Assert.assertEquals("partitions " + input, Sets.newHashSet(partitioned.partitionKeys[i]), input.partitions.partitions);
//Assert.assertEquals("codec " + input.logicalStream, PartitioningTestStreamCodec.class, input.logicalStream.getCodecClass());
}
Assert.assertEquals("number inputs " + inputsMap, Sets.newHashSet(PartitioningTestOperator.IPORT1, PartitioningTestOperator.INPORT_WITH_CODEC), inputsMap.keySet());
}
Collection<PTOperator> unifiers = plan.getMergeOperators(partitionedMeta);
Assert.assertEquals("number unifiers " + partitionedMeta, 0, unifiers.size());
}
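testStaticPartitioning depends on PartitioningTestOperator, which supplies its own partitioner with fixed partition keys. A minimal sketch of that pattern against the com.datatorrent.api.Partitioner contract, reusing GenericTestOperator from the snippets above; the 1/1/2 key split and the class name StaticKeyPartitioner are illustrative:

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import com.datatorrent.api.DefaultPartition;
import com.datatorrent.api.Partitioner;

public class StaticKeyPartitioner implements Partitioner<GenericTestOperator>
{
  @Override
  public Collection<Partition<GenericTestOperator>> definePartitions(
      Collection<Partition<GenericTestOperator>> partitions, PartitioningContext context)
  {
    // 2-bit mask => key space {0,1,2,3}, split 1/1/2 across three partitions
    int[][] keySets = {{0}, {1}, {2, 3}};
    Collection<Partition<GenericTestOperator>> result = new ArrayList<>();
    for (int[] keySet : keySets) {
      Partition<GenericTestOperator> p = new DefaultPartition<>(new GenericTestOperator());
      Set<Integer> keys = new HashSet<>();
      for (int k : keySet) {
        keys.add(k);
      }
      // pin the key set to the operator's first declared input port
      p.getPartitionKeys().put(context.getInputPorts().get(0), new PartitionKeys(0x03, keys));
      result.add(p);
    }
    return result;
  }

  @Override
  public void partitioned(Map<Integer, Partition<GenericTestOperator>> partitions)
  {
    // no bookkeeping needed in this sketch
  }
}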
Use of com.datatorrent.stram.plan.physical.PTOperator.PTInput in project apex-core by apache.
From class PhysicalPlanTest, method testSingleFinalCascadingUnifier.
@Test
public void testSingleFinalCascadingUnifier() {
LogicalPlan dag = new LogicalPlan();
//TestGeneratorInputOperator o1 = dag.addOperator("o1", TestGeneratorInputOperator.class);
PartitioningTestOperator o1 = dag.addOperator("o1", PartitioningTestOperator.class);
o1.partitionKeys = new Integer[] { 0, 1, 2, 3 };
o1.setPartitionCount(3);
dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitioningTest.PartitionLoadWatch() }));
dag.setOutputPortAttribute(o1.outport1, PortContext.UNIFIER_LIMIT, 2);
dag.setOutputPortAttribute(o1.outport1, PortContext.UNIFIER_SINGLE_FINAL, true);
OperatorMeta o1Meta = dag.getMeta(o1);
GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
OperatorMeta o2Meta = dag.getMeta(o2);
dag.addStream("o1.outport1", o1.outport1, o2.inport1);
dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 12);
TestPlanContext ctx = new TestPlanContext();
dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
PhysicalPlan plan = new PhysicalPlan(dag, ctx);
Assert.assertEquals("number of containers", 10, plan.getContainers().size());
List<PTOperator> o1Partitions = plan.getOperators(o1Meta);
Assert.assertEquals("partitions " + o1Meta, 4, o1Partitions.size());
Assert.assertEquals("partitioned map " + o1.partitions, 4, o1.partitions.size());
List<PTOperator> o2Partitions = plan.getOperators(o2Meta);
Assert.assertEquals("partitions " + o1Meta, 3, o2Partitions.size());
for (PTOperator o : o1Partitions) {
Assert.assertEquals("outputs " + o, 1, o.getOutputs().size());
for (PTOutput out : o.getOutputs()) {
Assert.assertEquals("sinks " + out, 1, out.sinks.size());
}
Assert.assertNotNull("container " + o, o.getContainer());
}
List<PTOperator> o1Unifiers = plan.getMergeOperators(o1Meta);
// 2 cascading unifiers and one downstream-facing final unifier
Assert.assertEquals("o1Unifiers " + o1Meta, 3, o1Unifiers.size());
List<PTOperator> finalUnifiers = new ArrayList<>();
for (PTOperator o : o1Unifiers) {
Assert.assertEquals("inputs " + o, 2, o.getInputs().size());
Assert.assertEquals("outputs " + o, 1, o.getOutputs().size());
List<PTInput> sinks = o.getOutputs().get(0).sinks;
boolean finalUnifier = !sinks.isEmpty() && sinks.get(0).target.getOperatorMeta() == o2Meta;
if (!finalUnifier) {
for (PTOutput out : o.getOutputs()) {
Assert.assertEquals("sinks " + out, 1, out.sinks.size());
Assert.assertTrue(out.sinks.get(0).target.isUnifier());
}
} else {
for (PTOutput out : o.getOutputs()) {
Assert.assertEquals("sinks " + out, 3, out.sinks.size());
for (PTInput in : out.sinks) {
Assert.assertFalse(in.target.isUnifier());
}
}
finalUnifiers.add(o);
}
Assert.assertNotNull("container " + o, o.getContainer());
}
Assert.assertEquals("o1 final unifiers", 1, finalUnifiers.size());
for (int i = 0; i < 4; i++) {
PTContainer container = plan.getContainers().get(i);
Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
Assert.assertTrue(o1Partitions.contains(container.getOperators().get(0)));
}
for (int i = 4; i < 7; i++) {
PTContainer container = plan.getContainers().get(i);
Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
Assert.assertTrue(o1Unifiers.contains(container.getOperators().get(0)));
}
for (int i = 7; i < 10; i++) {
PTContainer container = plan.getContainers().get(i);
Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
Assert.assertTrue(o2Partitions.contains(container.getOperators().get(0)));
}
PTOperator p1 = o1Partitions.get(0);
StatsListener l = p1.statsListeners.get(0);
Assert.assertTrue("stats handlers " + p1.statsListeners, l instanceof PartitioningTest.PartitionLoadWatch);
PartitioningTest.PartitionLoadWatch.put(p1, 1);
plan.onStatusUpdate(p1);
Assert.assertEquals("partition scaling triggered", 1, ctx.events.size());
o1.partitionKeys = new Integer[] { 0, 1, 2, 3, 4 };
ctx.events.remove(0).run();
o1Partitions = plan.getOperators(o1Meta);
Assert.assertEquals("partitions " + o1Meta, 5, o1Partitions.size());
Assert.assertEquals("partitioned map " + o1.partitions, 5, o1.partitions.size());
o1Unifiers = plan.getMergeOperators(o1Meta);
// with 5 partitions and a unifier limit of 2 the cascade now needs 4 unifiers in total
Assert.assertEquals("o1Unifiers " + o1Meta, 4, o1Unifiers.size());
for (PTOperator o : o1Unifiers) {
Assert.assertNotNull("container null: " + o, o.getContainer());
}
}
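The cascade shape follows from the two port attributes set near the top of the test; in isolation the combination reads as below. With 4 partitions and a limit of 2 this yields two level-1 unifiers feeding the single final unifier:

// at most 2 inputs per unifier => the platform builds a cascade of unifiers
dag.setOutputPortAttribute(o1.outport1, PortContext.UNIFIER_LIMIT, 2);
// terminate the cascade in one final unifier shared by all downstream partitions
dag.setOutputPortAttribute(o1.outport1, PortContext.UNIFIER_SINGLE_FINAL, true);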
Use of com.datatorrent.stram.plan.physical.PTOperator.PTInput in project apex-core by apache.
From class PhysicalPlanTest, method testParallelPartitioning.
@Test
public void testParallelPartitioning() {
LogicalPlan dag = new LogicalPlan();
GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);
dag.addStream("o1Output1", o1.outport1, o2.inport1, o3.inport1).setLocality(null);
dag.addStream("o2Output1", o2.outport1, o3.inport2).setLocality(Locality.CONTAINER_LOCAL);
dag.setInputPortAttribute(o3.inport2, PortContext.PARTITION_PARALLEL, true);
// parallel partition two downstream operators
PartitioningTestOperator o3_1 = dag.addOperator("o3_1", PartitioningTestOperator.class);
o3_1.fixedCapacity = false;
dag.setInputPortAttribute(o3_1.inport1, PortContext.PARTITION_PARALLEL, true);
OperatorMeta o3_1Meta = dag.getMeta(o3_1);
GenericTestOperator o3_2 = dag.addOperator("o3_2", GenericTestOperator.class);
dag.setInputPortAttribute(o3_2.inport1, PortContext.PARTITION_PARALLEL, true);
OperatorMeta o3_2Meta = dag.getMeta(o3_2);
dag.addStream("o3outport1", o3.outport1, o3_1.inport1, o3_2.inport1).setLocality(Locality.CONTAINER_LOCAL);
// join within parallel partition
GenericTestOperator o4 = dag.addOperator("o4", GenericTestOperator.class);
dag.setInputPortAttribute(o4.inport1, PortContext.PARTITION_PARALLEL, true);
dag.setInputPortAttribute(o4.inport2, PortContext.PARTITION_PARALLEL, true);
OperatorMeta o4Meta = dag.getMeta(o4);
dag.addStream("o3_1.outport1", o3_1.outport1, o4.inport1).setLocality(Locality.CONTAINER_LOCAL);
dag.addStream("o3_2.outport1", o3_2.outport1, o4.inport2).setLocality(Locality.CONTAINER_LOCAL);
// non inline
GenericTestOperator o5single = dag.addOperator("o5single", GenericTestOperator.class);
dag.addStream("o4outport1", o4.outport1, o5single.inport1);
int maxContainers = 4;
dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, maxContainers);
dag.setAttribute(OperatorContext.STORAGE_AGENT, new TestPlanContext());
PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
Assert.assertEquals("number of containers", 4, plan.getContainers().size());
PTContainer container1 = plan.getContainers().get(0);
Assert.assertEquals("number operators " + container1, 1, container1.getOperators().size());
Assert.assertEquals("operators " + container1, "o1", container1.getOperators().get(0).getOperatorMeta().getName());
for (int i = 1; i < 3; i++) {
PTContainer container2 = plan.getContainers().get(i);
Assert.assertEquals("number operators " + container2, 5, container2.getOperators().size());
Set<String> expectedLogicalNames = Sets.newHashSet("o2", "o3", o3_1Meta.getName(), o3_2Meta.getName(), o4Meta.getName());
Set<String> actualLogicalNames = Sets.newHashSet();
for (PTOperator p : container2.getOperators()) {
actualLogicalNames.add(p.getOperatorMeta().getName());
}
Assert.assertEquals("operator names " + container2, expectedLogicalNames, actualLogicalNames);
}
List<OperatorMeta> inlineOperators = Lists.newArrayList(dag.getMeta(o2), o3_1Meta, o3_2Meta);
for (OperatorMeta ow : inlineOperators) {
List<PTOperator> partitionedInstances = plan.getOperators(ow);
Assert.assertEquals("" + partitionedInstances, 2, partitionedInstances.size());
for (PTOperator p : partitionedInstances) {
Assert.assertEquals("outputs " + p, 1, p.getOutputs().size());
Assert.assertTrue("downstream inline " + p.getOutputs().get(0), p.getOutputs().get(0).isDownStreamInline());
}
}
// container 4: unifier for o4 and o5single
PTContainer container4 = plan.getContainers().get(3);
PTOperator ptOperatorO5 = plan.getOperators(dag.getMeta(o5single)).get(0);
PTOperator unifier = ptOperatorO5.upstreamMerge.values().iterator().next();
Assert.assertEquals("number operators " + container4, 2, container4.getOperators().size());
Assert.assertEquals("operators " + container4, o4Meta.getMeta(o4.outport1).getUnifierMeta(), unifier.getOperatorMeta());
Assert.assertEquals("unifier inputs" + unifier.getInputs(), 2, unifier.getInputs().size());
Assert.assertEquals("unifier outputs" + unifier.getOutputs(), 1, unifier.getOutputs().size());
OperatorMeta o5Meta = dag.getMeta(o5single);
Assert.assertEquals("operators " + container4, o5Meta, ptOperatorO5.getOperatorMeta());
List<PTOperator> o5Instances = plan.getOperators(o5Meta);
Assert.assertEquals("" + o5Instances, 1, o5Instances.size());
Assert.assertEquals("inputs" + ptOperatorO5.getInputs(), 1, ptOperatorO5.getInputs().size());
Assert.assertEquals("inputs" + ptOperatorO5.getInputs(), unifier, ptOperatorO5.getInputs().get(0).source.source);
// verify partitioner was called for parallel partition
Assert.assertNotNull("partitioner called " + o3_1, o3_1.partitions);
for (PTOperator p : plan.getOperators(o3_1Meta)) {
Assert.assertEquals("inputs " + p, 1, p.getInputs().size());
for (PTInput pti : p.getInputs()) {
Assert.assertNull("partition keys " + pti, pti.partitions);
}
}
}
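The core of the parallel-partition setup above, reduced to its two ingredients; upstream and downstream are placeholders for any pair of operators where the sink should inherit the source's partitioning instead of merging through a unifier:

// downstream copies the upstream partition count (1:1), with no unifier in between
dag.setInputPortAttribute(downstream.inport1, PortContext.PARTITION_PARALLEL, true);
dag.addStream("s1", upstream.outport1, downstream.inport1).setLocality(Locality.CONTAINER_LOCAL);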