Use of com.datatorrent.stram.PartitioningTest.TestInputOperator in project apex-core by Apache.
Example from the class PhysicalPlanTest, method testInputOperatorPartitioning.
/**
 * Test partitioning of an input operator (no input port).
 * Cover aspects that are not part of the generic operator test.
 * Test scaling from one to multiple partitions with unifier when one partition remains unmodified.
 */
@Test
public void testInputOperatorPartitioning() {
  LogicalPlan dag = new LogicalPlan();
  final TestInputOperator<Object> o1 = dag.addOperator("o1", new TestInputOperator<>());
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  dag.addStream("o1.outport1", o1.output, o2.inport1);

  // register a load watch (stats listener) and a test partitioner on the input operator
  OperatorMeta o1Meta = dag.getMeta(o1);
  dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitioningTest.PartitionLoadWatch() }));
  TestPartitioner<TestInputOperator<Object>> partitioner = new TestPartitioner<>();
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, partitioner);

  TestPlanContext ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  PhysicalPlan plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of containers", 2, plan.getContainers().size());

  List<PTOperator> o1Partitions = plan.getOperators(o1Meta);
  Assert.assertEquals("partitions " + o1Partitions, 1, o1Partitions.size());
  PTOperator o1p1 = o1Partitions.get(0);

  // verify load update generates expected events per configuration
  Assert.assertEquals("stats handlers " + o1p1, 1, o1p1.statsListeners.size());
  StatsListener l = o1p1.statsListeners.get(0);
  Assert.assertTrue("stats handlers " + o1p1.statsListeners, l instanceof PartitioningTest.PartitionLoadWatch);
  PartitioningTest.PartitionLoadWatch.put(o1p1, 1);
  plan.onStatusUpdate(o1p1);
  Assert.assertEquals("scale up triggered", 1, ctx.events.size());

  // add another partition, keep existing as is
  partitioner.extraPartitions.add(new DefaultPartition<>(o1));
  Runnable r = ctx.events.remove(0);
  r.run();
  partitioner.extraPartitions.clear();

  o1Partitions = plan.getOperators(o1Meta);
  Assert.assertEquals("operators after scale up", 2, o1Partitions.size());
  Assert.assertEquals("first partition unmodified", o1p1, o1Partitions.get(0));
  Assert.assertEquals("single output", 1, o1p1.getOutputs().size());
  Assert.assertEquals("output to unifier", 1, o1p1.getOutputs().get(0).sinks.size());

  // scale up should redeploy the downstream operator and deploy the new partition plus the unifier
  Set<PTOperator> expUndeploy = Sets.newHashSet(plan.getOperators(dag.getMeta(o2)));
  Set<PTOperator> expDeploy = Sets.newHashSet(o1Partitions.get(1));
  expDeploy.addAll(plan.getMergeOperators(dag.getMeta(o1)));
  expDeploy.addAll(expUndeploy);
  expDeploy.add(o1p1.getOutputs().get(0).sinks.get(0).target);
  Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
  Assert.assertEquals("deploy", expDeploy, ctx.deploy);

  // report underload on all partitions to trigger scale down back to a single partition
  for (PTOperator p : o1Partitions) {
    Assert.assertEquals("activation window id " + p, Checkpoint.INITIAL_CHECKPOINT, p.recoveryCheckpoint);
    Assert.assertEquals("checkpoints " + p + " " + p.checkpoints, Lists.newArrayList(), p.checkpoints);
    PartitioningTest.PartitionLoadWatch.put(p, -1);
    plan.onStatusUpdate(p);
  }
  ctx.events.remove(0).run();
  Assert.assertEquals("operators after scale down", 1, plan.getOperators(o1Meta).size());
}
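
For context, the scale-up is driven by the STATS_LISTENERS attribute: plan.onStatusUpdate(o1p1) consults the registered listener, and a listener that requests repartitioning causes the plan to queue the runnable the test pops from ctx.events. The sketch below shows a minimal listener of that kind, assuming the com.datatorrent.api.StatsListener contract; the class name AlwaysRepartitionListener is made up for illustration and is not the test helper PartitionLoadWatch, which instead lets the test inject a load indicator per physical operator through its static put() method.

import com.datatorrent.api.StatsListener;

// Hypothetical listener (not part of apex-core): unconditionally ask the
// engine to repartition the operator it is attached to.
public class AlwaysRepartitionListener implements StatsListener {
  @Override
  public Response processStats(BatchedOperatorStats stats) {
    Response response = new Response();
    // Setting this flag is what makes the physical plan schedule a
    // repartition event, analogous to the "scale up triggered" assertion.
    response.repartitionRequired = true;
    return response;
  }
}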
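
When the queued event runs, the configured Partitioner decides the new set of partitions. The test drives this through TestPartitioner.extraPartitions, keeping the existing partition untouched and adding one more. Below is a minimal, hypothetical sketch of a partitioner with the same behavior, assuming the com.datatorrent.api.Partitioner and DefaultPartition types; the class name AddOnePartitioner is invented for illustration and is not the project's TestPartitioner.

import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;

import com.datatorrent.api.DefaultPartition;
import com.datatorrent.api.Partitioner;
import com.datatorrent.stram.PartitioningTest.TestInputOperator;

// Hypothetical partitioner (not part of apex-core): keep the incoming
// partitions unmodified and append one partition around a fresh instance.
public class AddOnePartitioner implements Partitioner<TestInputOperator<Object>> {

  @Override
  public Collection<Partition<TestInputOperator<Object>>> definePartitions(
      Collection<Partition<TestInputOperator<Object>>> partitions, PartitioningContext context) {
    // Reusing the incoming Partition objects leaves them untouched, which is
    // what the test asserts with "first partition unmodified".
    Collection<Partition<TestInputOperator<Object>>> result = new ArrayList<>(partitions);
    result.add(new DefaultPartition<>(new TestInputOperator<Object>()));
    return result;
  }

  @Override
  public void partitioned(Map<Integer, Partition<TestInputOperator<Object>>> partitions) {
    // no bookkeeping needed for this sketch
  }
}

As in the test, such a partitioner would be attached with dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, ...). Once more than one partition exists, the plan routes their output through a unifier, which is why the test expects o1p1's single output to point at a unifier and includes the merge operators in the expected deploy set.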