Use of com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta in project apex-core by apache.
From class LogicalPlanModificationTest, method testRemoveOperator2:
@Test
public void testRemoveOperator2() {
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  OperatorMeta o1Meta = dag.getMeta(o1);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  OperatorMeta o2Meta = dag.getMeta(o2);
  GenericTestOperator o3 = dag.addOperator("o3", GenericTestOperator.class);
  OperatorMeta o3Meta = dag.getMeta(o3);
  LogicalPlan.StreamMeta s1 = dag.addStream("o1.outport1", o1.outport1, o2.inport1, o3.inport1).setLocality(Locality.CONTAINER_LOCAL);
  TestPlanContext ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  PhysicalPlan plan = new PhysicalPlan(dag, ctx);
  ctx.deploy.clear();
  ctx.undeploy.clear();
  Assert.assertEquals("containers " + plan.getContainers(), 1, plan.getContainers().size());
  Assert.assertEquals("physical operators " + plan.getAllOperators(), 3, plan.getAllOperators().size());
  Assert.assertEquals("sinks s1 " + s1.getSinks(), 2, s1.getSinks().size());
  List<PTOperator> o2PhysicalOpers = plan.getOperators(o2Meta);
  Assert.assertEquals("instances " + o2Meta, 1, o2PhysicalOpers.size());
  PlanModifier pm = new PlanModifier(plan);
  // remove operator w/o removing the stream
  pm.removeOperator(o2Meta.getName());
  pm.applyChanges(ctx);
  Assert.assertEquals("sinks s1 " + s1.getSinks(), 1, s1.getSinks().size());
  Assert.assertTrue("undeploy " + ctx.undeploy, ctx.undeploy.containsAll(o2PhysicalOpers));
  Set<PTOperator> expDeploy = Sets.newHashSet();
  // TODO: container local operators should be included in undeploy/deploy
  // expDeploy.addAll(plan.getOperators(o1Meta));
  // expDeploy.addAll(plan.getOperators(o3Meta));
  Assert.assertEquals("deploy " + ctx.deploy, ctx.deploy, expDeploy);
  Assert.assertEquals("streams " + dag.getAllStreams(), 1, dag.getAllStreams().size());
  Assert.assertEquals("operators " + dag.getAllOperators(), 2, dag.getAllOperators().size());
  Assert.assertTrue("operators " + dag.getAllOperators(), dag.getAllOperators().containsAll(Sets.newHashSet(o1Meta, o3Meta)));
}
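The pattern this test exercises, reduced to its essentials. This is only a sketch, reusing the fixtures from the test above (dag, GenericTestOperator, TestPlanContext); names such as op and opMeta are illustrative.

// Sketch: remove a logical operator through PlanModifier, addressing it by the
// name carried in its OperatorMeta; deploy/undeploy requests are collected in ctx.
GenericTestOperator op = dag.addOperator("op", GenericTestOperator.class);
OperatorMeta opMeta = dag.getMeta(op);
TestPlanContext ctx = new TestPlanContext();
dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
PhysicalPlan plan = new PhysicalPlan(dag, ctx);
PlanModifier pm = new PlanModifier(plan);
pm.removeOperator(opMeta.getName());   // queue removal of the logical operator
pm.applyChanges(ctx);                  // apply to the physical plan; ctx records undeploy/deploy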
Use of com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta in project apex-core by apache.
From class LogicalPlanModificationTest, method testSetOperatorProperty:
@Test
public void testSetOperatorProperty() {
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  OperatorMeta o1Meta = dag.getMeta(o1);
  TestPlanContext ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  PhysicalPlan plan = new PhysicalPlan(dag, ctx);
  ctx.deploy.clear();
  ctx.undeploy.clear();
  PlanModifier pm = new PlanModifier(plan);
  try {
    pm.setOperatorProperty(o1Meta.getName(), "myStringProperty", "propertyValue");
    Assert.fail("validation error expected");
  } catch (javax.validation.ValidationException e) {
    Assert.assertTrue(e.getMessage().contains(o1Meta.toString()));
  }
  GenericTestOperator newOperator = new GenericTestOperator();
  pm.addOperator("newOperator", newOperator);
  pm.setOperatorProperty("newOperator", "myStringProperty", "propertyValue");
  Assert.assertEquals("", "propertyValue", newOperator.getMyStringProperty());
}
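A brief sketch of the behavior asserted above, using only calls that appear in the test: a property change is accepted for an operator added through the same PlanModifier, while targeting the pre-existing operator o1 is rejected with a ValidationException.

// Sketch: property changes are accepted for operators added via the PlanModifier itself.
PlanModifier pm = new PlanModifier(plan);
GenericTestOperator added = new GenericTestOperator();
pm.addOperator("added", added);
pm.setOperatorProperty("added", "myStringProperty", "propertyValue");
// Setting the same property on an operator already present in the plan (o1 above)
// throws javax.validation.ValidationException, as the try/catch in the test asserts.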
Use of com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta in project apex-core by apache.
From class PhysicalPlanTest, method testCascadingUnifier:
@Test
public void testCascadingUnifier() {
  LogicalPlan dag = new LogicalPlan();
  PartitioningTestOperator o1 = dag.addOperator("o1", PartitioningTestOperator.class);
  o1.partitionKeys = new Integer[] { 0, 1, 2, 3 };
  o1.setPartitionCount(o1.partitionKeys.length);
  dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitioningTest.PartitionLoadWatch() }));
  dag.setOutputPortAttribute(o1.outport1, PortContext.UNIFIER_LIMIT, 2);
  OperatorMeta o1Meta = dag.getMeta(o1);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
  OperatorMeta o2Meta = dag.getMeta(o2);
  dag.addStream("o1.outport1", o1.outport1, o2.inport1);
  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 10);
  TestPlanContext ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  PhysicalPlan plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of containers", 9, plan.getContainers().size());
  List<PTOperator> o1Partitions = plan.getOperators(o1Meta);
  Assert.assertEquals("partitions " + o1Meta, 4, o1Partitions.size());
  Assert.assertEquals("partitioned map " + o1.partitions, 4, o1.partitions.size());
  List<PTOperator> o2Partitions = plan.getOperators(o2Meta);
  Assert.assertEquals("partitions " + o2Meta, 3, o2Partitions.size());
  for (PTOperator o : o1Partitions) {
    Assert.assertEquals("outputs " + o, 1, o.getOutputs().size());
    for (PTOutput out : o.getOutputs()) {
      Assert.assertEquals("sinks " + out, 1, out.sinks.size());
    }
    Assert.assertNotNull("container " + o, o.getContainer());
  }
  List<PTOperator> o1Unifiers = plan.getMergeOperators(o1Meta);
  // 2 cascading unifiers feeding the per-downstream-partition unifier(s)
  Assert.assertEquals("o1Unifiers " + o1Meta, 2, o1Unifiers.size());
  for (PTOperator o : o1Unifiers) {
    Assert.assertEquals("inputs " + o, 2, o.getInputs().size());
    Assert.assertEquals("outputs " + o, 1, o.getOutputs().size());
    for (PTOutput out : o.getOutputs()) {
      Assert.assertEquals("sinks " + out, 3, out.sinks.size());
      for (PTInput in : out.sinks) {
        // MxN unifier
        Assert.assertTrue(in.target.isUnifier());
        Assert.assertEquals(1, in.target.getOutputs().get(0).sinks.size());
      }
    }
    Assert.assertNotNull("container " + o, o.getContainer());
  }
  for (int i = 0; i < 4; i++) {
    PTContainer container = plan.getContainers().get(i);
    Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
    Assert.assertTrue(o1Partitions.contains(container.getOperators().get(0)));
  }
  for (int i = 4; i < 6; i++) {
    PTContainer container = plan.getContainers().get(i);
    Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
    Assert.assertTrue(o1Unifiers.contains(container.getOperators().get(0)));
  }
  for (int i = 6; i < 9; i++) {
    PTContainer container = plan.getContainers().get(i);
    Assert.assertEquals("number operators " + container, 2, container.getOperators().size());
    Assert.assertTrue(o2Partitions.contains(container.getOperators().get(0)));
  }
  PTOperator p1 = o1Partitions.get(0);
  StatsListener l = p1.statsListeners.get(0);
  Assert.assertTrue("stats handlers " + p1.statsListeners, l instanceof PartitioningTest.PartitionLoadWatch);
  PartitioningTest.PartitionLoadWatch.put(p1, 1);
  plan.onStatusUpdate(p1);
  Assert.assertEquals("partition scaling triggered", 1, ctx.events.size());
  o1.partitionKeys = new Integer[] { 0, 1, 2, 3, 4 };
  ctx.events.remove(0).run();
  o1Partitions = plan.getOperators(o1Meta);
  Assert.assertEquals("partitions " + o1Meta, 5, o1Partitions.size());
  Assert.assertEquals("partitioned map " + o1.partitions, 5, o1.partitions.size());
  o1Unifiers = plan.getMergeOperators(o1Meta);
  // 3(l1)x2(l2)
  Assert.assertEquals("o1Unifiers " + o1Meta, 3, o1Unifiers.size());
  for (PTOperator o : o1Unifiers) {
    Assert.assertNotNull("container null: " + o, o.getContainer());
  }
}
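The attributes that drive the cascading unification above, isolated as a sketch; only calls that appear in the test are used, and the dag, o1, o2 and plan fixtures are assumed to be the ones built there.

// Sketch: 4 upstream partitions with UNIFIER_LIMIT 2 on the output port, feeding a
// consumer partitioned 3 ways; the plan inserts intermediate (cascading) unifiers
// ahead of the per-downstream-partition unifiers, as the assertions above verify.
dag.setOutputPortAttribute(o1.outport1, PortContext.UNIFIER_LIMIT, 2);
dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
List<PTOperator> o1Unifiers = plan.getMergeOperators(dag.getMeta(o1));   // the cascading unifiers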
Use of com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta in project apex-core by apache.
From class PhysicalPlanTest, method testNodeLocality:
@Test
public void testNodeLocality() {
  LogicalPlan dag = new LogicalPlan();
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator partitioned = dag.addOperator("partitioned", GenericTestOperator.class);
  dag.getMeta(partitioned).getAttributes().put(OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
  GenericTestOperator partitionedParallel = dag.addOperator("partitionedParallel", GenericTestOperator.class);
  dag.addStream("o1_outport1", o1.outport1, partitioned.inport1).setLocality(null);
  dag.addStream("partitioned_outport1", partitioned.outport1, partitionedParallel.inport2).setLocality(Locality.NODE_LOCAL);
  dag.setInputPortAttribute(partitionedParallel.inport2, PortContext.PARTITION_PARALLEL, true);
  GenericTestOperator single = dag.addOperator("single", GenericTestOperator.class);
  dag.addStream("partitionedParallel_outport1", partitionedParallel.outport1, single.inport1);
  int maxContainers = 6;
  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, maxContainers);
  dag.setAttribute(OperatorContext.STORAGE_AGENT, new TestPlanContext());
  PhysicalPlan plan = new PhysicalPlan(dag, new TestPlanContext());
  Assert.assertEquals("number of containers", maxContainers, plan.getContainers().size());
  PTContainer container1 = plan.getContainers().get(0);
  Assert.assertEquals("number operators " + container1, 1, container1.getOperators().size());
  Assert.assertEquals("operators " + container1, dag.getMeta(o1), container1.getOperators().get(0).getOperatorMeta());
  for (int i = 1; i < 3; i++) {
    PTContainer c = plan.getContainers().get(i);
    Assert.assertEquals("number operators " + c, 1, c.getOperators().size());
    Set<OperatorMeta> expectedLogical = Sets.newHashSet(dag.getMeta(partitioned));
    Set<OperatorMeta> actualLogical = Sets.newHashSet();
    for (PTOperator p : c.getOperators()) {
      actualLogical.add(p.getOperatorMeta());
    }
    Assert.assertEquals("operators " + c, expectedLogical, actualLogical);
  }
  // in-node parallel partition
  for (int i = 3; i < 5; i++) {
    PTContainer c = plan.getContainers().get(i);
    Assert.assertEquals("number operators " + c, 1, c.getOperators().size());
    Set<OperatorMeta> expectedLogical = Sets.newHashSet(dag.getMeta(partitionedParallel));
    Set<OperatorMeta> actualLogical = Sets.newHashSet();
    for (PTOperator p : c.getOperators()) {
      actualLogical.add(p.getOperatorMeta());
      Assert.assertEquals("nodeLocal " + p.getNodeLocalOperators(), 2, p.getNodeLocalOperators().getOperatorSet().size());
    }
    Assert.assertEquals("operators " + c, expectedLogical, actualLogical);
  }
}
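The locality request at the core of this test, shown as a sketch with the same dag, partitioned and partitionedParallel operators built above.

// Sketch: NODE_LOCAL stream locality combined with parallel partitioning on the
// consumer's input port, so each upstream/downstream partition pair shares a node.
dag.addStream("partitioned_outport1", partitioned.outport1, partitionedParallel.inport2).setLocality(Locality.NODE_LOCAL);
dag.setInputPortAttribute(partitionedParallel.inport2, PortContext.PARTITION_PARALLEL, true);
// Per physical operator, the co-located set is reported by getNodeLocalOperators(),
// which the test expects to contain 2 operators for each parallel partition.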
Use of com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta in project apex-core by apache.
From class PhysicalPlanTest, method testRepartitioningScaleUp:
@Test
public void testRepartitioningScaleUp() {
  LogicalPlan dag = new LogicalPlan();
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator mergeNode = dag.addOperator("mergeNode", GenericTestOperator.class);
  dag.addStream("o1.outport1", o1.outport1, o2.inport1, o2.inport2);
  dag.addStream("mergeStream", o2.outport1, mergeNode.inport1);
  OperatorMeta o2Meta = dag.getMeta(o2);
  o2Meta.getAttributes().put(OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) new PartitionLoadWatch(0, 5)));
  o2Meta.getAttributes().put(OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(1));
  TestPlanContext ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  PhysicalPlan plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of operators", 3, plan.getAllOperators().size());
  Assert.assertEquals("number of save requests", 3, ctx.backupRequests);
  List<PTOperator> o2Partitions = plan.getOperators(o2Meta);
  Assert.assertEquals("partition count " + o2Meta, 1, o2Partitions.size());
  PTOperator o2p1 = o2Partitions.get(0);
  Assert.assertEquals("stats handlers " + o2p1, 1, o2p1.statsListeners.size());
  StatsListener sl = o2p1.statsListeners.get(0);
  Assert.assertTrue("stats handlers " + o2p1.statsListeners, sl instanceof PartitionLoadWatch);
  // no delay
  ((PartitionLoadWatch) sl).evalIntervalMillis = -1;
  setThroughput(o2p1, 10);
  plan.onStatusUpdate(o2p1);
  Assert.assertEquals("partitioning triggered", 1, ctx.events.size());
  ctx.backupRequests = 0;
  ctx.events.remove(0).run();
  o2Partitions = plan.getOperators(o2Meta);
  Assert.assertEquals("partition count " + o2Partitions, 2, o2Partitions.size());
  o2p1 = o2Partitions.get(0);
  Assert.assertEquals("sinks " + o2p1.getOutputs(), 1, o2p1.getOutputs().size());
  PTOperator o2p2 = o2Partitions.get(1);
  Assert.assertEquals("sinks " + o2p2.getOutputs(), 1, o2p2.getOutputs().size());
  Set<PTOperator> expUndeploy = Sets.newHashSet(plan.getOperators(dag.getMeta(mergeNode)));
  expUndeploy.add(o2p1);
  expUndeploy.addAll(plan.getOperators(dag.getMeta(mergeNode)).get(0).upstreamMerge.values());
  // verify load update generates expected events per configuration
  setThroughput(o2p1, 0);
  plan.onStatusUpdate(o2p1);
  Assert.assertEquals("load min", 0, ctx.events.size());
  setThroughput(o2p1, 3);
  plan.onStatusUpdate(o2p1);
  Assert.assertEquals("load within range", 0, ctx.events.size());
  setThroughput(o2p1, 10);
  plan.onStatusUpdate(o2p1);
  Assert.assertEquals("load exceeds max", 1, ctx.events.size());
  ctx.backupRequests = 0;
  ctx.events.remove(0).run();
  Assert.assertEquals("new partitions", 3, plan.getOperators(o2Meta).size());
  Assert.assertTrue("", plan.getOperators(o2Meta).contains(o2p2));
  for (PTOperator partition : plan.getOperators(o2Meta)) {
    Assert.assertNotNull("container null " + partition, partition.getContainer());
    Assert.assertEquals("outputs " + partition, 1, partition.getOutputs().size());
    Assert.assertEquals("downstream operators " + partition.getOutputs().get(0).sinks, 1, partition.getOutputs().get(0).sinks.size());
  }
  Assert.assertEquals("" + ctx.undeploy, expUndeploy, ctx.undeploy);
  Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(dag.getMeta(mergeNode)));
  expDeploy.addAll(plan.getOperators(o2Meta));
  expDeploy.remove(o2p2);
  expDeploy.addAll(plan.getOperators(dag.getMeta(mergeNode)).get(0).upstreamMerge.values());
  Assert.assertEquals("" + ctx.deploy, expDeploy, ctx.deploy);
  Assert.assertEquals("Count of storage requests", 2, ctx.backupRequests);
  // partitioning skipped on insufficient head room
  o2p1 = plan.getOperators(o2Meta).get(0);
  plan.setAvailableResources(0);
  setThroughput(o2p1, 10);
  plan.onStatusUpdate(o2p1);
  Assert.assertEquals("not repartitioned", 1, ctx.events.size());
  ctx.events.remove(0).run();
  Assert.assertEquals("partition count unchanged", 3, plan.getOperators(o2Meta).size());
}
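The scale-up trigger used above, reduced to a sketch; it assumes the same fixtures (o2Meta, o2p1, plan, ctx) and the test's setThroughput helper.

// Sketch: a PartitionLoadWatch registered via STATS_LISTENERS evaluates throughput
// against the configured range (0..5); a status update outside the range queues a
// repartition event in the plan context, and running the event rebuilds the partitions.
o2Meta.getAttributes().put(OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) new PartitionLoadWatch(0, 5)));
setThroughput(o2p1, 10);      // exceeds the configured maximum of 5
plan.onStatusUpdate(o2p1);    // queues one repartition event in ctx.events
ctx.events.remove(0).run();   // applies the repartitioning to the physical plan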