Use of com.datatorrent.stram.plan.physical.PTOperator.PTInput in project apex-core by Apache.
From class PhysicalPlanTest, method testDefaultPartitionerWithParallel:
@Test
public void testDefaultPartitionerWithParallel() throws InterruptedException {
  final MutableInt loadInd = new MutableInt();
  StatsListener listener = new StatsListener() {
    @Override
    public Response processStats(BatchedOperatorStats stats) {
      Response response = new Response();
      response.repartitionRequired = true;
      response.loadIndicator = loadInd.intValue();
      return response;
    }
  };
  LogicalPlan dag = new LogicalPlan();
  GenericTestOperator nodeX = dag.addOperator("X", GenericTestOperator.class);
  dag.setOperatorAttribute(nodeX, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
  dag.setOperatorAttribute(nodeX, Context.OperatorContext.STATS_LISTENERS, Lists.newArrayList(listener));
  GenericTestOperator nodeY = dag.addOperator("Y", GenericTestOperator.class);
  dag.setOperatorAttribute(nodeY, Context.OperatorContext.PARTITIONER, new TestPartitioner<GenericTestOperator>());
  GenericTestOperator nodeZ = dag.addOperator("Z", GenericTestOperator.class);
  dag.addStream("Stream1", nodeX.outport1, nodeY.inport1, nodeZ.inport1);
  dag.addStream("Stream2", nodeX.outport2, nodeY.inport2, nodeZ.inport2);
  dag.setInputPortAttribute(nodeY.inport1, Context.PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(nodeY.inport2, Context.PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(nodeZ.inport1, Context.PortContext.PARTITION_PARALLEL, true);
  dag.setInputPortAttribute(nodeZ.inport2, Context.PortContext.PARTITION_PARALLEL, true);
  StramTestSupport.MemoryStorageAgent msa = new StramTestSupport.MemoryStorageAgent();
  dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, msa);
  TestPlanContext ctx = new TestPlanContext();
  PhysicalPlan plan = new PhysicalPlan(dag, ctx);
  LogicalPlan.OperatorMeta metaOfX = dag.getMeta(nodeX);
  LogicalPlan.OperatorMeta metaOfY = dag.getMeta(nodeY);
  Assert.assertEquals("number operators " + metaOfX.getName(), 2, plan.getOperators(metaOfX).size());
  Assert.assertEquals("number operators " + metaOfY.getName(), 2, plan.getOperators(metaOfY).size());
  List<PTOperator> ptOfX = plan.getOperators(metaOfX);
  for (PTOperator physicalX : ptOfX) {
    Assert.assertEquals("2 streams " + physicalX.getOutputs(), 2, physicalX.getOutputs().size());
    for (PTOutput outputPort : physicalX.getOutputs()) {
      Set<PTOperator> dopers = Sets.newHashSet();
      Assert.assertEquals("sink of " + metaOfX.getName() + " id " + physicalX.id + " port " + outputPort.portName, 2, outputPort.sinks.size());
      for (PTInput inputPort : outputPort.sinks) {
        dopers.add(inputPort.target);
      }
      Assert.assertEquals(2, dopers.size());
    }
  }
  // invoke redo-partition of PhysicalPlan; no partition change expected
  loadInd.setValue(0);
  for (PTOperator ptOperator : ptOfX) {
    plan.onStatusUpdate(ptOperator);
  }
  ctx.events.remove(0).run();
  for (PTOperator physicalX : ptOfX) {
    Assert.assertEquals("2 streams " + physicalX.getOutputs(), 2, physicalX.getOutputs().size());
    for (PTOutput outputPort : physicalX.getOutputs()) {
      Set<PTOperator> dopers = Sets.newHashSet();
      Assert.assertEquals("sink of " + metaOfX.getName() + " id " + physicalX.id + " port " + outputPort.portName, 2, outputPort.sinks.size());
      for (PTInput inputPort : outputPort.sinks) {
        dopers.add(inputPort.target);
      }
      Assert.assertEquals(2, dopers.size());
    }
  }
  // scale up by splitting the first partition
  loadInd.setValue(1);
  plan.onStatusUpdate(ptOfX.get(0));
  ctx.events.get(0).run();
  List<PTOperator> ptOfXScaleUp = plan.getOperators(metaOfX);
  Assert.assertEquals("3 partitions " + ptOfXScaleUp, 3, ptOfXScaleUp.size());
  for (PTOperator physicalX : ptOfXScaleUp) {
    Assert.assertEquals("2 streams " + physicalX.getOutputs(), 2, physicalX.getOutputs().size());
    for (PTOutput outputPort : physicalX.getOutputs()) {
      Set<PTOperator> dopers = Sets.newHashSet();
      Assert.assertEquals("sink of " + metaOfX.getName() + " id " + physicalX.id + " port " + outputPort.portName, 2, outputPort.sinks.size());
      for (PTInput inputPort : outputPort.sinks) {
        dopers.add(inputPort.target);
      }
      Assert.assertEquals(2, dopers.size());
    }
  }
}
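The same fan-out check runs three times in this test: on the initial plan, after the no-change repartition, and after the scale-up. As a refactoring sketch it could be pulled into a helper; the method name assertSinkFanout is hypothetical, but every call it makes (getOperators, getOutputs, PTOutput.sinks, PTInput.target) is exactly what the test already uses:

private static void assertSinkFanout(PhysicalPlan plan, LogicalPlan.OperatorMeta meta, int expectedSinks) {
  // hypothetical helper, not part of PhysicalPlanTest: every output port of every
  // partition of `meta` must have `expectedSinks` PTInput sinks, each targeting a
  // distinct downstream PTOperator
  for (PTOperator partition : plan.getOperators(meta)) {
    for (PTOutput outputPort : partition.getOutputs()) {
      Assert.assertEquals("sinks " + outputPort.portName, expectedSinks, outputPort.sinks.size());
      Set<PTOperator> targets = Sets.newHashSet();
      for (PTInput inputPort : outputPort.sinks) {
        targets.add(inputPort.target);
      }
      Assert.assertEquals("distinct targets " + outputPort.portName, expectedSinks, targets.size());
    }
  }
}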
Use of com.datatorrent.stram.plan.physical.PTOperator.PTInput in project apex-core by Apache.
From class PhysicalPlanTest, method testCascadingUnifier:
@Test
public void testCascadingUnifier() {
  LogicalPlan dag = new LogicalPlan();
  PartitioningTestOperator o1 = dag.addOperator("o1", PartitioningTestOperator.class);
  o1.partitionKeys = new Integer[] { 0, 1, 2, 3 };
  o1.setPartitionCount(o1.partitionKeys.length);
  dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitioningTest.PartitionLoadWatch() }));
  dag.setOutputPortAttribute(o1.outport1, PortContext.UNIFIER_LIMIT, 2);
  OperatorMeta o1Meta = dag.getMeta(o1);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  dag.setOperatorAttribute(o2, OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(3));
  OperatorMeta o2Meta = dag.getMeta(o2);
  dag.addStream("o1.outport1", o1.outport1, o2.inport1);
  dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 10);
  TestPlanContext ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  PhysicalPlan plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of containers", 9, plan.getContainers().size());
  List<PTOperator> o1Partitions = plan.getOperators(o1Meta);
  Assert.assertEquals("partitions " + o1Meta, 4, o1Partitions.size());
  Assert.assertEquals("partitioned map " + o1.partitions, 4, o1.partitions.size());
  List<PTOperator> o2Partitions = plan.getOperators(o2Meta);
  Assert.assertEquals("partitions " + o2Meta, 3, o2Partitions.size());
  for (PTOperator o : o1Partitions) {
    Assert.assertEquals("outputs " + o, 1, o.getOutputs().size());
    for (PTOutput out : o.getOutputs()) {
      Assert.assertEquals("sinks " + out, 1, out.sinks.size());
    }
    Assert.assertNotNull("container " + o, o.getContainer());
  }
  List<PTOperator> o1Unifiers = plan.getMergeOperators(o1Meta);
  // 2 cascading unifiers feeding the per-downstream-partition unifier(s)
  Assert.assertEquals("o1Unifiers " + o1Meta, 2, o1Unifiers.size());
  for (PTOperator o : o1Unifiers) {
    Assert.assertEquals("inputs " + o, 2, o.getInputs().size());
    Assert.assertEquals("outputs " + o, 1, o.getOutputs().size());
    for (PTOutput out : o.getOutputs()) {
      Assert.assertEquals("sinks " + out, 3, out.sinks.size());
      for (PTInput in : out.sinks) {
        // MxN unifier
        Assert.assertTrue(in.target.isUnifier());
        Assert.assertEquals(1, in.target.getOutputs().get(0).sinks.size());
      }
    }
    Assert.assertNotNull("container " + o, o.getContainer());
  }
  for (int i = 0; i < 4; i++) {
    PTContainer container = plan.getContainers().get(i);
    Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
    Assert.assertTrue(o1Partitions.contains(container.getOperators().get(0)));
  }
  for (int i = 4; i < 6; i++) {
    PTContainer container = plan.getContainers().get(i);
    Assert.assertEquals("number operators " + container, 1, container.getOperators().size());
    Assert.assertTrue(o1Unifiers.contains(container.getOperators().get(0)));
  }
  for (int i = 6; i < 9; i++) {
    PTContainer container = plan.getContainers().get(i);
    Assert.assertEquals("number operators " + container, 2, container.getOperators().size());
    Assert.assertTrue(o2Partitions.contains(container.getOperators().get(0)));
  }
  PTOperator p1 = o1Partitions.get(0);
  StatsListener l = p1.statsListeners.get(0);
  Assert.assertTrue("stats handlers " + p1.statsListeners, l instanceof PartitioningTest.PartitionLoadWatch);
  PartitioningTest.PartitionLoadWatch.put(p1, 1);
  plan.onStatusUpdate(p1);
  Assert.assertEquals("partition scaling triggered", 1, ctx.events.size());
  o1.partitionKeys = new Integer[] { 0, 1, 2, 3, 4 };
  ctx.events.remove(0).run();
  o1Partitions = plan.getOperators(o1Meta);
  Assert.assertEquals("partitions " + o1Meta, 5, o1Partitions.size());
  Assert.assertEquals("partitioned map " + o1.partitions, 5, o1.partitions.size());
  o1Unifiers = plan.getMergeOperators(o1Meta);
  // unifier tree after scale-up: 3(l1)x2(l2)
  Assert.assertEquals("o1Unifiers " + o1Meta, 3, o1Unifiers.size());
  for (PTOperator o : o1Unifiers) {
    Assert.assertNotNull("container null: " + o, o.getContainer());
  }
}
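With UNIFIER_LIMIT 2 on o1.outport1 and 4 upstream partitions, tuples flow from an o1 partition through a cascading unifier, then a per-downstream-partition (MxN) unifier, and finally into an o2 partition, which is what the PTInput assertions above verify level by level. A sketch of walking that chain through PTInput links; the helper name is hypothetical and it assumes an acyclic plan:

private static PTOperator firstNonUnifierDownstream(PTOperator start) {
  // follow the first PTInput edge downstream until the target is not a unifier
  PTOperator current = start;
  while (!current.getOutputs().isEmpty() && !current.getOutputs().get(0).sinks.isEmpty()) {
    PTInput edge = current.getOutputs().get(0).sinks.get(0);
    current = edge.target;
    if (!current.isUnifier()) {
      break;
    }
  }
  return current;
}

Applied to any o1 partition in the plan above, this should cross one cascading unifier and one MxN unifier before returning an o2 partition.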
Use of com.datatorrent.stram.plan.physical.PTOperator.PTInput in project apex-core by Apache.
From class PhysicalPlanTest, method testRepartitioningScaleDown:
@Test
public void testRepartitioningScaleDown() {
  LogicalPlan dag = new LogicalPlan();
  GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  GenericTestOperator o3parallel = dag.addOperator("o3parallel", GenericTestOperator.class);
  OperatorMeta o3Meta = dag.getMeta(o3parallel);
  GenericTestOperator mergeNode = dag.addOperator("mergeNode", GenericTestOperator.class);
  dag.addStream("o1.outport1", o1.outport1, o2.inport1, o2.inport2);
  dag.addStream("o2.outport1", o2.outport1, o3parallel.inport1).setLocality(Locality.CONTAINER_LOCAL);
  dag.setInputPortAttribute(o3parallel.inport1, PortContext.PARTITION_PARALLEL, true);
  dag.addStream("o3parallel_outport1", o3parallel.outport1, mergeNode.inport1);
  dag.getAttributes().put(LogicalPlan.CONTAINERS_MAX_COUNT, 2);
  OperatorMeta node2Meta = dag.getMeta(o2);
  node2Meta.getAttributes().put(OperatorContext.STATS_LISTENERS, Lists.newArrayList((StatsListener) new PartitionLoadWatch(3, 5)));
  node2Meta.getAttributes().put(OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(8));
  TestPlanContext ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  PhysicalPlan plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of containers", 2, plan.getContainers().size());
  Assert.assertEquals("Count of storage requests", plan.getAllOperators().size(), ctx.backupRequests);
  List<PTOperator> n2Instances = plan.getOperators(node2Meta);
  Assert.assertEquals("partition instances " + n2Instances, 8, n2Instances.size());
  PTOperator po = n2Instances.get(0);
  Collection<PTOperator> unifiers = plan.getMergeOperators(node2Meta);
  Assert.assertEquals("unifiers " + node2Meta, 0, unifiers.size());
  Collection<PTOperator> o3unifiers = plan.getOperators(dag.getMeta(mergeNode)).get(0).upstreamMerge.values();
  Assert.assertEquals("unifiers " + o3Meta, 1, o3unifiers.size());
  PTOperator o3unifier = o3unifiers.iterator().next();
  Assert.assertEquals("unifier inputs " + o3unifier, 8, o3unifier.getInputs().size());
  Set<PTOperator> expUndeploy = Sets.newHashSet(plan.getOperators(dag.getMeta(mergeNode)));
  expUndeploy.addAll(n2Instances);
  expUndeploy.addAll(plan.getOperators(o3Meta));
  expUndeploy.addAll(o3unifiers);
  // verify load update generates expected events per configuration
  Assert.assertEquals("stats handlers " + po, 1, po.statsListeners.size());
  StatsListener l = po.statsListeners.get(0);
  Assert.assertTrue("stats handlers " + po.statsListeners, l instanceof PartitionLoadWatch);
  // no delay
  ((PartitionLoadWatch) l).evalIntervalMillis = -1;
  setThroughput(po, 5);
  plan.onStatusUpdate(po);
  Assert.assertEquals("load upper bound", 0, ctx.events.size());
  setThroughput(po, 3);
  plan.onStatusUpdate(po);
  Assert.assertEquals("load lower bound", 0, ctx.events.size());
  setThroughput(po, 2);
  plan.onStatusUpdate(po);
  Assert.assertEquals("load below min", 1, ctx.events.size());
  ctx.backupRequests = 0;
  ctx.events.remove(0).run();
  // expect operators unchanged
  Assert.assertEquals("partitions unchanged", Sets.newHashSet(n2Instances), Sets.newHashSet(plan.getOperators(node2Meta)));
  for (PTOperator o : n2Instances) {
    setThroughput(o, 2);
    plan.onStatusUpdate(o);
  }
  Assert.assertEquals("load below min", 1, ctx.events.size());
  ctx.events.remove(0).run();
  Assert.assertEquals("partitions merged", 4, plan.getOperators(node2Meta).size());
  Assert.assertEquals("unifier inputs after scale down " + o3unifier, 4, o3unifier.getInputs().size());
  for (PTOperator p : plan.getOperators(o3Meta)) {
    Assert.assertEquals("outputs " + p.getOutputs(), 1, p.getOutputs().size());
  }
  for (PTOperator p : plan.getOperators(node2Meta)) {
    PartitionKeys pks = p.getPartitionKeys().values().iterator().next();
    Assert.assertEquals("partition mask " + p, 3, pks.mask);
    Assert.assertEquals("inputs " + p, 2, p.getInputs().size());
    boolean portConnected = false;
    for (PTInput input : p.getInputs()) {
      if (GenericTestOperator.IPORT1.equals(input.portName)) {
        portConnected = true;
        Assert.assertEquals("partition mask " + input, pks, input.partitions);
      }
    }
    Assert.assertTrue("connected " + GenericTestOperator.IPORT1, portConnected);
  }
  Assert.assertEquals("" + ctx.undeploy, expUndeploy, ctx.undeploy);
  o3unifiers = plan.getOperators(dag.getMeta(mergeNode)).get(0).upstreamMerge.values();
  Set<PTOperator> expDeploy = Sets.newHashSet(plan.getOperators(dag.getMeta(mergeNode)));
  expDeploy.addAll(plan.getOperators(node2Meta));
  expDeploy.addAll(plan.getOperators(o3Meta));
  expDeploy.addAll(o3unifiers);
  Assert.assertEquals("" + ctx.deploy, expDeploy, ctx.deploy);
  for (PTOperator oper : ctx.deploy) {
    Assert.assertNotNull("container " + oper, oper.getContainer());
  }
  Assert.assertEquals("Count of storage requests", 8, ctx.backupRequests);
}
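The mask assertion reflects the StatelessPartitioner key scheme: after the merge from 8 partitions down to 4, the two low bits of the tuple hash select the partition, so the mask is 3 (binary 11), and each PTInput carries the PartitionKeys of its target partition. A small illustration of that routing rule, assuming the standard semantics of com.datatorrent.api.Partitioner.PartitionKeys (a mask plus a set of key values):

static boolean routesTo(PartitionKeys pks, Object tuple) {
  // assumed semantics: a tuple reaches the partition whose key set
  // contains the masked tuple hash
  return pks.partitions.contains(tuple.hashCode() & pks.mask);
}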
Use of com.datatorrent.stram.plan.physical.PTOperator.PTInput in project apex-core by Apache.
From class PhysicalPlan, method updateStreamMappings:
private void updateStreamMappings(PMapping m) {
  for (Map.Entry<OutputPortMeta, StreamMeta> opm : m.logicalOperator.getOutputStreams().entrySet()) {
    StreamMapping ug = m.outputStreams.get(opm.getKey());
    if (ug == null) {
      ug = new StreamMapping(opm.getValue(), this);
      m.outputStreams.put(opm.getKey(), ug);
    }
    LOG.debug("update stream mapping for {} {}", opm.getKey().getOperatorMeta(), opm.getKey().getPortName());
    ug.setSources(m.partitions);
  }
  for (Map.Entry<InputPortMeta, StreamMeta> ipm : m.logicalOperator.getInputStreams().entrySet()) {
    PMapping sourceMapping = this.logicalToPTOperator.get(ipm.getValue().getSource().getOperatorMeta());
    if (ipm.getValue().getSource().getOperatorMeta().getOperator() instanceof Operator.DelayOperator) {
      // skip if the source is a DelayOperator
      continue;
    }
    if (ipm.getKey().getValue(PortContext.PARTITION_PARALLEL)) {
      if (sourceMapping.partitions.size() < m.partitions.size()) {
        throw new AssertionError("Number of partitions don't match in parallel mapping " + sourceMapping.logicalOperator.getName() + " -> " + m.logicalOperator.getName() + ", " + sourceMapping.partitions.size() + " -> " + m.partitions.size());
      }
      int slidingWindowCount = 0;
      OperatorMeta sourceOM = sourceMapping.logicalOperator;
      if (sourceOM.getAttributes().contains(Context.OperatorContext.SLIDE_BY_WINDOW_COUNT)) {
        if (sourceOM.getValue(Context.OperatorContext.SLIDE_BY_WINDOW_COUNT) < sourceOM.getValue(Context.OperatorContext.APPLICATION_WINDOW_COUNT)) {
          slidingWindowCount = sourceOM.getValue(OperatorContext.SLIDE_BY_WINDOW_COUNT);
        } else {
          LOG.warn("Sliding Window Count {} should be less than APPLICATION WINDOW COUNT {}", sourceOM.getValue(Context.OperatorContext.SLIDE_BY_WINDOW_COUNT), sourceOM.getValue(Context.OperatorContext.APPLICATION_WINDOW_COUNT));
        }
      }
      for (int i = 0; i < m.partitions.size(); i++) {
        PTOperator oper = m.partitions.get(i);
        PTOperator sourceOper = sourceMapping.partitions.get(i);
        for (PTOutput sourceOut : sourceOper.outputs) {
          nextSource: if (sourceOut.logicalStream == ipm.getValue()) {
            // avoid duplicate entries in case of parallel partitions
            for (PTInput sinkIn : sourceOut.sinks) {
              // check the input port meta currently being looked at, since we allow
              // an output port to connect to multiple inputs of the same operator
              if (sinkIn.target == oper && sinkIn.portName.equals(ipm.getKey().getPortName())) {
                break nextSource;
              }
            }
            PTInput input;
            if (slidingWindowCount > 0) {
              PTOperator slidingUnifier = StreamMapping.createSlidingUnifier(sourceOut.logicalStream, this, sourceOM.getValue(Context.OperatorContext.APPLICATION_WINDOW_COUNT), slidingWindowCount);
              StreamMapping.addInput(slidingUnifier, sourceOut, null);
              input = new PTInput(ipm.getKey().getPortName(), ipm.getValue(), oper, null, slidingUnifier.outputs.get(0), ipm.getKey().getValue(LogicalPlan.IS_CONNECTED_TO_DELAY_OPERATOR));
              sourceMapping.outputStreams.get(ipm.getValue().getSource()).slidingUnifiers.add(slidingUnifier);
            } else {
              input = new PTInput(ipm.getKey().getPortName(), ipm.getValue(), oper, null, sourceOut, ipm.getKey().getValue(LogicalPlan.IS_CONNECTED_TO_DELAY_OPERATOR));
            }
            oper.inputs.add(input);
          }
        }
      }
    } else {
      StreamMapping ug = sourceMapping.outputStreams.get(ipm.getValue().getSource());
      if (ug == null) {
        ug = new StreamMapping(ipm.getValue(), this);
        sourceMapping.outputStreams.put(ipm.getValue().getSource(), ug);
      }
      LOG.debug("update upstream stream mapping for {} {}", sourceMapping.logicalOperator, ipm.getValue().getSource().getPortName());
      ug.setSources(sourceMapping.partitions);
    }
  }
}
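The labeled break in the parallel-partition branch exists to skip sources that are already wired up, matching on both the target operator and the port name because one output port may feed multiple input ports of the same operator. The same check as a standalone sketch, with a hypothetical helper name:

private static boolean alreadyConnected(PTOutput sourceOut, PTOperator target, String portName) {
  // true if sourceOut already has a PTInput edge into portName of target
  for (PTInput sinkIn : sourceOut.sinks) {
    if (sinkIn.target == target && sinkIn.portName.equals(portName)) {
      return true;
    }
  }
  return false;
}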
Use of com.datatorrent.stram.plan.physical.PTOperator.PTInput in project apex-core by Apache.
From class StreamMapping, method redoMapping:
/**
 * Rebuilds the unifier tree. This may cause more changes to the execution layer
 * than strictly necessary. TODO: investigate incremental logic.
 */
private void redoMapping() {
  Set<Pair<PTOperator, InputPortMeta>> downstreamOpers = Sets.newHashSet();
  // figure out the downstream consumers
  for (InputPortMeta ipm : streamMeta.getSinks()) {
    // skipped for parallel partitions - those are handled elsewhere
    if (!ipm.getValue(PortContext.PARTITION_PARALLEL) && plan.hasMapping(ipm.getOperatorMeta())) {
      List<PTOperator> partitions = plan.getOperators(ipm.getOperatorMeta());
      for (PTOperator doper : partitions) {
        downstreamOpers.add(new Pair<>(doper, ipm));
      }
    }
  }
  if (!downstreamOpers.isEmpty()) {
    // unifiers are required
    for (PTOperator unifier : this.cascadingUnifiers) {
      detachUnifier(unifier);
    }
    if (this.finalUnifier != null) {
      detachUnifier(finalUnifier);
    }
    List<PTOperator> currentUnifiers = Lists.newArrayList(this.cascadingUnifiers);
    this.cascadingUnifiers.clear();
    plan.undeployOpers.addAll(currentUnifiers);
    addSlidingUnifiers();
    int limit = streamMeta.getSource().getValue(PortContext.UNIFIER_LIMIT);
    boolean separateUnifiers = false;
    Integer lastId = null;
    for (InputPortMeta ipm : streamMeta.getSinks()) {
      Integer id = plan.getStreamCodecIdentifier(ipm.getStreamCodec());
      if (lastId == null) {
        lastId = id;
      } else if (!id.equals(lastId)) {
        separateUnifiers = true;
        break;
      }
    }
    List<PTOutput> unifierSources = this.upstream;
    Map<StreamCodec<?>, List<PTOutput>> cascadeUnifierSourcesMap = Maps.newHashMap();
    if (limit > 1 && this.upstream.size() > limit) {
      // cascading unifier
      if (!separateUnifiers) {
        unifierSources = setupCascadingUnifiers(this.upstream, currentUnifiers, limit, 0);
      } else {
        for (InputPortMeta ipm : streamMeta.getSinks()) {
          StreamCodec<?> streamCodec = ipm.getStreamCodec();
          if (!cascadeUnifierSourcesMap.containsKey(streamCodec)) {
            unifierSources = setupCascadingUnifiers(this.upstream, currentUnifiers, limit, 0);
            cascadeUnifierSourcesMap.put(streamCodec, unifierSources);
          }
        }
      }
    }
    // remove remaining unifiers
    for (PTOperator oper : currentUnifiers) {
      plan.removePTOperator(oper);
    }
    // get the attribute directly from the map to know whether it is set at all,
    // as it can be overridden by the input
    Boolean sourceSingleFinal = streamMeta.getSource().getAttributes().get(PortContext.UNIFIER_SINGLE_FINAL);
    // link the downstream operators with the unifiers
    for (Pair<PTOperator, InputPortMeta> doperEntry : downstreamOpers) {
      Map<LogicalPlan.InputPortMeta, PartitionKeys> partKeys = doperEntry.first.partitionKeys;
      PartitionKeys pks = partKeys != null ? partKeys.get(doperEntry.second) : null;
      Boolean sinkSingleFinal = doperEntry.second.getAttributes().get(PortContext.UNIFIER_SINGLE_FINAL);
      boolean lastSingle = (sinkSingleFinal != null) ? sinkSingleFinal : (sourceSingleFinal != null ? sourceSingleFinal.booleanValue() : PortContext.UNIFIER_SINGLE_FINAL.defaultValue);
      if (upstream.size() > 1) {
        // detach downstream from upstream operator for the case where no unifier existed previously
        for (PTOutput source : upstream) {
          Iterator<PTInput> sinks = source.sinks.iterator();
          while (sinks.hasNext()) {
            PTInput sink = sinks.next();
            if (sink.target == doperEntry.first) {
              doperEntry.first.inputs.remove(sink);
              sinks.remove();
            }
          }
        }
        if (!separateUnifiers && lastSingle) {
          if (finalUnifier == null) {
            finalUnifier = createUnifier(streamMeta, plan);
          }
          setInput(doperEntry.first, doperEntry.second, finalUnifier, (pks == null) || (pks.mask == 0) ? null : pks);
          if (finalUnifier.inputs.isEmpty()) {
            // set unifier inputs once, regardless of how many downstream operators there are
            for (PTOutput out : unifierSources) {
              addInput(this.finalUnifier, out, null);
            }
          }
        } else {
          // MxN partitioning: unifier per downstream partition
          LOG.debug("MxN unifier for {} {} {}", new Object[] { doperEntry.first, doperEntry.second.getPortName(), pks });
          PTOperator unifier = doperEntry.first.upstreamMerge.get(doperEntry.second);
          if (unifier == null) {
            unifier = createUnifier(streamMeta, plan);
            doperEntry.first.upstreamMerge.put(doperEntry.second, unifier);
            setInput(doperEntry.first, doperEntry.second, unifier, null);
          }
          // sources may change dynamically, rebuild inputs (as for cascading unifiers)
          for (PTInput in : unifier.inputs) {
            in.source.sinks.remove(in);
          }
          unifier.inputs.clear();
          List<PTOutput> doperUnifierSources = unifierSources;
          if (separateUnifiers) {
            List<PTOutput> cascadeSources = cascadeUnifierSourcesMap.get(doperEntry.second.getStreamCodec());
            if (cascadeSources != null) {
              doperUnifierSources = cascadeSources;
            }
          }
          // add new inputs
          for (PTOutput out : doperUnifierSources) {
            addInput(unifier, out, (pks == null) || (pks.mask == 0) ? null : pks);
          }
        }
      } else {
        // no partitioning
        PTOperator unifier = doperEntry.first.upstreamMerge.remove(doperEntry.second);
        if (unifier != null) {
          plan.removePTOperator(unifier);
        }
        setInput(doperEntry.first, doperEntry.second, upstream.get(0).source, pks);
      }
    }
    // remove a dangling final unifier, e.g., when downstream partitions were scaled up
    // from one to multiple and it was replaced by per-partition (MxN) unifiers
    if (finalUnifier != null && finalUnifier.inputs.isEmpty()) {
      plan.removePTOperator(finalUnifier);
      finalUnifier = null;
    }
  }
}
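When redoMapping rebuilds a unifier's inputs, each PTInput is unlinked from both ends: removed from its source PTOutput's sink list and cleared from the target's input list. That bidirectional unlink is the invariant behind every rewiring step in this class; a minimal sketch with a hypothetical helper name:

private static void detachEdge(PTInput in) {
  // a PTInput lives in two collections, the source PTOutput's sinks and the
  // target PTOperator's inputs; remove it from both to keep the plan consistent
  in.source.sinks.remove(in);
  in.target.inputs.remove(in);
}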