Use of com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta in project apex-core by apache.
In the class PhysicalPlanTest, the method testInputOperatorPartitioning:
/**
* Test partitioning of an input operator (no input port).
* Cover aspects that are not part of generic operator test.
* Test scaling from one to multiple partitions with unifier when one partition remains unmodified.
*/
@Test
public void testInputOperatorPartitioning() {
  LogicalPlan dag = new LogicalPlan();
  final TestInputOperator<Object> o1 = dag.addOperator("o1", new TestInputOperator<>());
  GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
  dag.addStream("o1.outport1", o1.output, o2.inport1);
  OperatorMeta o1Meta = dag.getMeta(o1);
  dag.setOperatorAttribute(o1, OperatorContext.STATS_LISTENERS, Arrays.asList(new StatsListener[] { new PartitioningTest.PartitionLoadWatch() }));
  TestPartitioner<TestInputOperator<Object>> partitioner = new TestPartitioner<>();
  dag.setOperatorAttribute(o1, OperatorContext.PARTITIONER, partitioner);

  TestPlanContext ctx = new TestPlanContext();
  dag.setAttribute(OperatorContext.STORAGE_AGENT, ctx);
  PhysicalPlan plan = new PhysicalPlan(dag, ctx);
  Assert.assertEquals("number of containers", 2, plan.getContainers().size());

  List<PTOperator> o1Partitions = plan.getOperators(o1Meta);
  Assert.assertEquals("partitions " + o1Partitions, 1, o1Partitions.size());
  PTOperator o1p1 = o1Partitions.get(0);

  // verify load update generates expected events per configuration
  Assert.assertEquals("stats handlers " + o1p1, 1, o1p1.statsListeners.size());
  StatsListener l = o1p1.statsListeners.get(0);
  Assert.assertTrue("stats handlers " + o1p1.statsListeners, l instanceof PartitioningTest.PartitionLoadWatch);
  PartitioningTest.PartitionLoadWatch.put(o1p1, 1);
  plan.onStatusUpdate(o1p1);
  Assert.assertEquals("scale up triggered", 1, ctx.events.size());

  // add another partition, keep existing as is
  partitioner.extraPartitions.add(new DefaultPartition<>(o1));
  Runnable r = ctx.events.remove(0);
  r.run();
  partitioner.extraPartitions.clear();

  o1Partitions = plan.getOperators(o1Meta);
  Assert.assertEquals("operators after scale up", 2, o1Partitions.size());
  Assert.assertEquals("first partition unmodified", o1p1, o1Partitions.get(0));
  Assert.assertEquals("single output", 1, o1p1.getOutputs().size());
  Assert.assertEquals("output to unifier", 1, o1p1.getOutputs().get(0).sinks.size());

  Set<PTOperator> expUndeploy = Sets.newHashSet(plan.getOperators(dag.getMeta(o2)));
  Set<PTOperator> expDeploy = Sets.newHashSet(o1Partitions.get(1));
  expDeploy.addAll(plan.getMergeOperators(dag.getMeta(o1)));
  expDeploy.addAll(expUndeploy);
  expDeploy.add(o1p1.getOutputs().get(0).sinks.get(0).target);
  Assert.assertEquals("undeploy", expUndeploy, ctx.undeploy);
  Assert.assertEquals("deploy", expDeploy, ctx.deploy);

  for (PTOperator p : o1Partitions) {
    Assert.assertEquals("activation window id " + p, Checkpoint.INITIAL_CHECKPOINT, p.recoveryCheckpoint);
    Assert.assertEquals("checkpoints " + p + " " + p.checkpoints, Lists.newArrayList(), p.checkpoints);
    PartitioningTest.PartitionLoadWatch.put(p, -1);
    plan.onStatusUpdate(p);
  }
  ctx.events.remove(0).run();
  Assert.assertEquals("operators after scale down", 1, plan.getOperators(o1Meta).size());
}
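The scale-up path exercised above is driven by the PARTITIONER attribute: the PartitionLoadWatch stats listener flags load, onStatusUpdate queues a repartition event, and running that event asks the partitioner for a new partition set. Below is a minimal sketch of a partitioner that, like the TestPartitioner used in the test, keeps the existing partitions untouched and appends one more. The class name AddOnePartitioner is illustrative; only the com.datatorrent.api types are from the framework.

import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;

import com.datatorrent.api.DefaultPartition;
import com.datatorrent.api.Operator;
import com.datatorrent.api.Partitioner;

public class AddOnePartitioner<T extends Operator> implements Partitioner<T> {
  @Override
  public Collection<Partition<T>> definePartitions(Collection<Partition<T>> partitions, PartitioningContext context) {
    // keep the current partitions untouched (the test asserts the first partition survives unchanged)
    ArrayList<Partition<T>> newPartitions = new ArrayList<>(partitions);
    // append one more partition based on an existing operator instance, mirroring
    // partitioner.extraPartitions.add(new DefaultPartition<>(o1)) in the test above
    T operator = partitions.iterator().next().getPartitionedInstance();
    newPartitions.add(new DefaultPartition<>(operator));
    return newPartitions;
  }

  @Override
  public void partitioned(Map<Integer, Partition<T>> partitions) {
    // nothing to reconcile after the physical plan applies the new partitioning
  }
}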
Use of com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta in project apex-core by apache.
In the class LogicalPlanConfiguration, the method setOperatorProperties:
/**
* Set any properties from configuration on the operators in the DAG. This
* method may throw an unchecked exception if the configuration contains
* properties that are invalid for an operator.
*
* @param dag
* @param applicationName
*/
public void setOperatorProperties(LogicalPlan dag, String applicationName) {
  List<AppConf> appConfs = stramConf.getMatchingChildConf(applicationName, StramElement.APPLICATION);
  for (OperatorMeta ow : dag.getAllOperators()) {
    List<OperatorConf> opConfs = getMatchingChildConf(appConfs, ow.getName(), StramElement.OPERATOR);
    Map<String, String> opProps = getProperties(getPropertyArgs(ow), opConfs, applicationName);
    setOperatorProperties(ow.getGenericOperator(), opProps);
  }
}
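As a rough usage sketch (not part of the source above), this method is typically fed by a Hadoop Configuration whose keys follow the dt.operator.<name>.prop.<property> convention. The key layout, the property name myStringField, the LogicalPlanConfiguration(Configuration) constructor, and the GenericTestOperator import path are assumptions here, used only for illustration.

import org.apache.hadoop.conf.Configuration;

import com.datatorrent.stram.engine.GenericTestOperator;
import com.datatorrent.stram.plan.logical.LogicalPlan;
import com.datatorrent.stram.plan.logical.LogicalPlanConfiguration;

public class OperatorPropertiesSketch {
  public static void main(String[] args) {
    // hypothetical property key; assumes the dt.operator.<name>.prop.<property> convention
    Configuration conf = new Configuration(false);
    conf.set("dt.operator.o2.prop.myStringField", "hello");

    LogicalPlan dag = new LogicalPlan();
    dag.addOperator("o2", new GenericTestOperator()); // test operator from the example above

    // assumes the LogicalPlanConfiguration(Configuration) constructor
    LogicalPlanConfiguration lpc = new LogicalPlanConfiguration(conf);
    lpc.setOperatorProperties(dag, "myApp"); // applies matching properties to each OperatorMeta's operator
  }
}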
Use of com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta in project apex-core by apache.
In the class LogicalPlanConfiguration, the method setOperatorConfiguration:
private void setOperatorConfiguration(final LogicalPlan dag, List<AppConf> appConfs, String appName) {
  for (final OperatorMeta ow : dag.getAllOperators()) {
    List<OperatorConf> opConfs = getMatchingChildConf(appConfs, ow.getName(), StramElement.OPERATOR);
    // Set the operator attributes
    setAttributes(opConfs, ow.getAttributes());
    // Set the operator properties
    Map<String, String> opProps = getProperties(getPropertyArgs(ow), opConfs, appName);
    setOperatorProperties(ow.getOperator(), opProps);
    // Set the port attributes
    for (Entry<LogicalPlan.InputPortMeta, LogicalPlan.StreamMeta> entry : ow.getInputStreams().entrySet()) {
      final InputPortMeta im = entry.getKey();
      List<PortConf> inPortConfs = getMatchingChildConf(opConfs, im.getPortName(), StramElement.INPUT_PORT);
      // Add the generic port attributes as well
      List<PortConf> portConfs = getMatchingChildConf(opConfs, im.getPortName(), StramElement.PORT);
      inPortConfs.addAll(portConfs);
      setAttributes(inPortConfs, im.getAttributes());
    }
    for (Entry<LogicalPlan.OutputPortMeta, LogicalPlan.StreamMeta> entry : ow.getOutputStreams().entrySet()) {
      final OutputPortMeta om = entry.getKey();
      List<PortConf> outPortConfs = getMatchingChildConf(opConfs, om.getPortName(), StramElement.OUTPUT_PORT);
      // Add the generic port attributes as well
      List<PortConf> portConfs = getMatchingChildConf(opConfs, om.getPortName(), StramElement.PORT);
      outPortConfs.addAll(portConfs);
      setAttributes(outPortConfs, om.getAttributes());
      List<OperatorConf> unifConfs = getMatchingChildConf(outPortConfs, null, StramElement.UNIFIER);
      if (unifConfs.size() != 0) {
        setAttributes(unifConfs, om.getUnifierMeta().getAttributes());
      }
    }
    ow.populateAggregatorMeta();
  }
}
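The operator and port attributes that this method resolves from configuration can also be set programmatically on the DAG, which is often easier to follow. The sketch below shows that programmatic equivalent for two common attributes; the operator name, the chosen attribute values, and the GenericTestOperator import path are illustrative assumptions, while the attribute constants and DAG setters are the ones used elsewhere in this listing or in the Apex API.

import com.datatorrent.api.Context.OperatorContext;
import com.datatorrent.api.Context.PortContext;
import com.datatorrent.stram.engine.GenericTestOperator;
import com.datatorrent.stram.plan.logical.LogicalPlan;

public class AttributeSketch {
  public static void main(String[] args) {
    LogicalPlan dag = new LogicalPlan();
    GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);

    // operator-level attribute (roughly what an operator-scoped attr entry in configuration resolves to)
    dag.setOperatorAttribute(o2, OperatorContext.MEMORY_MB, 512);

    // port-level attribute (roughly what an inputport/port-scoped attr entry resolves to)
    dag.setInputPortAttribute(o2.inport1, PortContext.QUEUE_CAPACITY, 4096);
  }
}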
Use of com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta in project apex-core by apache.
In the class PhysicalPlan, the method updatePersistOperatorStreamCodec:
private void updatePersistOperatorStreamCodec(LogicalPlan dag) {
  HashMap<StreamMeta, StreamCodec<?>> streamMetaToCodecMap = new HashMap<>();
  try {
    for (OperatorMeta n : dag.getAllOperators()) {
      for (StreamMeta s : n.getOutputStreams().values()) {
        if (s.getPersistOperator() != null) {
          Map<InputPortMeta, StreamCodec<?>> inputStreamCodecs = new HashMap<>();
          // Logging is enabled for the stream
          for (InputPortMeta portMeta : s.getSinksToPersist()) {
            StreamCodec<?> inputStreamCodec = portMeta.getStreamCodec();
            if (inputStreamCodec != null) {
              boolean alreadyAdded = false;
              for (StreamCodec<?> codec : inputStreamCodecs.values()) {
                if (inputStreamCodec.equals(codec)) {
                  alreadyAdded = true;
                  break;
                }
              }
              if (!alreadyAdded) {
                inputStreamCodecs.put(portMeta, inputStreamCodec);
              }
            }
          }
          if (inputStreamCodecs.isEmpty()) {
            // Stream codec not specified
            // So everything out of Source should be captured without any
            // StreamCodec
            // Do nothing
          } else {
            // Create Wrapper codec for Stream persistence using all unique
            // stream codecs
            // Logger should write merged or union of all input stream codecs
            StreamCodec<?> specifiedCodecForLogger = s.getPersistOperatorInputPort().getStreamCodec();
            @SuppressWarnings({ "unchecked", "rawtypes" })
            StreamCodecWrapperForPersistance<Object> codec = new StreamCodecWrapperForPersistance(inputStreamCodecs, specifiedCodecForLogger);
            streamMetaToCodecMap.put(s, codec);
          }
        }
      }
    }
    for (java.util.Map.Entry<StreamMeta, StreamCodec<?>> entry : streamMetaToCodecMap.entrySet()) {
      dag.setInputPortAttribute(entry.getKey().getPersistOperatorInputPort().getPort(), PortContext.STREAM_CODEC, entry.getValue());
    }
  } catch (Exception e) {
    throw Throwables.propagate(e);
  }
}
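This codec wrapping only applies to streams that have a persist operator attached. A rough sketch of attaching one is shown below; it assumes a StreamMeta.persistUsing(name, operator, inputPort) overload on the DAG API, and it reuses GenericTestOperator (import path assumed) as a stand-in for a real persist operator, so treat it as illustrative rather than as the project's documented persistence API.

import com.datatorrent.stram.engine.GenericTestOperator;
import com.datatorrent.stram.plan.logical.LogicalPlan;

public class PersistStreamSketch {
  public static void main(String[] args) {
    LogicalPlan dag = new LogicalPlan();
    GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
    GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
    GenericTestOperator persister = dag.addOperator("persister", GenericTestOperator.class); // stand-in for a persist operator

    // attach a persist operator to the stream; updatePersistOperatorStreamCodec later wraps the
    // sink codecs into a StreamCodecWrapperForPersistance and sets it on the persister's input port
    dag.addStream("o1.outport1", o1.outport1, o2.inport1)
        .persistUsing("persister", persister, persister.inport1); // persistUsing overload is an assumption
  }
}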
Use of com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta in project apex-core by apache.
In the class PhysicalPlan, the method updateStreamMappings:
private void updateStreamMappings(PMapping m) {
  for (Map.Entry<OutputPortMeta, StreamMeta> opm : m.logicalOperator.getOutputStreams().entrySet()) {
    StreamMapping ug = m.outputStreams.get(opm.getKey());
    if (ug == null) {
      ug = new StreamMapping(opm.getValue(), this);
      m.outputStreams.put(opm.getKey(), ug);
    }
    LOG.debug("update stream mapping for {} {}", opm.getKey().getOperatorMeta(), opm.getKey().getPortName());
    ug.setSources(m.partitions);
  }
  for (Map.Entry<InputPortMeta, StreamMeta> ipm : m.logicalOperator.getInputStreams().entrySet()) {
    PMapping sourceMapping = this.logicalToPTOperator.get(ipm.getValue().getSource().getOperatorMeta());
    if (ipm.getValue().getSource().getOperatorMeta().getOperator() instanceof Operator.DelayOperator) {
      // skip if the source is a DelayOperator
      continue;
    }
    if (ipm.getKey().getValue(PortContext.PARTITION_PARALLEL)) {
      if (sourceMapping.partitions.size() < m.partitions.size()) {
        throw new AssertionError("Number of partitions don't match in parallel mapping " + sourceMapping.logicalOperator.getName() + " -> " + m.logicalOperator.getName() + ", " + sourceMapping.partitions.size() + " -> " + m.partitions.size());
      }
      int slidingWindowCount = 0;
      OperatorMeta sourceOM = sourceMapping.logicalOperator;
      if (sourceOM.getAttributes().contains(Context.OperatorContext.SLIDE_BY_WINDOW_COUNT)) {
        if (sourceOM.getValue(Context.OperatorContext.SLIDE_BY_WINDOW_COUNT) < sourceOM.getValue(Context.OperatorContext.APPLICATION_WINDOW_COUNT)) {
          slidingWindowCount = sourceOM.getValue(OperatorContext.SLIDE_BY_WINDOW_COUNT);
        } else {
          LOG.warn("Sliding Window Count {} should be less than APPLICATION WINDOW COUNT {}", sourceOM.getValue(Context.OperatorContext.SLIDE_BY_WINDOW_COUNT), sourceOM.getValue(Context.OperatorContext.APPLICATION_WINDOW_COUNT));
        }
      }
      for (int i = 0; i < m.partitions.size(); i++) {
        PTOperator oper = m.partitions.get(i);
        PTOperator sourceOper = sourceMapping.partitions.get(i);
        for (PTOutput sourceOut : sourceOper.outputs) {
          nextSource: if (sourceOut.logicalStream == ipm.getValue()) {
            // avoid duplicate entries in case of parallel partitions
            for (PTInput sinkIn : sourceOut.sinks) {
              // match both the target operator and the input-port-meta currently being looked at,
              // since we allow an output port to connect to multiple inputs of the same operator.
              if (sinkIn.target == oper && sinkIn.portName.equals(ipm.getKey().getPortName())) {
                break nextSource;
              }
            }
            PTInput input;
            if (slidingWindowCount > 0) {
              PTOperator slidingUnifier = StreamMapping.createSlidingUnifier(sourceOut.logicalStream, this, sourceOM.getValue(Context.OperatorContext.APPLICATION_WINDOW_COUNT), slidingWindowCount);
              StreamMapping.addInput(slidingUnifier, sourceOut, null);
              input = new PTInput(ipm.getKey().getPortName(), ipm.getValue(), oper, null, slidingUnifier.outputs.get(0), ipm.getKey().getValue(LogicalPlan.IS_CONNECTED_TO_DELAY_OPERATOR));
              sourceMapping.outputStreams.get(ipm.getValue().getSource()).slidingUnifiers.add(slidingUnifier);
            } else {
              input = new PTInput(ipm.getKey().getPortName(), ipm.getValue(), oper, null, sourceOut, ipm.getKey().getValue(LogicalPlan.IS_CONNECTED_TO_DELAY_OPERATOR));
            }
            oper.inputs.add(input);
          }
        }
      }
    } else {
      StreamMapping ug = sourceMapping.outputStreams.get(ipm.getValue().getSource());
      if (ug == null) {
        ug = new StreamMapping(ipm.getValue(), this);
        m.outputStreams.put(ipm.getValue().getSource(), ug);
      }
      LOG.debug("update upstream stream mapping for {} {}", sourceMapping.logicalOperator, ipm.getValue().getSource().getPortName());
      ug.setSources(sourceMapping.partitions);
    }
  }
}
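Two DAG attributes steer the branches above: PortContext.PARTITION_PARALLEL on an input port selects the 1:1 parallel-partition path, and OperatorContext.SLIDE_BY_WINDOW_COUNT on the upstream operator (when smaller than APPLICATION_WINDOW_COUNT) causes a sliding unifier to be inserted between partitions. A minimal sketch of setting them follows; operator names and attribute values are placeholders, and the GenericTestOperator import path is an assumption based on the test example at the top of this page.

import com.datatorrent.api.Context.OperatorContext;
import com.datatorrent.api.Context.PortContext;
import com.datatorrent.stram.engine.GenericTestOperator;
import com.datatorrent.stram.plan.logical.LogicalPlan;

public class StreamMappingAttributesSketch {
  public static void main(String[] args) {
    LogicalPlan dag = new LogicalPlan();
    GenericTestOperator o1 = dag.addOperator("o1", GenericTestOperator.class);
    GenericTestOperator o2 = dag.addOperator("o2", GenericTestOperator.class);
    dag.addStream("o1.outport1", o1.outport1, o2.inport1);

    // o2 partitions 1:1 with o1, so updateStreamMappings wires partition i of o1 to partition i of o2
    dag.setInputPortAttribute(o2.inport1, PortContext.PARTITION_PARALLEL, true);

    // slide-by window smaller than the application window, so updateStreamMappings
    // inserts a sliding unifier between the upstream and downstream partitions
    dag.setOperatorAttribute(o1, OperatorContext.APPLICATION_WINDOW_COUNT, 4);
    dag.setOperatorAttribute(o1, OperatorContext.SLIDE_BY_WINDOW_COUNT, 1);
  }
}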