
Example 1 with OperatorDeployInfo

Use of com.datatorrent.stram.api.OperatorDeployInfo in project apex-core by apache.

From the class OutputUnifiedTest, method testOutputAttribute.

private void testOutputAttribute(LogicalPlan dag, Operator operator, StreamingContainerManager scm, PhysicalPlan physicalPlan, boolean result) {
    List<PTOperator> ptOperators = physicalPlan.getOperators(dag.getMeta(operator));
    for (PTOperator ptOperator : ptOperators) {
        PTContainer container = ptOperator.getContainer();
        StreamingContainerAgent agent = scm.getContainerAgent("container" + container.getId());
        List<OperatorDeployInfo> deployInfoList = agent.getDeployInfoList(container.getOperators());
        Assert.assertEquals("Deploy info size", 1, deployInfoList.size());
        Assert.assertEquals("Is output unified", deployInfoList.get(0).outputs.get(0).getAttributes().get(PortContext.IS_OUTPUT_UNIFIED), result);
    }
}
Also used: OperatorDeployInfo(com.datatorrent.stram.api.OperatorDeployInfo) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) PTContainer(com.datatorrent.stram.plan.physical.PTContainer)
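
A minimal sketch of how this helper might be driven from within the same test class, using the test-support classes listed under Aggregations below (GenericTestOperator, MemoryStorageAgent). The dag wiring, the container resource values, and the expected flag are hypothetical, not taken from OutputUnifiedTest:

LogicalPlan dag = new LogicalPlan();
dag.setAttribute(OperatorContext.STORAGE_AGENT, new StramTestSupport.MemoryStorageAgent());
GenericTestOperator op1 = dag.addOperator("op1", GenericTestOperator.class);
GenericTestOperator op2 = dag.addOperator("op2", GenericTestOperator.class);
dag.addStream("s1", op1.outport1, op2.inport1);
StreamingContainerManager scm = new StreamingContainerManager(dag);
PhysicalPlan physicalPlan = scm.getPhysicalPlan();
// Containers must be assigned before getContainerAgent("container" + id) can
// resolve them; the resource numbers here are placeholders.
for (PTContainer c : physicalPlan.getContainers()) {
    scm.assignContainer(new ContainerResource(0, "container" + c.getId(), "localhost", 1024, 0, null),
        InetSocketAddress.createUnresolved("localhost", 0));
}
// With no partitioning there is no unifier, so the flag is expected to be false.
testOutputAttribute(dag, op1, scm, physicalPlan, false);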

Example 2 with OperatorDeployInfo

Use of com.datatorrent.stram.api.OperatorDeployInfo in project apex-core by apache.

From the class StreamingContainerAgent, method createOperatorDeployInfo.

/**
   * Create deploy info for an operator.
   * <p>
   *
   * @return {@link com.datatorrent.stram.api.OperatorDeployInfo}
   */
private OperatorDeployInfo createOperatorDeployInfo(PTOperator oper) {
    OperatorDeployInfo ndi;
    if (oper.isUnifier()) {
        UnifierDeployInfo udi = new UnifierDeployInfo();
        /* the constructor auto sets the type */
        try {
            udi.operatorAttributes = oper.getUnifiedOperatorMeta().getAttributes().clone();
        } catch (CloneNotSupportedException ex) {
            throw new RuntimeException("Cannot clone unifier attributes", ex);
        }
        ndi = udi;
    } else {
        ndi = new OperatorDeployInfo();
        Operator operator = oper.getOperatorMeta().getOperator();
        if (operator instanceof InputOperator) {
            ndi.type = OperatorType.INPUT;
            if (!oper.getInputs().isEmpty()) {
                //check whether any input port is connected, which would make this a GENERIC operator.
                for (PTOperator.PTInput ptInput : oper.getInputs()) {
                    if (ptInput.logicalStream != null && ptInput.logicalStream.getSource() != null) {
                        ndi.type = OperatorType.GENERIC;
                        break;
                    }
                }
            }
        } else {
            ndi.type = OperatorType.GENERIC;
        }
    }
    Checkpoint checkpoint = oper.getRecoveryCheckpoint();
    ProcessingMode pm = oper.getOperatorMeta().getValue(OperatorContext.PROCESSING_MODE);
    if (pm == ProcessingMode.AT_MOST_ONCE || pm == ProcessingMode.EXACTLY_ONCE) {
        // TODO: following should be handled in the container at deploy time
        // for exactly once container should also purge previous checkpoint
        // whenever new checkpoint is written.
        StorageAgent agent = oper.getOperatorMeta().getAttributes().get(OperatorContext.STORAGE_AGENT);
        if (agent == null) {
            agent = initCtx.getValue(OperatorContext.STORAGE_AGENT);
        }
        // pick checkpoint most recently written
        try {
            long[] windowIds = agent.getWindowIds(oper.getId());
            long checkpointId = Stateless.WINDOW_ID;
            for (long windowId : windowIds) {
                if (windowId > checkpointId) {
                    checkpointId = windowId;
                }
            }
            if (checkpoint == null || checkpoint.windowId != checkpointId) {
                checkpoint = new Checkpoint(checkpointId, 0, 0);
            }
        } catch (Exception e) {
            throw new RuntimeException("Failed to determine checkpoint window id " + oper, e);
        }
    }
    LOG.debug("{} recovery checkpoint {}", oper, checkpoint);
    ndi.checkpoint = checkpoint;
    ndi.name = oper.getOperatorMeta().getName();
    ndi.id = oper.getId();
    try {
        // clone map before modifying it
        ndi.contextAttributes = oper.getOperatorMeta().getAttributes().clone();
    } catch (CloneNotSupportedException ex) {
        throw new RuntimeException("Cannot clone operator attributes", ex);
    }
    if (oper.isOperatorStateLess()) {
        ndi.contextAttributes.put(OperatorContext.STATELESS, true);
    }
    return ndi;
}
Also used: Operator(com.datatorrent.api.Operator) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) InputOperator(com.datatorrent.api.InputOperator) OperatorDeployInfo(com.datatorrent.stram.api.OperatorDeployInfo) ProcessingMode(com.datatorrent.api.Operator.ProcessingMode) UnifierDeployInfo(com.datatorrent.stram.api.OperatorDeployInfo.UnifierDeployInfo) Checkpoint(com.datatorrent.stram.api.Checkpoint) StorageAgent(com.datatorrent.api.StorageAgent)
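
The AT_MOST_ONCE/EXACTLY_ONCE branch above reduces to a max scan over the window ids the storage agent has persisted. A standalone restatement of that selection rule, with illustrative names rather than the real API:

/**
 * Picks the most recently written checkpoint window. Window ids increase
 * monotonically, so the largest persisted id is the newest checkpoint; if the
 * agent has written nothing newer than the stateless marker, the operator
 * effectively restarts from Stateless.WINDOW_ID.
 */
static long latestCheckpointWindowId(long[] persistedWindowIds, long statelessWindowId) {
    long latest = statelessWindowId;
    for (long windowId : persistedWindowIds) {
        if (windowId > latest) {
            latest = windowId;
        }
    }
    return latest;
}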

Example 3 with OperatorDeployInfo

Use of com.datatorrent.stram.api.OperatorDeployInfo in project apex-core by apache.

From the class StreamingContainer, method activate.

public synchronized void activate(final Map<Integer, OperatorDeployInfo> nodeMap, Map<String, ComponentContextPair<Stream, StreamContext>> newStreams) {
    for (ComponentContextPair<Stream, StreamContext> pair : newStreams.values()) {
        activeStreams.put(pair.component, pair.context);
        pair.component.activate(pair.context);
        eventBus.publish(new StreamActivationEvent(pair));
    }
    for (final OperatorDeployInfo ndi : nodeMap.values()) {
        /*
       * OiO nodes get activated with their primary nodes.
       */
        if (ndi.type == OperatorType.OIO) {
            continue;
        }
        final Node<?> node = nodes.get(ndi.id);
        final String name = new StringBuilder(Integer.toString(ndi.id)).append('/').append(ndi.name).append(':').append(node.getOperator().getClass().getSimpleName()).toString();
        final Thread thread = new Thread(name) {

            @Override
            public void run() {
                HashSet<OperatorDeployInfo> setOperators = new HashSet<>();
                OperatorDeployInfo currentdi = ndi;
                try {
                    /* primary operator initialization */
                    setupNode(currentdi);
                    setOperators.add(currentdi);
                    /* lets go for OiO operator initialization */
                    List<Integer> oioNodeIdList = oioGroups.get(ndi.id);
                    if (oioNodeIdList != null) {
                        for (Integer oioNodeId : oioNodeIdList) {
                            currentdi = nodeMap.get(oioNodeId);
                            setupNode(currentdi);
                            setOperators.add(currentdi);
                        }
                    }
                    currentdi = null;
                    /* this is a blocking call */
                    node.run();
                } catch (Error error) {
                    int[] operators;
                    //fetch logFileInfo before logging the exception, to capture the log offset prior to the error
                    LogFileInformation logFileInfo = LoggerUtil.getLogFileInformation();
                    if (currentdi == null) {
                        logger.error("Voluntary container termination due to an error in operator set {}.", setOperators, error);
                        operators = new int[setOperators.size()];
                        int i = 0;
                        for (Iterator<OperatorDeployInfo> it = setOperators.iterator(); it.hasNext(); i++) {
                            operators[i] = it.next().id;
                        }
                    } else {
                        logger.error("Voluntary container termination due to an error in operator {}.", currentdi, error);
                        operators = new int[] { currentdi.id };
                    }
                    try {
                        umbilical.reportError(containerId, operators, "Voluntary container termination due to an error. " + ExceptionUtils.getStackTrace(error), logFileInfo);
                    } catch (Exception e) {
                        logger.debug("Fail to log", e);
                    } finally {
                        System.exit(1);
                    }
                } catch (Exception ex) {
                    //fetch logFileInfo before logging the exception, to capture the log offset prior to the error
                    LogFileInformation logFileInfo = LoggerUtil.getLogFileInformation();
                    if (currentdi == null) {
                        failedNodes.add(ndi.id);
                        logger.error("Operator set {} stopped running due to an exception.", setOperators, ex);
                        int[] operators = new int[] { ndi.id };
                        try {
                            umbilical.reportError(containerId, operators, "Stopped running due to an exception. " + ExceptionUtils.getStackTrace(ex), logFileInfo);
                        } catch (Exception e) {
                            logger.debug("Fail to log", e);
                        }
                    } else {
                        failedNodes.add(currentdi.id);
                        logger.error("Abandoning deployment of operator {} due to setup failure.", currentdi, ex);
                        int[] operators = new int[] { currentdi.id };
                        try {
                            umbilical.reportError(containerId, operators, "Abandoning deployment due to setup failure. " + ExceptionUtils.getStackTrace(ex), logFileInfo);
                        } catch (Exception e) {
                            logger.debug("Fail to log", e);
                        }
                    }
                } finally {
                    if (setOperators.contains(ndi)) {
                        try {
                            teardownNode(ndi);
                        } catch (Exception ex) {
                            failedNodes.add(ndi.id);
                            logger.error("Shutdown of operator {} failed due to an exception.", ndi, ex);
                        }
                    }
                    List<Integer> oioNodeIdList = oioGroups.get(ndi.id);
                    if (oioNodeIdList != null) {
                        for (Integer oioNodeId : oioNodeIdList) {
                            OperatorDeployInfo oiodi = nodeMap.get(oioNodeId);
                            if (setOperators.contains(oiodi)) {
                                try {
                                    teardownNode(oiodi);
                                } catch (Exception ex) {
                                    failedNodes.add(oiodi.id);
                                    logger.error("Shutdown of operator {} failed due to an exception.", oiodi, ex);
                                }
                            }
                        }
                    }
                }
            }
        };
        node.context.setThread(thread);
        List<Integer> oioNodeIdList = oioGroups.get(ndi.id);
        if (oioNodeIdList != null) {
            for (Integer oioNodeId : oioNodeIdList) {
                Node<?> oioNode = nodes.get(oioNodeId);
                oioNode.context.setThread(thread);
            }
        }
        thread.start();
    }
    for (WindowGenerator wg : generators.values()) {
        if (!activeGenerators.containsKey(wg)) {
            activeGenerators.put(wg, generators);
            wg.activate(null);
        }
    }
}
Also used: OperatorDeployInfo(com.datatorrent.stram.api.OperatorDeployInfo) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) StreamActivationEvent(com.datatorrent.stram.api.ContainerEvent.StreamActivationEvent) LogFileInformation(org.apache.apex.log.LogFileInformation) Iterator(java.util.Iterator) OiOStream(com.datatorrent.stram.stream.OiOStream) InlineStream(com.datatorrent.stram.stream.InlineStream) MuxStream(com.datatorrent.stram.stream.MuxStream) HashSet(java.util.HashSet)
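
The worker thread name assembled with the StringBuilder chain above follows the pattern <id>/<name>:<SimpleClassName>; a behaviorally equivalent one-liner, shown only for clarity (the example values in the comment are hypothetical):

// e.g. "3/wordReader:LineReaderOperator" for operator id 3
String threadName = String.format("%d/%s:%s",
    ndi.id, ndi.name, node.getOperator().getClass().getSimpleName());

Note the failure policy in the catch blocks: a java.lang.Error is treated as fatal and terminates the whole container via System.exit(1) after reporting, while an Exception only records the affected operator ids in failedNodes and leaves the container running.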

Example 4 with OperatorDeployInfo

Use of com.datatorrent.stram.api.OperatorDeployInfo in project apex-core by apache.

From the class StreamingContainer, method deployInputStreams.

@SuppressWarnings("unchecked")
private void deployInputStreams(List<OperatorDeployInfo> operatorList, HashMap<String, ComponentContextPair<Stream, StreamContext>> newStreams) throws UnknownHostException {
    /*
     * collect any input operators along with their smallest window id,
     * those are subsequently used to setup the window generator
     */
    ArrayList<OperatorDeployInfo> inputNodes = new ArrayList<>();
    long smallestCheckpointedWindowId = Long.MAX_VALUE;
    //a simple map from each OiO node to the node that owns its thread.
    Map<Integer, Integer> oioNodes = new ConcurrentHashMap<>();
    /*
     * Hook up all the downstream ports. There are two places where we deal with more than
     * one downstream port. The first follows immediately for the WindowGenerator. The
     * second is when the source for an input port of some node in this container lives in
     * another container, so we need to create the stream. We track such streams along with
     * the others; since many of them may exist, we hash them against the buffer server
     * info, as we did for outputs, but mix the sink id in as well.
     */
    for (OperatorDeployInfo ndi : operatorList) {
        if (ndi.inputs == null || ndi.inputs.isEmpty()) {
            /*
         * This has to be an InputNode, so let's hook the WindowGenerator to it.
         * A node that takes no input at all cannot exist in the DAG, since it would be
         * completely unaware of the windows; for that reason AbstractInputNode allows the
         * Component.INPUT port.
         */
            inputNodes.add(ndi);
            /*
         * When we activate the WindowGenerator, we activate it only from the required windowId.
         */
            ndi.checkpoint = getFinishedCheckpoint(ndi);
            if (ndi.checkpoint.windowId < smallestCheckpointedWindowId) {
                smallestCheckpointedWindowId = ndi.checkpoint.windowId;
            }
        } else {
            Node<?> node = nodes.get(ndi.id);
            for (OperatorDeployInfo.InputDeployInfo nidi : ndi.inputs) {
                if (nidi.streamCodecs.size() != 1) {
                    throw new IllegalStateException("Only one input codec configuration should be present");
                }
                Map.Entry<Integer, StreamCodec<?>> entry = nidi.streamCodecs.entrySet().iterator().next();
                Integer streamCodecIdentifier = entry.getKey();
                StreamCodec<?> streamCodec = entry.getValue();
                String sourceIdentifier = Integer.toString(nidi.sourceNodeId).concat(Component.CONCAT_SEPARATOR).concat(nidi.sourcePortName);
                String sinkIdentifier = Integer.toString(ndi.id).concat(Component.CONCAT_SEPARATOR).concat(nidi.portName);
                int queueCapacity = getValue(PortContext.QUEUE_CAPACITY, nidi, ndi);
                Checkpoint checkpoint = getFinishedCheckpoint(ndi);
                ComponentContextPair<Stream, StreamContext> pair = streams.get(sourceIdentifier);
                if (pair == null) {
                    pair = newStreams.get(sourceIdentifier);
                }
                if (pair == null) {
                    /*
             * We connect to the buffer server for the input on this port. We have already
             * placed all the output streams for all the operators in this container, yet no
             * stream can source this port, so it has to come from the buffer server; let's
             * make a connection to it.
             */
                    assert (nidi.locality != Locality.CONTAINER_LOCAL && nidi.locality != Locality.THREAD_LOCAL);
                    StreamContext context = new StreamContext(nidi.declaredStreamId);
                    context.setBufferServerAddress(InetSocketAddress.createUnresolved(nidi.bufferServerHost, nidi.bufferServerPort));
                    InetAddress inetAddress = context.getBufferServerAddress().getAddress();
                    if (inetAddress != null && NetUtils.isLocalAddress(inetAddress)) {
                        context.setBufferServerAddress(new InetSocketAddress(InetAddress.getByName(null), nidi.bufferServerPort));
                    }
                    context.put(StreamContext.BUFFER_SERVER_TOKEN, nidi.bufferServerToken);
                    String connIdentifier = sourceIdentifier + Component.CONCAT_SEPARATOR + streamCodecIdentifier;
                    context.setPortId(nidi.portName);
                    context.put(StreamContext.CODEC, streamCodec);
                    context.put(StreamContext.EVENT_LOOP, eventloop);
                    context.setPartitions(nidi.partitionMask, nidi.partitionKeys);
                    //context.setSourceId(sourceIdentifier);
                    context.setSourceId(connIdentifier);
                    context.setSinkId(sinkIdentifier);
                    context.setFinishedWindowId(checkpoint.windowId);
                    BufferServerSubscriber subscriber = fastPublisherSubscriber ? new FastSubscriber("tcp://".concat(nidi.bufferServerHost).concat(":").concat(String.valueOf(nidi.bufferServerPort)).concat("/").concat(connIdentifier), queueCapacity) : new BufferServerSubscriber("tcp://".concat(nidi.bufferServerHost).concat(":").concat(String.valueOf(nidi.bufferServerPort)).concat("/").concat(connIdentifier), queueCapacity);
                    if (streamCodec instanceof StreamCodecWrapperForPersistance) {
                        subscriber.acquireReservoirForPersistStream(sinkIdentifier, queueCapacity, streamCodec);
                    }
                    SweepableReservoir reservoir = subscriber.acquireReservoir(sinkIdentifier, queueCapacity);
                    if (checkpoint.windowId >= 0) {
                        node.connectInputPort(nidi.portName, new WindowIdActivatedReservoir(sinkIdentifier, reservoir, checkpoint.windowId));
                    }
                    node.connectInputPort(nidi.portName, reservoir);
                    newStreams.put(sinkIdentifier, new ComponentContextPair<Stream, StreamContext>(subscriber, context));
                    logger.debug("put input stream {} against key {}", subscriber, sinkIdentifier);
                } else {
                    assert (nidi.locality == Locality.CONTAINER_LOCAL || nidi.locality == Locality.THREAD_LOCAL);
                    /* we are still dealing with the MuxStream originating at the output of the source port */
                    StreamContext inlineContext = new StreamContext(nidi.declaredStreamId);
                    inlineContext.setSourceId(sourceIdentifier);
                    inlineContext.setSinkId(sinkIdentifier);
                    Stream stream;
                    SweepableReservoir reservoir;
                    switch(nidi.locality) {
                        case CONTAINER_LOCAL:
                            int outputQueueCapacity = getOutputQueueCapacity(operatorList, nidi.sourceNodeId, nidi.sourcePortName);
                            if (outputQueueCapacity > queueCapacity) {
                                queueCapacity = outputQueueCapacity;
                            }
                            stream = new InlineStream(queueCapacity);
                            reservoir = ((InlineStream) stream).getReservoir();
                            if (checkpoint.windowId >= 0) {
                                node.connectInputPort(nidi.portName, new WindowIdActivatedReservoir(sinkIdentifier, reservoir, checkpoint.windowId));
                            }
                            break;
                        case THREAD_LOCAL:
                            stream = new OiOStream();
                            reservoir = ((OiOStream) stream).getReservoir();
                            ((OiOStream.OiOReservoir) reservoir).setControlSink(((OiONode) node).getControlSink(reservoir));
                            oioNodes.put(ndi.id, nidi.sourceNodeId);
                            break;
                        default:
                            throw new IllegalStateException("Locality can be either ContainerLocal or ThreadLocal");
                    }
                    node.connectInputPort(nidi.portName, reservoir);
                    newStreams.put(sinkIdentifier, new ComponentContextPair<>(stream, inlineContext));
                    if (!(pair.component instanceof Stream.MultiSinkCapableStream)) {
                        String originalSinkId = pair.context.getSinkId();
                        /* we come here only if we are trying to augment the dag */
                        StreamContext muxContext = new StreamContext(nidi.declaredStreamId);
                        muxContext.setSourceId(sourceIdentifier);
                        muxContext.setFinishedWindowId(checkpoint.windowId);
                        muxContext.setSinkId(originalSinkId);
                        MuxStream muxStream = new MuxStream();
                        muxStream.setSink(originalSinkId, pair.component);
                        streams.put(originalSinkId, pair);
                        Node<?> sourceNode = nodes.get(nidi.sourceNodeId);
                        sourceNode.connectOutputPort(nidi.sourcePortName, muxStream);
                        newStreams.put(sourceIdentifier, pair = new ComponentContextPair<Stream, StreamContext>(muxStream, muxContext));
                    }
                    /* here everything should be multisink capable */
                    if (streamCodec instanceof StreamCodecWrapperForPersistance) {
                        PartitionAwareSinkForPersistence pas;
                        if (nidi.partitionKeys == null) {
                            pas = new PartitionAwareSinkForPersistence((StreamCodecWrapperForPersistance<Object>) streamCodec, nidi.partitionMask, stream);
                        } else {
                            pas = new PartitionAwareSinkForPersistence((StreamCodecWrapperForPersistance<Object>) streamCodec, nidi.partitionKeys, nidi.partitionMask, stream);
                        }
                        ((Stream.MultiSinkCapableStream) pair.component).setSink(sinkIdentifier, pas);
                    } else if (nidi.partitionKeys == null || nidi.partitionKeys.isEmpty()) {
                        ((Stream.MultiSinkCapableStream) pair.component).setSink(sinkIdentifier, stream);
                    } else {
                        /*
               * Generally we do not have partitions on inline streams, so control should not
               * reach here; if it does, we are ready to handle it with the partition-aware sinks.
               */
                        PartitionAwareSink<Object> pas = new PartitionAwareSink<>(streamCodec == null ? nonSerializingStreamCodec : (StreamCodec<Object>) streamCodec, nidi.partitionKeys, nidi.partitionMask, stream);
                        ((Stream.MultiSinkCapableStream) pair.component).setSink(sinkIdentifier, pas);
                    }
                    String streamSinkId = pair.context.getSinkId();
                    if (streamSinkId == null) {
                        pair.context.setSinkId(sinkIdentifier);
                    } else {
                        pair.context.setSinkId(streamSinkId.concat(", ").concat(sinkIdentifier));
                    }
                }
            }
        }
    }
    setupOiOGroups(oioNodes);
    if (!inputNodes.isEmpty()) {
        WindowGenerator windowGenerator = setupWindowGenerator(smallestCheckpointedWindowId);
        for (OperatorDeployInfo ndi : inputNodes) {
            generators.put(ndi.id, windowGenerator);
            Node<?> node = nodes.get(ndi.id);
            SweepableReservoir reservoir = windowGenerator.acquireReservoir(String.valueOf(ndi.id), 1024);
            if (ndi.checkpoint.windowId >= 0) {
                node.connectInputPort(Node.INPUT, new WindowIdActivatedReservoir(Integer.toString(ndi.id), reservoir, ndi.checkpoint.windowId));
            }
            node.connectInputPort(Node.INPUT, reservoir);
        }
    }
}
Also used: InetSocketAddress(java.net.InetSocketAddress) ArrayList(java.util.ArrayList) StreamCodec(com.datatorrent.api.StreamCodec) BufferServerSubscriber(com.datatorrent.stram.stream.BufferServerSubscriber) PartitionAwareSinkForPersistence(com.datatorrent.stram.stream.PartitionAwareSinkForPersistence) StreamCodecWrapperForPersistance(com.datatorrent.stram.plan.logical.StreamCodecWrapperForPersistance) ComponentContextPair(com.datatorrent.stram.ComponentContextPair) FastSubscriber(com.datatorrent.stram.stream.FastSubscriber) InlineStream(com.datatorrent.stram.stream.InlineStream) OiOStream(com.datatorrent.stram.stream.OiOStream) MuxStream(com.datatorrent.stram.stream.MuxStream) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) PartitionAwareSink(com.datatorrent.stram.stream.PartitionAwareSink) OperatorDeployInfo(com.datatorrent.stram.api.OperatorDeployInfo) Checkpoint(com.datatorrent.stram.api.Checkpoint) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) HashMap(java.util.HashMap) InetAddress(java.net.InetAddress)
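
The string keys used to register streams above follow a small naming scheme: a port endpoint is <operatorId><SEP><portName>, a buffer-server connection additionally appends the stream codec id, and the subscriber URI embeds that connection id. A sketch of the scheme; SEP stands for Component.CONCAT_SEPARATOR, whose concrete value is an assumption here, not taken from the snippet:

static final String SEP = ".";  // assumption; the real value comes from Component.CONCAT_SEPARATOR

// e.g. "5.outputPort" for operator 5's port "outputPort"
static String portIdentifier(int operatorId, String portName) {
    return operatorId + SEP + portName;
}

// e.g. "5.outputPort.1" once the stream codec id is mixed in
static String connectionIdentifier(String sourceIdentifier, int streamCodecId) {
    return sourceIdentifier + SEP + streamCodecId;
}

// mirrors the concat chain used to build the BufferServerSubscriber above
static String subscriberUri(String host, int port, String connectionIdentifier) {
    return "tcp://" + host + ":" + port + "/" + connectionIdentifier;
}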

Example 5 with OperatorDeployInfo

Use of com.datatorrent.stram.api.OperatorDeployInfo in project apex-core by apache.

From the class StreamingContainer, method setupNode.

private void setupNode(OperatorDeployInfo ndi) {
    failedNodes.remove(ndi.id);
    final Node<?> node = nodes.get(ndi.id);
    node.setup(node.context);
    /* setup context for all the input ports */
    LinkedHashMap<String, PortContextPair<InputPort<?>>> inputPorts = node.getPortMappingDescriptor().inputPorts;
    LinkedHashMap<String, PortContextPair<InputPort<?>>> newInputPorts = new LinkedHashMap<>(inputPorts.size());
    for (OperatorDeployInfo.InputDeployInfo idi : ndi.inputs) {
        InputPort<?> port = inputPorts.get(idi.portName).component;
        PortContext context = new PortContext(idi.contextAttributes, node.context);
        newInputPorts.put(idi.portName, new PortContextPair<InputPort<?>>(port, context));
        port.setup(context);
    }
    inputPorts.putAll(newInputPorts);
    /* setup context for all the output ports */
    LinkedHashMap<String, PortContextPair<OutputPort<?>>> outputPorts = node.getPortMappingDescriptor().outputPorts;
    LinkedHashMap<String, PortContextPair<OutputPort<?>>> newOutputPorts = new LinkedHashMap<>(outputPorts.size());
    for (OperatorDeployInfo.OutputDeployInfo odi : ndi.outputs) {
        OutputPort<?> port = outputPorts.get(odi.portName).component;
        PortContext context = new PortContext(odi.contextAttributes, node.context);
        newOutputPorts.put(odi.portName, new PortContextPair<OutputPort<?>>(port, context));
        port.setup(context);
    }
    outputPorts.putAll(newOutputPorts);
    logger.debug("activating {} in container {}", node, containerId);
    /* This introduces the need for synchronization on processNodeRequest, which was solved by adding the deleted field in StramToNodeRequest. */
    processNodeRequests(false);
    node.activate();
    eventBus.publish(new NodeActivationEvent(node));
}
Also used: OutputPort(com.datatorrent.api.Operator.OutputPort) OperatorDeployInfo(com.datatorrent.stram.api.OperatorDeployInfo) InputPort(com.datatorrent.api.Operator.InputPort) LinkedHashMap(java.util.LinkedHashMap) PortContextPair(com.datatorrent.stram.plan.logical.Operators.PortContextPair) NodeActivationEvent(com.datatorrent.stram.api.ContainerEvent.NodeActivationEvent)
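
Each port receives its own PortContext layered over the node's context, so a port-level attribute shadows the operator-level value and lookups fall back to the parent when the port has none. A simplified, generic sketch of that layering; the real PortContext uses typed attribute keys, not strings:

import java.util.Map;

/** Two-level attribute lookup: port attributes shadow node attributes. */
final class LayeredContext {
    private final Map<String, Object> attributes;
    private final LayeredContext parent;  // node-level context, or null at the root

    LayeredContext(Map<String, Object> attributes, LayeredContext parent) {
        this.attributes = attributes;
        this.parent = parent;
    }

    Object getValue(String key) {
        Object value = attributes.get(key);
        if (value == null && parent != null) {
            return parent.getValue(key);
        }
        return value;
    }
}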

Aggregations

OperatorDeployInfo (com.datatorrent.stram.api.OperatorDeployInfo): 28
PTContainer (com.datatorrent.stram.plan.physical.PTContainer): 18
GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator): 16
PhysicalPlan (com.datatorrent.stram.plan.physical.PhysicalPlan): 16
Test (org.junit.Test): 16
PTOperator (com.datatorrent.stram.plan.physical.PTOperator): 15
LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan): 13
StramTestSupport (com.datatorrent.stram.support.StramTestSupport): 11
Checkpoint (com.datatorrent.stram.api.Checkpoint): 6
Operator (com.datatorrent.api.Operator): 5
PhysicalPlanTest (com.datatorrent.stram.plan.physical.PhysicalPlanTest): 5
LinkedHashMap (java.util.LinkedHashMap): 5
InputDeployInfo (com.datatorrent.stram.api.OperatorDeployInfo.InputDeployInfo): 4
OutputDeployInfo (com.datatorrent.stram.api.OperatorDeployInfo.OutputDeployInfo): 4
InlineStream (com.datatorrent.stram.stream.InlineStream): 4
MuxStream (com.datatorrent.stram.stream.MuxStream): 4
OiOStream (com.datatorrent.stram.stream.OiOStream): 4
MemoryStorageAgent (com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent): 4
ComponentContextPair (com.datatorrent.stram.ComponentContextPair): 3
TestGeneratorInputOperator (com.datatorrent.stram.engine.TestGeneratorInputOperator): 3