
Example 6 with Checkpoint

use of com.datatorrent.stram.api.Checkpoint in project apex-core by apache.

the class PhysicalPlan method getActivationCheckpoint.

/**
   * Initialize the activation checkpoint for the given operator.
   * Recursively traverses the inputs until an existing checkpoint or a root operator is found.
   * No-op when the checkpoint is already initialized.
   * @param oper the operator for which to determine the activation checkpoint
   * @return the activation checkpoint
   */
private Checkpoint getActivationCheckpoint(PTOperator oper) {
    if (oper.recoveryCheckpoint == null && oper.checkpoints.isEmpty()) {
        Checkpoint activationCheckpoint = Checkpoint.INITIAL_CHECKPOINT;
        for (PTInput input : oper.inputs) {
            PTOperator sourceOper = input.source.source;
            Checkpoint checkpoint = sourceOper.recoveryCheckpoint;
            if (sourceOper.checkpoints.isEmpty()) {
                checkpoint = getActivationCheckpoint(sourceOper);
            }
            activationCheckpoint = Checkpoint.max(activationCheckpoint, checkpoint);
        }
        return activationCheckpoint;
    }
    return oper.recoveryCheckpoint;
}
Also used : Checkpoint(com.datatorrent.stram.api.Checkpoint) PTInput(com.datatorrent.stram.plan.physical.PTOperator.PTInput)
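For orientation, here is a minimal, self-contained sketch of the checkpoint arithmetic the loop above performs, assuming apex-core is on the classpath and that Checkpoint.max orders checkpoints by windowId as the snippet implies; the operator wiring and the recursion are omitted, and the window id values are made up for illustration.

import com.datatorrent.stram.api.Checkpoint;

public class ActivationCheckpointSketch {
    public static void main(String[] args) {
        // Hypothetical upstream recovery checkpoints; the constructor arguments mirror
        // new Checkpoint(windowId, applicationWindowCount, checkpointWindowCount)
        // as used elsewhere in these examples.
        Checkpoint upstreamA = new Checkpoint(100L, 0, 0);
        Checkpoint upstreamB = new Checkpoint(42L, 0, 0);

        // Start from the initial checkpoint and fold in each upstream source,
        // exactly like the loop over oper.inputs in getActivationCheckpoint().
        Checkpoint activation = Checkpoint.INITIAL_CHECKPOINT;
        activation = Checkpoint.max(activation, upstreamA);
        activation = Checkpoint.max(activation, upstreamB);

        // The newest upstream checkpoint wins; here that is upstreamA.
        System.out.println("activation windowId = " + activation.windowId);
    }
}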

Example 7 with Checkpoint

use of com.datatorrent.stram.api.Checkpoint in project apex-core by apache.

the class StreamingContainerAgent method createOperatorDeployInfo.

/**
   * Create deploy info for operator.
   * <p>
   *
   * @return {@link com.datatorrent.stram.api.OperatorDeployInfo}
   */
private OperatorDeployInfo createOperatorDeployInfo(PTOperator oper) {
    OperatorDeployInfo ndi;
    if (oper.isUnifier()) {
        UnifierDeployInfo udi = new UnifierDeployInfo();
        /* the constructor auto sets the type */
        try {
            udi.operatorAttributes = oper.getUnifiedOperatorMeta().getAttributes().clone();
        } catch (CloneNotSupportedException ex) {
            throw new RuntimeException("Cannot clone unifier attributes", ex);
        }
        ndi = udi;
    } else {
        ndi = new OperatorDeployInfo();
        Operator operator = oper.getOperatorMeta().getOperator();
        if (operator instanceof InputOperator) {
            ndi.type = OperatorType.INPUT;
            if (!oper.getInputs().isEmpty()) {
                //we check if any input port is connected which would make it a Generic operator.
                for (PTOperator.PTInput ptInput : oper.getInputs()) {
                    if (ptInput.logicalStream != null && ptInput.logicalStream.getSource() != null) {
                        ndi.type = OperatorType.GENERIC;
                        break;
                    }
                }
            }
        } else {
            ndi.type = OperatorType.GENERIC;
        }
    }
    Checkpoint checkpoint = oper.getRecoveryCheckpoint();
    ProcessingMode pm = oper.getOperatorMeta().getValue(OperatorContext.PROCESSING_MODE);
    if (pm == ProcessingMode.AT_MOST_ONCE || pm == ProcessingMode.EXACTLY_ONCE) {
        // TODO: following should be handled in the container at deploy time
        // for exactly once container should also purge previous checkpoint
        // whenever new checkpoint is written.
        StorageAgent agent = oper.getOperatorMeta().getAttributes().get(OperatorContext.STORAGE_AGENT);
        if (agent == null) {
            agent = initCtx.getValue(OperatorContext.STORAGE_AGENT);
        }
        // pick checkpoint most recently written
        try {
            long[] windowIds = agent.getWindowIds(oper.getId());
            long checkpointId = Stateless.WINDOW_ID;
            for (long windowId : windowIds) {
                if (windowId > checkpointId) {
                    checkpointId = windowId;
                }
            }
            if (checkpoint == null || checkpoint.windowId != checkpointId) {
                checkpoint = new Checkpoint(checkpointId, 0, 0);
            }
        } catch (Exception e) {
            throw new RuntimeException("Failed to determine checkpoint window id " + oper, e);
        }
    }
    LOG.debug("{} recovery checkpoint {}", oper, checkpoint);
    ndi.checkpoint = checkpoint;
    ndi.name = oper.getOperatorMeta().getName();
    ndi.id = oper.getId();
    try {
        // clone map before modifying it
        ndi.contextAttributes = oper.getOperatorMeta().getAttributes().clone();
    } catch (CloneNotSupportedException ex) {
        throw new RuntimeException("Cannot clone operator attributes", ex);
    }
    if (oper.isOperatorStateLess()) {
        ndi.contextAttributes.put(OperatorContext.STATELESS, true);
    }
    return ndi;
}
Also used : Operator(com.datatorrent.api.Operator) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) InputOperator(com.datatorrent.api.InputOperator) OperatorDeployInfo(com.datatorrent.stram.api.OperatorDeployInfo) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) ProcessingMode(com.datatorrent.api.Operator.ProcessingMode) UnifierDeployInfo(com.datatorrent.stram.api.OperatorDeployInfo.UnifierDeployInfo) Checkpoint(com.datatorrent.stram.api.Checkpoint) StorageAgent(com.datatorrent.api.StorageAgent) InputOperator(com.datatorrent.api.InputOperator)
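The AT_MOST_ONCE / EXACTLY_ONCE branch above boils down to picking the most recently written window id from the StorageAgent. A minimal sketch of just that step, assuming apex-core is on the classpath; latestCheckpoint is a hypothetical helper name, and returning null for an operator with no saved windows is an assumption (the method above instead falls back to Stateless.WINDOW_ID).

import java.io.IOException;

import com.datatorrent.api.StorageAgent;
import com.datatorrent.stram.api.Checkpoint;

public class LatestCheckpointSketch {
    /**
     * Mirror of the "pick checkpoint most recently written" block above: scan the
     * window ids the agent has persisted for the operator and wrap the largest one
     * in a Checkpoint, or return null if nothing has been saved yet (assumption).
     */
    static Checkpoint latestCheckpoint(StorageAgent agent, int operatorId) throws IOException {
        long[] windowIds = agent.getWindowIds(operatorId);
        if (windowIds == null || windowIds.length == 0) {
            return null;
        }
        long latest = windowIds[0];
        for (long windowId : windowIds) {
            if (windowId > latest) {
                latest = windowId;
            }
        }
        // the window counts are reset to 0 here, as in the snippet above
        return new Checkpoint(latest, 0, 0);
    }
}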

Example 8 with Checkpoint

use of com.datatorrent.stram.api.Checkpoint in project apex-core by apache.

the class StreamingContainerManager method updateRecoveryCheckpoints.

/**
   * Compute the checkpoints required for a given operator instance to be recovered.
   * This is done by looking at the checkpoints available for downstream dependencies first,
   * and then selecting the most recent available checkpoint that is not newer than the
   * downstream recovery checkpoints.
   *
   * @param operator Operator instance for which to find the recovery checkpoint
   * @param ctx      Context into which to collect traversal info
   * @param recovery Whether this update is performed as part of recovery
   */
public void updateRecoveryCheckpoints(PTOperator operator, UpdateCheckpointsContext ctx, boolean recovery) {
    if (operator.getRecoveryCheckpoint().windowId < ctx.committedWindowId.longValue()) {
        ctx.committedWindowId.setValue(operator.getRecoveryCheckpoint().windowId);
    }
    if (operator.getState() == PTOperator.State.ACTIVE && (ctx.currentTms - operator.stats.lastWindowIdChangeTms) > operator.stats.windowProcessingTimeoutMillis) {
        // if the checkpoint is ahead, then it is not blocked but waiting for activation (state-less recovery, at-most-once)
        if (ctx.committedWindowId.longValue() >= operator.getRecoveryCheckpoint().windowId && !recovery) {
            LOG.warn("Marking operator {} blocked committed window {}, recovery window {}, current time {}, last window id change time {}, window processing timeout millis {}", operator, Codec.getStringWindowId(ctx.committedWindowId.longValue()), Codec.getStringWindowId(operator.getRecoveryCheckpoint().windowId), ctx.currentTms, operator.stats.lastWindowIdChangeTms, operator.stats.windowProcessingTimeoutMillis);
            ctx.blocked.add(operator);
        }
    }
    // the most recent checkpoint eligible for recovery based on downstream state
    Checkpoint maxCheckpoint = Checkpoint.INITIAL_CHECKPOINT;
    Set<OperatorMeta> checkpointGroup = ctx.checkpointGroups.get(operator.getOperatorMeta());
    if (checkpointGroup == null) {
        checkpointGroup = Collections.singleton(operator.getOperatorMeta());
    }
    // find intersection of checkpoints that group can collectively move to
    TreeSet<Checkpoint> commonCheckpoints = new TreeSet<>(new Checkpoint.CheckpointComparator());
    synchronized (operator.checkpoints) {
        commonCheckpoints.addAll(operator.checkpoints);
    }
    Set<PTOperator> groupOpers = new HashSet<>(checkpointGroup.size());
    boolean pendingDeploy = operator.getState() == PTOperator.State.PENDING_DEPLOY;
    if (checkpointGroup.size() > 1) {
        for (OperatorMeta om : checkpointGroup) {
            Collection<PTOperator> operators = plan.getAllOperators(om);
            Collection<PTOperator> unifiers = getUnifiersInCheckpointGroup(operators);
            operators.addAll(unifiers);
            for (PTOperator groupOper : operators) {
                synchronized (groupOper.checkpoints) {
                    commonCheckpoints.retainAll(groupOper.checkpoints);
                }
                // visit all downstream operators of the group
                ctx.visited.add(groupOper);
                groupOpers.add(groupOper);
                pendingDeploy |= groupOper.getState() == PTOperator.State.PENDING_DEPLOY;
            }
        }
        // highest common checkpoint
        if (!commonCheckpoints.isEmpty()) {
            maxCheckpoint = commonCheckpoints.last();
        }
    } else {
        // without logical grouping, treat partitions as independent
        // this is especially important for parallel partitioning
        ctx.visited.add(operator);
        groupOpers.add(operator);
        maxCheckpoint = operator.getRecentCheckpoint();
        if (ctx.recovery && maxCheckpoint.windowId == Stateless.WINDOW_ID && operator.isOperatorStateLess()) {
            long currentWindowId = WindowGenerator.getWindowId(ctx.currentTms, this.vars.windowStartMillis, this.getLogicalPlan().getValue(LogicalPlan.STREAMING_WINDOW_SIZE_MILLIS));
            maxCheckpoint = new Checkpoint(currentWindowId, 0, 0);
        }
    }
    // DFS downstream operators
    for (PTOperator groupOper : groupOpers) {
        for (PTOperator.PTOutput out : groupOper.getOutputs()) {
            for (PTOperator.PTInput sink : out.sinks) {
                PTOperator sinkOperator = sink.target;
                if (groupOpers.contains(sinkOperator)) {
                    // downstream operator within group
                    continue;
                }
                if (!ctx.visited.contains(sinkOperator)) {
                    // downstream traversal
                    updateRecoveryCheckpoints(sinkOperator, ctx, recovery);
                }
                // a downstream recovery checkpoint behind this operator's checkpoint can occur
                // when dynamically adding new operators; such checkpoints are ignored here
                if (sinkOperator.getRecoveryCheckpoint().windowId >= operator.getRecoveryCheckpoint().windowId) {
                    maxCheckpoint = Checkpoint.min(maxCheckpoint, sinkOperator.getRecoveryCheckpoint());
                }
                if (ctx.blocked.contains(sinkOperator)) {
                    if (sinkOperator.stats.getCurrentWindowId() == operator.stats.getCurrentWindowId()) {
                        // downstream operator is blocked by this operator
                        ctx.blocked.remove(sinkOperator);
                    }
                }
            }
        }
    }
    // find the common checkpoint that is <= downstream recovery checkpoint
    if (!commonCheckpoints.contains(maxCheckpoint)) {
        if (!commonCheckpoints.isEmpty()) {
            maxCheckpoint = Objects.firstNonNull(commonCheckpoints.floor(maxCheckpoint), maxCheckpoint);
        }
    }
    for (PTOperator groupOper : groupOpers) {
        // checkpoint frozen during deployment
        if (!pendingDeploy || ctx.recovery) {
            // remove previous checkpoints
            Checkpoint c1 = Checkpoint.INITIAL_CHECKPOINT;
            LinkedList<Checkpoint> checkpoints = groupOper.checkpoints;
            synchronized (checkpoints) {
                if (!checkpoints.isEmpty() && (checkpoints.getFirst()).windowId <= maxCheckpoint.windowId) {
                    c1 = checkpoints.getFirst();
                    Checkpoint c2;
                    while (checkpoints.size() > 1 && ((c2 = checkpoints.get(1)).windowId) <= maxCheckpoint.windowId) {
                        checkpoints.removeFirst();
                        //LOG.debug("Checkpoint to delete: operator={} windowId={}", operator.getName(), c1);
                        this.purgeCheckpoints.add(new Pair<>(groupOper, c1.windowId));
                        c1 = c2;
                    }
                } else {
                    if (ctx.recovery && checkpoints.isEmpty() && groupOper.isOperatorStateLess()) {
                        LOG.debug("Adding checkpoint for stateless operator {} {}", groupOper, Codec.getStringWindowId(maxCheckpoint.windowId));
                        c1 = groupOper.addCheckpoint(maxCheckpoint.windowId, this.vars.windowStartMillis);
                    }
                }
            }
            //LOG.debug("Operator {} checkpoints: commit {} recent {}", new Object[] {operator.getName(), c1, operator.checkpoints});
            groupOper.setRecoveryCheckpoint(c1);
        } else {
            LOG.debug("Skipping checkpoint update {} during {}", groupOper, groupOper.getState());
        }
    }
}
Also used : OperatorMeta(com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) PTInput(com.datatorrent.stram.plan.physical.PTOperator.PTInput) PTOutput(com.datatorrent.stram.plan.physical.PTOperator.PTOutput) Checkpoint(com.datatorrent.stram.api.Checkpoint) TreeSet(java.util.TreeSet) LinkedHashSet(java.util.LinkedHashSet) HashSet(java.util.HashSet)
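A minimal sketch of the checkpoint-group intersection and the floor() step that updateRecoveryCheckpoints performs, assuming apex-core is on the classpath; the checkpoint lists, window ids and the downstream checkpoint are made up for illustration, whereas the real method derives them from the physical plan.

import java.util.Arrays;
import java.util.List;
import java.util.TreeSet;

import com.datatorrent.stram.api.Checkpoint;

public class CommonCheckpointSketch {
    public static void main(String[] args) {
        // Hypothetical checkpoints; the instances are shared between the two lists so the
        // intersection below does not depend on how Checkpoint implements equals().
        Checkpoint w10 = new Checkpoint(10L, 0, 0);
        Checkpoint w20 = new Checkpoint(20L, 0, 0);
        Checkpoint w30 = new Checkpoint(30L, 0, 0);
        List<Checkpoint> operA = Arrays.asList(w10, w20, w30);
        List<Checkpoint> operB = Arrays.asList(w20, w30);

        // Intersection of the checkpoints the group can collectively move to,
        // ordered by the same comparator used in updateRecoveryCheckpoints().
        TreeSet<Checkpoint> common = new TreeSet<>(new Checkpoint.CheckpointComparator());
        common.addAll(operA);
        common.retainAll(operB);

        // Constrain by a (made up) downstream recovery checkpoint: take the highest
        // common checkpoint that is not newer than it, mirroring commonCheckpoints.floor().
        Checkpoint downstream = new Checkpoint(25L, 0, 0);
        Checkpoint recovery = common.floor(downstream);
        System.out.println(recovery == null ? "no eligible checkpoint" : "recovery windowId = " + recovery.windowId);
    }
}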

Example 9 with Checkpoint

use of com.datatorrent.stram.api.Checkpoint in project apex-core by apache.

the class Node method checkpoint.

void checkpoint(long windowId) {
    if (!context.stateless) {
        if (operator instanceof Operator.CheckpointNotificationListener) {
            ((Operator.CheckpointNotificationListener) operator).beforeCheckpoint(windowId);
        }
        StorageAgent ba = context.getValue(OperatorContext.STORAGE_AGENT);
        if (ba != null) {
            try {
                checkpointStats = new Stats.CheckpointStats();
                checkpointStats.checkpointStartTime = System.currentTimeMillis();
                ba.save(operator, id, windowId);
                if (ba instanceof AsyncStorageAgent) {
                    AsyncStorageAgent asyncStorageAgent = (AsyncStorageAgent) ba;
                    if (!asyncStorageAgent.isSyncCheckpoint()) {
                        if (PROCESSING_MODE != ProcessingMode.EXACTLY_ONCE) {
                            CheckpointWindowInfo checkpointWindowInfo = new CheckpointWindowInfo();
                            checkpointWindowInfo.windowId = windowId;
                            checkpointWindowInfo.applicationWindowCount = applicationWindowCount;
                            checkpointWindowInfo.checkpointWindowCount = checkpointWindowCount;
                            CheckpointHandler checkpointHandler = new CheckpointHandler();
                            checkpointHandler.agent = asyncStorageAgent;
                            checkpointHandler.operatorId = id;
                            checkpointHandler.windowId = windowId;
                            checkpointHandler.stats = checkpointStats;
                            FutureTask<Stats.CheckpointStats> futureTask = new FutureTask<>(checkpointHandler);
                            taskQueue.add(new Pair<>(futureTask, checkpointWindowInfo));
                            executorService.submit(futureTask);
                            checkpoint = null;
                            checkpointStats = null;
                            return;
                        } else {
                            asyncStorageAgent.flush(id, windowId);
                        }
                    }
                }
                checkpointStats.checkpointTime = System.currentTimeMillis() - checkpointStats.checkpointStartTime;
            } catch (IOException ie) {
                try {
                    logger.warn("Rolling back checkpoint {} for Operator {} due to the exception {}", Codec.getStringWindowId(windowId), operator, ie);
                    ba.delete(id, windowId);
                } catch (IOException ex) {
                    logger.warn("Error while rolling back checkpoint", ex);
                }
                throw new RuntimeException(ie);
            }
        }
    }
    calculateNextCheckpointWindow();
    dagCheckpointOffsetCount = 0;
    checkpoint = new Checkpoint(windowId, applicationWindowCount, checkpointWindowCount);
    if (operator instanceof Operator.CheckpointListener) {
        ((Operator.CheckpointListener) operator).checkpointed(windowId);
    }
}
Also used : IOException(java.io.IOException) Checkpoint(com.datatorrent.stram.api.Checkpoint) AsyncStorageAgent(org.apache.apex.common.util.AsyncStorageAgent) FutureTask(java.util.concurrent.FutureTask) AsyncStorageAgent(org.apache.apex.common.util.AsyncStorageAgent) StorageAgent(com.datatorrent.api.StorageAgent) Stats(com.datatorrent.api.Stats) ContainerStats(com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerStats)
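Stripped of the async agent handling and the stats bookkeeping, the core of Node.checkpoint() is a save followed by recording a Checkpoint, with a best-effort delete if the save fails. A minimal sketch of that synchronous path, assuming apex-core is on the classpath; saveState is a hypothetical helper, and the zero window counts are placeholders (the method above carries the real applicationWindowCount and checkpointWindowCount).

import java.io.IOException;

import com.datatorrent.api.StorageAgent;
import com.datatorrent.stram.api.Checkpoint;

public class SynchronousCheckpointSketch {
    /**
     * Persist the operator state for the given window and record a checkpoint;
     * on failure, roll the partial checkpoint back before rethrowing, as in
     * the catch block of Node.checkpoint() above.
     */
    static Checkpoint saveState(StorageAgent agent, Object operator, int operatorId, long windowId) {
        try {
            agent.save(operator, operatorId, windowId);
        } catch (IOException e) {
            try {
                agent.delete(operatorId, windowId);
            } catch (IOException cleanupFailure) {
                // best-effort rollback; the original failure is the one we report
            }
            throw new RuntimeException(e);
        }
        // window counts are placeholders here; the engine tracks the real values
        return new Checkpoint(windowId, 0, 0);
    }
}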

Example 10 with Checkpoint

use of com.datatorrent.stram.api.Checkpoint in project apex-core by apache.

the class StreamingContainer method deployInputStreams.

@SuppressWarnings("unchecked")
private void deployInputStreams(List<OperatorDeployInfo> operatorList, HashMap<String, ComponentContextPair<Stream, StreamContext>> newStreams) throws UnknownHostException {
    /*
     * collect any input operators along with their smallest window id;
     * those are subsequently used to set up the window generator
     */
    ArrayList<OperatorDeployInfo> inputNodes = new ArrayList<>();
    long smallestCheckpointedWindowId = Long.MAX_VALUE;
    // a simple map from an OiO (thread-local) node to the node which owns its thread.
    Map<Integer, Integer> oioNodes = new ConcurrentHashMap<>();
    /*
     * Hook up all the downstream ports. There are two places where we deal with more than one
     * downstream port. The first one follows immediately for the WindowGenerator. The second
     * case is when the source for the input port of some node in this container is in another
     * container, so we need to create the stream. We need to track this stream along with the
     * other streams, and since many such streams may exist, we hash them against the buffer
     * server info as we did for the outputs, but throw the sink id into the mix as well.
     */
    for (OperatorDeployInfo ndi : operatorList) {
        if (ndi.inputs == null || ndi.inputs.isEmpty()) {
            /*
             * This has to be an InputNode, so let's hook the WindowGenerator to it.
             * A node which does not take any input cannot exist in the DAG since it would be completely
             * unaware of the windows. For that reason, AbstractInputNode allows the Component.INPUT port.
             */
            inputNodes.add(ndi);
            /*
             * When we activate the WindowGenerator, we plan to activate it only from the required windowId.
             */
            ndi.checkpoint = getFinishedCheckpoint(ndi);
            if (ndi.checkpoint.windowId < smallestCheckpointedWindowId) {
                smallestCheckpointedWindowId = ndi.checkpoint.windowId;
            }
        } else {
            Node<?> node = nodes.get(ndi.id);
            for (OperatorDeployInfo.InputDeployInfo nidi : ndi.inputs) {
                if (nidi.streamCodecs.size() != 1) {
                    throw new IllegalStateException("Only one input codec configuration should be present");
                }
                Map.Entry<Integer, StreamCodec<?>> entry = nidi.streamCodecs.entrySet().iterator().next();
                Integer streamCodecIdentifier = entry.getKey();
                StreamCodec<?> streamCodec = entry.getValue();
                String sourceIdentifier = Integer.toString(nidi.sourceNodeId).concat(Component.CONCAT_SEPARATOR).concat(nidi.sourcePortName);
                String sinkIdentifier = Integer.toString(ndi.id).concat(Component.CONCAT_SEPARATOR).concat(nidi.portName);
                int queueCapacity = getValue(PortContext.QUEUE_CAPACITY, nidi, ndi);
                Checkpoint checkpoint = getFinishedCheckpoint(ndi);
                ComponentContextPair<Stream, StreamContext> pair = streams.get(sourceIdentifier);
                if (pair == null) {
                    pair = newStreams.get(sourceIdentifier);
                }
                if (pair == null) {
                    /*
                     * We connect to the buffer server for the input on this port.
                     * We have already placed all the output streams for all the operators in this container.
                     * Yet, there is no stream which can source this port, so it has to come from the buffer
                     * server; let's make a connection to it.
                     */
                    assert (nidi.locality != Locality.CONTAINER_LOCAL && nidi.locality != Locality.THREAD_LOCAL);
                    StreamContext context = new StreamContext(nidi.declaredStreamId);
                    context.setBufferServerAddress(InetSocketAddress.createUnresolved(nidi.bufferServerHost, nidi.bufferServerPort));
                    InetAddress inetAddress = context.getBufferServerAddress().getAddress();
                    if (inetAddress != null && NetUtils.isLocalAddress(inetAddress)) {
                        context.setBufferServerAddress(new InetSocketAddress(InetAddress.getByName(null), nidi.bufferServerPort));
                    }
                    context.put(StreamContext.BUFFER_SERVER_TOKEN, nidi.bufferServerToken);
                    String connIdentifier = sourceIdentifier + Component.CONCAT_SEPARATOR + streamCodecIdentifier;
                    context.setPortId(nidi.portName);
                    context.put(StreamContext.CODEC, streamCodec);
                    context.put(StreamContext.EVENT_LOOP, eventloop);
                    context.setPartitions(nidi.partitionMask, nidi.partitionKeys);
                    //context.setSourceId(sourceIdentifier);
                    context.setSourceId(connIdentifier);
                    context.setSinkId(sinkIdentifier);
                    context.setFinishedWindowId(checkpoint.windowId);
                    BufferServerSubscriber subscriber = fastPublisherSubscriber ? new FastSubscriber("tcp://".concat(nidi.bufferServerHost).concat(":").concat(String.valueOf(nidi.bufferServerPort)).concat("/").concat(connIdentifier), queueCapacity) : new BufferServerSubscriber("tcp://".concat(nidi.bufferServerHost).concat(":").concat(String.valueOf(nidi.bufferServerPort)).concat("/").concat(connIdentifier), queueCapacity);
                    if (streamCodec instanceof StreamCodecWrapperForPersistance) {
                        subscriber.acquireReservoirForPersistStream(sinkIdentifier, queueCapacity, streamCodec);
                    }
                    SweepableReservoir reservoir = subscriber.acquireReservoir(sinkIdentifier, queueCapacity);
                    if (checkpoint.windowId >= 0) {
                        node.connectInputPort(nidi.portName, new WindowIdActivatedReservoir(sinkIdentifier, reservoir, checkpoint.windowId));
                    }
                    node.connectInputPort(nidi.portName, reservoir);
                    newStreams.put(sinkIdentifier, new ComponentContextPair<Stream, StreamContext>(subscriber, context));
                    logger.debug("put input stream {} against key {}", subscriber, sinkIdentifier);
                } else {
                    assert (nidi.locality == Locality.CONTAINER_LOCAL || nidi.locality == Locality.THREAD_LOCAL);
                    /* we are still dealing with the MuxStream originating at the output of the source port */
                    StreamContext inlineContext = new StreamContext(nidi.declaredStreamId);
                    inlineContext.setSourceId(sourceIdentifier);
                    inlineContext.setSinkId(sinkIdentifier);
                    Stream stream;
                    SweepableReservoir reservoir;
                    switch(nidi.locality) {
                        case CONTAINER_LOCAL:
                            int outputQueueCapacity = getOutputQueueCapacity(operatorList, nidi.sourceNodeId, nidi.sourcePortName);
                            if (outputQueueCapacity > queueCapacity) {
                                queueCapacity = outputQueueCapacity;
                            }
                            stream = new InlineStream(queueCapacity);
                            reservoir = ((InlineStream) stream).getReservoir();
                            if (checkpoint.windowId >= 0) {
                                node.connectInputPort(nidi.portName, new WindowIdActivatedReservoir(sinkIdentifier, reservoir, checkpoint.windowId));
                            }
                            break;
                        case THREAD_LOCAL:
                            stream = new OiOStream();
                            reservoir = ((OiOStream) stream).getReservoir();
                            ((OiOStream.OiOReservoir) reservoir).setControlSink(((OiONode) node).getControlSink(reservoir));
                            oioNodes.put(ndi.id, nidi.sourceNodeId);
                            break;
                        default:
                            throw new IllegalStateException("Locality can be either ContainerLocal or ThreadLocal");
                    }
                    node.connectInputPort(nidi.portName, reservoir);
                    newStreams.put(sinkIdentifier, new ComponentContextPair<>(stream, inlineContext));
                    if (!(pair.component instanceof Stream.MultiSinkCapableStream)) {
                        String originalSinkId = pair.context.getSinkId();
                        /* we come here only if we are trying to augment the dag */
                        StreamContext muxContext = new StreamContext(nidi.declaredStreamId);
                        muxContext.setSourceId(sourceIdentifier);
                        muxContext.setFinishedWindowId(checkpoint.windowId);
                        muxContext.setSinkId(originalSinkId);
                        MuxStream muxStream = new MuxStream();
                        muxStream.setSink(originalSinkId, pair.component);
                        streams.put(originalSinkId, pair);
                        Node<?> sourceNode = nodes.get(nidi.sourceNodeId);
                        sourceNode.connectOutputPort(nidi.sourcePortName, muxStream);
                        newStreams.put(sourceIdentifier, pair = new ComponentContextPair<Stream, StreamContext>(muxStream, muxContext));
                    }
                    /* here everything should be multisink capable */
                    if (streamCodec instanceof StreamCodecWrapperForPersistance) {
                        PartitionAwareSinkForPersistence pas;
                        if (nidi.partitionKeys == null) {
                            pas = new PartitionAwareSinkForPersistence((StreamCodecWrapperForPersistance<Object>) streamCodec, nidi.partitionMask, stream);
                        } else {
                            pas = new PartitionAwareSinkForPersistence((StreamCodecWrapperForPersistance<Object>) streamCodec, nidi.partitionKeys, nidi.partitionMask, stream);
                        }
                        ((Stream.MultiSinkCapableStream) pair.component).setSink(sinkIdentifier, pas);
                    } else if (nidi.partitionKeys == null || nidi.partitionKeys.isEmpty()) {
                        ((Stream.MultiSinkCapableStream) pair.component).setSink(sinkIdentifier, stream);
                    } else {
                        /*
                         * Generally speaking we do not have partitions on the inline streams, so control should
                         * not come here; but if it does, we are ready to handle it using the partition-aware sinks.
                         */
                        PartitionAwareSink<Object> pas = new PartitionAwareSink<>(streamCodec == null ? nonSerializingStreamCodec : (StreamCodec<Object>) streamCodec, nidi.partitionKeys, nidi.partitionMask, stream);
                        ((Stream.MultiSinkCapableStream) pair.component).setSink(sinkIdentifier, pas);
                    }
                    String streamSinkId = pair.context.getSinkId();
                    if (streamSinkId == null) {
                        pair.context.setSinkId(sinkIdentifier);
                    } else {
                        pair.context.setSinkId(streamSinkId.concat(", ").concat(sinkIdentifier));
                    }
                }
            }
        }
    }
    setupOiOGroups(oioNodes);
    if (!inputNodes.isEmpty()) {
        WindowGenerator windowGenerator = setupWindowGenerator(smallestCheckpointedWindowId);
        for (OperatorDeployInfo ndi : inputNodes) {
            generators.put(ndi.id, windowGenerator);
            Node<?> node = nodes.get(ndi.id);
            SweepableReservoir reservoir = windowGenerator.acquireReservoir(String.valueOf(ndi.id), 1024);
            if (ndi.checkpoint.windowId >= 0) {
                node.connectInputPort(Node.INPUT, new WindowIdActivatedReservoir(Integer.toString(ndi.id), reservoir, ndi.checkpoint.windowId));
            }
            node.connectInputPort(Node.INPUT, reservoir);
        }
    }
}
Also used : InetSocketAddress(java.net.InetSocketAddress) ArrayList(java.util.ArrayList) StreamCodec(com.datatorrent.api.StreamCodec) BufferServerSubscriber(com.datatorrent.stram.stream.BufferServerSubscriber) PartitionAwareSinkForPersistence(com.datatorrent.stram.stream.PartitionAwareSinkForPersistence) StreamCodecWrapperForPersistance(com.datatorrent.stram.plan.logical.StreamCodecWrapperForPersistance) ComponentContextPair(com.datatorrent.stram.ComponentContextPair) FastSubscriber(com.datatorrent.stram.stream.FastSubscriber) InlineStream(com.datatorrent.stram.stream.InlineStream) OiOStream(com.datatorrent.stram.stream.OiOStream) InlineStream(com.datatorrent.stram.stream.InlineStream) MuxStream(com.datatorrent.stram.stream.MuxStream) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) PartitionAwareSink(com.datatorrent.stram.stream.PartitionAwareSink) OperatorDeployInfo(com.datatorrent.stram.api.OperatorDeployInfo) Checkpoint(com.datatorrent.stram.api.Checkpoint) Checkpoint(com.datatorrent.stram.api.Checkpoint) MuxStream(com.datatorrent.stram.stream.MuxStream) OiOStream(com.datatorrent.stram.stream.OiOStream) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) InetAddress(java.net.InetAddress)
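A minimal sketch isolating the first pass of deployInputStreams(): only operators without inputs are fed by the WindowGenerator, and the generator is later set up from the smallest checkpointed window id so that no input operator misses windows it still has to replay. It assumes apex-core on the classpath; smallestCheckpointedWindowId is a hypothetical helper name, and the null check on the checkpoint is an added guard (the method above assigns the checkpoint right before reading it).

import java.util.List;

import com.datatorrent.stram.api.OperatorDeployInfo;

public class WindowGeneratorStartSketch {
    static long smallestCheckpointedWindowId(List<OperatorDeployInfo> operatorList) {
        // Mirror the input-node scan above: an operator with no inputs is an input
        // node, and the window generator must start no later than its checkpoint.
        long smallest = Long.MAX_VALUE;
        for (OperatorDeployInfo ndi : operatorList) {
            if ((ndi.inputs == null || ndi.inputs.isEmpty())
                    && ndi.checkpoint != null && ndi.checkpoint.windowId < smallest) {
                smallest = ndi.checkpoint.windowId;
            }
        }
        return smallest;
    }
}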

Aggregations

Checkpoint (com.datatorrent.stram.api.Checkpoint): 28
PTOperator (com.datatorrent.stram.plan.physical.PTOperator): 15
GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator): 9
PhysicalPlan (com.datatorrent.stram.plan.physical.PhysicalPlan): 9
Test (org.junit.Test): 9
ContainerStats (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerStats): 6
OperatorMeta (com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta): 6
MemoryStorageAgent (com.datatorrent.stram.support.StramTestSupport.MemoryStorageAgent): 6
OperatorStats (com.datatorrent.api.Stats.OperatorStats): 5
UpdateCheckpointsContext (com.datatorrent.stram.StreamingContainerManager.UpdateCheckpointsContext): 5
HashMap (java.util.HashMap): 5
Map (java.util.Map): 5
OperatorHeartbeat (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.OperatorHeartbeat): 4
TestGeneratorInputOperator (com.datatorrent.stram.engine.TestGeneratorInputOperator): 4
IOException (java.io.IOException): 4
LinkedHashMap (java.util.LinkedHashMap): 4
Operator (com.datatorrent.api.Operator): 3
StatsListener (com.datatorrent.api.StatsListener): 3
AsyncFSStorageAgent (com.datatorrent.common.util.AsyncFSStorageAgent): 3
ContainerHeartbeatResponse (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeatResponse): 3