
Example 11 with Operator

use of com.datatorrent.api.Operator in project apex-core by apache.

the class StreamingContainer method processHeartbeatResponse.

public void processHeartbeatResponse(ContainerHeartbeatResponse rsp) {
    if (rsp.nodeRequests != null) {
        nodeRequests = rsp.nodeRequests;
    }
    if (rsp.committedWindowId != lastCommittedWindowId) {
        lastCommittedWindowId = rsp.committedWindowId;
        if (bufferServer != null) {
            // One window before the committed window is kept in the buffer server for historical reasons.
            // The JIRA tracking that issue is APEXCORE-479.
            bufferServer.purge(lastCommittedWindowId - 1);
        }
        OperatorRequest nr = null;
        for (Entry<Integer, Node<?>> e : nodes.entrySet()) {
            final Thread thread = e.getValue().context.getThread();
            if (thread == null || !thread.isAlive()) {
                continue;
            }
            if (e.getValue().getOperator() instanceof Operator.CheckpointListener) {
                if (nr == null) {
                    nr = new OperatorRequest() {

                        @Override
                        public StatsListener.OperatorResponse execute(Operator operator, int operatorId, long windowId) throws IOException {
                            ((Operator.CheckpointListener) operator).committed(lastCommittedWindowId);
                            return null;
                        }
                    };
                }
                e.getValue().context.request(nr);
            }
        }
    }
    if (rsp.undeployRequest != null) {
        logger.info("Undeploy request: {}", rsp.undeployRequest);
        processNodeRequests(false);
        undeploy(rsp.undeployRequest);
    }
    if (rsp.shutdown != null) {
        logger.info("Received shutdown request type {}", rsp.shutdown);
        if (rsp.shutdown == StreamingContainerUmbilicalProtocol.ShutdownType.ABORT) {
            processNodeRequests(false);
            this.exitHeartbeatLoop = true;
            return;
        } else if (rsp.shutdown == StreamingContainerUmbilicalProtocol.ShutdownType.WAIT_TERMINATE) {
            stopInputNodes();
        }
    }
    if (rsp.deployRequest != null) {
        logger.info("Deploy request: {}", rsp.deployRequest);
        try {
            deploy(rsp.deployRequest);
        } catch (Exception e) {
            logger.error("deploy request failed", e);
            try {
                umbilical.log(this.containerId, "deploy request failed: " + rsp.deployRequest + " " + ExceptionUtils.getStackTrace(e));
            } catch (IOException ioe) {
                logger.debug("Fail to log", ioe);
            }
            this.exitHeartbeatLoop = true;
            throw new IllegalStateException("Deploy request failed: " + rsp.deployRequest, e);
        }
    }
    processNodeRequests(true);
}
Also used : Operator(com.datatorrent.api.Operator) IOException(java.io.IOException) Checkpoint(com.datatorrent.stram.api.Checkpoint) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) OperatorRequest(com.datatorrent.api.StatsListener.OperatorRequest)
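
The committed() callback dispatched by this request is only meaningful for operators that implement Operator.CheckpointListener. A minimal sketch of such an operator, assuming the BaseOperator convenience class from com.datatorrent.common.util and a hypothetical per-window state map that is purged once a window is committed:

import java.util.TreeMap;

import com.datatorrent.api.Operator;
import com.datatorrent.common.util.BaseOperator;

// Illustrative only: keeps per-window state and discards it once the engine
// reports the window as committed via the OperatorRequest shown above.
public class CommittedAwareOperator extends BaseOperator implements Operator.CheckpointListener {

    // hypothetical per-window state retained until the window is committed
    private final TreeMap<Long, Object> stateByWindow = new TreeMap<>();

    @Override
    public void checkpointed(long windowId) {
        // invoked after the operator state for windowId has been written to the storage agent
    }

    @Override
    public void committed(long windowId) {
        // the OperatorRequest dispatched by StreamingContainer ends up here;
        // everything up to and including windowId can now be safely discarded
        stateByWindow.headMap(windowId, true).clear();
    }
}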

Example 12 with Operator

use of com.datatorrent.api.Operator in project apex-core by apache.

the class TupleRecorderCollection method startRecording.

private void startRecording(String id, final Node<?> node, int operatorId, final String portName, long numWindows) {
    PortMappingDescriptor descriptor = node.getPortMappingDescriptor();
    OperatorIdPortNamePair operatorIdPortNamePair = new OperatorIdPortNamePair(operatorId, portName);
    // check for any recording conflicts
    boolean conflict = false;
    if (containsKey(new OperatorIdPortNamePair(operatorId, null))) {
        conflict = true;
    } else if (portName == null) {
        for (Map.Entry<String, PortContextPair<InputPort<?>>> entry : descriptor.inputPorts.entrySet()) {
            if (containsKey(new OperatorIdPortNamePair(operatorId, entry.getKey()))) {
                conflict = true;
                break;
            }
        }
        for (Map.Entry<String, PortContextPair<OutputPort<?>>> entry : descriptor.outputPorts.entrySet()) {
            if (containsKey(new OperatorIdPortNamePair(operatorId, entry.getKey()))) {
                conflict = true;
                break;
            }
        }
    } else {
        if (containsKey(operatorIdPortNamePair)) {
            conflict = true;
        }
    }
    if (!conflict) {
        logger.debug("Executing start recording request for {}", operatorIdPortNamePair);
        if (gatewayAddress != null && wsClient == null) {
            synchronized (this) {
                if (wsClient == null) {
                    try {
                        wsClient = new SharedPubSubWebSocketClient((gatewayUseSsl ? "wss://" : "ws://") + gatewayAddress + "/pubsub", 500);
                        if (gatewayUserName != null && gatewayPassword != null) {
                            wsClient.setLoginUrl((gatewayUseSsl ? "https://" : "http://") + gatewayAddress + StreamingContainerManager.GATEWAY_LOGIN_URL_PATH);
                            wsClient.setUserName(gatewayUserName);
                            wsClient.setPassword(gatewayPassword);
                        }
                        wsClient.setup();
                    } catch (Exception ex) {
                        logger.warn("Error initializing websocket", ex);
                    }
                }
            }
        }
        TupleRecorder tupleRecorder = new TupleRecorder(id, appId);
        tupleRecorder.setWebSocketClient(wsClient);
        HashMap<String, Sink<Object>> sinkMap = new HashMap<>();
        for (Map.Entry<String, PortContextPair<InputPort<?>>> entry : descriptor.inputPorts.entrySet()) {
            String streamId = getDeclaredStreamId(operatorId, entry.getKey());
            if (streamId == null) {
                streamId = portName + "_implicit_stream";
            }
            if (entry.getValue().context != null && (portName == null || entry.getKey().equals(portName))) {
                logger.debug("Adding recorder sink to input port {}, stream {}", entry.getKey(), streamId);
                tupleRecorder.addInputPortInfo(entry.getKey(), streamId);
                sinkMap.put(entry.getKey(), tupleRecorder.newSink(entry.getKey()));
            }
        }
        for (Map.Entry<String, PortContextPair<OutputPort<?>>> entry : descriptor.outputPorts.entrySet()) {
            String streamId = getDeclaredStreamId(operatorId, entry.getKey());
            if (streamId == null) {
                streamId = portName + "_implicit_stream";
            }
            if (portName == null || entry.getKey().equals(portName)) {
                logger.debug("Adding recorder sink to output port {}, stream {}", entry.getKey(), streamId);
                tupleRecorder.addOutputPortInfo(entry.getKey(), streamId);
                sinkMap.put(entry.getKey(), tupleRecorder.newSink(entry.getKey()));
            }
        }
        if (!sinkMap.isEmpty()) {
            logger.debug("Started recording on {} through {}", operatorIdPortNamePair, System.identityHashCode(this));
            String basePath = appPath + "/recordings/" + operatorId + "/" + tupleRecorder.getId();
            tupleRecorder.getStorage().setBasePath(basePath);
            tupleRecorder.getStorage().setBytesPerPartFile(tupleRecordingPartFileSize);
            tupleRecorder.getStorage().setMillisPerPartFile(tupleRecordingPartFileTimeMillis);
            node.addSinks(sinkMap);
            tupleRecorder.setup(node.getOperator(), codecs);
            put(operatorIdPortNamePair, tupleRecorder);
            if (numWindows > 0) {
                tupleRecorder.setNumWindows(numWindows, new Runnable() {

                    @Override
                    public void run() {
                        node.context.request(new OperatorRequest() {

                            @Override
                            public StatsListener.OperatorResponse execute(Operator operator, int operatorId, long windowId) throws IOException {
                                stopRecording(node, operatorId, portName);
                                return null;
                            }
                        });
                    }
                });
            }
        } else {
            logger.warn("Tuple recording request ignored because operator is not connected on the specified port.");
        }
    } else {
        logger.error("Operator id {} is already being recorded.", operatorId);
    }
}
Also used : OutputPort(com.datatorrent.api.Operator.OutputPort) Operator(com.datatorrent.api.Operator) InputPort(com.datatorrent.api.Operator.InputPort) HashMap(java.util.HashMap) SharedPubSubWebSocketClient(com.datatorrent.stram.util.SharedPubSubWebSocketClient) StatsListener(com.datatorrent.api.StatsListener) IOException(java.io.IOException) PortContextPair(com.datatorrent.stram.plan.logical.Operators.PortContextPair) Sink(com.datatorrent.api.Sink) OperatorRequest(com.datatorrent.api.StatsListener.OperatorRequest) HashMap(java.util.HashMap) Map(java.util.Map) PortMappingDescriptor(com.datatorrent.stram.plan.logical.Operators.PortMappingDescriptor)
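
The recorder hooks into the engine by attaching Sink<Object> instances to the selected ports through node.addSinks(sinkMap). A rough sketch of a port sink, assuming Sink's put/getCount contract; CountingSink is a hypothetical stand-in that only counts tuples instead of serializing them to part files the way TupleRecorder's sinks do:

import com.datatorrent.api.Sink;

// Illustrative only: similar in spirit to the sinks returned by tupleRecorder.newSink(portName),
// except that it merely counts tuples instead of persisting them.
public class CountingSink<T> implements Sink<T> {

    private int count;

    @Override
    public void put(T tuple) {
        // TupleRecorder would serialize the tuple and append it to the current part file here
        count++;
    }

    @Override
    public int getCount(boolean reset) {
        int current = count;
        if (reset) {
            count = 0;
        }
        return current;
    }
}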

Example 13 with Operator

use of com.datatorrent.api.Operator in project apex-core by apache.

the class StreamCodecTest method testPartitioningMultipleStreamCodecs.

@Test
public void testPartitioningMultipleStreamCodecs() {
    GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
    GenericTestOperator node2 = dag.addOperator("node2", GenericTestOperator.class);
    GenericTestOperator node3 = dag.addOperator("node3", GenericTestOperator.class);
    dag.setOperatorAttribute(node1, Context.OperatorContext.PARTITIONER, new StatelessPartitioner<GenericTestOperator>(2));
    TestStreamCodec serDe = new TestStreamCodec();
    dag.setInputPortAttribute(node2.inport1, Context.PortContext.STREAM_CODEC, serDe);
    TestStreamCodec2 serDe2 = new TestStreamCodec2();
    dag.setInputPortAttribute(node3.inport1, Context.PortContext.STREAM_CODEC, serDe2);
    dag.addStream("n1n2n3", node1.outport1, node2.inport1, node3.inport1);
    dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, Integer.MAX_VALUE);
    StramTestSupport.MemoryStorageAgent msa = new StramTestSupport.MemoryStorageAgent();
    dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, msa);
    StreamingContainerManager dnm = new StreamingContainerManager(dag);
    PhysicalPlan plan = dnm.getPhysicalPlan();
    List<PTContainer> containers = plan.getContainers();
    Assert.assertEquals("number containers", 4, containers.size());
    for (int i = 0; i < containers.size(); ++i) {
        StreamingContainerManagerTest.assignContainer(dnm, "container" + (i + 1));
    }
    LogicalPlan.OperatorMeta n1meta = dag.getMeta(node1);
    LogicalPlan.OperatorMeta n2meta = dag.getMeta(node2);
    LogicalPlan.OperatorMeta n3meta = dag.getMeta(node3);
    for (PTContainer container : containers) {
        List<PTOperator> operators = container.getOperators();
        for (PTOperator operator : operators) {
            if (!operator.isUnifier()) {
                if (operator.getOperatorMeta() == n1meta) {
                    OperatorDeployInfo odi = getOperatorDeployInfo(operator, n1meta.getName(), dnm);
                    OperatorDeployInfo.OutputDeployInfo otdi = getOutputDeployInfo(odi, n1meta.getMeta(node1.outport1));
                    String id = n1meta.getName() + " " + otdi.portName;
                    Assert.assertEquals("number stream codecs " + id, otdi.streamCodecs.size(), 2);
                    checkPresentStreamCodec(n2meta, node2.inport1, otdi.streamCodecs, id, plan);
                    checkPresentStreamCodec(n3meta, node3.inport1, otdi.streamCodecs, id, plan);
                } else if (operator.getOperatorMeta() == n2meta) {
                    OperatorDeployInfo odi = getOperatorDeployInfo(operator, n2meta.getName(), dnm);
                    OperatorDeployInfo.InputDeployInfo idi = getInputDeployInfo(odi, n2meta.getMeta(node2.inport1));
                    String id = n2meta.getName() + " " + idi.portName;
                    Assert.assertEquals("number stream codecs " + id, idi.streamCodecs.size(), 1);
                    checkPresentStreamCodec(n2meta, node2.inport1, idi.streamCodecs, id, plan);
                } else if (operator.getOperatorMeta() == n3meta) {
                    OperatorDeployInfo odi = getOperatorDeployInfo(operator, n3meta.getName(), dnm);
                    OperatorDeployInfo.InputDeployInfo idi = getInputDeployInfo(odi, n3meta.getMeta(node3.inport1));
                    String id = n3meta.getName() + " " + idi.portName;
                    Assert.assertEquals("number stream codecs " + id, idi.streamCodecs.size(), 1);
                    checkPresentStreamCodec(n3meta, node3.inport1, idi.streamCodecs, id, plan);
                }
            } else {
                OperatorDeployInfo odi = getOperatorDeployInfo(operator, operator.getName(), dnm);
                Assert.assertEquals("unifier outputs " + operator.getName(), 1, operator.getOutputs().size());
                PTOperator.PTOutput out = operator.getOutputs().get(0);
                Assert.assertEquals("unifier sinks " + operator.getName(), 1, out.sinks.size());
                PTOperator.PTInput idInput = out.sinks.get(0);
                LogicalPlan.OperatorMeta idMeta = idInput.target.getOperatorMeta();
                Operator.InputPort<?> idInputPort = null;
                if (idMeta == n2meta) {
                    idInputPort = node2.inport1;
                } else if (idMeta == n3meta) {
                    idInputPort = node3.inport1;
                }
                List<OperatorDeployInfo.InputDeployInfo> idis = odi.inputs;
                for (OperatorDeployInfo.InputDeployInfo idi : idis) {
                    String id = operator.getName() + " " + idi.portName;
                    Assert.assertEquals("number stream codecs " + id, idi.streamCodecs.size(), 1);
                    checkPresentStreamCodec(idMeta, idInputPort, idi.streamCodecs, id, plan);
                }
            }
        }
    }
}
Also used : GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) Operator(com.datatorrent.api.Operator) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) StramTestSupport(com.datatorrent.stram.support.StramTestSupport) PTContainer(com.datatorrent.stram.plan.physical.PTContainer) PhysicalPlan(com.datatorrent.stram.plan.physical.PhysicalPlan) OperatorDeployInfo(com.datatorrent.stram.api.OperatorDeployInfo) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) Test(org.junit.Test)
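
The TestStreamCodec and TestStreamCodec2 instances set through the STREAM_CODEC port attribute are what show up in the streamCodecs maps asserted above. As a hedged sketch, a custom codec can be written directly against the StreamCodec interface; JavaSerializationStreamCodec below is a hypothetical stand-in that uses plain Java serialization and hashCode-based partitioning, assuming the Slice type from com.datatorrent.netlet.util with public buffer/offset/length fields:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;

import com.datatorrent.api.StreamCodec;
import com.datatorrent.netlet.util.Slice;

// Illustrative only: a simple codec standing in for the test codecs used above.
public class JavaSerializationStreamCodec<T extends Serializable> implements StreamCodec<T> {

    @Override
    public Slice toByteArray(T o) {
        try {
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            ObjectOutputStream oos = new ObjectOutputStream(bos);
            oos.writeObject(o);
            oos.flush();
            byte[] bytes = bos.toByteArray();
            return new Slice(bytes, 0, bytes.length);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public Object fromByteArray(Slice fragment) {
        try {
            ObjectInputStream ois = new ObjectInputStream(
                new ByteArrayInputStream(fragment.buffer, fragment.offset, fragment.length));
            return ois.readObject();
        } catch (IOException | ClassNotFoundException e) {
            throw new RuntimeException(e);
        }
    }

    @Override
    public int getPartition(T o) {
        // the partitioner uses this value to route tuples across the downstream partitions
        return o.hashCode();
    }
}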

Example 14 with Operator

use of com.datatorrent.api.Operator in project apex-core by apache.

the class StreamCodecTest method testInlineStreamCodec.

@Test
public void testInlineStreamCodec() {
    GenericTestOperator node1 = dag.addOperator("node1", GenericTestOperator.class);
    GenericTestOperator node2 = dag.addOperator("node2", GenericTestOperator.class);
    GenericTestOperator node3 = dag.addOperator("node3", GenericTestOperator.class);
    TestStreamCodec serDe = new TestStreamCodec();
    dag.setInputPortAttribute(node2.inport1, Context.PortContext.STREAM_CODEC, serDe);
    dag.setInputPortAttribute(node3.inport1, Context.PortContext.STREAM_CODEC, serDe);
    dag.addStream("n1n2n3", node1.outport1, node2.inport1, node3.inport1);
    // Relying on the container max count so that the manager lays out node1 and node3 inline in the
    // same container and node2 in a separate container
    dag.setAttribute(LogicalPlan.CONTAINERS_MAX_COUNT, 2);
    StramTestSupport.MemoryStorageAgent msa = new StramTestSupport.MemoryStorageAgent();
    dag.setAttribute(Context.OperatorContext.STORAGE_AGENT, msa);
    StreamingContainerManager dnm = new StreamingContainerManager(dag);
    PhysicalPlan plan = dnm.getPhysicalPlan();
    List<PTContainer> containers = plan.getContainers();
    Assert.assertEquals("number containers", 2, containers.size());
    for (int i = 0; i < containers.size(); ++i) {
        StreamingContainerManagerTest.assignContainer(dnm, "container" + (i + 1));
    }
    LogicalPlan.OperatorMeta n1meta = dag.getMeta(node1);
    LogicalPlan.OperatorMeta nonInlineMeta = null;
    for (int i = 0; i < containers.size(); ++i) {
        PTContainer container = containers.get(i);
        List<PTOperator> operators = container.getOperators();
        if (operators.size() == 1) {
            nonInlineMeta = operators.get(0).getOperatorMeta();
            break;
        }
    }
    Assert.assertNotNull("non inline operator meta is null", nonInlineMeta);
    GenericTestOperator nonInlineOperator = null;
    Operator.InputPort<?> niInputPort = null;
    if (nonInlineMeta.getName().equals("node2")) {
        nonInlineOperator = node2;
        niInputPort = node2.inport1;
    } else if (nonInlineMeta.getName().equals("node3")) {
        nonInlineOperator = node3;
        niInputPort = node3.inport1;
    }
    Assert.assertNotNull("non inline operator is null", nonInlineOperator);
    OperatorDeployInfo n1di = getSingleOperatorDeployInfo(node1, dnm);
    OperatorDeployInfo.OutputDeployInfo n1odi = getOutputDeployInfo(n1di, n1meta.getMeta(node1.outport1));
    String id = n1meta.getName() + " " + n1odi.portName;
    Assert.assertEquals("number stream codecs " + id, n1odi.streamCodecs.size(), 1);
    checkPresentStreamCodec(nonInlineMeta, niInputPort, n1odi.streamCodecs, id, plan);
    OperatorDeployInfo odi = getSingleOperatorDeployInfo(nonInlineOperator, dnm);
    OperatorDeployInfo.InputDeployInfo idi = getInputDeployInfo(odi, nonInlineMeta.getMeta(niInputPort));
    id = nonInlineMeta.getName() + " " + idi.portName;
    Assert.assertEquals("number stream codecs " + id, idi.streamCodecs.size(), 1);
    checkPresentStreamCodec(nonInlineMeta, niInputPort, idi.streamCodecs, id, plan);
/*
    OperatorDeployInfo n3di = getSingleOperatorDeployInfo(node3, node3.getName(), dnm);

    OperatorDeployInfo.InputDeployInfo n3idi = getInputDeployInfo(n3di, n3meta.getMeta(node3.inport1));
    id = n3meta.getName() + " " + n3idi.portName;
    Assert.assertEquals("number stream codecs " + id, n3idi.streamCodecs.size(), 1);
    streamIdentifier.operName = n3meta.getName();
    streamIdentifier.portName = n3meta.getMeta(node3.inport1).getPortName();
    checkStreamCodecInfo(n3idi.streamCodecs, id, streamIdentifier, serDe2);
    */
}
Also used : GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) Operator(com.datatorrent.api.Operator) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) PhysicalPlan(com.datatorrent.stram.plan.physical.PhysicalPlan) OperatorDeployInfo(com.datatorrent.stram.api.OperatorDeployInfo) PTOperator(com.datatorrent.stram.plan.physical.PTOperator) GenericTestOperator(com.datatorrent.stram.engine.GenericTestOperator) StramTestSupport(com.datatorrent.stram.support.StramTestSupport) PTContainer(com.datatorrent.stram.plan.physical.PTContainer) LogicalPlan(com.datatorrent.stram.plan.logical.LogicalPlan) Test(org.junit.Test)
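
node1, node2, and node3 above are GenericTestOperator instances exposing an inport1 and an outport1. A minimal sketch of an operator with that shape, assuming BaseOperator, DefaultInputPort, and DefaultOutputPort from the Apex API; PassThroughOperator is a hypothetical name, not the actual test operator:

import com.datatorrent.api.DefaultInputPort;
import com.datatorrent.api.DefaultOutputPort;
import com.datatorrent.common.util.BaseOperator;

// Illustrative only: the rough shape of an operator whose input port can carry
// a STREAM_CODEC attribute, as in the test above.
public class PassThroughOperator extends BaseOperator {

    public final transient DefaultOutputPort<Object> outport1 = new DefaultOutputPort<>();

    public final transient DefaultInputPort<Object> inport1 = new DefaultInputPort<Object>() {
        @Override
        public void process(Object tuple) {
            // forward the tuple downstream; the codec configured on the receiving
            // input port decides how it is serialized between containers
            outport1.emit(tuple);
        }
    };
}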

Example 15 with Operator

use of com.datatorrent.api.Operator in project beam by apache.

the class TranslationContext method populateDAG.

public void populateDAG(DAG dag) {
    for (Map.Entry<String, Operator> nameAndOperator : this.operators.entrySet()) {
        dag.addOperator(nameAndOperator.getKey(), nameAndOperator.getValue());
    }
    int streamIndex = 0;
    for (Map.Entry<PCollection, Pair<OutputPortInfo, List<InputPortInfo>>> streamEntry : this.streams.entrySet()) {
        List<InputPortInfo> destInfo = streamEntry.getValue().getRight();
        InputPort[] sinks = new InputPort[destInfo.size()];
        for (int i = 0; i < sinks.length; i++) {
            sinks[i] = destInfo.get(i).port;
        }
        if (sinks.length > 0) {
            DAG.StreamMeta streamMeta = dag.addStream("stream" + streamIndex++, streamEntry.getValue().getLeft().port, sinks);
            if (pipelineOptions.isParDoFusionEnabled()) {
                optimizeStreams(streamMeta, streamEntry);
            }
            for (InputPort port : sinks) {
                PCollection pc = streamEntry.getKey();
                Coder coder = pc.getCoder();
                if (pc.getWindowingStrategy() != null) {
                    coder = FullWindowedValueCoder.of(pc.getCoder(), pc.getWindowingStrategy().getWindowFn().windowCoder());
                }
                Coder<Object> wrapperCoder = ApexStreamTuple.ApexStreamTupleCoder.of(coder);
                CoderAdapterStreamCodec streamCodec = new CoderAdapterStreamCodec(wrapperCoder);
                dag.setInputPortAttribute(port, PortContext.STREAM_CODEC, streamCodec);
            }
        }
    }
}
Also used : Operator(com.datatorrent.api.Operator) Coder(org.apache.beam.sdk.coders.Coder) FullWindowedValueCoder(org.apache.beam.sdk.util.WindowedValue.FullWindowedValueCoder) InputPort(com.datatorrent.api.Operator.InputPort) DAG(com.datatorrent.api.DAG) CoderAdapterStreamCodec(org.apache.beam.runners.apex.translation.utils.CoderAdapterStreamCodec) PCollection(org.apache.beam.sdk.values.PCollection) HashMap(java.util.HashMap) Map(java.util.Map) Pair(org.apache.commons.lang3.tuple.Pair) ImmutablePair(org.apache.commons.lang3.tuple.ImmutablePair)
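
populateDAG(DAG) is invoked when the Beam ApexRunner assembles the Apex application from the translated pipeline. A hedged sketch of how a method like this can be hooked into Apex's StreamingApplication entry point; TranslatedBeamApplication is a hypothetical wrapper and the real runner wiring is more involved:

import org.apache.hadoop.conf.Configuration;

import com.datatorrent.api.DAG;
import com.datatorrent.api.StreamingApplication;

// Illustrative only: delegates DAG construction to a TranslationContext built
// elsewhere from the Beam pipeline.
public class TranslatedBeamApplication implements StreamingApplication {

    private final TranslationContext translationContext;

    public TranslatedBeamApplication(TranslationContext translationContext) {
        this.translationContext = translationContext;
    }

    @Override
    public void populateDAG(DAG dag, Configuration conf) {
        // operators, streams, and per-port STREAM_CODEC attributes are added here
        translationContext.populateDAG(dag);
    }
}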

Aggregations

Operator (com.datatorrent.api.Operator): 26
GenericTestOperator (com.datatorrent.stram.engine.GenericTestOperator): 8
Test (org.junit.Test): 8
Checkpoint (com.datatorrent.stram.api.Checkpoint): 7
PTOperator (com.datatorrent.stram.plan.physical.PTOperator): 7
InputOperator (com.datatorrent.api.InputOperator): 6
HashMap (java.util.HashMap): 6
InputPort (com.datatorrent.api.Operator.InputPort): 5
OperatorDeployInfo (com.datatorrent.stram.api.OperatorDeployInfo): 5
PhysicalPlan (com.datatorrent.stram.plan.physical.PhysicalPlan): 5
Map (java.util.Map): 5
TestGeneratorInputOperator (com.datatorrent.stram.engine.TestGeneratorInputOperator): 4
LogicalPlan (com.datatorrent.stram.plan.logical.LogicalPlan): 4
OperatorMeta (com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta): 4
PTContainer (com.datatorrent.stram.plan.physical.PTContainer): 4
ArrayList (java.util.ArrayList): 4
DAG (com.datatorrent.api.DAG): 3
DefaultPartition (com.datatorrent.api.DefaultPartition): 3
Partitioner (com.datatorrent.api.Partitioner): 3
Partition (com.datatorrent.api.Partitioner.Partition): 3