Example 1 with ContainerHeartbeatResponse

Use of com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeatResponse in project apex-core by apache.

Class MockContainer, method sendHeartbeat:

public void sendHeartbeat() {
    ContainerStats cstats = new ContainerStats(sca.container.getExternalId());
    ContainerHeartbeat hb = new ContainerHeartbeat();
    hb.setContainerStats(cstats);
    for (Map.Entry<Integer, MockOperatorStats> oe : this.stats.entrySet()) {
        OperatorHeartbeat ohb = new OperatorHeartbeat();
        ohb.setNodeId(oe.getKey());
        ohb.setState(oe.getValue().deployState);
        OperatorStats lstats = new OperatorStats();
        lstats.checkpoint = new Checkpoint(oe.getValue().checkpointWindowId, 0, 0);
        lstats.windowId = oe.getValue().currentWindowId;
        //stats.outputPorts = Lists.newArrayList();
        //PortStats ps = new PortStats(TestGeneratorInputOperator.OUTPUT_PORT);
        //ps.bufferServerBytes = 101;
        //ps.tupleCount = 1;
        //stats.outputPorts.add(ps);
        ohb.windowStats = Lists.newArrayList(lstats);
        cstats.operators.add(ohb);
    }
    // the master should not issue another deploy request for operators already reported in the stats
    ContainerHeartbeatResponse chr = sca.dnmgr.processHeartbeat(hb);
    Assert.assertNull(chr.deployRequest);
}
Also used: Checkpoint (com.datatorrent.stram.api.Checkpoint), ContainerStats (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerStats), OperatorHeartbeat (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.OperatorHeartbeat), ContainerHeartbeatResponse (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeatResponse), OperatorStats (com.datatorrent.api.Stats.OperatorStats), Map (java.util.Map), ContainerHeartbeat (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeat)
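
To try the same exchange outside of MockContainer, the round trip the method performs can be distilled as follows. This is a sketch, not code from the project: the StreamingContainerManager instance, the container id, and the operator id are assumed to come from test setup, and the imports are the ones listed above.

static void reportOneActiveOperator(StreamingContainerManager dnmgr, String containerId, int operatorId) {
    ContainerStats cstats = new ContainerStats(containerId);
    OperatorHeartbeat ohb = new OperatorHeartbeat();
    ohb.setNodeId(operatorId);
    ohb.setState(OperatorHeartbeat.DeployState.ACTIVE);
    OperatorStats windowStats = new OperatorStats();
    windowStats.windowId = 1L;                          // pretend the operator just finished window 1
    windowStats.checkpoint = new Checkpoint(1L, 0, 0);  // and checkpointed it
    ohb.windowStats = Lists.newArrayList(windowStats);
    cstats.operators.add(ohb);
    ContainerHeartbeat hb = new ContainerHeartbeat();
    hb.setContainerStats(cstats);                       // the heartbeat's container id is carried by the stats
    ContainerHeartbeatResponse chr = dnmgr.processHeartbeat(hb);
    Assert.assertNull("no deploy request expected once the operator is reported", chr.deployRequest);
}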

Example 2 with ContainerHeartbeatResponse

Use of com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeatResponse in project apex-core by apache.

Class MockContainer, method deploy:

public void deploy() {
    Assert.assertNotNull(sca.container.getExternalId());
    Assert.assertEquals(PTContainer.State.ACTIVE, container.getState());
    //Assert.assertEquals(PTOperator.State.PENDING_DEPLOY, o1p1.getState());
    ContainerStats cstats = new ContainerStats(sca.container.getExternalId());
    ContainerHeartbeat hb = new ContainerHeartbeat();
    hb.setContainerStats(cstats);
    // get deploy request
    ContainerHeartbeatResponse chr = sca.dnmgr.processHeartbeat(hb);
    Assert.assertNotNull(chr.deployRequest);
    Assert.assertEquals("" + chr.deployRequest, container.getOperators().size(), chr.deployRequest.size());
    Assert.assertEquals(PTContainer.State.ACTIVE, container.getState());
    for (PTOperator oper : container.getOperators()) {
        Assert.assertEquals("state " + oper, PTOperator.State.PENDING_DEPLOY, oper.getState());
    }
}
Also used: ContainerStats (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerStats), PTOperator (com.datatorrent.stram.plan.physical.PTOperator), ContainerHeartbeatResponse (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeatResponse), ContainerHeartbeat (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeat)
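
Taken together, the two MockContainer snippets exercise a two-phase exchange: the first, empty heartbeat of a newly ALLOCATED container is answered with the deploy request, and later heartbeats that report the deployed operators are not. A condensed sketch of that first phase, with the StreamingContainerManager and container id again assumed to come from test setup:

static void expectDeployRequestOnFirstHeartbeat(StreamingContainerManager dnmgr, String containerId) {
    // empty heartbeat: no operator stats yet, exactly as in deploy() above
    ContainerHeartbeat hb = new ContainerHeartbeat();
    hb.setContainerStats(new ContainerStats(containerId));
    ContainerHeartbeatResponse chr = dnmgr.processHeartbeat(hb);
    // the master answers an ALLOCATED container with the list of operators to deploy
    Assert.assertNotNull(chr.deployRequest);
}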

Example 3 with ContainerHeartbeatResponse

Use of com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeatResponse in project apex-core by apache.

Class StramLocalCluster, method run:

@Override
@SuppressWarnings({ "SleepWhileInLoop", "ResultOfObjectAllocationIgnored" })
public void run(long runMillis) {
    if (!perContainerBufferServer) {
        StreamingContainer.eventloop.start();
        bufferServer = new Server(StreamingContainer.eventloop, 0, 1024 * 1024, 8);
        try {
            bufferServer.setSpoolStorage(new DiskStorage());
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        bufferServerAddress = InetSocketAddress.createUnresolved(LOCALHOST, bufferServer.run().getPort());
        LOG.info("Buffer server started: {}", bufferServerAddress);
    }
    long endMillis = System.currentTimeMillis() + runMillis;
    List<Thread> containerThreads = new LinkedList<>();
    while (!appDone) {
        for (String containerIdStr : dnmgr.containerStopRequests.values()) {
            // teardown child thread
            StreamingContainer c = childContainers.get(containerIdStr);
            if (c != null) {
                ContainerHeartbeatResponse r = new ContainerHeartbeatResponse();
                r.shutdown = StreamingContainerUmbilicalProtocol.ShutdownType.ABORT;
                c.processHeartbeatResponse(r);
            }
            dnmgr.containerStopRequests.remove(containerIdStr);
            LOG.info("Container {} restart.", containerIdStr);
            dnmgr.scheduleContainerRestart(containerIdStr);
            //dnmgr.removeContainerAgent(containerIdStr);
        }
        // start containers
        while (!dnmgr.containerStartRequests.isEmpty()) {
            ContainerStartRequest cdr = dnmgr.containerStartRequests.poll();
            if (cdr != null) {
                new LocalStreamingContainerLauncher(cdr, containerThreads);
            }
        }
        if (heartbeatMonitoringEnabled) {
            // monitor child containers
            dnmgr.monitorHeartbeat(false);
        }
        if (childContainers.isEmpty() && dnmgr.containerStartRequests.isEmpty()) {
            appDone = true;
        }
        if (runMillis > 0 && System.currentTimeMillis() > endMillis) {
            appDone = true;
        }
        try {
            if (exitCondition != null && exitCondition.call()) {
                appDone = true;
            }
        } catch (Exception ex) {
            break;
        }
        if (Thread.interrupted()) {
            break;
        }
        if (!appDone) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                LOG.debug("Sleep interrupted", e);
                break;
            }
        }
    }
    for (LocalStreamingContainer lsc : childContainers.values()) {
        injectShutdown.put(lsc.getContainerId(), lsc);
        lsc.triggerHeartbeat();
    }
    for (Thread thread : containerThreads) {
        try {
            thread.join(1000);
        } catch (InterruptedException e) {
            LOG.debug("Sleep interrupted", e);
        }
        if (thread.isAlive()) {
            LOG.warn("Container thread {} didn't finish", thread.getName());
        }
    }
    dnmgr.teardown();
    LOG.info("Application finished.");
    if (!perContainerBufferServer) {
        bufferServer.stop();
        StreamingContainer.eventloop.stop();
    }
}
Also used: StreamingContainer (com.datatorrent.stram.engine.StreamingContainer), ContainerStartRequest (com.datatorrent.stram.StreamingContainerAgent.ContainerStartRequest), Server (com.datatorrent.bufferserver.server.Server), ContainerHeartbeatResponse (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeatResponse), IOException (java.io.IOException), LinkedList (java.util.LinkedList), DiskStorage (com.datatorrent.bufferserver.storage.DiskStorage)
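
The run loop above is normally driven from a test. A sketch of such a driver follows; the StramLocalCluster constructor taking a LogicalPlan and the setExitCondition setter are assumed from how this class is used in the project's tests, so treat the exact signatures as illustrative rather than definitive.

// Hypothetical test driver for the run loop above (uses java.util.concurrent.Callable).
static void runUntilDone(LogicalPlan dag) throws Exception {
    StramLocalCluster localCluster = new StramLocalCluster(dag);   // assumed constructor
    localCluster.setExitCondition(new Callable<Boolean>() {        // assumed setter for the exitCondition field read above
        @Override
        public Boolean call() throws Exception {
            return expectedOutputObserved();                       // hypothetical helper supplied by the test
        }
    });
    // stop when the exit condition fires, all containers finish, or 30 seconds pass, whichever comes first
    localCluster.run(30 * 1000);
}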

Example 4 with ContainerHeartbeatResponse

Use of com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeatResponse in project apex-core by apache.

Class StreamingContainerManagerTest, method testDeployInfoSerialization:

@Test
public void testDeployInfoSerialization() throws Exception {
    OperatorDeployInfo ndi = new OperatorDeployInfo();
    ndi.name = "node1";
    ndi.type = OperatorDeployInfo.OperatorType.GENERIC;
    ndi.id = 1;
    ndi.contextAttributes = new com.datatorrent.api.Attribute.AttributeMap.DefaultAttributeMap();
    ndi.contextAttributes.put(OperatorContext.SPIN_MILLIS, 100);
    OperatorDeployInfo.InputDeployInfo input = new OperatorDeployInfo.InputDeployInfo();
    input.declaredStreamId = "streamToNode";
    input.portName = "inputPortNameOnNode";
    input.sourceNodeId = 99;
    ndi.inputs = new ArrayList<>();
    ndi.inputs.add(input);
    OperatorDeployInfo.OutputDeployInfo output = new OperatorDeployInfo.OutputDeployInfo();
    output.declaredStreamId = "streamFromNode";
    output.portName = "outputPortNameOnNode";
    ndi.outputs = new ArrayList<>();
    ndi.outputs.add(output);
    ContainerHeartbeatResponse scc = new ContainerHeartbeatResponse();
    scc.deployRequest = Collections.singletonList(ndi);
    DataOutputByteBuffer out = new DataOutputByteBuffer();
    scc.write(out);
    DataInputByteBuffer in = new DataInputByteBuffer();
    in.reset(out.getData());
    ContainerHeartbeatResponse clone = new ContainerHeartbeatResponse();
    clone.readFields(in);
    Assert.assertNotNull(clone.deployRequest);
    Assert.assertEquals(1, clone.deployRequest.size());
    OperatorDeployInfo ndiClone = clone.deployRequest.get(0);
    Assert.assertEquals("name", ndi.name, ndiClone.name);
    Assert.assertEquals("type", ndi.type, ndiClone.type);
    String nodeToString = ndi.toString();
    Assert.assertTrue(nodeToString.contains(input.portName));
    Assert.assertTrue(nodeToString.contains(output.portName));
    Assert.assertEquals("contextAttributes " + ndiClone.contextAttributes, Integer.valueOf(100), ndiClone.contextAttributes.get(OperatorContext.SPIN_MILLIS));
}
Also used: InputDeployInfo (com.datatorrent.stram.api.OperatorDeployInfo.InputDeployInfo), OperatorDeployInfo (com.datatorrent.stram.api.OperatorDeployInfo), DataInputByteBuffer (org.apache.hadoop.io.DataInputByteBuffer), ContainerHeartbeatResponse (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeatResponse), DataOutputByteBuffer (org.apache.hadoop.io.DataOutputByteBuffer), OutputDeployInfo (com.datatorrent.stram.api.OperatorDeployInfo.OutputDeployInfo), Test (org.junit.Test), PhysicalPlanTest (com.datatorrent.stram.plan.physical.PhysicalPlanTest)
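
The serialization pattern in this test (write to a DataOutputByteBuffer, reset a DataInputByteBuffer with its data, then readFields) works for any Hadoop Writable, so it can be factored into a small helper. A sketch, not from the project, assuming ContainerHeartbeatResponse implements Hadoop's Writable, which its write/readFields usage above suggests:

// Round-trips any Writable through an in-memory buffer, as the test above does for ContainerHeartbeatResponse.
static <T extends org.apache.hadoop.io.Writable> T roundTrip(T original, T emptyCopy) throws java.io.IOException {
    DataOutputByteBuffer out = new DataOutputByteBuffer();
    original.write(out);                 // serialize into in-memory buffers
    DataInputByteBuffer in = new DataInputByteBuffer();
    in.reset(out.getData());             // feed the written buffers back in
    emptyCopy.readFields(in);            // deserialize into the fresh instance
    return emptyCopy;
}

With it, the test's round trip becomes ContainerHeartbeatResponse clone = roundTrip(scc, new ContainerHeartbeatResponse());.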

Example 5 with ContainerHeartbeatResponse

Use of com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeatResponse in project apex-core by apache.

Class StreamingContainerManager, method processHeartbeat:

/**
   * Process the heartbeat from each container.
   * Called by the RPC thread for each container, i.e. potentially by multiple threads concurrently.
   *
   * @param heartbeat the container heartbeat
   * @return the heartbeat response
   */
@SuppressWarnings("StatementWithEmptyBody")
public ContainerHeartbeatResponse processHeartbeat(ContainerHeartbeat heartbeat) {
    long currentTimeMillis = clock.getTime();
    final StreamingContainerAgent sca = this.containers.get(heartbeat.getContainerId());
    if (sca == null || sca.container.getState() == PTContainer.State.KILLED) {
        // could be orphaned container that was replaced and needs to terminate
        LOG.error("Unknown container {}", heartbeat.getContainerId());
        ContainerHeartbeatResponse response = new ContainerHeartbeatResponse();
        response.shutdown = ShutdownType.ABORT;
        return response;
    }
    //LOG.debug("{} {} {}", new Object[]{sca.container.containerId, sca.container.bufferServerAddress, sca.container.getState()});
    if (sca.container.getState() == PTContainer.State.ALLOCATED) {
        // capture dynamically assigned address from container
        if (sca.container.bufferServerAddress == null && heartbeat.bufferServerHost != null) {
            sca.container.bufferServerAddress = InetSocketAddress.createUnresolved(heartbeat.bufferServerHost, heartbeat.bufferServerPort);
            LOG.info("Container {} buffer server: {}", sca.container.getExternalId(), sca.container.bufferServerAddress);
        }
        final long containerStartTime = System.currentTimeMillis();
        sca.container.setState(PTContainer.State.ACTIVE);
        sca.container.setStartedTime(containerStartTime);
        sca.container.setFinishedTime(-1);
        sca.jvmName = heartbeat.jvmName;
        poolExecutor.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    containerFile.append(sca.getContainerInfo());
                } catch (IOException ex) {
                    LOG.warn("Cannot write to container file");
                }
                for (PTOperator ptOp : sca.container.getOperators()) {
                    try {
                        JSONObject operatorInfo = new JSONObject();
                        operatorInfo.put("name", ptOp.getName());
                        operatorInfo.put("id", ptOp.getId());
                        operatorInfo.put("container", sca.container.getExternalId());
                        operatorInfo.put("startTime", containerStartTime);
                        operatorFile.append(operatorInfo);
                    } catch (IOException | JSONException ex) {
                        LOG.warn("Cannot write to operator file: ", ex);
                    }
                }
            }
        });
    }
    sca.containerStackTrace = heartbeat.stackTrace;
    if (heartbeat.restartRequested) {
        LOG.error("Container {} restart request", sca.container.getExternalId());
        containerStopRequests.put(sca.container.getExternalId(), sca.container.getExternalId());
    }
    sca.memoryMBFree = heartbeat.memoryMBFree;
    sca.gcCollectionCount = heartbeat.gcCollectionCount;
    sca.gcCollectionTime = heartbeat.gcCollectionTime;
    sca.undeployOpers.clear();
    sca.deployOpers.clear();
    if (!this.deployChangeInProgress.get()) {
        sca.deployCnt = this.deployChangeCnt;
    }
    Set<Integer> reportedOperators = Sets.newHashSetWithExpectedSize(sca.container.getOperators().size());
    for (OperatorHeartbeat shb : heartbeat.getContainerStats().operators) {
        long maxEndWindowTimestamp = 0;
        reportedOperators.add(shb.nodeId);
        PTOperator oper = this.plan.getAllOperators().get(shb.getNodeId());
        if (oper == null) {
            LOG.info("Heartbeat for unknown operator {} (container {})", shb.getNodeId(), heartbeat.getContainerId());
            sca.undeployOpers.add(shb.nodeId);
            continue;
        }
        if (shb.requestResponse != null) {
            for (StatsListener.OperatorResponse obj : shb.requestResponse) {
                if (obj instanceof OperatorResponse) {
                    // This is to identify platform requests
                    commandResponse.put((Long) obj.getResponseId(), obj.getResponse());
                    LOG.debug(" Got back the response {} for the request {}", obj, obj.getResponseId());
                } else {
                    // This is to identify user requests
                    oper.stats.responses.add(obj);
                }
            }
        }
        //LOG.debug("heartbeat {} {}/{} {}", oper, oper.getState(), shb.getState(), oper.getContainer().getExternalId());
        if (!(oper.getState() == PTOperator.State.ACTIVE && shb.getState() == OperatorHeartbeat.DeployState.ACTIVE)) {
            // deploy state may require synchronization
            processOperatorDeployStatus(oper, shb, sca);
        }
        oper.stats.lastHeartbeat = shb;
        List<ContainerStats.OperatorStats> statsList = shb.getOperatorStatsContainer();
        if (!statsList.isEmpty()) {
            long tuplesProcessed = 0;
            long tuplesEmitted = 0;
            long totalCpuTimeUsed = 0;
            int statCount = 0;
            long maxDequeueTimestamp = -1;
            oper.stats.recordingId = null;
            final OperatorStatus status = oper.stats;
            status.statsRevs.checkout();
            for (Map.Entry<String, PortStatus> entry : status.inputPortStatusList.entrySet()) {
                entry.getValue().recordingId = null;
            }
            for (Map.Entry<String, PortStatus> entry : status.outputPortStatusList.entrySet()) {
                entry.getValue().recordingId = null;
            }
            for (ContainerStats.OperatorStats stats : statsList) {
                if (stats == null) {
                    LOG.warn("Operator {} statistics list contains null element", shb.getNodeId());
                    continue;
                }
                /* report checkpoint-ed WindowId status of the operator */
                if (stats.checkpoint instanceof Checkpoint) {
                    if (oper.getRecentCheckpoint() == null || oper.getRecentCheckpoint().windowId < stats.checkpoint.getWindowId()) {
                        addCheckpoint(oper, (Checkpoint) stats.checkpoint);
                        if (stats.checkpointStats != null) {
                            status.checkpointStats = stats.checkpointStats;
                            status.checkpointTimeMA.add(stats.checkpointStats.checkpointTime);
                        }
                        oper.failureCount = 0;
                    }
                }
                oper.stats.recordingId = stats.recordingId;
                /* report all the other stuff */
                // calculate the stats related to end window
                // end window stats for a particular window id for a particular node
                EndWindowStats endWindowStats = new EndWindowStats();
                Collection<ContainerStats.OperatorStats.PortStats> ports = stats.inputPorts;
                if (ports != null) {
                    Set<String> currentInputPortSet = Sets.newHashSetWithExpectedSize(ports.size());
                    for (ContainerStats.OperatorStats.PortStats s : ports) {
                        currentInputPortSet.add(s.id);
                        PortStatus ps = status.inputPortStatusList.get(s.id);
                        if (ps == null) {
                            ps = status.new PortStatus();
                            ps.portName = s.id;
                            status.inputPortStatusList.put(s.id, ps);
                        }
                        ps.totalTuples += s.tupleCount;
                        ps.recordingId = s.recordingId;
                        tuplesProcessed += s.tupleCount;
                        endWindowStats.dequeueTimestamps.put(s.id, s.endWindowTimestamp);
                        Pair<Integer, String> operatorPortName = new Pair<>(oper.getId(), s.id);
                        Long lastEndWindowTimestamp = operatorPortLastEndWindowTimestamps.get(operatorPortName);
                        if (lastEndWindowTimestamp == null) {
                            lastEndWindowTimestamp = lastStatsTimestamp;
                        }
                        long portElapsedMillis = Math.max(s.endWindowTimestamp - lastEndWindowTimestamp, 0);
                        //LOG.debug("=== PROCESSED TUPLE COUNT for {}: {}, {}, {}, {}", operatorPortName, s.tupleCount, portElapsedMillis, operatorPortLastEndWindowTimestamps.get(operatorPortName), lastStatsTimestamp);
                        ps.tuplesPMSMA.add(s.tupleCount, portElapsedMillis);
                        ps.bufferServerBytesPMSMA.add(s.bufferServerBytes, portElapsedMillis);
                        ps.queueSizeMA.add(s.queueSize);
                        operatorPortLastEndWindowTimestamps.put(operatorPortName, s.endWindowTimestamp);
                        if (maxEndWindowTimestamp < s.endWindowTimestamp) {
                            maxEndWindowTimestamp = s.endWindowTimestamp;
                        }
                        if (s.endWindowTimestamp > maxDequeueTimestamp) {
                            maxDequeueTimestamp = s.endWindowTimestamp;
                        }
                    }
                    // need to remove dead ports, for unifiers
                    Iterator<Map.Entry<String, PortStatus>> it = status.inputPortStatusList.entrySet().iterator();
                    while (it.hasNext()) {
                        Map.Entry<String, PortStatus> entry = it.next();
                        if (!currentInputPortSet.contains(entry.getKey())) {
                            it.remove();
                        }
                    }
                }
                ports = stats.outputPorts;
                if (ports != null) {
                    Set<String> currentOutputPortSet = Sets.newHashSetWithExpectedSize(ports.size());
                    for (ContainerStats.OperatorStats.PortStats s : ports) {
                        currentOutputPortSet.add(s.id);
                        PortStatus ps = status.outputPortStatusList.get(s.id);
                        if (ps == null) {
                            ps = status.new PortStatus();
                            ps.portName = s.id;
                            status.outputPortStatusList.put(s.id, ps);
                        }
                        ps.totalTuples += s.tupleCount;
                        ps.recordingId = s.recordingId;
                        tuplesEmitted += s.tupleCount;
                        Pair<Integer, String> operatorPortName = new Pair<>(oper.getId(), s.id);
                        Long lastEndWindowTimestamp = operatorPortLastEndWindowTimestamps.get(operatorPortName);
                        if (lastEndWindowTimestamp == null) {
                            lastEndWindowTimestamp = lastStatsTimestamp;
                        }
                        long portElapsedMillis = Math.max(s.endWindowTimestamp - lastEndWindowTimestamp, 0);
                        //LOG.debug("=== EMITTED TUPLE COUNT for {}: {}, {}, {}, {}", operatorPortName, s.tupleCount, portElapsedMillis, operatorPortLastEndWindowTimestamps.get(operatorPortName), lastStatsTimestamp);
                        ps.tuplesPMSMA.add(s.tupleCount, portElapsedMillis);
                        ps.bufferServerBytesPMSMA.add(s.bufferServerBytes, portElapsedMillis);
                        operatorPortLastEndWindowTimestamps.put(operatorPortName, s.endWindowTimestamp);
                        if (maxEndWindowTimestamp < s.endWindowTimestamp) {
                            maxEndWindowTimestamp = s.endWindowTimestamp;
                        }
                    }
                    if (ports.size() > 0) {
                        endWindowStats.emitTimestamp = ports.iterator().next().endWindowTimestamp;
                    }
                    // need to remove dead ports, for unifiers
                    Iterator<Map.Entry<String, PortStatus>> it = status.outputPortStatusList.entrySet().iterator();
                    while (it.hasNext()) {
                        Map.Entry<String, PortStatus> entry = it.next();
                        if (!currentOutputPortSet.contains(entry.getKey())) {
                            it.remove();
                        }
                    }
                }
                // (we don't know the latency for output operators because they don't emit tuples)
                if (endWindowStats.emitTimestamp < 0) {
                    endWindowStats.emitTimestamp = maxDequeueTimestamp;
                }
                if (status.currentWindowId.get() != stats.windowId) {
                    status.lastWindowIdChangeTms = currentTimeMillis;
                    status.currentWindowId.set(stats.windowId);
                }
                totalCpuTimeUsed += stats.cpuTimeUsed;
                statCount++;
                if (oper.getOperatorMeta().getValue(OperatorContext.COUNTERS_AGGREGATOR) != null) {
                    endWindowStats.counters = stats.counters;
                }
                if (oper.getOperatorMeta().getMetricAggregatorMeta() != null && oper.getOperatorMeta().getMetricAggregatorMeta().getAggregator() != null) {
                    endWindowStats.metrics = stats.metrics;
                }
                if (stats.windowId > currentEndWindowStatsWindowId) {
                    Map<Integer, EndWindowStats> endWindowStatsMap = endWindowStatsOperatorMap.get(stats.windowId);
                    if (endWindowStatsMap == null) {
                        endWindowStatsMap = new ConcurrentSkipListMap<>();
                        Map<Integer, EndWindowStats> endWindowStatsMapPrevious = endWindowStatsOperatorMap.putIfAbsent(stats.windowId, endWindowStatsMap);
                        if (endWindowStatsMapPrevious != null) {
                            endWindowStatsMap = endWindowStatsMapPrevious;
                        }
                    }
                    endWindowStatsMap.put(shb.getNodeId(), endWindowStats);
                    Set<Integer> allCurrentOperators = plan.getAllOperators().keySet();
                    int numOperators = plan.getAllOperators().size();
                    if (allCurrentOperators.containsAll(endWindowStatsMap.keySet()) && endWindowStatsMap.size() == numOperators) {
                        completeEndWindowStatsWindowId = stats.windowId;
                    }
                }
            }
            status.totalTuplesProcessed.add(tuplesProcessed);
            status.totalTuplesEmitted.add(tuplesEmitted);
            OperatorMeta logicalOperator = oper.getOperatorMeta();
            LogicalOperatorStatus logicalStatus = logicalOperator.getStatus();
            if (!oper.isUnifier()) {
                logicalStatus.totalTuplesProcessed += tuplesProcessed;
                logicalStatus.totalTuplesEmitted += tuplesEmitted;
            }
            long lastMaxEndWindowTimestamp = operatorLastEndWindowTimestamps.containsKey(oper.getId()) ? operatorLastEndWindowTimestamps.get(oper.getId()) : lastStatsTimestamp;
            if (maxEndWindowTimestamp >= lastMaxEndWindowTimestamp) {
                double tuplesProcessedPMSMA = 0.0;
                double tuplesEmittedPMSMA = 0.0;
                if (statCount != 0) {
                    //LOG.debug("CPU for {}: {} / {} - {}", oper.getId(), totalCpuTimeUsed, maxEndWindowTimestamp, lastMaxEndWindowTimestamp);
                    status.cpuNanosPMSMA.add(totalCpuTimeUsed, maxEndWindowTimestamp - lastMaxEndWindowTimestamp);
                }
                for (PortStatus ps : status.inputPortStatusList.values()) {
                    tuplesProcessedPMSMA += ps.tuplesPMSMA.getAvg();
                }
                for (PortStatus ps : status.outputPortStatusList.values()) {
                    tuplesEmittedPMSMA += ps.tuplesPMSMA.getAvg();
                }
                status.tuplesProcessedPSMA.set(Math.round(tuplesProcessedPMSMA * 1000));
                status.tuplesEmittedPSMA.set(Math.round(tuplesEmittedPMSMA * 1000));
            } else {
            //LOG.warn("This timestamp for {} is lower than the previous!! {} < {}", oper.getId(),
            // maxEndWindowTimestamp, lastMaxEndWindowTimestamp);
            }
            operatorLastEndWindowTimestamps.put(oper.getId(), maxEndWindowTimestamp);
            status.listenerStats.add(statsList);
            this.reportStats.put(oper, oper);
            status.statsRevs.commit();
        }
        if (lastStatsTimestamp < maxEndWindowTimestamp) {
            lastStatsTimestamp = maxEndWindowTimestamp;
        }
    }
    sca.lastHeartbeatMillis = currentTimeMillis;
    for (PTOperator oper : sca.container.getOperators()) {
        if (!reportedOperators.contains(oper.getId())) {
            processOperatorDeployStatus(oper, null, sca);
        }
    }
    ContainerHeartbeatResponse rsp = getHeartbeatResponse(sca);
    if (heartbeat.getContainerStats().operators.isEmpty() && isApplicationIdle()) {
        LOG.info("requesting idle shutdown for container {}", heartbeat.getContainerId());
        rsp.shutdown = ShutdownType.ABORT;
    } else {
        if (sca.isShutdownRequested()) {
            LOG.info("requesting shutdown for container {}", heartbeat.getContainerId());
            rsp.shutdown = sca.shutdownRequest;
        }
    }
    List<StramToNodeRequest> requests = rsp.nodeRequests != null ? rsp.nodeRequests : new ArrayList<StramToNodeRequest>();
    ConcurrentLinkedQueue<StramToNodeRequest> operatorRequests = sca.getOperatorRequests();
    while (true) {
        StramToNodeRequest r = operatorRequests.poll();
        if (r == null) {
            break;
        }
        requests.add(r);
    }
    rsp.nodeRequests = requests;
    rsp.committedWindowId = committedWindowId;
    rsp.stackTraceRequired = sca.stackTraceRequested;
    sca.stackTraceRequested = false;
    apexPluginDispatcher.dispatch(new DAGExecutionEvent.HeartbeatExecutionEvent(heartbeat));
    return rsp;
}
Also used: StramToNodeRequest (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.StramToNodeRequest), OperatorResponse (com.datatorrent.stram.engine.OperatorResponse), OperatorStats (com.datatorrent.api.Stats.OperatorStats), PortStatus (com.datatorrent.stram.plan.physical.OperatorStatus.PortStatus), DAGExecutionEvent (org.apache.apex.engine.api.plugin.DAGExecutionEvent), LogicalOperatorStatus (com.datatorrent.stram.plan.logical.LogicalOperatorStatus), Pair (com.datatorrent.common.util.Pair), PortContextPair (com.datatorrent.stram.plan.logical.Operators.PortContextPair), ContainerStats (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerStats), PTOperator (com.datatorrent.stram.plan.physical.PTOperator), OperatorMeta (com.datatorrent.stram.plan.logical.LogicalPlan.OperatorMeta), OperatorHeartbeat (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.OperatorHeartbeat), ContainerHeartbeatResponse (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeatResponse), IOException (java.io.IOException), StatsListener (com.datatorrent.api.StatsListener), Checkpoint (com.datatorrent.stram.api.Checkpoint), JSONObject (org.codehaus.jettison.json.JSONObject), OperatorStatus (com.datatorrent.stram.plan.physical.OperatorStatus), MutableLong (org.apache.commons.lang3.mutable.MutableLong), MovingAverageLong (com.datatorrent.stram.util.MovingAverage.MovingAverageLong), AtomicLong (java.util.concurrent.atomic.AtomicLong), Map (java.util.Map), LinkedHashMap (java.util.LinkedHashMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), ConcurrentMap (java.util.concurrent.ConcurrentMap), ConcurrentSkipListMap (java.util.concurrent.ConcurrentSkipListMap)
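
One branch worth calling out is the very first one: a heartbeat whose container id the master does not recognize, or whose container is already KILLED, is answered with an ABORT shutdown rather than an exception. A minimal sketch of exercising that path, with the StreamingContainerManager again assumed to come from test setup and imports as listed above:

static void expectAbortForUnknownContainer(StreamingContainerManager dnmgr) {
    ContainerHeartbeat orphan = new ContainerHeartbeat();
    orphan.setContainerStats(new ContainerStats("container_unknown"));   // an id the master never allocated
    ContainerHeartbeatResponse rsp = dnmgr.processHeartbeat(orphan);
    // per the first branch of processHeartbeat above, the orphaned container is told to abort
    Assert.assertEquals(StreamingContainerUmbilicalProtocol.ShutdownType.ABORT, rsp.shutdown);
}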

Aggregations

ContainerHeartbeatResponse (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeatResponse): 8 usages
ContainerStats (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerStats): 5 usages
Checkpoint (com.datatorrent.stram.api.Checkpoint): 4 usages
ContainerHeartbeat (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.ContainerHeartbeat): 4 usages
OperatorHeartbeat (com.datatorrent.stram.api.StreamingContainerUmbilicalProtocol.OperatorHeartbeat): 4 usages
PTOperator (com.datatorrent.stram.plan.physical.PTOperator): 4 usages
OperatorStats (com.datatorrent.api.Stats.OperatorStats): 3 usages
Map (java.util.Map): 3 usages
StatsListener (com.datatorrent.api.StatsListener): 2 usages
OperatorDeployInfo (com.datatorrent.stram.api.OperatorDeployInfo): 2 usages
PortStatus (com.datatorrent.stram.plan.physical.OperatorStatus.PortStatus): 2 usages
PhysicalPlanTest (com.datatorrent.stram.plan.physical.PhysicalPlanTest): 2 usages
IOException (java.io.IOException): 2 usages
HashMap (java.util.HashMap): 2 usages
LinkedHashMap (java.util.LinkedHashMap): 2 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2 usages
PortStats (com.datatorrent.api.Stats.OperatorStats.PortStats): 1 usage
Server (com.datatorrent.bufferserver.server.Server): 1 usage
DiskStorage (com.datatorrent.bufferserver.storage.DiskStorage): 1 usage
Pair (com.datatorrent.common.util.Pair): 1 usage