Use of org.knime.core.node.workflow.Workflow.NodeAndInports in project knime-core by knime.
Class WorkflowManager, method restartLoop.
/**
* Restart execution of a loop if possible. Can delay restart if we are still waiting for some node in the loop body
* (or any dangling loop branches) to finish execution
*
* @param slc FlowLoopContext of the actual loop
*/
private void restartLoop(final FlowLoopContext slc) throws IllegalLoopException {
assert m_workflowLock.isHeldByCurrentThread();
NodeContainer tailNode = m_workflow.getNode(slc.getTailNode());
NodeContainer headNode = m_workflow.getNode(slc.getOwner());
if ((tailNode == null) || (headNode == null)) {
throw new IllegalLoopException("Loop Nodes must both be in the same workflow!");
}
if (!(tailNode instanceof NativeNodeContainer) || !(headNode instanceof NativeNodeContainer)) {
throw new IllegalLoopException("Loop Nodes must both be NativeNodeContainers!");
}
// (1) find all intermediate nodes, the loop's "body"
ArrayList<NodeAndInports> loopBodyNodes = m_workflow.findAllNodesConnectedToLoopBody(headNode.getID(), tailNode.getID());
// (2) check whether any of those nodes are still executing (we are
// marking/queuing those nodes already in doAfterExecute to fix bug 2292!)
for (NodeAndInports nai : loopBodyNodes) {
NodeID id = nai.getID();
NodeContainer currNode = m_workflow.getNode(id);
if (currNode.getInternalState().isExecutionInProgress()) {
// stop right here - loop cannot yet be restarted!
currNode.addWaitingLoop(slc);
return;
}
}
// (3) mark the origin of the loop to be executed again
// do this now so that we have an executing node in this WFM
// and an intermediate state does not suggest everything is done.
// (this used to happen before (9))
// NOTE: if we ever queue nodes asynchronously this might cause problems.
NativeNodeContainer headNNC = ((NativeNodeContainer) headNode);
assert headNNC.isModelCompatibleTo(LoopStartNode.class);
headNNC.markForReExecution(new ExecutionEnvironment(true, null, false));
// clean up all newly added objects on the FlowVariable stack
// (otherwise we would push the same variables many times;
// pushing the ISLC back onto the stack is done in doBeforeExecute()!)
final FlowObjectStack headOutgoingStack = headNNC.getOutgoingFlowObjectStack();
headOutgoingStack.pop(InnerFlowLoopExecuteMarker.class);
FlowLoopContext flc = headOutgoingStack.peek(FlowLoopContext.class);
assert !flc.isInactiveScope();
flc.incrementIterationIndex();
// (4-7) reset/configure loop body - or not...
if (headNNC.resetAndConfigureLoopBody()) {
// (4a) reset the nodes in the body (only those -
// make sure end of loop is NOT reset). Make sure reset()
// is performed in the correct order (last nodes first!)
ListIterator<NodeAndInports> li = loopBodyNodes.listIterator(loopBodyNodes.size());
while (li.hasPrevious()) {
NodeAndInports nai = li.previous();
NodeID id = nai.getID();
NodeContainer nc = m_workflow.getNode(id);
if (nc == null) {
throw new IllegalLoopException("Node in loop body not in same workflow as head&tail!");
} else if (!nc.isResetable()) {
// do not warn - this can actually happen if we (try to) enter a metanode with two inports twice.
continue;
}
if (nc instanceof SingleNodeContainer) {
invokeResetOnSingleNodeContainer((SingleNodeContainer) nc);
} else {
assert nc instanceof WorkflowManager;
// only reset the nodes connected to relevant ports.
// See also bug 2225
((WorkflowManager) nc).resetNodesInWFMConnectedToInPorts(nai.getInports());
}
}
// clean outports of start but do not call reset
headNNC.cleanOutPorts(true);
// (5a) configure the nodes from the start node onwards (it's not
// so important if we configure more than the body)
// do NOT configure start of loop because otherwise
// we will re-create the FlowObjectStack and
// remove the loop-object as well!
configureNodeAndPortSuccessors(headNode.getID(), null, false, true, false);
// check that the tail node is really configured (failing configures in
// loop body nodes do NOT affect the state of the tailNode)
if (tailNode.getInternalState().equals(CONFIGURED_MARKEDFOREXEC)) {
// (6a) ... we enable the body to be queued again.
for (NodeAndInports nai : loopBodyNodes) {
NodeID id = nai.getID();
NodeContainer nc = m_workflow.getNode(id);
if (nc instanceof SingleNodeContainer) {
// make sure it's not already done...
if (nc.getInternalState().equals(IDLE) || nc.getInternalState().equals(CONFIGURED)) {
((SingleNodeContainer) nc).markForExecution(true);
}
} else {
// mark only idle or configured nodes that are part of
// the flow for execution (markExecutedNodes == false)
((WorkflowManager) nc).markForExecutionNodesInWFMConnectedToInPorts(nai.getInports(), false);
}
}
// // and (7a) mark end of loop for re-execution
// not needed anymore: end-of-loop state _is_ MARKEDFOREXEC!
// ((SingleNodeContainer)tailNode).markForExecution(true);
} else {
// configure of tailNode failed! Abort execution of loop:
// unqueue head node
headNNC.markForExecution(false);
// and bail:
throw new IllegalLoopException("Loop end node could not be executed." + " This is likely due to a failure in the loop's body. Aborting Loop execution.");
}
} else {
// (4b-5b) skip reset/configure... just clean outports
ListIterator<NodeAndInports> li = loopBodyNodes.listIterator(loopBodyNodes.size());
while (li.hasPrevious()) {
NodeAndInports nai = li.previous();
NodeID id = nai.getID();
NodeContainer nc = m_workflow.getNode(id);
if (nc == null) {
throw new IllegalLoopException("Node in loop body not in same workflow as head&tail!");
}
if (nc instanceof SingleNodeContainer) {
((SingleNodeContainer) nc).cleanOutPorts(true);
} else {
WorkflowManager wm = (WorkflowManager) nc;
wm.cleanOutputPortsInWFMConnectedToInPorts(nai.getInports());
}
}
// clean outports of start but do not call reset
headNNC.cleanOutPorts(true);
// (6b) ...only re-"mark" loop body (tail is already marked)
for (NodeAndInports nai : loopBodyNodes) {
NodeID id = nai.getID();
NodeContainer nc = m_workflow.getNode(id);
if (nc instanceof SingleNodeContainer) {
// only executed nodes (the body was not reset) need a re-execution mark...
if (nc.getInternalState().equals(EXECUTED)) {
((SingleNodeContainer) nc).markForReExecution(new ExecutionEnvironment(false, null, false));
}
} else {
// Mark executed nodes for re-execution (will also mark
// queued and idle nodes but those don't exist)
((WorkflowManager) nc).markForExecutionNodesInWFMConnectedToInPorts(nai.getInports(), true);
}
}
// see above - state is ok
assert tailNode.getInternalState().equals(CONFIGURED_MARKEDFOREXEC);
}
// (8) allow access to tail node
((NativeNodeContainer) headNode).getNode().setLoopEndNode(((NativeNodeContainer) tailNode).getNode());
// (9) and finally try to queue the head of this loop!
assert headNode.getInternalState().equals(EXECUTED_MARKEDFOREXEC);
queueIfQueuable(headNode);
}
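A detail worth highlighting in steps (4a) and (4b): the loop body is walked in reverse, so downstream nodes are reset or cleaned before their upstream predecessors. A minimal, self-contained sketch of that ListIterator pattern follows; the BodyNode record is a hypothetical stand-in for KNIME's NodeAndInports, invented here for illustration.

import java.util.List;
import java.util.ListIterator;

public class ReverseResetSketch {

    // hypothetical stand-in for NodeAndInports; the real entries also carry
    // the set of inport indices through which each node is reached
    record BodyNode(String name) { }

    public static void main(final String[] args) {
        List<BodyNode> loopBody = List.of(
            new BodyNode("rowFilter"), new BodyNode("joiner"), new BodyNode("groupBy"));
        // position the iterator *after* the last element and walk backwards,
        // so downstream nodes are handled before their upstream predecessors
        ListIterator<BodyNode> li = loopBody.listIterator(loopBody.size());
        while (li.hasPrevious()) {
            System.out.println("reset " + li.previous().name());
        }
        // prints: reset groupBy, reset joiner, reset rowFilter
    }
}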
Use of org.knime.core.node.workflow.Workflow.NodeAndInports in project knime-core by knime.
Class WorkflowManager, method resetNodesInWFMConnectedToInPorts.
/**
* Reset all nodes in this workflow that are connected to the given inports. The reset is performed in the correct
* order, that is, downstream nodes are reset first. Note that this routine will NOT trigger any resets of nodes
* connected to possible outports of this WFM.
*
* @param inPorts set of port indices of the WFM.
*/
void resetNodesInWFMConnectedToInPorts(final Set<Integer> inPorts) {
try (WorkflowLock lock = lock()) {
if (!isResetable()) {
// only attempt to do this if possible.
return;
}
// first make sure we clean up indirectly affected
// loop start nodes inside this WFM
resetAndConfigureAffectedLoopContext(this.getID(), inPorts);
// now find all nodes that are directly affected:
ArrayList<NodeAndInports> nodes = m_workflow.findAllConnectedNodes(inPorts);
ListIterator<NodeAndInports> li = nodes.listIterator(nodes.size());
while (li.hasPrevious()) {
NodeAndInports nai = li.previous();
NodeContainer nc = m_workflow.getNode(nai.getID());
if (nc.isResetable()) {
if (nc instanceof SingleNodeContainer) {
// reset node
invokeResetOnSingleNodeContainer((SingleNodeContainer) nc);
} else {
assert nc instanceof WorkflowManager;
((WorkflowManager) nc).resetNodesInWFMConnectedToInPorts(nai.getInports());
}
}
}
lock.queueCheckForNodeStateChangeNotification(false);
}
}
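All of these helpers take the workflow lock via try-with-resources (try (WorkflowLock lock = lock()) { ... }). As a rough sketch of why that works: an AutoCloseable handle around a plain ReentrantLock is enough to guarantee the unlock even when the guarded block throws. This illustrates the pattern only; it is not KNIME's actual WorkflowLock, which additionally batches state-change notifications via queueCheckForNodeStateChangeNotification.

import java.util.concurrent.locks.ReentrantLock;

// illustration only: an AutoCloseable handle around a ReentrantLock so that
// callers can write `try (LockHandle h = LockHandle.acquire(lock)) { ... }`
// and the lock is released even if the guarded block throws
final class LockHandle implements AutoCloseable {
    private final ReentrantLock m_lock;

    private LockHandle(final ReentrantLock lock) {
        m_lock = lock;
    }

    static LockHandle acquire(final ReentrantLock lock) {
        lock.lock();
        return new LockHandle(lock);
    }

    @Override
    public void close() {
        m_lock.unlock();
    }
}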
Use of org.knime.core.node.workflow.Workflow.NodeAndInports in project knime-core by knime.
Class WorkflowManager, method markForExecutionNodesInWFMConnectedToInPorts.
/**
* Mark for execution all nodes in this workflow that are connected to the given inports. Note that this routine
* will NOT trigger any actions on nodes connected to possible outports of this WFM.
*
* @param inPorts set of port indices of the WFM.
* @param markExecutedNodes if true also (re)mark executed nodes.
*/
void markForExecutionNodesInWFMConnectedToInPorts(final Set<Integer> inPorts, final boolean markExecutedNodes) {
try (WorkflowLock lock = lock()) {
// will be true in case of state changes
boolean changed = false;
ArrayList<NodeAndInports> nodes = m_workflow.findAllConnectedNodes(inPorts);
for (NodeAndInports nai : nodes) {
NodeContainer nc = m_workflow.getNode(nai.getID());
if (nc instanceof SingleNodeContainer) {
SingleNodeContainer snc = (SingleNodeContainer) nc;
switch (nc.getInternalState()) {
case CONFIGURED:
case IDLE:
changed = true;
snc.markForExecution(true);
break;
case EXECUTED:
if (markExecutedNodes) {
// in case of loop bodies that asked not to be reset.
changed = true;
snc.markForReExecution(new ExecutionEnvironment(true, null, false));
break;
}
default:
}
} else {
assert nc instanceof WorkflowManager;
WorkflowManager wfm = (WorkflowManager) nc;
// no need to set the "changed" flag here as the child
// will propagate state changes by calling
// queueCheckForNodeStateChangeNotification (likely too often)
wfm.markForExecutionNodesInWFMConnectedToInPorts(nai.getInports(), markExecutedNodes);
}
}
if (changed) {
lock.queueCheckForNodeStateChangeNotification(true);
}
}
}
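The switch above has one subtlety: in the EXECUTED case the break sits inside the if (markExecutedNodes) block, so when markExecutedNodes is false the case falls through to the empty default and the node is left untouched. A reduced sketch of that control flow, using a hypothetical State enum rather than KNIME's InternalNodeContainerState:

class MarkSwitchSketch {

    enum State { IDLE, CONFIGURED, EXECUTED, EXECUTING }

    // reduced version of the switch in markForExecutionNodesInWFMConnectedToInPorts:
    // EXECUTED only acts (and breaks) if re-marking was requested, otherwise it
    // falls through to the empty default and nothing happens
    static String action(final State s, final boolean markExecutedNodes) {
        switch (s) {
            case CONFIGURED:
            case IDLE:
                return "markForExecution";
            case EXECUTED:
                if (markExecutedNodes) {
                    return "markForReExecution";
                }
                // intentional fall-through when markExecutedNodes is false
            default:
                return "no-op";
        }
    }

    public static void main(final String[] args) {
        System.out.println(action(State.EXECUTED, true));   // markForReExecution
        System.out.println(action(State.EXECUTED, false));  // no-op
        System.out.println(action(State.EXECUTING, true));  // no-op
    }
}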
Use of org.knime.core.node.workflow.Workflow.NodeAndInports in project knime-core by knime.
Class WorkflowManager, method cleanOutputPortsInWFMConnectedToInPorts.
/**
* Clean outports of nodes connected to the given set of input ports. Used while restarting a loop whose body is
* not to be reset (a special option of loop start nodes). Clearing is done in the correct order: downstream nodes
* first.
*
* @param inPorts set of port indices of the WFM.
*/
void cleanOutputPortsInWFMConnectedToInPorts(final Set<Integer> inPorts) {
try (WorkflowLock lock = lock()) {
ArrayList<NodeAndInports> nodes = m_workflow.findAllConnectedNodes(inPorts);
ListIterator<NodeAndInports> li = nodes.listIterator(nodes.size());
while (li.hasPrevious()) {
NodeAndInports nai = li.previous();
NodeContainer nc = m_workflow.getNode(nai.getID());
if (nc.isResetable()) {
if (nc instanceof SingleNodeContainer) {
((SingleNodeContainer) nc).cleanOutPorts(true);
} else {
assert nc instanceof WorkflowManager;
((WorkflowManager) nc).cleanOutputPortsInWFMConnectedToInPorts(nai.getInports());
}
}
}
}
}
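What makes these traversals recursive is the inport set carried by each NodeAndInports entry: when a body node is itself a metanode (a nested WorkflowManager), the operation descends into it restricted to the inports that are actually connected. A hypothetical mini-model of that descent; all names here are invented for illustration and do not exist in knime-core.

import java.util.List;
import java.util.Set;

public class NestedCleanSketch {

    sealed interface MiniNode permits Leaf, Nested { }

    // a plain node: cleaning just drops its output tables
    record Leaf(String name) implements MiniNode { }

    // a metanode: cleaning recurses into its body, downstream first
    record Nested(String name, List<Reached> body) implements MiniNode { }

    // pairs a node with the inport indices it is reached through,
    // mirroring the role of NodeAndInports
    record Reached(MiniNode node, Set<Integer> inports) { }

    static void cleanOutports(final Reached r) {
        if (r.node() instanceof Leaf leaf) {
            System.out.println("clean outports of " + leaf.name());
        } else if (r.node() instanceof Nested nested) {
            System.out.println("descend into " + nested.name() + " via inports " + r.inports());
            for (int i = nested.body().size() - 1; i >= 0; i--) { // downstream first
                cleanOutports(nested.body().get(i));
            }
        }
    }

    public static void main(final String[] args) {
        Reached meta = new Reached(new Nested("metanode", List.of(
            new Reached(new Leaf("groupBy"), Set.of(0)),
            new Reached(new Leaf("sorter"), Set.of(0)))), Set.of(0));
        cleanOutports(meta);
        // descend into metanode via inports [0], then clean sorter, then groupBy
    }
}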
Use of org.knime.core.node.workflow.Workflow.NodeAndInports in project knime-core by knime.
Class WorkflowManager, method doAfterExecution.
/**
* Cleanup a node after execution. This will also permit the argument node to change its state in
* {@link NodeContainer#performStateTransitionEXECUTED(NodeContainerExecutionStatus)}. This method also takes care
* of restarting loops, if there are any to be continued.
*
* <p>
* As in {@link #doBeforeExecution(NodeContainer)} the argument node is usually a {@link SingleNodeContainer} but
* can also be a remotely executed <code>WorkflowManager</code>.
*
* @param nc node which just finished execution
* @param status indicates if node execution was finished successfully (note that this does not imply State=EXECUTED
* e.g. for loop ends)
*/
void doAfterExecution(final NodeContainer nc, final NodeContainerExecutionStatus status) {
assert isLocalWFM() : "doAfterExecute not allowed for remotely executing workflows";
assert !nc.getID().equals(this.getID());
boolean success = status.isSuccess();
try (WorkflowLock lock = lock()) {
nc.getNodeTimer().endExec(success);
String st = success ? " - success" : " - failure";
LOGGER.debug(nc.getNameWithID() + " doAfterExecute" + st);
if (!success) {
disableNodeForExecution(nc.getID());
}
// switch state from POSTEXECUTE to the new state: EXECUTED or CONFIGURED
// in case of success (without or with a loop, respectively), or IDLE in case of an error.
nc.performStateTransitionEXECUTED(status);
boolean canConfigureSuccessors = true;
// remember previous message in case loop restart fails...
NodeMessage latestNodeMessage = nc.getNodeMessage();
if (nc instanceof NativeNodeContainer) {
NativeNodeContainer nnc = (NativeNodeContainer) nc;
if (success) {
Node node = nnc.getNode();
// process start of bundle of parallel chunks
if (node.getNodeModel() instanceof LoopStartParallelizeNode && !node.isInactive()) {
try {
parallelizeLoop(nc.getID());
} catch (Exception e) {
if (!(e instanceof IllegalLoopException)) {
// handle unexpected exceptions properly (i.e.
// clean up loop) but report them as error!
LOGGER.error("Error in parallelizeLoop: " + e, e);
} else {
// can happen during regular use
// (e.g. wrong end node)
LOGGER.debug("parallelizeLoop failed: " + e, e);
}
// make sure the start node is reset and
// an appropriate message is set.
latestNodeMessage = new NodeMessage(NodeMessage.Type.ERROR, "Parallel Branch Start Failure: " + e.getMessage());
LOGGER.error(latestNodeMessage.getMessage(), e);
success = false;
canConfigureSuccessors = false;
disableNodeForExecution(nc.getID());
resetAndConfigureNode(nc.getID());
}
}
// process loop context for "real" nodes:
if (nnc.isModelCompatibleTo(LoopStartNode.class)) {
// if this was BEGIN, it's not anymore (unless we restart it explicitly!)
node.setLoopEndNode(null);
}
if (nnc.isModelCompatibleTo(LoopEndNode.class)) {
// make sure entire loop body is executed. Trigger execution of rest if not.
// (note that we do not worry about waiting for executing dangling branches, for those
// we only need to wait when the loop is about to be restarted!)
ArrayList<NodeAndInports> loopBodyNodes = new ArrayList<NodeAndInports>();
try {
NodeID endID = nnc.getID();
NodeID startID = m_workflow.getMatchingLoopStart(endID);
loopBodyNodes = m_workflow.findAllNodesConnectedToLoopBody(startID, endID);
} catch (IllegalLoopException ile) {
// loop is incorrectly wired. We cannot restart potentially dangling branches
latestNodeMessage = new NodeMessage(NodeMessage.Type.ERROR, "Loop Body wired incorrectly (" + ile.getMessage() + ").");
LOGGER.error(latestNodeMessage.getMessage(), ile);
success = false;
}
// check if any of those nodes can still be executed (configured but not yet executing)
for (NodeAndInports nai : loopBodyNodes) {
NodeID id = nai.getID();
NodeContainer currNode = m_workflow.getNode(id);
if (!currNode.getInternalState().equals(EXECUTED)) {
// after this first simple & light-weight test we test true executability:
if (this.canExecuteNodeDirectly(id)) {
// Fixes Bug 2292 (dangling branches were not executed in 1-iteration loops)
if (currNode instanceof WorkflowManager) {
// FIXME: also here we need to execute...?
} else {
assert currNode instanceof SingleNodeContainer;
this.markAndQueueNodeAndPredecessors(id, -1);
}
}
}
}
}
if (success && node.getLoopContext() != null) {
// we are supposed to execute this loop again.
assert nnc.isModelCompatibleTo(LoopEndNode.class);
FlowLoopContext slc = node.getLoopContext();
// then check if the loop is properly configured:
if (m_workflow.getNode(slc.getHeadNode()) == null) {
// obviously not: origin of loop is not in this WFM!
assert false : "Inconsistent loops should be caught earlier.";
// nothing else to do: NC returns to being configured
if (!InternalNodeContainerState.CONFIGURED_MARKEDFOREXEC.equals(nnc.getInternalState())) {
nnc.markForExecution(false);
}
latestNodeMessage = new NodeMessage(NodeMessage.Type.ERROR, "Loop nodes are not in the same workflow!");
LOGGER.error(latestNodeMessage.getMessage());
success = false;
} else {
try {
slc.setTailNode(nc.getID());
if (!nnc.getNode().getPauseLoopExecution()) {
restartLoop(slc);
} else {
// do nothing - leave successors marked. Cancel execution to stop paused loop.
}
} catch (IllegalLoopException ile) {
LOGGER.error(ile.getMessage(), ile);
latestNodeMessage = new NodeMessage(NodeMessage.Type.ERROR, ile.getMessage());
success = false;
}
// make sure we do not accidentally configure the remainder of this node
// since we are not yet done with the loop
canConfigureSuccessors = false;
}
}
if (!success) {
// make sure any marks are removed (only for loop ends!)
disableNodeForExecution(nnc.getID());
nnc.getNode().clearLoopContext();
}
}
}
// note this is NOT the else of the if above - success can be modified...
if (!success && nc instanceof SingleNodeContainer) {
// switch from IDLE to CONFIGURED if possible!
// keeps node messages, also for nodes within a component/subnode
configureSingleNodeContainer((SingleNodeContainer) nc, /*keepNodeMessage=*/true);
// in case there is a more recent message from above
nc.setNodeMessage(latestNodeMessage);
}
// now handle non success for all types of nodes:
if (!success) {
// clean loops which were waiting for this one to be executed.
for (FlowLoopContext flc : nc.getWaitingLoops()) {
disableNodeForExecution(flc.getTailNode());
}
nc.clearWaitingLoopList();
}
if (nc.getWaitingLoops().size() >= 1) {
// some loops were waiting for this node to finish! Let's try to restart them:
for (FlowLoopContext slc : nc.getWaitingLoops()) {
try {
restartLoop(slc);
} catch (IllegalLoopException ile) {
// set error message in LoopEnd node not this one!
NodeMessage nm = new NodeMessage(NodeMessage.Type.ERROR, ile.getMessage());
getNodeContainer(slc.getTailNode()).setNodeMessage(nm);
}
}
nc.clearWaitingLoopList();
}
if (canConfigureSuccessors) {
// may be SingleNodeContainer or WFM contained within this
// one but then it can be treated like a SNC
getExecutionController().checkHaltingCriteria(nc.getID());
configureNodeAndPortSuccessors(nc.getID(), null, false, true, false);
}
lock.queueCheckForNodeStateChangeNotification(true);
}
}
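The waiting-loop handling at the end ties back to restartLoop(): when restartLoop() finds a body node still executing, it parks the FlowLoopContext on that node via addWaitingLoop() and returns; doAfterExecution() later drains the parked contexts, disabling them on failure and restarting them on success. A compact, hypothetical sketch of that hand-off (names invented for illustration):

import java.util.ArrayList;
import java.util.List;

public class WaitingLoopsSketch {

    // hypothetical stand-in for a FlowLoopContext parked on a busy node
    record PendingLoop(String name) { }

    private final List<PendingLoop> m_waitingLoops = new ArrayList<>();

    // called from restartLoop() when a body node is still executing:
    // the loop parks itself on that node instead of restarting right away
    void addWaitingLoop(final PendingLoop loop) {
        m_waitingLoops.add(loop);
    }

    // called from doAfterExecution(): drain the parked loops and either
    // restart them (success) or disable their tail nodes (failure)
    void afterExecution(final boolean success) {
        for (PendingLoop loop : m_waitingLoops) {
            System.out.println((success ? "restartLoop(" : "disableTailOf(") + loop.name() + ")");
        }
        m_waitingLoops.clear();
    }

    public static void main(final String[] args) {
        WaitingLoopsSketch busyBodyNode = new WaitingLoopsSketch();
        busyBodyNode.addWaitingLoop(new PendingLoop("outerCountingLoop"));
        busyBodyNode.afterExecution(true); // prints: restartLoop(outerCountingLoop)
    }
}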