Use of org.knime.core.node.workflow.NodeMessage.Type in project knime-core by knime.
The class WorkflowManager, method doBeforeExecution.
/**
* Call-back from NodeContainer, called before the node is actually executed. The argument node is usually a
* {@link SingleNodeContainer}, although it can also be a metanode (i.e. a <code>WorkflowManager</code>), which is
* executed remotely (execution takes place as a single operation).
*
* @param nc node whose execution is about to start
* @throws IllegalFlowObjectStackException If loop end nodes have problems identifying their start node
*/
void doBeforeExecution(final NodeContainer nc) {
    assert !nc.getID().equals(this.getID());
    assert !nc.isLocalWFM() : "No execution of local metanodes";
    try (WorkflowLock lock = lock()) {
        // allow NNC to update states etc
        LOGGER.debug(nc.getNameWithID() + " doBeforeExecution");
        nc.getNodeTimer().startExec();
        if (nc instanceof SingleNodeContainer) {
            FlowObjectStack flowObjectStack = nc.getFlowObjectStack();
            FlowScopeContext fsc = flowObjectStack.peek(FlowScopeContext.class);
            // if the node is in a subnode, the subnode may be part of a restored loop, see AP-7585
            FlowLoopContext subnodeOuterFlowLoopContext = flowObjectStack.peekOptional(FlowSubnodeScopeContext.class)
                .map(s -> s.getOuterFlowScopeContext(FlowLoopContext.class)).orElse(null);
            if (fsc instanceof RestoredFlowLoopContext
                    || subnodeOuterFlowLoopContext instanceof RestoredFlowLoopContext) {
                throw new IllegalFlowObjectStackException("Can't continue loop as the workflow was restored "
                    + "with the loop being partially executed. Reset loop start and execute entire loop again.");
            }
            if (nc instanceof NativeNodeContainer) {
                NativeNodeContainer nnc = (NativeNodeContainer) nc;
                if (nnc.isModelCompatibleTo(ScopeEndNode.class)) {
                    // if this is an END to a loop/scope, make sure it knows its head
                    if (fsc == null) {
                        LOGGER.debug("Incoming flow object stack for " + nnc.getNameWithID() + ":\n"
                            + flowObjectStack.toDeepString());
                        if (nnc.isModelCompatibleTo(LoopEndNode.class)) {
                            throw new IllegalFlowObjectStackException("Encountered loop-end without corresponding head!");
                        } else {
                            throw new IllegalFlowObjectStackException("Encountered scope-end without corresponding head!");
                        }
                    }
                    NodeContainer headNode = m_workflow.getNode(fsc.getOwner());
                    if (headNode == null) {
                        if (nnc.isModelCompatibleTo(LoopEndNode.class)) {
                            throw new IllegalFlowObjectStackException("Loop start and end nodes are not in the same workflow");
                        } else {
                            throw new IllegalFlowObjectStackException("Scope start and end nodes are not in the same workflow");
                        }
                    } else if (headNode instanceof NativeNodeContainer
                            && ((NativeNodeContainer) headNode).isModelCompatibleTo(ScopeStartNode.class)) {
                        // check that the start and end nodes have compatible flow scope contexts
                        Class<? extends FlowScopeContext> endNodeFlowScopeContext =
                            ((ScopeEndNode<?>) nnc.getNodeModel()).getFlowScopeContextClass();
                        if (!endNodeFlowScopeContext.isAssignableFrom(fsc.getClass())) {
                            if (nnc.isModelCompatibleTo(LoopEndNode.class)) {
                                throw new IllegalFlowObjectStackException("Encountered loop-end without compatible head!");
                            } else {
                                throw new IllegalFlowObjectStackException("Encountered scope-end without compatible head!");
                            }
                        }
                    }
                    assert ((NativeNodeContainer) headNode).getNode().getNodeModel()
                        .equals(nnc.getNode().getScopeStartNode(ScopeStartNode.class).orElse(null));
                } else if (nnc.isModelCompatibleTo(LoopStartNode.class)) {
                    pushFlowContextForLoopIteration(nnc);
                } else {
                    // or not if it's any other type of node
                    nnc.getNode().setScopeStartNode(null);
                }
            }
        }
        nc.performStateTransitionEXECUTING();
        lock.queueCheckForNodeStateChangeNotification(true);
    }
}
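Below is a minimal, hypothetical sketch (not part of knime-core's WorkflowManager) of how a caller might surface the IllegalFlowObjectStackException thrown above as a node message, which is where NodeMessage.Type ties into this page. It only reuses API visible in the snippets here (the NodeMessage constructor, NodeMessage.Type.ERROR and NodeContainer#setNodeMessage); the actual error handling in knime-core may differ.

// Hypothetical caller-side handling; shown for illustration only.
try {
    doBeforeExecution(nc);
} catch (IllegalFlowObjectStackException e) {
    // report the loop/scope problem directly on the node as an ERROR message
    nc.setNodeMessage(new NodeMessage(NodeMessage.Type.ERROR, e.getMessage()));
    throw e;
}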
Use of org.knime.core.node.workflow.NodeMessage.Type in project knime-core by knime.
The class WorkflowManager, method postLoad.
private void postLoad(final Map<NodeID, NodeContainerPersistor> persistorMap,
    final Map<Integer, BufferedDataTable> tblRep, final boolean mustWarnOnDataLoadError,
    final ExecutionMonitor exec, final LoadResult loadResult, final boolean keepNodeMessage)
    throws CanceledExecutionException {
    // linked set because we need reverse order later on
    Collection<NodeID> failedNodes = new LinkedHashSet<NodeID>();
    boolean isStateChangePredictable = false;
    final Set<NodeID> nodeIDsInPersistorSet = persistorMap.keySet();
    // had NPE below - adding this line to get better debug information
    CheckUtils.checkArgumentNotNull(nodeIDsInPersistorSet,
        "NodeID list from persistor must not be null for workflow %s", getNameWithID());
    for (NodeID bfsID : m_workflow.createBreadthFirstSortedList(nodeIDsInPersistorSet, true).keySet()) {
        NodeContainer cont = getNodeContainer(bfsID);
        // initialize node container with CredentialsStore
        if (cont instanceof SingleNodeContainer) {
            SingleNodeContainer snc = (SingleNodeContainer) cont;
            snc.setCredentialsStore(m_credentialsStore);
        }
        LoadResult subResult = new LoadResult(cont.getNameWithID());
        InternalNodeContainerState contStateBeforeLoadContent = cont.getInternalState();
        NodeOutPort[] predPorts = assemblePredecessorOutPorts(bfsID);
        final int predCount = predPorts.length;
        PortObject[] portObjects = new PortObject[predCount];
        boolean inPortsContainNull = false;
        FlowObjectStack[] predStacks = new FlowObjectStack[predCount];
        for (int i = 0; i < predCount; i++) {
            NodeOutPort p = predPorts[i];
            if (cont instanceof SingleNodeContainer && p != null) {
                SingleNodeContainer snc = (SingleNodeContainer) cont;
                snc.setInHiLiteHandler(i, p.getHiLiteHandler());
            }
            if (p != null) {
                predStacks[i] = p.getFlowObjectStack();
                portObjects[i] = p.getPortObject();
                // remember whether any input data is missing
                inPortsContainNull |= portObjects[i] == null;
            }
        }
        FlowObjectStack inStack;
        boolean needsReset = false;
        try {
            if (isSourceNode(bfsID)) {
                predStacks = new FlowObjectStack[]{getWorkflowVariableStack()};
            }
            inStack = new FlowObjectStack(cont.getID(), predStacks);
        } catch (IllegalFlowObjectStackException ex) {
            subResult.addError("Errors creating flow object stack for node \"" + cont.getNameWithID()
                + "\", (resetting flow variables): " + ex.getMessage());
            needsReset = true;
            inStack = new FlowObjectStack(cont.getID());
        }
        NodeContainerPersistor persistor = persistorMap.get(bfsID);
        InternalNodeContainerState loadState = persistor.getMetaPersistor().getState();
        exec.setMessage(cont.getNameWithID());
        exec.checkCanceled();
        // two steps below: loadNodeContainer and loadContent
        ExecutionMonitor sub1 = exec.createSubProgress(1.0 / (2 * m_workflow.getNrNodes()));
        ExecutionMonitor sub2 = exec.createSubProgress(1.0 / (2 * m_workflow.getNrNodes()));
        NodeContext.pushContext(cont);
        try {
            persistor.loadNodeContainer(tblRep, sub1, subResult);
        } catch (CanceledExecutionException e) {
            throw e;
        } catch (Exception e) {
            if (!(e instanceof InvalidSettingsException) && !(e instanceof IOException)) {
                LOGGER.error("Caught unexpected \"" + e.getClass().getSimpleName() + "\" during node loading", e);
            }
            subResult.addError("Errors loading, skipping it: " + e.getMessage());
            needsReset = true;
        } finally {
            NodeContext.removeLastContext();
        }
        sub1.setProgress(1.0);
        // loadContent must run under the lock of the (meta)node's workflow manager
        // (that is being asserted in methods which get called indirectly)
        try (WorkflowLock lock = cont instanceof WorkflowManager ? ((WorkflowManager) cont).lock() : lock()) {
            cont.loadContent(persistor, tblRep, inStack, sub2, subResult, keepNodeMessage);
        }
        sub2.setProgress(1.0);
        if (persistor.isDirtyAfterLoad()) {
            cont.setDirty();
        }
        boolean hasPredecessorFailed = false;
        for (ConnectionContainer cc : m_workflow.getConnectionsByDest(bfsID)) {
            NodeID s = cc.getSource();
            if (s.equals(getID())) {
                // don't consider WFM_IN connections
                continue;
            }
            if (failedNodes.contains(s)) {
                hasPredecessorFailed = true;
            }
        }
        if (!needsReset) {
            boolean isFullyConnected = isFullyConnected(bfsID);
            switch (contStateBeforeLoadContent) {
                case IDLE:
                case UNCONFIGURED_MARKEDFOREXEC:
                    needsReset = false;
                    break;
                default:
                    // we reset everything which is not fully connected
                    needsReset = !isFullyConnected;
                    break;
            }
            needsReset |= persistor.needsResetAfterLoad();
            needsReset |= hasPredecessorFailed;
        }
        boolean isExecuted = cont.getInternalState().equals(EXECUTED);
        boolean remoteExec = persistor.getMetaPersistor().getExecutionJobSettings() != null;
        // executed nodes without input data must be reset, e.g. when a predecessor has been loaded as IDLE
        if (!needsReset && isExecuted && inPortsContainNull) {
            needsReset = true;
            subResult.addError("Predecessor ports have no data", true);
        }
        if (needsReset && cont instanceof SingleNodeContainer && cont.isResetable()) {
            // we don't care for successors because they are not loaded yet
            invokeResetOnSingleNodeContainer((SingleNodeContainer) cont);
            isExecuted = false;
        }
        if (needsReset) {
            failedNodes.add(bfsID);
        }
        if (!isExecuted && cont instanceof SingleNodeContainer) {
            configureSingleNodeContainer((SingleNodeContainer) cont, keepNodeMessage);
        }
        if (persistor.mustComplainIfStateDoesNotMatch() && !cont.getInternalState().equals(loadState)
                && !hasPredecessorFailed) {
            isStateChangePredictable = true;
            String warning = "State has changed from " + loadState + " to " + cont.getInternalState();
            switch (subResult.getType()) {
                case DataLoadError:
                    // data load errors cause state changes
                    subResult.addError(warning, true);
                    break;
                default:
                    subResult.addNodeStateChangedWarning(warning);
            }
            cont.setDirty();
        }
        // node was saved in executing state (e.g. grid job), request to reconnect
        if (remoteExec) {
            if (needsReset) {
                subResult.addError("Can't continue execution due to load errors");
            }
            if (inPortsContainNull) {
                subResult.addError("Can't continue execution; no data in inport");
            }
            if (!cont.getInternalState().equals(EXECUTINGREMOTELY)) {
                subResult.addError("Can't continue execution; node is not configured but " + cont.getInternalState());
            }
            try {
                if (!continueExecutionOnLoad(cont, persistor)) {
                    cont.cancelExecution();
                    cont.setDirty();
                    subResult.addError("Can't continue execution; unknown reason");
                }
            } catch (Exception exc) {
                StringBuilder error = new StringBuilder("Can't continue execution");
                if (exc instanceof NodeExecutionJobReconnectException || exc instanceof InvalidSettingsException) {
                    error.append(": ").append(exc.getMessage());
                } else {
                    error.append(" due to ");
                    error.append(exc.getClass().getSimpleName());
                    error.append(": ").append(exc.getMessage());
                }
                LOGGER.error(error, exc);
                cont.cancelExecution();
                cont.setDirty();
                subResult.addError(error.toString());
            }
        }
        loadResult.addChildError(subResult);
        // errors collected for this node must also be reported as a node message
        switch (subResult.getType()) {
            case Ok:
            case Warning:
                break;
            case DataLoadError:
                if (!mustWarnOnDataLoadError) {
                    break;
                }
                // otherwise fall through and report the problem as a node message
            default:
                NodeMessage oldMessage = cont.getNodeMessage();
                StringBuilder messageBuilder = new StringBuilder(oldMessage.getMessage());
                if (messageBuilder.length() != 0) {
                    messageBuilder.append("\n");
                }
                NodeMessage.Type type;
                switch (oldMessage.getMessageType()) {
                    case RESET:
                    case WARNING:
                        type = NodeMessage.Type.WARNING;
                        break;
                    default:
                        type = NodeMessage.Type.ERROR;
                }
                messageBuilder.append(subResult.getFilteredError("", LoadResultEntryType.Warning));
                cont.setNodeMessage(new NodeMessage(type, messageBuilder.toString()));
        }
    }
    if (!sweep(nodeIDsInPersistorSet) && !isStateChangePredictable) {
        loadResult.addWarning("Some node states were invalid");
    }
}
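The final switch above is where NodeMessage.Type is actually chosen: an existing RESET or WARNING message keeps WARNING severity, anything else is escalated to ERROR, and the filtered load errors are appended to the previous message text. A minimal, self-contained sketch of just that merge step follows; the helper name mergeLoadErrors is made up for illustration and is not part of knime-core.

// Sketch only: mirrors the type-mapping and message-merging logic above.
private static NodeMessage mergeLoadErrors(final NodeMessage oldMessage, final String loadErrors) {
    StringBuilder messageBuilder = new StringBuilder(oldMessage.getMessage());
    if (messageBuilder.length() != 0) {
        messageBuilder.append("\n");
    }
    final NodeMessage.Type type;
    switch (oldMessage.getMessageType()) {
        case RESET:
        case WARNING:
            // a previous reset/warning message stays a warning
            type = NodeMessage.Type.WARNING;
            break;
        default:
            // everything else (including previous errors) becomes an error
            type = NodeMessage.Type.ERROR;
    }
    messageBuilder.append(loadErrors);
    return new NodeMessage(type, messageBuilder.toString());
}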
Use of org.knime.core.node.workflow.NodeMessage.Type in project knime-core by knime.
The class FileNodeContainerMetaPersistor, method loadNodeMessage.
/**
* Load messages that were set on the node.
*
* @param settings to load from.
* @return the loaded node message, or null if no message was stored or the workflow was saved before version 2.0.
* @throws InvalidSettingsException If this fails.
*/
protected NodeMessage loadNodeMessage(final NodeSettingsRO settings) throws InvalidSettingsException {
    if (getLoadVersion().isOlderThan(LoadVersion.V200)) {
        return null;
    } else {
        final String key;
        // as of version 2.8 the message is stored under a NodeContainer-specific key so that it
        // does not clash with the node message in the Node class
        if (getLoadVersion().ordinal() >= LoadVersion.V280.ordinal()) {
            key = "nodecontainer_message";
        } else {
            key = "node_message";
        }
        if (settings.containsKey(key)) {
            NodeSettingsRO sub = settings.getNodeSettings(key);
            String typeS = sub.getString("type");
            if (typeS == null) {
                throw new InvalidSettingsException("Message type must not be null");
            }
            Type type;
            try {
                type = Type.valueOf(typeS);
            } catch (IllegalArgumentException iae) {
                throw new InvalidSettingsException("Invalid message type: " + typeS, iae);
            }
            String message = sub.getString("message");
            return new NodeMessage(type, message);
        }
        return null;
    }
}
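For orientation, here is a hypothetical save counterpart that would produce settings this loader can read. The key names and the sub-settings layout are taken from the code above; the method itself is illustrative and not the persistor's actual save routine, and it assumes the standard NodeSettingsWO API (addNodeSettings, addString).

// Illustrative only: writes a NodeMessage in the layout expected by loadNodeMessage above.
static void saveNodeMessage(final NodeMessage message, final NodeSettingsWO settings) {
    NodeSettingsWO sub = settings.addNodeSettings("nodecontainer_message");
    sub.addString("type", message.getMessageType().name());
    sub.addString("message", message.getMessage());
}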