Use of org.knime.core.node.workflow.FlowLoopContext in project knime-core by knime.
In class Node, method cleanOutPorts:
/**
 * Sets output objects to null.
 *
 * @param isLoopRestart If true, does not clear tables that are part of the internally held tables
 *            (loop start nodes implement the {@link BufferedDataTableHolder} interface). This can
 *            only be true between two loop iterations.
 * @noreference This method is not intended to be referenced by clients.
 */
public void cleanOutPorts(final boolean isLoopRestart) {
    if (isLoopRestart) {
        // just as an assertion
        FlowObjectStack inStack = getFlowObjectStack();
        FlowLoopContext flc = inStack.peek(FlowLoopContext.class);
        if (flc != null && flc.isInactiveScope()) {
            LOGGER.coding("Encountered an inactive FlowLoopContext in a loop restart.");
            // continue with historically "correct" solution:
            flc = inStack.peekScopeContext(FlowLoopContext.class, false);
        }
        if (flc == null && !this.isModelCompatibleTo(LoopStartNode.class)) {
            LOGGER.coding("Encountered a loop restart action but there is"
                + " no loop context on the flow object stack (node " + getName() + ")");
        }
    }
    LOGGER.debug("clean output ports.");
    Set<BufferedDataTable> disposableTables = new LinkedHashSet<BufferedDataTable>();
    for (int i = 0; i < m_outputs.length; i++) {
        PortObject portObject = m_outputs[i].object;
        if (portObject instanceof BufferedDataTable) {
            final BufferedDataTable table = (BufferedDataTable) portObject;
            table.collectTableAndReferencesOwnedBy(this, disposableTables);
        }
        m_outputs[i].spec = null;
        m_outputs[i].object = null;
        m_outputs[i].summary = null;
    }
    if (m_internalHeldPortObjects != null) {
        Set<BufferedDataTable> internalTableSet = collectTableAndReferences(m_internalHeldPortObjects);
        // the internally held tables are kept across loop iterations (they contain an
        // internal table reference that must not be cleared).
        if (isLoopRestart) {
            disposableTables.removeAll(internalTableSet);
        } else {
            disposableTables.addAll(internalTableSet);
            m_internalHeldPortObjects = null;
        }
    }
    for (BufferedDataTable disposable : disposableTables) {
        disposable.clearSingle(this);
    }
    // clear temporary tables that have been created during execute
    for (ContainerTable t : m_localTempTables) {
        t.clear();
    }
    m_localTempTables.clear();
}
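
The isLoopRestart handling above only spares tables that the node reports as internally held. Below is a minimal, hypothetical sketch of a loop start model that relies on this by implementing BufferedDataTableHolder; the class name, the 1-in/1-out port layout and the three-iteration limit are assumptions made for illustration, and the settings/internals callbacks are left empty.

import java.io.File;
import java.io.IOException;

import org.knime.core.data.DataTableSpec;
import org.knime.core.node.BufferedDataTable;
import org.knime.core.node.BufferedDataTableHolder;
import org.knime.core.node.CanceledExecutionException;
import org.knime.core.node.ExecutionContext;
import org.knime.core.node.ExecutionMonitor;
import org.knime.core.node.InvalidSettingsException;
import org.knime.core.node.NodeModel;
import org.knime.core.node.NodeSettingsRO;
import org.knime.core.node.NodeSettingsWO;
import org.knime.core.node.workflow.LoopStartNodeTerminator;

/** Hypothetical loop start model that keeps a table across iterations via BufferedDataTableHolder. */
public class MyLoopStartNodeModel extends NodeModel implements LoopStartNodeTerminator, BufferedDataTableHolder {

    private BufferedDataTable m_heldTable; // reported as internally held, so spared by cleanOutPorts(true)
    private int m_iteration = 0;

    protected MyLoopStartNodeModel() {
        super(1, 1); // one data input, one data output (assumption for this sketch)
    }

    @Override
    protected DataTableSpec[] configure(final DataTableSpec[] inSpecs) throws InvalidSettingsException {
        return inSpecs; // pass the input spec through unchanged
    }

    @Override
    protected BufferedDataTable[] execute(final BufferedDataTable[] inData, final ExecutionContext exec)
            throws Exception {
        m_heldTable = inData[0]; // remember the table between iterations
        m_iteration++;
        return new BufferedDataTable[]{inData[0]};
    }

    @Override
    public boolean terminateLoop() {
        return m_iteration >= 3; // stop after three iterations (arbitrary limit for the sketch)
    }

    /** Tables returned here count as internally held and are not cleared on a loop restart. */
    @Override
    public BufferedDataTable[] getInternalTables() {
        return m_heldTable == null ? new BufferedDataTable[0] : new BufferedDataTable[]{m_heldTable};
    }

    @Override
    public void setInternalTables(final BufferedDataTable[] tables) {
        m_heldTable = (tables != null && tables.length > 0) ? tables[0] : null;
    }

    @Override
    protected void reset() {
        m_heldTable = null;
        m_iteration = 0;
    }

    // remaining NodeModel callbacks: nothing to persist in this sketch
    @Override
    protected void saveSettingsTo(final NodeSettingsWO settings) { }

    @Override
    protected void validateSettings(final NodeSettingsRO settings) throws InvalidSettingsException { }

    @Override
    protected void loadValidatedSettingsFrom(final NodeSettingsRO settings) throws InvalidSettingsException { }

    @Override
    protected void loadInternals(final File nodeInternDir, final ExecutionMonitor exec)
            throws IOException, CanceledExecutionException { }

    @Override
    protected void saveInternals(final File nodeInternDir, final ExecutionMonitor exec)
            throws IOException, CanceledExecutionException { }
}

Because the table returned by getInternalTables() is part of the internally held tables, cleanOutPorts(true) removes it from the disposable set between iterations instead of clearing it.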
Use of org.knime.core.node.workflow.FlowLoopContext in project knime-core by knime.
In class Node, method configure:
/**
 * Allows passing an object that may modify the specs created by the {@link NodeModel}, for example
 * in case the node is wrapped and the output is modified.
 *
 * @param rawInSpecs table specs from the predecessors
 * @param configureHelper object that is called after the node model has calculated the output specs
 * @return true if configure finished successfully.
 * @noreference This method is not intended to be referenced by clients.
 */
public boolean configure(final PortObjectSpec[] rawInSpecs, final NodeConfigureHelper configureHelper) {
    boolean success = false;
    LOGGER.assertLog(NodeContext.getContext() != null,
        "No node context available, please check call hierarchy and fix it");
    synchronized (m_configureLock) {
        // reset message object
        clearNodeMessageAndNotify();
        // copy input port object specs, ignoring the 0-variable port:
        PortObjectSpec[] inSpecs = Arrays.copyOfRange(rawInSpecs, 1, rawInSpecs.length);
        // clean output spec
        for (int p = 0; p < m_outputs.length; p++) {
            // update data table spec
            m_outputs[p].spec = null;
        }
        PortObjectSpec[] newOutSpec = new PortObjectSpec[getNrOutPorts() - 1];
        try {
            // check the inspecs against null
            for (int i = 0; i < inSpecs.length; i++) {
                if (inSpecs[i] == null) {
                    if (m_inputs[i + 1].getType().isOptional()) {
                        // ignore, unconnected optional input
                    } else {
                        return false;
                    }
                    // TODO: did we really need a warning here??
                    // throw new InvalidSettingsException(
                    //     "Node is not executable until all predecessors "
                    //     + "are configured and/or executed.");
                }
            }
            // check if the node is part of a skipped branch and return
            // appropriate specs without actually configuring the node.
            // Note that we must also check the incoming variable port!
            boolean isInactive = false;
            if (!isInactiveBranchConsumer()) {
                for (int i = 0; i < rawInSpecs.length; i++) {
                    if (rawInSpecs[i] instanceof InactiveBranchPortObjectSpec) {
                        isInactive = true;
                        break;
                    }
                }
            } else {
                FlowLoopContext flc = getFlowObjectStack().peek(FlowLoopContext.class);
                if (flc != null && flc.isInactiveScope()) {
                    isInactive = true;
                }
            }
            if (isInactive) {
                for (int j = 0; j < m_outputs.length; j++) {
                    m_outputs[j].spec = InactiveBranchPortObjectSpec.INSTANCE;
                }
                if (success) {
                    LOGGER.debug("Configure skipped. (" + getName() + " in inactive branch.)");
                }
                return true;
            }
            if (configureHelper != null) {
                configureHelper.preConfigure();
            }
            // call configure model to create output table specs
            // guaranteed to return non-null, correct-length array
            newOutSpec = invokeNodeModelConfigure(inSpecs);
            if (configureHelper != null) {
                newOutSpec = configureHelper.postConfigure(inSpecs, newOutSpec);
            }
            // find out if we are in the middle of executing a loop and this is a LoopEnd node
            boolean isIntermediateRunningLoop = false;
            if (isModelCompatibleTo(LoopEndNode.class)) {
                if ((getLoopContext() != null) && !getPauseLoopExecution()) {
                    FlowLoopContext flc = m_model.getFlowObjectStack().peek(FlowLoopContext.class);
                    if ((flc != null) && (flc.getIterationIndex() > 0)) {
                        // don't treat first iteration as "in the middle":
                        isIntermediateRunningLoop = true;
                    }
                }
            }
            if (!isIntermediateRunningLoop) {
                // update data table specs
                for (int p = 0; p < newOutSpec.length; p++) {
                    m_outputs[p + 1].spec = newOutSpec[p];
                }
            } else {
                // while the loop is still running, only propagate specs without domain information
                // on the loop end node (avoids costly configure calls on remainder of workflow).
                for (int p = 0; p < newOutSpec.length; p++) {
                    if (newOutSpec[p] instanceof DataTableSpec) {
                        // remove domain before assigning spec to outputs
                        DataTableSpecCreator dtsCreator = new DataTableSpecCreator((DataTableSpec) newOutSpec[p]);
                        dtsCreator.dropAllDomains();
                        m_outputs[p + 1].spec = dtsCreator.createSpec();
                    } else {
                        // no domain to clean in PortObjectSpecs
                        m_outputs[p + 1].spec = newOutSpec[p];
                    }
                }
            }
            m_outputs[0].spec = FlowVariablePortObjectSpec.INSTANCE;
            success = true;
        } catch (InvalidSettingsException ise) {
            Throwable cause = ise.getCause();
            if (cause == null) {
                createWarningMessageAndNotify(ise.getMessage());
            } else {
                createWarningMessageAndNotify(ise.getMessage(), ise);
            }
        } catch (Throwable t) {
            String error = "Configure failed (" + t.getClass().getSimpleName() + "): " + t.getMessage();
            createErrorMessageAndNotify(error, t);
        }
    }
    if (success) {
        LOGGER.debug("Configure succeeded. (" + this.getName() + ")");
    }
    return success;
}
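
The running-loop branch above strips domain information before propagating specs from a loop end node. The following standalone sketch repeats those DataTableSpecCreator calls in isolation; the class name, column name and column type are made up for illustration.

import org.knime.core.data.DataColumnSpec;
import org.knime.core.data.DataColumnSpecCreator;
import org.knime.core.data.DataTableSpec;
import org.knime.core.data.DataTableSpecCreator;
import org.knime.core.data.def.DoubleCell;

public class DropDomainsExample {
    public static void main(String[] args) {
        // build a one-column spec (column name and type are arbitrary for this sketch)
        DataColumnSpec col = new DataColumnSpecCreator("value", DoubleCell.TYPE).createSpec();
        DataTableSpec spec = new DataTableSpec(col);

        // same calls as the running-loop branch in Node.configure: copy the spec, drop all domains
        DataTableSpecCreator creator = new DataTableSpecCreator(spec);
        creator.dropAllDomains();
        DataTableSpec withoutDomains = creator.createSpec();

        System.out.println(withoutDomains);
    }
}

Per the comment in configure, propagating a domain-free (and therefore stable) spec while the loop runs avoids costly configure calls on the remainder of the workflow.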
Use of org.knime.core.node.workflow.FlowLoopContext in project knime-core by knime.
In class NodeModel, method continueLoop:
// ////////////////////////////////////////
// Loop Support...
//
// TODO: maybe all of this should be moved into an adaptor class
// "LoopNodeModelAdapter" which keeps the node's role and all of
// the loop specific stuff? Later...
//
// ////////////////////////////////////////
/**
 * Informs the WorkflowManager after execute to continue the loop. Call this at the end of the loop!
 * It results in both this node and the creator of the FlowLoopContext being queued for execution
 * once again. In this case the node can return an empty table after execution.
 *
 * Called on loop end (tail) nodes only.
 */
protected final void continueLoop() {
    if (!(this instanceof LoopEndNode)) {
        throw new IllegalStateException("continueLoop called from non-end node (Coding Error)!");
    }
    FlowLoopContext slc = m_flowObjectStack.peek(FlowLoopContext.class);
    if (slc != null && slc.isInactiveScope()) {
        m_logger.coding("Encountered an inactive FlowLoopContext in continueLoop.");
        // continue with historically "correct" solution:
        slc = m_flowObjectStack.peekScopeContext(FlowLoopContext.class, false);
    }
    if (slc == null) {
        // wrong wiring of the pipeline: head seems to be missing!
        throw new IllegalStateException("Missing Loop Start in Pipeline!");
    }
    m_loopContext = slc;
    // note that the WFM will set the tail ID so we can retrieve it
    // in the head node!
}
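
A minimal, hypothetical sketch of a loop end model that uses continueLoop() as described above: it asks the loop start node (assumed here to be a LoopStartNodeTerminator) whether to stop, and otherwise requests another iteration. The class name and the 1-in/1-out port layout are assumptions, the settings/internals callbacks are left empty, and a real loop end node would typically accumulate rows across iterations instead of just passing the last table through.

import java.io.File;
import java.io.IOException;

import org.knime.core.data.DataTableSpec;
import org.knime.core.node.BufferedDataTable;
import org.knime.core.node.CanceledExecutionException;
import org.knime.core.node.ExecutionContext;
import org.knime.core.node.ExecutionMonitor;
import org.knime.core.node.InvalidSettingsException;
import org.knime.core.node.NodeModel;
import org.knime.core.node.NodeSettingsRO;
import org.knime.core.node.NodeSettingsWO;
import org.knime.core.node.workflow.LoopEndNode;
import org.knime.core.node.workflow.LoopStartNode;
import org.knime.core.node.workflow.LoopStartNodeTerminator;

/** Hypothetical loop end model that decides per iteration whether to call continueLoop(). */
public class MyLoopEndNodeModel extends NodeModel implements LoopEndNode {

    protected MyLoopEndNodeModel() {
        super(1, 1); // one data input, one data output (assumption for this sketch)
    }

    @Override
    protected DataTableSpec[] configure(final DataTableSpec[] inSpecs) throws InvalidSettingsException {
        return inSpecs;
    }

    @Override
    protected BufferedDataTable[] execute(final BufferedDataTable[] inData, final ExecutionContext exec)
            throws Exception {
        LoopStartNode start = getLoopStartNode();
        if (!(start instanceof LoopStartNodeTerminator)) {
            throw new IllegalStateException("Loop end node is not connected to a matching loop start node");
        }
        if (((LoopStartNodeTerminator) start).terminateLoop()) {
            // last iteration: pass the table on (a real loop end would usually
            // return rows accumulated over all iterations)
            return new BufferedDataTable[]{inData[0]};
        } else {
            // ask the WorkflowManager to re-queue this node and the loop start
            continueLoop();
            // as the Javadoc of continueLoop notes, no real result is needed while the loop continues
            return new BufferedDataTable[1];
        }
    }

    @Override
    protected void reset() { }

    // remaining NodeModel callbacks: nothing to persist in this sketch
    @Override
    protected void saveSettingsTo(final NodeSettingsWO settings) { }

    @Override
    protected void validateSettings(final NodeSettingsRO settings) throws InvalidSettingsException { }

    @Override
    protected void loadValidatedSettingsFrom(final NodeSettingsRO settings) throws InvalidSettingsException { }

    @Override
    protected void loadInternals(final File nodeInternDir, final ExecutionMonitor exec)
            throws IOException, CanceledExecutionException { }

    @Override
    protected void saveInternals(final File nodeInternDir, final ExecutionMonitor exec)
            throws IOException, CanceledExecutionException { }
}

Asking the loop start for the termination decision (rather than counting iterations locally) keeps both ends of the loop consistent about when the loop is done.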
Use of org.knime.core.node.workflow.FlowLoopContext in project knime-core by knime.
In class Node, method execute:
/**
 * Starts executing this node. If the node has been executed already, it does nothing and just
 * returns <code>true</code>.
 *
 * Otherwise, the procedure starts executing all predecessor nodes connected to an input port
 * (which in turn recursively trigger their predecessors) and calls the function
 * <code>#execute()</code> in the model after all connected nodes return successfully. If a port
 * is not connected, this function returns false without executing itself (it may have executed
 * some predecessor nodes though). If a predecessor node returns false, this method also returns
 * false without executing this node or any further connected node.
 *
 * @param rawInData the data from the predecessors, including the flow variable port
 * @param exEnv the environment for the execution
 * @param exec the execution monitor
 * @return <code>true</code> if execution was successful, otherwise <code>false</code>
 * @see NodeModel#execute(BufferedDataTable[],ExecutionContext)
 * @noreference This method is not intended to be referenced by clients.
 * @since 2.8
 */
public boolean execute(final PortObject[] rawInData, final ExecutionEnvironment exEnv, final ExecutionContext exec) {
    LOGGER.assertLog(NodeContext.getContext() != null,
        "No node context available, please check call hierarchy and fix it");
    // clear the message object
    clearNodeMessageAndNotify();
    // loops that override the resetAndConfigureLoopBody (returning true)
    // will not call reset between successive executions
    // => force a clear of the model's content here
    m_model.setHasContent(false);
    // check if the node is part of a skipped branch and return appropriate objects without actually executing
    // the node. We also need to make sure that we don't run InactiveBranchConsumers if they are in the middle of
    // an inactive scope or loop so this check is not trivial...
    // are we not a consumer and any of the incoming branches are inactive?
    boolean isInactive = !isInactiveBranchConsumer() && containsInactiveObjects(rawInData);
    // are we a consumer but in the middle of an inactive scope?
    FlowObjectStack inStack = getFlowObjectStack();
    FlowScopeContext peekfsc = inStack.peek(FlowScopeContext.class);
    if (peekfsc != null) {
        isInactive = isInactive || peekfsc.isInactiveScope();
    }
    PortObject[] newOutData;
    if (isInactive) {
        // just a normal node: skip execution and fill output ports with inactive markers
        newOutData = new PortObject[getNrOutPorts()];
        Arrays.fill(newOutData, InactiveBranchPortObject.INSTANCE);
    } else {
        PortObject[] newInData = new PortObject[rawInData.length];
        // flow variable port (or inactive)
        newInData[0] = rawInData[0];
        // check for existence of all input tables
        for (int i = 1; i < rawInData.length; i++) {
            if (rawInData[i] == null && !m_inputs[i].getType().isOptional()) {
                createErrorMessageAndNotify("Couldn't get data from predecessor (Port No." + i + ").");
                return false;
            }
            if (rawInData[i] == null) {
                // optional input (checked above)
                newInData[i] = null;
            } else if (rawInData[i] instanceof BufferedDataTable) {
                newInData[i] = rawInData[i];
            } else {
                exec.setMessage("Copying input object at port " + i);
                ExecutionContext subExec = exec.createSubExecutionContext(0.0);
                try {
                    newInData[i] = copyPortObject(rawInData[i], subExec);
                } catch (CanceledExecutionException e) {
                    createWarningMessageAndNotify("Execution canceled");
                    return false;
                } catch (Throwable e) {
                    createErrorMessageAndNotify("Unable to clone input data at port " + i + " ("
                        + m_inputs[i].getName() + "): " + e.getMessage(), e);
                    return false;
                }
            }
        }
        PortObject[] rawOutData;
        try {
            // INVOKE MODEL'S EXECUTE
            // (warnings will now be processed "automatically" - we listen)
            rawOutData = invokeFullyNodeModelExecute(exec, exEnv, newInData);
        } catch (Throwable th) {
            boolean isCanceled = th instanceof CanceledExecutionException;
            isCanceled = isCanceled || th instanceof InterruptedException;
            // TODO this can all be shortened to exec.isCanceled()?
            isCanceled = isCanceled || exec.isCanceled();
            // writing to a buffer is done asynchronously -- if this thread
            // is interrupted while waiting for the IO thread to flush we take
            // it as a graceful exit
            isCanceled = isCanceled || (th instanceof DataContainerException
                && th.getCause() instanceof InterruptedException);
            if (isCanceled) {
                // clear the flag so that the ThreadPool does not kill the thread
                Thread.interrupted();
                reset();
                createWarningMessageAndNotify("Execution canceled");
                return false;
            } else {
                // check if we are inside a try-catch block (only if it was a real
                // error - not when canceled!)
                FlowObjectStack flowObjectStack = getFlowObjectStack();
                FlowTryCatchContext tcslc = flowObjectStack.peek(FlowTryCatchContext.class);
                if ((tcslc != null) && (!tcslc.isInactiveScope())) {
                    // failure inside an active try-catch:
                    // make node inactive but preserve error message.
                    reset();
                    PortObject[] outs = new PortObject[getNrOutPorts()];
                    Arrays.fill(outs, InactiveBranchPortObject.INSTANCE);
                    setOutPortObjects(outs, false, false);
                    createErrorMessageAndNotify("Execution failed in Try-Catch block: " + th.getMessage());
                    // and store information so that the catch node can report it
                    FlowObjectStack fos = getNodeModel().getOutgoingFlowObjectStack();
                    fos.push(new FlowVariable(FlowTryCatchContext.ERROR_FLAG, 1));
                    fos.push(new FlowVariable(FlowTryCatchContext.ERROR_NODE, getName()));
                    fos.push(new FlowVariable(FlowTryCatchContext.ERROR_REASON, th.getMessage()));
                    StringWriter thstack = new StringWriter();
                    th.printStackTrace(new PrintWriter(thstack));
                    tcslc.setError(getName(), th.getMessage(), thstack.toString());
                    fos.push(new FlowVariable(FlowTryCatchContext.ERROR_STACKTRACE, thstack.toString()));
                    return true;
                }
            }
            String message = "Execute failed: ";
            if (th.getMessage() != null && th.getMessage().length() >= 5) {
                message = message.concat(th.getMessage());
            } else {
                message = message.concat("(\"" + th.getClass().getSimpleName() + "\"): " + th.getMessage());
            }
            reset();
            createErrorMessageAndNotify(message, th);
            return false;
        }
        // copy to new array to prevent later modification in client code
        newOutData = Arrays.copyOf(rawOutData, rawOutData.length);
        if (newOutData[0] instanceof InactiveBranchPortObject) {
            Arrays.fill(newOutData, InactiveBranchPortObject.INSTANCE);
            isInactive = true;
        }
    }
    if (isInactive) {
        if (m_model instanceof ScopeStartNode) {
            // an inactive scope start node must indicate to its scope
            // end node that it was inactive...
            FlowScopeContext fsc = getOutgoingFlowObjectStack().peek(FlowScopeContext.class);
            assert fsc != null;
            fsc.inactiveScope(true);
        }
        if (m_model instanceof ScopeEndNode) {
            // the corresponding scope start node must have been
            // inactive as well (which we should see in the scope context object).
            if (peekfsc == null) {
                createErrorMessageAndNotify("Missing Scope Start Node in inactive branch.");
                return false;
            }
            if (!peekfsc.isInactiveScope()) {
                // we cannot handle this case: the End scope node needs
                // to trigger re-execution which it won't in an inactive
                // branch
                createErrorMessageAndNotify("Active Scope End node in inactive branch not allowed.");
                return false;
            } else {
                // also the scope start node is inactive, so the entire
                // loop is inactive.
                // Pop Scope object
                // => this is done in configure, not needed here! (MB: Hittisau 2013)
                // getOutgoingFlowObjectStack().pop(FlowScopeContext.class);
            }
        }
        assert !m_model.hasContent() : "Inactive node should have no content in node model";
    } else {
        for (int p = 1; p < getNrOutPorts(); p++) {
            m_outputs[p].hiliteHdl = m_model.getOutHiLiteHandler(p - 1);
        }
    }
    // check if we see a loop status in the NodeModel
    FlowLoopContext slc = m_model.getLoopContext();
    // cannot be true for inactive nodes, see getLoopContext method
    boolean continuesLoop = (slc != null);
    boolean tolerateOutSpecDiff = (exEnv != null) && (exEnv.reExecute());
    if (!setOutPortObjects(newOutData, continuesLoop, tolerateOutSpecDiff)) {
        return false;
    }
    assignInternalHeldObjects(rawInData, exEnv, exec, newOutData);
    return true;
}
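
When a failure occurs inside an active try-catch scope, the code above pushes a set of error flow variables that a downstream Catch node can report. The standalone sketch below recreates those variables with made-up values, just to show their names and types; in a real workflow they are created by the framework as shown above and read by the Catch node from its incoming flow object stack.

import org.knime.core.node.workflow.FlowTryCatchContext;
import org.knime.core.node.workflow.FlowVariable;

public class TryCatchErrorVariablesExample {
    public static void main(String[] args) {
        // the same constructor calls as the try-catch branch of Node.execute,
        // with placeholder values standing in for the real failure information
        FlowVariable flag = new FlowVariable(FlowTryCatchContext.ERROR_FLAG, 1);
        FlowVariable node = new FlowVariable(FlowTryCatchContext.ERROR_NODE, "My Failing Node");
        FlowVariable reason = new FlowVariable(FlowTryCatchContext.ERROR_REASON, "Execute failed: example");
        FlowVariable stacktrace = new FlowVariable(FlowTryCatchContext.ERROR_STACKTRACE, "<stack trace text>");

        // a Catch node downstream sees these on its flow object stack and can report them
        System.out.println(flag.getName() + " = " + flag.getIntValue());
        System.out.println(node.getName() + " = " + node.getStringValue());
        System.out.println(reason.getName() + " = " + reason.getStringValue());
        System.out.println(stacktrace.getName() + " = " + stacktrace.getStringValue());
    }
}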