Use of org.knime.core.node.workflow.execresult.WorkflowExecutionResult in project knime-core by knime:
class WorkflowManager, method loadExecutionResult.
/**
 * {@inheritDoc}
 *
 * Loads a (typically remotely computed) {@link WorkflowExecutionResult} back into this
 * workflow: node IDs in the result refer to the base ID of the remote workflow, so they
 * are first translated to this workflow's IDs, then each node's result is loaded in
 * breadth-first order so upstream nodes are restored before their successors.
 */
@Override
public void loadExecutionResult(final NodeContainerExecutionResult result, final ExecutionMonitor exec, final LoadResult loadResult) {
// fail fast with a descriptive message if a non-workflow result is passed in
CheckUtils.checkArgument(result instanceof WorkflowExecutionResult, "Argument must be instance of \"%s\": %s", WorkflowExecutionResult.class.getSimpleName(), result == null ? "null" : result.getClass().getSimpleName());
WorkflowExecutionResult r = (WorkflowExecutionResult) result;
try (WorkflowLock lock = lock()) {
// restore state/flags kept on this workflow manager itself
super.loadExecutionResult(result, exec, loadResult);
Map<NodeID, NodeContainerExecutionResult> map = r.getExecutionResultMap();
final int count = map.size();
// contains the corrected NodeID in this workflow (the node ids in
// the execution result refer to the base id of the remote workflow)
Map<NodeID, NodeID> transMap = new HashMap<NodeID, NodeID>();
NodeID otherIDPrefix = r.getBaseID();
for (NodeID otherID : map.keySet()) {
assert otherID.hasSamePrefix(otherIDPrefix);
// same node index, re-prefixed with this workflow's own ID
transMap.put(new NodeID(getID(), otherID.getIndex()), otherID);
}
// iterate in breadth-first order so predecessors are loaded before successors
for (NodeID id : m_workflow.createBreadthFirstSortedList(transMap.keySet(), true).keySet()) {
NodeID otherID = transMap.get(id);
NodeContainer nc = m_workflow.getNode(id);
NodeContainerExecutionResult exResult = map.get(otherID);
if (exResult == null) {
// record the problem and continue -- other nodes may still load fine
loadResult.addError("No execution result for node " + nc.getNameWithID());
continue;
}
exec.setMessage(nc.getNameWithID());
ExecutionMonitor subExec = exec.createSubProgress(1.0 / count);
// Propagate the flow variables from the predecessors' output ports so the
// node sees the same flow object stack it would have after a local execution
if (nc instanceof SingleNodeContainer) {
NodeOutPort[] predecessorOutPorts = assemblePredecessorOutPorts(id);
FlowObjectStack[] sos = Arrays.stream(predecessorOutPorts).map(p -> p != null ? p.getFlowObjectStack() : null).toArray(FlowObjectStack[]::new);
createAndSetFlowObjectStackFor((SingleNodeContainer) nc, sos);
}
nc.loadExecutionResult(exResult, subExec, loadResult);
subExec.setProgress(1.0);
}
}
}
Use of org.knime.core.node.workflow.execresult.WorkflowExecutionResult in project knime-core by knime:
class SubNodeContainer, method createExecutionResult.
/**
 * {@inheritDoc}
 *
 * Snapshots this subnode's execution state: captures the contained workflow's
 * result and wraps it, demanding that the subnode as a whole is EXECUTED before
 * the combined result counts as a success.
 * @since 2.12
 */
@Override
public SubnodeContainerExecutionResult createExecutionResult(final ExecutionMonitor exec) throws CanceledExecutionException {
    try (WorkflowLock lock = lock()) {
        final SubnodeContainerExecutionResult execResult = new SubnodeContainerExecutionResult(getID());
        super.saveExecutionResult(execResult);
        // capture the result of the wrapped workflow manager
        final WorkflowExecutionResult wfmResult = m_wfm.createExecutionResult(exec);
        if (wfmResult.needsResetAfterLoad()) {
            execResult.setNeedsResetAfterLoad();
        }
        // the inner workflow reports success as soon as one of its nodes succeeded;
        // be more strict here and also require the subnode itself to be EXECUTED
        final boolean strictSuccess = wfmResult.isSuccess() && getInternalState().equals(EXECUTED);
        execResult.setSuccess(strictSuccess);
        execResult.setWorkflowExecutionResult(wfmResult);
        return execResult;
    }
}
Use of org.knime.core.node.workflow.execresult.WorkflowExecutionResult in project knime-core by knime:
class WorkflowManager, method createExecutionResult.
/**
 * {@inheritDoc}
 *
 * Collects the execution results of all contained nodes in breadth-first order.
 * The aggregate result is flagged successful if at least one child result is
 * successful (deliberately lenient -- callers such as SubNodeContainer tighten
 * this as needed).
 */
@Override
public WorkflowExecutionResult createExecutionResult(final ExecutionMonitor exec) throws CanceledExecutionException {
    try (WorkflowLock lock = lock()) {
        final WorkflowExecutionResult wfmResult = new WorkflowExecutionResult(getID());
        super.saveExecutionResult(wfmResult);
        final Set<NodeID> nodesInBfsOrder = m_workflow.createBreadthFirstSortedList(m_workflow.getNodeIDs(), true).keySet();
        final double progressPerNode = 1.0 / nodesInBfsOrder.size();
        boolean anySuccess = false;
        for (final NodeID nodeID : nodesInBfsOrder) {
            final NodeContainer container = getNodeContainer(nodeID);
            exec.setMessage(container.getNameWithID());
            final ExecutionMonitor subProgress = exec.createSubProgress(progressPerNode);
            final NodeContainerExecutionResult childResult = container.createExecutionResult(subProgress);
            anySuccess |= childResult.isSuccess();
            wfmResult.addNodeExecutionResult(nodeID, childResult);
        }
        // only set the flag when some child succeeded
        // (important for no-child workflows)
        if (anySuccess) {
            wfmResult.setSuccess(true);
        }
        return wfmResult;
    }
}
Use of org.knime.core.node.workflow.execresult.WorkflowExecutionResult in project knime-core by knime:
class SubNodeContainer, method loadExecutionResult.
/**
 * {@inheritDoc}
 *
 * Loads a {@link SubnodeContainerExecutionResult}: delegates the inner workflow
 * result to the wrapped workflow manager, then publishes the subnode's real
 * outputs when the overall result was a success.
 */
@Override
public void loadExecutionResult(final NodeContainerExecutionResult result, final ExecutionMonitor exec, final LoadResult loadResult) {
    // reject anything that is not a subnode result, with a descriptive message
    CheckUtils.checkArgument(result instanceof SubnodeContainerExecutionResult, "Argument must be instance of \"%s\": %s", SubnodeContainerExecutionResult.class.getSimpleName(), result == null ? "null" : result.getClass().getSimpleName());
    final SubnodeContainerExecutionResult subnodeResult = (SubnodeContainerExecutionResult) result;
    try (WorkflowLock lock = lock()) {
        super.loadExecutionResult(result, exec, loadResult);
        final WorkflowExecutionResult innerResult = subnodeResult.getWorkflowExecutionResult();
        runParentAction(() -> getWorkflowManager().loadExecutionResult(innerResult, exec, loadResult));
        // After loading the execution result of the workflow manager we need to set the real output of the subnode
        if (subnodeResult.isSuccess()) {
            setVirtualOutputIntoOutport(EXECUTED);
        }
    }
}
Use of org.knime.core.node.workflow.execresult.WorkflowExecutionResult in project knime-core by knime:
class SandboxedNodeCreator, method copyExistingTablesIntoSandboxContainer.
/**
 * Copies the tables (port and internal) into the context of the corresponding node in the targetWFM. The execution
 * result must fit to the passed node container.
 *
 * @param execResult the object holding the result of the sourceNC. If the sourceNC is a workflow, this must hold
 *            all results of all contained nodes.
 * @param sourceNC the node that produced the execution result.
 * @param targetNC the context into which the tables are copied into
 * @param progressMon For progress information
 * @param copyDataIntoNewContext as per {@link #setCopyData(boolean)}
 * @throws CanceledExecutionException
 * @throws IOException
 */
public static void copyExistingTablesIntoSandboxContainer(final NodeContainerExecutionResult execResult, final NodeContainer sourceNC, final NodeContainer targetNC, final ExecutionMonitor progressMon, final boolean copyDataIntoNewContext) throws CanceledExecutionException, IOException {
assert targetNC.getNrOutPorts() == sourceNC.getNrOutPorts();
// dispatch on the concrete result type: native node, metanode, or subnode
if (execResult instanceof NativeNodeContainerExecutionResult) {
NativeNodeContainerExecutionResult sncResult = (NativeNodeContainerExecutionResult) execResult;
// execResult and node types must match
assert sourceNC instanceof NativeNodeContainer;
assert targetNC instanceof NativeNodeContainer;
// data is to copy ... get the correct execution context
// (null context means "don't copy" -- copyPortObject presumably passes objects through; verify)
ExecutionContext targetExec = copyDataIntoNewContext ? ((SingleNodeContainer) targetNC).createExecutionContext() : null;
NodeExecutionResult ner = sncResult.getNodeExecutionResult();
// TODO this copy process has to take place in a different place
// though it needs the final execution context for correct copy
// of BDT objects
PortObject[] resultTables = new PortObject[targetNC.getNrOutPorts()];
// copyCount sizes the sub-progress slices: one slice per output port plus
// one per internally held table (added below)
int copyCount = resultTables.length;
// copy also the internally held tables (such as for instance
// the table in the table view) -- use the copy of the outports
// if they match (likely they don't)
PortObject[] oldInternTables = ner.getInternalHeldPortObjects();
PortObject[] newInternTables = null;
if (oldInternTables != null) {
newInternTables = new PortObject[oldInternTables.length];
copyCount += newInternTables.length;
}
// skip flow variable output
for (int i = 0; i < resultTables.length; i++) {
ExecutionMonitor sub = progressMon.createSubProgress(1.0 / copyCount);
progressMon.setMessage("Port " + i);
PortObject o = ner.getPortObject(i);
PortObject newPO = copyPortObject(o, sub, targetExec);
if (newInternTables != null) {
// identity comparison on purpose: reuse the fresh copy when an internal
// table is the very same object as an output port object
for (int j = 0; j < oldInternTables.length; j++) {
if (oldInternTables[j] == o) {
newInternTables[j] = newPO;
}
}
}
sub.setProgress(1.0);
resultTables[i] = newPO;
}
// second pass: copy the internal tables that were not aliased to any output port
if (newInternTables != null) {
for (int i = 0; i < newInternTables.length; i++) {
ExecutionMonitor sub = progressMon.createSubProgress(1.0 / copyCount);
progressMon.setMessage("Internal Table " + i);
if (newInternTables[i] == null) {
PortObject oldT = oldInternTables[i];
PortObject newT = copyPortObject(oldT, sub, targetExec);
newInternTables[i] = newT;
}
sub.setProgress(1.0);
}
}
if (oldInternTables != null) {
ner.setInternalHeldPortObjects(newInternTables);
}
ner.setPortObjects(resultTables);
} else if (execResult instanceof WorkflowExecutionResult) {
WorkflowExecutionResult wfmResult = (WorkflowExecutionResult) execResult;
// exec result and node types must match
WorkflowManager targetWFM = (WorkflowManager) targetNC;
WorkflowManager sourceWFM = (WorkflowManager) sourceNC;
// recurse into the metanode's children
copyIntoSandboxContainerRecursive(sourceWFM, targetWFM, wfmResult, progressMon, copyDataIntoNewContext);
} else if (execResult instanceof SubnodeContainerExecutionResult) {
SubnodeContainerExecutionResult subResult = (SubnodeContainerExecutionResult) execResult;
// unwrap the inner workflow result and recurse into the subnode's workflow
WorkflowExecutionResult wfmResult = subResult.getWorkflowExecutionResult();
WorkflowManager targetWFM = ((SubNodeContainer) targetNC).getWorkflowManager();
WorkflowManager sourceWFM = ((SubNodeContainer) sourceNC).getWorkflowManager();
copyIntoSandboxContainerRecursive(sourceWFM, targetWFM, wfmResult, progressMon, copyDataIntoNewContext);
} else {
throw new IllegalStateException("Unsupported node result type: " + execResult.getClass().getSimpleName());
}
}
Aggregations