Use of org.knime.core.node.workflow.virtual.AbstractPortObjectRepositoryNodeModel in project knime-core by knime.
In class FlowVirtualScopeContext, method addPortObjectToRepositoryAndHostNode.
/**
 * Adds a port object to the {@link PortObjectRepository} (to be available to downstream nodes) and the host node
 * (whose node model is of type {@link AbstractPortObjectRepositoryNodeModel}) for persistence.
 *
 * The host node is registered via
 * {@link #registerHostNodeForPortObjectPersistence(NativeNodeContainer, NativeNodeContainer, ExecutionContext)}.
 *
 * @param po the port object to be added to the {@link PortObjectRepository} and published to the host node
 * @return the id of the port object in the {@link PortObjectRepository}
 * @throws CanceledExecutionException if copying the port object is cancelled
 * @throws IOException if copying the port object fails
 *
 * @throws IllegalStateException if there is no host node associated with the virtual scope
 */
public UUID addPortObjectToRepositoryAndHostNode(final PortObject po) throws IOException, CanceledExecutionException {
    if (m_nc == null) {
        throw new IllegalStateException("No host node to forward the port objects to set");
    }
    UUID id = PortObjectRepository.addCopy(po, m_exec);
    ((AbstractPortObjectRepositoryNodeModel)m_nc.getNodeModel()).addPortObject(id, PortObjectRepository.get(id).get()); // NOSONAR
    return id;
}
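
For orientation, a minimal caller-side sketch of the pattern above: publish a port object through the virtual scope context, then resolve it again by its repository id. The helper class, its method name, and the import package paths of FlowVirtualScopeContext and PortObjectRepository are assumptions for illustration; only addPortObjectToRepositoryAndHostNode and the Optional-returning PortObjectRepository.get are taken from the snippet.

// Hypothetical helper, not part of knime-core; package paths of the KNIME imports are assumed.
import java.io.IOException;
import java.util.UUID;

import org.knime.core.node.CanceledExecutionException;
import org.knime.core.node.exec.dataexchange.PortObjectRepository;
import org.knime.core.node.port.PortObject;
import org.knime.core.node.workflow.virtual.parchunk.FlowVirtualScopeContext;

final class PortObjectPublishSketch {

    private PortObjectPublishSketch() {
    }

    /**
     * Publishes a port object through the given virtual scope context and reads it
     * back from the repository by the returned id.
     */
    static PortObject publishAndLookup(final FlowVirtualScopeContext context, final PortObject po)
        throws IOException, CanceledExecutionException {
        // adds a copy to the PortObjectRepository and hands it to the host node for persistence
        UUID id = context.addPortObjectToRepositoryAndHostNode(po);
        // downstream code can resolve the same object again via its repository id
        return PortObjectRepository.get(id)
            .orElseThrow(() -> new IllegalStateException("Port object " + id + " not in repository"));
    }
}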
Use of org.knime.core.node.workflow.virtual.AbstractPortObjectRepositoryNodeModel in project knime-core by knime.
In class WorkflowManager, method parallelizeLoop.
/* Parallelize this "loop": create appropriate number of parallel
 * branches executing the matching chunks.
 */
private void parallelizeLoop(final NodeID startID) throws IllegalLoopException {
    try (WorkflowLock lock = lock()) {
        final NodeID endID = m_workflow.getMatchingLoopEnd(startID);
        LoopEndParallelizeNode endNode;
        LoopStartParallelizeNode startNode;
        try {
            // just for validation
            startNode = castNodeModel(startID, LoopStartParallelizeNode.class);
            endNode = castNodeModel(endID, LoopEndParallelizeNode.class);
        } catch (IllegalArgumentException iae) {
            throw new IllegalLoopException("Parallel Chunk Start Node not connected to matching end node!", iae);
        }
        final ArrayList<NodeAndInports> loopBody = m_workflow.findAllNodesConnectedToLoopBody(startID, endID);
        NodeID[] loopNodes = new NodeID[loopBody.size()];
        loopNodes[0] = startID;
        for (int i = 0; i < loopBody.size(); i++) {
            loopNodes[i] = loopBody.get(i).getID();
        }
        // creating matching sub workflow node holding all chunks
        Set<Pair<NodeID, Integer>> exposedInports = findNodesWithExternalSources(startID, loopNodes);
        HashMap<Pair<NodeID, Integer>, Integer> extInConnections = new HashMap<Pair<NodeID, Integer>, Integer>();
        PortType[] exposedInportTypes = new PortType[exposedInports.size() + 1];
        // the first port is the variable port
        exposedInportTypes[0] = FlowVariablePortObject.TYPE;
        // the remaining ports cover the exposed inports of the loop body
        int index = 1;
        for (Pair<NodeID, Integer> npi : exposedInports) {
            NodeContainer nc = getNodeContainer(npi.getFirst());
            int portIndex = npi.getSecond();
            exposedInportTypes[index] = nc.getInPort(portIndex).getPortType();
            extInConnections.put(npi, index);
            index++;
        }
        WorkflowManager subwfm = null;
        if (startNode.getNrRemoteChunks() > 0) {
            subwfm = createAndAddSubWorkflow(exposedInportTypes, new PortType[0], "Parallel Chunks");
            NodeUIInformation startUIPlain = getNodeContainer(startID).getUIInformation();
            if (startUIPlain != null) {
                NodeUIInformation startUI = NodeUIInformation.builder(startUIPlain)
                    .translate(new int[]{60, -60, 0, 0}).build();
                subwfm.setUIInformation(startUI);
            }
            // connect outside(!) nodes to new sub metanode
            for (Map.Entry<Pair<NodeID, Integer>, Integer> entry : extInConnections.entrySet()) {
                final Pair<NodeID, Integer> npi = entry.getKey();
                int metanodeindex = entry.getValue();
                if (metanodeindex >= 0) { // ignore variable port!
                    // we need to find the source again (since our list
                    // only holds the destination...)
                    ConnectionContainer cc = this.getIncomingConnectionFor(npi.getFirst(), npi.getSecond());
                    this.addConnection(cc.getSource(), cc.getSourcePort(), subwfm.getID(), metanodeindex);
                }
            }
        }
        ParallelizedChunkContentMaster pccm =
            new ParallelizedChunkContentMaster(subwfm, endNode, startNode.getNrRemoteChunks());
        final NativeNodeContainer startNC;
        if (subwfm != null && startNode instanceof AbstractPortObjectRepositoryNodeModel) {
            startNC = (NativeNodeContainer)getNodeContainer(startID);
        } else {
            startNC = null;
        }
        ExecutionContext exec = startNC != null ? startNC.createExecutionContext() : null;
        for (int i = 0; i < startNode.getNrRemoteChunks(); i++) {
            ParallelizedChunkContent copiedNodes =
                duplicateLoopBodyInSubWFMandAttach(subwfm, extInConnections, startID, endID, loopNodes, i);
            if (startNC != null) {
                NativeNodeContainer virtualInNode =
                    (NativeNodeContainer)subwfm.getNodeContainer(copiedNodes.getVirtualInputID());
                FlowVirtualScopeContext.registerHostNodeForPortObjectPersistence(startNC, virtualInNode, exec);
            }
            copiedNodes.executeChunk();
            pccm.addParallelChunk(i, copiedNodes);
        }
        // make sure head knows his chunk master (for potential cleanup)
        startNode.setChunkMaster(pccm);
        if (startNC != null && startNode instanceof PortObjectHolder) {
            // register a callback with the start node; allows the start node to notify the framework
            // that all the internally held port objects are now available (necessary in case they are available
            // only after the start node execution is finished)
            startNode.setNewInternalPortObjectNotifier(
                () -> startNC.getNode().assignInternalHeldObjects(new PortObject[0], null, null, new PortObject[0]));
        }
    }
}
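
The last block registers a callback so the start node can tell the framework when its internally held port objects become available, possibly only after the start node has finished executing. Below is a minimal, framework-free sketch of that notifier pattern; the class InternalObjectNotifierSketch, its StartNode stand-in, and all method names are illustrative assumptions and do not use the KNIME API.

// Self-contained sketch of the notifier/callback pattern; illustrative names only.
import java.util.ArrayList;
import java.util.List;

final class InternalObjectNotifierSketch {

    /** Stand-in for a loop start node that collects objects while its chunks execute. */
    static final class StartNode {
        private final List<Object> m_internalObjects = new ArrayList<>();
        private Runnable m_notifier;

        /** Framework side: register the callback (mirrors setNewInternalPortObjectNotifier). */
        void setNewInternalObjectNotifier(final Runnable notifier) {
            m_notifier = notifier;
        }

        /** Node side: collect an object produced by one of the chunks. */
        void addInternalObject(final Object o) {
            m_internalObjects.add(o);
        }

        /** Node side: once execution is done, tell the framework the objects are available. */
        void onExecutionFinished() {
            if (m_notifier != null) {
                m_notifier.run(); // framework can now persist the complete set of objects
            }
        }
    }

    public static void main(final String[] args) {
        StartNode start = new StartNode();
        // the framework registers the persistence callback before the chunks run
        start.setNewInternalObjectNotifier(() -> System.out.println("internal objects now available"));
        start.addInternalObject("chunk-0 result");
        start.onExecutionFinished();
    }
}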