Usage of org.knime.core.node.Node in project knime-core (by KNIME): class WorkflowManager, method internalAddNewNode.
@SuppressWarnings("unchecked")
private NodeID internalAddNewNode(final NodeFactory<?> factory, final NodeCreationContext context) {
    // TODO synchronize to avoid messing with running workflows!
    try (WorkflowLock lock = lock()) {
        assert factory != null;
        // Allocate a fresh node ID and wrap the factory into a new native node container.
        final NodeID id = m_workflow.createUniqueID();
        final Node node = new Node((NodeFactory<NodeModel>)factory, context);
        final NativeNodeContainer nnc = new NativeNodeContainer(this, node, id);
        addNodeContainer(nnc, true);
        configureNodeAndSuccessors(id, true);
        if (context != null) {
            // save node settings if source URL/context was provided (bug 5772)
            nnc.saveNodeSettingsToDefault();
        }
        LOGGER.debug("Added new node " + id);
        setDirty();
        return id;
    }
}
Usage of org.knime.core.node.Node in project knime-core (by KNIME): class WorkflowManager, method doBeforeExecution.
/**
 * Call-back from NodeContainer called before node is actually executed. The argument node is in usually a
 * {@link SingleNodeContainer}, although it can also be a metanode (i.e. a <code>WorkflowManager</code>), which is
 * executed remotely (execution takes place as a single operation).
 *
 * @param nc node whose execution is about to start
 * @throws IllegalFlowObjectStackException If loop end nodes have problems identifying their start node
 */
void doBeforeExecution(final NodeContainer nc) {
// this manager itself must never be the argument, and local metanodes are executed node-by-node, not as a unit
assert !nc.getID().equals(this.getID());
assert !nc.isLocalWFM() : "No execution of local metanodes";
try (WorkflowLock lock = lock()) {
// allow NNC to update states etc
LOGGER.debug(nc.getNameWithID() + " doBeforeExecution");
nc.getNodeTimer().startExec();
if (nc instanceof SingleNodeContainer) {
FlowObjectStack flowObjectStack = nc.getFlowObjectStack();
FlowLoopContext slc = flowObjectStack.peek(FlowLoopContext.class);
// if the node is in a subnode the subnode may be part of restored loop, see AP-7585
FlowLoopContext subnodeOuterFlowLoopContext = flowObjectStack.peekOptional(FlowSubnodeScopeContext.class).map(s -> s.getOuterFlowLoopContext()).orElse(null);
// a RestoredFlowLoopContext marks a loop saved while partially executed; such a loop cannot be resumed
if (slc instanceof RestoredFlowLoopContext || subnodeOuterFlowLoopContext instanceof RestoredFlowLoopContext) {
throw new IllegalFlowObjectStackException("Can't continue loop as the workflow was restored with the loop being partially " + "executed. Reset loop start and execute entire loop again.");
}
if (nc instanceof NativeNodeContainer) {
NativeNodeContainer nnc = (NativeNodeContainer) nc;
if (nnc.isModelCompatibleTo(LoopEndNode.class)) {
// if this is an END to a loop, make sure it knows its head
if (slc == null) {
// dump the stack for diagnosis before failing
LOGGER.debug("Incoming flow object stack for " + nnc.getNameWithID() + ":\n" + flowObjectStack.toDeepString());
throw new IllegalFlowObjectStackException("Encountered loop-end without corresponding head!");
}
NodeContainer headNode = m_workflow.getNode(slc.getOwner());
if (headNode == null) {
throw new IllegalFlowObjectStackException("Loop start and end nodes are not in the" + " same workflow");
}
// the head reference is wired during configure (see configureSingleNodeContainer); here it must already agree with the stack's owner
assert ((NativeNodeContainer) headNode).getNode().getNodeModel().equals(nnc.getNode().getLoopStartNode());
} else if (nnc.isModelCompatibleTo(LoopStartNode.class)) {
// loop START: push a fresh inner-loop context onto the outgoing stack for the loop body
nnc.getNode().getOutgoingFlowObjectStack().push(new InnerFlowLoopContext());
// nnc.getNode().getFlowObjectStack().push(new InnerFlowLoopContext());
} else {
// or not if it's any other type of node
nnc.getNode().setLoopStartNode(null);
}
}
}
// flip the node into EXECUTING and schedule a state-change notification for when the lock is released
nc.performStateTransitionEXECUTING();
lock.queueCheckForNodeStateChangeNotification(true);
}
}
Usage of org.knime.core.node.Node in project knime-core (by KNIME): class WorkflowManager, method configureSingleNodeContainer.
/**
 * Configure a SingleNodeContainer.
 *
 * @param snc node to be configured
 * @param keepNodeMessage Whether to keep previously set node messages (important during load sometimes)
 * @return true if the configuration did change something.
 */
private boolean configureSingleNodeContainer(final SingleNodeContainer snc, final boolean keepNodeMessage) {
boolean configurationChanged = false;
try (WorkflowLock lock = lock()) {
// preserve the current message only when requested; it is merged back into any new warnings/errors below
NodeMessage oldMessage = keepNodeMessage ? snc.getNodeMessage() : NodeMessage.NONE;
final int inCount = snc.getNrInPorts();
NodeID sncID = snc.getID();
NodeOutPort[] predPorts = assemblePredecessorOutPorts(sncID);
final PortObjectSpec[] inSpecs = new PortObjectSpec[inCount];
final FlowObjectStack[] sos = new FlowObjectStack[inCount];
final HiLiteHandler[] hiliteHdls = new HiLiteHandler[inCount];
// check for presence of input specs and collects inport
// TableSpecs, FlowObjectStacks and HiLiteHandlers
boolean allSpecsExists = true;
for (int i = 0; i < predPorts.length; i++) {
if (predPorts[i] != null) {
inSpecs[i] = predPorts[i].getPortObjectSpec();
sos[i] = predPorts[i].getFlowObjectStack();
hiliteHdls[i] = predPorts[i].getHiLiteHandler();
allSpecsExists &= inSpecs[i] != null;
} else if (snc.getInPort(i).getPortType().isOptional()) {
// optional input, which is not connected ... ignore
} else {
// mandatory input without a connected predecessor -- cannot configure
allSpecsExists = false;
}
}
if (!allSpecsExists) {
// (NodeMessage did not change -- can exit here)
return false;
}
if (!canConfigureNodes()) {
// parent container lacks input data (see warning text); merge the warning with any kept message
snc.setNodeMessage(NodeMessage.merge(oldMessage, NodeMessage.newWarning("Outer workflow does not have input data, execute it first")));
return false;
}
// which might attempt to configure an already queued node again
// the first group of states below permits (re)configuration; note the deliberate fall-through
switch(snc.getInternalState()) {
case IDLE:
case CONFIGURED:
case UNCONFIGURED_MARKEDFOREXEC:
case CONFIGURED_MARKEDFOREXEC:
// grid/server) -- also these nodes will be configured() on load
case EXECUTINGREMOTELY:
// the stack that previously would have been propagated,
// used to track changes
FlowObjectStack oldFOS = snc.createOutFlowObjectStack();
// create new FlowObjectStack
boolean flowStackConflict = false;
FlowObjectStack scsc;
try {
scsc = createAndSetFlowObjectStackFor(snc, sos);
} catch (IllegalFlowObjectStackException e) {
// conflicting stacks from different inputs: continue with an empty stack and flag the conflict
LOGGER.warn("Unable to merge flow object stacks: " + e.getMessage(), e);
scsc = new FlowObjectStack(sncID);
flowStackConflict = true;
}
snc.setCredentialsStore(m_credentialsStore);
// update backwards reference for loops
if (snc.isModelCompatibleTo(LoopEndNode.class)) {
// if this is an END to a loop, make sure it knows its head
// (for both: active and inactive loops)
Node sncNode = ((NativeNodeContainer) snc).getNode();
FlowLoopContext slc = scsc.peek(FlowLoopContext.class);
if (slc == null) {
// no head found - ignore during configure!
sncNode.setLoopStartNode(null);
} else {
// loop seems to be correctly wired - set head
NodeContainer headNode = m_workflow.getNode(slc.getOwner());
if (headNode == null) {
// odd: head is not in the same workflow,
// ignore as well during configure
sncNode.setLoopStartNode(null);
} else {
// head found, let the end node know about it:
sncNode.setLoopStartNode(((NativeNodeContainer) headNode).getNode());
}
}
}
// TODO think about it... happens magically
// propagate HiLiteHandlers from the predecessors onto the inports
for (int i = 0; i < inCount; i++) {
snc.setInHiLiteHandler(i, hiliteHdls[i]);
}
// remember HiLiteHandler on OUTPORTS of all nodes!
HiLiteHandler[] oldHdl = new HiLiteHandler[snc.getNrOutPorts()];
for (int i = 0; i < oldHdl.length; i++) {
oldHdl[i] = snc.getOutPort(i).getHiLiteHandler();
}
// configure node itself
boolean outputSpecsChanged = false;
if (flowStackConflict) {
// can't be configured due to stack clash.
// make sure execution from here on is canceled
disableNodeForExecution(sncID);
// (ought to be red with this type of error!)
if (!snc.getInternalState().equals(IDLE)) {
// if not already idle make sure it is!
invokeResetOnSingleNodeContainer(snc);
}
// report the problem
snc.setNodeMessage(NodeMessage.merge(oldMessage, NodeMessage.newError("Can't merge FlowVariable Stacks! (likely a loop problem.)")));
// different outputs - empty ports!
outputSpecsChanged = true;
} else {
outputSpecsChanged = snc.configure(inSpecs, keepNodeMessage);
}
// NOTE:
// no need to clean stacks of LoopEnd nodes - done automagically
// inside the getFlowObjectStack of the ports of LoopEnd
// Nodes.
// check if FlowObjectStacks have changed
boolean stackChanged = false;
FlowObjectStack newFOS = snc.createOutFlowObjectStack();
stackChanged = !newFOS.equals(oldFOS);
// check if HiLiteHandlers have changed
boolean hiLiteHdlsChanged = false;
for (int i = 0; i < oldHdl.length; i++) {
HiLiteHandler hdl = snc.getOutPort(i).getHiLiteHandler();
hiLiteHdlsChanged |= (hdl != oldHdl[i]);
}
// "changed" means: output specs, outgoing flow stack, or any outport HiLiteHandler differ
configurationChanged = (outputSpecsChanged || stackChanged || hiLiteHdlsChanged);
// and finally check if we can queue this node!
if (snc.getInternalState().equals(UNCONFIGURED_MARKEDFOREXEC) || snc.getInternalState().equals(CONFIGURED_MARKEDFOREXEC)) {
queueIfQueuable(snc);
}
break;
case EXECUTED:
case EXECUTED_MARKEDFOREXEC:
// should not happen but could if reset has worked on slightly
// different nodes than configure, for instance.
// FIXME: report errors again, once configure follows only ports, not nodes.
LOGGER.debug("configure found " + snc.getInternalState() + " node: " + snc.getNameWithID());
break;
case PREEXECUTE:
case POSTEXECUTE:
case EXECUTING:
// should not happen but could if reset has worked on slightly
// different nodes than configure, for instance.
LOGGER.debug("configure found " + snc.getInternalState() + " node: " + snc.getNameWithID());
break;
case CONFIGURED_QUEUED:
case EXECUTED_QUEUED:
// should not happen but could if reset has worked on slightly
// different nodes than configure, for instance.
LOGGER.debug("configure found " + snc.getInternalState() + " node: " + snc.getNameWithID());
break;
default:
LOGGER.error("configure found weird state (" + snc.getInternalState() + "): " + snc.getNameWithID());
}
}
return configurationChanged;
// we have a problem here. Subsequent metanodes with through connections
// need to be configured no matter what - they can change their state
// because 3 nodes before in the pipeline the execute state changed...
// return configurationChanged == configurationChanged;
}
Usage of org.knime.core.node.Node in project knime-core (by KNIME): class SubNodeContainer, method getConvertToMetaNodeCopyPersistor.
/**
 * @return a persistor containing all but the virtual nodes and that is also fixing the in/out connections
 *         once the node is unwrapped to a metanode.
 */
WorkflowPersistor getConvertToMetaNodeCopyPersistor() {
    assert isLockedByCurrentThread();
    final Collection<WorkflowAnnotation> annotations = m_wfm.getWorkflowAnnotations();
    // collect every node except the virtual input and the virtual output node
    final NodeID[] copiedNodes = m_wfm.getNodeContainers().stream()
        .map(nc -> nc.getID())
        .filter(id -> id.getIndex() != m_virtualInNodeIDSuffix)
        .filter(id -> id.getIndex() != m_virtualOutNodeIDSuffix)
        .toArray(NodeID[]::new);
    final WorkflowCopyContent.Builder content = WorkflowCopyContent.builder();
    content.setNodeIDs(copiedNodes);
    content.setAnnotation(annotations.toArray(new WorkflowAnnotation[annotations.size()]));
    content.setIncludeInOutConnections(true);
    final WorkflowPersistor persistor = m_wfm.copy(true, content.build());
    // Rewire the dangling connections that used to attach to the virtual nodes:
    // connections on port 0 are dropped, all higher ports shift down by one.
    final Set<ConnectionContainerTemplate> extraConnections = persistor.getAdditionalConnectionSet();
    for (final Iterator<ConnectionContainerTemplate> it = extraConnections.iterator(); it.hasNext();) {
        final ConnectionContainerTemplate conn = it.next();
        if (conn.getSourceSuffix() == m_virtualInNodeIDSuffix) {
            if (conn.getSourcePort() == 0) {
                it.remove();
                continue;
            }
            conn.setSourceSuffix(-1);
            conn.setSourcePort(conn.getSourcePort() - 1);
        }
        if (conn.getDestSuffix() == m_virtualOutNodeIDSuffix) {
            if (conn.getDestPort() == 0) {
                it.remove();
                continue;
            }
            conn.setDestSuffix(-1);
            conn.setDestPort(conn.getDestPort() - 1);
        }
    }
    return persistor;
}
Usage of org.knime.core.node.Node in project knime-core (by KNIME): class CovarianceMatrixCalculatorTest, method setUp.
/**
 * Creates an {@link ExecutionContext} around a dummy node so tests can create tables.
 *
 * @throws java.lang.Exception
 */
@Before
public void setUp() throws Exception {
    // the virtual chunk factory serves as a stand-in; the raw cast is unavoidable here
    @SuppressWarnings({ "unchecked", "rawtypes" })
    final NodeFactory<NodeModel> factory = (NodeFactory)new VirtualParallelizedChunkPortObjectInNodeFactory(new PortType[0]);
    final Node dummyNode = new Node(factory);
    m_exec = new ExecutionContext(new DefaultNodeProgressMonitor(), dummyNode,
        SingleNodeContainer.MemoryPolicy.CacheOnDisc, new HashMap<Integer, ContainerTable>());
}
Aggregations