Use of org.knime.core.node.workflow.LoopStartNode in project knime-core by knime.
The class LoopEndJoinNodeModel, method execute.
/**
 * {@inheritDoc}
 */
@Override
protected BufferedDataTable[] execute(final BufferedDataTable[] inData, final ExecutionContext exec) throws Exception {
    boolean hasSameRowsInEachIteration = m_configuration.hasSameRowsInEachIteration();
    LoopStartNode startNode = getLoopStartNode();
    if (!(startNode instanceof LoopStartNodeTerminator)) {
        throw new IllegalStateException("Loop end is not connected"
            + " to matching/corresponding loop start node. You"
            + " are trying to create an infinite loop!");
    }
    boolean continueLoop = !((LoopStartNodeTerminator) startNode).terminateLoop();
    if (m_currentAppendTable == null) {
        m_currentAppendTable = copy(inData[0], false, exec);
    } else if (hasSameRowsInEachIteration) {
        boolean isCacheNew = m_iteration % 50 == 0;
        double amount = isCacheNew ? (1.0 / 3.0) : (1.0 / 2.0);
        ExecutionContext copyCtx = exec.createSubExecutionContext(amount);
        ExecutionContext joinCtx = exec.createSubExecutionContext(amount);
        exec.setProgress("Copying input");
        BufferedDataTable t = copy(inData[0], true, copyCtx);
        copyCtx.setProgress(1.0);
        exec.setProgress("Joining with previous input");
        m_currentAppendTable = exec.createJoinedTable(m_currentAppendTable, t, joinCtx);
        joinCtx.setProgress(1.0);
        if (isCacheNew) {
            exec.setProgress("Caching intermediate results (iteration " + m_iteration + ")");
            ExecutionContext ctx = exec.createSubExecutionContext(amount);
            // copy the whole table every 50 iterations (avoids wrapping too many individual tables)
            // In this case the whole table is copied and column names DON'T need to be made unique (bugfix 6544)
            m_currentAppendTable = copy(m_currentAppendTable, m_appendIterSuffixForBackwardComp, ctx);
            ctx.setProgress(1.0);
        }
    } else {
        Joiner2Settings settings = new Joiner2Settings();
        settings.setCompositionMode(CompositionMode.MatchAll);
        settings.setDuplicateColumnSuffix(" (Iter #" + m_iteration + ")");
        settings.setDuplicateHandling(DuplicateHandling.AppendSuffix);
        settings.setEnableHiLite(false);
        // joining on RowIDs, this should not generate new row IDs but
        // only fill missing rows in either table
        settings.setJoinMode(JoinMode.FullOuterJoin);
        settings.setLeftIncludeAll(true);
        settings.setRightIncludeAll(true);
        // TODO to be replaced by Joiner2Settings.ROW_KEY_IDENTIFIER
        // once that is public
        settings.setLeftJoinColumns(new String[] { "$RowID$" });
        settings.setRightJoinColumns(new String[] { "$RowID$" });
        BufferedDataTable left = m_currentAppendTable;
        BufferedDataTable right = copy(inData[0], true, exec.createSubExecutionContext(0.1));
        Joiner joiner = new Joiner(left.getDataTableSpec(), right.getDataTableSpec(), settings);
        m_currentAppendTable = joiner.computeJoinTable(left, right, exec.createSubExecutionContext(0.9));
    }
    m_iteration += 1;
    if (continueLoop) {
        super.continueLoop();
        return null;
    } else {
        return new BufferedDataTable[] { m_currentAppendTable };
    }
}
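The method above follows the standard loop-end handshake with the loop start: verify that the start node implements LoopStartNodeTerminator, collect the current iteration's data, then either call continueLoop() and return null, or return the final tables. Below is a minimal sketch of that pattern for a one-in/one-out node; m_resultContainer is a hypothetical field, and the row-key suffixing mirrors what LoopEndConditionNodeModel does further down in this listing.

// minimal sketch of the loop-end pattern (m_resultContainer is hypothetical)
private BufferedDataContainer m_resultContainer;

@Override
protected BufferedDataTable[] execute(final BufferedDataTable[] inData,
        final ExecutionContext exec) throws Exception {
    // the loop start must implement LoopStartNodeTerminator,
    // otherwise there is no way to decide when to stop
    LoopStartNode start = getLoopStartNode();
    if (!(start instanceof LoopStartNodeTerminator)) {
        throw new IllegalStateException(
            "Loop end is not connected to a matching loop start node");
    }
    int iteration = peekFlowVariableInt("currentIteration");
    if (m_resultContainer == null) {
        // first iteration: open the collecting container
        m_resultContainer = exec.createDataContainer(inData[0].getDataTableSpec());
    }
    for (DataRow row : inData[0]) {
        exec.checkCanceled();
        // suffix the key so row IDs stay unique across iterations
        m_resultContainer.addRowToTable(
            new DefaultRow(new RowKey(row.getKey() + "#" + iteration), row));
    }
    if (((LoopStartNodeTerminator) start).terminateLoop()) {
        // last iteration: close the container and deliver the result
        m_resultContainer.close();
        return new BufferedDataTable[] { m_resultContainer.getTable() };
    } else {
        // request another iteration; the return value is ignored in this case
        continueLoop();
        return null;
    }
}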
Use of org.knime.core.node.workflow.LoopStartNode in project knime-core by knime.
The class LoopEndConditionNodeModel, method execute.
/**
 * {@inheritDoc}
 */
@Override
protected BufferedDataTable[] execute(final BufferedDataTable[] inData, final ExecutionContext exec) throws Exception {
    int count = peekFlowVariableInt("currentIteration");
    exec.setMessage("Iteration " + count);
    DataTableSpec spec1 = createSpec1(inData[0].getDataTableSpec());
    if (m_collectContainer == null) {
        assert m_variableContainer == null;
        m_startTime = System.currentTimeMillis();
        // first time we are getting to this: open container
        m_collectContainer = exec.createDataContainer(spec1);
        m_variableContainer = exec.createDataContainer(createSpec2());
    } else if (!spec1.equalStructure(m_collectContainer.getTableSpec())) {
        DataTableSpec predSpec = m_collectContainer.getTableSpec();
        StringBuilder error = new StringBuilder("Input table's structure differs from reference "
            + "(first iteration) table: ");
        if (spec1.getNumColumns() != predSpec.getNumColumns()) {
            error.append("different column counts ");
            error.append(spec1.getNumColumns());
            error.append(" vs. ").append(predSpec.getNumColumns());
        } else {
            for (int i = 0; i < spec1.getNumColumns(); i++) {
                DataColumnSpec inCol = spec1.getColumnSpec(i);
                DataColumnSpec predCol = predSpec.getColumnSpec(i);
                if (!inCol.equalStructure(predCol)) {
                    error.append("Column ").append(i).append(" [");
                    error.append(inCol).append("] vs. [");
                    error.append(predCol).append("]");
                }
            }
        }
        throw new IllegalArgumentException(error.toString());
    }
    RowKey rk = new RowKey("Iteration " + count);
    if (m_settings.variableType() == Type.DOUBLE) {
        m_variableContainer.addRowToTable(new DefaultRow(rk, new DoubleCell(peekFlowVariableDouble(m_settings.variableName()))));
    } else if (m_settings.variableType() == Type.INTEGER) {
        m_variableContainer.addRowToTable(new DefaultRow(rk, new IntCell(peekFlowVariableInt(m_settings.variableName()))));
    } else {
        m_variableContainer.addRowToTable(new DefaultRow(rk, new StringCell(peekFlowVariableString(m_settings.variableName()))));
    }
    LoopStartNode lsn = getLoopStartNode();
    boolean stop = checkCondition()
        || ((lsn instanceof LoopStartNodeTerminator) && ((LoopStartNodeTerminator) lsn).terminateLoop());
    if ((m_settings.addLastRows() && !m_settings.addLastRowsOnly())
        || ((stop == m_settings.addLastRows()) && (stop == m_settings.addLastRowsOnly()))) {
        exec.setMessage("Collecting rows from current iteration");
        int k = 0;
        final double max = inData[0].size();
        IntCell currIterCell = new IntCell(count);
        for (DataRow row : inData[0]) {
            exec.checkCanceled();
            if (k++ % 10 == 0) {
                exec.setProgress(k / max);
            }
            DataRow newRow = new DefaultRow(new RowKey(row.getKey() + "#" + count), row);
            if (m_settings.addIterationColumn()) {
                newRow = new AppendedColumnRow(newRow, currIterCell);
            }
            m_collectContainer.addRowToTable(newRow);
        }
    }
    if (stop) {
        m_collectContainer.close();
        m_variableContainer.close();
        BufferedDataTable out1 = m_collectContainer.getTable();
        BufferedDataTable out2 = m_variableContainer.getTable();
        LOGGER.debug("Total loop execution time: " + (System.currentTimeMillis() - m_startTime) + "ms");
        m_startTime = 0;
        return new BufferedDataTable[] { out1, out2 };
    } else {
        continueLoop();
        return new BufferedDataTable[2];
    }
}
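The stop decision here combines the node's own checkCondition() with the loop start's terminateLoop(), so the loop ends as soon as either side requests it. checkCondition() itself is not part of this listing; the following is only a plausible sketch, assuming the settings object also exposes the reference value to compare against (the value() accessor is hypothetical, everything else matches the code above).

// hypothetical sketch: stop once the monitored flow variable reaches
// the configured reference value (m_settings.value() is assumed)
private boolean checkCondition() {
    String reference = m_settings.value();
    switch (m_settings.variableType()) {
        case INTEGER:
            return peekFlowVariableInt(m_settings.variableName())
                >= Integer.parseInt(reference);
        case DOUBLE:
            return peekFlowVariableDouble(m_settings.variableName())
                >= Double.parseDouble(reference);
        default: // STRING
            return peekFlowVariableString(m_settings.variableName())
                .equals(reference);
    }
}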
Use of org.knime.core.node.workflow.LoopStartNode in project knime-core by knime.
The class NodeModel, method executeModel.
/**
 * Invokes the abstract <code>#execute()</code> method of this model. In
 * addition, this method notifies all assigned views of the model about the
 * changes.
 *
 * @param rawData An array of <code>PortObject</code> objects holding the data
 *            from the inputs (includes the flow variable port).
 * @param exEnv The execution environment used for the execution of this model.
 * @param exec The execution monitor which is passed to the execute method
 *            of this model.
 * @return The result of the execution in form of an array with
 *         <code>DataTable</code> elements, as many as the node has
 *         outputs.
 * @throws Exception any exception or error that is fired in the derived
 *             model will just be forwarded. It may throw a
 *             CanceledExecutionException if the user pressed cancel during
 *             execution. Even if the derived model doesn't check, the
 *             result will be discarded and the exception thrown.
 * @throws IllegalStateException If the number of <code>PortObject</code>
 *             objects returned by the derived <code>NodeModel</code>
 *             does not match the number of outputs, or if any of them is
 *             null.
 * @see #execute(PortObject[],ExecutionContext)
 * @since 2.8
 * @noreference This method is not intended to be referenced by clients
 *              (use the Node class instead)
 */
PortObject[] executeModel(final PortObject[] rawData, final ExecutionEnvironment exEnv, final ExecutionContext exec) throws Exception {
    final PortObject[] data = ArrayUtils.remove(rawData, 0);
    assert (data != null && data.length == getNrInPorts());
    assert (exec != null);
    setWarningMessage(null);
    // check for compatible input PortObjects
    for (int i = 0; i < data.length; i++) {
        PortType thisType = getInPortType(i);
        if (thisType.isOptional() && data[i] == null) {
            // ignore non-populated optional input
        } else if (data[i] instanceof InactiveBranchPortObject) {
            assert this instanceof InactiveBranchConsumer;
            // allow Inactive POs at InactiveBranchConsumer
        } else if (!(thisType.getPortObjectClass().isInstance(data[i]))) {
            m_logger.error(" (Wanted: " + thisType.getPortObjectClass().getName() + ", "
                + "actual: " + data[i].getClass().getName() + ")");
            throw new IllegalStateException("Connection Error: Mismatch"
                + " of input port types (port " + (i) + ").");
        }
    }
    // temporary storage for result of derived model.
    // EXECUTE DERIVED MODEL
    PortObject[] outData;
    if (!exEnv.reExecute()) {
        outData = execute(data, exec);
    } else {
        // FIXME: implement re-execution with loading view content and execute
        if (this instanceof InteractiveNode) {
            InteractiveNode iThis = (InteractiveNode) this;
            ViewContent viewContent = exEnv.getPreExecuteViewContent();
            iThis.loadViewValue(viewContent, exEnv.getUseAsDefault());
            outData = execute(data, exec);
        } else if (this instanceof LoopStartNode) {
            outData = execute(data, exec);
        } else {
            m_logger.coding("Cannot re-execute non-interactive node. Using normal execute instead.");
            outData = execute(data, exec);
        }
    }
    // if execution was canceled without an exception flying, fail here
    if (exec.isCanceled()) {
        throw new CanceledExecutionException("Result discarded due to user cancel");
    }
    if (outData == null) {
        outData = new PortObject[getNrOutPorts()];
    }
    /* Cleanup operation for nodes that just pass on their input
     * data table. We need to wrap those here so that the framework
     * explicitly references them (instead of copying). */
    for (int i = 0; i < outData.length; i++) {
        if (outData[i] instanceof BufferedDataTable) {
            for (int j = 0; j < data.length; j++) {
                if (outData[i] == data[j]) {
                    outData[i] = exec.createWrappedTable((BufferedDataTable) data[j]);
                }
            }
        } else if (outData[i] instanceof FileStorePortObject) {
            // file stores can be 'external', e.g. when a model reader node reads an external model file
            FileStorePortObject fsPO = (FileStorePortObject) outData[i];
            FileStoreHandlerRepository expectedRep = exec.getFileStoreHandler().getFileStoreHandlerRepository();
            FileStoreHandlerRepository actualRep = FileStoreUtil.getFileStores(fsPO).stream()
                .map(FileStoreUtil::getFileStoreHandler)
                .map(h -> h.getFileStoreHandlerRepository())
                .findFirst().orElse(expectedRep);
            if (actualRep != expectedRep) {
                outData[i] = Node.copyPortObject(fsPO, exec);
            }
        }
    }
    // if the number of out tables does not match: fail
    if (outData.length != getNrOutPorts()) {
        throw new IllegalStateException("Invalid result. Execution failed. "
            + "Reason: Incorrect implementation; the execute"
            + " method in " + this.getClass().getSimpleName()
            + " returned null or an incorrect number of output"
            + " tables.");
    }
    // check the result, data tables must not be null
    for (int i = 0; i < outData.length; i++) {
        // do not check for null output tables if this is the end node
        // of a loop and another loop iteration is requested
        if ((getLoopContext() == null) && (outData[i] == null)) {
            m_logger.error("Execution failed: Incorrect implementation;"
                + " the execute method in " + this.getClass().getSimpleName()
                + " returned a null data table at port: " + i);
            throw new IllegalStateException("Invalid result. "
                + "Execution failed, reason: data at output " + i + " is null.");
        }
    }
    // warn when all created data tables are empty
    // - only if the execute didn't issue a warning already
    if ((m_warningMessage == null) || (m_warningMessage.length() == 0)) {
        boolean hasData = false;
        // number of BDT ports
        int bdtPortCount = 0;
        for (int i = 0; i < outData.length; i++) {
            if (outData[i] instanceof BufferedDataTable) {
                // do some sanity checks on PortObjects holding data tables
                bdtPortCount += 1;
                BufferedDataTable outDataTable = (BufferedDataTable) outData[i];
                if (outDataTable.size() > 0) {
                    hasData = true;
                } else {
                    m_logger.info("The result table at port " + i + " contains no rows");
                }
            }
        }
        if (!hasData && bdtPortCount > 0) {
            if (bdtPortCount == 1) {
                setWarningMessage("Node created an empty data table.");
            } else {
                setWarningMessage("Node created empty data tables on all out-ports.");
            }
        }
    }
    setHasContent(true);
    PortObject[] rawOutData = new PortObject[getNrOutPorts() + 1];
    rawOutData[0] = FlowVariablePortObject.INSTANCE;
    System.arraycopy(outData, 0, rawOutData, 1, outData.length);
    return rawOutData;
}
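Taken together, the checks in executeModel define the contract that a derived execute implementation must honor: return exactly getNrOutPorts() port objects, none of them null (null is only tolerated while a loop is still iterating), and input tables that are passed through unchanged get wrapped by the framework rather than copied. A minimal sketch of a conforming one-in/one-out model follows; the class name is hypothetical and only standard NodeModel overrides are used.

import java.io.File;

import org.knime.core.data.DataTableSpec;
import org.knime.core.node.BufferedDataTable;
import org.knime.core.node.ExecutionContext;
import org.knime.core.node.ExecutionMonitor;
import org.knime.core.node.InvalidSettingsException;
import org.knime.core.node.NodeModel;
import org.knime.core.node.NodeSettingsRO;
import org.knime.core.node.NodeSettingsWO;

// hypothetical pass-through model: returns its input table unchanged
public class PassThroughNodeModel extends NodeModel {

    public PassThroughNodeModel() {
        super(1, 1); // one data in-port, one data out-port
    }

    @Override
    protected DataTableSpec[] configure(final DataTableSpec[] inSpecs)
            throws InvalidSettingsException {
        return inSpecs; // output spec equals input spec
    }

    @Override
    protected BufferedDataTable[] execute(final BufferedDataTable[] inData,
            final ExecutionContext exec) throws Exception {
        // returning the input directly is legal: executeModel detects the
        // identity and wraps it via exec.createWrappedTable(...), so the
        // framework references the table instead of copying it
        return new BufferedDataTable[] { inData[0] };
    }

    // remaining NodeModel hooks: nothing to reset, save, or load here
    @Override
    protected void reset() { }

    @Override
    protected void saveSettingsTo(final NodeSettingsWO settings) { }

    @Override
    protected void validateSettings(final NodeSettingsRO settings)
            throws InvalidSettingsException { }

    @Override
    protected void loadValidatedSettingsFrom(final NodeSettingsRO settings)
            throws InvalidSettingsException { }

    @Override
    protected void loadInternals(final File nodeInternDir, final ExecutionMonitor exec) { }

    @Override
    protected void saveInternals(final File nodeInternDir, final ExecutionMonitor exec) { }
}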