Use of org.knime.core.node.ExecutionContext in project knime-core by knime.
The class VirtualSubNodeOutputNodeModel, method createStreamableOperator.
/**
 * {@inheritDoc}
 */
@Override
public StreamableOperator createStreamableOperator(final PartitionInfo partitionInfo,
        final PortObjectSpec[] inSpecs) throws InvalidSettingsException {
    return new StreamableOperator() {

        @Override
        public void runFinal(final PortInput[] inputs, final PortOutput[] outputs,
                final ExecutionContext exec) throws Exception {
            PortObject[] inObjects = new PortObject[getNrInPorts()];
            for (int i = 0; i < inObjects.length; i++) {
                PortType inPortType = getInPortType(i);
                if (BufferedDataTable.TYPE.equals(inPortType)) {
                    // table port: drain the streamed rows into a buffered table
                    BufferedDataContainer container =
                        exec.createDataContainer((DataTableSpec)inSpecs[i]);
                    DataRow r;
                    while ((r = ((RowInput)inputs[i]).poll()) != null) {
                        container.addRowToTable(r);
                    }
                    container.close();
                    inObjects[i] = container.getTable();
                } else {
                    // non-table port: the port object is available directly
                    inObjects[i] = ((PortObjectInput)inputs[i]).getPortObject();
                }
            }
            setNewExchange(new VirtualSubNodeExchange(inObjects, getVisibleFlowVariables()));
        }
    };
}
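The loop above shows the standard way to drain a streamed table port: poll the RowInput until it returns null, then close the container. A minimal sketch of that pattern in isolation (the drainToTable helper name is illustrative, not from the KNIME sources; only documented streaming API is used):

    // Hypothetical sketch: drain one streamed table port into a BufferedDataTable.
    // RowInput.poll() blocks until a row is available and returns null once the
    // upstream stream is exhausted.
    private static BufferedDataTable drainToTable(final RowInput in,
            final DataTableSpec spec, final ExecutionContext exec) throws Exception {
        BufferedDataContainer container = exec.createDataContainer(spec);
        DataRow row;
        while ((row = in.poll()) != null) {
            container.addRowToTable(row);
        }
        container.close(); // the container must be closed before getTable()
        return container.getTable();
    }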
Use of org.knime.core.node.ExecutionContext in project knime-core by knime.
The class SandboxedNodeCreator, method copyExistingTablesIntoSandboxContainer.
/**
 * Copies the tables (port and internal) into the context of the corresponding node in the
 * targetWFM. The execution result must correspond to the passed node container.
 *
 * @param execResult the object holding the result of the sourceNC. If the sourceNC is a
 *            workflow, this must hold the results of all contained nodes.
 * @param sourceNC the node that produced the execution result
 * @param targetNC the container into which the tables are copied
 * @param progressMon for progress information
 * @param copyDataIntoNewContext as per {@link #setCopyData(boolean)}
 * @throws CanceledExecutionException if the operation is canceled
 * @throws IOException if the tables cannot be copied
 */
public static void copyExistingTablesIntoSandboxContainer(
        final NodeContainerExecutionResult execResult, final NodeContainer sourceNC,
        final NodeContainer targetNC, final ExecutionMonitor progressMon,
        final boolean copyDataIntoNewContext) throws CanceledExecutionException, IOException {
    assert targetNC.getNrOutPorts() == sourceNC.getNrOutPorts();
    if (execResult instanceof NativeNodeContainerExecutionResult) {
        NativeNodeContainerExecutionResult sncResult =
            (NativeNodeContainerExecutionResult)execResult;
        // execResult and node types must match
        assert sourceNC instanceof NativeNodeContainer;
        assert targetNC instanceof NativeNodeContainer;
        // if data is to be copied, get the correct execution context
        ExecutionContext targetExec = copyDataIntoNewContext
            ? ((SingleNodeContainer)targetNC).createExecutionContext() : null;
        NodeExecutionResult ner = sncResult.getNodeExecutionResult();
        // TODO this copy process should take place elsewhere, though it needs the
        // final execution context for a correct copy of BufferedDataTable objects
        PortObject[] resultTables = new PortObject[targetNC.getNrOutPorts()];
        int copyCount = resultTables.length;
        // also copy the internally held tables (e.g. the table backing the table
        // view) -- reuse the out-port copies where they match (likely they don't)
        PortObject[] oldInternTables = ner.getInternalHeldPortObjects();
        PortObject[] newInternTables = null;
        if (oldInternTables != null) {
            newInternTables = new PortObject[oldInternTables.length];
            copyCount += newInternTables.length;
        }
        // skip flow variable output
        for (int i = 0; i < resultTables.length; i++) {
            ExecutionMonitor sub = progressMon.createSubProgress(1.0 / copyCount);
            progressMon.setMessage("Port " + i);
            PortObject o = ner.getPortObject(i);
            PortObject newPO = copyPortObject(o, sub, targetExec);
            if (newInternTables != null) {
                for (int j = 0; j < oldInternTables.length; j++) {
                    if (oldInternTables[j] == o) {
                        newInternTables[j] = newPO;
                    }
                }
            }
            sub.setProgress(1.0);
            resultTables[i] = newPO;
        }
        if (newInternTables != null) {
            for (int i = 0; i < newInternTables.length; i++) {
                ExecutionMonitor sub = progressMon.createSubProgress(1.0 / copyCount);
                progressMon.setMessage("Internal Table " + i);
                if (newInternTables[i] == null) {
                    PortObject oldT = oldInternTables[i];
                    PortObject newT = copyPortObject(oldT, sub, targetExec);
                    newInternTables[i] = newT;
                }
                sub.setProgress(1.0);
            }
        }
        if (oldInternTables != null) {
            ner.setInternalHeldPortObjects(newInternTables);
        }
        ner.setPortObjects(resultTables);
    } else if (execResult instanceof WorkflowExecutionResult) {
        WorkflowExecutionResult wfmResult = (WorkflowExecutionResult)execResult;
        // exec result and node types must match
        WorkflowManager targetWFM = (WorkflowManager)targetNC;
        WorkflowManager sourceWFM = (WorkflowManager)sourceNC;
        copyIntoSandboxContainerRecursive(sourceWFM, targetWFM, wfmResult, progressMon,
            copyDataIntoNewContext);
    } else if (execResult instanceof SubnodeContainerExecutionResult) {
        SubnodeContainerExecutionResult subResult = (SubnodeContainerExecutionResult)execResult;
        WorkflowExecutionResult wfmResult = subResult.getWorkflowExecutionResult();
        WorkflowManager targetWFM = ((SubNodeContainer)targetNC).getWorkflowManager();
        WorkflowManager sourceWFM = ((SubNodeContainer)sourceNC).getWorkflowManager();
        copyIntoSandboxContainerRecursive(sourceWFM, targetWFM, wfmResult, progressMon,
            copyDataIntoNewContext);
    } else {
        throw new IllegalStateException("Unsupported node result type: "
            + execResult.getClass().getSimpleName());
    }
}
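The copyPortObject helper referenced above is not shown in this snippet. A plausible sketch of what such a helper might look like, using only documented KNIME API (ExecutionContext.createBufferedDataTable copies a table into the given context); the null-context shortcut mirrors the copyDataIntoNewContext flag, but the actual implementation in SandboxedNodeCreator may differ:

    // Hypothetical sketch of a copyPortObject helper. A null targetExec means
    // "do not copy", matching copyDataIntoNewContext == false above.
    private static PortObject copyPortObject(final PortObject obj, final ExecutionMonitor sub,
            final ExecutionContext targetExec) throws CanceledExecutionException {
        if (targetExec == null || obj == null) {
            return obj; // no copy requested (or nothing to copy)
        }
        if (obj instanceof BufferedDataTable) {
            // re-creates the table in the target context so it is owned by targetNC
            return targetExec.createBufferedDataTable((BufferedDataTable)obj, sub);
        }
        return obj; // non-table ports would need their own serialization round-trip
    }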
Use of org.knime.core.node.ExecutionContext in project knime-core by knime.
The class NormalizerApplyNodeModel, method createStreamableOperator.
/**
 * {@inheritDoc}
 */
@Override
public StreamableOperator createStreamableOperator(final PartitionInfo partitionInfo,
        final PortObjectSpec[] inSpecs) throws InvalidSettingsException {
    if (getNrOutPorts() == 2) {
        // with two out-ports, fall back to the default implementation
        return super.createStreamableOperator(partitionInfo, inSpecs);
    } else {
        return new StreamableOperator() {

            @Override
            public void runFinal(final PortInput[] inputs, final PortOutput[] outputs,
                    final ExecutionContext exec) throws Exception {
                assert outputs.length == 1;
                NormalizerPortObject model =
                    (NormalizerPortObject)((PortObjectInput)inputs[0]).getPortObject();
                RowInput rowInput = (RowInput)inputs[1];
                AffineTransTable t =
                    new AffineTransTable(rowInput, getAffineTrans(model.getConfiguration()));
                RowOutput rowOutput = (RowOutput)outputs[0];
                RowIterator it = t.iterator();
                while (it.hasNext()) {
                    rowOutput.push(it.next());
                }
                if (t.getErrorMessage() != null) {
                    // TODO collect error messages from remote nodes if run distributed
                    setWarningMessage(t.getErrorMessage());
                }
                rowInput.close();
                rowOutput.close();
            }
        };
    }
}
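The push loop above does not check for cancellation between rows; for long-running tables one would typically interleave exec.checkCanceled(). A minimal sketch of such a loop (the pushAll helper name is illustrative, not from the KNIME sources):

    // Hypothetical helper: stream every row of an iterator to a RowOutput,
    // honoring user cancellation between rows.
    private static void pushAll(final RowIterator it, final RowOutput out,
            final ExecutionContext exec) throws Exception {
        while (it.hasNext()) {
            exec.checkCanceled(); // throws CanceledExecutionException when canceled
            out.push(it.next());
        }
        out.close(); // signals downstream operators that no more rows will arrive
    }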
Use of org.knime.core.node.ExecutionContext in project knime-core by knime.
The class Pivot2NodeModel, method execute.
/**
 * {@inheritDoc}
 */
@Override
protected PortObject[] execute(final PortObject[] inData, final ExecutionContext exec)
        throws Exception {
    final BufferedDataTable table = (BufferedDataTable)inData[0];
    final List<String> groupAndPivotCols = createAllColumns();
    final BufferedDataTable groupTable;
    final String orderPivotColumnName;
    ExecutionContext groupAndPivotExec = exec.createSubExecutionContext(0.5);
    ExecutionContext groupExec = exec.createSubExecutionContext(0.25);
    ExecutionContext pivotExec = exec.createSubExecutionContext(0.25);
    double progMainTotal = 0.0;
    double progMainTableAppendIndexForSort = isProcessInMemory() || isRetainOrder() ? 1.0 : 0.0;
    progMainTotal += progMainTableAppendIndexForSort;
    double progMainTableGroup = 5.0;
    progMainTotal += progMainTableGroup;
    double progMainTableInMemSort = isProcessInMemory() ? 3.0 : 0.0;
    progMainTotal += progMainTableInMemSort;
    double progMainTableGetPivots = 1.0;
    progMainTotal += progMainTableGetPivots;
    double progMainTableFillPivots = 1.0;
    progMainTotal += progMainTableFillPivots;
    double progMainTableRestoreSort = isProcessInMemory() || isRetainOrder() ? 1.0 : 0.0;
    progMainTotal += progMainTableRestoreSort;
    double progMainTableReplaceRowKey = isProcessInMemory() ? 1.0 : 0.0;
    progMainTotal += progMainTableReplaceRowKey;
    if (isProcessInMemory() || isRetainOrder()) {
        exec.setMessage("Keeping row order");
        final String retainOrderCol =
            DataTableSpec.getUniqueColumnName(table.getDataTableSpec(), "#pivot_order#");
        // append a temporary id column, aggregated with the row-order method
        final ColumnAggregator[] colAggregators =
            getColumnAggregators().toArray(new ColumnAggregator[0]);
        final Set<String> workingCols = new LinkedHashSet<String>();
        workingCols.addAll(groupAndPivotCols);
        for (final ColumnAggregator ca : colAggregators) {
            workingCols.add(ca.getOriginalColName());
        }
        workingCols.add(retainOrderCol);
        final BufferedDataTable appTable = GroupByTable.appendOrderColumn(
            groupAndPivotExec.createSubExecutionContext(
                progMainTableAppendIndexForSort / progMainTotal),
            table, workingCols, retainOrderCol);
        final DataColumnSpec retainOrderColSpec =
            appTable.getSpec().getColumnSpec(retainOrderCol);
        final ColumnAggregator[] aggrs = new ColumnAggregator[colAggregators.length + 1];
        System.arraycopy(colAggregators, 0, aggrs, 0, colAggregators.length);
        aggrs[colAggregators.length] = new ColumnAggregator(retainOrderColSpec,
            AggregationMethods.getRowOrderMethod(), true);
        orderPivotColumnName =
            getColumnNamePolicy().createColumName(aggrs[colAggregators.length]);
        exec.setMessage("Grouping main table");
        final GroupByTable groupByTable = createGroupByTable(
            groupAndPivotExec.createSubExecutionContext(progMainTableGroup / progMainTotal),
            appTable, groupAndPivotCols, isProcessInMemory(),
            false, /* retain order is always false here; it is handled by the pivoting */
            Arrays.asList(aggrs));
        // when processing in memory, sort the table by the group & pivot columns
        if (isProcessInMemory()) {
            exec.setMessage("Sorting group table");
            final boolean[] sortDirection = new boolean[groupAndPivotCols.size()];
            // sort in ascending order to ensure missing values end up at the end
            Arrays.fill(sortDirection, true);
            final SortedTable sortedGroupByTable = new SortedTable(
                groupByTable.getBufferedTable(), groupAndPivotCols, sortDirection,
                groupAndPivotExec.createSubExecutionContext(
                    progMainTableInMemSort / progMainTotal));
            groupTable = sortedGroupByTable.getBufferedDataTable();
        } else {
            groupTable = groupByTable.getBufferedTable();
        }
    } else {
        exec.setMessage("Grouping main table");
        final GroupByTable groupByTable = createGroupByTable(
            groupAndPivotExec.createSubExecutionContext(progMainTableGroup / progMainTotal),
            table, groupAndPivotCols, isProcessInMemory(), false, getColumnAggregators());
        groupTable = groupByTable.getBufferedTable();
        orderPivotColumnName = null;
    }
    final List<String> pivotCols = m_pivotCols.getIncludeList();
    final int[] pivotIdx = new int[pivotCols.size()];
    final DataTableSpec groupSpec = groupTable.getSpec();
    final Set<String>[] combPivots = createCombinedPivots(groupSpec, pivotCols);
    for (int i = 0; i < pivotIdx.length; i++) {
        pivotIdx[i] = groupSpec.findColumnIndex(pivotCols.get(i));
    }
    exec.setProgress("Determining pivots...");
    ExecutionContext fillExec = groupAndPivotExec.createSubExecutionContext(
        progMainTableGetPivots / progMainTotal);
    final long groupTableSize = groupTable.size();
    long groupIndex = 0;
    for (final DataRow row : groupTable) {
        for (int i = 0; i < pivotIdx.length; i++) {
            if (combPivots[i] == null) {
                combPivots[i] = new LinkedHashSet<String>();
            }
            final DataCell cell = row.getCell(pivotIdx[i]);
            if (cell.isMissing()) {
                if (!m_ignoreMissValues.getBooleanValue()) {
                    combPivots[i].add(cell.toString());
                }
            } else {
                combPivots[i].add(cell.toString());
            }
        }
        fillExec.setProgress(groupIndex++ / (double)groupTableSize,
            String.format("Group \"%s\" (%d/%d)", row.getKey(), groupIndex, groupTableSize));
        fillExec.checkCanceled();
    }
    final Map<String, Integer> pivotStarts = new LinkedHashMap<String, Integer>();
    final DataTableSpec outSpec = createOutSpec(groupSpec, combPivots, pivotStarts,
        orderPivotColumnName);
    exec.setProgress("Filling pivot table");
    BufferedDataTable pivotTable = fillPivotTable(groupTable, outSpec, pivotStarts,
        groupAndPivotExec.createSubExecutionContext(progMainTableFillPivots / progMainTotal),
        orderPivotColumnName);
    if (orderPivotColumnName != null) {
        exec.setMessage("Restoring row order");
        final SortedTable sortedPivotTable = new SortedTable(pivotTable,
            Arrays.asList(new String[]{orderPivotColumnName}), new boolean[]{true},
            groupAndPivotExec.createSubExecutionContext(
                progMainTableRestoreSort / progMainTotal));
        pivotTable = sortedPivotTable.getBufferedDataTable();
        final ColumnRearranger colre = new ColumnRearranger(pivotTable.getSpec());
        colre.remove(orderPivotColumnName);
        pivotTable = exec.createColumnRearrangeTable(pivotTable, colre,
            exec.createSilentSubProgress(0.0));
    }
    // temporary fix for bug 3286
    if (isProcessInMemory()) {
        // when processing in memory, the row keys need to be re-computed
        final BufferedDataContainer rowkeyBuf = groupAndPivotExec
            .createSubExecutionContext(progMainTableReplaceRowKey / progMainTotal)
            .createDataContainer(pivotTable.getSpec());
        long rowIndex = 0;
        for (DataRow row : pivotTable) {
            rowkeyBuf.addRowToTable(new DefaultRow(RowKey.createRowKey(rowIndex++), row));
        }
        rowkeyBuf.close();
        pivotTable = rowkeyBuf.getTable();
    }
    groupAndPivotExec.setProgress(1.0);
    /* Fill the 3rd port */
    exec.setMessage("Determining pivot totals");
    double progPivotTotal = 0.0;
    double progPivotGroup = 5.0;
    progPivotTotal += progPivotGroup;
    double progPivotFillMissing = 1.0;
    progPivotTotal += progPivotFillMissing;
    double progPivotFillPivots = 1.0;
    progPivotTotal += progPivotFillPivots;
    double progPivotOverallTotals = m_totalAggregation.getBooleanValue() ? 5.0 : 0.0;
    progPivotTotal += progPivotOverallTotals;
    // create the pivot table on the pivot columns only (for grouping);
    // the pivoting then collapses the result into a single row
    final GroupByTable rowGroup = createGroupByTable(
        pivotExec.createSubExecutionContext(progPivotGroup / progPivotTotal), table,
        m_pivotCols.getIncludeList(), isProcessInMemory(), isRetainOrder(),
        getColumnAggregators());
    final BufferedDataTable rowGroupTable = rowGroup.getBufferedTable();
    // fill the group columns with missing cells
    final ColumnRearranger colre = new ColumnRearranger(rowGroupTable.getDataTableSpec());
    for (int i = 0; i < getGroupByColumns().size(); i++) {
        final DataColumnSpec cspec = outSpec.getColumnSpec(i);
        final CellFactory factory = new SingleCellFactory(cspec) {

            /**
             * {@inheritDoc}
             */
            @Override
            public DataCell getCell(final DataRow row) {
                return DataType.getMissingCell();
            }
        };
        colre.insertAt(i, factory);
    }
    final BufferedDataTable groupedRowTable = exec.createColumnRearrangeTable(rowGroupTable,
        colre, pivotExec.createSubExecutionContext(progPivotFillMissing / progPivotTotal));
    BufferedDataTable pivotRowsTable = fillPivotTable(groupedRowTable, outSpec, pivotStarts,
        pivotExec.createSubExecutionContext(progPivotFillPivots / progPivotTotal), null);
    if (orderPivotColumnName != null) {
        final ColumnRearranger colre2 = new ColumnRearranger(pivotRowsTable.getSpec());
        colre2.remove(orderPivotColumnName);
        pivotRowsTable = exec.createColumnRearrangeTable(pivotRowsTable, colre2,
            exec.createSilentSubProgress(0.0));
    }
    // total aggregation without grouping
    if (m_totalAggregation.getBooleanValue()) {
        @SuppressWarnings("unchecked")
        final GroupByTable totalGroup = createGroupByTable(
            pivotExec.createSubExecutionContext(progPivotOverallTotals / progPivotTotal),
            table, Collections.EMPTY_LIST, isProcessInMemory(), isRetainOrder(),
            getColumnAggregators());
        final BufferedDataTable totalGroupTable = totalGroup.getBufferedTable();
        final DataTableSpec pivotsRowsSpec = pivotRowsTable.getSpec();
        final DataTableSpec totalGroupSpec = totalGroupTable.getSpec();
        final DataTableSpec overallTotalSpec = new DataTableSpec(pivotsRowsSpec, totalGroupSpec);
        final BufferedDataContainer buf = exec.createDataContainer(overallTotalSpec);
        if (pivotRowsTable.size() > 0) {
            final List<DataCell> pivotTotalsCells = new ArrayList<DataCell>();
            final DataRow pivotsRow = pivotRowsTable.iterator().next();
            for (final DataCell cell : pivotsRow) {
                pivotTotalsCells.add(cell);
            }
            final DataRow totalGroupRow = totalGroupTable.iterator().next();
            for (final DataCell cell : totalGroupRow) {
                pivotTotalsCells.add(cell);
            }
            buf.addRowToTable(new DefaultRow(new RowKey("Totals"), pivotTotalsCells));
        }
        buf.close();
        pivotRowsTable = buf.getTable();
    }
    pivotExec.setProgress(1.0);
    /* Fill the 2nd port: important to create this last since it creates the final
     * hilite handler (mapping) for ports #1 AND #2 (bug 3270) */
    exec.setMessage("Creating group totals");
    // create the group table on the group columns only; no pivoting
    final BufferedDataTable columnGroupTable =
        createGroupByTable(groupExec, table, getGroupByColumns()).getBufferedTable();
    return new PortObject[]{
        pivotTable,       // pivot table
        columnGroupTable, // group totals
        pivotRowsTable    // pivot and overall totals
    };
}
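Much of the method above is progress bookkeeping: each phase is assigned a weight, the weights are summed, and each sub-context receives its weight's share of the parent context. A sketch of the pattern in isolation (weights and phase names are illustrative):

    // Sketch of the progress-budgeting pattern used above. Each sub-context
    // reports into its fraction of the parent's progress range, so the overall
    // progress bar advances monotonically across phases.
    double total = 0.0;
    final double wGroup = 5.0; // grouping dominates the runtime
    total += wGroup;
    final double wFill = 1.0;  // filling the pivot table is comparatively cheap
    total += wFill;
    ExecutionContext groupPhase = exec.createSubExecutionContext(wGroup / total);
    ExecutionContext fillPhase = exec.createSubExecutionContext(wFill / total);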
Use of org.knime.core.node.ExecutionContext in project knime-core by knime.
The class BinnerNodeModel, method createStreamableOperator.
/**
 * {@inheritDoc}
 */
@Override
public StreamableOperator createStreamableOperator(final PartitionInfo partitionInfo,
        final PortObjectSpec[] inSpecs) throws InvalidSettingsException {
    return new StreamableOperator() {

        @Override
        public void runFinal(final PortInput[] inputs, final PortOutput[] outputs,
                final ExecutionContext exec) throws Exception {
            ColumnRearranger colre = createColumnRearranger((DataTableSpec)inSpecs[0]);
            colre.createStreamableFunction(0, 0).runFinal(inputs, outputs, exec);
            if (m_pmmlOutEnabled) {
                // handle the optional PMML in-port (may be null)
                PMMLPortObject inPMMLPort = m_pmmlInEnabled
                    ? (PMMLPortObject)((PortObjectInput)inputs[1]).getPortObject() : null;
                PMMLPortObject outPMMLPort = createPMMLModel(inPMMLPort,
                    (DataTableSpec)inSpecs[0], colre.createSpec());
                ((PortObjectOutput)outputs[1]).setPortObject(outPMMLPort);
            }
        }
    };
}
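The streaming path above delegates the per-row work to a StreamableFunction derived from a ColumnRearranger. For context, a minimal sketch of a rearranger that appends one computed column; the "Bin" column name and the trivial placeholder value are illustrative, not the actual BinnerNodeModel logic:

    // Hypothetical minimal rearranger, analogous in shape to what
    // createColumnRearranger(...) builds: append one computed column whose
    // per-row values a StreamableFunction can then produce on the fly.
    private static ColumnRearranger sketchRearranger(final DataTableSpec spec) {
        DataColumnSpec newCol =
            new DataColumnSpecCreator("Bin", StringCell.TYPE).createSpec();
        ColumnRearranger colre = new ColumnRearranger(spec);
        colre.append(new SingleCellFactory(newCol) {
            @Override
            public DataCell getCell(final DataRow row) {
                return new StringCell("bin-1"); // placeholder for the real binning
            }
        });
        return colre;
    }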