Use of org.knime.core.node.streamable.RowInput in project knime-core by KNIME.
Class AppendedRowsNodeModel, method run.
void run(final RowInput[] inputs, final RowOutput output, final ExecutionMonitor exec,
    final long totalRowCount) throws Exception {
    RowInput[] corrected;
    if (m_isIntersection) {
        final RowInput[] noNullArray = noNullArray(inputs);
        corrected = new RowInput[noNullArray.length];
        DataTableSpec[] inSpecs = new DataTableSpec[noNullArray.length];
        for (int i = 0; i < noNullArray.length; i++) {
            inSpecs[i] = noNullArray[i].getDataTableSpec();
        }
        String[] intersection = getIntersection(inSpecs);
        for (int i = 0; i < noNullArray.length; i++) {
            corrected[i] = new FilterColumnRowInput(noNullArray[i], intersection);
        }
    } else {
        corrected = inputs;
    }
    AppendedRowsTable.DuplicatePolicy duplPolicy;
    if (m_isFailOnDuplicate) {
        duplPolicy = AppendedRowsTable.DuplicatePolicy.Fail;
    } else if (m_isAppendSuffix) {
        duplPolicy = AppendedRowsTable.DuplicatePolicy.AppendSuffix;
    } else {
        duplPolicy = AppendedRowsTable.DuplicatePolicy.Skip;
    }
    AppendedRowsRowInput appendedInput =
        AppendedRowsRowInput.create(corrected, duplPolicy, m_suffix, exec, totalRowCount);
    try {
        DataRow next;
        // note: this iterator throws runtime exceptions when canceled
        while ((next = appendedInput.poll()) != null) {
            // may throw an exception; also sets progress
            output.push(next);
        }
    } catch (AppendedRowsIterator.RuntimeCanceledExecutionException rcee) {
        throw rcee.getCause();
    } finally {
        output.close();
    }
    if (appendedInput.getNrRowsSkipped() > 0) {
        setWarningMessage("Filtered out " + appendedInput.getNrRowsSkipped() + " duplicate row(s).");
    }
    if (m_enableHiliting) {
        Map<RowKey, Set<RowKey>> map = createHiliteTranslationMap(appendedInput.getDuplicateNameMap());
        m_hiliteTranslator.setMapper(new DefaultHiLiteMapper(map));
    }
}
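The poll/push loop above is the core streaming pattern behind RowInput and RowOutput: poll until null, push each row, and close both ends in a finally block. A minimal sketch of that pattern in isolation (the RowStreamUtil and copyRows names are illustrative, not from knime-core):

import org.knime.core.data.DataRow;
import org.knime.core.node.ExecutionMonitor;
import org.knime.core.node.streamable.RowInput;
import org.knime.core.node.streamable.RowOutput;

/** Illustrative helper, not part of knime-core: stream every row from input to output. */
final class RowStreamUtil {

    private RowStreamUtil() {
    }

    /**
     * Copies all rows from in to out, checking for cancelation on each row.
     * Closing both ends in a finally block mirrors the pattern in run(...) above.
     */
    static void copyRows(final RowInput in, final RowOutput out, final ExecutionMonitor exec)
        throws Exception {
        try {
            DataRow row;
            // poll() returns null once the input is exhausted
            while ((row = in.poll()) != null) {
                exec.checkCanceled();
                out.push(row);
            }
        } finally {
            in.close();
            out.close();
        }
    }
}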
Use of org.knime.core.node.streamable.RowInput in project knime-core by KNIME.
Class AppendedRowsNodeModel, method execute.
/**
* {@inheritDoc}
*/
@Override
protected BufferedDataTable[] execute(final BufferedDataTable[] rawInData, final ExecutionContext exec)
    throws Exception {
    // remove all null tables first (optional input data)
    BufferedDataTable[] noNullArray = noNullArray(rawInData);
    DataTableSpec[] noNullSpecs = new DataTableSpec[noNullArray.length];
    for (int i = 0; i < noNullArray.length; i++) {
        noNullSpecs[i] = noNullArray[i].getDataTableSpec();
    }
    // the tables can only be wrapped if a suffix is to be appended or the node fails on duplicate row IDs
    if (m_isAppendSuffix || m_isFailOnDuplicate) {
        // virtually create the concatenated table instead of traversing the inputs and copying the rows
        Optional<String> suffix = m_isAppendSuffix ? Optional.of(m_suffix) : Optional.empty();
        BufferedDataTable concatTable =
            exec.createConcatenateTable(exec, suffix, m_isFailOnDuplicate, noNullArray);
        if (m_isIntersection) {
            // wrap the table and filter the non-intersecting columns
            DataTableSpec actualOutSpec = getOutputSpec(noNullSpecs);
            DataTableSpec currentOutSpec = concatTable.getDataTableSpec();
            String[] intersectCols = getIntersection(actualOutSpec, currentOutSpec);
            ColumnRearranger cr = new ColumnRearranger(currentOutSpec);
            cr.keepOnly(intersectCols);
            concatTable = exec.createColumnRearrangeTable(concatTable, cr, exec);
        }
        if (m_enableHiliting) {
            AppendedRowsTable tmp = new AppendedRowsTable(DuplicatePolicy.Fail, null, noNullArray);
            Map<RowKey, Set<RowKey>> map =
                createHiliteTranslationMap(createDuplicateMap(tmp, exec, m_suffix == null ? "" : m_suffix));
            m_hiliteTranslator.setMapper(new DefaultHiLiteMapper(map));
        }
        return new BufferedDataTable[]{concatTable};
    } else {
        // traverse the tables and copy the rows
        long totalRowCount = 0L;
        RowInput[] inputs = new RowInput[noNullArray.length];
        for (int i = 0; i < noNullArray.length; i++) {
            totalRowCount += noNullArray[i].size();
            inputs[i] = new DataTableRowInput(noNullArray[i]);
        }
        DataTableSpec outputSpec = getOutputSpec(noNullSpecs);
        BufferedDataTableRowOutput output =
            new BufferedDataTableRowOutput(exec.createDataContainer(outputSpec));
        run(inputs, output, exec, totalRowCount);
        return new BufferedDataTable[]{output.getDataTable()};
    }
}
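The else branch above shows how the table-based API is bridged into the streaming API: DataTableRowInput wraps a BufferedDataTable as a RowInput, and BufferedDataTableRowOutput collects pushed rows into a new table. The same bridge in isolation, reusing the illustrative copyRows helper sketched earlier (copyViaStream is an assumed name, not knime-core API):

import org.knime.core.node.BufferedDataTable;
import org.knime.core.node.ExecutionContext;
import org.knime.core.node.streamable.BufferedDataTableRowOutput;
import org.knime.core.node.streamable.DataTableRowInput;

/** Illustrative bridge: run row-streaming code over an ordinary BufferedDataTable. */
static BufferedDataTable copyViaStream(final BufferedDataTable table, final ExecutionContext exec)
    throws Exception {
    // adapt the table to the RowInput interface
    DataTableRowInput in = new DataTableRowInput(table);
    // collect pushed rows into a fresh container with the same spec
    BufferedDataTableRowOutput out =
        new BufferedDataTableRowOutput(exec.createDataContainer(table.getDataTableSpec()));
    RowStreamUtil.copyRows(in, out, exec);
    return out.getDataTable();
}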
Use of org.knime.core.node.streamable.RowInput in project knime-core by KNIME.
Class NormalizerApplyNodeModel, method createStreamableOperator.
/**
* {@inheritDoc}
*/
@Override
public StreamableOperator createStreamableOperator(final PartitionInfo partitionInfo,
    final PortObjectSpec[] inSpecs) throws InvalidSettingsException {
    if (getNrOutPorts() == 2) {
        // the two-port case is covered by the default implementation of this method
        return super.createStreamableOperator(partitionInfo, inSpecs);
    } else {
        return new StreamableOperator() {
            @Override
            public void runFinal(final PortInput[] inputs, final PortOutput[] outputs,
                final ExecutionContext exec) throws Exception {
                assert outputs.length == 1;
                NormalizerPortObject model = (NormalizerPortObject)((PortObjectInput)inputs[0]).getPortObject();
                RowInput rowInput = (RowInput)inputs[1];
                AffineTransTable t = new AffineTransTable(rowInput, getAffineTrans(model.getConfiguration()));
                RowOutput rowOutput = (RowOutput)outputs[0];
                RowIterator it = t.iterator();
                while (it.hasNext()) {
                    rowOutput.push(it.next());
                }
                if (t.getErrorMessage() != null) {
                    // TODO collect error messages from remote nodes if run distributed
                    setWarningMessage(t.getErrorMessage());
                }
                rowInput.close();
                rowOutput.close();
            }
        };
    }
}
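A streamable operator only takes effect if the node also declares which ports may be streamed or distributed. A hedged sketch of the role declarations such a node typically pairs with this operator (the roles NormalizerApplyNodeModel actually declares may differ): the model port must be available in full before any row is processed, while the data port can be consumed row by row.

import org.knime.core.node.streamable.InputPortRole;
import org.knime.core.node.streamable.OutputPortRole;

@Override
public InputPortRole[] getInputPortRoles() {
    // port 0: the normalizer model, needed in full before streaming starts
    // port 1: the data table, consumable row by row
    return new InputPortRole[]{InputPortRole.NONDISTRIBUTED_NONSTREAMABLE,
        InputPortRole.DISTRIBUTED_STREAMABLE};
}

@Override
public OutputPortRole[] getOutputPortRoles() {
    // the normalized rows can be produced in distributed fashion
    return new OutputPortRole[]{OutputPortRole.DISTRIBUTED};
}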
Use of org.knime.core.node.streamable.RowInput in project knime-core by KNIME.
Class NominalValueRowFilterNodeModel, method createStreamableOperator.
/**
* {@inheritDoc}
*/
@Override
public StreamableOperator createStreamableOperator(final PartitionInfo partitionInfo,
    final PortObjectSpec[] inSpecs) throws InvalidSettingsException {
    return new StreamableOperator() {
        @SuppressWarnings("null")
        @Override
        public void runFinal(final PortInput[] inputs, final PortOutput[] outputs,
            final ExecutionContext exec) throws Exception {
            RowInput in = (RowInput)inputs[0];
            RowOutput match = (RowOutput)outputs[0];
            RowOutput miss = m_splitter ? (RowOutput)outputs[1] : null;
            try {
                long rowIdx = -1;
                DataRow row;
                while ((row = in.poll()) != null) {
                    rowIdx++;
                    exec.setProgress("Adding row " + rowIdx + ".");
                    exec.checkCanceled();
                    if (matches(row)) {
                        match.push(row);
                    } else if (m_splitter) {
                        miss.push(row);
                    }
                }
            } finally {
                match.close();
                if (m_splitter) {
                    miss.close();
                }
            }
        }
    };
}
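matches(row) is the node's own predicate and its implementation is not shown here. A hypothetical version in the spirit of a nominal-value filter, keeping a row when the cell in the configured column is among the user-selected values (m_selectedColumnIndex and m_selectedValues are illustrative stand-ins, not the node's actual fields):

import java.util.Set;
import org.knime.core.data.DataCell;
import org.knime.core.data.DataRow;

// hypothetical fields, named for illustration only
private int m_selectedColumnIndex;
private Set<String> m_selectedValues;

// hypothetical predicate: a row matches if its cell in the configured
// nominal column is non-missing and among the selected values
private boolean matches(final DataRow row) {
    DataCell cell = row.getCell(m_selectedColumnIndex);
    return !cell.isMissing() && m_selectedValues.contains(cell.toString());
}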
Use of org.knime.core.node.streamable.RowInput in project knime-core by KNIME.
Class LagColumnStreamableOperator, method execute.
BufferedDataTable execute(final BufferedDataTable table, final ExecutionContext exec) throws Exception {
    long maxRows = table.size();
    int maxLag = m_configuration.getLag() * m_configuration.getLagInterval();
    if (m_configuration.isSkipInitialIncompleteRows()) {
        maxRows -= maxLag;
    }
    if (!m_configuration.isSkipLastIncompleteRows()) {
        maxRows += maxLag;
    }
    m_maxRows = maxRows;
    BufferedDataContainer output = exec.createDataContainer(m_outSpec);
    RowInput wrappedInput = new DataTableRowInput(table);
    DataContainerPortOutput wrappedOutput = new DataContainerPortOutput(output);
    runFinal(new PortInput[]{wrappedInput}, new PortOutput[]{wrappedOutput}, exec);
    return wrappedOutput.getTable();
}
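DataContainerPortOutput is a small adapter owned by the lag-column node, playing the same role BufferedDataTableRowOutput plays in the AppendedRows example above: it turns pushed rows back into a table so a non-streamed execute() can delegate to the streaming runFinal(). An assumed reconstruction of what such an adapter boils down to (a sketch, not the actual DataContainerPortOutput source):

import org.knime.core.data.DataRow;
import org.knime.core.node.BufferedDataContainer;
import org.knime.core.node.BufferedDataTable;
import org.knime.core.node.streamable.RowOutput;

/** Assumed reconstruction, not the actual class: a RowOutput backed by a data container. */
final class SketchedContainerOutput extends RowOutput {

    private final BufferedDataContainer m_container;

    SketchedContainerOutput(final BufferedDataContainer container) {
        m_container = container;
    }

    @Override
    public void push(final DataRow row) throws InterruptedException {
        // append each pushed row to the container
        m_container.addRowToTable(row);
    }

    @Override
    public void close() {
        // finalize the container so the table can be retrieved
        m_container.close();
    }

    BufferedDataTable getTable() {
        return m_container.getTable();
    }
}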