Use of org.knime.core.node.ExecutionContext in project knime-core by knime.
The class VirtualSubNodeInputNodeModel, method createStreamableOperator.
@Override
public StreamableOperator createStreamableOperator(final PartitionInfo partitionInfo, final PortObjectSpec[] inSpecs) throws InvalidSettingsException {
    return new StreamableOperator() {

        @Override
        public void runFinal(final PortInput[] inputs, final PortOutput[] outputs, final ExecutionContext exec) throws Exception {
            assert inputs.length == 0;
            PortObject[] dataFromParent = ArrayUtils.remove(m_subNodeContainer.fetchInputDataFromParent(), 0);
            for (int i = 0; i < outputs.length; i++) {
                if (BufferedDataTable.TYPE.equals(getOutPortType(i))) {
                    // stream port content if it's data
                    BufferedDataTable bdt = (BufferedDataTable)(dataFromParent[i]);
                    RowOutput rowOutput = (RowOutput)outputs[i];
                    for (DataRow dr : bdt) {
                        rowOutput.push(dr);
                    }
                    rowOutput.close();
                } else {
                    ((PortObjectOutput)outputs[i]).setPortObject(dataFromParent[i]);
                }
            }
        }
    };
}
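For comparison, the streaming ExecutionContext is more often used as a plain pass-through. The following sketch is not taken from the KNIME sources; it assumes an operator with exactly one streamable data input and one data output, forwards rows from the RowInput to the RowOutput, and uses the context only for cancellation checks and a status message, since the total row count is unknown while rows are still arriving.

// Minimal pass-through sketch (illustrative only, not from knime-core).
@Override
public StreamableOperator createStreamableOperator(final PartitionInfo partitionInfo, final PortObjectSpec[] inSpecs)
        throws InvalidSettingsException {
    return new StreamableOperator() {

        @Override
        public void runFinal(final PortInput[] inputs, final PortOutput[] outputs, final ExecutionContext exec)
                throws Exception {
            RowInput in = (RowInput)inputs[0];
            RowOutput out = (RowOutput)outputs[0];
            DataRow row;
            long count = 0;
            while ((row = in.poll()) != null) {           // poll() returns null once the input is exhausted
                exec.checkCanceled();                     // surfaces a user cancel as CanceledExecutionException
                exec.setMessage("Streamed row " + (++count));
                out.push(row);                            // hand the row to the downstream node
            }
            in.close();
            out.close();                                  // signal that no more rows will follow
        }
    };
}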
Use of org.knime.core.node.ExecutionContext in project knime-core by knime.
The class JavaRowSplitterNodeModel, method execute.
/**
 * {@inheritDoc}
 */
@Override
protected BufferedDataTable[] execute(final BufferedDataTable[] inData, final ExecutionContext exec) throws Exception {
    final int rowCount = inData[0].getRowCount();
    m_rowCount = rowCount;
    DataTableRowInput input = new DataTableRowInput(inData[0]);
    DataTableSpec spec = inData[0].getDataTableSpec();
    BufferedDataContainer trueMatch = exec.createDataContainer(spec);
    BufferedDataTableRowOutput[] outputs;
    BufferedDataContainer falseMatch = null;
    if (getNrOutPorts() == 2) {
        falseMatch = exec.createDataContainer(spec);
    }
    outputs = Stream.of(trueMatch, falseMatch).filter(f -> f != null)
        .map(f -> new BufferedDataTableRowOutput(f)).toArray(BufferedDataTableRowOutput[]::new);
    execute(input, outputs, exec);
    BufferedDataTable[] outTables = Stream.of(trueMatch, falseMatch).filter(f -> f != null)
        .map(f -> f.getTable()).toArray(BufferedDataTable[]::new);
    return outTables;
}
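The BufferedDataTableRowOutput wrappers above ultimately feed BufferedDataContainers created by the ExecutionContext. A minimal sketch of that underlying container pattern (illustrative only, not part of JavaRowSplitterNodeModel; the method name is hypothetical) looks like this:

// Sketch of the ExecutionContext container pattern behind BufferedDataTableRowOutput.
static BufferedDataTable copyTable(final BufferedDataTable in, final ExecutionContext exec)
        throws CanceledExecutionException {
    BufferedDataContainer out = exec.createDataContainer(in.getDataTableSpec());
    final double rowCount = in.getRowCount();
    int rowIndex = 0;
    for (DataRow row : in) {
        exec.checkCanceled();                        // abort cleanly if the user cancels the node
        exec.setProgress(++rowIndex / rowCount,      // fraction of rows processed so far
            "Copying row " + rowIndex + " of " + (int)rowCount);
        out.addRowToTable(row);
    }
    out.close();                                     // freeze the container before reading the table
    return out.getTable();
}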
Use of org.knime.core.node.ExecutionContext in project knime-core by knime.
The class PivotNodeModel, method execute.
/**
 * {@inheritDoc}
 */
@Override
protected BufferedDataTable[] execute(final BufferedDataTable[] inData, final ExecutionContext exec) throws Exception {
    final DataTableSpec inspec = inData[0].getDataTableSpec();
    final int group = inspec.findColumnIndex(m_group.getStringValue());
    final int pivot = inspec.findColumnIndex(m_pivot.getStringValue());
    final int aggre = (m_makeAgg.getStringValue().equals(PivotNodeDialogPane.MAKE_AGGREGATION[1])
        ? inspec.findColumnIndex(m_agg.getStringValue()) : -1);
    PivotAggregationMethod aggMethod;
    if (aggre < 0) {
        aggMethod = PivotAggregationMethod.COUNT;
    } else {
        aggMethod = PivotAggregationMethod.METHODS.get(m_aggMethod.getStringValue());
    }
    // pair contains group and pivot plus the aggregation value
    final Map<Pair<String, String>, Double[]> map = new LinkedHashMap<Pair<String, String>, Double[]>();
    // list of pivot values
    final Set<String> pivotList = new LinkedHashSet<String>();
    final DataColumnSpec pivotSpec = inspec.getColumnSpec(pivot);
    if (pivotSpec.getDomain().hasValues()) {
        for (DataCell domValue : pivotSpec.getDomain().getValues()) {
            pivotList.add(domValue.toString());
        }
    }
    // list of group values
    final Set<String> groupList = new LinkedHashSet<String>();
    final LinkedHashMap<RowKey, Set<RowKey>> mapping = new LinkedHashMap<RowKey, Set<RowKey>>();
    final double nrRows = inData[0].getRowCount();
    int rowCnt = 0;
    ExecutionContext subExec = exec.createSubExecutionContext(0.75);
    // find all group/pivot pairs and aggregate the values of each group
    for (final DataRow row : inData[0]) {
        subExec.checkCanceled();
        subExec.setProgress(++rowCnt / nrRows, "Aggregating row: \"" + row.getKey().getString()
            + "\" (" + rowCnt + "\\" + (int) nrRows + ")");
        final String groupString = row.getCell(group).toString();
        groupList.add(groupString);
        final DataCell pivotCell = row.getCell(pivot);
        // skip missing pivot values if they should be ignored
        if (pivotCell.isMissing()) {
            if (m_ignoreMissValues.getBooleanValue()) {
                continue;
            }
        }
        final String pivotString = pivotCell.toString();
        pivotList.add(pivotString);
        final Pair<String, String> pair = new Pair<String, String>(groupString, pivotString);
        Double[] aggValue = map.get(pair);
        if (aggValue == null) {
            aggValue = aggMethod.init();
            map.put(pair, aggValue);
        }
        if (aggre < 0) {
            aggMethod.compute(aggValue, null);
        } else {
            final DataCell value = row.getCell(aggre);
            aggMethod.compute(aggValue, value);
        }
        if (m_hiliting.getBooleanValue()) {
            final RowKey groupKey = new RowKey(groupString);
            Set<RowKey> set = mapping.get(groupKey);
            if (set == null) {
                set = new LinkedHashSet<RowKey>();
                mapping.put(groupKey, set);
            }
            set.add(row.getKey());
        }
    }
    final DataTableSpec outspec = initSpec(pivotList);
    // will contain the final pivoting table
    final BufferedDataContainer buf = exec.createDataContainer(outspec);
    final double nrElements = groupList.size();
    int elementCnt = 0;
    subExec = exec.createSubExecutionContext(0.25);
    for (final String groupString : groupList) {
        subExec.checkCanceled();
        subExec.setProgress(++elementCnt / nrElements, "Computing aggregation of group \"" + groupString
            + "\" (" + elementCnt + "\\" + (int) nrElements + ")");
        // contains the aggregated values
        final DataCell[] aggValues = new DataCell[pivotList.size()];
        // pivot index
        int idx = 0;
        for (final String pivotString : pivotList) {
            final Pair<String, String> newPair = new Pair<String, String>(groupString, pivotString);
            final Double[] aggValue = map.get(newPair);
            aggValues[idx] = aggMethod.done(aggValue);
            idx++;
        }
        // create new row with the given group id and aggregation values
        buf.addRowToTable(new DefaultRow(groupString, aggValues));
    }
    buf.close();
    if (m_hiliting.getBooleanValue()) {
        m_translator.setMapper(new DefaultHiLiteMapper(mapping));
    }
    return new BufferedDataTable[] { buf.getTable() };
}
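The two createSubExecutionContext() calls split the node's progress bar into a 75% aggregation phase and a 25% output phase. A reduced sketch of that idiom (step counts and method name are assumed, not from PivotNodeModel):

// Sketch: each sub-context maps its local 0..1 progress onto a weighted slice of the parent bar.
static void twoPhaseProgress(final ExecutionContext exec, final int scanSteps, final int writeSteps)
        throws CanceledExecutionException {
    ExecutionContext scanExec = exec.createSubExecutionContext(0.75);   // first phase: 75% of the bar
    for (int i = 1; i <= scanSteps; i++) {
        scanExec.checkCanceled();
        scanExec.setProgress(i / (double)scanSteps, "Aggregating row " + i);
    }
    ExecutionContext writeExec = exec.createSubExecutionContext(0.25);  // second phase: remaining 25%
    for (int i = 1; i <= writeSteps; i++) {
        writeExec.checkCanceled();
        writeExec.setProgress(i / (double)writeSteps, "Writing group " + i);
    }
}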
Use of org.knime.core.node.ExecutionContext in project knime-core by knime.
The class CSVReaderNodeModel, method createFileTable.
protected FileTable createFileTable(final ExecutionContext exec) throws Exception {
    // prepare the settings for the file analyzer
    FileReaderNodeSettings settings = new FileReaderNodeSettings();
    CheckUtils.checkSourceFile(m_config.getLocation());
    URL url = FileUtil.toURL(m_config.getLocation());
    settings.setDataFileLocationAndUpdateTableName(url);
    String colDel = m_config.getColDelimiter();
    if (colDel != null && !colDel.isEmpty()) {
        settings.addDelimiterPattern(colDel, false, false, false);
    }
    settings.setDelimiterUserSet(true);
    String rowDel = m_config.getRowDelimiter();
    if (rowDel != null && !rowDel.isEmpty()) {
        settings.addRowDelimiter(rowDel, true);
    }
    String quote = m_config.getQuoteString();
    if (quote != null && !quote.isEmpty()) {
        settings.addQuotePattern(quote, quote);
    }
    settings.setQuoteUserSet(true);
    String commentStart = m_config.getCommentStart();
    if (commentStart != null && !commentStart.isEmpty()) {
        settings.addSingleLineCommentPattern(commentStart, false, false);
    }
    settings.setCommentUserSet(true);
    boolean hasColHeader = m_config.hasColHeader();
    settings.setFileHasColumnHeaders(hasColHeader);
    settings.setFileHasColumnHeadersUserSet(true);
    boolean hasRowHeader = m_config.hasRowHeader();
    settings.setFileHasRowHeaders(hasRowHeader);
    settings.setFileHasRowHeadersUserSet(true);
    settings.setWhiteSpaceUserSet(true);
    boolean supportShortLines = m_config.isSupportShortLines();
    settings.setSupportShortLines(supportShortLines);
    int skipFirstLinesCount = m_config.getSkipFirstLinesCount();
    settings.setSkipFirstLines(skipFirstLinesCount);
    final long limitRowsCount = m_config.getLimitRowsCount();
    settings.setMaximumNumberOfRowsToRead(limitRowsCount);
    settings.setCharsetName(m_config.getCharSetName());
    settings.setCharsetUserSet(true);
    settings.setConnectTimeout(m_config.getConnectTimeout());
    final int limitAnalysisCount = m_config.getLimitAnalysisCount();
    final ExecutionMonitor analyseExec = exec.createSubProgress(0.5);
    final ExecutionContext readExec = exec.createSubExecutionContext(0.5);
    exec.setMessage("Analyzing file");
    if (limitAnalysisCount >= 0) {
        final FileReaderExecutionMonitor fileReaderExec = new FileReaderExecutionMonitor();
        fileReaderExec.getProgressMonitor().addProgressListener(new NodeProgressListener() {

            @Override
            public void progressChanged(final NodeProgressEvent pe) {
                try {
                    // if the node was canceled, cancel (interrupt) the analysis
                    analyseExec.checkCanceled();
                    // otherwise update the node progress
                    NodeProgress nodeProgress = pe.getNodeProgress();
                    analyseExec.setProgress(nodeProgress.getProgress(), nodeProgress.getMessage());
                } catch (CanceledExecutionException e) {
                    fileReaderExec.setExecuteInterrupted();
                }
            }
        });
        fileReaderExec.setShortCutLines(limitAnalysisCount);
        fileReaderExec.setExecuteCanceled();
        settings = FileAnalyzer.analyze(settings, fileReaderExec);
    } else {
        settings = FileAnalyzer.analyze(settings, analyseExec);
    }
    SettingsStatus status = settings.getStatusOfSettings();
    if (status.getNumOfErrors() > 0) {
        throw new IllegalStateException(status.getErrorMessage(0));
    }
    final DataTableSpec tableSpec = settings.createDataTableSpec();
    if (tableSpec == null) {
        final SettingsStatus status2 = settings.getStatusOfSettings(true, null);
        if (status2.getNumOfErrors() > 0) {
            throw new IllegalStateException(status2.getErrorMessage(0));
        } else {
            throw new IllegalStateException("Unknown error during file analysis.");
        }
    }
    exec.setMessage("Buffering file");
    return new FileTable(tableSpec, settings, readExec);
}
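Note the two sub-monitors created near the top of the method: createSubProgress() returns a plain ExecutionMonitor that can only report progress and honor cancellation, while createSubExecutionContext() returns a full ExecutionContext that can also create buffered tables, which is why the FileTable receives the latter. A compact sketch of the split (method name hypothetical, progress values arbitrary):

// Sketch: 50% of the node's progress bar for analysis, 50% for reading.
static void splitBetweenAnalysisAndRead(final ExecutionContext exec) {
    ExecutionMonitor analyseExec = exec.createSubProgress(0.5);        // progress/cancel only
    ExecutionContext readExec = exec.createSubExecutionContext(0.5);   // may also create tables
    analyseExec.setProgress(0.2, "Analyzing file");                    // maps into the first half of the bar
    readExec.setProgress(0.1, "Buffering file");                       // maps into the second half
}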
Use of org.knime.core.node.ExecutionContext in project knime-core by knime.
The class CSVWriterNodeModel, method createStreamableOperator.
/**
 * {@inheritDoc}
 */
@Override
public StreamableOperator createStreamableOperator(final PartitionInfo partitionInfo, final PortObjectSpec[] inSpecs) throws InvalidSettingsException {
    return new StreamableOperator() {

        @Override
        public void runFinal(final PortInput[] inputs, final PortOutput[] outputs, final ExecutionContext exec) throws Exception {
            assert outputs.length == 0;
            RowInput input = (RowInput)inputs[0];
            doIt(null, input, exec);
            return;
        }
    };
}
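Here the ExecutionContext is simply handed on to doIt(), which consumes the RowInput. A sketch of what such a streaming sink typically does with it (illustrative only, not the actual doIt() implementation; it performs no CSV quoting or escaping):

// Sketch of a streaming sink: poll rows, write them out, use exec for cancel checks and status.
static void writeRows(final RowInput input, final Writer writer, final ExecutionContext exec)
        throws Exception {
    DataRow row;
    long written = 0;
    while ((row = input.poll()) != null) {           // null marks the end of the stream
        exec.checkCanceled();                        // stop promptly if the user cancels the node
        StringBuilder line = new StringBuilder();
        for (DataCell cell : row) {                  // naive comma-separated line
            if (line.length() > 0) {
                line.append(',');
            }
            line.append(cell.toString());
        }
        writer.write(line.toString());
        writer.write('\n');
        exec.setMessage("Written row " + (++written));
    }
    input.close();
    writer.flush();
}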