Use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
The class BatchExecutor, method saveWorkflow.
/**
* Saves the workflow after execution.
*
* @param wfm the workflow manager
* @param config the corresponding workflow configuration
* @throws IOException if an I/O error occurs while saving the workflow
* @throws CanceledExecutionException if saving the workflow is canceled by the user (should not happen in batch
* mode)
* @throws LockFailedException if the workflow cannot be locked
* @since 2.7
*/
protected void saveWorkflow(final WorkflowManager wfm, final WorkflowConfiguration config) throws IOException, CanceledExecutionException, LockFailedException {
if (!config.noSave) {
// save in place when no output (file or dir) given
if ((config.outputDir == null) && (config.outputFile == null)) {
wfm.save(config.workflowLocation, new ExecutionMonitor(), true);
LOGGER.debug("Workflow saved: " + config.workflowLocation.getAbsolutePath());
if (config.inputWorkflow.isFile()) {
// if input is a Zip file, overwrite input flow
// (Zip) workflow dir contains temp workflow dir
FileUtil.zipDir(config.inputWorkflow, config.workflowLocation, 9, WORKFLOW_ZIP_FILTER, null);
LOGGER.info("Saved workflow availabe at: " + config.inputWorkflow.getAbsolutePath());
}
} else if (config.outputFile != null) {
// save as Zip
File outputTempDir = FileUtil.createTempDir("BatchExecutorOutput");
File workflowOutDir = new File(outputTempDir, config.outputFile.getName().replaceAll("\\.(?:zip|ZIP)$", ""));
wfm.save(workflowOutDir, new ExecutionMonitor(), true);
LOGGER.debug("Workflow saved: " + outputTempDir.getAbsolutePath());
// to be saved into new output zip file
FileUtil.zipDir(config.outputFile, workflowOutDir, 9, WORKFLOW_ZIP_FILTER, null);
LOGGER.info("Saved workflow availabe at: " + config.outputFile.getAbsolutePath());
} else if (config.outputDir != null) {
// save into dir
// copy current workflow dir
wfm.save(config.outputDir, new ExecutionMonitor(), true);
LOGGER.info("Saved workflow availabe at: " + config.outputDir.getAbsolutePath());
}
}
}
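In batch mode nothing listens to the fresh ExecutionMonitor handed to wfm.save(...), so save progress is simply discarded. If progress should be surfaced, a listener can be attached to the underlying progress monitor. A minimal sketch, not part of BatchExecutor (the helper name and the log target are illustrative assumptions):
private static ExecutionMonitor loggingMonitor() {
    // DefaultNodeProgressMonitor is the stock NodeProgressMonitor implementation in knime-core
    final DefaultNodeProgressMonitor progress = new DefaultNodeProgressMonitor();
    progress.addProgressListener(pe -> {
        // NodeProgress carries an optional fraction in [0, 1] and an optional message
        final Double fraction = pe.getNodeProgress().getProgress();
        final String message = pe.getNodeProgress().getMessage();
        LOGGER.debug((fraction == null ? "?" : Math.round(fraction * 100) + "%")
            + (message == null ? "" : " - " + message));
    });
    return new ExecutionMonitor(progress);
}
// hypothetical usage: wfm.save(config.workflowLocation, loggingMonitor(), true);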
Use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
The class CSVReaderNodeModel, method createFileTable.
protected FileTable createFileTable(final ExecutionContext exec) throws Exception {
// prepare the settings for the file analyzer
FileReaderNodeSettings settings = new FileReaderNodeSettings();
CheckUtils.checkSourceFile(m_config.getLocation());
URL url = FileUtil.toURL(m_config.getLocation());
settings.setDataFileLocationAndUpdateTableName(url);
String colDel = m_config.getColDelimiter();
if (colDel != null && !colDel.isEmpty()) {
settings.addDelimiterPattern(colDel, false, false, false);
}
settings.setDelimiterUserSet(true);
String rowDel = m_config.getRowDelimiter();
if (rowDel != null && !rowDel.isEmpty()) {
settings.addRowDelimiter(rowDel, true);
}
String quote = m_config.getQuoteString();
if (quote != null && !quote.isEmpty()) {
settings.addQuotePattern(quote, quote);
}
settings.setQuoteUserSet(true);
String commentStart = m_config.getCommentStart();
if (commentStart != null && !commentStart.isEmpty()) {
settings.addSingleLineCommentPattern(commentStart, false, false);
}
settings.setCommentUserSet(true);
boolean hasColHeader = m_config.hasColHeader();
settings.setFileHasColumnHeaders(hasColHeader);
settings.setFileHasColumnHeadersUserSet(true);
boolean hasRowHeader = m_config.hasRowHeader();
settings.setFileHasRowHeaders(hasRowHeader);
settings.setFileHasRowHeadersUserSet(true);
settings.setWhiteSpaceUserSet(true);
boolean supportShortLines = m_config.isSupportShortLines();
settings.setSupportShortLines(supportShortLines);
int skipFirstLinesCount = m_config.getSkipFirstLinesCount();
settings.setSkipFirstLines(skipFirstLinesCount);
final long limitRowsCount = m_config.getLimitRowsCount();
settings.setMaximumNumberOfRowsToRead(limitRowsCount);
settings.setCharsetName(m_config.getCharSetName());
settings.setCharsetUserSet(true);
settings.setConnectTimeout(m_config.getConnectTimeout());
final int limitAnalysisCount = m_config.getLimitAnalysisCount();
final ExecutionMonitor analyseExec = exec.createSubProgress(0.5);
final ExecutionContext readExec = exec.createSubExecutionContext(0.5);
exec.setMessage("Analyzing file");
if (limitAnalysisCount >= 0) {
final FileReaderExecutionMonitor fileReaderExec = new FileReaderExecutionMonitor();
fileReaderExec.getProgressMonitor().addProgressListener(new NodeProgressListener() {
@Override
public void progressChanged(final NodeProgressEvent pe) {
try {
// if the node was canceled, cancel (interrupt) the analysis
analyseExec.checkCanceled();
// otherwise update the node progress
NodeProgress nodeProgress = pe.getNodeProgress();
analyseExec.setProgress(nodeProgress.getProgress(), nodeProgress.getMessage());
} catch (CanceledExecutionException e) {
fileReaderExec.setExecuteInterrupted();
}
}
});
fileReaderExec.setShortCutLines(limitAnalysisCount);
fileReaderExec.setExecuteCanceled();
settings = FileAnalyzer.analyze(settings, fileReaderExec);
} else {
settings = FileAnalyzer.analyze(settings, analyseExec);
}
SettingsStatus status = settings.getStatusOfSettings();
if (status.getNumOfErrors() > 0) {
throw new IllegalStateException(status.getErrorMessage(0));
}
final DataTableSpec tableSpec = settings.createDataTableSpec();
if (tableSpec == null) {
final SettingsStatus status2 = settings.getStatusOfSettings(true, null);
if (status2.getNumOfErrors() > 0) {
throw new IllegalStateException(status2.getErrorMessage(0));
} else {
throw new IllegalStateException("Unknown error during file analysis.");
}
}
exec.setMessage("Buffering file");
return new FileTable(tableSpec, settings, readExec);
}
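The method splits the node's progress bar 50/50: createSubProgress(0.5) dedicates the first half to the analysis phase, while createSubExecutionContext(0.5) yields a full ExecutionContext (progress reporting plus table creation) for the read phase. A minimal sketch of the same split, with an assumed ten-step first phase (method name is illustrative):
void twoPhaseWork(final ExecutionContext exec) throws CanceledExecutionException {
    final ExecutionMonitor analyse = exec.createSubProgress(0.5);      // progress reporting only
    final ExecutionContext read = exec.createSubExecutionContext(0.5); // can also create tables
    for (int i = 1; i <= 10; i++) {
        analyse.checkCanceled();                             // propagate user cancelation
        analyse.setProgress(i / 10.0, "analysis step " + i); // 100% here shows as 50% overall
    }
    read.setProgress(1.0, "read phase done");                // drives the overall bar to 100%
}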
Use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
The class DBPivotNodeModel, method configure.
/**
* {@inheritDoc}
*/
@Override
protected PortObjectSpec[] configure(final PortObjectSpec[] inSpecs) throws InvalidSettingsException {
final DatabasePortObjectSpec dbSpec = (DatabasePortObjectSpec) inSpecs[0];
final DataTableSpec tableSpec = dbSpec.getDataTableSpec();
final DatabaseQueryConnectionSettings connection = dbSpec.getConnectionSettings(getCredentialsProvider());
final String dbIdentifier = connection.getDatabaseIdentifier();
final List<DBColumnAggregationFunctionRow> columnFunctions = DBColumnAggregationFunctionRow.loadFunctions(m_settings, DBPivotNodeModel.CFG_AGGREGATION_FUNCTIONS, dbIdentifier, tableSpec);
final ArrayList<DBColumnAggregationFunctionRow> invalidColAggrs = new ArrayList<>(1);
final Set<String> usedColNames = new HashSet<>(tableSpec.getNumColumns());
usedColNames.addAll(m_groupByCols.getIncludeList());
usedColNames.addAll(m_pivotCols.getIncludeList());
m_aggregationFunction2Use.clear();
for (DBColumnAggregationFunctionRow row : columnFunctions) {
final DataColumnSpec columnSpec = row.getColumnSpec();
final DataColumnSpec inputSpec = tableSpec.getColumnSpec(columnSpec.getName());
final AggregationFunction function = row.getFunction();
if (inputSpec == null || !inputSpec.getType().equals(columnSpec.getType())) {
invalidColAggrs.add(row);
continue;
}
if (function instanceof InvalidAggregationFunction) {
throw new InvalidSettingsException(((InvalidAggregationFunction) function).getErrorMessage());
}
if (function.hasOptionalSettings()) {
try {
function.configure(tableSpec);
} catch (InvalidSettingsException e) {
throw new InvalidSettingsException("Wrong aggregation function configuration '" + function.getLabel() + "' of column '" + row.getColumnSpec().getName() + "': " + e.getMessage(), e);
}
}
usedColNames.add(row.getColumnSpec().getName());
m_aggregationFunction2Use.add(row);
}
if (m_aggregationFunction2Use.isEmpty()) {
throw new InvalidSettingsException("No aggregation columns selected.");
}
if (m_groupByCols.getIncludeList().isEmpty()) {
setWarningMessage("No grouping column included. Aggregate complete table");
}
if (m_pivotCols.getIncludeList().isEmpty()) {
throw new InvalidSettingsException("No pivot columns selected.");
}
if (!invalidColAggrs.isEmpty()) {
setWarningMessage(invalidColAggrs.size() + " aggregation functions ignored due to incompatible columns.");
}
final DatabasePortObjectSpec resultSpec;
if (connection.getRetrieveMetadataInConfigure()) {
try {
resultSpec = createDbOutSpec(dbSpec, new ExecutionMonitor());
} catch (CanceledExecutionException e) {
throw new InvalidSettingsException(e.getMessage());
}
} else {
resultSpec = null;
}
return new PortObjectSpec[] { resultSpec };
}
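Because configure() runs without an ExecutionContext, createDbOutSpec receives a plain new ExecutionMonitor(); checkCanceled() on such a monitor only throws once cancelation has been requested on its progress monitor, which is why the CanceledExecutionException branch above is largely defensive. A stand-alone sketch of that behavior (an assumed illustration, not taken from DBPivotNodeModel):
final DefaultNodeProgressMonitor progress = new DefaultNodeProgressMonitor();
final ExecutionMonitor monitor = new ExecutionMonitor(progress);
monitor.checkCanceled();        // passes: nothing has requested cancelation yet
progress.setExecuteCanceled();  // e.g. requested from another thread
try {
    monitor.checkCanceled();    // now throws CanceledExecutionException
} catch (CanceledExecutionException e) {
    // configure() above converts this into an InvalidSettingsException
}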
Use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
The class DBPivotNodeModel, method createQuery.
private String createQuery(final DatabaseQueryConnectionSettings connectionSettings, final DataTableSpec dataTableSpec, final ExecutionMonitor exec) throws SQLException, CanceledExecutionException {
final StatementManipulator manipulator = connectionSettings.getUtility().getStatementManipulator();
final String query = connectionSettings.getQuery();
exec.setMessage("Getting pivot values.");
ExecutionMonitor subExec = exec.createSubProgress(0.7);
final List<String> pivotColumns = m_pivotCols.getIncludeList();
final Map<DataColumnSpec, Set<Object>> pivotElements = connectionSettings.execute(getCredentialsProvider(), conn -> {
int counter = 1;
final Map<DataColumnSpec, Set<Object>> pivotMap = new LinkedHashMap<>();
for (String pivotColumn : pivotColumns) {
subExec.setProgress(counter / (double) pivotColumns.size(), "Fetching unique values for column " + pivotColumn + ". There are " + (pivotColumns.size() - counter) + " columns left.");
DataColumnSpec columnSpec = dataTableSpec.getColumnSpec(pivotColumn);
final String valueQuery = "SELECT DISTINCT " + manipulator.quoteIdentifier(pivotColumn) + " FROM (" + query + ") T";
try (ResultSet valueSet = conn.createStatement().executeQuery(valueQuery)) {
exec.checkCanceled();
final Set<Object> vals = new HashSet<>();
while (valueSet.next()) {
final Object dbVal = valueSet.getObject(1);
if (!valueSet.wasNull()) {
vals.add(dbVal);
}
}
pivotMap.put(columnSpec, vals);
counter++;
}
}
return pivotMap;
});
exec.setProgress(0.8, "Getting aggregation methods and columns.");
List<String> groupByColumns = m_groupByCols.getIncludeList();
final List<Pair<String, DBAggregationFunction>> aggValues = new LinkedList<>();
for (int i = 0; i < m_aggregationFunction2Use.size(); i++) {
exec.checkCanceled();
final DBColumnAggregationFunctionRow aggregationFunction = m_aggregationFunction2Use.get(i);
String colName = aggregationFunction.getColumnSpec().getName();
DBAggregationFunction function = aggregationFunction.getFunction();
aggValues.add(new Pair<>(colName, function));
}
final ColumnNamePolicy pivotColPolicy = ColumnNamePolicy.getPolicy4Label(m_columnNamePolicy.getStringValue());
PivotColumnNameGenerator pivotColName = new PivotColumnNameGenerator() {
@Override
public String createColumnName(final String columnName, final DBAggregationFunction function, final List<Object> pivotValues) {
String vals = "";
Iterator<Object> iterator = pivotValues.iterator();
while (iterator.hasNext()) {
vals = vals + iterator.next() + "_";
}
vals = vals.substring(0, vals.length() - 1);
String method = function.getColumnName();
switch (pivotColPolicy) {
case KEEP_ORIGINAL_NAME:
return vals + "+" + columnName;
case AGGREGATION_METHOD_COLUMN_NAME:
return vals + "+" + method + "(" + columnName + ")";
case COLUMN_NAME_AGGREGATION_METHOD:
return vals + "+" + columnName + " (" + method + ")";
default:
throw new IllegalStateException("Unhandled column naming policy: " + pivotColPoliciy);
}
}
};
exec.setProgress(0.9, "Creating query.");
exec.checkCanceled();
return manipulator.getPivotStatement(query, groupByColumns, pivotElements, aggValues, pivotColName);
}
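The per-column loop above reports progress as counter / (double) pivotColumns.size() on a sub-monitor that owns 70% of the bar, checking for cancelation before each query. That loop shape generalizes; a hedged sketch of it as a reusable helper (the helper and its generics are illustrative, not KNIME API; requires java.util.List and java.util.function.Consumer):
static <T> void forEachWithProgress(final List<T> items, final ExecutionMonitor exec,
        final Consumer<T> body) throws CanceledExecutionException {
    int done = 0;
    for (final T item : items) {
        exec.checkCanceled(); // abort promptly when the user cancels the node
        exec.setProgress(done / (double) items.size(),
            "Processing item " + (done + 1) + " of " + items.size());
        body.accept(item);
        done++;
    }
    exec.setProgress(1.0); // mark this (sub-)monitor as finished
}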
Use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
The class Normalizer2NodeModel, method calculate.
/**
 * Creates the new normalized {@link org.knime.core.data.DataTable} depending on the mode.
 *
 * @param inData The input data.
 * @param exec For BufferedDataTable creation and progress.
 * @return the result of the calculation
 * @throws Exception If the node calculation fails for any reason.
 */
protected CalculationResult calculate(final PortObject[] inData, final ExecutionContext exec) throws Exception {
BufferedDataTable inTable = (BufferedDataTable) inData[0];
DataTableSpec inSpec = inTable.getSpec();
// extract selected numeric columns
updateNumericColumnSelection(inSpec);
Normalizer2 ntable = new Normalizer2(inTable, m_columns);
long rowcount = inTable.size();
ExecutionContext prepareExec = exec.createSubExecutionContext(0.3);
AffineTransTable outTable;
boolean fixDomainBounds = false;
switch(m_mode) {
case NONORM_MODE:
return new CalculationResult(inTable, new DataTableSpec(), new AffineTransConfiguration());
case MINMAX_MODE:
fixDomainBounds = true;
outTable = ntable.doMinMaxNorm(m_max, m_min, prepareExec);
break;
case ZSCORE_MODE:
outTable = ntable.doZScoreNorm(prepareExec);
break;
case DECIMALSCALING_MODE:
outTable = ntable.doDecimalScaling(prepareExec);
break;
default:
throw new Exception("No mode set");
}
if (outTable.getErrorMessage() != null) {
// something went wrong, report and throw an exception
throw new Exception(outTable.getErrorMessage());
}
if (ntable.getErrorMessage() != null) {
// something went wrong during initialization, report.
setWarningMessage(ntable.getErrorMessage());
}
DataTableSpec modelSpec = FilterColumnTable.createFilterTableSpec(inSpec, m_columns);
AffineTransConfiguration configuration = outTable.getConfiguration();
DataTableSpec spec = outTable.getDataTableSpec();
// in MINMAX_MODE, rounding errors can leave the normalized columns with a domain slightly
// off min/max (the transformation is not guaranteed to snap to min/max), so fix the bounds
if (fixDomainBounds) {
DataColumnSpec[] newColSpecs = new DataColumnSpec[spec.getNumColumns()];
for (int i = 0; i < newColSpecs.length; i++) {
newColSpecs[i] = spec.getColumnSpec(i);
}
for (int i = 0; i < m_columns.length; i++) {
int index = spec.findColumnIndex(m_columns[i]);
DataColumnSpecCreator creator = new DataColumnSpecCreator(newColSpecs[index]);
DataColumnDomainCreator domCreator = new DataColumnDomainCreator(newColSpecs[index].getDomain());
domCreator.setLowerBound(new DoubleCell(m_min));
domCreator.setUpperBound(new DoubleCell(m_max));
creator.setDomain(domCreator.createDomain());
newColSpecs[index] = creator.createSpec();
}
spec = new DataTableSpec(spec.getName(), newColSpecs);
}
ExecutionMonitor normExec = exec.createSubProgress(.7);
BufferedDataContainer container = exec.createDataContainer(spec);
long count = 1;
for (DataRow row : outTable) {
normExec.checkCanceled();
normExec.setProgress(count / (double) rowcount, "Normalizing row no. " + count + " of " + rowcount + " (\"" + row.getKey() + "\")");
container.addRowToTable(row);
count++;
}
container.close();
return new CalculationResult(container.getTable(), modelSpec, configuration);
}
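calculate() splits its progress 0.3/0.7 between preparation and the row-copy loop, and reports every row on the 0.7 sub-monitor while the container buffers the output. A condensed sketch of that row-copy pattern (method name is an illustrative assumption):
BufferedDataTable copyWithProgress(final BufferedDataTable in, final ExecutionContext exec)
        throws CanceledExecutionException {
    final ExecutionMonitor rowExec = exec.createSubProgress(0.7);
    final BufferedDataContainer container = exec.createDataContainer(in.getDataTableSpec());
    final long total = in.size();
    long count = 0;
    for (final DataRow row : in) {
        rowExec.checkCanceled();
        rowExec.setProgress(++count / (double) total, "Row " + count + " of " + total);
        container.addRowToTable(row);
    }
    container.close();
    return container.getTable();
}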