use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
the class SubgroupMinerModel method execute.
/**
* {@inheritDoc}
*/
@Override
protected BufferedDataTable[] execute(final BufferedDataTable[] inData, final ExecutionContext exec)
        throws Exception {
    DataTable input = inData[0];
    ExecutionMonitor exec1 = exec.createSubProgress(0.5);
    ExecutionMonitor exec2 = exec.createSubProgress(0.5);
    List<BitVectorValue> transactions = preprocess(input, exec1);
    m_nameMapping = input.getDataTableSpec()
        .getColumnSpec(m_bitVectorColumn.getStringValue()).getElementNames();
    m_apriori = AprioriAlgorithmFactory.getAprioriAlgorithm(
        AprioriAlgorithmFactory.AlgorithmDataStructure.valueOf(m_underlyingStruct.getStringValue()),
        m_maxBitsetLength, m_nrOfRows);
    LOGGER.debug("support: " + m_minSupport);
    LOGGER.debug(m_minSupport + " start apriori: " + new Date());
    m_apriori.findFrequentItemSets(transactions, m_minSupport.getDoubleValue(),
        m_maxItemSetLength.getIntValue(),
        FrequentItemSet.Type.valueOf(m_itemSetType.getStringValue()), exec2);
    LOGGER.debug("ended apriori: " + new Date());
    m_itemSetTable = createOutputTable(exec);
    return new BufferedDataTable[]{m_itemSetTable};
}
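The two createSubProgress(0.5) calls split the node's overall progress evenly between preprocessing and the frequent-item-set search: each sub-monitor reports in its own 0..1 range, which the parent scales into its share. A minimal sketch of the same pattern, where the loop bodies stand in for real work and only the ExecutionMonitor calls are actual KNIME API:

import org.knime.core.node.CanceledExecutionException;
import org.knime.core.node.ExecutionMonitor;

public final class SubProgressSketch {

    // Split one monitor into two equally weighted phases. Progress reported
    // to "first" shows as 0..0.5 on the parent, "second" as 0.5..1.
    static void run(final ExecutionMonitor exec, final int n) throws CanceledExecutionException {
        ExecutionMonitor first = exec.createSubProgress(0.5);
        ExecutionMonitor second = exec.createSubProgress(0.5);
        for (int i = 0; i < n; i++) {
            first.checkCanceled();                    // honor cancel requests
            first.setProgress((i + 1) / (double) n);  // 0..1 within this phase
        }
        for (int i = 0; i < n; i++) {
            second.checkCanceled();
            second.setProgress((i + 1) / (double) n, "phase two, step " + (i + 1));
        }
    }
}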
use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
the class MissingValueHandlingTable method createMissingValueHandlingTable.
/**
* Performs missing value handling on the argument table, given the column
* settings in an array, and reports progress.
*
* @param table the table to do missing value handling on
* @param colSettings the settings
* @param exec for progress/cancel and to create the buffered data table
* @param warningBuffer To which potential warning messages are added.
* @return a cache table, cleaned up
* @throws CanceledExecutionException if canceled
*/
public static BufferedDataTable createMissingValueHandlingTable(final DataTable table,
        final ColSetting[] colSettings, final ExecutionContext exec,
        final StringBuffer warningBuffer) throws CanceledExecutionException {
    ColSetting[] colSetting;
    try {
        colSetting = getColSetting(table.getDataTableSpec(), colSettings, false);
    } catch (InvalidSettingsException ise) {
        LOGGER.coding("getColSetting method is not supposed to throw "
            + "an exception, ignoring settings", ise);
        DataTableSpec s = table.getDataTableSpec();
        colSetting = new ColSetting[s.getNumColumns()];
        for (int i = 0; i < s.getNumColumns(); i++) {
            colSetting[i] = new ColSetting(s.getColumnSpec(i));
            colSetting[i].setMethod(ColSetting.METHOD_NO_HANDLING);
        }
    }
    boolean needStatistics = false;
    int mostFrequentColCount = 0;
    for (int i = 0; i < colSetting.length; i++) {
        ColSetting c = colSetting[i];
        switch (c.getMethod()) {
            case ColSetting.METHOD_MOST_FREQUENT:
                mostFrequentColCount++;
                // fall through: most-frequent also requires statistics
            case ColSetting.METHOD_MAX:
            case ColSetting.METHOD_MIN:
            case ColSetting.METHOD_MEAN:
                needStatistics = true;
                break;
            default:
                // no statistics needed for this column
        }
    }
    int[] mostFrequentCols = new int[mostFrequentColCount];
    if (mostFrequentColCount > 0) {
        int index = 0;
        for (int i = 0; i < colSetting.length; i++) {
            ColSetting c = colSetting[i];
            switch (c.getMethod()) {
                case ColSetting.METHOD_MOST_FREQUENT:
                    mostFrequentCols[index++] = i;
                    break;
                default:
            }
        }
    }
    DataTable t;
    ExecutionMonitor e;
    if (needStatistics && !(table instanceof StatisticsTable)) {
        // for creating the statistics table
        ExecutionMonitor subExec = exec.createSubProgress(0.5);
        t = new MyStatisticsTable(table, subExec, mostFrequentCols);
        if (((MyStatisticsTable) t).m_warningMessage != null) {
            warningBuffer.append(((MyStatisticsTable) t).m_warningMessage);
        }
        // for the iterator
        e = exec.createSubProgress(0.5);
    } else {
        t = table;
        e = exec;
    }
    MissingValueHandlingTable mvht = new MissingValueHandlingTable(t, colSetting);
    BufferedDataContainer container = exec.createDataContainer(mvht.getDataTableSpec());
    e.setMessage("Adding rows...");
    int count = 0;
    try {
        MissingValueHandlingTableIterator it = new MissingValueHandlingTableIterator(mvht, e);
        while (it.hasNext()) {
            DataRow next = it.next();
            e.setMessage("Adding row " + (count + 1) + " (\"" + next.getKey() + "\")");
            container.addRowToTable(next);
            count++;
        }
    } catch (MissingValueHandlingTableIterator.RuntimeCanceledExecutionException rcee) {
        throw rcee.getCause();
    } finally {
        container.close();
    }
    return container.getTable();
}
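Stripped of the statistics and missing-value machinery, the loop above is the standard KNIME copy pattern: create a container for the output spec, report a message per row, and close the container in a finally block so it is sealed even when execution is canceled. A simplified sketch of just that pattern, with an explicit checkCanceled() poll added:

import org.knime.core.data.DataRow;
import org.knime.core.data.DataTable;
import org.knime.core.data.RowIterator;
import org.knime.core.node.BufferedDataContainer;
import org.knime.core.node.BufferedDataTable;
import org.knime.core.node.CanceledExecutionException;
import org.knime.core.node.ExecutionContext;

final class CopyTableSketch {

    // Copy all rows of "in" into a new buffered table, reporting a progress
    // message per row and honoring cancel requests along the way.
    static BufferedDataTable copy(final DataTable in, final ExecutionContext exec)
            throws CanceledExecutionException {
        BufferedDataContainer container = exec.createDataContainer(in.getDataTableSpec());
        try {
            long count = 0;
            for (RowIterator it = in.iterator(); it.hasNext();) {
                DataRow row = it.next();
                exec.checkCanceled();  // throws CanceledExecutionException on cancel
                exec.setMessage("Adding row " + (++count) + " (\"" + row.getKey() + "\")");
                container.addRowToTable(row);
            }
        } finally {
            container.close();  // seal the container even on cancel, as above
        }
        return container.getTable();
    }
}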
use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
the class HiLiteCollectorNodeModel method execute.
/**
* {@inheritDoc}
*/
@Override
protected PortObject[] execute(final PortObject[] inData, final ExecutionContext exec)
        throws Exception {
    if (m_annotationMap.isEmpty()) {
        return inData;
    }
    DataTableSpec inSpec = (DataTableSpec) inData[0].getSpec();
    final DataColumnSpec[] cspecs = createSpecs(inSpec);
    ColumnRearranger cr = new ColumnRearranger(inSpec);
    cr.append(new CellFactory() {
        /**
         * {@inheritDoc}
         */
        @Override
        public DataCell[] getCells(final DataRow row) {
            if (m_annotationMap.isEmpty()) {
                return new DataCell[0];
            }
            DataCell[] cells = new DataCell[m_lastIndex + 1];
            for (int i = 0; i < cells.length; i++) {
                Map<Integer, String> map = m_annotationMap.get(row.getKey());
                if (map == null) {
                    cells[i] = DataType.getMissingCell();
                } else {
                    String str = map.get(i);
                    if (str == null) {
                        cells[i] = DataType.getMissingCell();
                    } else {
                        cells[i] = new StringCell(str);
                    }
                }
            }
            return cells;
        }

        /**
         * {@inheritDoc}
         */
        @Override
        public DataColumnSpec[] getColumnSpecs() {
            return cspecs;
        }

        /**
         * {@inheritDoc}
         */
        @Override
        public void setProgress(final int curRowNr, final int rowCount,
                final RowKey lastKey, final ExecutionMonitor em) {
            em.setProgress((double) curRowNr / rowCount);
        }
    });
    return new BufferedDataTable[]{
        exec.createColumnRearrangeTable((BufferedDataTable) inData[0], cr, exec)};
}
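The ColumnRearranger invokes setProgress once per processed row, so the factory's only job there is to turn the row counter into a fraction for the monitor. A stripped-down sketch of a standalone CellFactory doing the same; the class and its single echo-the-row-key column are illustrative, and only the interface and the ExecutionMonitor calls are KNIME API:

import org.knime.core.data.DataCell;
import org.knime.core.data.DataColumnSpec;
import org.knime.core.data.DataRow;
import org.knime.core.data.RowKey;
import org.knime.core.data.container.CellFactory;
import org.knime.core.data.def.StringCell;
import org.knime.core.node.ExecutionMonitor;

final class EchoKeyCellFactory implements CellFactory {

    private final DataColumnSpec[] m_specs;

    EchoKeyCellFactory(final DataColumnSpec[] specs) {
        m_specs = specs;  // expects a single string column spec
    }

    @Override
    public DataCell[] getCells(final DataRow row) {
        // one appended cell per row: the row's key as a string
        return new DataCell[]{new StringCell(row.getKey().getString())};
    }

    @Override
    public DataColumnSpec[] getColumnSpecs() {
        return m_specs;
    }

    @Override
    public void setProgress(final int curRowNr, final int rowCount,
            final RowKey lastKey, final ExecutionMonitor em) {
        // scale the row counter to 0..1 and include the last key in the message
        em.setProgress(curRowNr / (double) rowCount, "processed row \"" + lastKey + "\"");
    }
}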
use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
the class FileUtil method zipDir.
/**
* Packs all files and directories passed in the includeList into a zip
* stream. Recursively adds all files contained in directories. Files in the
* include list are placed in the root of the archive. Files and directories
* in the include list must not have the same (simple) name - otherwise an
* I/O Exception is thrown. The passed stream is not closed when the method
* returns. The stream should have the appropriate compression level set.
*
* @param zout a zipped output stream. Zip entries for each file are added
* to the stream. The compression level is not changed by this
* method. The stream remains open after the method returns!
* @param includeList list of files or directories to add to the zip
* archive. Directories will be added with their content
* (recursively). Files are placed in the root of the archive
* (i.e. their path is not preserved).
* @param zipEntryPrefix an optional parameter to specify the parent entry of
* the added directory content. In most cases this parameter is
* "" or null but can also be, e.g. "subfolder1/subfolder2/" as
* parent hierarchy. Callers should then create the respective
* (empty) zip entries up-front and should include the '/'
* at the end of this string
* @param filter each file (and directory) contained is only included in the
* zip archive if it is accepted by the filter. If a directory is
* not accepted, its entire content is excluded from the zip. Must
* not be null.
* @param exec receives progress messages and is checked for cancel
* requests. Optional, can be null.
*
* @return <code>true</code> if all files and dirs accepted by the filter
* are included, <code>false</code> if an error occurred while reading a
* file in a directory, or if a directory was unreadable.
* @throws CanceledExecutionException if the operation was canceled through
* the <code>exec</code>
* @throws IOException if an I/O error occurs when writing the zip file, or
* if two files or directories in the include list have the same
* (simple) name, or an element in the include list doesn't
* exist.
* @since 3.2
*/
public static boolean zipDir(final ZipOutputStream zout, final Collection<File> includeList,
        final String zipEntryPrefix, final ZipFileFilter filter, final ExecutionMonitor exec)
        throws IOException, CanceledExecutionException {
    ExecutionMonitor execMon = exec;
    if (execMon == null) {
        execMon = new ExecutionMonitor();
    }
    // traverse the source to get a good progress estimate
    long size = 0;
    if (exec != null) {
        for (File f : includeList) {
            size += getFileSizeRec(f);
        }
    } else {
        size = Long.MAX_VALUE;
    }
    ZipWrapper zipper = new ZipWrapper(zout, zipEntryPrefix);
    // the read buffer, re-used for each file
    final byte[] buff = new byte[BUFF_SIZE];
    // false if unable to look into a sub dir or an I/O error occurs
    boolean complete = true;
    for (File f : includeList) {
        if (!filter.include(f)) {
            continue;
        }
        if (f.isFile()) {
            complete &= addZipEntry(buff, zipper, f, f.getName(), execMon, size);
        } else if (f.isDirectory()) {
            complete &= addOneDir(zipper, f, filter, execMon, size, buff);
        } else {
            throw new IOException("File " + f.getAbsolutePath() + " not added to zip archive");
        }
    }
    return complete;
}
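The null check at the top is a common idiom: substituting a detached ExecutionMonitor lets the rest of the method report progress and check for cancellation unconditionally, whether or not a caller is watching. A minimal sketch of the same idiom applied to a plain stream copy (the method and its parameters are illustrative, not KNIME API):

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

import org.knime.core.node.CanceledExecutionException;
import org.knime.core.node.ExecutionMonitor;

public final class CopyWithProgress {

    // Accept an optional monitor; fall back to a detached one so the loop
    // below never has to branch on null.
    public static void copy(final InputStream in, final OutputStream out,
            final long totalBytes, final ExecutionMonitor exec)
            throws IOException, CanceledExecutionException {
        ExecutionMonitor mon = (exec == null) ? new ExecutionMonitor() : exec;
        byte[] buff = new byte[8192];
        long done = 0;
        int read;
        while ((read = in.read(buff)) >= 0) {
            mon.checkCanceled();
            out.write(buff, 0, read);
            done += read;
            mon.setProgress(done / (double) totalBytes);
        }
    }
}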
use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
the class BatchExecutor method loadWorkflow.
/**
* Loads a single workflow.
*
* @param config the workflow configuration
* @return the workflow manager representing the loaded workflow
* @throws IOException if an I/O error occurs while loading the workflow
* @throws InvalidSettingsException if some node or workflow settings are invalid
* @throws CanceledExecutionException if loading the workflow is canceled by the user (should not happen in batch
* mode)
* @throws UnsupportedWorkflowVersionException if the workflow version is not supported
* @throws LockFailedException if the workflow cannot be locked
* @throws IllegalOptionException if a node option is invalid
* @since 2.7
*/
protected WorkflowManager loadWorkflow(final WorkflowConfiguration config)
        throws IOException, InvalidSettingsException, CanceledExecutionException,
        UnsupportedWorkflowVersionException, LockFailedException, IllegalOptionException {
    if (config.inputWorkflow.isFile()) {
        File dir = FileUtil.createTempDir("BatchExecutorInput");
        FileUtil.unzip(config.inputWorkflow, dir);
        config.workflowLocation = dir;
    } else {
        config.workflowLocation = config.inputWorkflow;
    }
    // the workflow may be contained in a sub-directory (e.g. when it was
    // exported to a zip using the wizard)
    if (!new File(config.workflowLocation, WorkflowPersistor.WORKFLOW_FILE).exists()) {
        File[] children = config.workflowLocation.listFiles();
        if ((children == null) || (children.length == 0)) {
            throw new IOException("No workflow directory at " + config.workflowLocation);
        } else {
            config.workflowLocation = children[0];
        }
    }
    BatchExecWorkflowLoadHelper batchLH =
        new BatchExecWorkflowLoadHelper(config.credentials, config.workflowLocation);
    WorkflowLoadResult loadResult =
        WorkflowManager.loadProject(config.workflowLocation, new ExecutionMonitor(), batchLH);
    WorkflowManager wfm = loadResult.getWorkflowManager();
    if (config.failOnLoadError && loadResult.hasErrors()) {
        if (wfm != null) {
            wfm.getParent().removeProject(wfm.getID());
        }
        LOGGER.error(loadResult.getFilteredError("", LoadResultEntryType.Error));
        throw new IOException("Error(s) during workflow loading. Check log file for details.");
    }
    BatchExecWorkflowTemplateLoadHelper batchTemplateLH =
        new BatchExecWorkflowTemplateLoadHelper(batchLH);
    if (config.updateMetanodeLinks) {
        LOGGER.debug("Checking for metanode link updates...");
        try {
            wfm.updateMetaNodeLinks(batchTemplateLH, config.failOnLoadError, new ExecutionMonitor());
        } catch (IOException ex) {
            wfm.getParent().removeProject(wfm.getID());
            throw ex;
        }
        LOGGER.debug("Checking for metanode link updates... done");
    }
    if (!config.flowVariables.isEmpty()) {
        applyWorkflowVariables(wfm, config.reset, config.flowVariables);
    }
    if (config.reset) {
        wfm.resetAndConfigureAll();
        LOGGER.debug("Workflow reset done.");
    }
    try {
        setNodeOptions(config.nodeOptions, wfm);
    } catch (IllegalOptionException | InvalidSettingsException ex) {
        wfm.getParent().removeProject(wfm.getID());
        throw ex;
    }
    return wfm;
}
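Both loadProject and updateMetaNodeLinks receive a bare new ExecutionMonitor() here because batch mode has no progress display; the updates are simply discarded. If progress should be logged instead, one option, assuming the standard listener registration on NodeProgressMonitor, is to back the monitor with a DefaultNodeProgressMonitor:

import org.knime.core.node.DefaultNodeProgressMonitor;
import org.knime.core.node.ExecutionMonitor;

public final class BatchProgressSketch {

    // Build a monitor whose progress updates are echoed to stdout instead
    // of being discarded by the default no-listener setup.
    static ExecutionMonitor loggingMonitor() {
        DefaultNodeProgressMonitor progress = new DefaultNodeProgressMonitor();
        progress.addProgressListener(pe -> {
            Double p = pe.getNodeProgress().getProgress();
            if (p != null) {
                System.out.printf("loading: %3.0f%%%n", p.doubleValue() * 100);
            }
        });
        return new ExecutionMonitor(progress);
    }
}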