Use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
Class MDSManager, method init.
/**
* Initializes the lower dimensional data points randomly.
*
* @param seed The random seed to use.
* @throws CanceledExecutionException If execution was canceled by the user.
*/
public void init(final long seed) throws CanceledExecutionException {
    m_isInit = true;
    Random rand = new Random(seed);
    ExecutionMonitor exec = m_exec.createSubProgress(0.1);
    // init all data points
    RowIterator it = m_inData.iterator();
    while (it.hasNext()) {
        exec.checkCanceled();
        DataRow row = it.next();
        DataPoint p = new DataPoint(m_dimension);
        for (int j = 0; j < m_dimension; j++) {
            p.setElementAt(j, rand.nextDouble());
        }
        m_points.put(row.getKey(), p);
        exec.setProgress("Initialising data points.");
    }
}
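The snippet shows the usual ExecutionMonitor idiom: poll checkCanceled() inside the row loop and report progress as the iteration advances. A minimal, self-contained sketch of that idiom follows; ProgressSketch and processRows are illustrative names, not KNIME API.

import org.knime.core.data.DataRow;
import org.knime.core.data.DataTable;
import org.knime.core.data.RowIterator;
import org.knime.core.node.CanceledExecutionException;
import org.knime.core.node.ExecutionMonitor;

final class ProgressSketch {

    /** Iterates a table, checking for cancellation and reporting progress per row. */
    static void processRows(final DataTable table, final ExecutionMonitor exec)
            throws CanceledExecutionException {
        RowIterator it = table.iterator();
        while (it.hasNext()) {
            exec.checkCanceled();                    // throws if the user canceled
            DataRow row = it.next();
            // ... per-row work goes here ...
            exec.setProgress("Processed row " + row.getKey());
        }
    }
}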
Use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
Class MDSManager, method train.
/**
* Does the training by adjusting the lower dimensional data points
* according to their distances and the distances of the original data.
*
* @param epochs The number of epochs to train.
* @param learningrate The learning rate, specifying the step size of each
* adjustment.
* @throws CanceledExecutionException If execution was canceled by the user.
*/
public void train(final int epochs, final double learningrate) throws CanceledExecutionException {
    if (!m_isInit) {
        init(DEFAULT_SEED);
    }
    ExecutionMonitor exec = m_exec.createSubProgress(0.9);
    m_learningrate = learningrate;
    m_initialLearningrate = learningrate;
    m_epochs = epochs;
    for (int e = 1; e <= epochs; e++) {
        exec.setMessage("Start training");
        exec.checkCanceled();
        doEpoch(e, exec);
        double prog = (double)e / (double)epochs;
        exec.setProgress(prog, "Training epoch " + e + " of " + epochs);
    }
}
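Note how init() and train() divide the parent monitor via createSubProgress(0.1) and createSubProgress(0.9), so initialization contributes 10% and training 90% of the overall progress. A rough sketch of that two-phase split in isolation (TwoPhaseProgress and its contents are illustrative placeholders, not part of MDSManager):

import org.knime.core.node.CanceledExecutionException;
import org.knime.core.node.ExecutionMonitor;

final class TwoPhaseProgress {

    static void run(final ExecutionMonitor parent) throws CanceledExecutionException {
        // phase 1: initialization gets 10% of the parent progress
        ExecutionMonitor initExec = parent.createSubProgress(0.1);
        // ... initialization work ...
        initExec.setProgress(1.0, "Initialization done");

        // phase 2: training gets the remaining 90%, reported per epoch
        ExecutionMonitor trainExec = parent.createSubProgress(0.9);
        int epochs = 50;
        for (int e = 1; e <= epochs; e++) {
            trainExec.checkCanceled();
            // ... one training epoch ...
            trainExec.setProgress(e / (double)epochs, "Training epoch " + e + " of " + epochs);
        }
    }
}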
Use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
Class MDSProjectionManager, method init.
/**
* Initializes the lower dimensional data points randomly.
*
* @param seed The random seed to use.
* @throws CanceledExecutionException If execution was canceled by the user.
*/
public void init(final long seed) throws CanceledExecutionException {
    m_isInit = true;
    Random rand = new Random(seed);
    ExecutionMonitor exec = m_exec.createSubProgress(0.1);
    // init all data points
    RowIterator it = m_inData.iterator();
    while (it.hasNext()) {
        exec.checkCanceled();
        DataRow row = it.next();
        DataPoint p = new DataPoint(m_dimension);
        for (int j = 0; j < m_dimension; j++) {
            p.setElementAt(j, rand.nextDouble());
        }
        m_points.put(row.getKey(), p);
        exec.setProgress("Initialising data points.");
    }
}
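The seed parameter makes the random layout reproducible: the same seed produces the same starting coordinates on every run. A tiny standalone illustration of that property (SeedDemo is not part of KNIME):

import java.util.Random;

final class SeedDemo {
    public static void main(final String[] args) {
        // two generators seeded identically yield the same coordinate sequence,
        // so init(seed) produces reproducible embeddings across runs
        Random a = new Random(42L);
        Random b = new Random(42L);
        System.out.println(a.nextDouble() == b.nextDouble()); // true
    }
}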
Use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
Class WorkflowManager, method createExecutionResult.
/**
* {@inheritDoc}
*/
@Override
public WorkflowExecutionResult createExecutionResult(final ExecutionMonitor exec) throws CanceledExecutionException {
    try (WorkflowLock lock = lock()) {
        WorkflowExecutionResult result = new WorkflowExecutionResult(getID());
        super.saveExecutionResult(result);
        Set<NodeID> bfsSortedSet = m_workflow.createBreadthFirstSortedList(m_workflow.getNodeIDs(), true).keySet();
        boolean success = false;
        for (NodeID id : bfsSortedSet) {
            NodeContainer nc = getNodeContainer(id);
            exec.setMessage(nc.getNameWithID());
            ExecutionMonitor subExec = exec.createSubProgress(1.0 / bfsSortedSet.size());
            NodeContainerExecutionResult subResult = nc.createExecutionResult(subExec);
            if (subResult.isSuccess()) {
                success = true;
            }
            result.addNodeExecutionResult(id, subResult);
        }
        // mark success only if at least one child reported success
        // (important for no-child workflows)
        if (success) {
            result.setSuccess(true);
        }
        return result;
    }
}
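Each node receives an equal slice of the parent monitor via createSubProgress(1.0 / n), so the overall progress advances evenly as the breadth-first traversal proceeds. A stripped-down sketch of that pattern (Task, SubProgressSketch and runAll are placeholder names, not KNIME API):

import java.util.List;

import org.knime.core.node.CanceledExecutionException;
import org.knime.core.node.ExecutionMonitor;

final class SubProgressSketch {

    /** A hypothetical unit of work that reports into its own sub-monitor. */
    interface Task {
        void run(ExecutionMonitor mon) throws CanceledExecutionException;
    }

    static void runAll(final List<Task> tasks, final ExecutionMonitor exec)
            throws CanceledExecutionException {
        for (Task t : tasks) {
            exec.checkCanceled();
            // each task receives an equal share of the parent progress
            ExecutionMonitor sub = exec.createSubProgress(1.0 / tasks.size());
            t.run(sub);
            sub.setProgress(1.0); // ensure the slice is fully consumed even if the task did not report
        }
    }
}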
Use of org.knime.core.node.ExecutionMonitor in project knime-core by knime.
Class WorkflowManager, method load.
/**
* Loads the content of the argument persistor into this node.
*
* @param persistor The persistor containing the node(s) to be loaded as children to this node.
* @param exec For progress/cancellation (currently not supported)
* @param keepNodeMessages Whether to keep the messages that are associated with the nodes in the loaded workflow
* (mostly false but true when remotely computed results are loaded).
* @return A workflow load result, which also contains the loaded node(s).
* @throws IOException If reading the "important" files fails due to I/O problems (e.g. a file is not present)
* @throws InvalidSettingsException If parsing the "important" files fails.
* @throws CanceledExecutionException If canceled.
* @throws UnsupportedWorkflowVersionException If the version of the workflow is unknown (future version)
*/
public WorkflowLoadResult load(final FileWorkflowPersistor persistor, final ExecutionMonitor exec,
        final boolean keepNodeMessages) throws IOException, InvalidSettingsException,
        CanceledExecutionException, UnsupportedWorkflowVersionException {
    final ReferencedFile refDirectory = persistor.getMetaPersistor().getNodeContainerDirectory();
    final File directory = refDirectory.getFile();
    final WorkflowLoadResult result = new WorkflowLoadResult(directory.getName());
    load(persistor, result, exec, keepNodeMessages);
    final WorkflowManager manager = result.getWorkflowManager();
    if (!directory.canWrite()) {
        result.addWarning("Workflow directory \"" + directory.getName()
            + "\" is read-only; saving a modified workflow " + "will not be possible");
        manager.m_isWorkflowDirectoryReadonly = true;
    }
    boolean fixDataLoadProblems = false;
    // if the workflow was apparently ex-/imported without data,
    // check for it and silently overwrite the workflow
    switch (result.getType()) {
        case DataLoadError:
            if (!persistor.mustWarnOnDataLoadError() && !manager.m_isWorkflowDirectoryReadonly) {
                LOGGER.debug("Workflow was apparently ex/imported without "
                    + "data, silently fixing states and writing changes");
                try {
                    manager.save(directory, new ExecutionMonitor(), true);
                    fixDataLoadProblems = true;
                } catch (Throwable t) {
                    LOGGER.warn("Failed in an attempt to write workflow to file (workflow was ex/imported "
                        + "without data; could not write the \"corrected\" flow.)", t);
                }
            }
            break;
        default:
    }
    StringBuilder message = new StringBuilder("Loaded workflow from \"");
    message.append(directory.getAbsolutePath()).append("\" ");
    switch (result.getType()) {
        case Ok:
            message.append(" with no errors");
            break;
        case Warning:
            message.append(" with warnings");
            break;
        case DataLoadError:
            message.append(" with errors during data load. ");
            if (fixDataLoadProblems) {
                message.append("Problems were fixed and (silently) saved.");
            } else {
                message.append("Problems were fixed but not saved!");
            }
            break;
        case Error:
            message.append(" with errors");
            break;
        default:
            message.append("with ").append(result.getType());
    }
    LOGGER.debug(message.toString());
    return result;
}
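For orientation, a hedged sketch of how a caller might invoke this method; constructing the FileWorkflowPersistor and the parent WorkflowManager is elided because that setup is version-specific, and the variable names are placeholders.

// 'parent' is an existing WorkflowManager, 'persistor' a FileWorkflowPersistor prepared elsewhere.
WorkflowLoadResult res = parent.load(persistor, new ExecutionMonitor(), false);
WorkflowManager loaded = res.getWorkflowManager();
// The result type mirrors the switch above: Ok, Warning, DataLoadError or Error.
System.out.println("Loaded \"" + loaded.getName() + "\" with result " + res.getType());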