use of org.knime.core.data.container.ContainerTable in project knime-core by knime.
the class Node method cleanOutPorts.
/**
 * Sets output objects to null.
 * @param isLoopRestart If true, does not clear tables that are part
 * of the internally held tables (loop start nodes implement the
 * {@link BufferedDataTableHolder} interface). This can only be true
 * between two loop iterations.
 * @noreference This method is not intended to be referenced by clients.
 */
public void cleanOutPorts(final boolean isLoopRestart) {
    if (isLoopRestart) {
        // just as an assertion
        FlowObjectStack inStack = getFlowObjectStack();
        FlowLoopContext flc = inStack.peek(FlowLoopContext.class);
        if (flc != null && flc.isInactiveScope()) {
            LOGGER.coding("Encountered an inactive FlowLoopContext in a loop restart.");
            // continue with historically "correct" solution:
            flc = inStack.peekScopeContext(FlowLoopContext.class, false);
        }
        if (flc == null && !this.isModelCompatibleTo(LoopStartNode.class)) {
            LOGGER.coding("Encountered a loop restart action but there is no loop context "
                + "on the flow object stack (node " + getName() + ")");
        }
    }
    LOGGER.debug("clean output ports.");
    Set<BufferedDataTable> disposableTables = new LinkedHashSet<BufferedDataTable>();
    for (int i = 0; i < m_outputs.length; i++) {
        PortObject portObject = m_outputs[i].object;
        if (portObject instanceof BufferedDataTable) {
            final BufferedDataTable table = (BufferedDataTable) portObject;
            table.collectTableAndReferencesOwnedBy(this, disposableTables);
        }
        m_outputs[i].spec = null;
        m_outputs[i].object = null;
        m_outputs[i].summary = null;
    }
    if (m_internalHeldPortObjects != null) {
        Set<BufferedDataTable> internalTableSet = collectTableAndReferences(m_internalHeldPortObjects);
        // on a loop restart, tables that are also part of the internally
        // held tables must not be cleared between iterations
        if (isLoopRestart) {
            disposableTables.removeAll(internalTableSet);
        } else {
            disposableTables.addAll(internalTableSet);
            m_internalHeldPortObjects = null;
        }
    }
    for (BufferedDataTable disposable : disposableTables) {
        disposable.clearSingle(this);
    }
    // clear temporary tables that have been created during execute
    for (ContainerTable t : m_localTempTables) {
        t.clear();
    }
    m_localTempTables.clear();
}
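
To make the BufferedDataTableHolder interaction mentioned in the Javadoc concrete, here is an illustrative fragment of a loop start model (class and field names are invented, and the remaining NodeModel/LoopStartNode methods are omitted for brevity). Tables returned by getInternalTables() end up in the internal table set above and are therefore not cleared when cleanOutPorts(true) runs between two loop iterations.

// Illustrative fragment only; configure/execute/reset and the other NodeModel methods are omitted.
public class ExampleLoopStartNodeModel extends NodeModel implements LoopStartNode, BufferedDataTableHolder {

    private BufferedDataTable m_heldTable; // kept alive across loop iterations

    protected ExampleLoopStartNodeModel() {
        super(1, 1);
    }

    /** Tables reported here are removed from the disposable set in cleanOutPorts(true). */
    @Override
    public BufferedDataTable[] getInternalTables() {
        return m_heldTable == null ? new BufferedDataTable[0] : new BufferedDataTable[]{m_heldTable};
    }

    /** Called by the framework to restore the internally held tables. */
    @Override
    public void setInternalTables(final BufferedDataTable[] tables) {
        m_heldTable = (tables != null && tables.length > 0) ? tables[0] : null;
    }
}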
use of org.knime.core.data.container.ContainerTable in project knime-core by knime.
the class BufferedDataContainer method getTable.
/**
 * Returns the content of this container in a BufferedDataTable. The result
 * can be returned, e.g. in a NodeModel's execute method.
 * {@inheritDoc}
 */
@Override
public BufferedDataTable getTable() {
    if (m_resultTable == null) {
        ContainerTable buffer = getBufferedTable();
        m_resultTable = new BufferedDataTable(buffer, buffer.getBufferID());
        m_resultTable.setOwnerRecursively(m_node);
    }
    return m_resultTable;
}
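
As a usage illustration, here is a minimal execute() sketch showing the typical create/add/close/getTable sequence; the column name and cell values are made up and not taken from knime-core.

// Hypothetical NodeModel.execute() showing the intended call sequence.
@Override
protected BufferedDataTable[] execute(final BufferedDataTable[] inData, final ExecutionContext exec)
        throws Exception {
    DataTableSpec spec = new DataTableSpec(
        new DataColumnSpecCreator("Value", DoubleCell.TYPE).createSpec());
    BufferedDataContainer container = exec.createDataContainer(spec);
    for (long i = 0; i < 3; i++) {
        container.addRowToTable(new DefaultRow(RowKey.createRowKey(i), new DoubleCell(i)));
    }
    container.close(); // the container must be closed before getTable() is called
    return new BufferedDataTable[]{container.getTable()};
}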
use of org.knime.core.data.container.ContainerTable in project knime-core by knime.
the class TableContentModel method setDataTableIntern.
/**
 * Sets new data for this table. The table argument may be
 * <code>null</code> to indicate invalid data (nothing displayed).
 */
private void setDataTableIntern(final DataTable originalData, final DataTable data, final TableSortOrder sortOrder) {
    assert SwingUtilities.isEventDispatchThread();
    if (m_data == data) {
        // do not start event storm
        return;
    }
    boolean clearOldTable = m_tableSortOrder != null;
    if (m_tableSorterWorker != null) {
        m_tableSorterWorker.cancel(true);
        m_tableSorterWorker = null;
    }
    m_tableSortOrder = sortOrder;
    cancelRowCountingInBackground();
    int oldColCount = getColumnCount();
    int newColCount = data != null ? data.getDataTableSpec().getNumColumns() : 0;
    int oldRowCount = getRowCount();
    DataTable oldData = m_data;
    m_originalUnsortedTable = originalData;
    m_data = data;
    m_cachedRows = null;
    m_hilitSet = null;
    if (m_iterator instanceof CloseableRowIterator) {
        ((CloseableRowIterator) m_iterator).close();
    }
    m_iterator = null;
    m_rowCountOfInterestInIterator = 0;
    m_rowCountOfInterest = 0;
    m_maxRowCount = 0;
    cancelRowCountingInBackground();
    m_isMaxRowCountFinal = true;
    m_isRowCountOfInterestFinal = true;
    boolean structureChanged = oldColCount != newColCount;
    if (oldColCount == newColCount) {
        if (oldRowCount > 0) {
            fireTableRowsDeleted(0, oldRowCount - 1);
        }
        if (newColCount > 0) {
            structureChanged = !data.getDataTableSpec().equalStructure(oldData.getDataTableSpec());
        }
    }
    if (data != null) {
        // new data available, release old stuff
        // assume that there are rows, may change in cacheNextRow() below
        m_isMaxRowCountFinal = false;
        m_isRowCountOfInterestFinal = false;
        final long rowCountFromTable;
        if (data instanceof BufferedDataTable) {
            rowCountFromTable = ((BufferedDataTable) data).size();
        } else if (data instanceof ContainerTable) {
            rowCountFromTable = ((ContainerTable) data).size();
        } else {
            // unknown
            rowCountFromTable = -1;
        }
        if (rowCountFromTable >= 0) {
            m_isMaxRowCountFinal = true;
            if (rowCountFromTable > Integer.MAX_VALUE) {
                NodeLogger.getLogger(getClass()).warn("Table view will show only the first "
                    + Integer.MAX_VALUE + " rows of " + rowCountFromTable + ".");
                m_maxRowCount = Integer.MAX_VALUE;
            } else {
                m_maxRowCount = (int) rowCountFromTable;
            }
            if (!m_tableFilter.performsFiltering()) {
                m_rowCountOfInterest = m_maxRowCount;
                m_isRowCountOfInterestFinal = true;
            }
        }
        int cacheSize = getCacheSize();
        m_cachedRows = new DataRow[cacheSize];
        m_hilitSet = new BitSet(cacheSize);
        // will instantiate a new iterator.
        clearCache();
        // will also set m_isRowCountOfInterestFinal etc. accordingly
        cacheNextRow();
    }
    if (structureChanged) {
        // notify listeners
        fireTableStructureChanged();
    } else {
        int newRowCount = getRowCount();
        if (newRowCount > 0) {
            fireTableRowsInserted(0, newRowCount);
        }
    }
    m_propertySupport.firePropertyChange(PROPERTY_DATA, oldData, m_data);
    if (clearOldTable && oldData instanceof ContainerTable) {
        ((ContainerTable) oldData).clear();
    }
}
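
Since the method asserts that it runs on the Event Dispatch Thread, callers are expected to hand new data to the model from the EDT. A small hedged sketch, assuming the public setDataTable(DataTable) wrapper delegates to setDataTableIntern:

// Sketch: update the model on the Event Dispatch Thread; a null table clears the view.
static void showTable(final TableContentModel model, final DataTable table) {
    SwingUtilities.invokeLater(new Runnable() {
        @Override
        public void run() {
            model.setDataTable(table);
        }
    });
}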
use of org.knime.core.data.container.ContainerTable in project knime-core by knime.
the class Bug5405_WorkflowLocationAfterSaveAs method testExecAfterLoad.
/**
 * Basic tests that execution works and the NC dir and workflow context folder are the same.
 */
@Test
public void testExecAfterLoad() throws Exception {
    WorkflowManager manager = getManager();
    ContainerTable fileReaderTable = getExecuteFileReaderTable();
    // tables are not extracted to workflow temp space after load (lazy init)
    Assert.assertFalse(fileReaderTable.isOpen());
    checkState(m_fileReader2, InternalNodeContainerState.EXECUTED);
    checkStateOfMany(InternalNodeContainerState.CONFIGURED, m_fileReader1, m_diffChecker3);
    Assert.assertNotNull(manager.getContext());
    Assert.assertEquals(manager.getNodeContainerDirectory().getFile(), manager.getContext().getCurrentLocation());
    executeAndWait(m_diffChecker3);
    Assert.assertTrue(fileReaderTable.isOpen());
    checkStateOfMany(InternalNodeContainerState.EXECUTED, m_fileReader2, m_fileReader1, m_diffChecker3);
}
use of org.knime.core.data.container.ContainerTable in project knime-core by knime.
the class PolyRegLearnerNodeModel method loadInternals.
/**
 * {@inheritDoc}
 */
@Override
protected void loadInternals(final File nodeInternDir, final ExecutionMonitor exec) throws IOException, CanceledExecutionException {
    File f = new File(nodeInternDir, "data.zip");
    final DataArray rowContainer;
    if (f.exists()) {
        ContainerTable t = DataContainer.readFromZip(f);
        int rowCount = t.getRowCount();
        rowContainer = new DefaultDataArray(t, 1, rowCount, exec);
    } else {
        throw new FileNotFoundException("Internals do not exist");
    }
    f = new File(nodeInternDir, "internals.xml");
    if (f.exists()) {
        NodeSettingsRO internals = NodeSettings.loadFromXML(new BufferedInputStream(new FileInputStream(f)));
        try {
            double[] betas = internals.getDoubleArray("betas");
            String[] columnNames = internals.getStringArray("columnNames");
            double squaredError = internals.getDouble("squaredError");
            double adjustedR2 = internals.getDouble("adjustedSquaredError", Double.NaN);
            double[] meanValues = internals.getDoubleArray("meanValues");
            double[] emptyArray = new double[betas.length];
            Arrays.fill(emptyArray, Double.NaN);
            double[] stdErrs = internals.getDoubleArray("stdErrors", emptyArray);
            double[] tValues = internals.getDoubleArray("tValues", emptyArray);
            double[] pValues = internals.getDoubleArray("pValues", emptyArray);
            m_viewData = new PolyRegViewData(meanValues, betas, stdErrs, tValues, pValues, squaredError,
                adjustedR2, columnNames, m_settings.getDegree(), m_settings.getTargetColumn(), rowContainer);
        } catch (InvalidSettingsException ex) {
            throw new IOException("Old or corrupt internals", ex);
        }
    } else {
        throw new FileNotFoundException("Internals do not exist");
    }
}
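
For orientation, here is a hedged sketch of what the matching saveInternals counterpart could look like, writing the same data.zip and internals.xml files with the keys read above. The fields it persists are assumptions for illustration; only the file names and settings keys mirror loadInternals(), and the actual PolyRegLearnerNodeModel implementation may differ.

@Override
protected void saveInternals(final File nodeInternDir, final ExecutionMonitor exec)
        throws IOException, CanceledExecutionException {
    // row sample that loadInternals() reads back via DataContainer.readFromZip
    DataContainer.writeToZip(m_rowContainer, new File(nodeInternDir, "data.zip"), exec); // m_rowContainer is an assumed field
    // persist the regression statistics under the keys expected by loadInternals()
    NodeSettings internals = new NodeSettings("internals");
    internals.addDoubleArray("betas", m_betas);              // assumed field
    internals.addStringArray("columnNames", m_columnNames);  // assumed field
    internals.addDouble("squaredError", m_squaredError);     // assumed field
    internals.addDoubleArray("meanValues", m_meanValues);    // assumed field
    internals.saveToXML(new BufferedOutputStream(
        new FileOutputStream(new File(nodeInternDir, "internals.xml"))));
}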