Use of org.knime.core.data.container.BufferedContainerTable in project knime-core by knime.
In class Bug5405_WorkflowLocationAfterSaveAs, method testExecAfterLoad:
/**
 * Basic test that execution works and that the NC dir and the workflow context folder are the same.
 */
@Test
public void testExecAfterLoad() throws Exception {
    WorkflowManager manager = getManager();
    ContainerTable fileReaderTable = getExecuteFileReaderTable();
    // tables are not extracted to workflow temp space after load (lazy init)
    Assert.assertFalse(((BufferedContainerTable) fileReaderTable).isOpen());
    checkState(m_fileReader2, InternalNodeContainerState.EXECUTED);
    checkStateOfMany(InternalNodeContainerState.CONFIGURED, m_fileReader1, m_diffChecker3);
    Assert.assertNotNull(manager.getContext());
    Assert.assertEquals(manager.getNodeContainerDirectory().getFile(), manager.getContext().getCurrentLocation());
    executeAndWait(m_diffChecker3);
    Assert.assertTrue(((BufferedContainerTable) fileReaderTable).isOpen());
    checkStateOfMany(InternalNodeContainerState.EXECUTED, m_fileReader2, m_fileReader1, m_diffChecker3);
}
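The assertions above hinge on lazy initialization of container tables: after loading, the file reader's table is not yet materialized in the workflow temp space, and it only opens once a downstream node (here m_diffChecker3) actually pulls its rows. As a rough, self-contained illustration of that invariant in plain JUnit 4 (LazyTable is a made-up placeholder, not a KNIME class):

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;

import org.junit.Test;

// Sketch of the lazy-open invariant checked in the test above.
public class LazyOpenSketchTest {

    static final class LazyTable {
        private boolean m_open;

        boolean isOpen() {
            return m_open;
        }

        void readRows() {
            // first access materializes the backing data
            m_open = true;
        }
    }

    @Test
    public void opensOnlyOnFirstAccess() {
        LazyTable table = new LazyTable();
        assertFalse(table.isOpen()); // mirrors the assertion right after workflow load
        table.readRows();            // mirrors executeAndWait(m_diffChecker3)
        assertTrue(table.isOpen());  // the table is open only after a consumer read it
    }
}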
Use of org.knime.core.data.container.BufferedContainerTable in project knime-core by knime.
In class BufferedDataTable, method save:
/**
 * Saves the table to a directory and writes some settings to the argument
 * NodeSettingsWO object. It will also write the reference table in case
 * this node is responsible for it (i.e. this node created the reference
 * table).
 * @param dir The directory to write to.
 * @param savedTableIDs Ids of tables that were previously saved, used to identify
 *        tables that are referenced by the same nodes multiple times.
 * @param exec The progress monitor for cancellation.
 * @throws IOException If writing fails.
 * @throws CanceledExecutionException If canceled.
 */
void save(final File dir, final Set<Integer> savedTableIDs, final ExecutionMonitor exec) throws IOException, CanceledExecutionException {
    NodeSettings s = new NodeSettings(CFG_TABLE_META);
    Integer bufferedTableID = getBufferedTableId();
    s.addInt(CFG_TABLE_ID, bufferedTableID);
    File outFile = new File(dir, TABLE_FILE);
    if (!savedTableIDs.add(bufferedTableID)) {
        s.addString(CFG_TABLE_TYPE, TABLE_TYPE_REFERENCE_IN_SAME_NODE);
    } else if (m_delegate instanceof BufferedContainerTable) {
        final TableStoreFormat format = ((BufferedContainerTable) m_delegate).getTableStoreFormat();
        if (!DefaultTableStoreFormat.class.equals(format.getClass())) {
            // use different identifier to cause old versions of KNIME to fail loading newer workflows
            s.addString(CFG_TABLE_TYPE, TABLE_TYPE_CONTAINER_CUSTOM);
            s.addString(CFG_TABLE_CONTAINER_FORMAT, format.getClass().getName());
            s.addString(CFG_TABLE_CONTAINER_FORMAT_VERSION, format.getVersion());
        } else {
            final DefaultTableStoreFormat defaultFormat = (DefaultTableStoreFormat) format;
            if (!Arrays.asList(NONE, GZIP).contains(defaultFormat.getCompressionFormat())) {
                s.addString(CFG_TABLE_TYPE, TABLE_TYPE_CONTAINER_COMPRESS);
                s.addString(CFG_TABLE_COMPRESSION_FORMAT, defaultFormat.getCompressionFormat().toString());
            } else {
                s.addString(CFG_TABLE_TYPE, TABLE_TYPE_CONTAINER);
            }
        }
        m_delegate.saveToFile(outFile, s, exec);
    } else {
        if (m_delegate instanceof RearrangeColumnsTable) {
            final ContainerTable containerTable = ((RearrangeColumnsTable) m_delegate).getAppendTable();
            if (containerTable != null && containerTable instanceof BufferedContainerTable) {
                final BufferedContainerTable appendTable = (BufferedContainerTable) containerTable;
                final TableStoreFormat format = appendTable.getTableStoreFormat();
                if (!DefaultTableStoreFormat.class.equals(format.getClass())) {
                    // use different identifier to cause old versions of KNIME to fail loading newer workflows
                    s.addString(CFG_TABLE_TYPE, TABLE_TYPE_REARRANGE_COLUMN_CUSTOM);
                    s.addString(CFG_TABLE_CONTAINER_FORMAT, appendTable.getTableStoreFormat().getClass().getName());
                    s.addString(CFG_TABLE_CONTAINER_FORMAT_VERSION, appendTable.getTableStoreFormat().getVersion());
                } else {
                    final DefaultTableStoreFormat defaultFormat = (DefaultTableStoreFormat) format;
                    if (!Arrays.asList(NONE, GZIP).contains(defaultFormat.getCompressionFormat())) {
                        s.addString(CFG_TABLE_TYPE, TABLE_TYPE_REARRANGE_COLUMN_COMPRESS);
                        s.addString(CFG_TABLE_COMPRESSION_FORMAT, defaultFormat.getCompressionFormat().toString());
                    } else {
                        s.addString(CFG_TABLE_TYPE, TABLE_TYPE_REARRANGE_COLUMN);
                    }
                }
            } else {
                s.addString(CFG_TABLE_TYPE, TABLE_TYPE_REARRANGE_COLUMN);
            }
        } else if (m_delegate instanceof TableSpecReplacerTable) {
            s.addString(CFG_TABLE_TYPE, TABLE_TYPE_NEW_SPEC);
        } else if (m_delegate instanceof WrappedTable) {
            s.addString(CFG_TABLE_TYPE, TABLE_TYPE_WRAPPED);
        } else if (m_delegate instanceof JoinedTable) {
            s.addString(CFG_TABLE_TYPE, TABLE_TYPE_JOINED);
        } else if (m_delegate instanceof VoidTable) {
            s.addString(CFG_TABLE_TYPE, TABLE_TYPE_VOID);
        } else if (m_delegate instanceof ConcatenateTable) {
            s.addString(CFG_TABLE_TYPE, TABLE_TYPE_CONCATENATE);
        } else {
            assert m_delegate instanceof ExtensionTable;
            s.addString(CFG_TABLE_TYPE, TABLE_TYPE_EXTENSION);
        }
        BufferedDataTable[] references = m_delegate.getReferenceTables();
        ArrayList<String> referenceDirs = new ArrayList<String>();
        for (BufferedDataTable reference : references) {
            if (reference.getOwner() == getOwner() && !savedTableIDs.contains(reference.getBufferedTableId())) {
                int index = referenceDirs.size();
                String dirName = "r" + index;
                File subDir = new File(dir, dirName);
                if (!subDir.mkdir() && !subDir.isDirectory()) {
                    throw new IOException("Could not create directory " + subDir.getAbsolutePath());
                }
                if (!subDir.canWrite()) {
                    throw new IOException("Unable to write directory " + subDir.getAbsolutePath());
                }
                referenceDirs.add(dirName);
                reference.save(subDir, savedTableIDs, exec);
            }
        }
        s.addStringArray(CFG_TABLE_REFERENCE, referenceDirs.toArray(new String[referenceDirs.size()]));
        m_delegate.saveToFile(outFile, s, exec);
    }
    // only write the data file to the settings if it has been created
    if (outFile.exists()) {
        s.addString(CFG_TABLE_FILE_NAME, TABLE_FILE);
    } else {
        s.addString(CFG_TABLE_FILE_NAME, null);
    }
    saveSpec(getDataTableSpec(), dir);
    File dataXML = new File(dir, TABLE_DESCRIPTION_FILE);
    try (OutputStream out = new BufferedOutputStream(new FileOutputStream(dataXML))) {
        s.saveToXML(out);
    }
}
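The method above encodes the table's persistence strategy as a type tag in the node settings (CFG_TABLE_TYPE), adding the store format's class name and version only for non-default formats so that older KNIME versions fail fast when loading newer workflows. Below is a minimal sketch of that "type tag plus optional format class/version" layout; the key and value strings are placeholders standing in for the CFG_* and TABLE_TYPE_* constants used in save(), not their actual values:

import java.io.ByteArrayOutputStream;

import org.knime.core.node.NodeSettings;

// Illustrative only: the settings layout save() writes for a table backed by a
// non-default TableStoreFormat. All keys and values here are made-up stand-ins.
public final class TableMetaSettingsSketch {

    public static void main(final String[] args) throws Exception {
        NodeSettings s = new NodeSettings("table_meta");           // stands in for CFG_TABLE_META
        s.addInt("table_id", 42);                                  // stands in for CFG_TABLE_ID
        s.addString("table_type", "container_custom");             // stands in for CFG_TABLE_TYPE
        s.addString("container_format", "com.example.MyFormat");   // stands in for CFG_TABLE_CONTAINER_FORMAT
        s.addString("container_format_version", "1.0");            // stands in for CFG_TABLE_CONTAINER_FORMAT_VERSION

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        s.saveToXML(out);                                           // same call save() uses for the description file
        System.out.println(out.toString("UTF-8"));
    }
}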
Use of org.knime.core.data.container.BufferedContainerTable in project knime-core by knime.
In class AbstractTableStoreReader, method createBlobWrapperCell:
/**
 * Method for deserializing blob cells.
 *
 * @param address the address of the blob
 * @param type the type of the blob cell
 * @return the deserialized blob cell
 * @throws IOException if something goes wrong during deserialization
 */
public final BlobWrapperDataCell createBlobWrapperCell(final BlobAddress address, final CellClassInfo type) throws IOException {
    Buffer blobBuffer = getBuffer();
    if (address.getBufferID() != blobBuffer.getBufferID()) {
        Optional<ContainerTable> cnTbl = blobBuffer.getDataRepository().getTable(address.getBufferID());
        if (!cnTbl.isPresent()) {
            throw new IOException("Unable to retrieve table that owns the blob cell");
        }
        blobBuffer = ((BufferedContainerTable) cnTbl.get()).getBuffer();
    }
    return new BlobWrapperDataCell(blobBuffer, address, type);
}
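When the blob lives in a different buffer than the one currently being read, the code above resolves the owning table through the data repository and turns a missing entry into an IOException. A simplified, KNIME-free stand-in for that lookup pattern is sketched below; the Map-based repository and the Table interface are placeholders, not KNIME types:

import java.io.IOException;
import java.util.Map;
import java.util.Optional;

// Simplified stand-in for the owner lookup in createBlobWrapperCell: resolve the
// table that owns a blob by its buffer id and fail with an IOException when the
// repository has no entry for that id.
final class BlobOwnerLookupSketch {

    interface Table {
        // placeholder for ContainerTable / BufferedContainerTable
    }

    static Table ownerOf(final int bufferId, final Map<Integer, Table> repository) throws IOException {
        Optional<Table> table = Optional.ofNullable(repository.get(bufferId));
        return table.orElseThrow(
            () -> new IOException("Unable to retrieve table that owns the blob cell (buffer id " + bufferId + ")"));
    }
}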
Use of org.knime.core.data.container.BufferedContainerTable in project knime-core by knime.
In class Bug5405_WorkflowLocationAfterSaveAs, method testExecAfterSaveAs:
/**
 * Loads the workflow, saves it to a new location, then executes.
 */
@Test
public void testExecAfterSaveAs() throws Exception {
    WorkflowManager manager = getManager();
    ContainerTable fileReaderTable = getExecuteFileReaderTable();
    // tables are not extracted to workflow temp space after load (lazy init)
    Assert.assertFalse(((BufferedContainerTable) fileReaderTable).isOpen());
    Assert.assertNotNull(manager.getContext());
    Assert.assertEquals(manager.getNodeContainerDirectory().getFile(), manager.getContext().getCurrentLocation());
    File saveAsFolder = FileUtil.createTempDir(getClass().getName());
    saveAsFolder.delete();
    WorkflowContext.Factory fac = manager.getContext().createCopy().setCurrentLocation(saveAsFolder);
    manager.saveAs(fac.createContext(), new ExecutionMonitor());
    Assert.assertEquals(saveAsFolder, manager.getNodeContainerDirectory().getFile());
    Assert.assertEquals(saveAsFolder, manager.getContext().getCurrentLocation());
    // if this fails (= assertion thrown) this means the workflow format has changed and all nodes are dirty
    // when save-as is called.
    Assert.assertFalse(((BufferedContainerTable) fileReaderTable).isOpen());
    executeAndWait(m_diffChecker3);
    checkStateOfMany(InternalNodeContainerState.EXECUTED, m_fileReader2, m_fileReader1, m_diffChecker3);
    Assert.assertTrue(((BufferedContainerTable) fileReaderTable).isOpen());
}