Use of org.knime.core.data.filestore.internal.IWriteFileStoreHandler in project knime-core by knime.
The class NativeNodeContainer, method mimicRemotePreExecute:
/**
 * {@inheritDoc}
 */
@Override
void mimicRemotePreExecute() {
    synchronized (m_nodeMutex) {
        getProgressMonitor().reset();
        switch (getInternalState()) {
            case EXECUTED_MARKEDFOREXEC:
            case CONFIGURED_MARKEDFOREXEC:
            case UNCONFIGURED_MARKEDFOREXEC:
                // ideally opening the file store handler would be done in "mimicRemoteExecuting" (consistently to
                // performStateTransitionEXECUTING) but remote execution isn't split up that nicely - there is only
                // pre-execute and executed
                IWriteFileStoreHandler fsh = initFileStore(getParent().getFileStoreHandlerRepository());
                m_node.setFileStoreHandler(fsh);
                setInternalState(InternalNodeContainerState.PREEXECUTE);
                break;
            case EXECUTED:
                // ignore executed nodes
                break;
            default:
                throwIllegalStateException();
        }
    }
}
Use of org.knime.core.data.filestore.internal.IWriteFileStoreHandler in project knime-core by knime.
The class NativeNodeContainer, method performStateTransitionEXECUTING:
/**
 * {@inheritDoc}
 */
@Override
void performStateTransitionEXECUTING() {
    synchronized (m_nodeMutex) {
        switch (getInternalState()) {
            case PREEXECUTE:
                this.getNode().clearLoopContext();
                if (findJobManager() instanceof ThreadNodeExecutionJobManager) {
                    setInternalState(InternalNodeContainerState.EXECUTING);
                } else {
                    setInternalState(InternalNodeContainerState.EXECUTINGREMOTELY);
                }
                IWriteFileStoreHandler fsh = initFileStore(getParent().getFileStoreHandlerRepository());
                m_node.setFileStoreHandler(fsh);
                break;
            default:
                throwIllegalStateException();
        }
    }
}
Use of org.knime.core.data.filestore.internal.IWriteFileStoreHandler in project knime-core by knime.
The class NativeNodeContainer, method initFileStore:
private IWriteFileStoreHandler initFileStore(final WorkflowFileStoreHandlerRepository fileStoreHandlerRepository) {
    final FlowObjectStack flowObjectStack = getFlowObjectStack();
    FlowLoopContext upstreamFLC = flowObjectStack.peek(FlowLoopContext.class);
    if (upstreamFLC == null) {
        // if node is contained in subnode check if the subnode is in a loop (see AP-5667)
        final FlowSubnodeScopeContext subnodeSC = flowObjectStack.peek(FlowSubnodeScopeContext.class);
        if (subnodeSC != null) {
            upstreamFLC = subnodeSC.getOuterFlowLoopContext();
        }
    }
    NodeID outerStartNodeID = upstreamFLC == null ? null : upstreamFLC.getHeadNode();
    // loop start nodes will put their loop context on the outgoing flow object stack
    assert !getID().equals(outerStartNodeID) : "Loop start on incoming flow stack can't be node itself";

    final FlowLoopContext innerFLC = getOutgoingFlowObjectStack().peek(FlowLoopContext.class);
    NodeID innerStartNodeID = innerFLC == null ? null : innerFLC.getHeadNode();
    // if there is a loop context on this node's stack, this node must be the start
    assert !(this.isModelCompatibleTo(LoopStartNode.class)) || getID().equals(innerStartNodeID);

    IFileStoreHandler oldFSHandler = m_node.getFileStoreHandler();
    IWriteFileStoreHandler newFSHandler;

    if (innerFLC == null && upstreamFLC == null) {
        // node is not a start node and not contained in a loop
        if (oldFSHandler instanceof IWriteFileStoreHandler) {
            clearFileStoreHandler();
            /*assert false : "Node " + getNameWithID() + " must not have file store handler at this point (not a "
                + "loop start and not contained in loop), disposing old handler";*/
        }
        newFSHandler = new WriteFileStoreHandler(getNameWithID(), UUID.randomUUID());
        newFSHandler.addToRepository(fileStoreHandlerRepository);
    } else if (innerFLC != null) {
        // node is a loop start node
        int loopIteration = innerFLC.getIterationIndex();
        if (loopIteration == 0) {
            if (oldFSHandler instanceof IWriteFileStoreHandler) {
                assert false : "Loop Start " + getNameWithID() + " must not have file store handler at this point "
                    + "(no iteration ran), disposing old handler";
                clearFileStoreHandler();
            }
            if (upstreamFLC != null) {
                ILoopStartWriteFileStoreHandler upStreamFSHandler = upstreamFLC.getFileStoreHandler();
                newFSHandler = new LoopStartReferenceWriteFileStoreHandler(upStreamFSHandler, innerFLC);
            } else {
                newFSHandler = new LoopStartWritableFileStoreHandler(this, UUID.randomUUID(), innerFLC);
            }
            newFSHandler.addToRepository(fileStoreHandlerRepository);
            innerFLC.setFileStoreHandler((ILoopStartWriteFileStoreHandler)newFSHandler);
        } else {
            assert oldFSHandler instanceof IWriteFileStoreHandler : "Loop Start " + getNameWithID()
                + " must have file store handler in iteration " + loopIteration;
            newFSHandler = (IWriteFileStoreHandler)oldFSHandler;
            // keep the old one
        }
    } else {
        // ordinary node contained in loop
        assert innerFLC == null && upstreamFLC != null;
        ILoopStartWriteFileStoreHandler upStreamFSHandler = upstreamFLC.getFileStoreHandler();
        if (this.isModelCompatibleTo(LoopEndNode.class)) {
            if (upstreamFLC.getIterationIndex() > 0) {
                newFSHandler = (IWriteFileStoreHandler)oldFSHandler;
            } else {
                newFSHandler = new LoopEndWriteFileStoreHandler(upStreamFSHandler);
                newFSHandler.addToRepository(fileStoreHandlerRepository);
            }
        } else {
            newFSHandler = new ReferenceWriteFileStoreHandler(upStreamFSHandler);
            newFSHandler.addToRepository(fileStoreHandlerRepository);
        }
    }
    return newFSHandler;
}
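To make the branching above easier to follow, the sketch below summarizes which handler type initFileStore ends up creating. It is a hypothetical helper written for illustration only: the method name handlerKindFor and its boolean parameters are not part of the KNIME API, they merely restate the conditions (innerFLC, upstreamFLC, iteration index, LoopEndNode) tested in the real code.

// Hypothetical sketch (not KNIME API): restates the decision made by initFileStore above.
// isLoopStart    - the node pushed its own FlowLoopContext (innerFLC != null)
// insideLoop     - an enclosing FlowLoopContext was found on the incoming stack (upstreamFLC != null)
// firstIteration - the relevant loop context reports iteration index 0
// isLoopEnd      - the node's model implements LoopEndNode
static String handlerKindFor(final boolean isLoopStart, final boolean insideLoop,
        final boolean firstIteration, final boolean isLoopEnd) {
    if (!isLoopStart && !insideLoop) {
        return "WriteFileStoreHandler";                         // plain node outside any loop
    } else if (isLoopStart) {
        if (!firstIteration) {
            return "reuse handler from previous iteration";
        }
        return insideLoop
            ? "LoopStartReferenceWriteFileStoreHandler"         // loop start nested in an outer loop
            : "LoopStartWritableFileStoreHandler";              // top-level loop start
    } else if (isLoopEnd) {
        return firstIteration
            ? "LoopEndWriteFileStoreHandler"
            : "reuse handler from previous iteration";
    } else {
        return "ReferenceWriteFileStoreHandler";                // ordinary node inside a loop body
    }
}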
Use of org.knime.core.data.filestore.internal.IWriteFileStoreHandler in project knime-core by knime.
The class DataContainer, method addRowToTable:
/**
 * {@inheritDoc}
 */
@Override
public void addRowToTable(final DataRow row) {
    if (!isOpen()) {
        throw new IllegalStateException("Cannot add row: container has not been initialized (opened).");
    }
    if (row == null) {
        throw new NullPointerException("Can't add null rows to container");
    }
    if (m_buffer == null) {
        int bufID = createInternalBufferID();
        Map<Integer, ContainerTable> globalTableRep = getGlobalTableRepository();
        Map<Integer, ContainerTable> localTableRep = getLocalTableRepository();
        IWriteFileStoreHandler fileStoreHandler = getFileStoreHandler();
        m_buffer = m_bufferCreator.createBuffer(m_spec, m_maxRowsInMemory, bufID, globalTableRep,
            localTableRep, fileStoreHandler);
        if (m_buffer == null) {
            throw new NullPointerException("Implementation error, must not return a null buffer.");
        }
    }
    if (m_isSynchronousWrite) {
        if (MemoryAlertSystem.getInstance().isMemoryLow()) {
            m_buffer.flushBuffer();
        }
        addRowToTableWrite(row);
    } else {
        checkAsyncWriteThrowable();
        if (MemoryAlertSystem.getInstance().isMemoryLow()) {
            offerToAsynchronousQueue(FLUSH_CACHE);
        }
        offerToAsynchronousQueue(row);
    }
    m_size += 1;
}
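For context, here is a minimal sketch of how the public DataContainer API is typically driven from client code; the column names and cell values are made up for illustration. The buffer and the IWriteFileStoreHandler shown above are wired up internally the first time addRowToTable is called, so callers only deal with specs, rows, and the resulting table (types from org.knime.core.data, org.knime.core.data.def and org.knime.core.data.container).

DataColumnSpec nameCol = new DataColumnSpecCreator("Name", StringCell.TYPE).createSpec();
DataColumnSpec ageCol = new DataColumnSpecCreator("Age", IntCell.TYPE).createSpec();
DataTableSpec spec = new DataTableSpec(nameCol, ageCol);

DataContainer container = new DataContainer(spec);
// each call goes through addRowToTable; the first row lazily creates the buffer
container.addRowToTable(new DefaultRow(new RowKey("Row0"), new StringCell("Alice"), new IntCell(42)));
container.addRowToTable(new DefaultRow(new RowKey("Row1"), new StringCell("Bob"), new IntCell(27)));
container.close();
DataTable table = container.getTable();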
Use of org.knime.core.data.filestore.internal.IWriteFileStoreHandler in project knime-core by knime.
The class Buffer, method mustBeFlushedPriorSave:
private boolean mustBeFlushedPriorSave(final DataCell cell) {
    if (cell instanceof FileStoreCell) {
        // file-store backed cell: ask the write handler whether its file store still needs flushing
        FileStore fileStore = FileStoreUtil.getFileStore((FileStoreCell)cell);
        return ((IWriteFileStoreHandler)m_fileStoreHandler).mustBeFlushedPriorSave(fileStore);
    } else if (cell instanceof CollectionDataValue) {
        // collection cell: recurse into the contained cells
        for (DataCell c : (CollectionDataValue)cell) {
            if (mustBeFlushedPriorSave(c)) {
                return true;
            }
        }
    } else if (cell instanceof BlobWrapperDataCell) {
        // blob wrapper: only unwrap (and thereby load) the blob if it may contain a collection
        final BlobWrapperDataCell blobWrapperCell = (BlobWrapperDataCell)cell;
        Class<? extends BlobDataCell> blobClass = blobWrapperCell.getBlobClass();
        if (CollectionDataValue.class.isAssignableFrom(blobClass)) {
            return mustBeFlushedPriorSave(blobWrapperCell.getCell());
        }
    }
    return false;
}