Usage of org.knime.core.data.container.filter.FilterDelegateRowIterator in the knime-core project (by KNIME).
Example 1: the iteratorWithFilter method of the AbstractTableStoreReader class.
/**
 * Provides a {@link TableStoreCloseableRowIterator} that is filtered according to a given {@link TableFilter} and
 * can be iterated over. During iteration, a given {@link ExecutionMonitor} will update its progress.
 *
 * @param filter the filter to be applied
 * @param exec the execution monitor that shall be updated with progress or null if no progress updates are desired
 * @return a filtered iterator
 * @since 4.0
 */
@SuppressWarnings("resource")
public TableStoreCloseableRowIterator iteratorWithFilter(final TableFilter filter, final ExecutionMonitor exec) {
    // the unfiltered iterator over the underlying table store; closed via performClose() below
    final TableStoreCloseableRowIterator underlying = iterator();
    // without a buffer the table size is unknown, so use the largest possible upper bound
    final long numRows = getBuffer() == null ? Long.MAX_VALUE : getBuffer().size();
    final FilterDelegateRowIterator filtered = new FilterDelegateRowIterator(underlying, filter, numRows, exec);
    // adapt the filtering iterator back to the TableStoreCloseableRowIterator interface,
    // forwarding buffer registration and close handling to the underlying iterator
    return new TableStoreCloseableRowIterator() {
        @Override
        public boolean hasNext() {
            return filtered.hasNext();
        }

        @Override
        public DataRow next() {
            return filtered.next();
        }

        @Override
        public void setBuffer(final Buffer buffer) {
            super.setBuffer(buffer);
            underlying.setBuffer(buffer);
        }

        @Override
        public boolean performClose() throws IOException {
            return underlying.performClose();
        }
    };
}
Usage of org.knime.core.data.container.filter.FilterDelegateRowIterator in the knime-core project (by KNIME).
Example 2: the iteratorWithFilter method of the Buffer class.
// This method might return a FilterDelegateRowIterator that wraps a CloseableRowIterator. This leads to a warning
// about the wrapped iterator potentially not being closed. It is safe to disregard this warning though, since
// the FilterDelegateRowIterator takes care of closing the wrapped iterator.
/**
 * Provides a {@link CloseableRowIterator} over this buffer's rows, optionally restricted by the given filter.
 * Depending on whether the table (or parts of it) is held in memory, the iterator is backed by an in-memory list,
 * by an iterator that reads the table back into memory, or by the on-disk table store.
 *
 * @param filter the filter to apply, or {@code null} to iterate over all rows
 * @param exec the execution monitor forwarded to the created iterators for progress reporting
 *            (presumably may be {@code null} -- TODO confirm against the iterator implementations)
 * @return a closeable iterator over the (possibly filtered) rows
 * @throws IllegalStateException if this buffer has already been cleared
 */
@SuppressWarnings("resource")
final synchronized CloseableRowIterator iteratorWithFilter(final TableFilter filter, final ExecutionMonitor exec) {
    // Fail fast: a cleared buffer no longer has backing data to iterate over.
    if (m_isClearedLock.booleanValue()) {
        throw new IllegalStateException("Cannot iterate over table: buffer has been cleared.");
    }
    // Non-null iff (at least parts of) the table are currently available in memory.
    final List<BlobSupportDataRow> list = obtainListFromCacheOrBackIntoMemoryIterator();
    if (list == null) {
        // Case 1: We don't have the table in memory and want to iterate it back into memory.
        if (m_useBackIntoMemoryIterator) {
            // Reset the flag so that only this iterator performs the read-back; later iterators share its list.
            m_useBackIntoMemoryIterator = false;
            final TableStoreCloseableRowIterator tableStoreIt = m_outputReader.iterator();
            tableStoreIt.setBuffer(this);
            m_nrOpenInputStreams.incrementAndGet();
            final BackIntoMemoryIterator backIntoMemoryIt = new BackIntoMemoryIterator(tableStoreIt, size());
            // Register the underlying table store iterator so it is cleaned up with this buffer's open resources.
            m_openResources.register(tableStoreIt, backIntoMemoryIt);
            // Held weakly so an abandoned read-back iterator can be garbage-collected.
            m_backIntoMemoryIteratorRef = new WeakReference<>(backIntoMemoryIt);
            final FromListIterator listIt = new FromListIterator(backIntoMemoryIt.getList(), exec);
            listIt.setBackIntoMemoryIterator(backIntoMemoryIt);
            // Layer the filter (if any) on top of the list-backed iterator.
            return filter == null ? listIt : new FilterDelegateRowIterator(listIt, filter, size(), exec);
        }
        // Case 2: We don't have the table in memory; read (and optionally filter) directly from the table store.
        final TableStoreCloseableRowIterator tableStoreIt = filter == null ? m_outputReader.iterator() : m_outputReader.iteratorWithFilter(filter, exec);
        // register the table store iterator with this buffer
        tableStoreIt.setBuffer(this);
        m_nrOpenInputStreams.incrementAndGet();
        m_openResources.register(tableStoreIt, tableStoreIt);
        return tableStoreIt;
    } else {
        // Non-null iff the table is currently in the process of being read back into memory.
        final BackIntoMemoryIterator backIntoMemoryIt = m_backIntoMemoryIteratorRef != null ? m_backIntoMemoryIteratorRef.get() : null;
        if (filter != null && size() > 0) {
            // Case 3: We have the full table in memory and want to apply a filter.
            if (backIntoMemoryIt == null) {
                // We never store more than 2^31 rows in memory, therefore it's safe to cast to int.
                final int fromIndex = (int) Math.min(Integer.MAX_VALUE, filter.getFromRowIndex().orElse(0l));
                final int toIndex = (int) Math.min(Integer.MAX_VALUE, filter.getToRowIndex().orElse(size() - 1));
                final FromListRangeIterator rangeIterator = new FromListRangeIterator(list, fromIndex, toIndex, exec);
                /*
                 * In a future world (a world of predicates, see AP-11805), the filter might be configured to keep
                 * only rows with an index between 1000 and 2000 and a value greater than 42 in column 13. The
                 * rangeIterator will take care of only returning rows with an index between 1000 and 2000. In
                 * fact, it will return the row with index 1000 as its first row. Therefore, the
                 * FilterDelegateRowIterator that handles the column-13-greater-than-42-predicate has to be
                 * provided with a copied filter with adjusted from- and toRowIndices.
                 */
                final TableFilter offsetFilter = //
                    new TableFilter.Builder(filter).withFromRowIndex(//
                        0).withToRowIndex(//
                        toIndex - fromIndex).build();
                return new FilterDelegateRowIterator(rangeIterator, offsetFilter, size(), exec);
            } else // Case 4: We are currently iterating the table back into memory and want to apply a filter.
            {
                final FromListIterator listIt = new FromListIterator(list, exec);
                listIt.setBackIntoMemoryIterator(backIntoMemoryIt);
                return new FilterDelegateRowIterator(listIt, filter, size(), exec);
            }
        }
        // Case 5: We have at least parts of the table in memory and no filter needs to be applied
        // (either no filter was given, or the table is empty).
        final FromListIterator listIt = new FromListIterator(list, exec);
        if (backIntoMemoryIt != null) {
            // Keep feeding rows from the ongoing read-back while iterating the in-memory prefix.
            listIt.setBackIntoMemoryIterator(backIntoMemoryIt);
        }
        return listIt;
    }
}
Aggregations