Use of io.crate.execution.engine.distribution.StreamBucket in project crate by crate.
The class FetchCollector, method collect:
public StreamBucket collect(IntArrayList docIds) {
    boolean collectSequential = isSequential(docIds);
    StreamBucket.Builder builder = new StreamBucket.Builder(streamers, ramAccounting);
    try (var borrowed = fetchTask.searcher(readerId)) {
        var searcher = borrowed.item();
        List<LeafReaderContext> leaves = searcher.getTopReaderContext().leaves();
        var readerContexts = new IntObjectHashMap<ReaderContext>(leaves.size());
        for (var cursor : docIds) {
            int docId = cursor.value;
            // Map the global docId to the Lucene leaf (segment) that contains it.
            int readerIndex = readerIndex(docId, leaves);
            LeafReaderContext subReaderContext = leaves.get(readerIndex);
            try {
                // Reader contexts are created lazily and cached per leaf.
                var readerContext = readerContexts.get(readerIndex);
                if (readerContext == null) {
                    if (collectSequential) {
                        // Sequential access allows a merge-oriented stored-field reader.
                        var storedFieldReader = sequentialStoredFieldReader(subReaderContext);
                        readerContext = new ReaderContext(subReaderContext, storedFieldReader::visitDocument);
                    } else {
                        readerContext = new ReaderContext(subReaderContext);
                    }
                    readerContexts.put(readerIndex, readerContext);
                }
                // Position the fetch expressions on the segment-local doc id.
                setNextDocId(readerContext, docId - subReaderContext.docBase);
            } catch (IOException e) {
                Exceptions.rethrowRuntimeException(e);
            }
            // `row` is a field of FetchCollector whose inputs were just positioned by setNextDocId.
            builder.add(row);
        }
    }
    return builder.build();
}
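The readerIndex and isSequential helpers are not shown in the snippet. For reference, mapping a global Lucene doc id to its leaf is conventionally a binary search over the leaves' docBase offsets, which Lucene ships as ReaderUtil.subIndex; the isAscending check below is a hypothetical stand-in for isSequential, not the CrateDB implementation. A minimal sketch using only stock Lucene classes:

import java.util.List;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;

final class LeafLookup {

    // Binary search over the leaves' docBase offsets; the standard Lucene
    // way to find which segment a global doc id belongs to.
    static int readerIndex(int docId, List<LeafReaderContext> leaves) {
        return ReaderUtil.subIndex(docId, leaves);
    }

    // Hypothetical stand-in for isSequential: treats a batch as sequential
    // when the doc ids are strictly ascending, which is what makes a
    // sequential stored-field reader worthwhile.
    static boolean isAscending(int[] docIds) {
        for (int i = 1; i < docIds.length; i++) {
            if (docIds[i] <= docIds[i - 1]) {
                return false;
            }
        }
        return true;
    }
}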
Use of io.crate.execution.engine.distribution.StreamBucket in project crate by crate.
The class NodeFetchOperation, method doFetch:
private CompletableFuture<? extends IntObjectMap<StreamBucket>> doFetch(FetchTask fetchTask,
                                                                        IntObjectMap<IntArrayList> toFetch) throws Exception {
    HashMap<RelationName, TableFetchInfo> tableFetchInfos = getTableFetchInfos(fetchTask);
    // RamAccounting is per doFetch call instead of per FetchTask/fetchPhase,
    // so that the accounted memory can be released as soon as the operation completes.
    final var ramAccounting = ConcurrentRamAccounting.forCircuitBreaker("fetch-" + fetchTask.id(), circuitBreaker);
    ArrayList<Supplier<StreamBucket>> collectors = new ArrayList<>(toFetch.size());
    for (IntObjectCursor<IntArrayList> toFetchCursor : toFetch) {
        final int readerId = toFetchCursor.key;
        final IntArrayList docIds = toFetchCursor.value;
        RelationName ident = fetchTask.tableIdent(readerId);
        final TableFetchInfo tfi = tableFetchInfos.get(ident);
        assert tfi != null : "tfi must not be null";
        var collector = tfi.createCollector(
            readerId,
            new BlockBasedRamAccounting(ramAccounting::addBytes, BlockBasedRamAccounting.MAX_BLOCK_SIZE_IN_BYTES)
        );
        collectors.add(() -> collector.collect(docIds));
    }
    return ThreadPools
        .runWithAvailableThreads(executor, ThreadPools.numIdleThreads(executor, numProcessors), collectors)
        .thenApply(buckets -> {
            var toFetchIt = toFetch.iterator();
            assert toFetch.size() == buckets.size()
                : "Must have a bucket per reader and they must be in the same order";
            IntObjectHashMap<StreamBucket> bucketByReader = new IntObjectHashMap<>(toFetch.size());
            for (var bucket : buckets) {
                assert toFetchIt.hasNext() : "toFetchIt must have an element if there is one in buckets";
                int readerId = toFetchIt.next().key;
                bucketByReader.put(readerId, bucket);
            }
            return bucketByReader;
        })
        .whenComplete((result, err) -> ramAccounting.close());
}
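ThreadPools.runWithAvailableThreads is a CrateDB utility; the thenApply above relies on it returning one result per supplier, in the order the suppliers were added. A minimal sketch of that order-preserving pattern with plain CompletableFuture (class and method names here are illustrative, not CrateDB API):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;
import java.util.function.Supplier;

final class OrderedSuppliers {

    // Runs every supplier on the executor and completes with the results in
    // the same order as the input list -- the property the assertion in
    // doFetch relies on when it zips buckets back to reader ids.
    static <T> CompletableFuture<List<T>> runAll(List<Supplier<T>> suppliers, Executor executor) {
        List<CompletableFuture<T>> futures = new ArrayList<>(suppliers.size());
        for (Supplier<T> supplier : suppliers) {
            futures.add(CompletableFuture.supplyAsync(supplier, executor));
        }
        return CompletableFuture.allOf(futures.toArray(CompletableFuture[]::new))
            .thenApply(ignored -> {
                List<T> results = new ArrayList<>(futures.size());
                for (CompletableFuture<T> future : futures) {
                    results.add(future.join()); // safe: allOf already completed
                }
                return results;
            });
    }
}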
Use of io.crate.execution.engine.distribution.StreamBucket in project crate by crate.
The class NodeFetchOperation, method fetch:
public CompletableFuture<? extends IntObjectMap<StreamBucket>> fetch(UUID jobId,
                                                                     int phaseId,
                                                                     @Nullable IntObjectMap<IntArrayList> docIdsToFetch,
                                                                     boolean closeTaskOnFinish) {
    if (docIdsToFetch == null) {
        // Nothing to fetch: optionally close the task and report an empty result.
        if (closeTaskOnFinish) {
            tryCloseTask(jobId, phaseId);
        }
        jobsLogs.operationStarted(phaseId, jobId, "fetch", () -> -1);
        jobsLogs.operationFinished(phaseId, jobId, null);
        return CompletableFuture.completedFuture(new IntObjectHashMap<>(0));
    }
    RootTask context = tasksService.getTask(jobId);
    FetchTask fetchTask = context.getTask(phaseId);
    jobsLogs.operationStarted(phaseId, jobId, "fetch", () -> -1);
    BiConsumer<? super IntObjectMap<StreamBucket>, ? super Throwable> whenComplete = (res, err) -> {
        if (closeTaskOnFinish) {
            if (err == null) {
                fetchTask.close();
            } else {
                fetchTask.kill(err);
            }
        }
        if (err == null) {
            jobsLogs.operationFinished(phaseId, jobId, null);
        } else {
            jobsLogs.operationFinished(phaseId, jobId, SQLExceptions.messageOf(err));
        }
    };
    try {
        return doFetch(fetchTask, docIdsToFetch).whenComplete(whenComplete);
    } catch (Throwable t) {
        // doFetch can throw synchronously; route the failure through the same completion handler.
        whenComplete.accept(null, t);
        return CompletableFuture.failedFuture(t);
    }
}
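A hypothetical caller sketch: the operation instance (fetchOp), the phase id, and the doc ids below are assumptions for illustration and do not come from the sources above. It shows a fetch keyed by readerId and iterates the resulting buckets with the same HPPC cursor style the snippets use:

// Assumed setup: `fetchOp` is a NodeFetchOperation and readerId 0 maps
// to some table indexed on this node.
IntObjectMap<IntArrayList> docIdsToFetch = new IntObjectHashMap<>();
docIdsToFetch.put(0, IntArrayList.from(0, 1, 2)); // readerId 0 -> doc ids to fetch

fetchOp.fetch(UUID.randomUUID(), 1, docIdsToFetch, true)
    .whenComplete((buckets, err) -> {
        if (err != null) {
            err.printStackTrace(); // the task was already killed inside fetch's handler
        } else {
            for (IntObjectCursor<StreamBucket> cursor : buckets) {
                System.out.println("reader " + cursor.key + " -> " + cursor.value.size() + " rows");
            }
        }
    });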