Use of io.crate.breaker.BlockBasedRamAccounting in project crate by crate.
From class JobSetup, method registerContextPhases:
private void registerContextPhases(Iterable<? extends NodeOperation> nodeOperations, Context context) {
    for (NodeOperation nodeOperation : nodeOperations) {
        // context for nodeOperations without dependencies can be built immediately (e.g. FetchPhase)
        if (nodeOperation.downstreamExecutionPhaseId() == NodeOperation.NO_DOWNSTREAM) {
            LOGGER.trace("Building context for nodeOp without downstream: {}", nodeOperation);
            createContexts(nodeOperation.executionPhase(), context);
            context.opCtx.builtNodeOperations.set(nodeOperation.executionPhase().phaseId());
        }
        if (ExecutionPhases.hasDirectResponseDownstream(nodeOperation.downstreamNodes())) {
            var executionPhase = nodeOperation.executionPhase();
            CircuitBreaker breaker = breaker();
            int ramAccountingBlockSizeInBytes = BlockBasedRamAccounting.blockSizeInBytes(breaker.getLimit());
            var ramAccounting = new BlockBasedRamAccounting(
                b -> breaker.addEstimateBytesAndMaybeBreak(b, executionPhase.label()),
                ramAccountingBlockSizeInBytes);
            Streamer<?>[] streamers = StreamerVisitor.streamersFromOutputs(executionPhase);
            SingleBucketBuilder bucketBuilder = new SingleBucketBuilder(streamers, ramAccounting);
            context.directResponseFutures.add(
                bucketBuilder.completionFuture().whenComplete((res, err) -> ramAccounting.close()));
            context.registerBatchConsumer(nodeOperation.downstreamExecutionPhaseId(), bucketBuilder);
        }
    }
}
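
The interesting part of the direct-response path is how the accounting is wired: the block size is derived from the breaker limit, every reservation goes through addEstimateBytesAndMaybeBreak, and everything is released once the bucket builder's completion future finishes. A minimal sketch of that wiring, with an AtomicLong and a plain CompletableFuture standing in for the circuit breaker and the SingleBucketBuilder (both are stand-ins, not the real classes):

import io.crate.breaker.BlockBasedRamAccounting;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicLong;

class DirectResponseAccountingSketch {

    public static void main(String[] args) {
        // Stand-in for the circuit breaker: just tracks the reserved bytes.
        AtomicLong reserved = new AtomicLong();

        // Arbitrary block size for the sketch; JobSetup derives it from
        // breaker.getLimit() via BlockBasedRamAccounting.blockSizeInBytes(...).
        int blockSize = 1024 * 1024;
        var ramAccounting = new BlockBasedRamAccounting(reserved::addAndGet, blockSize);

        // Stand-in for SingleBucketBuilder.completionFuture().
        CompletableFuture<Void> completionFuture = new CompletableFuture<>();

        // Release the accounted memory once the result has been handed off,
        // regardless of success or failure (mirrors the whenComplete above).
        completionFuture.whenComplete((res, err) -> ramAccounting.close());

        ramAccounting.addBytes(4096);      // rows are accounted while the bucket is built
        completionFuture.complete(null);   // bucket done; accounting is closed
    }
}
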
Use of io.crate.breaker.BlockBasedRamAccounting in project crate by crate.
From class ReaderBucketsTest, method test_reader_bucket_accounts_memory_for_added_rows:
@Test
public void test_reader_bucket_accounts_memory_for_added_rows() throws Exception {
    var e = SQLExecutor.builder(clusterService)
        .addTable("create table t1 (x text)")
        .build();
    var t1 = e.resolveTableInfo("t1");
    var x = (Reference) e.asSymbol("x");
    var fetchSource = new FetchSource();
    fetchSource.addFetchIdColumn(new InputColumn(0, DataTypes.LONG));
    fetchSource.addRefToFetch(x);
    var fetchRows = FetchRows.create(
        CoordinatorTxnCtx.systemTransactionContext(),
        TestingHelpers.createNodeContext(),
        Map.of(t1.ident(), fetchSource),
        List.of(
            new FetchReference(new InputColumn(0, DataTypes.LONG), x),
            new InputColumn(1, DataTypes.INTEGER)));
    var bytesAccounted = new AtomicLong();
    var ramAccounting = new BlockBasedRamAccounting(bytes -> bytesAccounted.addAndGet(bytes), 1024);
    int readerId = 1;
    var readerBuckets = new ReaderBuckets(
        fetchRows,
        reader -> fetchSource,
        new EstimateCellsSize(List.of(DataTypes.LONG, DataTypes.INTEGER)),
        ramAccounting);
    long fetchId = FetchId.encode(readerId, 1);
    readerBuckets.add(new RowN(fetchId, 42));
    assertThat(bytesAccounted.get(), is(1024L));
    assertThat(readerBuckets.ramBytesUsed(), is(40L));
    IntObjectHashMap<Bucket> bucketsByReader = new IntObjectHashMap<>();
    bucketsByReader.put(readerId, new CollectionBucket(List.<Object[]>of(new Object[] { "I eat memory for breakfast" })));
    IntHashSet readerIds = new IntHashSet(2);
    readerIds.add(readerId);
    readerBuckets.generateToFetch(readerIds);
    try (var outputRows = readerBuckets.getOutputRows(List.of(bucketsByReader))) {
        assertThat(bytesAccounted.get(), is(1024L));
        assertThat(readerBuckets.ramBytesUsed(), is(136L));
    }
    assertThat("After outputRows are closed the readerBuckets are released",
               readerBuckets.ramBytesUsed(), is(0L));
}
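
The two assertions on bytesAccounted are the essence of block-based accounting: adding a 40-byte row reserves a full 1024-byte block up front, and later rows that still fit into that block never hit the reservation callback again. A condensed sketch of just that behavior (run with -ea; the exact reservation sizes beyond the first block are an implementation detail):

import io.crate.breaker.BlockBasedRamAccounting;

import java.util.concurrent.atomic.AtomicLong;

class BlockRoundingSketch {

    public static void main(String[] args) {
        AtomicLong bytesAccounted = new AtomicLong();
        var ramAccounting = new BlockBasedRamAccounting(bytesAccounted::addAndGet, 1024);

        ramAccounting.addBytes(40);
        // A full block is reserved up front, even though only 40 bytes are used.
        assert bytesAccounted.get() == 1024L;

        ramAccounting.addBytes(96);
        // Still within the first block: no additional reservation is made.
        assert bytesAccounted.get() == 1024L;

        ramAccounting.addBytes(2000);
        // Crossing the block boundary reserves further blocks, so at least the
        // used bytes (40 + 96 + 2000) are accounted by now.
        assert bytesAccounted.get() >= 2048L;

        ramAccounting.close();
    }
}
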
Use of io.crate.breaker.BlockBasedRamAccounting in project crate by crate.
From class NodeFetchOperation, method doFetch:
private CompletableFuture<? extends IntObjectMap<StreamBucket>> doFetch(FetchTask fetchTask,
                                                                        IntObjectMap<IntArrayList> toFetch) throws Exception {
    HashMap<RelationName, TableFetchInfo> tableFetchInfos = getTableFetchInfos(fetchTask);
    // RamAccounting is per doFetch call instead of per FetchTask/fetchPhase
    // To be able to free up the memory count when the operation is complete
    final var ramAccounting = ConcurrentRamAccounting.forCircuitBreaker("fetch-" + fetchTask.id(), circuitBreaker);
    ArrayList<Supplier<StreamBucket>> collectors = new ArrayList<>(toFetch.size());
    for (IntObjectCursor<IntArrayList> toFetchCursor : toFetch) {
        final int readerId = toFetchCursor.key;
        final IntArrayList docIds = toFetchCursor.value;
        RelationName ident = fetchTask.tableIdent(readerId);
        final TableFetchInfo tfi = tableFetchInfos.get(ident);
        assert tfi != null : "tfi must not be null";
        var collector = tfi.createCollector(
            readerId,
            new BlockBasedRamAccounting(ramAccounting::addBytes, BlockBasedRamAccounting.MAX_BLOCK_SIZE_IN_BYTES));
        collectors.add(() -> collector.collect(docIds));
    }
    return ThreadPools.runWithAvailableThreads(executor, ThreadPools.numIdleThreads(executor, numProcessors), collectors)
        .thenApply(buckets -> {
            var toFetchIt = toFetch.iterator();
            assert toFetch.size() == buckets.size()
                : "Must have a bucket per reader and they must be in the same order";
            IntObjectHashMap<StreamBucket> bucketByReader = new IntObjectHashMap<>(toFetch.size());
            for (var bucket : buckets) {
                assert toFetchIt.hasNext() : "toFetchIt must have an element if there is one in buckets";
                int readerId = toFetchIt.next().key;
                bucketByReader.put(readerId, bucket);
            }
            return bucketByReader;
        }).whenComplete((result, err) -> ramAccounting.close());
}
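
doFetch layers the accounting: one ConcurrentRamAccounting per fetch call, shared by all readers, and one BlockBasedRamAccounting per collector so that concurrently collecting threads only touch the shared counter once per block rather than once per row. A rough sketch of that layering, with an AtomicLong standing in for ConcurrentRamAccounting and an arbitrary block size (the real code uses BlockBasedRamAccounting.MAX_BLOCK_SIZE_IN_BYTES):

import io.crate.breaker.BlockBasedRamAccounting;

import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

class SharedAccountingSketch {

    public static void main(String[] args) {
        // Stand-in for ConcurrentRamAccounting.forCircuitBreaker(...): one shared,
        // thread-safe byte counter for the whole fetch operation.
        AtomicLong sharedAccounting = new AtomicLong();

        int blockSize = 64 * 1024; // arbitrary for the sketch

        // One block-based wrapper per reader/collector, all funnelling into the
        // shared counter; each wrapper batches its updates into whole blocks.
        List<BlockBasedRamAccounting> perReader = List.of(
            new BlockBasedRamAccounting(sharedAccounting::addAndGet, blockSize),
            new BlockBasedRamAccounting(sharedAccounting::addAndGet, blockSize)
        );

        perReader.get(0).addBytes(500);
        perReader.get(1).addBytes(700);

        System.out.println("shared reservation: " + sharedAccounting.get());

        // In doFetch the shared accounting is closed via whenComplete once all
        // buckets have been produced; here we just close the wrappers.
        perReader.forEach(BlockBasedRamAccounting::close);
    }
}
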
Use of io.crate.breaker.BlockBasedRamAccounting in project crate by crate.
From class ReservoirSampler, method getSamples:
public Samples getSamples(RelationName relationName, List<Reference> columns, int maxSamples) {
    TableInfo table;
    try {
        table = schemas.getTableInfo(relationName);
    } catch (RelationUnknown e) {
        return Samples.EMPTY;
    }
    if (!(table instanceof DocTableInfo)) {
        return Samples.EMPTY;
    }
    DocTableInfo docTable = (DocTableInfo) table;
    Random random = Randomness.get();
    Metadata metadata = clusterService.state().metadata();
    CoordinatorTxnCtx coordinatorTxnCtx = CoordinatorTxnCtx.systemTransactionContext();
    List<Streamer> streamers = Arrays.asList(Symbols.streamerArray(columns));
    List<Engine.Searcher> searchersToRelease = new ArrayList<>();
    CircuitBreaker breaker = circuitBreakerService.getBreaker(HierarchyCircuitBreakerService.QUERY);
    RamAccounting ramAccounting = new BlockBasedRamAccounting(
        b -> breaker.addEstimateBytesAndMaybeBreak(b, "Reservoir-sampling"),
        MAX_BLOCK_SIZE_IN_BYTES);
    try {
        return getSamples(columns, maxSamples, docTable, random, metadata, coordinatorTxnCtx,
                          streamers, searchersToRelease, ramAccounting);
    } finally {
        ramAccounting.close();
        for (Engine.Searcher searcher : searchersToRelease) {
            searcher.close();
        }
    }
}
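
Because every reservation goes through addEstimateBytesAndMaybeBreak, sampling can abort mid-way with a circuit-breaker exception; the finally block guarantees that the accounted bytes and the searchers are released either way. A sketch of that failure path, using a throwing callback as a stand-in for the breaker (and assuming, as in the real code, that an exception thrown by the reservation callback propagates out of addBytes):

import io.crate.breaker.BlockBasedRamAccounting;

import java.util.concurrent.atomic.AtomicLong;

class SamplingAccountingSketch {

    public static void main(String[] args) {
        long limit = 8 * 1024;          // stand-in for the query breaker's limit
        AtomicLong reserved = new AtomicLong();

        // Stand-in for breaker.addEstimateBytesAndMaybeBreak(b, "Reservoir-sampling"):
        // reject the reservation once the limit would be exceeded.
        var ramAccounting = new BlockBasedRamAccounting(bytes -> {
            if (reserved.addAndGet(bytes) > limit) {
                throw new IllegalStateException("circuit breaker tripped");
            }
        }, 4096);

        try {
            ramAccounting.addBytes(3000);    // sampling rows...
            ramAccounting.addBytes(10_000);  // ...until the limit is exceeded and the callback throws
        } catch (IllegalStateException e) {
            System.out.println("sampling aborted: " + e.getMessage());
        } finally {
            // Mirrors the finally block above: accounting (and searchers) are
            // always released, whether or not the breaker tripped.
            ramAccounting.close();
        }
    }
}
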
Use of io.crate.breaker.BlockBasedRamAccounting in project crate by crate.
From class SqlHttpHandler, method executeSimpleRequest:
private CompletableFuture<XContentBuilder> executeSimpleRequest(Session session,
                                                                String stmt,
                                                                List<Object> args,
                                                                boolean includeTypes) throws IOException {
    long startTimeInNs = System.nanoTime();
    session.parse(UNNAMED, stmt, emptyList());
    session.bind(UNNAMED, UNNAMED, args == null ? emptyList() : args, null);
    DescribeResult description = session.describe('P', UNNAMED);
    List<Symbol> resultFields = description.getFields();
    ResultReceiver<XContentBuilder> resultReceiver;
    if (resultFields == null) {
        resultReceiver = new RestRowCountReceiver(JsonXContent.contentBuilder(), startTimeInNs, includeTypes);
    } else {
        CircuitBreaker breaker = circuitBreakerProvider.apply(HierarchyCircuitBreakerService.QUERY);
        RamAccounting ramAccounting = new BlockBasedRamAccounting(
            b -> breaker.addEstimateBytesAndMaybeBreak(b, "http-result"),
            MAX_BLOCK_SIZE_IN_BYTES);
        resultReceiver = new RestResultSetReceiver(
            JsonXContent.contentBuilder(),
            resultFields,
            startTimeInNs,
            new RowAccountingWithEstimators(Symbols.typeView(resultFields), ramAccounting),
            includeTypes);
        resultReceiver.completionFuture().whenComplete((result, error) -> ramAccounting.close());
    }
    session.execute(UNNAMED, 0, resultReceiver);
    return session.sync().thenCompose(ignored -> resultReceiver.completionFuture());
}