Example 1 with NodeLimits

Use of io.crate.execution.jobs.NodeLimits in project crate by crate.

From the class ProjectionToProjectorVisitorTest, method prepare:

@Before
public void prepare() {
    nodeCtx = createNodeContext();
    MockitoAnnotations.initMocks(this);
    visitor = new ProjectionToProjectorVisitor(
        clusterService,
        new NodeLimits(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        new NoneCircuitBreakerService(),
        nodeCtx,
        THREAD_POOL,
        Settings.EMPTY,
        mock(TransportActionProvider.class, Answers.RETURNS_DEEP_STUBS),
        new InputFactory(nodeCtx),
        EvaluatingNormalizer.functionOnlyNormalizer(nodeCtx),
        t -> null,
        t -> null);
    memoryManager = new OnHeapMemoryManager(usedBytes -> {
    });
    avgSignature = Signature.aggregate("avg", DataTypes.INTEGER.getTypeSignature(), DataTypes.DOUBLE.getTypeSignature());
}
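
The NodeLimits-specific part of this setup is the second constructor argument: the visitor receives a NodeLimits registry built from the node's cluster settings (ClusterSettings carries the dynamic settings, so the limits can presumably react to runtime changes). A minimal sketch of just that construction, using only classes already present in the example; the variable name is illustrative:

import io.crate.execution.jobs.NodeLimits;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;

// The same expression the test passes to ProjectionToProjectorVisitor:
// a NodeLimits registry backed by the built-in dynamic cluster settings.
NodeLimits nodeLimits = new NodeLimits(
    new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS));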

Example 2 with NodeLimits

Use of io.crate.execution.jobs.NodeLimits in project crate by crate.

From the class ProjectorsTest, method prepare:

@Before
public void prepare() throws Exception {
    nodeCtx = createNodeContext();
    memoryManager = new OnHeapMemoryManager(bytes -> {
    });
    projectorFactory = new ProjectionToProjectorVisitor(
        clusterService,
        new NodeLimits(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        new NoneCircuitBreakerService(),
        nodeCtx,
        THREAD_POOL,
        Settings.EMPTY,
        mock(TransportActionProvider.class, Answers.RETURNS_DEEP_STUBS),
        new InputFactory(nodeCtx),
        new EvaluatingNormalizer(
            nodeCtx,
            RowGranularity.SHARD,
            r -> Literal.ofUnchecked(r.valueType(), r.valueType().sanitizeValue("1")),
            null),
        t -> null,
        t -> null,
        Version.CURRENT,
        new ShardId("dummy", UUID.randomUUID().toString(), 0),
        null);
}
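
Relative to Example 1, this shard-level variant appends Version.CURRENT, a ShardId, and a trailing null, and swaps the function-only normalizer for a custom EvaluatingNormalizer. Unpacked from the long constructor call above, purely for readability, that normalizer is:

// Normalizes at SHARD granularity; any reference is replaced by a literal of
// the reference's own type, built from the sanitized placeholder value "1".
EvaluatingNormalizer normalizer = new EvaluatingNormalizer(
    nodeCtx,
    RowGranularity.SHARD,
    r -> Literal.ofUnchecked(r.valueType(), r.valueType().sanitizeValue("1")),
    null);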

Example 3 with NodeLimits

Use of io.crate.execution.jobs.NodeLimits in project crate by crate.

From the class InsertFromValues, method execute:

private CompletableFuture<ShardResponse.CompressedResult> execute(NodeLimits nodeLimits,
                                                                  ClusterState state,
                                                                  Collection<ShardUpsertRequest> shardUpsertRequests,
                                                                  TransportShardUpsertAction shardUpsertAction,
                                                                  ScheduledExecutorService scheduler) {
    ShardResponse.CompressedResult compressedResult = new ShardResponse.CompressedResult();
    if (shardUpsertRequests.isEmpty()) {
        return CompletableFuture.completedFuture(compressedResult);
    }
    CompletableFuture<ShardResponse.CompressedResult> result = new CompletableFuture<>();
    AtomicInteger numRequests = new AtomicInteger(shardUpsertRequests.size());
    AtomicReference<Throwable> lastFailure = new AtomicReference<>(null);
    // Completes `result` once every shard request has been answered, choosing
    // between normal completion and failure based on the last recorded error.
    Consumer<ShardUpsertRequest> countdown = request -> {
        if (numRequests.decrementAndGet() == 0) {
            Throwable throwable = lastFailure.get();
            if (throwable == null) {
                result.complete(compressedResult);
            } else {
                throwable = SQLExceptions.unwrap(throwable, t -> t instanceof RuntimeException);
                // Duplicate key exceptions must be reported to the caller; failures
                // caused by a concurrently deleted or closed partition, or by mixed
                // argument types, are swallowed because other rows may already have
                // been written successfully.
                if (!SQLExceptions.isDocumentAlreadyExistsException(throwable)
                        && (partitionWasDeleted(throwable, request.index())
                            || partitionClosed(throwable, request.index())
                            || mixedArgumentTypesFailure(throwable))) {
                    result.complete(compressedResult);
                } else {
                    result.completeExceptionally(throwable);
                }
            }
        }
    };
    for (ShardUpsertRequest request : shardUpsertRequests) {
        String nodeId;
        try {
            nodeId = state.routingTable().shardRoutingTable(request.shardId()).primaryShard().currentNodeId();
        } catch (IndexNotFoundException e) {
            lastFailure.set(e);
            if (!IndexParts.isPartitioned(request.index())) {
                synchronized (compressedResult) {
                    compressedResult.markAsFailed(request.items());
                }
            }
            countdown.accept(request);
            continue;
        }
        // Per-node adaptive concurrency limit; every request contributes a
        // latency/overload sample via startSample()/onSample().
        final ConcurrencyLimit nodeLimit = nodeLimits.get(nodeId);
        final long startTime = nodeLimit.startSample();
        ActionListener<ShardResponse> listener = new ActionListener<>() {

            @Override
            public void onResponse(ShardResponse shardResponse) {
                Throwable throwable = shardResponse.failure();
                if (throwable == null) {
                    nodeLimit.onSample(startTime, false);
                    synchronized (compressedResult) {
                        compressedResult.update(shardResponse);
                    }
                } else {
                    nodeLimit.onSample(startTime, true);
                    lastFailure.set(throwable);
                }
                countdown.accept(request);
            }

            @Override
            public void onFailure(Exception e) {
                nodeLimit.onSample(startTime, true);
                Throwable t = SQLExceptions.unwrap(e);
                if (!partitionWasDeleted(t, request.index())) {
                    synchronized (compressedResult) {
                        compressedResult.markAsFailed(request.items());
                    }
                }
                lastFailure.set(t);
                countdown.accept(request);
            }
        };
        shardUpsertAction.execute(
            request,
            new RetryListener<>(
                scheduler,
                l -> shardUpsertAction.execute(request, l),
                listener,
                // Retry backoff adapts to the node's current concurrency limit.
                BackoffPolicy.limitedDynamic(nodeLimit)));
    }
    return result;
}
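
The NodeLimits interaction above boils down to a sample-per-request protocol: look up the ConcurrencyLimit for the target node, take a start timestamp, and report the outcome so the limiter can adapt; the same limiter also paces retries through BackoffPolicy.limitedDynamic. A stripped-down sketch of that protocol, where sendToNode is a hypothetical stand-in for the transport call and everything else is named as in the method above:

ConcurrencyLimit nodeLimit = nodeLimits.get(nodeId);
long startTime = nodeLimit.startSample();
try {
    sendToNode(nodeId, request);           // hypothetical transport call
    nodeLimit.onSample(startTime, false);  // success: latency sample, no overload
} catch (Exception e) {
    nodeLimit.onSample(startTime, true);   // failure: counted as an overload signal
}
// Retries reuse the same limiter so the backoff adapts to the node's load:
// new RetryListener<>(scheduler, ..., BackoffPolicy.limitedDynamic(nodeLimit))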

Example 4 with NodeLimits

Use of io.crate.execution.jobs.NodeLimits in project crate by crate.

From the class IndexWriterProjectorTest, method testIndexWriter:

@Test
public void testIndexWriter() throws Throwable {
    execute("create table bulk_import (id int primary key, name string) with (number_of_replicas=0)");
    ensureGreen();
    InputCollectExpression sourceInput = new InputCollectExpression(1);
    List<CollectExpression<Row, ?>> collectExpressions = Collections.<CollectExpression<Row, ?>>singletonList(sourceInput);
    RelationName bulkImportIdent = new RelationName(sqlExecutor.getCurrentSchema(), "bulk_import");
    ClusterState state = clusterService().state();
    Settings tableSettings = TableSettingsResolver.get(state.getMetadata(), bulkImportIdent, false);
    ThreadPool threadPool = internalCluster().getInstance(ThreadPool.class);
    IndexWriterProjector writerProjector = new IndexWriterProjector(
        clusterService(),
        new NodeLimits(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        new NoopCircuitBreaker("dummy"),
        RamAccounting.NO_ACCOUNTING,
        threadPool.scheduler(),
        threadPool.executor(ThreadPool.Names.SEARCH),
        CoordinatorTxnCtx.systemTransactionContext(),
        new NodeContext(internalCluster().getInstance(Functions.class)),
        Settings.EMPTY,
        IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(tableSettings),
        NumberOfReplicas.fromSettings(tableSettings, state.getNodes().getSize()),
        internalCluster().getInstance(TransportCreatePartitionsAction.class),
        internalCluster().getInstance(TransportShardUpsertAction.class)::execute,
        IndexNameResolver.forTable(bulkImportIdent),
        new Reference(new ReferenceIdent(bulkImportIdent, DocSysColumns.RAW), RowGranularity.DOC, DataTypes.STRING, 0, null),
        Collections.singletonList(ID_IDENT),
        Collections.<Symbol>singletonList(new InputColumn(0)),
        null,
        null,
        sourceInput,
        collectExpressions,
        20,
        null,
        null,
        false,
        false,
        UUID.randomUUID(),
        UpsertResultContext.forRowCount(),
        false);
    BatchIterator rowsIterator = InMemoryBatchIterator.of(
        IntStream.range(0, 100)
            .mapToObj(i -> new RowN(new Object[] { i, "{\"id\": " + i + ", \"name\": \"Arthur\"}" }))
            .collect(Collectors.toList()),
        SENTINEL,
        true);
    TestingRowConsumer consumer = new TestingRowConsumer();
    consumer.accept(writerProjector.apply(rowsIterator), null);
    Bucket objects = consumer.getBucket();
    assertThat(objects, contains(isRow(100L)));
    execute("refresh table bulk_import");
    execute("select count(*) from bulk_import");
    assertThat(response.rowCount(), is(1L));
    assertThat(response.rows()[0][0], is(100L));
}
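
Beyond the NodeLimits argument, the test shows the usual projector pipeline: wrap the input rows in a BatchIterator, apply the projector, and drain it with a row consumer. Condensed to its skeleton (names taken from the test above; items stands for the generated row list):

BatchIterator rows = InMemoryBatchIterator.of(items, SENTINEL, true);
TestingRowConsumer consumer = new TestingRowConsumer();
consumer.accept(writerProjector.apply(rows), null);  // null: no upstream failure
Bucket written = consumer.getBucket();               // one row with the write count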

Example 5 with NodeLimits

Use of io.crate.execution.jobs.NodeLimits in project crate by crate.

From the class IndexWriterProjectorUnitTest, method testNullPKValue:

@Test
public void testNullPKValue() throws Throwable {
    InputCollectExpression sourceInput = new InputCollectExpression(0);
    List<CollectExpression<Row, ?>> collectExpressions = Collections.<CollectExpression<Row, ?>>singletonList(sourceInput);
    TransportCreatePartitionsAction transportCreatePartitionsAction = mock(TransportCreatePartitionsAction.class);
    IndexWriterProjector indexWriter = new IndexWriterProjector(
        clusterService,
        new NodeLimits(new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
        new NoopCircuitBreaker("dummy"),
        RamAccounting.NO_ACCOUNTING,
        scheduler,
        executor,
        CoordinatorTxnCtx.systemTransactionContext(),
        createNodeContext(),
        Settings.EMPTY,
        5,
        1,
        transportCreatePartitionsAction,
        (request, listener) -> {
        },
        IndexNameResolver.forTable(BULK_IMPORT_IDENT),
        RAW_SOURCE_REFERENCE,
        Collections.singletonList(ID_IDENT),
        Collections.<Symbol>singletonList(new InputColumn(1)),
        null,
        null,
        sourceInput,
        collectExpressions,
        20,
        null,
        null,
        false,
        false,
        UUID.randomUUID(),
        UpsertResultContext.forRowCount(),
        false);
    RowN rowN = new RowN(new Object[] { new BytesRef("{\"y\": \"x\"}"), null });
    BatchIterator<Row> batchIterator = InMemoryBatchIterator.of(Collections.singletonList(rowN), SENTINEL, true);
    batchIterator = indexWriter.apply(batchIterator);
    TestingRowConsumer testingBatchConsumer = new TestingRowConsumer();
    testingBatchConsumer.accept(batchIterator, null);
    List<Object[]> result = testingBatchConsumer.getResult();
    // Zero affected rows as a NULL as a PK value will result in an exception.
    // It must never bubble up as other rows might already have been written.
    assertThat(result.get(0)[0], is(0L));
}
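
To trace how the NULL appears to reach the primary-key check: the key inputs are given as Collections.<Symbol>singletonList(new InputColumn(1)), so the key is read from the second cell of each input row, and that cell is null in the single row above. A comment-level sketch of the resulting flow:

// pk symbol:  new InputColumn(1)          -> reads the second cell of each row
// input row:  { "{\"y\": \"x\"}", null }  -> second cell is null
// The upsert for that row fails on the NULL key, the projector swallows the
// error, and the result row carries the affected-row count: { 0L }.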
