use of io.crate.data.CollectionBucket in project crate by crate.
the class PageDownstreamContextTest method testCantSetSameBucketTwiceWithoutReceivingFullPage.
@Test
public void testCantSetSameBucketTwiceWithoutReceivingFullPage() throws Throwable {
    TestingBatchConsumer batchConsumer = new TestingBatchConsumer();
    PageBucketReceiver ctx = getPageDownstreamContext(batchConsumer, PassThroughPagingIterator.oneShot(), 3);
    PageResultListener pageResultListener = mock(PageResultListener.class);
    Bucket bucket = new CollectionBucket(Collections.singletonList(new Object[] { "foo" }));
    // Setting the same bucket of an unfinished page twice must fail; the
    // IllegalStateException surfaces when the consumer's result is retrieved.
    ctx.setBucket(1, bucket, false, pageResultListener);
    ctx.setBucket(1, bucket, false, pageResultListener);
    expectedException.expect(IllegalStateException.class);
    expectedException.expectMessage("Same bucket of a page set more than once. node=n1 method=setBucket phaseId=1 bucket=1");
    batchConsumer.getResult();
}
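CollectionBucket simply adapts an in-memory collection of Object[] rows to the Bucket interface, which is why it is so convenient for feeding fixed test data into paging code like the receiver above. A minimal standalone sketch of that behaviour, assuming the crate data module is on the classpath (the row values here are made up):
import java.util.Arrays;
import java.util.List;

import io.crate.data.Bucket;
import io.crate.data.CollectionBucket;
import io.crate.data.Row;

class CollectionBucketSketch {

    public static void main(String[] args) {
        // Each Object[] is one row; the bucket reports the collection's
        // size and iterates the rows in collection order.
        List<Object[]> rows = Arrays.asList(
            new Object[] { "foo", 1 },
            new Object[] { "bar", 2 });
        Bucket bucket = new CollectionBucket(rows);
        System.out.println("rows: " + bucket.size());
        for (Row row : bucket) {
            // materialize() copies the row's cells into a plain Object[].
            System.out.println(Arrays.toString(row.materialize()));
        }
    }
}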
use of io.crate.data.CollectionBucket in project crate by crate.
the class HandlerSideLevelCollectTest method collect.
private Bucket collect(RoutedCollectPhase collectPhase) throws Exception {
    TestingBatchConsumer consumer = new TestingBatchConsumer();
    CrateCollector collector = operation.createCollector(collectPhase, consumer, mock(JobCollectContext.class));
    operation.launchCollector(collector, JobCollectContext.threadPoolName(collectPhase, clusterService().localNode().getId()));
    return new CollectionBucket(consumer.getResult());
}
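The helper blocks in consumer.getResult() until the collector has emitted all rows, then wraps them in a CollectionBucket so callers can make Bucket-level assertions. The same shape reduced to its essentials, as a sketch in which a CompletableFuture stands in for TestingBatchConsumer's internal result future:
import java.util.List;
import java.util.concurrent.CompletableFuture;

import io.crate.data.Bucket;
import io.crate.data.CollectionBucket;

class CollectShapeSketch {

    // An asynchronous producer completes the future with the collected
    // rows; wrapping them in a CollectionBucket gives callers a Bucket.
    static Bucket collect(CompletableFuture<List<Object[]>> collectedRows) throws Exception {
        return new CollectionBucket(collectedRows.get());
    }
}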
use of io.crate.data.CollectionBucket in project crate by crate.
the class InsertFromValues method execute.
@Override
public void execute(DependencyCarrier dependencies,
                    PlannerContext plannerContext,
                    RowConsumer consumer,
                    Row params,
                    SubQueryResults subQueryResults) {
    DocTableInfo tableInfo = dependencies.schemas().getTableInfo(writerProjection.tableIdent(), Operation.INSERT);
    // For instance, the target table of the insert from values
    // statement is the table with the following schema:
    //
    //   CREATE TABLE users (
    //       dep_id TEXT,
    //       name TEXT,
    //       id INT,
    //       country_id INT,
    //       PRIMARY KEY (dep_id, id, country_id))
    //   CLUSTERED BY (dep_id)
    //   PARTITIONED BY (country_id)
    //
    // The insert from values statement below would have the column
    // index writer projection of its plan that contains the column
    // idents and symbols required to create corresponding inputs.
    // The diagram below shows the projection's column symbols used
    // in the plan and the relation between the symbol sub-/sets.
    //
    //                        +------------------------+
    //                        |         +--------------+ PK symbols
    //  cluster by +------+   |         |       +------+
    //  symbol            |   |         |       |
    //                    +   +         +       +
    // INSERT INTO users (dep_id, name, id, country_id) VALUES (?, ?, ?, ?)
    //                    +       +     +    +     +
    //             +------+       |     |    |     |
    //  all target +--------------+     |    |     +---+ partitioned by
    //  column     +--------------------+    |           symbols
    //  symbols    +-------------------------+
    InputFactory inputFactory = new InputFactory(dependencies.nodeContext());
    InputFactory.Context<CollectExpression<Row, ?>> context =
        inputFactory.ctxForInputColumns(plannerContext.transactionContext());
    var allColumnSymbols = InputColumns.create(
        writerProjection.allTargetColumns(),
        new InputColumns.SourceSymbols(writerProjection.allTargetColumns()));
    ArrayList<Input<?>> insertInputs = new ArrayList<>(allColumnSymbols.size());
    for (Symbol symbol : allColumnSymbols) {
        insertInputs.add(context.add(symbol));
    }
    ArrayList<Input<?>> partitionedByInputs = new ArrayList<>(writerProjection.partitionedBySymbols().size());
    for (Symbol partitionedBySymbol : writerProjection.partitionedBySymbols()) {
        partitionedByInputs.add(context.add(partitionedBySymbol));
    }
    ArrayList<Input<?>> primaryKeyInputs = new ArrayList<>(writerProjection.ids().size());
    for (Symbol symbol : writerProjection.ids()) {
        primaryKeyInputs.add(context.add(symbol));
    }
    Input<?> clusterByInput;
    if (writerProjection.clusteredBy() != null) {
        clusterByInput = context.add(writerProjection.clusteredBy());
    } else {
        clusterByInput = null;
    }
    String[] updateColumnNames;
    Symbol[] assignmentSources;
    if (writerProjection.onDuplicateKeyAssignments() == null) {
        updateColumnNames = null;
        assignmentSources = null;
    } else {
        Assignments assignments = Assignments.convert(writerProjection.onDuplicateKeyAssignments(), dependencies.nodeContext());
        assignmentSources = assignments.bindSources(tableInfo, params, subQueryResults);
        updateColumnNames = assignments.targetNames();
    }
    var indexNameResolver = IndexNameResolver.create(
        writerProjection.tableIdent(),
        writerProjection.partitionIdent(),
        partitionedByInputs);
    GroupRowsByShard<ShardUpsertRequest, ShardUpsertRequest.Item> grouper =
        createRowsByShardGrouper(
            assignmentSources,
            insertInputs,
            indexNameResolver,
            context,
            plannerContext,
            dependencies.clusterService());
    ArrayList<Row> rows = new ArrayList<>();
    evaluateValueTableFunction(
        tableFunctionRelation.functionImplementation(),
        tableFunctionRelation.function().arguments(),
        writerProjection.allTargetColumns(),
        tableInfo,
        params,
        plannerContext,
        subQueryResults
    ).forEachRemaining(rows::add);
    List<Symbol> returnValues = this.writerProjection.returnValues();
    ShardUpsertRequest.Builder builder = new ShardUpsertRequest.Builder(
        plannerContext.transactionContext().sessionSettings(),
        BULK_REQUEST_TIMEOUT_SETTING.get(dependencies.settings()),
        writerProjection.isIgnoreDuplicateKeys()
            ? ShardUpsertRequest.DuplicateKeyAction.IGNORE
            : ShardUpsertRequest.DuplicateKeyAction.UPDATE_OR_FAIL,
        rows.size() > 1, // continueOnErrors
        updateColumnNames,
        writerProjection.allTargetColumns().toArray(new Reference[0]),
        returnValues.isEmpty() ? null : returnValues.toArray(new Symbol[0]),
        plannerContext.jobId(),
        false);
    var shardedRequests = new ShardedRequests<>(builder::newRequest, RamAccounting.NO_ACCOUNTING);
    HashMap<String, InsertSourceFromCells> validatorsCache = new HashMap<>();
    for (Row row : rows) {
        grouper.accept(shardedRequests, row);
        try {
            checkPrimaryKeyValuesNotNull(primaryKeyInputs);
            checkClusterByValueNotNull(clusterByInput);
            checkConstraintsOnGeneratedSource(
                row.materialize(),
                indexNameResolver.get(),
                tableInfo,
                plannerContext,
                validatorsCache);
        } catch (Throwable t) {
            consumer.accept(null, t);
            return;
        }
    }
    validatorsCache.clear();
    var actionProvider = dependencies.transportActionProvider();
    createIndices(
        actionProvider.transportBulkCreateIndicesAction(),
        shardedRequests.itemsByMissingIndex().keySet(),
        dependencies.clusterService(),
        plannerContext.jobId()
    ).thenCompose(acknowledgedResponse -> {
        var shardUpsertRequests = resolveAndGroupShardRequests(shardedRequests, dependencies.clusterService()).values();
        return execute(
            dependencies.nodeLimits(),
            dependencies.clusterService().state(),
            shardUpsertRequests,
            actionProvider.transportShardUpsertAction(),
            dependencies.scheduler());
    }).whenComplete((response, t) -> {
        if (t == null) {
            if (returnValues.isEmpty()) {
                consumer.accept(InMemoryBatchIterator.of(new Row1((long) response.numSuccessfulWrites()), SENTINEL), null);
            } else {
                consumer.accept(InMemoryBatchIterator.of(new CollectionBucket(response.resultRows()), SENTINEL, false), null);
            }
        } else {
            consumer.accept(null, t);
        }
    });
}
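On success the consumer receives either a single row carrying the affected-row count or, when the statement has a RETURNING clause, the result rows wrapped in a CollectionBucket. The two branches condensed into hypothetical helpers (the names are mine, not crate's; this assumes resultRows() yields the rows as Object[] cells, which is what the CollectionBucket constructor requires, and the trailing boolean is passed through exactly as in the method above):
// Hypothetical helpers mirroring the whenComplete callback above.
static void emitRowCount(RowConsumer consumer, long numSuccessfulWrites) {
    consumer.accept(InMemoryBatchIterator.of(new Row1(numSuccessfulWrites), SENTINEL), null);
}

static void emitResultRows(RowConsumer consumer, List<Object[]> resultRows) {
    consumer.accept(InMemoryBatchIterator.of(new CollectionBucket(resultRows), SENTINEL, false), null);
}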
use of io.crate.data.CollectionBucket in project crate by crate.
the class ProjectionToProjectorVisitorTest method testFilterProjection.
@Test
public void testFilterProjection() throws Exception {
    List<Symbol> arguments = Arrays.asList(Literal.of(2), new InputColumn(1));
    EqOperator op = (EqOperator) nodeCtx.functions().get(null, EqOperator.NAME, arguments, SearchPath.pathWithPGCatalogAndDoc());
    Function function = new Function(op.signature(), arguments, EqOperator.RETURN_TYPE);
    FilterProjection projection = new FilterProjection(function, Arrays.asList(new InputColumn(0), new InputColumn(1)));
    Projector projector = visitor.create(projection, txnCtx, RamAccounting.NO_ACCOUNTING, memoryManager, UUID.randomUUID());
    assertThat(projector, instanceOf(FilterProjector.class));
    List<Object[]> rows = new ArrayList<>();
    rows.add($("human", 2));
    rows.add($("vogon", 1));
    BatchIterator<Row> filteredBI = projector.apply(InMemoryBatchIterator.of(new CollectionBucket(rows), SENTINEL, true));
    TestingRowConsumer consumer = new TestingRowConsumer();
    consumer.accept(filteredBI, null);
    Bucket bucket = consumer.getBucket();
    assertThat(bucket.size(), is(1));
}
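The projection keeps only rows for which eq(2, InputColumn(1)) holds, so of the two input rows only ("human", 2) survives, which is exactly what the bucket.size() assertion checks. The same predicate in plain Java, illustrative only and without any crate types:
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

class FilterSketch {

    public static void main(String[] args) {
        List<Object[]> rows = Arrays.asList(
            new Object[] { "human", 2 },
            new Object[] { "vogon", 1 });
        // eq(2, column 1): keep rows whose second cell equals 2.
        List<Object[]> filtered = rows.stream()
            .filter(row -> Integer.valueOf(2).equals(row[1]))
            .collect(Collectors.toList());
        System.out.println(filtered.size()); // 1, matching the assertion
    }
}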
use of io.crate.data.CollectionBucket in project crate by crate.
the class ProjectionToProjectorVisitorTest method testAggregationProjector.
@Test
public void testAggregationProjector() throws Exception {
    AggregationProjection projection = new AggregationProjection(
        Arrays.asList(
            new Aggregation(
                avgSignature,
                avgSignature.getReturnType().createType(),
                Collections.singletonList(new InputColumn(1))),
            new Aggregation(
                CountAggregation.SIGNATURE,
                CountAggregation.SIGNATURE.getReturnType().createType(),
                Collections.singletonList(new InputColumn(0)))),
        RowGranularity.SHARD,
        AggregateMode.ITER_FINAL);
    Projector projector = visitor.create(projection, txnCtx, RamAccounting.NO_ACCOUNTING, memoryManager, UUID.randomUUID());
    assertThat(projector, instanceOf(AggregationPipe.class));
    BatchIterator<Row> batchIterator = projector.apply(
        InMemoryBatchIterator.of(new CollectionBucket(Arrays.asList($("foo", 10), $("bar", 20))), SENTINEL, true));
    TestingRowConsumer consumer = new TestingRowConsumer();
    consumer.accept(batchIterator, null);
    Bucket rows = consumer.getBucket();
    assertThat(rows.size(), is(1));
    assertThat(rows, contains(isRow(15.0, 2L)));
}
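The projector folds the two input rows into a single result row: avg over column 1 is (10 + 20) / 2 = 15.0 and count over column 0 is 2, matching the isRow(15.0, 2L) assertion. The same arithmetic in plain Java, as a sketch without crate's aggregation framework:
import java.util.Arrays;
import java.util.List;

class AggregationSketch {

    public static void main(String[] args) {
        List<Object[]> rows = Arrays.asList(
            new Object[] { "foo", 10 },
            new Object[] { "bar", 20 });
        long count = rows.size(); // count(column 0) -> 2
        double avg = rows.stream()
            .mapToInt(row -> (int) row[1])
            .average()
            .orElse(Double.NaN); // avg(column 1) -> 15.0
        System.out.println(avg + ", " + count);
    }
}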