Use of io.crate.analyze.symbol.InputColumn in project crate by crate.
In class RowTransformingBatchIteratorTest, method createInputs:
@Before
public void createInputs() throws Exception {
    InputFactory inputFactory = new InputFactory(getFunctions());
    InputFactory.Context<CollectExpression<Row, ?>> ctx = inputFactory.ctxForInputColumns();
    // add(INPUT(0), 2): the first row column plus the literal 2
    inputs = Collections.singletonList(ctx.add(AddFunction.of(new InputColumn(0), Literal.of(2L))));
    expressions = ctx.expressions();
}
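Once the context is built, evaluation follows the usual pattern in this codebase: bind a row on every collect expression, then read the computed input. A minimal sketch of that step, assuming the setNextRow/value contract and using Row1 as a stand-in row implementation:

// Sketch only: evaluates add(INPUT(0), 2) against a single-column row.
// Assumes CollectExpression.setNextRow(...) binds the row and Input.value()
// computes the result; Row1 is used here as a convenient Row implementation.
Row row = new Row1(40L);
for (CollectExpression<Row, ?> expression : expressions) {
    expression.setNextRow(row);
}
Object result = inputs.get(0).value();   // 40 + 2 -> 42L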
Use of io.crate.analyze.symbol.InputColumn in project crate by crate.
In class DeleteStatementPlanner, method collectWithDeleteProjection:
private static Plan collectWithDeleteProjection(TableInfo tableInfo,
                                                WhereClause whereClause,
                                                Planner.Context plannerContext) {
    // for delete, we always need to collect the `_id`
    Reference idReference = tableInfo.getReference(DocSysColumns.ID);
    DeleteProjection deleteProjection = new DeleteProjection(new InputColumn(0, DataTypes.STRING));
    Routing routing = plannerContext.allocateRouting(tableInfo, whereClause, Preference.PRIMARY.type());
    RoutedCollectPhase collectPhase = new RoutedCollectPhase(
        plannerContext.jobId(),
        plannerContext.nextExecutionPhaseId(),
        "collect",
        routing,
        tableInfo.rowGranularity(),
        ImmutableList.of(idReference),
        ImmutableList.of(deleteProjection),
        whereClause,
        DistributionInfo.DEFAULT_BROADCAST);
    Collect collect = new Collect(collectPhase, TopN.NO_LIMIT, 0, 1, 1, null);
    return Merge.ensureOnHandler(collect, plannerContext, Collections.singletonList(MergeCountProjection.INSTANCE));
}
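Note that the InputColumn(0, DataTypes.STRING) inside the DeleteProjection is purely positional: it reads column 0 of the rows produced by the collect phase, and that layout is defined by the toCollect list passed to the phase. A hedged illustration of the contract (variable names here are illustrative, not part of the planner):

// The collect phase emits one column per entry in toCollect; a downstream
// projection's InputColumn(i) reads column i of those rows.
List<Symbol> toCollect = ImmutableList.of(idReference);   // index 0 -> _id
Symbol idInput = new InputColumn(0, DataTypes.STRING);    // resolves to toCollect.get(0)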
Use of io.crate.analyze.symbol.InputColumn in project crate by crate.
In class UpdateConsumer, method upsertByQuery:
private static Plan upsertByQuery(UpdateAnalyzedStatement.NestedAnalyzedStatement nestedAnalysis,
                                  Planner.Context plannerContext,
                                  DocTableInfo tableInfo,
                                  WhereClause whereClause) {
    Symbol versionSymbol = null;
    if (whereClause.hasVersions()) {
        versionSymbol = VersionRewriter.get(whereClause.query());
        whereClause = new WhereClause(whereClause.query(), whereClause.docKeys().orElse(null), whereClause.partitions());
    }
    if (!whereClause.noMatch() || !(tableInfo.isPartitioned() && whereClause.partitions().isEmpty())) {
        // for updates, we always need to collect the `_id`
        Reference idReference = tableInfo.getReference(DocSysColumns.ID);
        Tuple<String[], Symbol[]> assignments = Assignments.convert(nestedAnalysis.assignments());
        Long version = null;
        if (versionSymbol != null) {
            version = ValueSymbolVisitor.LONG.process(versionSymbol);
        }
        UpdateProjection updateProjection = new UpdateProjection(
            new InputColumn(0, DataTypes.STRING), assignments.v1(), assignments.v2(), version);
        Routing routing = plannerContext.allocateRouting(tableInfo, whereClause, Preference.PRIMARY.type());
        return createPlan(plannerContext, routing, tableInfo, idReference, updateProjection, whereClause);
    } else {
        return null;
    }
}
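Assignments.convert splits the analyzed assignments into two parallel arrays: target column names (v1()) and value symbols (v2()). A hedged sketch of what the UpdateProjection would receive for a single hypothetical assignment name = 'Ford':

// Sketch only: the shapes Assignments.convert would produce for `name = 'Ford'`.
String[] targetColumns = new String[] { "name" };              // assignments.v1()
Symbol[] valueSymbols = new Symbol[] { Literal.of("Ford") };   // assignments.v2()
UpdateProjection sketch = new UpdateProjection(
    new InputColumn(0, DataTypes.STRING),   // _id of the row to update, read from the collect phase
    targetColumns, valueSymbols,
    null);                                  // null -> no version requirement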
Use of io.crate.analyze.symbol.InputColumn in project crate by crate.
In class GroupingBytesRefCollectorBenchmark, method createGroupByMinBytesRefCollector:
private GroupingCollector createGroupByMinBytesRefCollector(Functions functions) {
    InputCollectExpression keyInput = new InputCollectExpression(0);
    List<Input<?>> keyInputs = Arrays.<Input<?>>asList(keyInput);
    CollectExpression[] collectExpressions = new CollectExpression[] { keyInput };
    FunctionIdent minBytesRefFuncIdent = new FunctionIdent(MinimumAggregation.NAME, Arrays.asList(DataTypes.STRING));
    // min over a string argument yields a string, so the return type is STRING
    FunctionInfo minBytesRefFuncInfo = new FunctionInfo(minBytesRefFuncIdent, DataTypes.STRING, FunctionInfo.Type.AGGREGATE);
    AggregationFunction minAgg = (AggregationFunction) functions.get(minBytesRefFuncIdent);
    Aggregation aggregation = Aggregation.finalAggregation(minBytesRefFuncInfo, Arrays.asList(new InputColumn(0)), Aggregation.Step.ITER);
    Aggregator[] aggregators = new Aggregator[] {
        new Aggregator(RAM_ACCOUNTING_CONTEXT, aggregation, minAgg, new Input[] { keyInput })
    };
    return GroupingCollector.singleKey(collectExpressions, aggregators, RAM_ACCOUNTING_CONTEXT, keyInputs.get(0), DataTypes.STRING);
}
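A rough sketch of driving the benchmark collector over a plain stream of single-column rows. This assumes GroupingCollector implements java.util.stream.Collector<Row, ?, Iterable<Row>> in this codebase, with the group key (a BytesRef string) at index 0 to match InputCollectExpression(0) above:

// Sketch only: three rows, two distinct keys -> two grouped result rows.
Iterable<Row> grouped = Stream.of("a", "b", "a")
    .map(key -> (Row) new RowN(new Object[] { new BytesRef(key) }))
    .collect(createGroupByMinBytesRefCollector(functions));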
Use of io.crate.analyze.symbol.InputColumn in project crate by crate.
In class IndexWriterProjectorTest, method testIndexWriter:
@Test
public void testIndexWriter() throws Throwable {
    execute("create table bulk_import (id int primary key, name string) with (number_of_replicas=0)");
    ensureGreen();
    InputCollectExpression sourceInput = new InputCollectExpression(1);
    List<CollectExpression<Row, ?>> collectExpressions = Collections.<CollectExpression<Row, ?>>singletonList(sourceInput);
    // bulkImportIdent and ID_IDENT are fields of the test class
    IndexWriterProjector writerProjector = new IndexWriterProjector(
        internalCluster().getInstance(ClusterService.class),
        internalCluster().getInstance(Functions.class),
        new IndexNameExpressionResolver(Settings.EMPTY), Settings.EMPTY,
        internalCluster().getInstance(TransportBulkCreateIndicesAction.class),
        internalCluster().getInstance(TransportShardUpsertAction.class)::execute,
        IndexNameResolver.forTable(new TableIdent(null, "bulk_import")),
        internalCluster().getInstance(BulkRetryCoordinatorPool.class),
        new Reference(new ReferenceIdent(bulkImportIdent, DocSysColumns.RAW), RowGranularity.DOC, DataTypes.STRING),
        Arrays.asList(ID_IDENT), Arrays.<Symbol>asList(new InputColumn(0)),
        null, null, sourceInput, collectExpressions, 20, null, null, false, false,
        UUID.randomUUID());
    BatchIterator rowsIterator = RowsBatchIterator.newInstance(
        IntStream.range(0, 100)
            .mapToObj(i -> new RowN(new Object[] { i, new BytesRef("{\"id\": " + i + ", \"name\": \"Arthur\"}") }))
            .collect(Collectors.toList()), 2);
    TestingBatchConsumer consumer = new TestingBatchConsumer();
    consumer.accept(writerProjector.apply(rowsIterator), null);
    Bucket objects = consumer.getBucket();
    assertThat(objects, contains(isRow(100L)));
    execute("refresh table bulk_import");
    execute("select count(*) from bulk_import");
    assertThat(response.rowCount(), is(1L));
    assertThat(response.rows()[0][0], is(100L));
}
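The row layout the projector consumes is implicit in the wiring above: Arrays.asList(new InputColumn(0)) makes column 0 the primary key, while sourceInput = new InputCollectExpression(1) reads the raw JSON source from column 1. One of the generated rows, spelled out:

// Column 0 -> id (primary key), column 1 -> raw source, matching the
// InputColumn(0) / InputCollectExpression(1) wiring above.
Row example = new RowN(new Object[] { 7, new BytesRef("{\"id\": 7, \"name\": \"Arthur\"}") });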