Usage of io.questdb.griffin.engine.groupby.SimpleMapValue in the Mycat2 project (MyCATApache): method createAggContext of class NoKeysAggPlan.
/**
 * Builds an {@link AggContext} that folds every input batch into a single output row
 * (aggregation without GROUP BY keys).
 *
 * <p>All accumulator state lives in one {@link SimpleMapValue} sized by
 * {@code RecordUtil.getContextSize(aggregateExprs)}; each aggregate expression reads and
 * writes its own slot inside it. The returned context is initialized before being handed
 * back, so callers may invoke {@code reduce}/{@code finalToVectorSchemaRoot} immediately.
 *
 * @param rootContext allocator/context used to create the single-row output root
 * @return an initialized, single-group aggregation context
 */
private AggContext createAggContext(RootContext rootContext) {
    AggContext aggContext = new AggContext() {
        // Shared accumulator storage for all aggregate expressions; allocated in initContext().
        SimpleMapValue simpleMapValue;
        // One vectorized aggregate per output column, parallel to aggregateExprs.
        AggregateVectorExpression[] aggregateVectorExpressions =
                new AggregateVectorExpression[aggregateExprs.length];

        @Override
        public void initContext() {
            // Total number of accumulator slots required by all aggregates combined.
            int contextSize = RecordUtil.getContextSize(aggregateExprs);
            for (int i = 0; i < aggregateExprs.length; i++) {
                aggregateVectorExpressions[i] = aggregateExprs[i].toAggregateVectorExpression();
            }
            simpleMapValue = new SimpleMapValue(contextSize);
        }

        @Override
        public AggContext reduce(VectorSchemaRoot root) {
            for (AggregateVectorExpression aggregateExpr : aggregateVectorExpressions) {
                FieldVector inputVector = root.getVector(aggregateExpr.getInputColumnIndex());
                switch (aggregateExpr.getType()) {
                    case BOOLEAN_TYPE:
                    case INT8_TYPE:
                    case INT16_TYPE:
                    case CHAR_TYPE:
                    case INT32_TYPE:
                    case INT64_TYPE:
                    case DOUBLE_TYPE:
                        // Integral and double columns are accumulated vector-wise.
                        aggregateExpr.computeUpdateValue(simpleMapValue, inputVector);
                        break;
                    default:
                        // FLOAT, STRING, BINARY, unsigned, temporal, SYMBOL, OBJECT and
                        // NULL types were silent no-ops in the original switch; preserved
                        // as-is. NOTE(review): confirm whether these should be supported.
                        break;
                }
            }
            return this;
        }

        @Override
        public VectorSchemaRoot finalToVectorSchemaRoot() {
            // Exactly one result row: the grand total over all reduced batches.
            VectorSchemaRoot output = rootContext.getVectorSchemaRoot(schema(), 1);
            output.setRowCount(1);
            for (int columnIndex = 0; columnIndex < aggregateVectorExpressions.length; columnIndex++) {
                AggregateVectorExpression aggregateExpr = aggregateVectorExpressions[columnIndex];
                switch (aggregateExpr.getType()) {
                    case BOOLEAN_TYPE:
                    case INT8_TYPE:
                    case INT16_TYPE:
                    case CHAR_TYPE:
                    case INT32_TYPE:
                    case INT64_TYPE:
                        // All integral aggregates materialize into a 64-bit output vector.
                        ((BigIntVector) output.getVector(columnIndex))
                                .set(0, aggregateExpr.computeFinalLongValue(simpleMapValue));
                        break;
                    case DOUBLE_TYPE:
                        ((Float8Vector) output.getVector(columnIndex))
                                .set(0, aggregateExpr.computeFinalDoubleValue(simpleMapValue));
                        break;
                    default:
                        // Unsupported types leave the output slot untouched (original behavior).
                        break;
                }
            }
            return output;
        }
    };
    aggContext.initContext();
    return aggContext;
}
Usage of io.questdb.griffin.engine.groupby.SimpleMapValue in the Mycat2 project (MyCATApache): method execute of class GroupByKeyWithAggPlan.
/**
 * Executes the grouped aggregation over the upstream plan's batches.
 *
 * <p>Two paths:
 * <ul>
 *   <li><b>Keyed</b> ({@code groupByKeys.length > 0}): rows are sunk into a hash map keyed
 *       by the group-by columns; accumulators are applied per map entry, then the map is
 *       drained into a single output {@link VectorSchemaRoot} and closed on completion.</li>
 *   <li><b>Keyless</b>: a single {@link SimpleMapValue} accumulates all rows; the first row
 *       ever seen initializes the accumulators ({@code computeFirst}), every later row
 *       updates them ({@code computeNext}).</li>
 * </ul>
 */
@Override
public Observable<VectorSchemaRoot> execute(RootContext rootContext) {
List<Field> fields = schema().getFields();
// Output column types, derived from the plan schema's Arrow field types.
InnerType[] innerTypes = schema().getFields().stream().map(i -> InnerType.from(i.getType())).toArray(n -> new InnerType[n]);
if (groupByKeys.length > 0) {
// Value layout for the map: one slot per accumulator.
ColumnTypes arrayColumnTypes = RecordUtil.getArrayColumnTypes(accumulators);
Map map = MapFactory.createMap2(innerTypes, arrayColumnTypes);
RecordSink[] recordSinks = buildRecordSink(fields);
return physicalPlan.execute(rootContext).reduce(map, (mapKey, input) -> {
int rowCount = input.getRowCount();
VectorBatchRecord record = new VectorBatchRecord(input);
// NOTE(review): the sink loop is OUTSIDE the row loop, so with more than one
// RecordSink every row is accumulated once per sink — presumably recordSinks
// always has exactly one element; verify against buildRecordSink.
for (RecordSink recordSink : recordSinks) {
for (int rowId = 0; rowId < rowCount; rowId++) {
record.setPosition(rowId);
// Build the group key for this row, then locate (or create) its map entry.
MapKey key = mapKey.withKey();
RecordSetter recordSinkSPI = RecordSinkFactory.INSTANCE.getRecordSinkSPI(key);
recordSink.copy(record, recordSinkSPI);
MapValue value = key.createValue();
if (value.isNew()) {
// First row of a new group: initialize every accumulator slot.
for (AccumulatorFunction accumulator : accumulators) {
accumulator.computeFirst(value, record);
}
} else {
// Existing group: fold this row into the running aggregates.
for (AccumulatorFunction accumulator : accumulators) {
accumulator.computeNext(value, record);
}
}
}
}
// Return the consumed batch to the upstream plan's pool.
// NOTE(review): the keyless path below calls root.close() instead — confirm the
// two release mechanisms are intentionally different.
physicalPlan.eachFree(input);
return mapKey;
}).map(map1 -> {
// Drain the map into a single output root, one row per group.
int size = (int) map1.size();
VectorSchemaRoot output = rootContext.getVectorSchemaRoot(schema(), size);
RecordCursor cursor = map1.getCursor();
cursor.toTop();
int index = 0;
while (cursor.hasNext()) {
Record record = cursor.getRecord();
functionSink.copy(accumulators, RecordUtil.wrapAsAggRecord(record), index++, output);
}
output.setRowCount(index);
map1.clear();
return output;
}).toObservable().doOnComplete(() -> map.close());
} else {
// Keyless path: one global accumulator record sized for all accumulators.
SimpleMapValue mapValue = new SimpleMapValue(RecordUtil.getContextSize(accumulators));
return physicalPlan.execute(rootContext).reduce(mapValue, new BiFunction<SimpleMapValue, VectorSchemaRoot, SimpleMapValue>() {
// Tracks whether any row has been seen yet across all batches, so computeFirst
// runs exactly once even though batches arrive one at a time.
AtomicBoolean first = new AtomicBoolean(true);
@Override
public SimpleMapValue apply(SimpleMapValue simpleMapValue, VectorSchemaRoot root) throws Throwable {
int rowCount = root.getRowCount();
VectorBatchRecord record = new VectorBatchRecord(root);
// Note: the captured `mapValue` and the `simpleMapValue` parameter refer to the
// same object (the reduce seed), so using either is equivalent here.
if (first.compareAndSet(true, false)) {
// Very first row overall initializes the accumulators...
record.setPosition(0);
for (AccumulatorFunction accumulator : accumulators) {
accumulator.computeFirst(mapValue, record);
}
// ...and the remaining rows of this batch are folded in.
for (int i = 1; i < rowCount; i++) {
record.setPosition(i);
for (AccumulatorFunction accumulator : accumulators) {
accumulator.computeNext(mapValue, record);
}
}
} else {
// Subsequent batches: every row is a computeNext update.
for (int i = 0; i < rowCount; i++) {
record.setPosition(i);
for (AccumulatorFunction accumulator : accumulators) {
accumulator.computeNext(mapValue, record);
}
}
}
root.close();
return simpleMapValue;
}
}).map(new Function<SimpleMapValue, VectorSchemaRoot>() {
@Override
public VectorSchemaRoot apply(SimpleMapValue simpleMapValue) throws Throwable {
// Materialize the single aggregate row into a one-row output root.
VectorSchemaRoot vectorSchemaRoot = rootContext.getVectorSchemaRoot(schema(), 1);
vectorSchemaRoot.setRowCount(1);
functionSink.copy(accumulators, RecordUtil.wrapAsAggRecord(simpleMapValue), 0, vectorSchemaRoot);
return vectorSchemaRoot;
}
}).toObservable();
}
}
Aggregations