Use of io.ordinate.engine.schema.InnerType in project Mycat2 by MyCATApache.
The class NoKeysAggPlan, method createAggContext:
private AggContext createAggContext(RootContext rootContext) {
    int columnCount = schema().getFields().size();
    int length = aggregateExprs.length;
    AggContext aggContext = new AggContext() {

        SimpleMapValue simpleMapValue;

        AggregateVectorExpression[] aggregateVectorExpressions = new AggregateVectorExpression[aggregateExprs.length];

        @Override
        public void initContext() {
            int columnCount = aggregateExprs.length;
            int longSize = RecordUtil.getContextSize(aggregateExprs);
            for (int columnIndex = 0; columnIndex < columnCount; columnIndex++) {
                aggregateVectorExpressions[columnIndex] = aggregateExprs[columnIndex].toAggregateVectorExpression();
            }
            simpleMapValue = new SimpleMapValue(longSize);
        }

        @Override
        public AggContext reduce(VectorSchemaRoot root) {
            int columnCount = aggregateExprs.length;
            for (int columnIndex = 0; columnIndex < columnCount; columnIndex++) {
                AggregateVectorExpression aggregateExpr = aggregateVectorExpressions[columnIndex];
                int inputColumnIndex = aggregateExpr.getInputColumnIndex();
                FieldVector inputVector = root.getVector(inputColumnIndex);
                InnerType type = aggregateExpr.getType();
                switch (type) {
                    case BOOLEAN_TYPE:
                    case INT8_TYPE:
                    case INT16_TYPE:
                    case CHAR_TYPE:
                    case INT32_TYPE:
                    case INT64_TYPE:
                        aggregateExpr.computeUpdateValue(simpleMapValue, inputVector);
                        break;
                    case FLOAT_TYPE:
                        break;
                    case DOUBLE_TYPE:
                        aggregateExpr.computeUpdateValue(simpleMapValue, inputVector);
                        break;
                    case STRING_TYPE:
                        break;
                    case BINARY_TYPE:
                        break;
                    case UINT8_TYPE:
                        break;
                    case UINT16_TYPE:
                        break;
                    case UINT32_TYPE:
                        break;
                    case UINT64_TYPE:
                        break;
                    case TIME_MILLI_TYPE:
                        break;
                    case DATE_TYPE:
                        break;
                    case DATETIME_MILLI_TYPE:
                        break;
                    case SYMBOL_TYPE:
                        break;
                    case OBJECT_TYPE:
                        break;
                    case NULL_TYPE:
                        break;
                }
            }
            return this;
        }

        @Override
        public VectorSchemaRoot finalToVectorSchemaRoot() {
            Schema schema = schema();
            VectorSchemaRoot output = rootContext.getVectorSchemaRoot(schema, 1);
            output.setRowCount(1);
            int columnCount = aggregateExprs.length;
            for (int columnIndex = 0; columnIndex < columnCount; columnIndex++) {
                AggregateVectorExpression aggregateExpr = aggregateVectorExpressions[columnIndex];
                InnerType type = aggregateExpr.getType();
                switch (type) {
                    case BOOLEAN_TYPE:
                    case INT8_TYPE:
                    case INT16_TYPE:
                    case CHAR_TYPE:
                    case INT32_TYPE:
                    case INT64_TYPE: {
                        ((BigIntVector) output.getVector(columnIndex)).set(0, aggregateExpr.computeFinalLongValue(simpleMapValue));
                        break;
                    }
                    case FLOAT_TYPE:
                        break;
                    case DOUBLE_TYPE: {
                        ((Float8Vector) output.getVector(columnIndex)).set(0, aggregateExpr.computeFinalDoubleValue(simpleMapValue));
                        break;
                    }
                    case STRING_TYPE:
                        break;
                    case BINARY_TYPE:
                        break;
                    case UINT8_TYPE:
                        break;
                    case UINT16_TYPE:
                        break;
                    case UINT32_TYPE:
                        break;
                    case UINT64_TYPE:
                        break;
                    case TIME_MILLI_TYPE:
                        break;
                    case DATE_TYPE:
                        break;
                    case DATETIME_MILLI_TYPE:
                        break;
                    case SYMBOL_TYPE:
                        break;
                    case OBJECT_TYPE:
                        break;
                    case NULL_TYPE:
                        break;
                }
            }
            return output;
        }
    };
    aggContext.initContext();
    return aggContext;
}
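The pattern above folds every incoming Arrow batch into a flat SimpleMapValue and only materializes a single output row at the end, choosing the output vector class by InnerType: the integer family lands in a BigIntVector, DOUBLE_TYPE in a Float8Vector. The sketch below shows just that final materialization step with plain Arrow Java; the two-column schema, the column names, and the literal values are illustrative assumptions, not the engine's actual output.

// Hedged sketch: writing a single aggregate result row into Arrow vectors,
// mirroring finalToVectorSchemaRoot() above. Needs the Arrow Java vector and
// memory modules on the classpath; the schema and values are made up.
import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.vector.BigIntVector;
import org.apache.arrow.vector.Float8Vector;
import org.apache.arrow.vector.VectorSchemaRoot;
import org.apache.arrow.vector.types.FloatingPointPrecision;
import org.apache.arrow.vector.types.pojo.ArrowType;
import org.apache.arrow.vector.types.pojo.Field;
import org.apache.arrow.vector.types.pojo.Schema;

import java.util.Arrays;

public class FinalRowSketch {
    public static void main(String[] args) {
        Schema schema = new Schema(Arrays.asList(
                Field.nullable("cnt", new ArrowType.Int(64, true)),
                Field.nullable("avg", new ArrowType.FloatingPoint(FloatingPointPrecision.DOUBLE))));
        try (RootAllocator allocator = new RootAllocator();
             VectorSchemaRoot output = VectorSchemaRoot.create(schema, allocator)) {
            output.allocateNew();
            // Integer-family aggregates go into a BigIntVector, DOUBLE_TYPE into a
            // Float8Vector, exactly as the switch above does.
            ((BigIntVector) output.getVector(0)).set(0, 42L);
            ((Float8Vector) output.getVector(1)).set(0, 3.14);
            output.setRowCount(1);
            System.out.println(output.contentToTSVString());
        }
    }
}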
Use of io.ordinate.engine.schema.InnerType in project Mycat2 by MyCATApache.
The class FooRecordSink, method copy(Record, RecordSetter):
@Override
public void copy(Record r, RecordSetter w) {
    for (IntInnerType intPair : types) {
        int i = intPair.index;
        boolean aNull = r.isNull(i);
        if (aNull) {
            copyNullType(r, w, i);
            continue;
        }
        InnerType innerType = intPair.type;
        switch (innerType) {
            case BOOLEAN_TYPE:
                copyBooleanType(r, w, i);
                break;
            case INT8_TYPE:
                copyInt8Type(r, w, i);
                break;
            case INT16_TYPE:
                copyInt16Type(r, w, i);
                break;
            case CHAR_TYPE:
                copyCharType(r, w, i);
                break;
            case INT32_TYPE:
                copyInt32Type(r, w, i);
                break;
            case INT64_TYPE:
                copyInt64Type(r, w, i);
                break;
            case FLOAT_TYPE:
                copyFloatType(r, w, i);
                break;
            case DOUBLE_TYPE:
                copyDoubleType(r, w, i);
                break;
            case STRING_TYPE:
                copyStringType(r, w, i);
                break;
            case BINARY_TYPE:
                copyBinaryType(r, w, i);
                break;
            case UINT8_TYPE:
                copyUInt8Type(r, w, i);
                break;
            case UINT16_TYPE:
                copyUInt16Type(r, w, i);
                break;
            case UINT32_TYPE:
                copyUInt32Type(r, w, i);
                break;
            case UINT64_TYPE:
                copyUInt64Type(r, w, i);
                break;
            case TIME_MILLI_TYPE:
                copyTimeMillType(r, w, i);
                break;
            case DATE_TYPE:
                copyDateType(r, w, i);
                break;
            case DATETIME_MILLI_TYPE:
                copyDatetimeMilliType(r, w, i);
                break;
            case SYMBOL_TYPE:
                copySymbolType(r, w, i);
                break;
            case OBJECT_TYPE:
                copyObjectType(r, w, i);
                break;
            case NULL_TYPE:
                copyNullType(r, w, i);
                break;
        }
    }
}
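This overload follows a simple shape: check the null flag first, then branch on the column's InnerType and delegate to a per-type copy helper. Below is a minimal, self-contained sketch of that dispatch in plain Java; the Row, Sink, and ColType names are stand-ins invented for illustration, not the engine's Record and RecordSetter interfaces.

import java.util.List;

public class TypedCopySketch {

    enum ColType { INT64, DOUBLE, STRING }

    /** Minimal stand-in for the engine's Record: one row backed by an Object[]. */
    static class Row {
        final Object[] values;
        Row(Object... values) { this.values = values; }
        boolean isNull(int i) { return values[i] == null; }
        long getLong(int i) { return ((Number) values[i]).longValue(); }
        double getDouble(int i) { return ((Number) values[i]).doubleValue(); }
        String getString(int i) { return (String) values[i]; }
    }

    /** Minimal stand-in for RecordSetter: collects typed values as text. */
    static class Sink {
        final StringBuilder out = new StringBuilder();
        void putNull() { out.append("null "); }
        void putLong(long v) { out.append(v).append(' '); }
        void putDouble(double v) { out.append(v).append(' '); }
        void putString(String v) { out.append(v).append(' '); }
    }

    // Same shape as copy(Record r, RecordSetter w): null check first, then a
    // switch on the column's type tag that picks the typed copy routine.
    static void copy(Row row, Sink sink, List<ColType> types) {
        for (int i = 0; i < types.size(); i++) {
            if (row.isNull(i)) {
                sink.putNull();
                continue;
            }
            switch (types.get(i)) {
                case INT64:
                    sink.putLong(row.getLong(i));
                    break;
                case DOUBLE:
                    sink.putDouble(row.getDouble(i));
                    break;
                case STRING:
                    sink.putString(row.getString(i));
                    break;
            }
        }
    }

    public static void main(String[] args) {
        Sink sink = new Sink();
        copy(new Row(1L, null, "x"), sink, List.of(ColType.INT64, ColType.DOUBLE, ColType.STRING));
        System.out.println(sink.out); // 1 null x
    }
}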
Use of io.ordinate.engine.schema.InnerType in project Mycat2 by MyCATApache.
The class FooRecordSink, method copy(Record, int, VectorSchemaRoot):
@Override
public void copy(Record record, int rowId, VectorSchemaRoot input) {
    for (IntInnerType intPair : types) {
        int columnIndex = intPair.index;
        FieldVector vector = input.getVector(columnIndex);
        boolean aNull = record.isNull(columnIndex);
        if (aNull) {
            if (vector instanceof BaseFixedWidthVector) {
                ((BaseFixedWidthVector) vector).setNull(rowId);
            } else if (vector instanceof BaseVariableWidthVector) {
                ((BaseVariableWidthVector) vector).setNull(rowId);
            }
            continue;
        }
        InnerType innerType = intPair.type;
        switch (innerType) {
            case BOOLEAN_TYPE:
                BitVector bitVector = (BitVector) vector;
                bitVector.set(rowId, record.getInt(columnIndex));
                break;
            case CHAR_TYPE:
            case INT16_TYPE:
            case INT8_TYPE:
            case INT32_TYPE:
            case INT64_TYPE:
            case UINT8_TYPE:
            case UINT16_TYPE:
            case UINT32_TYPE:
            case UINT64_TYPE:
                BaseIntVector intVectors = (BaseIntVector) vector;
                intVectors.setUnsafeWithPossibleTruncate(rowId, record.getLong(columnIndex));
                break;
            case DOUBLE_TYPE:
            case FLOAT_TYPE:
                FloatingPointVector vectors = (FloatingPointVector) vector;
                vectors.setWithPossibleTruncate(rowId, record.getDouble(columnIndex));
                break;
            case SYMBOL_TYPE:
            case STRING_TYPE:
                VarCharVector valueVectors = (VarCharVector) vector;
                valueVectors.set(rowId, record.getBinary(columnIndex).getBytes());
                break;
            case BINARY_TYPE: {
                VarBinaryVector varBinaryVector = (VarBinaryVector) vector;
                varBinaryVector.set(rowId, record.getBinary(columnIndex).getBytes());
                break;
            }
            case TIME_MILLI_TYPE: {
                TimeMilliVector timeStampVector = (TimeMilliVector) vector;
                timeStampVector.set(rowId, (int) record.getTime(columnIndex));
                break;
            }
            case DATE_TYPE: {
                DateMilliVector dateDayVector = (DateMilliVector) vector;
                dateDayVector.set(rowId, record.getDate(columnIndex));
                break;
            }
            case DATETIME_MILLI_TYPE: {
                TimeStampVector timeStampVector = (TimeStampVector) vector;
                timeStampVector.set(rowId, record.getTime(columnIndex));
                break;
            }
            case OBJECT_TYPE:
                throw new UnsupportedOperationException();
            case NULL_TYPE:
                if (vector instanceof BaseFixedWidthVector) {
                    ((BaseFixedWidthVector) vector).setNull(rowId);
                } else if (vector instanceof BaseVariableWidthVector) {
                    ((BaseVariableWidthVector) vector).setNull(rowId);
                }
                continue;
        }
    }
}
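This overload writes straight into Arrow vectors, choosing the write method by type family: the whole integer family goes through BaseIntVector, FLOAT/DOUBLE through FloatingPointVector, strings and symbols through VarCharVector, and nulls through setNull on the fixed- or variable-width base class. The hedged sketch below writes two rows into standalone Arrow vectors to show those families in isolation; the column names and values are made up, and it only needs the Arrow Java vector and memory modules.

// Hedged sketch of writing rows into Arrow vectors by type family, mirroring
// the integer / floating-point / varchar branches above.
import org.apache.arrow.memory.RootAllocator;
import org.apache.arrow.vector.BigIntVector;
import org.apache.arrow.vector.Float8Vector;
import org.apache.arrow.vector.VarCharVector;

import java.nio.charset.StandardCharsets;

public class ArrowRowWriteSketch {
    public static void main(String[] args) {
        try (RootAllocator allocator = new RootAllocator();
             BigIntVector id = new BigIntVector("id", allocator);
             Float8Vector score = new Float8Vector("score", allocator);
             VarCharVector name = new VarCharVector("name", allocator)) {
            id.allocateNew(2);
            score.allocateNew(2);
            name.allocateNew(2);

            // Row 0: every column present.
            id.set(0, 1L);                                       // integer family
            score.set(0, 0.5);                                   // floating-point family
            name.set(0, "a".getBytes(StandardCharsets.UTF_8));   // varchar family

            // Row 1: nulls handled the same way as the aNull branch above.
            id.setNull(1);
            score.set(1, 1.5);
            name.setNull(1);

            id.setValueCount(2);
            score.setValueCount(2);
            name.setValueCount(2);
            System.out.println(id.get(0) + " " + score.get(1) + " "
                    + new String(name.get(0), StandardCharsets.UTF_8));
        }
    }
}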
Use of io.ordinate.engine.schema.InnerType in project Mycat2 by MyCATApache.
The class GroupByKeyPlan, method execute:
@Override
public Observable<VectorSchemaRoot> execute(RootContext rootContext) {
    List<Field> fields = schema().getFields();
    InnerType[] innerTypes = schema().getFields().stream().map(i -> InnerType.from(i.getType())).toArray(n -> new InnerType[n]);
    Map map = MapFactory.createMap(innerTypes);
    RecordSink[] recordSinks = new RecordSink[groupByKeys.length];
    int groupIndex = 0;
    for (GroupKeys groupByKey : groupByKeys) {
        IntInnerType[] intPairs = new IntInnerType[groupByKey.getKeys().length];
        int[] keys = groupByKey.getKeys();
        int index = 0;
        for (int key : keys) {
            Field field = fields.get(key);
            intPairs[index] = IntInnerType.of(index, InnerType.from(field.getType()));
            index++;
        }
        recordSinks[groupIndex] = RecordSinkFactory.INSTANCE.buildRecordSink(intPairs);
        groupIndex++;
    }
    return inputPlan.execute(rootContext).flatMap(new Function<VectorSchemaRoot, ObservableSource<? extends VectorSchemaRoot>>() {

        @Override
        public ObservableSource<? extends VectorSchemaRoot> apply(VectorSchemaRoot input) throws Throwable {
            int rowCount = input.getRowCount();
            VectorBatchRecord record = new VectorBatchRecord(input);
            VectorSchemaRoot output = rootContext.getVectorSchemaRoot(schema, rowCount * recordSinks.length);
            int outputRowId = 0;
            for (int i = 0; i < recordSinks.length; i++) {
                RecordSink recordSink = recordSinks[i];
                for (int rowId = 0; rowId < rowCount; rowId++) {
                    record.setPosition(rowId);
                    MapKey key = map.withKey();
                    RecordSetter recordSinkSPI = RecordSinkFactory.INSTANCE.getRecordSinkSPI(key);
                    recordSink.copy(record, recordSinkSPI);
                    if (key.create()) {
                        recordSink.copy(record, outputRowId, output);
                        outputRowId++;
                        // output
                    } else {
                        // skip
                    }
                }
            }
            if (outputRowId == 0) {
                output.close();
                return Observable.empty();
            }
            output.setRowCount(outputRowId);
            inputPlan.eachFree(input);
            return Observable.fromArray(output);
        }
    }).doOnComplete(new Action() {

        @Override
        public void run() throws Throwable {
            map.close();
        }
    });
}
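GroupByKeyPlan deduplicates: each RecordSink serializes the key columns of a row into the map, and only rows whose key is newly created (key.create() returns true) are copied to the output batch. A compact sketch of that logic with an on-heap HashSet in place of the engine's off-heap map is shown below; the Object[] row shape and the distinct helper are assumptions made for illustration.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class DistinctByKeySketch {

    // keyColumns plays the role of GroupKeys.getKeys(): which columns form the key.
    static List<Object[]> distinct(List<Object[]> rows, int[] keyColumns) {
        Set<List<Object>> seen = new HashSet<>();
        List<Object[]> output = new ArrayList<>();
        for (Object[] row : rows) {
            List<Object> key = new ArrayList<>(keyColumns.length);
            for (int k : keyColumns) {
                key.add(row[k]);        // copy key columns, like recordSink.copy(record, key)
            }
            if (seen.add(key)) {        // analogous to key.create() returning true
                output.add(row);        // first occurrence: copy the row to the output
            }                           // otherwise: skip, as in the else branch above
        }
        return output;
    }

    public static void main(String[] args) {
        List<Object[]> rows = Arrays.asList(
                new Object[]{1, "a"}, new Object[]{1, "b"}, new Object[]{2, "a"});
        System.out.println(distinct(rows, new int[]{0}).size()); // 2 distinct keys on column 0
    }
}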
Use of io.ordinate.engine.schema.InnerType in project Mycat2 by MyCATApache.
The class GroupByKeyWithAggPlan, method execute:
@Override
public Observable<VectorSchemaRoot> execute(RootContext rootContext) {
    List<Field> fields = schema().getFields();
    InnerType[] innerTypes = schema().getFields().stream().map(i -> InnerType.from(i.getType())).toArray(n -> new InnerType[n]);
    if (groupByKeys.length > 0) {
        ColumnTypes arrayColumnTypes = RecordUtil.getArrayColumnTypes(accumulators);
        Map map = MapFactory.createMap2(innerTypes, arrayColumnTypes);
        RecordSink[] recordSinks = buildRecordSink(fields);
        return physicalPlan.execute(rootContext).reduce(map, (mapKey, input) -> {
            int rowCount = input.getRowCount();
            VectorBatchRecord record = new VectorBatchRecord(input);
            for (RecordSink recordSink : recordSinks) {
                for (int rowId = 0; rowId < rowCount; rowId++) {
                    record.setPosition(rowId);
                    MapKey key = mapKey.withKey();
                    RecordSetter recordSinkSPI = RecordSinkFactory.INSTANCE.getRecordSinkSPI(key);
                    recordSink.copy(record, recordSinkSPI);
                    MapValue value = key.createValue();
                    if (value.isNew()) {
                        for (AccumulatorFunction accumulator : accumulators) {
                            accumulator.computeFirst(value, record);
                        }
                    } else {
                        for (AccumulatorFunction accumulator : accumulators) {
                            accumulator.computeNext(value, record);
                        }
                    }
                }
            }
            physicalPlan.eachFree(input);
            return mapKey;
        }).map(map1 -> {
            int size = (int) map1.size();
            VectorSchemaRoot output = rootContext.getVectorSchemaRoot(schema(), size);
            RecordCursor cursor = map1.getCursor();
            cursor.toTop();
            int index = 0;
            while (cursor.hasNext()) {
                Record record = cursor.getRecord();
                functionSink.copy(accumulators, RecordUtil.wrapAsAggRecord(record), index++, output);
            }
            output.setRowCount(index);
            map1.clear();
            return output;
        }).toObservable().doOnComplete(() -> map.close());
    } else {
        SimpleMapValue mapValue = new SimpleMapValue(RecordUtil.getContextSize(accumulators));
        return physicalPlan.execute(rootContext).reduce(mapValue, new BiFunction<SimpleMapValue, VectorSchemaRoot, SimpleMapValue>() {

            AtomicBoolean first = new AtomicBoolean(true);

            @Override
            public SimpleMapValue apply(SimpleMapValue simpleMapValue, VectorSchemaRoot root) throws Throwable {
                int rowCount = root.getRowCount();
                VectorBatchRecord record = new VectorBatchRecord(root);
                if (first.compareAndSet(true, false)) {
                    record.setPosition(0);
                    for (AccumulatorFunction accumulator : accumulators) {
                        accumulator.computeFirst(mapValue, record);
                    }
                    for (int i = 1; i < rowCount; i++) {
                        record.setPosition(i);
                        for (AccumulatorFunction accumulator : accumulators) {
                            accumulator.computeNext(mapValue, record);
                        }
                    }
                } else {
                    for (int i = 0; i < rowCount; i++) {
                        record.setPosition(i);
                        for (AccumulatorFunction accumulator : accumulators) {
                            accumulator.computeNext(mapValue, record);
                        }
                    }
                }
                root.close();
                return simpleMapValue;
            }
        }).map(new Function<SimpleMapValue, VectorSchemaRoot>() {

            @Override
            public VectorSchemaRoot apply(SimpleMapValue simpleMapValue) throws Throwable {
                VectorSchemaRoot vectorSchemaRoot = rootContext.getVectorSchemaRoot(schema(), 1);
                vectorSchemaRoot.setRowCount(1);
                functionSink.copy(accumulators, RecordUtil.wrapAsAggRecord(simpleMapValue), 0, vectorSchemaRoot);
                return vectorSchemaRoot;
            }
        }).toObservable();
    }
}
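GroupByKeyWithAggPlan has two paths: with group keys it folds every row into a map entry, calling computeFirst when the key is new and computeNext otherwise; without keys it accumulates the whole stream into a single SimpleMapValue. The sketch below captures the keyed path with an ordinary HashMap and a plain long sum as the accumulator state; the engine's MapValue layout and AccumulatorFunction contract are more general than this.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupedSumSketch {

    static Map<Object, Long> groupedSum(List<Object[]> rows, int keyCol, int valueCol) {
        Map<Object, Long> groups = new HashMap<>();
        for (Object[] row : rows) {
            Object key = row[keyCol];
            long value = ((Number) row[valueCol]).longValue();
            // New key -> seed the state (computeFirst); existing key -> fold (computeNext).
            groups.merge(key, value, Long::sum);
        }
        return groups;
    }

    public static void main(String[] args) {
        List<Object[]> rows = Arrays.asList(
                new Object[]{"a", 1}, new Object[]{"a", 2}, new Object[]{"b", 5});
        System.out.println(groupedSum(rows, 0, 1)); // {a=3, b=5}
    }
}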