Use of com.pingcap.tidb.tipb.DAGRequest in the project client-java by tikv.
From the class KVMockServer, method coprocessor:
/**
 * Mock implementation of the coprocessor RPC used in tests.
 *
 * <p>Verifies the request context, parses the embedded {@link DAGRequest}, then walks the
 * requested key ranges. If an error code was injected for a range's start key (via
 * {@code errorMap}), the corresponding region error is attached and scanning stops;
 * otherwise every row from {@code dataMap} falling inside the range is appended as a chunk.
 * Any failure is reported to the client as a gRPC INTERNAL status.
 *
 * @param requestWrap wrapped coprocessor request (context + serialized DAG payload + key ranges)
 * @param responseObserver observer receiving exactly one response, or an error
 */
@Override
public void coprocessor(
    org.tikv.kvproto.Coprocessor.Request requestWrap,
    io.grpc.stub.StreamObserver<org.tikv.kvproto.Coprocessor.Response> responseObserver) {
  try {
    verifyContext(requestWrap.getContext());
    DAGRequest request = DAGRequest.parseFrom(requestWrap.getData());
    if (request.getStartTsFallback() == 0) {
      // Fix: was `throw new Exception()` — a raw, message-less exception hides intent.
      // Still mapped to Status.INTERNAL by the catch block below, so clients observe
      // exactly the same error as before.
      throw new IllegalArgumentException("DAG request has a zero start timestamp");
    }
    List<Coprocessor.KeyRange> keyRanges = requestWrap.getRangesList();
    Coprocessor.Response.Builder builderWrap = Coprocessor.Response.newBuilder();
    SelectResponse.Builder builder = SelectResponse.newBuilder();
    org.tikv.kvproto.Errorpb.Error.Builder errBuilder = org.tikv.kvproto.Errorpb.Error.newBuilder();
    for (Coprocessor.KeyRange keyRange : keyRanges) {
      // Injected errors are one-shot: remove() so a retried request succeeds.
      Integer errorCode = errorMap.remove(keyRange.getStart());
      if (errorCode != null) {
        if (STALE_EPOCH == errorCode) {
          errBuilder.setEpochNotMatch(EpochNotMatch.getDefaultInstance());
        } else if (NOT_LEADER == errorCode) {
          errBuilder.setNotLeader(NotLeader.getDefaultInstance());
        } else {
          // Any other injected code is reported as "server is busy".
          errBuilder.setServerIsBusy(ServerIsBusy.getDefaultInstance());
        }
        builderWrap.setRegionError(errBuilder.build());
        break;
      } else {
        ByteString startKey = keyRange.getStart();
        SortedMap<Key, ByteString> kvs = dataMap.tailMap(toRawKey(startKey));
        // NOTE(review): the end key is treated as inclusive (compareTo <= 0) — confirm
        // this matches the range semantics the tests rely on.
        builder.addAllChunks(
            kvs.entrySet().stream()
                .filter(Objects::nonNull)
                .filter(kv -> kv.getKey().compareTo(toRawKey(keyRange.getEnd())) <= 0)
                .map(kv -> Chunk.newBuilder().setRowsData(kv.getValue()).build())
                .collect(Collectors.toList()));
      }
    }
    responseObserver.onNext(builderWrap.setData(builder.build().toByteString()).build());
    responseObserver.onCompleted();
  } catch (Exception e) {
    // Surface every failure uniformly as INTERNAL; the mock does not distinguish causes.
    responseObserver.onError(Status.INTERNAL.asRuntimeException());
  }
}
Use of com.pingcap.tidb.tipb.DAGRequest in the project tispark by pingcap.
From the class TiDAGRequest, method buildScan:
/**
 * Unify indexScan and tableScan building logic since they are very much alike. DAGRequest for
 * IndexScan should also contain filters and aggregation, so we can reuse this part of logic.
 *
 * <p>DAGRequest is made up of a chain of executors with strict orders: TableScan/IndexScan >
 * Selection > Aggregation > TopN/Limit a DAGRequest must contain one and only one TableScan or
 * IndexScan.
 *
 * @param buildIndexScan whether the dagRequest to build should be an {@link
 *     com.pingcap.tidb.tipb.IndexScan}
 * @param outputOffsets receives the executor output-column offsets; mutated in place by this
 *     method and read back by the caller
 * @return final DAGRequest built
 */
private DAGRequest.Builder buildScan(boolean buildIndexScan, List<Integer> outputOffsets) {
  long id = getPhysicalId();
  checkNotNull(startTs, "startTs is null");
  checkArgument(startTs.getVersion() != 0, "timestamp is 0");
  clearPushDownInfo();
  DAGRequest.Builder dagRequestBuilder = DAGRequest.newBuilder();
  Executor.Builder executorBuilder = Executor.newBuilder();
  IndexScan.Builder indexScanBuilder = IndexScan.newBuilder();
  TableScan.Builder tblScanBuilder = TableScan.newBuilder();
  // find a column's offset in fields
  Map<String, Integer> colOffsetInFieldMap = new HashMap<>();
  // find a column's position in index
  Map<String, Integer> colPosInIndexMap = new HashMap<>();
  if (buildIndexScan) {
    // IndexScan
    if (indexInfo == null) {
      throw new TiClientInternalException("Index is empty for index scan");
    }
    List<TiColumnInfo> columnInfoList = tableInfo.getColumns();
    boolean hasPk = false;
    // We extract index column info
    List<Integer> indexColOffsets =
        indexInfo
            .getIndexColumns()
            .stream()
            .map(TiIndexColumn::getOffset)
            .collect(Collectors.toList());
    int idxPos = 0;
    // for index scan builder, columns are added by its order in index
    for (Integer idx : indexColOffsets) {
      TiColumnInfo tiColumnInfo = columnInfoList.get(idx);
      ColumnInfo columnInfo = tiColumnInfo.toProto(tableInfo);
      colPosInIndexMap.put(tiColumnInfo.getName(), idxPos++);
      ColumnInfo.Builder colBuilder = ColumnInfo.newBuilder(columnInfo);
      // columnId == -1 marks the integer primary-key handle column
      if (columnInfo.getColumnId() == -1) {
        hasPk = true;
        colBuilder.setPkHandle(true);
      }
      indexScanBuilder.addColumns(colBuilder);
    }
    int colCount = indexScanBuilder.getColumnsCount();
    if (isDoubleRead()) {
      // TODO: we may merge indexDoubleRead and coveringIndexRead logic
      // Map each requested field to its position within the index columns.
      for (ColumnRef col : getFields()) {
        Integer pos = colPosInIndexMap.get(col.getName());
        if (pos != null) {
          TiColumnInfo columnInfo = columnInfoList.get(indexColOffsets.get(pos));
          if (col.matchName(columnInfo.getName())) {
            colOffsetInFieldMap.put(col.getName(), pos);
          }
          // TODO: primary key may also be considered if pkIsHandle
        }
      }
      // double read case
      if (!hasPk) {
        // add handle column
        if (!tableInfo.isCommonHandle()) {
          indexScanBuilder.addColumns(handleColumn);
          ++colCount;
        } else {
          // common (clustered, possibly composite) handle: append every PK column
          for (TiIndexColumn col : tableInfo.getPrimaryKey().getIndexColumns()) {
            indexScanBuilder.addColumns(tableInfo.getColumn(col.getName()).toProto(tableInfo));
            ++colCount;
          }
        }
        addRequiredIndexDataType();
      }
      if (colCount == 0) {
        throw new DAGRequestException("Incorrect index scan with zero column count");
      }
      // The handle column(s) were appended last, so output the trailing offsets.
      if (!tableInfo.isCommonHandle()) {
        outputOffsets.add(colCount - 1);
      } else {
        int idxColSize = tableInfo.getPrimaryKey().getIndexColumns().size();
        for (int i = idxColSize; i >= 1; i--) {
          outputOffsets.add(colCount - i);
        }
      }
    } else {
      // covering index read: all requested fields must come from the index (or the PK)
      boolean pkIsNeeded = false;
      // offset for dagRequest should be in accordance with fields
      for (ColumnRef col : getFields()) {
        Integer pos = colPosInIndexMap.get(col.getName());
        if (pos != null) {
          TiColumnInfo columnInfo = columnInfoList.get(indexColOffsets.get(pos));
          if (col.matchName(columnInfo.getName())) {
            outputOffsets.add(pos);
            colOffsetInFieldMap.put(col.getName(), pos);
          }
        } else // logically it must be the pk column. Extra check here.
        if (tableInfo.getColumn(col.getName()).isPrimaryKey()) {
          pkIsNeeded = true;
          // offset should be processed for each primary key encountered
          outputOffsets.add(colCount);
          // for index scan, column offset must be in the order of index->handle
          colOffsetInFieldMap.put(col.getName(), indexColOffsets.size());
        } else {
          throw new DAGRequestException(
              "columns other than primary key and index key exist in fields while index single read: "
                  + col.getName());
        }
      }
      // pk is not included in index but still needed
      if (pkIsNeeded) {
        if (!tableInfo.isCommonHandle()) {
          indexScanBuilder.addColumns(handleColumn);
        }
      }
    }
    executorBuilder.setTp(ExecType.TypeIndexScan);
    indexScanBuilder.setTableId(id).setIndexId(indexInfo.getId());
    if (tableInfo.isCommonHandle()) {
      for (TiIndexColumn col : tableInfo.getPrimaryKey().getIndexColumns()) {
        indexScanBuilder.addPrimaryColumnIds(tableInfo.getColumn(col.getName()).getId());
      }
    }
    dagRequestBuilder.addExecutors(executorBuilder.setIdxScan(indexScanBuilder).build());
  } else {
    // TableScan
    executorBuilder.setTp(ExecType.TypeTableScan);
    tblScanBuilder.setTableId(id);
    if (tableInfo.isCommonHandle()) {
      for (TiIndexColumn col : tableInfo.getPrimaryKey().getIndexColumns()) {
        tblScanBuilder.addPrimaryColumnIds(tableInfo.getColumn(col.getName()).getId());
      }
    }
    // Step1. Add columns to first executor
    int lastOffset = 0;
    for (ColumnRef col : getFields()) {
      // can't allow duplicated col added into executor.
      if (!colOffsetInFieldMap.containsKey(col.getName())) {
        tblScanBuilder.addColumns(tableInfo.getColumn(col.getName()).toProto(tableInfo));
        colOffsetInFieldMap.put(col.getName(), lastOffset);
        lastOffset++;
      }
      // column offset should be in accordance with fields
      outputOffsets.add(colOffsetInFieldMap.get(col.getName()));
    }
    dagRequestBuilder.addExecutors(executorBuilder.setTblScan(tblScanBuilder));
  }
  // Push-downs below are disallowed for a double-read index scan unless the expression is
  // fully covered by the index.
  boolean isIndexDoubleScan = buildIndexScan && isDoubleRead();
  // Should build these executors when performing CoveringIndexScan/TableScan
  // clear executorBuilder
  executorBuilder.clear();
  // Step2. Add others
  // DO NOT EDIT EXPRESSION CONSTRUCTION ORDER
  // Or make sure the construction order is below:
  // TableScan/IndexScan > Selection > Aggregation > TopN/Limit
  Expression whereExpr = mergeCNFExpressions(getFilters());
  if (whereExpr != null) {
    if (!isIndexDoubleScan || isExpressionCoveredByIndex(whereExpr)) {
      executorBuilder.setTp(ExecType.TypeSelection);
      dagRequestBuilder.addExecutors(
          executorBuilder.setSelection(
              Selection.newBuilder()
                  .addConditions(ProtoConverter.toProto(whereExpr, colOffsetInFieldMap))));
      executorBuilder.clear();
      addPushDownFilters();
    } else {
      // filter not covered by index: stop pushing down further executors
      return dagRequestBuilder;
    }
  }
  if (!getGroupByItems().isEmpty() || !getAggregates().isEmpty()) {
    // only allow table scan or covering index scan push down groupby and agg
    if (!isIndexDoubleScan || (isGroupByCoveredByIndex() && isAggregateCoveredByIndex())) {
      pushDownAggAndGroupBy(dagRequestBuilder, executorBuilder, outputOffsets, colOffsetInFieldMap);
    } else {
      return dagRequestBuilder;
    }
  }
  // NOTE(review): orderBy and limit are mutually exclusive here (else-if); presumably the
  // orderBy path folds the limit into a TopN executor — confirm in pushDownOrderBy.
  if (!getOrderByItems().isEmpty()) {
    if (!isIndexDoubleScan || isOrderByCoveredByIndex()) {
      // only allow table scan or covering index scan push down orderby
      pushDownOrderBy(dagRequestBuilder, executorBuilder, colOffsetInFieldMap);
    }
  } else if (getLimit() != 0) {
    if (!isIndexDoubleScan) {
      pushDownLimit(dagRequestBuilder, executorBuilder);
    }
  }
  return dagRequestBuilder;
}
Use of com.pingcap.tidb.tipb.DAGRequest in the project tispark by pingcap.
From the class RegionStoreClient, method coprocessStreaming:
// TODO: wait for future fix
// coprocessStreaming doesn't handle split error
// future work should handle it and do the resolve
// locks correspondingly
public Iterator<SelectResponse> coprocessStreaming(
    DAGRequest req, List<Coprocessor.KeyRange> ranges, long startTs) {
  final boolean forWrite = false;
  // Build the request lazily so each retry observes the latest resolved-lock set.
  Supplier<Coprocessor.Request> requestSupplier =
      () -> {
        Coprocessor.Request.Builder requestBuilder = Coprocessor.Request.newBuilder();
        requestBuilder.setContext(region.getContext(getResolvedLocks(startTs)));
        requestBuilder.setTp(REQ_TYPE_DAG.getValue());
        requestBuilder.setData(req.toByteString());
        requestBuilder.addAllRanges(ranges);
        return requestBuilder.build();
      };
  KVErrorHandler<StreamingResponse> errorHandler =
      new KVErrorHandler<>(
          regionManager,
          this,
          lockResolverClient,
          // TODO: handle all errors in streaming response
          StreamingResponse::getFirstRegionError,
          resp -> null,
          resolveLockResult -> addResolvedLocks(startTs, resolveLockResult.getResolvedLocks()),
          startTs,
          forWrite);
  StreamingResponse streamingResponse =
      this.callServerStreamingWithRetry(
          ConcreteBackOffer.newCopNextMaxBackOff(),
          TikvGrpc.getCoprocessorStreamMethod(),
          requestSupplier,
          errorHandler);
  // Wrap the raw streaming response into an iterator of SelectResponse messages.
  return doCoprocessor(streamingResponse);
}
Use of com.pingcap.tidb.tipb.DAGRequest in the project tispark by pingcap.
From the class TiDAGRequest, method buildRequest:
/**
 * Finalizes the partially-built DAG request: fills in the request-level fields
 * (time zone, flags, output offsets, encode type, start timestamp), validates the
 * result, and returns the built message.
 */
private DAGRequest buildRequest(DAGRequest.Builder dagRequestBuilder, List<Integer> outputOffsets) {
  checkNotNull(startTs, "startTs is null");
  checkArgument(startTs.getVersion() != 0, "timestamp is 0");
  // Populate the remaining fields one at a time rather than in a single call chain.
  dagRequestBuilder.setTimeZoneOffset(timeZoneOffset);
  dagRequestBuilder.setFlags(flags);
  dagRequestBuilder.addAllOutputOffsets(outputOffsets);
  dagRequestBuilder.setEncodeType(this.encodeType);
  dagRequestBuilder.setStartTsFallback(startTs.getVersion());
  DAGRequest builtRequest = dagRequestBuilder.build();
  validateRequest(builtRequest);
  return builtRequest;
}
Use of com.pingcap.tidb.tipb.DAGRequest in the project tispark by pingcap.
From the class KVMockServer, method coprocessor:
/**
 * Mock coprocessor RPC used in tests: validates the request, replays any injected
 * region error for the first matching key range, and otherwise returns the rows of
 * {@code dataMap} that fall inside each requested range. Failures are reported to
 * the client as a gRPC INTERNAL status.
 */
@Override
public void coprocessor(
    org.tikv.kvproto.Coprocessor.Request requestWrap,
    io.grpc.stub.StreamObserver<org.tikv.kvproto.Coprocessor.Response> responseObserver) {
  try {
    verifyContext(requestWrap.getContext());
    // Parse to validate the payload; the decoded request is not consulted afterwards.
    DAGRequest.parseFrom(requestWrap.getData());
    Coprocessor.Response.Builder wrapperBuilder = Coprocessor.Response.newBuilder();
    SelectResponse.Builder selectBuilder = SelectResponse.newBuilder();
    org.tikv.kvproto.Errorpb.Error.Builder regionErrorBuilder =
        org.tikv.kvproto.Errorpb.Error.newBuilder();
    for (Coprocessor.KeyRange range : requestWrap.getRangesList()) {
      // Injected errors are consumed on first use so that a retry can succeed.
      Integer injectedCode = errorMap.remove(range.getStart());
      if (injectedCode != null) {
        if (injectedCode == STALE_EPOCH) {
          regionErrorBuilder.setEpochNotMatch(Errorpb.EpochNotMatch.getDefaultInstance());
        } else if (injectedCode == NOT_LEADER) {
          regionErrorBuilder.setNotLeader(NotLeader.getDefaultInstance());
        } else {
          regionErrorBuilder.setServerIsBusy(ServerIsBusy.getDefaultInstance());
        }
        wrapperBuilder.setRegionError(regionErrorBuilder.build());
        break;
      }
      Key lowerBound = toRawKey(range.getStart());
      Key upperBound = toRawKey(range.getEnd());
      // Walk the sorted tail view, keeping entries whose key is <= the range end.
      for (Map.Entry<Key, ByteString> entry : dataMap.tailMap(lowerBound).entrySet()) {
        if (entry == null) {
          continue;
        }
        if (entry.getKey().compareTo(upperBound) > 0) {
          continue;
        }
        selectBuilder.addChunks(Chunk.newBuilder().setRowsData(entry.getValue()).build());
      }
    }
    responseObserver.onNext(wrapperBuilder.setData(selectBuilder.build().toByteString()).build());
    responseObserver.onCompleted();
  } catch (Exception e) {
    responseObserver.onError(Status.INTERNAL.asRuntimeException());
  }
}
Aggregations