Use of com.actiontech.dble.sqlengine.mpp.ColumnRoutePair in project dble by actiontech.
The class DruidInsertReplaceParser, method routeByERParentKey.
protected static RouteResultset routeByERParentKey(RouteResultset rrs, TableConfig tc, String joinKeyVal) throws SQLNonTransientException {
    if (tc.getDirectRouteTC() != null) {
        Set<ColumnRoutePair> parentColVal = new HashSet<>(1);
        ColumnRoutePair pair = new ColumnRoutePair(joinKeyVal);
        parentColVal.add(pair);
        Set<String> dataNodeSet = RouterUtil.ruleCalculate(tc.getDirectRouteTC(), parentColVal);
        if (dataNodeSet.isEmpty() || dataNodeSet.size() > 1) {
            throw new SQLNonTransientException("parent key can't find valid data node, expect 1 but found: " + dataNodeSet.size());
        }
        String dn = dataNodeSet.iterator().next();
        if (SQLJob.LOGGER.isDebugEnabled()) {
            SQLJob.LOGGER.debug("found partition node (using parent partition rule directly) for child table to insert " + dn + " sql :" + rrs.getStatement());
        }
        return RouterUtil.routeToSingleNode(rrs, dn);
    }
    return null;
}
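A minimal caller sketch, assuming a child-table INSERT whose ER join-key value is already known; the variable names insertRrs and childTableConfig are illustrative, not taken from dble. The method either routes to the single node chosen by the parent's partition rule or returns null so the caller can fall back to other routing.

    // illustrative only: insertRrs and childTableConfig are assumed to exist in the calling parser
    RouteResultset routed = routeByERParentKey(insertRrs, childTableConfig, "42");
    if (routed == null) {
        // getDirectRouteTC() was null, so the parent's partition rule cannot be applied directly
    }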
Use of com.actiontech.dble.sqlengine.mpp.ColumnRoutePair in project dble by actiontech.
The class RouterUtil, method ruleCalculate.
public static Set<String> ruleCalculate(TableConfig tc, Set<ColumnRoutePair> colRoutePairSet) {
    Set<String> routeNodeSet = new LinkedHashSet<>();
    String col = tc.getRule().getColumn();
    RuleConfig rule = tc.getRule();
    AbstractPartitionAlgorithm algorithm = rule.getRuleAlgorithm();
    for (ColumnRoutePair colPair : colRoutePairSet) {
        if (colPair.colValue != null) {
            Integer nodeIndex = algorithm.calculate(colPair.colValue);
            if (nodeIndex == null) {
                throw new IllegalArgumentException("can't find datanode for sharding column:" + col + " val:" + colPair.colValue);
            } else {
                String dataNode = tc.getDataNodes().get(nodeIndex);
                routeNodeSet.add(dataNode);
                colPair.setNodeId(nodeIndex);
            }
        } else if (colPair.rangeValue != null) {
            Integer[] nodeRange = algorithm.calculateRange(String.valueOf(colPair.rangeValue.getBeginValue()), String.valueOf(colPair.rangeValue.getEndValue()));
            if (nodeRange != null) {
                /**
                 * not sure whether colPair's nodeId has other effects
                 */
                if (nodeRange.length == 0) {
                    routeNodeSet.addAll(tc.getDataNodes());
                } else {
                    ArrayList<String> dataNodes = tc.getDataNodes();
                    String dataNode = null;
                    for (Integer nodeId : nodeRange) {
                        dataNode = dataNodes.get(nodeId);
                        routeNodeSet.add(dataNode);
                    }
                }
            }
        }
    }
    return routeNodeSet;
}
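A minimal usage sketch, assuming a table sharded on a single column; userTableConfig is an illustrative TableConfig, not a name from dble. An equality value resolves to exactly one node index, while a rangeValue may expand to several nodes or, when the algorithm returns an empty range, to all of the table's data nodes.

    // illustrative only: userTableConfig is assumed to be a sharded TableConfig
    Set<ColumnRoutePair> pairs = new HashSet<>(1);
    pairs.add(new ColumnRoutePair("1001"));  // equality condition on the sharding column
    Set<String> nodes = RouterUtil.ruleCalculate(userTableConfig, pairs);
    // nodes now contains the dataNode name(s) selected by the table's partition algorithm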
Use of com.actiontech.dble.sqlengine.mpp.ColumnRoutePair in project dble by actiontech.
The class RouterUtil, method findRouterWithConditionsForTables.
/**
 * findRouterWithConditionsForTables
 */
public static void findRouterWithConditionsForTables(SchemaConfig schema, RouteResultset rrs, Map<String, Map<String, Set<ColumnRoutePair>>> tablesAndConditions, Map<String, Set<String>> tablesRouteMap, LayerCachePool cachePool, boolean isSelect, boolean isSingleTable) throws SQLNonTransientException {
    // route for sharding tables
    for (Map.Entry<String, Map<String, Set<ColumnRoutePair>>> entry : tablesAndConditions.entrySet()) {
        String tableName = entry.getKey();
        if (DbleServer.getInstance().getSystemVariables().isLowerCaseTableNames()) {
            tableName = tableName.toLowerCase();
        }
        if (tableName.startsWith(schema.getName() + ".")) {
            tableName = tableName.substring(schema.getName().length() + 1);
        }
        TableConfig tableConfig = schema.getTables().get(tableName);
        if (tableConfig == null) {
            if (isSingleTable) {
                String msg = "can't find table [" + tableName + "] defined in schema: " + schema.getName();
                LOGGER.info(msg);
                throw new SQLNonTransientException(msg);
            } else {
                // crosses to another schema
                continue;
            }
        }
        if (tableConfig.isGlobalTable() || schema.getTables().get(tableName).getDataNodes().size() == 1) {
            // a global table or a single-node sharding table will be routed later
            continue;
        } else {
            // sharding table, childTable or others
            Map<String, Set<ColumnRoutePair>> columnsMap = entry.getValue();
            if (tryRouteWithPrimaryCache(rrs, tablesRouteMap, cachePool, columnsMap, schema, tableName, tableConfig.getPrimaryKey(), isSelect)) {
                continue;
            }
            String joinKey = tableConfig.getJoinKey();
            String partitionCol = tableConfig.getPartitionColumn();
            boolean isFoundPartitionValue = partitionCol != null && columnsMap.get(partitionCol) != null;
            // the where filter contains the partition column
            if (isFoundPartitionValue) {
                Set<ColumnRoutePair> partitionValue = columnsMap.get(partitionCol);
                if (partitionValue.size() == 0) {
                    if (tablesRouteMap.get(tableName) == null) {
                        tablesRouteMap.put(tableName, new HashSet<String>());
                    }
                    tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes());
                } else {
                    routeWithPartition(tablesRouteMap, tableName, tableConfig, partitionValue);
                }
            } else if (joinKey != null && columnsMap.get(joinKey) != null && columnsMap.get(joinKey).size() != 0) {
                routerForJoinTable(rrs, tableConfig, columnsMap, joinKey);
                return;
            } else {
                // no partition column, route to all nodes
                if (tablesRouteMap.get(tableName) == null) {
                    tablesRouteMap.put(tableName, new HashSet<String>());
                }
                tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes());
            }
        }
    }
}
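A sketch of the expected shape of tablesAndConditions, using illustrative table and column names: the outer key is a table name, the inner key a column name, and the values are the ColumnRoutePairs extracted from the WHERE clause. After the call, tablesRouteMap maps each sharding table to the data nodes it must visit.

    // illustrative input only; "t_order" and "order_id" are assumed names, not part of dble
    Map<String, Map<String, Set<ColumnRoutePair>>> tablesAndConditions = new HashMap<>();
    Map<String, Set<ColumnRoutePair>> orderConditions = new HashMap<>();
    orderConditions.put("order_id", Collections.singleton(new ColumnRoutePair("5")));
    tablesAndConditions.put("t_order", orderConditions);
    Map<String, Set<String>> tablesRouteMap = new HashMap<>();
    // findRouterWithConditionsForTables(schema, rrs, tablesAndConditions, tablesRouteMap, cachePool, true, true);
    // tablesRouteMap.get("t_order") would then hold the data node(s) for order_id = 5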
Use of com.actiontech.dble.sqlengine.mpp.ColumnRoutePair in project dble by actiontech.
The class RouterUtil, method tryRouteForTables.
/**
 * tryRouteForTables: route a statement that references multiple tables
 */
public static RouteResultset tryRouteForTables(SchemaConfig schema, DruidShardingParseInfo ctx, RouteCalculateUnit routeUnit, RouteResultset rrs, boolean isSelect, LayerCachePool cachePool) throws SQLException {
    List<String> tables = ctx.getTables();
    // no sharding table
    if (isNoSharding(schema, tables.get(0))) {
        return routeToSingleNode(rrs, schema.getDataNode());
    }
    if (tables.size() == 1) {
        return RouterUtil.tryRouteForOneTable(schema, routeUnit, tables.get(0), rrs, isSelect, cachePool);
    }
    /**
     * multi-table: it must be an ER join, or global * normal, or global * ER
     */
    // map <table, data_nodes>
    Map<String, Set<String>> tablesRouteMap = new HashMap<>();
    Map<String, Map<String, Set<ColumnRoutePair>>> tablesAndConditions = routeUnit.getTablesAndConditions();
    if (tablesAndConditions != null && tablesAndConditions.size() > 0) {
        // find the route for sharding tables
        RouterUtil.findRouterWithConditionsForTables(schema, rrs, tablesAndConditions, tablesRouteMap, cachePool, isSelect, false);
        if (rrs.isFinishedRoute()) {
            return rrs;
        }
    }
    // if a global table and a normal table have no intersection, treat them as a normal join
    for (String tableName : tables) {
        TableConfig tableConfig = schema.getTables().get(tableName);
        if (tableConfig != null && !tableConfig.isGlobalTable() && tablesRouteMap.get(tableName) == null) {
            // the other is a single table
            tablesRouteMap.put(tableName, new HashSet<String>());
            tablesRouteMap.get(tableName).addAll(tableConfig.getDataNodes());
        }
    }
    Set<String> retNodesSet = new HashSet<>();
    boolean isFirstAdd = true;
    for (Map.Entry<String, Set<String>> entry : tablesRouteMap.entrySet()) {
        if (entry.getValue() == null || entry.getValue().size() == 0) {
            throw new SQLNonTransientException("parent key can't find any valid datanode");
        } else {
            if (isFirstAdd) {
                retNodesSet.addAll(entry.getValue());
                isFirstAdd = false;
            } else {
                retNodesSet.retainAll(entry.getValue());
                if (retNodesSet.size() == 0) {
                    // the two tables have no intersection
                    String errMsg = "invalid route in sql, multi tables found but datanode has no intersection, sql:" + rrs.getStatement();
                    LOGGER.info(errMsg);
                    throw new SQLNonTransientException(errMsg);
                }
            }
        }
    }
    // retNodesSet.size() > 0
    routeToMultiNode(isSelect, rrs, retNodesSet);
    return rrs;
}
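The intersection step shown in isolation, with assumed node names: the final route set is the intersection of every table's candidate data nodes, and an empty intersection is reported as an invalid route.

    // illustrative node names "dn1", "dn2", "dn3"
    Set<String> retNodesSet = new HashSet<>(Arrays.asList("dn1", "dn2"));
    retNodesSet.retainAll(new HashSet<>(Arrays.asList("dn2", "dn3")));
    // retNodesSet is now {"dn2"}, so both tables can be served from the same data node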
Use of com.actiontech.dble.sqlengine.mpp.ColumnRoutePair in project dble by actiontech.
The class DruidSelectParser, method changeSql.
/**
 * changeSql: add a limit if needed
 */
@Override
public void changeSql(SchemaConfig schema, RouteResultset rrs, SQLStatement stmt, LayerCachePool cachePool) throws SQLException {
    if (rrs.isFinishedRoute() || rrs.isFinishedExecute() || rrs.isNeedOptimizer()) {
        return;
    }
    tryRouteSingleTable(schema, rrs, cachePool);
    rrs.copyLimitToNodes();
    SQLSelectStatement selectStmt = (SQLSelectStatement) stmt;
    SQLSelectQuery sqlSelectQuery = selectStmt.getSelect().getQuery();
    if (sqlSelectQuery instanceof MySqlSelectQueryBlock) {
        MySqlSelectQueryBlock mysqlSelectQuery = (MySqlSelectQueryBlock) selectStmt.getSelect().getQuery();
        int limitStart = 0;
        int limitSize = schema.getDefaultMaxLimit();
        Map<String, Map<String, Set<ColumnRoutePair>>> allConditions = getAllConditions();
        boolean isNeedAddLimit = isNeedAddLimit(schema, rrs, mysqlSelectQuery, allConditions);
        if (isNeedAddLimit) {
            SQLLimit limit = new SQLLimit();
            limit.setRowCount(new SQLIntegerExpr(limitSize));
            mysqlSelectQuery.setLimit(limit);
            rrs.setLimitSize(limitSize);
            String sql = getSql(rrs, stmt, isNeedAddLimit, schema.getName());
            rrs.changeNodeSqlAfterAddLimit(sql, 0, limitSize);
        }
        SQLLimit limit = mysqlSelectQuery.getLimit();
        if (limit != null && !isNeedAddLimit) {
            SQLIntegerExpr offset = (SQLIntegerExpr) limit.getOffset();
            SQLIntegerExpr count = (SQLIntegerExpr) limit.getRowCount();
            if (offset != null) {
                limitStart = offset.getNumber().intValue();
                rrs.setLimitStart(limitStart);
            }
            if (count != null) {
                limitSize = count.getNumber().intValue();
                rrs.setLimitSize(limitSize);
            }
            if (isNeedChangeLimit(rrs)) {
                SQLLimit changedLimit = new SQLLimit();
                changedLimit.setRowCount(new SQLIntegerExpr(limitStart + limitSize));
                if (offset != null) {
                    if (limitStart < 0) {
                        String msg = "You have an error in your SQL syntax; check the manual that " + "corresponds to your MySQL server version for the right syntax to use near '" + limitStart + "'";
                        throw new SQLNonTransientException(ErrorCode.ER_PARSE_ERROR + " - " + msg);
                    } else {
                        changedLimit.setOffset(new SQLIntegerExpr(0));
                    }
                }
                mysqlSelectQuery.setLimit(changedLimit);
                String sql = getSql(rrs, stmt, isNeedAddLimit, schema.getName());
                rrs.changeNodeSqlAfterAddLimit(sql, 0, limitStart + limitSize);
            } else {
                rrs.changeNodeSqlAfterAddLimit(rrs.getStatement(), rrs.getLimitStart(), rrs.getLimitSize());
            }
        }
        rrs.setCacheAble(isNeedCache(schema));
    }
}
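A sketch of the limit rewrite with illustrative numbers: an original "LIMIT 20, 10" is pushed to each node as "LIMIT 0, 30" (offset + count rows starting from offset 0), while the RouteResultset keeps the original offset and count so they can be re-applied when the per-node results are merged.

    // illustrative values: limitStart = 20, limitSize = 10
    SQLLimit changedLimit = new SQLLimit();
    changedLimit.setOffset(new SQLIntegerExpr(0));
    changedLimit.setRowCount(new SQLIntegerExpr(20 + 10));  // limitStart + limitSize
    // mysqlSelectQuery.setLimit(changedLimit);  // each node then returns its first 30 rows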