Use of io.mycat.calcite.table.NormalTable in project Mycat2 by MyCATApache.
The class Distribution, method join:
public Optional<Distribution> join(Distribution arg) {
switch(arg.type()) {
case PHY:
switch(this.type()) {
case PHY:
NormalTable leftNormalTable = this.normalTables.get(0);
NormalTable rightNormalTable = arg.normalTables.get(0);
if (leftNormalTable.getDataNode().getTargetName().equals(rightNormalTable.getDataNode().getTargetName())) {
return Optional.of(new Distribution(this.shardingTables, this.globalTables, merge(this.normalTables, arg.normalTables)));
}
return Optional.empty();
case BROADCAST:
return Optional.of(new Distribution(merge(this.shardingTables, arg.shardingTables), merge(this.globalTables, arg.globalTables), merge(this.normalTables, arg.normalTables)));
case SHARDING:
ShardingTable shardingTable = this.shardingTables.get(0);
if (shardingTable.function().isAllPartitionInTargetName(arg.normalTables.get(0).getDataNode().getTargetName())) {
return Optional.of(new Distribution(merge(this.shardingTables, arg.shardingTables), merge(this.globalTables, arg.globalTables), merge(this.normalTables, arg.normalTables)));
}
return Optional.empty();
default:
throw new IllegalStateException("Unexpected value: " + this.type());
}
case BROADCAST:
return Optional.of(new Distribution(merge(this.shardingTables, arg.shardingTables), merge(this.globalTables, arg.globalTables), merge(this.normalTables, arg.normalTables)));
case SHARDING:
switch(this.type()) {
case PHY:
ShardingTable shardingTable = arg.shardingTables.get(0);
if (shardingTable.function().isAllPartitionInTargetName(this.normalTables.get(0).getDataNode().getTargetName())) {
return Optional.of(new Distribution(merge(this.shardingTables, arg.shardingTables), merge(this.globalTables, arg.globalTables), merge(this.normalTables, arg.normalTables)));
}
return Optional.empty();
case BROADCAST:
return Optional.of(new Distribution(merge(this.shardingTables, arg.shardingTables), merge(this.globalTables, arg.globalTables), merge(this.normalTables, arg.normalTables)));
case SHARDING:
ShardingTable leftShardingTable = this.shardingTables.get(0);
ShardingTable rightShardingTable = arg.shardingTables.get(0);
CustomRuleFunction leftShardingFuntion = leftShardingTable.getShardingFuntion();
CustomRuleFunction rightShardingFuntion = rightShardingTable.getShardingFuntion();
if (leftShardingFuntion.isSameDistribution(rightShardingFuntion) || isTargetPartitionJoin(leftShardingFuntion, rightShardingFuntion)) {
return Optional.of(new Distribution(merge(this.shardingTables, arg.shardingTables), merge(this.globalTables, arg.globalTables), merge(this.normalTables, arg.normalTables)));
}
return Optional.empty();
default:
throw new IllegalStateException("Unexpected value: " + this.type());
}
default:
throw new IllegalStateException("Unexpected value: " + arg.type());
}
}
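The merge helper called throughout join() is not part of this listing; a minimal sketch consistent with how it is used (assumption: it simply concatenates the two table lists into a fresh list, leaving both inputs untouched) would be:
import java.util.ArrayList;
import java.util.List;

// Assumed shape of the merge helper used by join(): pure concatenation.
static <T> List<T> merge(List<T> left, List<T> right) {
    List<T> out = new ArrayList<>(left.size() + right.size());
    out.addAll(left);
    out.addAll(right);
    return out;
}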
Use of io.mycat.calcite.table.NormalTable in project Mycat2 by MyCATApache.
The class AsyncMycatDataContextImpl, method getSqlMap:
public static List<PartitionGroup> getSqlMap(Map<RexNode, RexNode> constantMap, MycatView view, DrdsSqlWithParams drdsSqlWithParams, Optional<List<PartitionGroup>> hintDataMapping) {
Distribution distribution = view.getDistribution();
Distribution.Type type = distribution.type();
switch(type) {
case BROADCAST:
{
Map<String, Partition> builder = new HashMap<>();
String targetName = null;
for (GlobalTable globalTable : distribution.getGlobalTables()) {
if (targetName == null) {
int i = ThreadLocalRandom.current().nextInt(0, globalTable.getGlobalDataNode().size());
Partition partition = globalTable.getGlobalDataNode().get(i);
targetName = partition.getTargetName();
}
builder.put(globalTable.getUniqueName(), globalTable.getDataNode());
}
return Collections.singletonList(new PartitionGroup(targetName, builder));
}
case PHY:
Map<String, Partition> builder = new HashMap<>();
String targetName = null;
for (GlobalTable globalTable : distribution.getGlobalTables()) {
builder.put(globalTable.getUniqueName(), globalTable.getDataNode());
}
for (NormalTable normalTable : distribution.getNormalTables()) {
if (targetName == null) {
targetName = normalTable.getDataNode().getTargetName();
}
builder.put(normalTable.getUniqueName(), normalTable.getDataNode());
}
return Collections.singletonList(new PartitionGroup(targetName, builder));
case SHARDING:
if (hintDataMapping.isPresent()) {
return hintDataMapping.get();
}
ShardingTable shardingTable = distribution.getShardingTables().get(0);
RexBuilder rexBuilder = MycatCalciteSupport.RexBuilder;
RexNode condition = view.getCondition().orElse(MycatCalciteSupport.RexBuilder.makeLiteral(true));
List<RexNode> inputConditions = new ArrayList<>(constantMap.size() + 1);
inputConditions.add(condition);
for (Map.Entry<RexNode, RexNode> rexNodeRexNodeEntry : constantMap.entrySet()) {
inputConditions.add(rexBuilder.makeCall(SqlStdOperatorTable.EQUALS, rexNodeRexNodeEntry.getKey(), rexNodeRexNodeEntry.getValue()));
}
ParamHolder paramHolder = ParamHolder.CURRENT_THREAD_LOCAL.get();
paramHolder.setData(drdsSqlWithParams.getParams(), drdsSqlWithParams.getTypeNames());
try {
ArrayList<RexNode> res = new ArrayList<>(inputConditions.size());
MycatRexExecutor.INSTANCE.reduce(rexBuilder, inputConditions, res);
condition = res.get(0);
ValuePredicateAnalyzer predicateAnalyzer = new ValuePredicateAnalyzer(shardingTable.keyMetas(true), shardingTable.getColumns().stream().map(i -> i.getColumnName()).collect(Collectors.toList()));
Map<QueryType, List<ValueIndexCondition>> indexConditionMap = predicateAnalyzer.translateMatch(condition);
List<Partition> partitions = ValueIndexCondition.getPartitions(shardingTable.getShardingFuntion(), indexConditionMap, drdsSqlWithParams.getParams());
return mapSharding(view, partitions);
} finally {
paramHolder.clear();
}
default:
throw new IllegalStateException("Unexpected value: " + distribution.type());
}
}
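Each PartitionGroup returned above pairs one target with the physical partition chosen for every logical table in the view. A hypothetical consumer might iterate it as sketched below; the accessor names getTargetName() and getMap() are assumptions about PartitionGroup, not confirmed API:
List<PartitionGroup> groups = getSqlMap(constantMap, view, drdsSqlWithParams, Optional.empty());
for (PartitionGroup group : groups) {
    String targetName = group.getTargetName();        // assumed accessor: datasource the statement runs on
    Map<String, Partition> tableMap = group.getMap(); // assumed accessor: logical unique name -> physical partition
    // render the view's SQL against tableMap and dispatch it to targetName
}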
Use of io.mycat.calcite.table.NormalTable in project Mycat2 by MyCATApache.
The class AsyncMycatDataContextImpl, method mapSharding:
public static List<PartitionGroup> mapSharding(MycatView view, List<Partition> partitionList) {
Distribution distribution = view.getDistribution();
List<ShardingTable> shardingTableList = distribution.getShardingTables();
ShardingTable primaryShardingTable = shardingTableList.get(0);
CustomRuleFunction primaryShardingFunction = primaryShardingTable.getShardingFuntion();
HashMap<String, Partition> groupTemplate = new HashMap<>();
for (NormalTable normalTable : distribution.getNormalTables()) {
// A wrong data distribution may exist here, but a wrong distribution is never accessed
groupTemplate.put(normalTable.getUniqueName(), normalTable.getDataNode());
}
for (GlobalTable globalTable : distribution.getGlobalTables()) {
groupTemplate.put(globalTable.getUniqueName(), globalTable.getDataNode());
}
if (distribution.getShardingTables().size() == 1) {
List<PartitionGroup> res = new ArrayList<>(partitionList.size());
for (Partition partition : partitionList) {
HashMap<String, Partition> map = new HashMap<>(groupTemplate);
map.put(primaryShardingTable.getUniqueName(), partition);
res.add(new PartitionGroup(partition.getTargetName(), map));
}
return res;
} else {
List<ShardingTable> joinShardingTables = shardingTableList.subList(1, shardingTableList.size());
List<PartitionGroup> res = new ArrayList<>(partitionList.size());
for (Partition primaryPartition : partitionList) {
HashMap<String, Partition> map = new HashMap<>(groupTemplate);
map.put(primaryShardingTable.getUniqueName(), primaryPartition);
for (ShardingTable joinShardingTable : joinShardingTables) {
CustomRuleFunction joinFunction = joinShardingTable.function();
if (primaryShardingFunction.isSameDistribution(joinFunction)) {
Partition joinPartition = joinFunction.getPartition(primaryShardingFunction.indexOf(primaryPartition));
map.put(joinShardingTable.getUniqueName(), joinPartition);
} else if (primaryShardingFunction.isSameTargetFunctionDistribution(joinFunction)) {
List<Partition> joinPartitions = joinShardingTable.getPartitionsByTargetName(primaryPartition.getTargetName());
if (joinPartitions.size() != 1) {
throw new IllegalArgumentException("wrong partition " + joinPartitions + " in " + view);
}
map.put(joinShardingTable.getUniqueName(), joinPartitions.get(0));
}
}
res.add(new PartitionGroup(primaryPartition.getTargetName(), map));
}
return res;
}
}
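A worked example of the co-located branch (table names, targets, and the partition layout are made up):
// Suppose orders and order_items share one sharding function over targets t0 and t1:
//   orders      -> [orders_0@t0, orders_1@t1]
//   order_items -> [items_0@t0,  items_1@t1]
// mapSharding(view, [orders_0@t0, orders_1@t1]) then yields two groups:
//   PartitionGroup("t0", {orders -> orders_0, order_items -> items_0, ...groupTemplate})
//   PartitionGroup("t1", {orders -> orders_1, order_items -> items_1, ...groupTemplate})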
Use of io.mycat.calcite.table.NormalTable in project Mycat2 by MyCATApache.
The class VertxUpdateExecuter, method handleNormal:
private static void handleNormal(SQLStatement statement, List<VertxExecuter.EachSQL> res, List<Object> params, NormalTable table) {
Partition partition = table.getDataNode();
SQLStatement eachSql = statement.clone();
SQLExprTableSource eachTableSource = convertToFromExprDatasource(eachSql);
eachTableSource.setExpr(partition.getTable());
eachTableSource.setSchema(partition.getSchema());
res.add(new VertxExecuter.EachSQL(partition.getTargetName(), eachSql.toString(), params));
}
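The effect of the rewrite, with hypothetical names:
// Given a normal table db1.t1 whose single partition is schema db1_0, table t1_0 on target "prototype":
//   input : UPDATE db1.t1 SET a = ? WHERE id = ?
//   output: EachSQL("prototype", "UPDATE db1_0.t1_0 SET a = ? WHERE id = ?", params)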
Use of io.mycat.calcite.table.NormalTable in project Mycat2 by MyCATApache.
The class HintHandler, method onExecute:
@Override
protected Future<Void> onExecute(SQLRequest<MySqlHintStatement> request, MycatDataContext dataContext, Response response) {
MySqlHintStatement ast = request.getAst();
List<SQLCommentHint> hints = ast.getHints();
try {
if (hints.size() == 1) {
String s = SqlHints.unWrapperHint(hints.get(0).getText());
if (s.startsWith("mycat:") || s.startsWith("MYCAT:")) {
s = s.substring(6);
int bodyStartIndex = s.indexOf('{');
String cmd;
String body;
if (bodyStartIndex == -1) {
cmd = s;
body = "{}";
} else {
cmd = s.substring(0, bodyStartIndex);
body = s.substring(bodyStartIndex);
}
cmd = cmd.trim();
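// Illustrative example of the parsing above: an unwrapped hint text
// "mycat:showSlowSql{"limit":10}" yields cmd = "showSlowSql" and
// body = "{"limit":10}"; a bare "mycat:resetConfig" yields body = "{}".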
MetadataManager metadataManager = MetaClusterCurrent.wrapper(MetadataManager.class);
MycatRouterConfig routerConfig = MetaClusterCurrent.wrapper(MycatRouterConfig.class);
ReplicaSelectorManager replicaSelectorRuntime = MetaClusterCurrent.wrapper(ReplicaSelectorManager.class);
JdbcConnectionManager jdbcConnectionManager = MetaClusterCurrent.wrapper(JdbcConnectionManager.class);
MycatServer mycatServer = MetaClusterCurrent.wrapper(MycatServer.class);
if ("showErGroup".equalsIgnoreCase(cmd)) {
return showErGroup(response, metadataManager);
}
if ("loaddata".equalsIgnoreCase(cmd)) {
return loaddata(dataContext, response, body, metadataManager);
}
if ("setUserDialect".equalsIgnoreCase(cmd)) {
return setUserDialect(response, body);
}
if ("showSlowSql".equalsIgnoreCase(cmd)) {
return showSlowSql(response, body);
}
if ("showTopology".equalsIgnoreCase(cmd)) {
return showTopology(response, body, metadataManager);
}
if ("checkConfigConsistency".equalsIgnoreCase(cmd)) {
StorageManager assembleMetadataStorageManager = MetaClusterCurrent.wrapper(StorageManager.class);
boolean res = assembleMetadataStorageManager.checkConfigConsistency();
ResultSetBuilder resultSetBuilder = ResultSetBuilder.create();
resultSetBuilder.addColumnInfo("value", JDBCType.VARCHAR);
resultSetBuilder.addObjectRowPayload(Arrays.asList(res ? 1 : 0));
return response.sendResultSet(resultSetBuilder.build());
}
if ("resetConfig".equalsIgnoreCase(cmd)) {
MycatRouterConfigOps ops = ConfigUpdater.getOps();
ops.reset();
ops.commit();
return response.sendOk();
}
if ("run".equalsIgnoreCase(cmd)) {
Map<String, Object> map = JsonUtil.from(body, Map.class);
String hbt = Objects.toString(map.get("hbt"));
DrdsSqlCompiler drdsRunner = MetaClusterCurrent.wrapper(DrdsSqlCompiler.class);
Plan plan = drdsRunner.doHbt(hbt);
AsyncMycatDataContextImpl.HbtMycatDataContextImpl sqlMycatDataContext = new AsyncMycatDataContextImpl.HbtMycatDataContextImpl(dataContext, plan.getCodeExecuterContext());
ArrayBindable arrayBindable = MetaClusterCurrent.wrapper(ExecutorProvider.class).prepare(plan).getArrayBindable();
Observable<MysqlPayloadObject> mysqlPayloadObjectObservable = PrepareExecutor.getMysqlPayloadObjectObservable(arrayBindable, sqlMycatDataContext, plan.getMetaData());
return response.sendResultSet(mysqlPayloadObjectObservable);
}
if ("killThread".equalsIgnoreCase(cmd)) {
KillThreadHint killThreadHint = JsonUtil.from(body, KillThreadHint.class);
long pid = killThreadHint.getId();
dataContext.setAffectedRows(IOExecutor.kill(pid) ? 1 : 0);
return response.sendOk();
}
if ("interruptThread".equalsIgnoreCase(cmd)) {
Thread.currentThread().interrupt();
InterruptThreadHint interruptThreadHint = JsonUtil.from(body, InterruptThreadHint.class);
long pid = interruptThreadHint.getId();
dataContext.setAffectedRows(IOExecutor.interrupt(pid) ? 1 : 0);
return response.sendOk();
}
if ("showThreadInfo".equalsIgnoreCase(cmd)) {
ResultSetBuilder builder = ResultSetBuilder.create();
builder.addColumnInfo("ID", JDBCType.VARCHAR);
builder.addColumnInfo("NAME", JDBCType.VARCHAR);
builder.addColumnInfo("STATE", JDBCType.VARCHAR);
builder.addColumnInfo("STACKTRACE", JDBCType.VARCHAR);
List<Thread> threads = IOExecutor.findAllThreads();
for (Thread thread : threads) {
String name = thread.getName();
long id = thread.getId();
String state = thread.getState().name();
StackTraceElement[] stackTrace = thread.getStackTrace();
StringWriter stringWriter = new StringWriter();
for (StackTraceElement traceElement : stackTrace) {
stringWriter.write("\tat " + traceElement + "\n");
}
String stackTraceText = stringWriter.toString();
builder.addObjectRowPayload(Arrays.asList(id, name, state, stackTraceText));
}
return response.sendResultSet(builder.build());
}
if ("createSqlCache".equalsIgnoreCase(cmd)) {
MycatRouterConfigOps ops = ConfigUpdater.getOps();
SQLStatement sqlStatement = null;
if (ast.getHintStatements() != null && ast.getHintStatements().size() == 1) {
sqlStatement = ast.getHintStatements().get(0);
}
SqlCacheConfig sqlCacheConfig = JsonUtil.from(body, SqlCacheConfig.class);
if (sqlCacheConfig.getSql() == null && sqlStatement != null) {
sqlCacheConfig.setSql(sqlStatement.toString());
}
ops.putSqlCache(sqlCacheConfig);
ops.commit();
if (sqlStatement == null) {
String sql = sqlCacheConfig.getSql();
sqlStatement = SQLUtils.parseSingleMysqlStatement(sql);
}
return MycatdbCommand.execute(dataContext, response, sqlStatement);
}
if ("showSqlCaches".equalsIgnoreCase(cmd)) {
ResultSetBuilder resultSetBuilder = ResultSetBuilder.create();
resultSetBuilder.addColumnInfo("info", JDBCType.VARCHAR);
if (MetaClusterCurrent.exist(SqlResultSetService.class)) {
SqlResultSetService sqlResultSetService = MetaClusterCurrent.wrapper(SqlResultSetService.class);
sqlResultSetService.snapshot().toStringList().forEach(c -> resultSetBuilder.addObjectRowPayload(Arrays.asList(c)));
}
return response.sendResultSet(resultSetBuilder.build());
}
if ("dropSqlCache".equalsIgnoreCase(cmd)) {
MycatRouterConfigOps ops = ConfigUpdater.getOps();
SqlCacheConfig sqlCacheConfig = JsonUtil.from(body, SqlCacheConfig.class);
ops.removeSqlCache(sqlCacheConfig.getName());
ops.commit();
return response.sendOk();
}
if ("showBufferUsage".equalsIgnoreCase(cmd)) {
return response.sendResultSet(mycatServer.showBufferUsage(dataContext.getSessionId()));
}
if ("showUsers".equalsIgnoreCase(cmd)) {
ResultSetBuilder builder = ResultSetBuilder.create();
builder.addColumnInfo("username", JDBCType.VARCHAR);
builder.addColumnInfo("ip", JDBCType.VARCHAR);
builder.addColumnInfo("transactionType", JDBCType.VARCHAR);
builder.addColumnInfo("dbType", JDBCType.VARCHAR);
Authenticator authenticator = MetaClusterCurrent.wrapper(Authenticator.class);
List<UserConfig> userConfigs = authenticator.getConfigAsList();
for (UserConfig userConfig : userConfigs) {
builder.addObjectRowPayload(Arrays.asList(userConfig.getUsername(), userConfig.getIp(), userConfig.getTransactionType(), userConfig.getDialect()));
}
return response.sendResultSet(() -> builder.build());
}
if ("showSchemas".equalsIgnoreCase(cmd)) {
Map map = JsonUtil.from(body, Map.class);
String schemaName = (String) map.get("schemaName");
Collection<SchemaHandler> schemaHandlers;
if (schemaName == null) {
schemaHandlers = metadataManager.getSchemaMap().values();
} else {
schemaHandlers = Collections.singletonList(metadataManager.getSchemaMap().get(schemaName));
}
ResultSetBuilder builder = ResultSetBuilder.create();
builder.addColumnInfo("SCHEMA_NAME", JDBCType.VARCHAR).addColumnInfo("DEFAULT_TARGET_NAME", JDBCType.VARCHAR).addColumnInfo("TABLE_NAMES", JDBCType.VARCHAR);
for (SchemaHandler value : schemaHandlers) {
String SCHEMA_NAME = value.getName();
String DEFAULT_TARGET_NAME = value.defaultTargetName();
String TABLE_NAMES = String.join(",", value.logicTables().keySet());
builder.addObjectRowPayload(Arrays.asList(SCHEMA_NAME, DEFAULT_TARGET_NAME, TABLE_NAMES));
}
return response.sendResultSet(() -> builder.build());
}
if ("showTables".equalsIgnoreCase(cmd)) {
return showTables(response, body, metadataManager, routerConfig);
}
if ("setSqlTimeFilter".equalsIgnoreCase(cmd)) {
return setSqlTimeFilter(response, body, metadataManager);
}
if ("getSqlTimeFilter".equalsIgnoreCase(cmd)) {
return getSqlTimeFilter(response, body, metadataManager);
}
if ("showClusters".equalsIgnoreCase(cmd)) {
Map map = JsonUtil.from(body, Map.class);
String clusterName = (String) map.get("name");
RowBaseIterator rowBaseIterator = showClusters(clusterName);
return response.sendResultSet(rowBaseIterator);
}
if ("showNativeDataSources".equalsIgnoreCase(cmd)) {
return response.sendResultSet(mycatServer.showNativeDataSources());
}
if ("showDataSources".equalsIgnoreCase(cmd)) {
Optional<JdbcConnectionManager> connectionManager = Optional.ofNullable(jdbcConnectionManager);
Collection<JdbcDataSource> jdbcDataSources = new HashSet<>(connectionManager.map(i -> i.getDatasourceInfo()).map(i -> i.values()).orElse(Collections.emptyList()));
ResultSetBuilder resultSetBuilder = ResultSetBuilder.create();
resultSetBuilder.addColumnInfo("NAME", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("USERNAME", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("PASSWORD", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("MAX_CON", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("MIN_CON", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("EXIST_CON", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("USE_CON", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("MAX_RETRY_COUNT", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("MAX_CONNECT_TIMEOUT", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("DB_TYPE", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("URL", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("WEIGHT", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("INIT_SQL", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("INIT_SQL_GET_CONNECTION", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("INSTANCE_TYPE", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("IDLE_TIMEOUT", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("DRIVER", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("TYPE", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("IS_MYSQL", JDBCType.VARCHAR);
for (JdbcDataSource jdbcDataSource : jdbcDataSources) {
DatasourceConfig config = jdbcDataSource.getConfig();
String NAME = config.getName();
String USERNAME = config.getUser();
String PASSWORD = config.getPassword();
int MAX_CON = config.getMaxCon();
int MIN_CON = config.getMinCon();
// Note: this order must match the column order declared above
int USED_CON = jdbcDataSource.getUsedCount();
// The number of connections already held by the JDBC pool is internal state and unknown
int EXIST_CON = USED_CON;
int MAX_RETRY_COUNT = config.getMaxRetryCount();
long MAX_CONNECT_TIMEOUT = config.getMaxConnectTimeout();
String DB_TYPE = config.getDbType();
String URL = config.getUrl();
int WEIGHT = config.getWeight();
String INIT_SQL = Optional.ofNullable(config.getInitSqls()).map(o -> String.join(";", o)).orElse("");
boolean INIT_SQL_GET_CONNECTION = config.isInitSqlsGetConnection();
String INSTANCE_TYPE = Optional.ofNullable(replicaSelectorRuntime.getPhysicsInstanceByName(NAME)).map(i -> i.getType().name()).orElse(config.getInstanceType());
long IDLE_TIMEOUT = config.getIdleTimeout();
// Reserved attribute
String DRIVER = jdbcDataSource.getDataSource().toString();
String TYPE = config.getType();
boolean IS_MYSQL = jdbcDataSource.isMySQLType();
resultSetBuilder.addObjectRowPayload(Arrays.asList(NAME, USERNAME, PASSWORD, MAX_CON, MIN_CON, EXIST_CON, USED_CON, MAX_RETRY_COUNT, MAX_CONNECT_TIMEOUT, DB_TYPE, URL, WEIGHT, INIT_SQL, INIT_SQL_GET_CONNECTION, INSTANCE_TYPE, IDLE_TIMEOUT, DRIVER, TYPE, IS_MYSQL));
}
return response.sendResultSet(() -> resultSetBuilder.build());
}
if ("showHeartbeats".equalsIgnoreCase(cmd)) {
RowBaseIterator rowBaseIterator = showHeatbeatStat();
return response.sendResultSet(rowBaseIterator);
}
if ("showHeartbeatStatus".equalsIgnoreCase(cmd)) {
ResultSetBuilder builder = ResultSetBuilder.create();
builder.addColumnInfo("name", JDBCType.VARCHAR);
builder.addColumnInfo("status", JDBCType.VARCHAR);
Map<String, HeartbeatFlow> heartbeatDetectorMap = replicaSelectorRuntime.getHeartbeatDetectorMap();
for (Map.Entry<String, HeartbeatFlow> entry : heartbeatDetectorMap.entrySet()) {
String key = entry.getKey();
HeartbeatFlow value = entry.getValue();
builder.addObjectRowPayload(Arrays.asList(Objects.toString(key), Objects.toString(value.getDsStatus())));
}
return response.sendResultSet(() -> builder.build());
}
if ("showInstances".equalsIgnoreCase(cmd)) {
RowBaseIterator rowBaseIterator = showInstances();
return response.sendResultSet(rowBaseIterator);
}
if ("showReactors".equalsIgnoreCase(cmd)) {
MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
return response.sendResultSet(server.showReactors());
}
if ("showThreadPools".equalsIgnoreCase(cmd)) {
ResultSetBuilder builder = ResultSetBuilder.create();
builder.addColumnInfo("NAME", JDBCType.VARCHAR).addColumnInfo("POOL_SIZE", JDBCType.BIGINT).addColumnInfo("ACTIVE_COUNT", JDBCType.BIGINT).addColumnInfo("TASK_QUEUE_SIZE", JDBCType.BIGINT).addColumnInfo("COMPLETED_TASK", JDBCType.BIGINT).addColumnInfo("TOTAL_TASK", JDBCType.BIGINT);
return response.sendResultSet(() -> builder.build());
}
if ("showNativeBackends".equalsIgnoreCase(cmd)) {
MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
return response.sendResultSet(server.showNativeBackends());
}
if ("showConnections".equalsIgnoreCase(cmd)) {
MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
return response.sendResultSet(server.showConnections());
}
if ("showSchedules".equalsIgnoreCase(cmd)) {
ResultSetBuilder builder = ResultSetBuilder.create();
ScheduledExecutorService timer = ScheduleUtil.getTimer();
String NAME = timer.toString();
boolean IS_TERMINATED = timer.isTerminated();
boolean IS_SHUTDOWN = timer.isShutdown();
int SCHEDULE_COUNT = ScheduleUtil.getScheduleCount();
builder.addColumnInfo("NAME", JDBCType.VARCHAR).addColumnInfo("IS_TERMINATED", JDBCType.VARCHAR).addColumnInfo("IS_SHUTDOWN", JDBCType.VARCHAR).addColumnInfo("SCHEDULE_COUNT", JDBCType.BIGINT);
builder.addObjectRowPayload(Arrays.asList(NAME, IS_TERMINATED, IS_SHUTDOWN, SCHEDULE_COUNT));
return response.sendResultSet(() -> builder.build());
}
if ("showBaselines".equalsIgnoreCase(cmd)) {
ResultSetBuilder builder = ResultSetBuilder.create();
QueryPlanCache queryPlanCache = MetaClusterCurrent.wrapper(QueryPlanCache.class);
builder.addColumnInfo("BASELINE_ID", JDBCType.VARCHAR).addColumnInfo("PARAMETERIZED_SQL", JDBCType.VARCHAR).addColumnInfo("PLAN_ID", JDBCType.VARCHAR).addColumnInfo("EXTERNALIZED_PLAN", JDBCType.VARCHAR).addColumnInfo("FIXED", JDBCType.VARCHAR).addColumnInfo("ACCEPTED", JDBCType.VARCHAR);
for (Baseline baseline : queryPlanCache.list()) {
for (BaselinePlan baselinePlan : baseline.getPlanList()) {
String BASELINE_ID = String.valueOf(baselinePlan.getBaselineId());
String PARAMETERIZED_SQL = String.valueOf(baselinePlan.getSql());
String PLAN_ID = String.valueOf(baselinePlan.getId());
CodeExecuterContext attach = (CodeExecuterContext) baselinePlan.attach();
String EXTERNALIZED_PLAN = new PlanImpl(attach.getMycatRel(), attach, Collections.emptyList()).dumpPlan();
String FIXED = Optional.ofNullable(baseline.getFixPlan()).filter(i -> i.getId() == baselinePlan.getId()).map(u -> "true").orElse("false");
String ACCEPTED = "true";
builder.addObjectRowPayload(Arrays.asList(BASELINE_ID, PARAMETERIZED_SQL, PLAN_ID, EXTERNALIZED_PLAN, FIXED, ACCEPTED));
}
}
return response.sendResultSet(() -> builder.build());
}
if ("showConfigText".equalsIgnoreCase(cmd)) {
MycatRouterConfig mycatRouterConfig = MetaClusterCurrent.wrapper(MycatRouterConfig.class);
String text = JsonUtil.toJson(mycatRouterConfig);
ResultSetBuilder builder = ResultSetBuilder.create();
builder.addColumnInfo("CONFIG_TEXT", JDBCType.VARCHAR);
builder.addObjectRowPayload(Arrays.asList(text));
return response.sendResultSet(builder.build());
}
if ("setBkaJoin".equalsIgnoreCase(cmd)) {
DrdsSqlCompiler.RBO_BKA_JOIN = body.contains("1");
return response.sendOk();
}
if ("setSortMergeJoin".equalsIgnoreCase(cmd)) {
DrdsSqlCompiler.RBO_MERGE_JOIN = body.contains("1");
return response.sendOk();
}
if ("setAcceptConnect".equalsIgnoreCase(cmd)) {
boolean contains = body.contains("1");
MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
if (!contains) {
server.stopAcceptConnect();
} else {
server.resumeAcceptConnect();
}
dataContext.setAffectedRows(1);
return response.sendOk();
}
if ("setReadyToCloseSQL".equalsIgnoreCase(cmd)) {
ReadyToCloseSQLHint readyToCloseSQLHint = JsonUtil.from(body, ReadyToCloseSQLHint.class);
String sql = readyToCloseSQLHint.getSql().trim();
MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
server.setReadyToCloseSQL(sql);
dataContext.setAffectedRows(1);
return response.sendOk();
}
if ("setDebug".equalsIgnoreCase(cmd)) {
boolean contains = body.contains("1");
dataContext.setDebug(contains);
return response.sendOk();
}
if ("setVector".equalsIgnoreCase(cmd)) {
boolean contains = body.contains("1");
dataContext.setVector(contains);
return response.sendOk();
}
if ("is".equalsIgnoreCase(cmd)) {
ResultSetBuilder builder = ResultSetBuilder.create();
builder.addColumnInfo("value", JDBCType.VARCHAR);
if (body.contains("debug")) {
boolean debug = dataContext.isDebug();
DrdsSqlCompiler.DEBUG = debug;
builder.addObjectRowPayload(Arrays.asList(debug ? "1" : "0"));
}
return response.sendResultSet(builder.build());
}
if ("setBkaJoinLeftRowCountLimit".equalsIgnoreCase(cmd)) {
DrdsSqlCompiler.BKA_JOIN_LEFT_ROW_COUNT_LIMIT = Long.parseLong(body.substring(1, body.length() - 1));
return response.sendOk();
}
if ("baseline".equalsIgnoreCase(cmd)) {
Map<String, Object> map = JsonUtil.from(body, Map.class);
String command = Objects.requireNonNull(map.get("command")).toString();
long value = Long.parseLong((map.getOrDefault("value", "0")).toString());
QueryPlanCache queryPlanCache = MetaClusterCurrent.wrapper(QueryPlanCache.class);
switch(command) {
case "showAllPlans":
{
ResultSetBuilder builder = ResultSetBuilder.create();
builder.addColumnInfo("BASELINE_ID", JDBCType.VARCHAR).addColumnInfo("PARAMETERIZED_SQL", JDBCType.VARCHAR).addColumnInfo("PLAN_ID", JDBCType.VARCHAR).addColumnInfo("EXTERNALIZED_PLAN", JDBCType.VARCHAR).addColumnInfo("FIXED", JDBCType.VARCHAR).addColumnInfo("ACCEPTED", JDBCType.VARCHAR);
for (Baseline baseline : queryPlanCache.list()) {
for (BaselinePlan baselinePlan : baseline.getPlanList()) {
String BASELINE_ID = String.valueOf(baselinePlan.getBaselineId());
String PARAMETERIZED_SQL = String.valueOf(baselinePlan.getSql());
String PLAN_ID = String.valueOf(baselinePlan.getId());
CodeExecuterContext attach = (CodeExecuterContext) baselinePlan.attach();
String EXTERNALIZED_PLAN = new PlanImpl(attach.getMycatRel(), attach, Collections.emptyList()).dumpPlan();
String FIXED = Optional.ofNullable(baseline.getFixPlan()).filter(i -> i.getId() == baselinePlan.getId()).map(u -> "true").orElse("false");
String ACCEPTED = "true";
builder.addObjectRowPayload(Arrays.asList(BASELINE_ID, PARAMETERIZED_SQL, PLAN_ID, EXTERNALIZED_PLAN, FIXED, ACCEPTED));
}
}
return response.sendResultSet(() -> builder.build());
}
case "persistAllBaselines":
{
queryPlanCache.saveBaselines();
return response.sendOk();
}
case "loadBaseline":
{
queryPlanCache.loadBaseline(value);
return response.sendOk();
}
case "loadPlan":
{
queryPlanCache.loadPlan(value);
return response.sendOk();
}
case "persistPlan":
{
queryPlanCache.persistPlan(value);
return response.sendOk();
}
case "clearBaseline":
{
queryPlanCache.clearBaseline(value);
return response.sendOk();
}
case "clearPlan":
{
queryPlanCache.clearPlan(value);
return response.sendOk();
}
case "deleteBaseline":
{
queryPlanCache.deleteBaseline(value);
return response.sendOk();
}
case "deletePlan":
{
queryPlanCache.deletePlan(value);
return response.sendOk();
}
case "add":
case "fix":
{
SQLStatement sqlStatement = null;
if (ast.getHintStatements() != null && ast.getHintStatements().size() == 1) {
sqlStatement = ast.getHintStatements().get(0);
DrdsSqlWithParams drdsSqlWithParams = DrdsRunnerHelper.preParse(sqlStatement, dataContext.getDefaultSchema());
queryPlanCache.add("fix".equalsIgnoreCase(command), drdsSqlWithParams);
}
return response.sendOk();
}
default:
throw new UnsupportedOperationException();
}
}
if ("MIGRATE_LIST".equalsIgnoreCase(cmd)) {
return response.sendResultSet(MigrateUtil.list());
}
if ("MIGRATE_STOP".equalsIgnoreCase(cmd)) {
MigrateStopHint hint = JsonUtil.from(body, MigrateStopHint.class);
if (MigrateUtil.stop(hint.getId())) {
dataContext.setAffectedRows(1);
}
return response.sendOk();
}
if ("MIGRATE".equalsIgnoreCase(cmd)) {
MigrateHint migrateHint = JsonUtil.from(body, MigrateHint.class);
String name = migrateHint.getName();
MigrateHint.Input input = migrateHint.getInput();
MigrateHint.Output output = migrateHint.getOutput();
int parallelism = output.getParallelism();
MigrateUtil.MigrateJdbcOutput migrateJdbcOutput = new MigrateUtil.MigrateJdbcOutput();
migrateJdbcOutput.setParallelism(parallelism);
migrateJdbcOutput.setBatch(output.getBatch());
List<MigrateUtil.MigrateJdbcInput> migrateJdbcInputs = new ArrayList<>();
List<Flowable<Object[]>> observables = new ArrayList<>();
MetadataManager manager = MetaClusterCurrent.wrapper(MetadataManager.class);
TableHandler outputTable = manager.getTable(output.getSchemaName(), output.getTableName());
String username = Optional.ofNullable(output.getUsername()).orElseGet(new Supplier<String>() {
@Override
public String get() {
UserConfig userConfig = routerConfig.getUsers().get(0);
return userConfig.getUsername();
}
});
String password = Optional.ofNullable(output.getPassword()).orElseGet(new Supplier<String>() {
@Override
public String get() {
UserConfig userConfig = routerConfig.getUsers().get(0);
return userConfig.getPassword();
}
});
String url = Optional.ofNullable(output.getUrl()).orElseGet(() -> {
ServerConfig serverConfig = MetaClusterCurrent.wrapper(ServerConfig.class);
String ip = serverConfig.getIp();
int port = serverConfig.getPort();
return "jdbc:mysql://" + ip + ":" + port + "/mysql?useUnicode=true&characterEncoding=utf8&useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true";
});
if (input.getUrl() != null) {
String sql = input.getSql();
long count = input.getCount();
Map<String, String> properties = input.getProperties();
MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
migrateJdbcInput.setCount(count);
migrateJdbcInputs.add(migrateJdbcInput);
observables.add(MigrateUtil.read(migrateJdbcInput, input.getUrl(), input.getUsername(), input.getPassword(), sql));
} else if (input.getType() == null || "mycat".equalsIgnoreCase(input.getType())) {
TableHandler inputTable = manager.getTable(input.getSchemaName(), input.getTableName());
switch(inputTable.getType()) {
case SHARDING:
{
ShardingTable shardingTable = (ShardingTable) inputTable;
for (Partition backend : shardingTable.getBackends()) {
MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
migrateJdbcInputs.add(migrateJdbcInput);
observables.add(MigrateUtil.read(migrateJdbcInput, backend));
}
break;
}
case GLOBAL:
{
GlobalTable globalTable = (GlobalTable) inputTable;
Partition partition = globalTable.getGlobalDataNode().get(0);
MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
migrateJdbcInputs.add(migrateJdbcInput);
observables.add(MigrateUtil.read(migrateJdbcInput, partition));
break;
}
case NORMAL:
{
NormalTable normalTable = (NormalTable) inputTable;
Partition partition = normalTable.getDataNode();
MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
migrateJdbcInputs.add(migrateJdbcInput);
observables.add(MigrateUtil.read(migrateJdbcInput, partition));
break;
}
case VISUAL:
case VIEW:
case CUSTOM:
MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
migrateJdbcInputs.add(migrateJdbcInput);
observables.add(MigrateUtil.read(migrateJdbcInput, input.getTableName(), input.getSchemaName(), url, username, password));
break;
default:
throw new IllegalStateException("Unexpected value: " + inputTable.getType());
}
} else {
throw new UnsupportedOperationException();
}
String outputSchemaName = outputTable.getSchemaName();
String outputTableName = outputTable.getTableName();
String insertTemplate = getMySQLInsertTemplate(outputTable);
migrateJdbcOutput.setUsername(username);
migrateJdbcOutput.setPassword(password);
migrateJdbcOutput.setUrl(url);
migrateJdbcOutput.setInsertTemplate(insertTemplate);
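// Each per-table read below runs on its own io() thread, rows are buffered
// into chunks of output.getBatch(), and the merged stream feeds one writer.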
MigrateUtil.MigrateController migrateController = MigrateUtil.write(migrateJdbcOutput, Flowable.merge(observables.stream().map(i -> i.buffer(output.getBatch()).subscribeOn(Schedulers.io())).collect(Collectors.toList())));
MigrateUtil.MigrateScheduler scheduler = MigrateUtil.register(name, migrateJdbcInputs, migrateJdbcOutput, migrateController);
return response.sendResultSet(MigrateUtil.show(scheduler));
}
if ("BINLOG_LIST".equalsIgnoreCase(cmd)) {
return response.sendResultSet(BinlogUtil.list());
}
if ("BINLOG_STOP".equalsIgnoreCase(cmd)) {
BinlogStopHint hint = JsonUtil.from(body, BinlogStopHint.class);
if (BinlogUtil.stop(hint.getId())) {
dataContext.setAffectedRows(1);
}
return response.sendOk();
}
if ("BINLOG".equalsIgnoreCase(cmd)) {
BinlogHint binlogHint = JsonUtil.from(body, BinlogHint.class);
Objects.requireNonNull(binlogHint.getInputTableNames());
List<String> outputTableNames = binlogHint.getOutputTableNames();
if (outputTableNames == null) {
// default the output tables to the input tables when none are given
binlogHint.setOutputTableNames(binlogHint.getInputTableNames());
}
IdentityHashMap<TableHandler, TableHandler> map = new IdentityHashMap<>();
List<TableHandler> inputs = new ArrayList<>();
List<TableHandler> outputs = new ArrayList<>();
for (String inputTableName : binlogHint.getInputTableNames()) {
// String.split takes a regex, so the dot must be escaped
String[] split = inputTableName.split("\\.");
String schemaName = SQLUtils.normalize(split[0]);
String tableName = SQLUtils.normalize(split[1]);
TableHandler inputTable = metadataManager.getTable(schemaName, tableName);
inputs.add(inputTable);
}
for (String outputTableName : binlogHint.getOutputTableNames()) {
String[] split = outputTableName.split("\\.");
String schemaName = SQLUtils.normalize(split[0]);
String tableName = SQLUtils.normalize(split[1]);
TableHandler outputTable = metadataManager.getTable(schemaName, tableName);
outputs.add(outputTable);
}
for (int i = 0; i < inputs.size(); i++) {
map.put(inputs.get(i), outputs.get(i));
}
Map<String, Map<String, List<Partition>>> infoCollector = new HashMap<>();
List<MigrateUtil.MigrateController> migrateControllers = new ArrayList<>();
Set<Map.Entry<TableHandler, TableHandler>> entries = map.entrySet();
for (Map.Entry<TableHandler, TableHandler> entry : entries) {
ServerConfig serverConfig = MetaClusterCurrent.wrapper(ServerConfig.class);
TableHandler inputTable = entry.getKey();
TableHandler outputTable = entry.getValue();
UserConfig userConfig = routerConfig.getUsers().get(0);
String username = userConfig.getUsername();
String password = userConfig.getPassword();
String ip = serverConfig.getIp();
int port = serverConfig.getPort();
String url = "jdbc:mysql://" + ip + ":" + port + "/mysql?useUnicode=true&characterEncoding=utf8&useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true";
// String insertTemplate = getMySQLInsertTemplate(outputTable);
MigrateUtil.MigrateJdbcAnyOutput output = new MigrateUtil.MigrateJdbcAnyOutput();
output.setUrl(url);
output.setUsername(username);
output.setPassword(password);
List<Partition> partitions = new ArrayList<>();
switch(inputTable.getType()) {
case SHARDING:
{
ShardingTable shardingTable = (ShardingTable) inputTable;
partitions = shardingTable.getShardingFuntion().calculate(Collections.emptyMap());
break;
}
case GLOBAL:
{
GlobalTable globalTable = (GlobalTable) inputTable;
partitions = ImmutableList.of(globalTable.getGlobalDataNode().get(0));
break;
}
case NORMAL:
{
NormalTable normalTable = (NormalTable) inputTable;
partitions = ImmutableList.of(normalTable.getDataNode());
break;
}
case CUSTOM:
case VISUAL:
case VIEW:
throw new UnsupportedOperationException();
}
ReplicaSelectorManager replicaSelectorManager = MetaClusterCurrent.wrapper(ReplicaSelectorManager.class);
Map<String, List<Partition>> listMap = partitions.stream().collect(Collectors.groupingBy(partition -> replicaSelectorManager.getDatasourceNameByReplicaName(partition.getTargetName(), true, null)));
infoCollector.put(inputTable.getUniqueName(), listMap);
List<Flowable<BinlogUtil.ParamSQL>> flowables = new ArrayList<>();
for (Map.Entry<String, List<Partition>> e : listMap.entrySet()) {
flowables.add(BinlogUtil.observe(e.getKey(), e.getValue()).subscribeOn(Schedulers.io()));
}
Flowable<BinlogUtil.ParamSQL> merge = flowables.size() == 1 ? flowables.get(0) : Flowable.merge(flowables, flowables.size());
merge = merge.map(paramSQL -> {
SQLStatement sqlStatement = SQLUtils.parseSingleMysqlStatement(paramSQL.getSql());
SQLExprTableSource sqlExprTableSource = VertxUpdateExecuter.getTableSource(sqlStatement);
MycatSQLExprTableSourceUtil.setSqlExprTableSource(outputTable.getSchemaName(), outputTable.getTableName(), sqlExprTableSource);
paramSQL.setSql(sqlStatement.toString());
return paramSQL;
});
MigrateUtil.MigrateController migrateController = MigrateUtil.writeSql(output, merge);
migrateControllers.add(migrateController);
}
BinlogUtil.BinlogScheduler scheduler = BinlogUtil.BinlogScheduler.of(UUID.randomUUID().toString(), binlogHint.getName(), infoCollector, migrateControllers);
BinlogUtil.register(scheduler);
return response.sendResultSet(BinlogUtil.list(Collections.singletonList(scheduler)));
}
mycatDmlHandler(cmd, body);
return response.sendOk();
}
}
return response.sendOk();
} catch (Throwable throwable) {
return response.sendError(throwable);
}
}
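Every branch above is reached through a hint whose unwrapped text is "mycat:" (or "MYCAT:") followed by the command name and an optional JSON body. A sketch of texts the parser accepts (payloads illustrative; the surrounding comment wrapper is whatever SqlHints.unWrapperHint strips):
// "mycat:resetConfig"                       -> cmd "resetConfig",  body "{}"
// "mycat:showTopology{"schemaName":"db1"}"  -> cmd "showTopology", body "{"schemaName":"db1"}"
// "MYCAT:killThread{"id":42}"               -> cmd "killThread",   body "{"id":42}"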