Example 21 with JdbcConnectionManager

Use of io.mycat.datasource.jdbc.datasource.JdbcConnectionManager in project Mycat2 by MyCATApache.

In class MycatMySQLManagerImpl, method getWriteableConnection:

@Override
@SneakyThrows
public Connection getWriteableConnection(String name) {
    JdbcConnectionManager jdbcConnectionManager = MetaClusterCurrent.wrapper(JdbcConnectionManager.class);
    Map<String, JdbcDataSource> datasourceInfo = jdbcConnectionManager.getDatasourceInfo();
    JdbcDataSource jdbcDataSource = datasourceInfo.get(name);
    if (jdbcDataSource == null)
        return null;
    return jdbcDataSource.getDataSource().getConnection();
}
Also used : JdbcDataSource(io.mycat.datasource.jdbc.datasource.JdbcDataSource) JdbcConnectionManager(io.mycat.datasource.jdbc.datasource.JdbcConnectionManager) SneakyThrows(lombok.SneakyThrows)
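
A minimal caller-side sketch of the same lookup, assuming the Mycat2 runtime has already populated MetaClusterCurrent; the class name and the datasource name "prototypeDs" are hypothetical, and only calls already shown above are used.

import io.mycat.MetaClusterCurrent;
import io.mycat.datasource.jdbc.datasource.JdbcConnectionManager;
import io.mycat.datasource.jdbc.datasource.JdbcDataSource;

import java.sql.Connection;
import java.sql.SQLException;
import java.util.Map;

// Look up a named datasource and borrow a raw JDBC connection from its pool,
// releasing it with try-with-resources.
public class WriteableConnectionSketch {
    public static void main(String[] args) throws SQLException {
        JdbcConnectionManager manager = MetaClusterCurrent.wrapper(JdbcConnectionManager.class);
        Map<String, JdbcDataSource> datasourceInfo = manager.getDatasourceInfo();
        // "prototypeDs" is a hypothetical datasource name
        JdbcDataSource dataSource = datasourceInfo.get("prototypeDs");
        if (dataSource == null) {
            System.out.println("no such datasource");
            return;
        }
        try (Connection connection = dataSource.getDataSource().getConnection()) {
            System.out.println("connected to " + connection.getMetaData().getURL());
        }
    }
}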

Example 22 with JdbcConnectionManager

Use of io.mycat.datasource.jdbc.datasource.JdbcConnectionManager in project Mycat2 by MyCATApache.

In class StatisticCenter, method init:

@SneakyThrows
public void init() {
    if (init) {
        return;
    }
    init = true;
    JdbcConnectionManager jdbcConnectionManager = MetaClusterCurrent.wrapper(JdbcConnectionManager.class);
    try (DefaultConnection prototype = jdbcConnectionManager.getConnection(MetadataManager.getPrototype())) {
        Connection rawConnection = prototype.getRawConnection();
        JdbcUtils.execute(rawConnection, "CREATE TABLE IF NOT EXISTS mycat.`analyze_table` (\n" + "  `table_rows` bigint(20) NOT NULL,\n" + "  `name` varchar(64) NOT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4");
        List<Map<String, Object>> maps = JdbcUtils.executeQuery(rawConnection, "select table_rows as `table_rows`,name from mycat.`analyze_table`", Collections.emptyList());
        for (Map<String, Object> map : maps) {
            Number table_rows = (Number) map.get("table_rows");
            String name = (String) map.get("name");
            String[] strings = name.split("_");
            StatisticObject statisticObject = new StatisticObject();
            statisticObject.setRowCount(table_rows.doubleValue());
            statisticMap.put(Key.of(strings[0], strings[1]), statisticObject);
        }
    }
}
Also used : DefaultConnection(io.mycat.datasource.jdbc.datasource.DefaultConnection) Connection(java.sql.Connection) DefaultConnection(io.mycat.datasource.jdbc.datasource.DefaultConnection) JdbcConnectionManager(io.mycat.datasource.jdbc.datasource.JdbcConnectionManager) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
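
A short sketch of reading those persisted row counts back with the same prototype-connection pattern; it reuses only the calls demonstrated above (MetaClusterCurrent.wrapper, getConnection, getRawConnection, JdbcUtils.executeQuery), and the class name is hypothetical.

import com.alibaba.druid.util.JdbcUtils;
import io.mycat.MetaClusterCurrent;
import io.mycat.MetadataManager;
import io.mycat.datasource.jdbc.datasource.DefaultConnection;
import io.mycat.datasource.jdbc.datasource.JdbcConnectionManager;

import java.sql.Connection;
import java.util.Collections;
import java.util.List;
import java.util.Map;

// Dump the rows of mycat.analyze_table through the prototype connection.
public class AnalyzeTableDumpSketch {
    public static void dump() throws Exception {
        JdbcConnectionManager manager = MetaClusterCurrent.wrapper(JdbcConnectionManager.class);
        try (DefaultConnection prototype = manager.getConnection(MetadataManager.getPrototype())) {
            Connection raw = prototype.getRawConnection();
            List<Map<String, Object>> rows = JdbcUtils.executeQuery(raw,
                    "select table_rows as `table_rows`, name from mycat.`analyze_table`",
                    Collections.emptyList());
            for (Map<String, Object> row : rows) {
                System.out.println(row.get("name") + " -> " + row.get("table_rows"));
            }
        }
    }
}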

Example 23 with JdbcConnectionManager

Use of io.mycat.datasource.jdbc.datasource.JdbcConnectionManager in project Mycat2 by MyCATApache.

In class TruncateSQLHandler, method onExecute:

@Override
protected Future<Void> onExecute(SQLRequest<SQLTruncateStatement> request, MycatDataContext dataContext, Response response) {
    SQLTruncateStatement truncateStatement = request.getAst();
    MetadataManager metadataManager = MetaClusterCurrent.wrapper(MetadataManager.class);
    JdbcConnectionManager jdbcConnectionManager = MetaClusterCurrent.wrapper(JdbcConnectionManager.class);
    for (SQLExprTableSource source : new ArrayList<>(truncateStatement.getTableSources())) {
        resolveSQLExprTableSource(source, dataContext);
        SQLTruncateStatement eachTruncateStatement = clone(truncateStatement);
        eachTruncateStatement.getTableSources().clear();
        eachTruncateStatement.addTableSource(source.getName());
        TableHandler table = metadataManager.getTable(SQLUtils.normalize(source.getSchema()), SQLUtils.normalize(source.getTableName()));
        executeOnDataNodes(eachTruncateStatement, jdbcConnectionManager, getDataNodes(table));
    }
    return response.sendOk();
}
Also used : ArrayList(java.util.ArrayList) SQLExprTableSource(com.alibaba.druid.sql.ast.statement.SQLExprTableSource) JdbcConnectionManager(io.mycat.datasource.jdbc.datasource.JdbcConnectionManager) SQLTruncateStatement(com.alibaba.druid.sql.ast.statement.SQLTruncateStatement)
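
The per-partition rewrite can be sketched in isolation: clone the parsed TRUNCATE statement for each physical partition and point its table source at that partition. The physical schema and table names are hypothetical, and MycatSQLExprTableSourceUtil.setSqlExprTableSource is used with the same signature that appears in the HintHandler example below.

import com.alibaba.druid.sql.SQLUtils;
import com.alibaba.druid.sql.ast.statement.SQLExprTableSource;
import com.alibaba.druid.sql.ast.statement.SQLTruncateStatement;
import io.mycat.util.MycatSQLExprTableSourceUtil;

import java.util.Arrays;
import java.util.List;

// Rewrite one logical TRUNCATE into one statement per physical partition.
public class TruncateFanOutSketch {
    public static void main(String[] args) {
        SQLTruncateStatement template = (SQLTruncateStatement)
                SQLUtils.parseSingleMysqlStatement("TRUNCATE TABLE db1.travelrecord");
        // hypothetical physical partitions of db1.travelrecord
        List<String[]> partitions = Arrays.asList(
                new String[] { "db1_0", "travelrecord_0" },
                new String[] { "db1_0", "travelrecord_1" });
        for (String[] partition : partitions) {
            SQLTruncateStatement each = (SQLTruncateStatement)
                    SQLUtils.parseSingleMysqlStatement(template.toString());
            SQLExprTableSource tableSource = each.getTableSources().get(0);
            MycatSQLExprTableSourceUtil.setSqlExprTableSource(partition[0], partition[1], tableSource);
            System.out.println(each); // e.g. TRUNCATE TABLE db1_0.travelrecord_0
        }
    }
}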

Example 24 with JdbcConnectionManager

Use of io.mycat.datasource.jdbc.datasource.JdbcConnectionManager in project Mycat2 by MyCATApache.

In class HintHandler, method onExecute:

@Override
protected Future<Void> onExecute(SQLRequest<MySqlHintStatement> request, MycatDataContext dataContext, Response response) {
    MySqlHintStatement ast = request.getAst();
    List<SQLCommentHint> hints = ast.getHints();
    try {
        if (hints.size() == 1) {
            String s = SqlHints.unWrapperHint(hints.get(0).getText());
            if (s.startsWith("mycat:") || s.startsWith("MYCAT:")) {
                s = s.substring(6);
                int bodyStartIndex = s.indexOf('{');
                String cmd;
                String body;
                if (bodyStartIndex == -1) {
                    cmd = s;
                    body = "{}";
                } else {
                    cmd = s.substring(0, bodyStartIndex);
                    body = s.substring(bodyStartIndex);
                }
                cmd = cmd.trim();
                MetadataManager metadataManager = MetaClusterCurrent.wrapper(MetadataManager.class);
                MycatRouterConfig routerConfig = MetaClusterCurrent.wrapper(MycatRouterConfig.class);
                ReplicaSelectorManager replicaSelectorRuntime = MetaClusterCurrent.wrapper(ReplicaSelectorManager.class);
                JdbcConnectionManager jdbcConnectionManager = MetaClusterCurrent.wrapper(JdbcConnectionManager.class);
                MycatServer mycatServer = MetaClusterCurrent.wrapper(MycatServer.class);
                if ("showErGroup".equalsIgnoreCase(cmd)) {
                    return showErGroup(response, metadataManager);
                }
                if ("loaddata".equalsIgnoreCase(cmd)) {
                    return loaddata(dataContext, response, body, metadataManager);
                }
                if ("setUserDialect".equalsIgnoreCase(cmd)) {
                    return setUserDialect(response, body);
                }
                if ("showSlowSql".equalsIgnoreCase(cmd)) {
                    return showSlowSql(response, body);
                }
                if ("showTopology".equalsIgnoreCase(cmd)) {
                    return showTopology(response, body, metadataManager);
                }
                if ("checkConfigConsistency".equalsIgnoreCase(cmd)) {
                    StorageManager assembleMetadataStorageManager = MetaClusterCurrent.wrapper(StorageManager.class);
                    boolean res = assembleMetadataStorageManager.checkConfigConsistency();
                    ResultSetBuilder resultSetBuilder = ResultSetBuilder.create();
                    resultSetBuilder.addColumnInfo("value", JDBCType.VARCHAR);
                    resultSetBuilder.addObjectRowPayload(Arrays.asList(res ? 1 : 0));
                    return response.sendResultSet(resultSetBuilder.build());
                }
                if ("resetConfig".equalsIgnoreCase(cmd)) {
                    MycatRouterConfigOps ops = ConfigUpdater.getOps();
                    ops.reset();
                    ops.commit();
                    return response.sendOk();
                }
                if ("run".equalsIgnoreCase(cmd)) {
                    Map<String, Object> map = JsonUtil.from(body, Map.class);
                    String hbt = Objects.toString(map.get("hbt"));
                    DrdsSqlCompiler drdsRunner = MetaClusterCurrent.wrapper(DrdsSqlCompiler.class);
                    Plan plan = drdsRunner.doHbt(hbt);
                    AsyncMycatDataContextImpl.HbtMycatDataContextImpl sqlMycatDataContext = new AsyncMycatDataContextImpl.HbtMycatDataContextImpl(dataContext, plan.getCodeExecuterContext());
                    ArrayBindable arrayBindable = MetaClusterCurrent.wrapper(ExecutorProvider.class).prepare(plan).getArrayBindable();
                    Observable<MysqlPayloadObject> mysqlPayloadObjectObservable = PrepareExecutor.getMysqlPayloadObjectObservable(arrayBindable, sqlMycatDataContext, plan.getMetaData());
                    return response.sendResultSet(mysqlPayloadObjectObservable);
                }
                if ("killThread".equalsIgnoreCase(cmd)) {
                    KillThreadHint killThreadHint = JsonUtil.from(body, KillThreadHint.class);
                    long pid = killThreadHint.getId();
                    dataContext.setAffectedRows(IOExecutor.kill(pid) ? 1 : 0);
                    return response.sendOk();
                }
                if ("interruptThread".equalsIgnoreCase(cmd)) {
                    Thread.currentThread().interrupt();
                    InterruptThreadHint interruptThreadHint = JsonUtil.from(body, InterruptThreadHint.class);
                    long pid = interruptThreadHint.getId();
                    dataContext.setAffectedRows(IOExecutor.interrupt(pid) ? 1 : 0);
                    return response.sendOk();
                }
                if ("showThreadInfo".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("ID", JDBCType.VARCHAR);
                    builder.addColumnInfo("NAME", JDBCType.VARCHAR);
                    builder.addColumnInfo("STATE", JDBCType.VARCHAR);
                    builder.addColumnInfo("STACKTRACE", JDBCType.VARCHAR);
                    List<Thread> threads = IOExecutor.findAllThreads();
                    for (Thread thread : threads) {
                        String name = thread.getName();
                        long id = thread.getId();
                        String state = thread.getState().name();
                        StackTraceElement[] stackTrace = thread.getStackTrace();
                        StringWriter stringWriter = new StringWriter();
                        for (StackTraceElement traceElement : stackTrace) {
                            stringWriter.write("\tat " + traceElement);
                        }
                        String stackTraceText = stringWriter.toString();
                        builder.addObjectRowPayload(Arrays.asList(id, name, state, stackTraceText));
                    }
                    return response.sendResultSet(builder.build());
                }
                if ("createSqlCache".equalsIgnoreCase(cmd)) {
                    MycatRouterConfigOps ops = ConfigUpdater.getOps();
                    SQLStatement sqlStatement = null;
                    if (ast.getHintStatements() != null && ast.getHintStatements().size() == 1) {
                        sqlStatement = ast.getHintStatements().get(0);
                    }
                    SqlCacheConfig sqlCacheConfig = JsonUtil.from(body, SqlCacheConfig.class);
                    if (sqlCacheConfig.getSql() == null && sqlStatement != null) {
                        sqlCacheConfig.setSql(sqlStatement.toString());
                    }
                    ops.putSqlCache(sqlCacheConfig);
                    ops.commit();
                    if (sqlStatement == null) {
                        String sql = sqlCacheConfig.getSql();
                        sqlStatement = SQLUtils.parseSingleMysqlStatement(sql);
                    }
                    return MycatdbCommand.execute(dataContext, response, sqlStatement);
                }
                if ("showSqlCaches".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder resultSetBuilder = ResultSetBuilder.create();
                    resultSetBuilder.addColumnInfo("info", JDBCType.VARCHAR);
                    if (MetaClusterCurrent.exist(SqlResultSetService.class)) {
                        SqlResultSetService sqlResultSetService = MetaClusterCurrent.wrapper(SqlResultSetService.class);
                        sqlResultSetService.snapshot().toStringList().forEach(c -> resultSetBuilder.addObjectRowPayload(Arrays.asList(c)));
                    }
                    return response.sendResultSet(resultSetBuilder.build());
                }
                if ("dropSqlCache".equalsIgnoreCase(cmd)) {
                    MycatRouterConfigOps ops = ConfigUpdater.getOps();
                    SqlCacheConfig sqlCacheConfig = JsonUtil.from(body, SqlCacheConfig.class);
                    ops.removeSqlCache(sqlCacheConfig.getName());
                    ops.commit();
                    return response.sendOk();
                }
                if ("showBufferUsage".equalsIgnoreCase(cmd)) {
                    return response.sendResultSet(mycatServer.showBufferUsage(dataContext.getSessionId()));
                }
                if ("showUsers".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("username", JDBCType.VARCHAR);
                    builder.addColumnInfo("ip", JDBCType.VARCHAR);
                    builder.addColumnInfo("transactionType", JDBCType.VARCHAR);
                    builder.addColumnInfo("dbType", JDBCType.VARCHAR);
                    Authenticator authenticator = MetaClusterCurrent.wrapper(Authenticator.class);
                    List<UserConfig> userConfigs = authenticator.getConfigAsList();
                    for (UserConfig userConfig : userConfigs) {
                        builder.addObjectRowPayload(Arrays.asList(userConfig.getUsername(), userConfig.getIp(), userConfig.getTransactionType(), userConfig.getDialect()));
                    }
                    return response.sendResultSet(() -> builder.build());
                }
                if ("showSchemas".equalsIgnoreCase(cmd)) {
                    Map map = JsonUtil.from(body, Map.class);
                    String schemaName = (String) map.get("schemaName");
                    Collection<SchemaHandler> schemaHandlers;
                    if (schemaName == null) {
                        schemaHandlers = metadataManager.getSchemaMap().values();
                    } else {
                        schemaHandlers = Collections.singletonList(metadataManager.getSchemaMap().get(schemaName));
                    }
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("SCHEMA_NAME", JDBCType.VARCHAR).addColumnInfo("DEFAULT_TARGET_NAME", JDBCType.VARCHAR).addColumnInfo("TABLE_NAMES", JDBCType.VARCHAR);
                    for (SchemaHandler value : schemaHandlers) {
                        String SCHEMA_NAME = value.getName();
                        String DEFAULT_TARGET_NAME = value.defaultTargetName();
                        String TABLE_NAMES = String.join(",", value.logicTables().keySet());
                        builder.addObjectRowPayload(Arrays.asList(SCHEMA_NAME, DEFAULT_TARGET_NAME, TABLE_NAMES));
                    }
                    return response.sendResultSet(() -> builder.build());
                }
                if ("showTables".equalsIgnoreCase(cmd)) {
                    return showTables(response, body, metadataManager, routerConfig);
                }
                if ("setSqlTimeFilter".equalsIgnoreCase(cmd)) {
                    return setSqlTimeFilter(response, body, metadataManager);
                }
                if ("getSqlTimeFilter".equalsIgnoreCase(cmd)) {
                    return getSqlTimeFilter(response, body, metadataManager);
                }
                if ("showClusters".equalsIgnoreCase(cmd)) {
                    Map map = JsonUtil.from(body, Map.class);
                    String clusterName = (String) map.get("name");
                    RowBaseIterator rowBaseIterator = showClusters(clusterName);
                    return response.sendResultSet(rowBaseIterator);
                }
                if ("showNativeDataSources".equalsIgnoreCase(cmd)) {
                    return response.sendResultSet(mycatServer.showNativeDataSources());
                }
                if ("showDataSources".equalsIgnoreCase(cmd)) {
                    Optional<JdbcConnectionManager> connectionManager = Optional.ofNullable(jdbcConnectionManager);
                    Collection<JdbcDataSource> jdbcDataSources = new HashSet<>(connectionManager.map(i -> i.getDatasourceInfo()).map(i -> i.values()).orElse(Collections.emptyList()));
                    ResultSetBuilder resultSetBuilder = ResultSetBuilder.create();
                    resultSetBuilder.addColumnInfo("NAME", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("USERNAME", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("PASSWORD", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("MAX_CON", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("MIN_CON", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("EXIST_CON", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("USE_CON", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("MAX_RETRY_COUNT", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("MAX_CONNECT_TIMEOUT", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("DB_TYPE", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("URL", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("WEIGHT", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("INIT_SQL", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("INIT_SQL_GET_CONNECTION", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("INSTANCE_TYPE", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("IDLE_TIMEOUT", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("DRIVER", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("TYPE", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("IS_MYSQL", JDBCType.VARCHAR);
                    for (JdbcDataSource jdbcDataSource : jdbcDataSources) {
                        DatasourceConfig config = jdbcDataSource.getConfig();
                        String NAME = config.getName();
                        String USERNAME = config.getUser();
                        String PASSWORD = config.getPassword();
                        int MAX_CON = config.getMaxCon();
                        int MIN_CON = config.getMinCon();
                        // note the display order
                        int USED_CON = jdbcDataSource.getUsedCount();
                        // the number of connections already held inside the JDBC pool is internal state and unknown
                        int EXIST_CON = USED_CON;
                        int MAX_RETRY_COUNT = config.getMaxRetryCount();
                        long MAX_CONNECT_TIMEOUT = config.getMaxConnectTimeout();
                        String DB_TYPE = config.getDbType();
                        String URL = config.getUrl();
                        int WEIGHT = config.getWeight();
                        String INIT_SQL = Optional.ofNullable(config.getInitSqls()).map(o -> String.join(";", o)).orElse("");
                        boolean INIT_SQL_GET_CONNECTION = config.isInitSqlsGetConnection();
                        String INSTANCE_TYPE = Optional.ofNullable(replicaSelectorRuntime.getPhysicsInstanceByName(NAME)).map(i -> i.getType().name()).orElse(config.getInstanceType());
                        long IDLE_TIMEOUT = config.getIdleTimeout();
                        // reserved attribute
                        String DRIVER = jdbcDataSource.getDataSource().toString();
                        String TYPE = config.getType();
                        boolean IS_MYSQL = jdbcDataSource.isMySQLType();
                        resultSetBuilder.addObjectRowPayload(Arrays.asList(NAME, USERNAME, PASSWORD, MAX_CON, MIN_CON, EXIST_CON, USED_CON, MAX_RETRY_COUNT, MAX_CONNECT_TIMEOUT, DB_TYPE, URL, WEIGHT, INIT_SQL, INIT_SQL_GET_CONNECTION, INSTANCE_TYPE, IDLE_TIMEOUT, DRIVER, TYPE, IS_MYSQL));
                    }
                    return response.sendResultSet(() -> resultSetBuilder.build());
                }
                if ("showHeartbeats".equalsIgnoreCase(cmd)) {
                    RowBaseIterator rowBaseIterator = showHeatbeatStat();
                    return response.sendResultSet(rowBaseIterator);
                }
                if ("showHeartbeatStatus".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("name", JDBCType.VARCHAR);
                    builder.addColumnInfo("status", JDBCType.VARCHAR);
                    Map<String, HeartbeatFlow> heartbeatDetectorMap = replicaSelectorRuntime.getHeartbeatDetectorMap();
                    for (Map.Entry<String, HeartbeatFlow> entry : heartbeatDetectorMap.entrySet()) {
                        String key = entry.getKey();
                        HeartbeatFlow value = entry.getValue();
                        builder.addObjectRowPayload(Arrays.asList(Objects.toString(key), Objects.toString(value.getDsStatus())));
                    }
                    return response.sendResultSet(() -> builder.build());
                }
                if ("showInstances".equalsIgnoreCase(cmd)) {
                    RowBaseIterator rowBaseIterator = showInstances();
                    return response.sendResultSet(rowBaseIterator);
                }
                if ("showReactors".equalsIgnoreCase(cmd)) {
                    MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
                    return response.sendResultSet(server.showReactors());
                }
                if ("showThreadPools".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("NAME", JDBCType.VARCHAR).addColumnInfo("POOL_SIZE", JDBCType.BIGINT).addColumnInfo("ACTIVE_COUNT", JDBCType.BIGINT).addColumnInfo("TASK_QUEUE_SIZE", JDBCType.BIGINT).addColumnInfo("COMPLETED_TASK", JDBCType.BIGINT).addColumnInfo("TOTAL_TASK", JDBCType.BIGINT);
                    return response.sendResultSet(() -> builder.build());
                }
                if ("showNativeBackends".equalsIgnoreCase(cmd)) {
                    MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
                    return response.sendResultSet(server.showNativeBackends());
                }
                if ("showConnections".equalsIgnoreCase(cmd)) {
                    MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
                    return response.sendResultSet(server.showConnections());
                }
                if ("showSchedules".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    ScheduledExecutorService timer = ScheduleUtil.getTimer();
                    String NAME = timer.toString();
                    boolean IS_TERMINATED = timer.isTerminated();
                    boolean IS_SHUTDOWN = timer.isShutdown();
                    int SCHEDULE_COUNT = ScheduleUtil.getScheduleCount();
                    builder.addColumnInfo("NAME", JDBCType.VARCHAR).addColumnInfo("IS_TERMINATED", JDBCType.VARCHAR).addColumnInfo("IS_SHUTDOWN", JDBCType.VARCHAR).addColumnInfo("SCHEDULE_COUNT", JDBCType.BIGINT);
                    builder.addObjectRowPayload(Arrays.asList(NAME, IS_TERMINATED, IS_SHUTDOWN, SCHEDULE_COUNT));
                    return response.sendResultSet(() -> builder.build());
                }
                if ("showBaselines".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    QueryPlanCache queryPlanCache = MetaClusterCurrent.wrapper(QueryPlanCache.class);
                    builder.addColumnInfo("BASELINE_ID", JDBCType.VARCHAR).addColumnInfo("PARAMETERIZED_SQL", JDBCType.VARCHAR).addColumnInfo("PLAN_ID", JDBCType.VARCHAR).addColumnInfo("EXTERNALIZED_PLAN", JDBCType.VARCHAR).addColumnInfo("FIXED", JDBCType.VARCHAR).addColumnInfo("ACCEPTED", JDBCType.VARCHAR);
                    for (Baseline baseline : queryPlanCache.list()) {
                        for (BaselinePlan baselinePlan : baseline.getPlanList()) {
                            String BASELINE_ID = String.valueOf(baselinePlan.getBaselineId());
                            String PARAMETERIZED_SQL = String.valueOf(baselinePlan.getSql());
                            String PLAN_ID = String.valueOf(baselinePlan.getId());
                            CodeExecuterContext attach = (CodeExecuterContext) baselinePlan.attach();
                            String EXTERNALIZED_PLAN = new PlanImpl(attach.getMycatRel(), attach, Collections.emptyList()).dumpPlan();
                            String FIXED = Optional.ofNullable(baseline.getFixPlan()).filter(i -> i.getId() == baselinePlan.getId()).map(u -> "true").orElse("false");
                            String ACCEPTED = "true";
                            builder.addObjectRowPayload(Arrays.asList(BASELINE_ID, PARAMETERIZED_SQL, PLAN_ID, EXTERNALIZED_PLAN, FIXED, ACCEPTED));
                        }
                    }
                    return response.sendResultSet(() -> builder.build());
                }
                if ("showConfigText".equalsIgnoreCase(cmd)) {
                    MycatRouterConfig mycatRouterConfig = MetaClusterCurrent.wrapper(MycatRouterConfig.class);
                    String text = JsonUtil.toJson(mycatRouterConfig);
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("CONFIG_TEXT", JDBCType.VARCHAR);
                    builder.addObjectRowPayload(Arrays.asList(text));
                    return response.sendResultSet(builder.build());
                }
                if ("setBkaJoin".equalsIgnoreCase(cmd)) {
                    DrdsSqlCompiler.RBO_BKA_JOIN = body.contains("1");
                    return response.sendOk();
                }
                if ("setSortMergeJoin".equalsIgnoreCase(cmd)) {
                    DrdsSqlCompiler.RBO_MERGE_JOIN = body.contains("1");
                    return response.sendOk();
                }
                if ("setAcceptConnect".equalsIgnoreCase(cmd)) {
                    boolean contains = body.contains("1");
                    MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
                    if (!contains) {
                        server.stopAcceptConnect();
                    } else {
                        server.resumeAcceptConnect();
                    }
                    dataContext.setAffectedRows(1);
                    return response.sendOk();
                }
                if ("setReadyToCloseSQL".equalsIgnoreCase(cmd)) {
                    ReadyToCloseSQLHint readyToCloseSQLHint = JsonUtil.from(body, ReadyToCloseSQLHint.class);
                    String sql = readyToCloseSQLHint.getSql().trim();
                    MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
                    server.setReadyToCloseSQL(sql);
                    dataContext.setAffectedRows(1);
                    return response.sendOk();
                }
                if ("setDebug".equalsIgnoreCase(cmd)) {
                    boolean contains = body.contains("1");
                    dataContext.setDebug(contains);
                    return response.sendOk();
                }
                if ("setVector".equalsIgnoreCase(cmd)) {
                    boolean contains = body.contains("1");
                    dataContext.setVector(contains);
                    return response.sendOk();
                }
                if ("is".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("value", JDBCType.VARCHAR);
                    if (body.contains("debug")) {
                        boolean debug = dataContext.isDebug();
                        DrdsSqlCompiler.DEBUG = debug;
                        builder.addObjectRowPayload(Arrays.asList(debug ? "1" : "0"));
                    }
                    return response.sendResultSet(builder.build());
                }
                if ("setBkaJoinLeftRowCountLimit".equalsIgnoreCase(cmd)) {
                    DrdsSqlCompiler.BKA_JOIN_LEFT_ROW_COUNT_LIMIT = Long.parseLong(body.substring(1, body.length() - 1));
                    return response.sendOk();
                }
                if ("baseline".equalsIgnoreCase(cmd)) {
                    Map<String, Object> map = JsonUtil.from(body, Map.class);
                    String command = Objects.requireNonNull(map.get("command")).toString().toLowerCase();
                    long value = Long.parseLong((map.getOrDefault("value", "0")).toString());
                    QueryPlanCache queryPlanCache = MetaClusterCurrent.wrapper(QueryPlanCache.class);
                    switch(command) {
                        case "showAllPlans":
                            {
                                ResultSetBuilder builder = ResultSetBuilder.create();
                                builder.addColumnInfo("BASELINE_ID", JDBCType.VARCHAR).addColumnInfo("PARAMETERIZED_SQL", JDBCType.VARCHAR).addColumnInfo("PLAN_ID", JDBCType.VARCHAR).addColumnInfo("EXTERNALIZED_PLAN", JDBCType.VARCHAR).addColumnInfo("FIXED", JDBCType.VARCHAR).addColumnInfo("ACCEPTED", JDBCType.VARCHAR);
                                for (Baseline baseline : queryPlanCache.list()) {
                                    for (BaselinePlan baselinePlan : baseline.getPlanList()) {
                                        String BASELINE_ID = String.valueOf(baselinePlan.getBaselineId());
                                        String PARAMETERIZED_SQL = String.valueOf(baselinePlan.getSql());
                                        String PLAN_ID = String.valueOf(baselinePlan.getId());
                                        CodeExecuterContext attach = (CodeExecuterContext) baselinePlan.attach();
                                        String EXTERNALIZED_PLAN = new PlanImpl(attach.getMycatRel(), attach, Collections.emptyList()).dumpPlan();
                                        String FIXED = Optional.ofNullable(baseline.getFixPlan()).filter(i -> i.getId() == baselinePlan.getId()).map(u -> "true").orElse("false");
                                        String ACCEPTED = "true";
                                        builder.addObjectRowPayload(Arrays.asList(BASELINE_ID, PARAMETERIZED_SQL, PLAN_ID, EXTERNALIZED_PLAN, FIXED, ACCEPTED));
                                    }
                                }
                                return response.sendResultSet(() -> builder.build());
                            }
                        case "persistAllBaselines":
                            {
                                queryPlanCache.saveBaselines();
                                return response.sendOk();
                            }
                        case "loadBaseline":
                            {
                                queryPlanCache.loadBaseline(value);
                                return response.sendOk();
                            }
                        case "loadPlan":
                            {
                                queryPlanCache.loadPlan(value);
                                return response.sendOk();
                            }
                        case "persistPlan":
                            {
                                queryPlanCache.persistPlan(value);
                                return response.sendOk();
                            }
                        case "clearBaseline":
                            {
                                queryPlanCache.clearBaseline(value);
                                return response.sendOk();
                            }
                        case "clearPlan":
                            {
                                queryPlanCache.clearPlan(value);
                                return response.sendOk();
                            }
                        case "deleteBaseline":
                            {
                                queryPlanCache.deleteBaseline(value);
                                return response.sendOk();
                            }
                        case "deletePlan":
                            {
                                queryPlanCache.deletePlan(value);
                                return response.sendOk();
                            }
                        case "add":
                        case "fix":
                            {
                                SQLStatement sqlStatement = null;
                                if (ast.getHintStatements() != null && ast.getHintStatements().size() == 1) {
                                    sqlStatement = ast.getHintStatements().get(0);
                                    DrdsSqlWithParams drdsSqlWithParams = DrdsRunnerHelper.preParse(sqlStatement, dataContext.getDefaultSchema());
                                    queryPlanCache.add("fix".equalsIgnoreCase(command), drdsSqlWithParams);
                                }
                                return response.sendOk();
                            }
                        default:
                            throw new UnsupportedOperationException();
                    }
                }
                if ("MIGRATE_LIST".equalsIgnoreCase(cmd)) {
                    return response.sendResultSet(MigrateUtil.list());
                }
                if ("MIGRATE_STOP".equalsIgnoreCase(cmd)) {
                    MigrateStopHint hint = JsonUtil.from(body, MigrateStopHint.class);
                    if (MigrateUtil.stop(hint.getId())) {
                        dataContext.setAffectedRows(1);
                    }
                    return response.sendOk();
                }
                if ("MIGRATE".equalsIgnoreCase(cmd)) {
                    MigrateHint migrateHint = JsonUtil.from(body, MigrateHint.class);
                    String name = migrateHint.getName();
                    MigrateHint.Input input = migrateHint.getInput();
                    MigrateHint.Output output = migrateHint.getOutput();
                    int parallelism = output.getParallelism();
                    MigrateUtil.MigrateJdbcOutput migrateJdbcOutput = new MigrateUtil.MigrateJdbcOutput();
                    migrateJdbcOutput.setParallelism(parallelism);
                    migrateJdbcOutput.setBatch(output.getBatch());
                    List<MigrateUtil.MigrateJdbcInput> migrateJdbcInputs = new ArrayList<>();
                    List<Flowable<Object[]>> observables = new ArrayList<>();
                    MetadataManager manager = MetaClusterCurrent.wrapper(MetadataManager.class);
                    TableHandler outputTable = manager.getTable(output.getSchemaName(), output.getTableName());
                    String username = Optional.ofNullable(output.getUsername()).orElseGet(new Supplier<String>() {

                        @Override
                        public String get() {
                            UserConfig userConfig = routerConfig.getUsers().get(0);
                            String username = userConfig.getUsername();
                            String password = userConfig.getPassword();
                            return username;
                        }
                    });
                    String password = Optional.ofNullable(output.getUsername()).orElseGet(new Supplier<String>() {

                        @Override
                        public String get() {
                            UserConfig userConfig = routerConfig.getUsers().get(0);
                            String username = userConfig.getUsername();
                            String password = userConfig.getPassword();
                            return password;
                        }
                    });
                    String url = Optional.ofNullable(output.getUrl()).orElseGet(() -> {
                        ServerConfig serverConfig = MetaClusterCurrent.wrapper(ServerConfig.class);
                        String ip = serverConfig.getIp();
                        int port = serverConfig.getPort();
                        return "jdbc:mysql://" + ip + ":" + port + "/mysql?useUnicode=true&characterEncoding=utf8&useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true";
                    });
                    if (input.getUrl() != null) {
                        String sql = input.getSql();
                        long count = input.getCount();
                        Map<String, String> properties = input.getProperties();
                        MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
                        migrateJdbcInput.setCount(count);
                        observables.add(MigrateUtil.read(migrateJdbcInput, input.getUrl(), input.getUsername(), input.getPassword(), sql));
                    } else if (input.getType() == null || "mycat".equalsIgnoreCase(input.getType())) {
                        TableHandler inputTable = manager.getTable(input.getSchemaName(), input.getTableName());
                        switch(inputTable.getType()) {
                            case SHARDING:
                                {
                                    ShardingTable shardingTable = (ShardingTable) inputTable;
                                    for (Partition backend : shardingTable.getBackends()) {
                                        MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
                                        migrateJdbcInputs.add(migrateJdbcInput);
                                        observables.add(MigrateUtil.read(migrateJdbcInput, backend));
                                    }
                                    break;
                                }
                            case GLOBAL:
                                {
                                    GlobalTable globalTable = (GlobalTable) inputTable;
                                    Partition partition = globalTable.getGlobalDataNode().get(0);
                                    MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
                                    migrateJdbcInputs.add(migrateJdbcInput);
                                    observables.add(MigrateUtil.read(migrateJdbcInput, partition));
                                    break;
                                }
                            case NORMAL:
                                {
                                    NormalTable normalTable = (NormalTable) inputTable;
                                    Partition partition = normalTable.getDataNode();
                                    MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
                                    migrateJdbcInputs.add(migrateJdbcInput);
                                    observables.add(MigrateUtil.read(migrateJdbcInput, partition));
                                    break;
                                }
                            case VISUAL:
                            case VIEW:
                            case CUSTOM:
                                MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
                                migrateJdbcInputs.add(migrateJdbcInput);
                                observables.add(MigrateUtil.read(migrateJdbcInput, input.getTableName(), input.getSchemaName(), url, username, password));
                                break;
                            default:
                                throw new IllegalStateException("Unexpected value: " + inputTable.getType());
                        }
                    } else {
                        throw new UnsupportedOperationException();
                    }
                    String outputSchemaName = outputTable.getSchemaName();
                    String outputTableName = outputTable.getTableName();
                    String insertTemplate = getMySQLInsertTemplate(outputTable);
                    migrateJdbcOutput.setUsername(username);
                    migrateJdbcOutput.setPassword(password);
                    migrateJdbcOutput.setUrl(url);
                    migrateJdbcOutput.setInsertTemplate(insertTemplate);
                    MigrateUtil.MigrateController migrateController = MigrateUtil.write(migrateJdbcOutput, Flowable.merge(observables.stream().map(i -> i.buffer(output.getBatch()).subscribeOn(Schedulers.io())).collect(Collectors.toList())));
                    MigrateUtil.MigrateScheduler scheduler = MigrateUtil.register(name, migrateJdbcInputs, migrateJdbcOutput, migrateController);
                    return response.sendResultSet(MigrateUtil.show(scheduler));
                }
                if ("BINLOG_LIST".equalsIgnoreCase(cmd)) {
                    return response.sendResultSet(BinlogUtil.list());
                }
                if ("BINLOG_STOP".equalsIgnoreCase(cmd)) {
                    BinlogStopHint hint = JsonUtil.from(body, BinlogStopHint.class);
                    if (BinlogUtil.stop(hint.getId())) {
                        dataContext.setAffectedRows(1);
                    }
                    return response.sendOk();
                }
                if ("BINLOG".equalsIgnoreCase(cmd)) {
                    BinlogHint binlogHint = JsonUtil.from(body, BinlogHint.class);
                    Objects.requireNonNull(binlogHint.getInputTableNames());
                    List<String> outputTableNames = binlogHint.getOutputTableNames();
                    if (outputTableNames == null) {
                        // fall back to the input table names when no output tables are given
                        binlogHint.setOutputTableNames(binlogHint.getInputTableNames());
                    }
                    IdentityHashMap<TableHandler, TableHandler> map = new IdentityHashMap<>();
                    List<TableHandler> inputs = new ArrayList<>();
                    List<TableHandler> outputs = new ArrayList<>();
                    for (String inputTableName : binlogHint.getInputTableNames()) {
                        String[] split = inputTableName.split("\\.");
                        String schemaName = SQLUtils.normalize(split[0]);
                        String tableName = SQLUtils.normalize(split[1]);
                        TableHandler inputTable = metadataManager.getTable(schemaName, tableName);
                        inputs.add(inputTable);
                    }
                    for (String outputTableName : binlogHint.getOutputTableNames()) {
                        String[] split = outputTableName.split("\\.");
                        String schemaName = SQLUtils.normalize(split[0]);
                        String tableName = SQLUtils.normalize(split[1]);
                        TableHandler outputTable = metadataManager.getTable(schemaName, tableName);
                        outputs.add(outputTable);
                    }
                    for (int i = 0; i < inputs.size(); i++) {
                        map.put(inputs.get(i), outputs.get(i));
                    }
                    Map<String, Map<String, List<Partition>>> infoCollector = new HashMap<>();
                    List<MigrateUtil.MigrateController> migrateControllers = new ArrayList<>();
                    Set<Map.Entry<TableHandler, TableHandler>> entries = map.entrySet();
                    for (Map.Entry<TableHandler, TableHandler> entry : entries) {
                        ServerConfig serverConfig = MetaClusterCurrent.wrapper(ServerConfig.class);
                        TableHandler inputTable = entry.getKey();
                        TableHandler outputTable = entry.getValue();
                        UserConfig userConfig = routerConfig.getUsers().get(0);
                        String username = userConfig.getUsername();
                        String password = userConfig.getPassword();
                        String ip = serverConfig.getIp();
                        int port = serverConfig.getPort();
                        String url = "jdbc:mysql://" + ip + ":" + port + "/mysql?useUnicode=true&characterEncoding=utf8&useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true";
                        // String insertTemplate = getMySQLInsertTemplate(outputTable);
                        MigrateUtil.MigrateJdbcAnyOutput output = new MigrateUtil.MigrateJdbcAnyOutput();
                        output.setUrl(url);
                        output.setUsername(username);
                        output.setPassword(password);
                        List<Partition> partitions = new ArrayList<>();
                        switch(inputTable.getType()) {
                            case SHARDING:
                                {
                                    ShardingTable shardingTable = (ShardingTable) inputTable;
                                    partitions = shardingTable.getShardingFuntion().calculate(Collections.emptyMap());
                                    break;
                                }
                            case GLOBAL:
                                {
                                    GlobalTable globalTable = (GlobalTable) inputTable;
                                    partitions = ImmutableList.of(globalTable.getGlobalDataNode().get(0));
                                    break;
                                }
                            case NORMAL:
                                {
                                    NormalTable normalTable = (NormalTable) inputTable;
                                    partitions = ImmutableList.of(normalTable.getDataNode());
                                    break;
                                }
                            case CUSTOM:
                            case VISUAL:
                            case VIEW:
                                throw new UnsupportedOperationException();
                        }
                        ReplicaSelectorManager replicaSelectorManager = MetaClusterCurrent.wrapper(ReplicaSelectorManager.class);
                        Map<String, List<Partition>> listMap = partitions.stream().collect(Collectors.groupingBy(partition -> replicaSelectorManager.getDatasourceNameByReplicaName(partition.getTargetName(), true, null)));
                        infoCollector.put(inputTable.getUniqueName(), listMap);
                        List<Flowable<BinlogUtil.ParamSQL>> flowables = new ArrayList<>();
                        for (Map.Entry<String, List<Partition>> e : listMap.entrySet()) {
                            flowables.add(BinlogUtil.observe(e.getKey(), e.getValue()).subscribeOn(Schedulers.io()));
                        }
                        Flowable<BinlogUtil.ParamSQL> merge = flowables.size() == 1 ? flowables.get(0) : Flowable.merge(flowables, flowables.size());
                        merge = merge.map(paramSQL -> {
                            SQLStatement sqlStatement = SQLUtils.parseSingleMysqlStatement(paramSQL.getSql());
                            SQLExprTableSource sqlExprTableSource = VertxUpdateExecuter.getTableSource(sqlStatement);
                            MycatSQLExprTableSourceUtil.setSqlExprTableSource(outputTable.getSchemaName(), outputTable.getTableName(), sqlExprTableSource);
                            paramSQL.setSql(sqlStatement.toString());
                            return paramSQL;
                        });
                        MigrateUtil.MigrateController migrateController = MigrateUtil.writeSql(output, merge);
                        migrateControllers.add(migrateController);
                    }
                    BinlogUtil.BinlogScheduler scheduler = BinlogUtil.BinlogScheduler.of(UUID.randomUUID().toString(), binlogHint.getName(), infoCollector, migrateControllers);
                    BinlogUtil.register(scheduler);
                    return response.sendResultSet(BinlogUtil.list(Collections.singletonList(scheduler)));
                }
                mycatDmlHandler(cmd, body);
                return response.sendOk();
            }
        }
        return response.sendOk();
    } catch (Throwable throwable) {
        return response.sendError(throwable);
    }
}
Also used : MycatdbCommand(io.mycat.commands.MycatdbCommand) io.mycat(io.mycat) CSVRecord(org.apache.commons.csv.CSVRecord) JdbcDataSource(io.mycat.datasource.jdbc.datasource.JdbcDataSource) CSVFormat(org.apache.commons.csv.CSVFormat) HeartbeatFlow(io.mycat.replica.heartbeat.HeartbeatFlow) Duration(java.time.Duration) MySQLErrorCode(io.mycat.beans.mysql.MySQLErrorCode) JsonUtil(io.mycat.util.JsonUtil) MySqlHintStatement(com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlHintStatement) io.mycat.config(io.mycat.config) Timestamp(java.sql.Timestamp) Reader(java.io.Reader) Future(io.vertx.core.Future) Collectors(java.util.stream.Collectors) ArrayBindable(org.apache.calcite.runtime.ArrayBindable) MycatSQLLogMonitor(io.mycat.monitor.MycatSQLLogMonitor) io.mycat.sqlhandler(io.mycat.sqlhandler) NormalTable(io.mycat.calcite.table.NormalTable) Nullable(org.jetbrains.annotations.Nullable) ReplicaSelector(io.mycat.replica.ReplicaSelector) UpdateSQLHandler(io.mycat.sqlhandler.dml.UpdateSQLHandler) Stream(java.util.stream.Stream) ReplicaSelectorManager(io.mycat.replica.ReplicaSelectorManager) DrdsRunnerHelper(io.mycat.calcite.DrdsRunnerHelper) PhysicsInstance(io.mycat.replica.PhysicsInstance) NotNull(org.jetbrains.annotations.NotNull) ShardingTable(io.mycat.calcite.table.ShardingTable) VertxExecuter(io.mycat.vertx.VertxExecuter) SqlEntry(io.mycat.monitor.SqlEntry) java.util(java.util) ResultSetBuilder(io.mycat.beans.mycat.ResultSetBuilder) SQLIdentifierExpr(com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr) LocalDateTime(java.time.LocalDateTime) XaLog(cn.mycat.vertx.xa.XaLog) Supplier(java.util.function.Supplier) Iterators(com.google.common.collect.Iterators) SqlResultSetService(io.mycat.commands.SqlResultSetService) InterruptThreadHint(io.mycat.hint.InterruptThreadHint) MycatSQLExprTableSourceUtil(io.mycat.util.MycatSQLExprTableSourceUtil) KillThreadHint(io.mycat.hint.KillThreadHint) Schedulers(io.reactivex.rxjava3.schedulers.Schedulers) JDBCType(java.sql.JDBCType) PrepareExecutor(io.mycat.calcite.PrepareExecutor) SchemaHandler(io.mycat.calcite.table.SchemaHandler) MySqlInsertStatement(com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement) ImmutableList(com.google.common.collect.ImmutableList) DatasourceStatus(io.mycat.replica.heartbeat.DatasourceStatus) Observable(io.reactivex.rxjava3.core.Observable) JdbcConnectionManager(io.mycat.datasource.jdbc.datasource.JdbcConnectionManager) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) RowBaseIterator(io.mycat.api.collector.RowBaseIterator) StreamSupport(java.util.stream.StreamSupport) GlobalTable(io.mycat.calcite.table.GlobalTable) SQLVariantRefExpr(com.alibaba.druid.sql.ast.expr.SQLVariantRefExpr) SQLUtils(com.alibaba.druid.sql.SQLUtils) Flowable(io.reactivex.rxjava3.core.Flowable) PromiseInternal(io.vertx.core.impl.future.PromiseInternal) io.mycat.hint(io.mycat.hint) VertxUpdateExecuter(io.mycat.vertx.VertxUpdateExecuter) StringWriter(java.io.StringWriter) SQLInsertStatement(com.alibaba.druid.sql.ast.statement.SQLInsertStatement) ExecutorProvider(io.mycat.calcite.ExecutorProvider) IOException(java.io.IOException) QuoteMode(org.apache.commons.csv.QuoteMode) SQLCommentHint(com.alibaba.druid.sql.ast.SQLCommentHint) SqlRecorderRuntime(io.mycat.exporter.SqlRecorderRuntime) VertxUtil(io.mycat.util.VertxUtil) ReplicaSwitchType(io.mycat.replica.ReplicaSwitchType) SQLCharExpr(com.alibaba.druid.sql.ast.expr.SQLCharExpr) MycatInsertRel(io.mycat.calcite.physical.MycatInsertRel) io.mycat.calcite.spm(io.mycat.calcite.spm) 
NameMap(io.mycat.util.NameMap) MysqlPayloadObject(io.mycat.api.collector.MysqlPayloadObject) JdbcUtils(com.alibaba.druid.util.JdbcUtils) CodeExecuterContext(io.mycat.calcite.CodeExecuterContext) HeartBeatStatus(io.mycat.replica.heartbeat.HeartBeatStatus) SQLStatement(com.alibaba.druid.sql.ast.SQLStatement) UnmodifiableIterator(com.google.common.collect.UnmodifiableIterator) FileReader(java.io.FileReader) SQLExprTableSource(com.alibaba.druid.sql.ast.statement.SQLExprTableSource) StorageManager(io.mycat.sqlhandler.config.StorageManager)
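
Because onExecute() is long, the command dispatch is easier to follow in isolation. Below is a self-contained sketch of the prefix/cmd/body split performed at the top of the method; the sample hint text is hypothetical.

// Split a "mycat:<command>{<json body>}" hint into its command name and JSON body.
public class MycatHintParseSketch {
    public static void main(String[] args) {
        String s = "mycat:showDataSources{}"; // text already unwrapped from the SQL comment hint
        if (s.startsWith("mycat:") || s.startsWith("MYCAT:")) {
            s = s.substring(6);
            int bodyStartIndex = s.indexOf('{');
            String cmd = bodyStartIndex == -1 ? s : s.substring(0, bodyStartIndex);
            String body = bodyStartIndex == -1 ? "{}" : s.substring(bodyStartIndex);
            // prints: cmd=showDataSources body={}
            System.out.println("cmd=" + cmd.trim() + " body=" + body);
        }
    }
}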

Example 25 with JdbcConnectionManager

Use of io.mycat.datasource.jdbc.datasource.JdbcConnectionManager in project Mycat2 by MyCATApache.

In class MySQLCheckHandler, method onExecute:

@Override
protected Future<Void> onExecute(SQLRequest<MySqlCheckTableStatement> request, MycatDataContext dataContext, Response response) {
    ResultSetBuilder resultSetBuilder = ResultSetBuilder.create();
    resultSetBuilder.addColumnInfo("TABLE", JDBCType.VARCHAR);
    resultSetBuilder.addColumnInfo("OP", JDBCType.VARCHAR);
    resultSetBuilder.addColumnInfo("MSG_TYPE", JDBCType.VARCHAR);
    resultSetBuilder.addColumnInfo("MSG_TEXT", JDBCType.VARCHAR);
    MetadataManager metadataManager = MetaClusterCurrent.wrapper(MetadataManager.class);
    JdbcConnectionManager jdbcConnectionManager = MetaClusterCurrent.wrapper(JdbcConnectionManager.class);
    List<Throwable> throwables = Collections.synchronizedList(new LinkedList<>());
    MySqlCheckTableStatement ast = request.getAst();
    for (SQLExprTableSource table : ast.getTables()) {
        resolveSQLExprTableSource(table, dataContext);
        String schemaName = SQLUtils.normalize(table.getSchema());
        String tableName = SQLUtils.normalize(table.getTableName());
        TableHandler tableHandler = metadataManager.getTable(schemaName, tableName);
        Set<String> errorInfo = new HashSet<>();
        switch(tableHandler.getType()) {
            case SHARDING:
                {
                    ShardingTableHandler shardingTableHandler = (ShardingTableHandler) tableHandler;
                    errorInfo = check(metadataManager, jdbcConnectionManager, throwables, shardingTableHandler.dataNodes().parallelStream());
                    break;
                }
            case GLOBAL:
                {
                    GlobalTableHandler globalTableHandler = (GlobalTableHandler) tableHandler;
                    errorInfo = check(metadataManager, jdbcConnectionManager, throwables, globalTableHandler.getGlobalDataNode().parallelStream());
                    break;
                }
            case NORMAL:
                {
                    break;
                }
            case CUSTOM:
                {
                    break;
                }
            default:
                throw new IllegalStateException("Unexpected value: " + tableHandler.getType());
        }
        resultSetBuilder.addObjectRowPayload(Arrays.asList(table.toString(), "check", errorInfo.isEmpty() ? "Ok" : "Error", String.join(",", errorInfo)));
    }
    return response.sendResultSet(resultSetBuilder.build());
}
Also used : ResultSetBuilder(io.mycat.beans.mycat.ResultSetBuilder) ShardingTableHandler(io.mycat.router.ShardingTableHandler) GlobalTableHandler(io.mycat.calcite.table.GlobalTableHandler) GlobalTableHandler(io.mycat.calcite.table.GlobalTableHandler) ShardingTableHandler(io.mycat.router.ShardingTableHandler) SQLExprTableSource(com.alibaba.druid.sql.ast.statement.SQLExprTableSource) JdbcConnectionManager(io.mycat.datasource.jdbc.datasource.JdbcConnectionManager) MySqlCheckTableStatement(com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlCheckTableStatement)
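
For reference, a stripped-down sketch of the ResultSetBuilder pattern shared by these handlers, assuming (as the calls above suggest) that build() yields the row iterator consumed by response.sendResultSet(); the class name and the sample row are hypothetical.

import io.mycat.api.collector.RowBaseIterator;
import io.mycat.beans.mycat.ResultSetBuilder;

import java.sql.JDBCType;
import java.util.Arrays;

// Declare columns, append rows, then hand the built iterator to the response.
public class CheckResultSketch {
    public static RowBaseIterator buildSample() {
        ResultSetBuilder builder = ResultSetBuilder.create();
        builder.addColumnInfo("TABLE", JDBCType.VARCHAR);
        builder.addColumnInfo("OP", JDBCType.VARCHAR);
        builder.addColumnInfo("MSG_TYPE", JDBCType.VARCHAR);
        builder.addColumnInfo("MSG_TEXT", JDBCType.VARCHAR);
        // hypothetical row: the logical table checked out fine
        builder.addObjectRowPayload(Arrays.asList("db1.travelrecord", "check", "Ok", ""));
        return builder.build();
    }
}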

Aggregations

JdbcConnectionManager (io.mycat.datasource.jdbc.datasource.JdbcConnectionManager): 57
DefaultConnection (io.mycat.datasource.jdbc.datasource.DefaultConnection): 38
SneakyThrows (lombok.SneakyThrows): 21
Connection (java.sql.Connection): 16
ReplicaSelectorManager (io.mycat.replica.ReplicaSelectorManager): 8
java.util (java.util): 8
NotNull (org.jetbrains.annotations.NotNull): 8
SQLExprTableSource (com.alibaba.druid.sql.ast.statement.SQLExprTableSource): 7
DatasourceConfig (io.mycat.config.DatasourceConfig): 7
JdbcDataSource (io.mycat.datasource.jdbc.datasource.JdbcDataSource): 7
Collectors (java.util.stream.Collectors): 7
MetadataManager (io.mycat.MetadataManager): 6
RowBaseIterator (io.mycat.api.collector.RowBaseIterator): 6
NameMap (io.mycat.util.NameMap): 6
SQLStatement (com.alibaba.druid.sql.ast.SQLStatement): 5
SQLIdentifierExpr (com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr): 5
MycatRowMetaData (io.mycat.beans.mycat.MycatRowMetaData): 5
SQLException (java.sql.SQLException): 5
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 5
Logger (org.slf4j.Logger): 5