Usage of io.mycat.replica.heartbeat.HeartbeatFlow in the Mycat2 project (MyCATApache): the HintHandler class, method onExecute.
/**
 * Executes a Mycat admin hint of the form {@code /*+ mycat:cmd{jsonBody} }.
 * <p>
 * The single comment hint is unwrapped, the leading {@code "mycat:"} prefix is
 * stripped (case-insensitively), and the remainder is split into a command name
 * and an optional JSON body (defaulting to {@code "{}"}).  The command is then
 * dispatched to the matching admin action; unrecognised commands fall through to
 * {@link #mycatDmlHandler}.  Any error is reported via {@code response.sendError}.
 *
 * @param request     the parsed MySQL hint statement
 * @param dataContext the session context (affected rows, debug/vector flags, ...)
 * @param response    sink for the result set / OK / error packet
 * @return the future completed by whichever {@code response} call handled the command
 */
@Override
protected Future<Void> onExecute(SQLRequest<MySqlHintStatement> request, MycatDataContext dataContext, Response response) {
    MySqlHintStatement ast = request.getAst();
    List<SQLCommentHint> hints = ast.getHints();
    try {
        if (hints.size() == 1) {
            String s = SqlHints.unWrapperHint(hints.get(0).getText());
            // Accept any capitalisation of the prefix ("mycat:", "MYCAT:", "Mycat:", ...),
            // consistent with the case-insensitive command matching below.  The original
            // only accepted all-lower or all-upper prefixes.
            if (s.regionMatches(true, 0, "mycat:", 0, 6)) {
                s = s.substring(6);
                // Everything from the first '{' is the JSON body; before it is the command.
                int bodyStartIndex = s.indexOf('{');
                String cmd;
                String body;
                if (bodyStartIndex == -1) {
                    cmd = s;
                    body = "{}";
                } else {
                    cmd = s.substring(0, bodyStartIndex);
                    body = s.substring(bodyStartIndex);
                }
                cmd = cmd.trim();
                MetadataManager metadataManager = MetaClusterCurrent.wrapper(MetadataManager.class);
                MycatRouterConfig routerConfig = MetaClusterCurrent.wrapper(MycatRouterConfig.class);
                ReplicaSelectorManager replicaSelectorRuntime = MetaClusterCurrent.wrapper(ReplicaSelectorManager.class);
                JdbcConnectionManager jdbcConnectionManager = MetaClusterCurrent.wrapper(JdbcConnectionManager.class);
                MycatServer mycatServer = MetaClusterCurrent.wrapper(MycatServer.class);
                // ---- commands delegated to dedicated helpers -------------------------
                if ("showErGroup".equalsIgnoreCase(cmd)) {
                    return showErGroup(response, metadataManager);
                }
                if ("loaddata".equalsIgnoreCase(cmd)) {
                    return loaddata(dataContext, response, body, metadataManager);
                }
                if ("setUserDialect".equalsIgnoreCase(cmd)) {
                    return setUserDialect(response, body);
                }
                if ("showSlowSql".equalsIgnoreCase(cmd)) {
                    return showSlowSql(response, body);
                }
                if ("showTopology".equalsIgnoreCase(cmd)) {
                    return showTopology(response, body, metadataManager);
                }
                if ("checkConfigConsistency".equalsIgnoreCase(cmd)) {
                    StorageManager assembleMetadataStorageManager = MetaClusterCurrent.wrapper(StorageManager.class);
                    boolean res = assembleMetadataStorageManager.checkConfigConsistency();
                    ResultSetBuilder resultSetBuilder = ResultSetBuilder.create();
                    resultSetBuilder.addColumnInfo("value", JDBCType.VARCHAR);
                    resultSetBuilder.addObjectRowPayload(Arrays.asList(res ? 1 : 0));
                    return response.sendResultSet(resultSetBuilder.build());
                }
                if ("resetConfig".equalsIgnoreCase(cmd)) {
                    MycatRouterConfigOps ops = ConfigUpdater.getOps();
                    ops.reset();
                    ops.commit();
                    return response.sendOk();
                }
                if ("run".equalsIgnoreCase(cmd)) {
                    // Compiles and runs an HBT (human-readable plan text) expression.
                    Map<String, Object> map = JsonUtil.from(body, Map.class);
                    String hbt = Objects.toString(map.get("hbt"));
                    DrdsSqlCompiler drdsRunner = MetaClusterCurrent.wrapper(DrdsSqlCompiler.class);
                    Plan plan = drdsRunner.doHbt(hbt);
                    AsyncMycatDataContextImpl.HbtMycatDataContextImpl sqlMycatDataContext = new AsyncMycatDataContextImpl.HbtMycatDataContextImpl(dataContext, plan.getCodeExecuterContext());
                    ArrayBindable arrayBindable = MetaClusterCurrent.wrapper(ExecutorProvider.class).prepare(plan).getArrayBindable();
                    Observable<MysqlPayloadObject> mysqlPayloadObjectObservable = PrepareExecutor.getMysqlPayloadObjectObservable(arrayBindable, sqlMycatDataContext, plan.getMetaData());
                    return response.sendResultSet(mysqlPayloadObjectObservable);
                }
                if ("killThread".equalsIgnoreCase(cmd)) {
                    KillThreadHint killThreadHint = JsonUtil.from(body, KillThreadHint.class);
                    long pid = killThreadHint.getId();
                    dataContext.setAffectedRows(IOExecutor.kill(pid) ? 1 : 0);
                    return response.sendOk();
                }
                if ("interruptThread".equalsIgnoreCase(cmd)) {
                    // BUGFIX: the original also called Thread.currentThread().interrupt(),
                    // interrupting this handler thread itself right before writing the
                    // response.  Only the requested thread id is interrupted now.
                    InterruptThreadHint interruptThreadHint = JsonUtil.from(body, InterruptThreadHint.class);
                    long pid = interruptThreadHint.getId();
                    dataContext.setAffectedRows(IOExecutor.interrupt(pid) ? 1 : 0);
                    return response.sendOk();
                }
                if ("showThreadInfo".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("ID", JDBCType.VARCHAR);
                    builder.addColumnInfo("NAME", JDBCType.VARCHAR);
                    builder.addColumnInfo("STATE", JDBCType.VARCHAR);
                    builder.addColumnInfo("STACKTRACE", JDBCType.VARCHAR);
                    List<Thread> threads = IOExecutor.findAllThreads();
                    for (Thread thread : threads) {
                        String name = thread.getName();
                        long id = thread.getId();
                        String state = thread.getState().name();
                        StackTraceElement[] stackTrace = thread.getStackTrace();
                        StringBuilder stackText = new StringBuilder();
                        // BUGFIX: frames were concatenated without any separator, producing
                        // one unreadable line; emit one "\tat ..." frame per line.
                        for (StackTraceElement traceElement : stackTrace) {
                            stackText.append("\tat ").append(traceElement).append('\n');
                        }
                        builder.addObjectRowPayload(Arrays.asList(id, name, state, stackText.toString()));
                    }
                    return response.sendResultSet(builder.build());
                }
                if ("createSqlCache".equalsIgnoreCase(cmd)) {
                    MycatRouterConfigOps ops = ConfigUpdater.getOps();
                    // The cached SQL may come either from the hint's attached statement
                    // or from the JSON body; the attached statement wins for the config.
                    SQLStatement sqlStatement = null;
                    if (ast.getHintStatements() != null && ast.getHintStatements().size() == 1) {
                        sqlStatement = ast.getHintStatements().get(0);
                    }
                    SqlCacheConfig sqlCacheConfig = JsonUtil.from(body, SqlCacheConfig.class);
                    if (sqlCacheConfig.getSql() == null && sqlStatement != null) {
                        sqlCacheConfig.setSql(sqlStatement.toString());
                    }
                    ops.putSqlCache(sqlCacheConfig);
                    ops.commit();
                    if (sqlStatement == null) {
                        String sql = sqlCacheConfig.getSql();
                        sqlStatement = SQLUtils.parseSingleMysqlStatement(sql);
                    }
                    // Execute the statement once so the cache is primed immediately.
                    return MycatdbCommand.execute(dataContext, response, sqlStatement);
                }
                if ("showSqlCaches".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder resultSetBuilder = ResultSetBuilder.create();
                    resultSetBuilder.addColumnInfo("info", JDBCType.VARCHAR);
                    if (MetaClusterCurrent.exist(SqlResultSetService.class)) {
                        SqlResultSetService sqlResultSetService = MetaClusterCurrent.wrapper(SqlResultSetService.class);
                        sqlResultSetService.snapshot().toStringList().forEach(c -> resultSetBuilder.addObjectRowPayload(Arrays.asList(c)));
                    }
                    return response.sendResultSet(resultSetBuilder.build());
                }
                if ("dropSqlCache".equalsIgnoreCase(cmd)) {
                    MycatRouterConfigOps ops = ConfigUpdater.getOps();
                    SqlCacheConfig sqlCacheConfig = JsonUtil.from(body, SqlCacheConfig.class);
                    ops.removeSqlCache(sqlCacheConfig.getName());
                    ops.commit();
                    return response.sendOk();
                }
                if ("showBufferUsage".equalsIgnoreCase(cmd)) {
                    return response.sendResultSet(mycatServer.showBufferUsage(dataContext.getSessionId()));
                }
                if ("showUsers".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("username", JDBCType.VARCHAR);
                    builder.addColumnInfo("ip", JDBCType.VARCHAR);
                    builder.addColumnInfo("transactionType", JDBCType.VARCHAR);
                    builder.addColumnInfo("dbType", JDBCType.VARCHAR);
                    Authenticator authenticator = MetaClusterCurrent.wrapper(Authenticator.class);
                    List<UserConfig> userConfigs = authenticator.getConfigAsList();
                    for (UserConfig userConfig : userConfigs) {
                        builder.addObjectRowPayload(Arrays.asList(userConfig.getUsername(), userConfig.getIp(), userConfig.getTransactionType(), userConfig.getDialect()));
                    }
                    return response.sendResultSet(builder::build);
                }
                if ("showSchemas".equalsIgnoreCase(cmd)) {
                    Map map = JsonUtil.from(body, Map.class);
                    String schemaName = (String) map.get("schemaName");
                    // No schemaName in the body means "all schemas".
                    Collection<SchemaHandler> schemaHandlers;
                    if (schemaName == null) {
                        schemaHandlers = metadataManager.getSchemaMap().values();
                    } else {
                        schemaHandlers = Collections.singletonList(metadataManager.getSchemaMap().get(schemaName));
                    }
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("SCHEMA_NAME", JDBCType.VARCHAR).addColumnInfo("DEFAULT_TARGET_NAME", JDBCType.VARCHAR).addColumnInfo("TABLE_NAMES", JDBCType.VARCHAR);
                    for (SchemaHandler value : schemaHandlers) {
                        String SCHEMA_NAME = value.getName();
                        String DEFAULT_TARGET_NAME = value.defaultTargetName();
                        String TABLE_NAMES = String.join(",", value.logicTables().keySet());
                        builder.addObjectRowPayload(Arrays.asList(SCHEMA_NAME, DEFAULT_TARGET_NAME, TABLE_NAMES));
                    }
                    return response.sendResultSet(builder::build);
                }
                if ("showTables".equalsIgnoreCase(cmd)) {
                    return showTables(response, body, metadataManager, routerConfig);
                }
                if ("setSqlTimeFilter".equalsIgnoreCase(cmd)) {
                    return setSqlTimeFilter(response, body, metadataManager);
                }
                if ("getSqlTimeFilter".equalsIgnoreCase(cmd)) {
                    return getSqlTimeFilter(response, body, metadataManager);
                }
                if ("showClusters".equalsIgnoreCase(cmd)) {
                    Map map = JsonUtil.from(body, Map.class);
                    String clusterName = (String) map.get("name");
                    RowBaseIterator rowBaseIterator = showClusters(clusterName);
                    return response.sendResultSet(rowBaseIterator);
                }
                if ("showNativeDataSources".equalsIgnoreCase(cmd)) {
                    return response.sendResultSet(mycatServer.showNativeDataSources());
                }
                if ("showDataSources".equalsIgnoreCase(cmd)) {
                    Optional<JdbcConnectionManager> connectionManager = Optional.ofNullable(jdbcConnectionManager);
                    Collection<JdbcDataSource> jdbcDataSources = new HashSet<>(connectionManager.map(i -> i.getDatasourceInfo()).map(i -> i.values()).orElse(Collections.emptyList()));
                    ResultSetBuilder resultSetBuilder = ResultSetBuilder.create();
                    // Column order must match the Arrays.asList(...) payload order below.
                    resultSetBuilder.addColumnInfo("NAME", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("USERNAME", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("PASSWORD", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("MAX_CON", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("MIN_CON", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("EXIST_CON", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("USE_CON", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("MAX_RETRY_COUNT", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("MAX_CONNECT_TIMEOUT", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("DB_TYPE", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("URL", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("WEIGHT", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("INIT_SQL", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("INIT_SQL_GET_CONNECTION", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("INSTANCE_TYPE", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("IDLE_TIMEOUT", JDBCType.BIGINT);
                    resultSetBuilder.addColumnInfo("DRIVER", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("TYPE", JDBCType.VARCHAR);
                    resultSetBuilder.addColumnInfo("IS_MYSQL", JDBCType.VARCHAR);
                    for (JdbcDataSource jdbcDataSource : jdbcDataSources) {
                        DatasourceConfig config = jdbcDataSource.getConfig();
                        String NAME = config.getName();
                        String USERNAME = config.getUser();
                        String PASSWORD = config.getPassword();
                        int MAX_CON = config.getMaxCon();
                        int MIN_CON = config.getMinCon();
                        int USED_CON = jdbcDataSource.getUsedCount();
                        // The JDBC pool's current physical connection count is internal
                        // state we cannot observe; report the used count for EXIST_CON.
                        int EXIST_CON = USED_CON;
                        int MAX_RETRY_COUNT = config.getMaxRetryCount();
                        long MAX_CONNECT_TIMEOUT = config.getMaxConnectTimeout();
                        String DB_TYPE = config.getDbType();
                        String URL = config.getUrl();
                        int WEIGHT = config.getWeight();
                        String INIT_SQL = Optional.ofNullable(config.getInitSqls()).map(o -> String.join(";", o)).orElse("");
                        boolean INIT_SQL_GET_CONNECTION = config.isInitSqlsGetConnection();
                        // Prefer the live replica-selector view of the instance type; fall
                        // back to the configured value when the instance is unknown.
                        String INSTANCE_TYPE = Optional.ofNullable(replicaSelectorRuntime.getPhysicsInstanceByName(NAME)).map(i -> i.getType().name()).orElse(config.getInstanceType());
                        long IDLE_TIMEOUT = config.getIdleTimeout();
                        String DRIVER = jdbcDataSource.getDataSource().toString();
                        String TYPE = config.getType();
                        boolean IS_MYSQL = jdbcDataSource.isMySQLType();
                        resultSetBuilder.addObjectRowPayload(Arrays.asList(NAME, USERNAME, PASSWORD, MAX_CON, MIN_CON, EXIST_CON, USED_CON, MAX_RETRY_COUNT, MAX_CONNECT_TIMEOUT, DB_TYPE, URL, WEIGHT, INIT_SQL, INIT_SQL_GET_CONNECTION, INSTANCE_TYPE, IDLE_TIMEOUT, DRIVER, TYPE, IS_MYSQL));
                    }
                    return response.sendResultSet(resultSetBuilder::build);
                }
                if ("showHeartbeats".equalsIgnoreCase(cmd)) {
                    RowBaseIterator rowBaseIterator = showHeatbeatStat();
                    return response.sendResultSet(rowBaseIterator);
                }
                if ("showHeartbeatStatus".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("name", JDBCType.VARCHAR);
                    builder.addColumnInfo("status", JDBCType.VARCHAR);
                    Map<String, HeartbeatFlow> heartbeatDetectorMap = replicaSelectorRuntime.getHeartbeatDetectorMap();
                    for (Map.Entry<String, HeartbeatFlow> entry : heartbeatDetectorMap.entrySet()) {
                        String key = entry.getKey();
                        HeartbeatFlow value = entry.getValue();
                        builder.addObjectRowPayload(Arrays.asList(Objects.toString(key), Objects.toString(value.getDsStatus())));
                    }
                    return response.sendResultSet(builder::build);
                }
                if ("showInstances".equalsIgnoreCase(cmd)) {
                    RowBaseIterator rowBaseIterator = showInstances();
                    return response.sendResultSet(rowBaseIterator);
                }
                if ("showReactors".equalsIgnoreCase(cmd)) {
                    MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
                    return response.sendResultSet(server.showReactors());
                }
                if ("showThreadPools".equalsIgnoreCase(cmd)) {
                    // NOTE(review): only the header is built here; no rows are emitted.
                    // Presumably intentional (placeholder), but worth confirming.
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("NAME", JDBCType.VARCHAR).addColumnInfo("POOL_SIZE", JDBCType.BIGINT).addColumnInfo("ACTIVE_COUNT", JDBCType.BIGINT).addColumnInfo("TASK_QUEUE_SIZE", JDBCType.BIGINT).addColumnInfo("COMPLETED_TASK", JDBCType.BIGINT).addColumnInfo("TOTAL_TASK", JDBCType.BIGINT);
                    return response.sendResultSet(builder::build);
                }
                if ("showNativeBackends".equalsIgnoreCase(cmd)) {
                    MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
                    return response.sendResultSet(server.showNativeBackends());
                }
                if ("showConnections".equalsIgnoreCase(cmd)) {
                    MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
                    return response.sendResultSet(server.showConnections());
                }
                if ("showSchedules".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    ScheduledExecutorService timer = ScheduleUtil.getTimer();
                    String NAME = timer.toString();
                    boolean IS_TERMINATED = timer.isTerminated();
                    boolean IS_SHUTDOWN = timer.isShutdown();
                    int SCHEDULE_COUNT = ScheduleUtil.getScheduleCount();
                    builder.addColumnInfo("NAME", JDBCType.VARCHAR).addColumnInfo("IS_TERMINATED", JDBCType.VARCHAR).addColumnInfo("IS_SHUTDOWN", JDBCType.VARCHAR).addColumnInfo("SCHEDULE_COUNT", JDBCType.BIGINT);
                    builder.addObjectRowPayload(Arrays.asList(NAME, IS_TERMINATED, IS_SHUTDOWN, SCHEDULE_COUNT));
                    return response.sendResultSet(builder::build);
                }
                if ("showBaselines".equalsIgnoreCase(cmd)) {
                    // Same listing as "baseline" -> "showallplans" below; kept in sync manually.
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    QueryPlanCache queryPlanCache = MetaClusterCurrent.wrapper(QueryPlanCache.class);
                    builder.addColumnInfo("BASELINE_ID", JDBCType.VARCHAR).addColumnInfo("PARAMETERIZED_SQL", JDBCType.VARCHAR).addColumnInfo("PLAN_ID", JDBCType.VARCHAR).addColumnInfo("EXTERNALIZED_PLAN", JDBCType.VARCHAR).addColumnInfo("FIXED", JDBCType.VARCHAR).addColumnInfo("ACCEPTED", JDBCType.VARCHAR);
                    for (Baseline baseline : queryPlanCache.list()) {
                        for (BaselinePlan baselinePlan : baseline.getPlanList()) {
                            String BASELINE_ID = String.valueOf(baselinePlan.getBaselineId());
                            String PARAMETERIZED_SQL = String.valueOf(baselinePlan.getSql());
                            String PLAN_ID = String.valueOf(baselinePlan.getId());
                            CodeExecuterContext attach = (CodeExecuterContext) baselinePlan.attach();
                            String EXTERNALIZED_PLAN = new PlanImpl(attach.getMycatRel(), attach, Collections.emptyList()).dumpPlan();
                            String FIXED = Optional.ofNullable(baseline.getFixPlan()).filter(i -> i.getId() == baselinePlan.getId()).map(u -> "true").orElse("false");
                            String ACCEPTED = "true";
                            builder.addObjectRowPayload(Arrays.asList(BASELINE_ID, PARAMETERIZED_SQL, PLAN_ID, EXTERNALIZED_PLAN, FIXED, ACCEPTED));
                        }
                    }
                    return response.sendResultSet(builder::build);
                }
                if ("showConfigText".equalsIgnoreCase(cmd)) {
                    MycatRouterConfig mycatRouterConfig = MetaClusterCurrent.wrapper(MycatRouterConfig.class);
                    String text = JsonUtil.toJson(mycatRouterConfig);
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("CONFIG_TEXT", JDBCType.VARCHAR);
                    builder.addObjectRowPayload(Arrays.asList(text));
                    return response.sendResultSet(builder.build());
                }
                // ---- toggle commands: a body containing '1' means "enable" -----------
                if ("setBkaJoin".equalsIgnoreCase(cmd)) {
                    DrdsSqlCompiler.RBO_BKA_JOIN = body.contains("1");
                    return response.sendOk();
                }
                if ("setSortMergeJoin".equalsIgnoreCase(cmd)) {
                    DrdsSqlCompiler.RBO_MERGE_JOIN = body.contains("1");
                    return response.sendOk();
                }
                if ("setAcceptConnect".equalsIgnoreCase(cmd)) {
                    boolean contains = body.contains("1");
                    MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
                    if (!contains) {
                        server.stopAcceptConnect();
                    } else {
                        server.resumeAcceptConnect();
                    }
                    dataContext.setAffectedRows(1);
                    return response.sendOk();
                }
                if ("setReadyToCloseSQL".equalsIgnoreCase(cmd)) {
                    ReadyToCloseSQLHint readyToCloseSQLHint = JsonUtil.from(body, ReadyToCloseSQLHint.class);
                    String sql = readyToCloseSQLHint.getSql().trim();
                    MycatServer server = MetaClusterCurrent.wrapper(MycatServer.class);
                    server.setReadyToCloseSQL(sql);
                    dataContext.setAffectedRows(1);
                    return response.sendOk();
                }
                if ("setDebug".equalsIgnoreCase(cmd)) {
                    boolean contains = body.contains("1");
                    dataContext.setDebug(contains);
                    return response.sendOk();
                }
                if ("setVector".equalsIgnoreCase(cmd)) {
                    boolean contains = body.contains("1");
                    dataContext.setVector(contains);
                    return response.sendOk();
                }
                if ("is".equalsIgnoreCase(cmd)) {
                    ResultSetBuilder builder = ResultSetBuilder.create();
                    builder.addColumnInfo("value", JDBCType.VARCHAR);
                    if (body.contains("debug")) {
                        boolean debug = dataContext.isDebug();
                        // NOTE(review): querying the flag also propagates it to the global
                        // compiler switch — confirm this side effect is intended.
                        DrdsSqlCompiler.DEBUG = debug;
                        builder.addObjectRowPayload(Arrays.asList(debug ? "1" : "0"));
                    }
                    return response.sendResultSet(builder.build());
                }
                if ("setBkaJoinLeftRowCountLimit".equalsIgnoreCase(cmd)) {
                    // Body arrives as "{N}"; strip the surrounding braces and parse.
                    DrdsSqlCompiler.BKA_JOIN_LEFT_ROW_COUNT_LIMIT = Long.parseLong(body.substring(1, body.length() - 1));
                    return response.sendOk();
                }
                if ("baseline".equalsIgnoreCase(cmd)) {
                    Map<String, Object> map = JsonUtil.from(body, Map.class);
                    String command = Objects.requireNonNull(map.get("command")).toString().toLowerCase();
                    long value = Long.parseLong((map.getOrDefault("value", "0")).toString());
                    QueryPlanCache queryPlanCache = MetaClusterCurrent.wrapper(QueryPlanCache.class);
                    // `command` was lower-cased above, so every label below must be
                    // lower-case too.  (BUGFIX: the original used camelCase labels such
                    // as "showAllPlans", which could never match and always fell through
                    // to the UnsupportedOperationException default.)
                    switch (command) {
                        case "showallplans": {
                            ResultSetBuilder builder = ResultSetBuilder.create();
                            builder.addColumnInfo("BASELINE_ID", JDBCType.VARCHAR).addColumnInfo("PARAMETERIZED_SQL", JDBCType.VARCHAR).addColumnInfo("PLAN_ID", JDBCType.VARCHAR).addColumnInfo("EXTERNALIZED_PLAN", JDBCType.VARCHAR).addColumnInfo("FIXED", JDBCType.VARCHAR).addColumnInfo("ACCEPTED", JDBCType.VARCHAR);
                            for (Baseline baseline : queryPlanCache.list()) {
                                for (BaselinePlan baselinePlan : baseline.getPlanList()) {
                                    String BASELINE_ID = String.valueOf(baselinePlan.getBaselineId());
                                    String PARAMETERIZED_SQL = String.valueOf(baselinePlan.getSql());
                                    String PLAN_ID = String.valueOf(baselinePlan.getId());
                                    CodeExecuterContext attach = (CodeExecuterContext) baselinePlan.attach();
                                    String EXTERNALIZED_PLAN = new PlanImpl(attach.getMycatRel(), attach, Collections.emptyList()).dumpPlan();
                                    String FIXED = Optional.ofNullable(baseline.getFixPlan()).filter(i -> i.getId() == baselinePlan.getId()).map(u -> "true").orElse("false");
                                    String ACCEPTED = "true";
                                    builder.addObjectRowPayload(Arrays.asList(BASELINE_ID, PARAMETERIZED_SQL, PLAN_ID, EXTERNALIZED_PLAN, FIXED, ACCEPTED));
                                }
                            }
                            return response.sendResultSet(builder::build);
                        }
                        case "persistallbaselines": {
                            queryPlanCache.saveBaselines();
                            return response.sendOk();
                        }
                        case "loadbaseline": {
                            queryPlanCache.loadBaseline(value);
                            return response.sendOk();
                        }
                        case "loadplan": {
                            queryPlanCache.loadPlan(value);
                            return response.sendOk();
                        }
                        case "persistplan": {
                            queryPlanCache.persistPlan(value);
                            return response.sendOk();
                        }
                        case "clearbaseline": {
                            queryPlanCache.clearBaseline(value);
                            return response.sendOk();
                        }
                        case "clearplan": {
                            queryPlanCache.clearPlan(value);
                            return response.sendOk();
                        }
                        case "deletebaseline": {
                            queryPlanCache.deleteBaseline(value);
                            return response.sendOk();
                        }
                        case "deleteplan": {
                            queryPlanCache.deletePlan(value);
                            return response.sendOk();
                        }
                        case "add":
                        case "fix": {
                            SQLStatement sqlStatement = null;
                            if (ast.getHintStatements() != null && ast.getHintStatements().size() == 1) {
                                sqlStatement = ast.getHintStatements().get(0);
                                DrdsSqlWithParams drdsSqlWithParams = DrdsRunnerHelper.preParse(sqlStatement, dataContext.getDefaultSchema());
                                queryPlanCache.add("fix".equalsIgnoreCase(command), drdsSqlWithParams);
                            }
                            return response.sendOk();
                        }
                        default:
                            throw new UnsupportedOperationException();
                    }
                }
                if ("MIGRATE_LIST".equalsIgnoreCase(cmd)) {
                    return response.sendResultSet(MigrateUtil.list());
                }
                if ("MIGRATE_STOP".equalsIgnoreCase(cmd)) {
                    MigrateStopHint hint = JsonUtil.from(body, MigrateStopHint.class);
                    if (MigrateUtil.stop(hint.getId())) {
                        dataContext.setAffectedRows(1);
                    }
                    return response.sendOk();
                }
                if ("MIGRATE".equalsIgnoreCase(cmd)) {
                    MigrateHint migrateHint = JsonUtil.from(body, MigrateHint.class);
                    String name = migrateHint.getName();
                    MigrateHint.Input input = migrateHint.getInput();
                    MigrateHint.Output output = migrateHint.getOutput();
                    int parallelism = output.getParallelism();
                    MigrateUtil.MigrateJdbcOutput migrateJdbcOutput = new MigrateUtil.MigrateJdbcOutput();
                    migrateJdbcOutput.setParallelism(parallelism);
                    migrateJdbcOutput.setBatch(output.getBatch());
                    List<MigrateUtil.MigrateJdbcInput> migrateJdbcInputs = new ArrayList<>();
                    List<Flowable<Object[]>> observables = new ArrayList<>();
                    MetadataManager manager = MetaClusterCurrent.wrapper(MetadataManager.class);
                    TableHandler outputTable = manager.getTable(output.getSchemaName(), output.getTableName());
                    // Fall back to the first configured user when the hint omits credentials.
                    String username = Optional.ofNullable(output.getUsername())
                            .orElseGet(() -> routerConfig.getUsers().get(0).getUsername());
                    // BUGFIX: the original resolved the password from output.getUsername(),
                    // so an explicitly configured output password was silently replaced by
                    // the username whenever one was present.
                    String password = Optional.ofNullable(output.getPassword())
                            .orElseGet(() -> routerConfig.getUsers().get(0).getPassword());
                    String url = Optional.ofNullable(output.getUrl()).orElseGet(() -> {
                        ServerConfig serverConfig = MetaClusterCurrent.wrapper(ServerConfig.class);
                        String ip = serverConfig.getIp();
                        int port = serverConfig.getPort();
                        return "jdbc:mysql://" + ip + ":" + port + "/mysql?useUnicode=true&characterEncoding=utf8&useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true";
                    });
                    if (input.getUrl() != null) {
                        // External JDBC source described directly in the hint.
                        String sql = input.getSql();
                        long count = input.getCount();
                        Map<String, String> properties = input.getProperties();
                        MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
                        migrateJdbcInput.setCount(count);
                        // NOTE(review): unlike the branches below, this input is not added
                        // to migrateJdbcInputs — confirm whether external-url inputs are
                        // intentionally excluded from scheduler tracking.
                        observables.add(MigrateUtil.read(migrateJdbcInput, input.getUrl(), input.getUsername(), input.getPassword(), sql));
                    } else if (input.getType() == null || "mycat".equalsIgnoreCase(input.getType())) {
                        // Source is a logical Mycat table; fan out one reader per partition.
                        TableHandler inputTable = manager.getTable(input.getSchemaName(), input.getTableName());
                        switch (inputTable.getType()) {
                            case SHARDING: {
                                ShardingTable shardingTable = (ShardingTable) inputTable;
                                for (Partition backend : shardingTable.getBackends()) {
                                    MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
                                    migrateJdbcInputs.add(migrateJdbcInput);
                                    observables.add(MigrateUtil.read(migrateJdbcInput, backend));
                                }
                                break;
                            }
                            case GLOBAL: {
                                // Any replica holds the full data; read the first one.
                                GlobalTable globalTable = (GlobalTable) inputTable;
                                Partition partition = globalTable.getGlobalDataNode().get(0);
                                MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
                                migrateJdbcInputs.add(migrateJdbcInput);
                                observables.add(MigrateUtil.read(migrateJdbcInput, partition));
                                break;
                            }
                            case NORMAL: {
                                NormalTable normalTable = (NormalTable) inputTable;
                                Partition partition = normalTable.getDataNode();
                                MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
                                migrateJdbcInputs.add(migrateJdbcInput);
                                observables.add(MigrateUtil.read(migrateJdbcInput, partition));
                                break;
                            }
                            case VISUAL:
                            case VIEW:
                            case CUSTOM: {
                                MigrateUtil.MigrateJdbcInput migrateJdbcInput = new MigrateUtil.MigrateJdbcInput();
                                migrateJdbcInputs.add(migrateJdbcInput);
                                observables.add(MigrateUtil.read(migrateJdbcInput, input.getTableName(), input.getSchemaName(), url, username, password));
                                break;
                            }
                            default:
                                throw new IllegalStateException("Unexpected value: " + inputTable.getType());
                        }
                    } else {
                        throw new UnsupportedOperationException();
                    }
                    String outputSchemaName = outputTable.getSchemaName();
                    String outputTableName = outputTable.getTableName();
                    String insertTemplate = getMySQLInsertTemplate(outputTable);
                    migrateJdbcOutput.setUsername(username);
                    migrateJdbcOutput.setPassword(password);
                    migrateJdbcOutput.setUrl(url);
                    migrateJdbcOutput.setInsertTemplate(insertTemplate);
                    MigrateUtil.MigrateController migrateController = MigrateUtil.write(migrateJdbcOutput, Flowable.merge(observables.stream().map(i -> i.buffer(output.getBatch()).subscribeOn(Schedulers.io())).collect(Collectors.toList())));
                    MigrateUtil.MigrateScheduler scheduler = MigrateUtil.register(name, migrateJdbcInputs, migrateJdbcOutput, migrateController);
                    return response.sendResultSet(MigrateUtil.show(scheduler));
                }
                if ("BINLOG_LIST".equalsIgnoreCase(cmd)) {
                    return response.sendResultSet(BinlogUtil.list());
                }
                if ("BINLOG_STOP".equalsIgnoreCase(cmd)) {
                    BinlogStopHint hint = JsonUtil.from(body, BinlogStopHint.class);
                    if (BinlogUtil.stop(hint.getId())) {
                        dataContext.setAffectedRows(1);
                    }
                    return response.sendOk();
                }
                if ("BINLOG".equalsIgnoreCase(cmd)) {
                    BinlogHint binlogHint = JsonUtil.from(body, BinlogHint.class);
                    Objects.requireNonNull(binlogHint.getInputTableNames());
                    List<String> outputTableNames = binlogHint.getOutputTableNames();
                    if (outputTableNames == null) {
                        // BUGFIX: the original copied the input names back onto the *input*
                        // list (a no-op), leaving getOutputTableNames() null and causing an
                        // NPE in the loop below.  Default the outputs to the inputs instead.
                        binlogHint.setOutputTableNames(binlogHint.getInputTableNames());
                    }
                    IdentityHashMap<TableHandler, TableHandler> map = new IdentityHashMap<>();
                    List<TableHandler> inputs = new ArrayList<>();
                    List<TableHandler> outputs = new ArrayList<>();
                    for (String inputTableName : binlogHint.getInputTableNames()) {
                        // BUGFIX: split(".") treats "." as a regex that matches every
                        // character and returns an empty array; the dot must be escaped.
                        String[] split = inputTableName.split("\\.");
                        String schemaName = SQLUtils.normalize(split[0]);
                        String tableName = SQLUtils.normalize(split[1]);
                        TableHandler inputTable = metadataManager.getTable(schemaName, tableName);
                        inputs.add(inputTable);
                    }
                    for (String outputTableName : binlogHint.getOutputTableNames()) {
                        String[] split = outputTableName.split("\\.");
                        String schemaName = SQLUtils.normalize(split[0]);
                        String tableName = SQLUtils.normalize(split[1]);
                        TableHandler outputTable = metadataManager.getTable(schemaName, tableName);
                        outputs.add(outputTable);
                    }
                    for (int i = 0; i < inputs.size(); i++) {
                        map.put(inputs.get(i), outputs.get(i));
                    }
                    Map<String, Map<String, List<Partition>>> infoCollector = new HashMap<>();
                    List<MigrateUtil.MigrateController> migrateControllers = new ArrayList<>();
                    Set<Map.Entry<TableHandler, TableHandler>> entries = map.entrySet();
                    for (Map.Entry<TableHandler, TableHandler> entry : entries) {
                        ServerConfig serverConfig = MetaClusterCurrent.wrapper(ServerConfig.class);
                        TableHandler inputTable = entry.getKey();
                        TableHandler outputTable = entry.getValue();
                        UserConfig userConfig = routerConfig.getUsers().get(0);
                        String username = userConfig.getUsername();
                        String password = userConfig.getPassword();
                        String ip = serverConfig.getIp();
                        int port = serverConfig.getPort();
                        String url = "jdbc:mysql://" + ip + ":" + port + "/mysql?useUnicode=true&characterEncoding=utf8&useSSL=false&serverTimezone=Asia/Shanghai&allowPublicKeyRetrieval=true";
                        MigrateUtil.MigrateJdbcAnyOutput output = new MigrateUtil.MigrateJdbcAnyOutput();
                        output.setUrl(url);
                        output.setUsername(username);
                        output.setPassword(password);
                        // Resolve the physical partitions that make up the input table.
                        List<Partition> partitions = new ArrayList<>();
                        switch (inputTable.getType()) {
                            case SHARDING: {
                                ShardingTable shardingTable = (ShardingTable) inputTable;
                                partitions = shardingTable.getShardingFuntion().calculate(Collections.emptyMap());
                                break;
                            }
                            case GLOBAL: {
                                GlobalTable globalTable = (GlobalTable) inputTable;
                                partitions = ImmutableList.of(globalTable.getGlobalDataNode().get(0));
                                break;
                            }
                            case NORMAL: {
                                NormalTable normalTable = (NormalTable) inputTable;
                                partitions = ImmutableList.of(normalTable.getDataNode());
                                break;
                            }
                            case CUSTOM:
                            case VISUAL:
                            case VIEW:
                                throw new UnsupportedOperationException();
                        }
                        ReplicaSelectorManager replicaSelectorManager = MetaClusterCurrent.wrapper(ReplicaSelectorManager.class);
                        // Group partitions by the master datasource that serves them, so one
                        // binlog stream is opened per physical datasource.
                        Map<String, List<Partition>> listMap = partitions.stream().collect(Collectors.groupingBy(partition -> replicaSelectorManager.getDatasourceNameByReplicaName(partition.getTargetName(), true, null)));
                        infoCollector.put(inputTable.getUniqueName(), listMap);
                        List<Flowable<BinlogUtil.ParamSQL>> flowables = new ArrayList<>();
                        for (Map.Entry<String, List<Partition>> e : listMap.entrySet()) {
                            flowables.add(BinlogUtil.observe(e.getKey(), e.getValue()).subscribeOn(Schedulers.io()));
                        }
                        Flowable<BinlogUtil.ParamSQL> merge = flowables.size() == 1 ? flowables.get(0) : Flowable.merge(flowables, flowables.size());
                        // Rewrite each replicated statement to target the output table.
                        merge = merge.map(paramSQL -> {
                            SQLStatement sqlStatement = SQLUtils.parseSingleMysqlStatement(paramSQL.getSql());
                            SQLExprTableSource sqlExprTableSource = VertxUpdateExecuter.getTableSource(sqlStatement);
                            MycatSQLExprTableSourceUtil.setSqlExprTableSource(outputTable.getSchemaName(), outputTable.getTableName(), sqlExprTableSource);
                            paramSQL.setSql(sqlStatement.toString());
                            return paramSQL;
                        });
                        MigrateUtil.MigrateController migrateController = MigrateUtil.writeSql(output, merge);
                        migrateControllers.add(migrateController);
                    }
                    BinlogUtil.BinlogScheduler scheduler = BinlogUtil.BinlogScheduler.of(UUID.randomUUID().toString(), binlogHint.getName(), infoCollector, migrateControllers);
                    BinlogUtil.register(scheduler);
                    return response.sendResultSet(BinlogUtil.list(Collections.singletonList(scheduler)));
                }
                // Unknown admin command: treat it as a DML-style hint.
                mycatDmlHandler(cmd, body);
                return response.sendOk();
            }
        }
        // Not a mycat hint (or more than one hint): nothing to do.
        return response.sendOk();
    } catch (Throwable throwable) {
        return response.sendError(throwable);
    }
}
Usage of io.mycat.replica.heartbeat.HeartbeatFlow in the Mycat2 project (MyCATApache): the HintHandler class, method showHeatbeatStat.
/**
 * Builds a snapshot result set describing every datasource heartbeat tracked by the
 * replica selector: one row per {@link HeartbeatFlow}, covering instance identity,
 * load-balancing attributes and the latest heartbeat status.
 * <p>
 * If either {@code MycatRouterConfig} or {@code ReplicaSelectorManager} is not yet
 * registered in {@code MetaClusterCurrent} (e.g. during bootstrap), the result set
 * is returned with headers only and no rows.
 *
 * @return a row iterator over the columns declared below; never null
 */
public static RowBaseIterator showHeatbeatStat() {
ResultSetBuilder resultSetBuilder = ResultSetBuilder.create();
// Column declaration order must match the Arrays.asList(...) payload order at the bottom.
resultSetBuilder.addColumnInfo("NAME", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("TYPE", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("READABLE", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("SESSION_COUNT", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("WEIGHT", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("ALIVE", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("MASTER", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("LIMIT_SESSION_COUNT", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("REPLICA", JDBCType.VARCHAR);
// NOTE(review): SLAVE_THRESHOLD is declared BIGINT but supplied as a double below —
// confirm the driver-side coercion is acceptable.
resultSetBuilder.addColumnInfo("SLAVE_THRESHOLD", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("IS_HEARTBEAT_TIMEOUT", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("HB_ERROR_COUNT", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("HB_LAST_SWITCH_TIME", JDBCType.TIMESTAMP);
resultSetBuilder.addColumnInfo("HB_MAX_RETRY", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("IS_CHECKING", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("MIN_SWITCH_TIME_INTERVAL", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("HEARTBEAT_TIMEOUT", JDBCType.BIGINT);
resultSetBuilder.addColumnInfo("SYNC_DS_STATUS", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("HB_DS_STATUS", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("IS_SLAVE_BEHIND_MASTER", JDBCType.VARCHAR);
resultSetBuilder.addColumnInfo("LAST_SEND_QUERY_TIME", JDBCType.TIMESTAMP);
resultSetBuilder.addColumnInfo("LAST_RECEIVED_QUERY_TIME", JDBCType.TIMESTAMP);
// Both beans are optional at bootstrap; without them only the header is emitted.
if (MetaClusterCurrent.exist(MycatRouterConfig.class) && MetaClusterCurrent.exist(ReplicaSelectorManager.class)) {
MycatRouterConfig routerConfig = MetaClusterCurrent.wrapper(MycatRouterConfig.class);
ReplicaSelectorManager replicaSelectorRuntime = MetaClusterCurrent.wrapper(ReplicaSelectorManager.class);
// Index datasource configs by name so each instance's maxCon can be looked up below.
Map<String, DatasourceConfig> dataSourceConfig = routerConfig.getDatasources().stream().collect(Collectors.toMap(k -> k.getName(), v -> v));
for (HeartbeatFlow heartbeatFlow : replicaSelectorRuntime.getHeartbeatDetectorMap().values()) {
PhysicsInstance instance = heartbeatFlow.instance();
String NAME = instance.getName();
String TYPE = instance.getType().name();
boolean READABLE = instance.asSelectRead();
int SESSION_COUNT = instance.getSessionCounter();
int WEIGHT = instance.getWeight();
boolean ALIVE = instance.isAlive();
boolean MASTER = instance.isMaster();
double SLAVE_THRESHOLD = heartbeatFlow.getSlaveThreshold();
boolean IS_HEARTBEAT_TIMEOUT = heartbeatFlow.isHeartbeatTimeout();
final HeartBeatStatus HEART_BEAT_STATUS = heartbeatFlow.getHbStatus();
int HB_ERROR_COUNT = HEART_BEAT_STATUS.getErrorCount();
// NOTE(review): Timestamp#toLocalDateTime converts epoch millis using the JVM
// default time zone — confirm that is the intended display zone.
LocalDateTime HB_LAST_SWITCH_TIME = new Timestamp(HEART_BEAT_STATUS.getLastSwitchTime()).toLocalDateTime();
int HB_MAX_RETRY = HEART_BEAT_STATUS.getMaxRetry();
boolean IS_CHECKING = HEART_BEAT_STATUS.isChecking();
long MIN_SWITCH_TIME_INTERVAL = HEART_BEAT_STATUS.getMinSwitchTimeInterval();
final long HEARTBEAT_TIMEOUT = (heartbeatFlow.getHeartbeatTimeout());
DatasourceStatus DS_STATUS_OBJECT = heartbeatFlow.getDsStatus();
String SYNC_DS_STATUS = DS_STATUS_OBJECT.getDbSynStatus().name();
String HB_DS_STATUS = DS_STATUS_OBJECT.getStatus().name();
boolean IS_SLAVE_BEHIND_MASTER = DS_STATUS_OBJECT.isSlaveBehindMaster();
LocalDateTime LAST_SEND_QUERY_TIME = new Timestamp(heartbeatFlow.getLastSendQryTime()).toLocalDateTime();
LocalDateTime LAST_RECEIVED_QUERY_TIME = new Timestamp(heartbeatFlow.getLastReceivedQryTime()).toLocalDateTime();
Optional<DatasourceConfig> e = Optional.ofNullable(dataSourceConfig.get(NAME));
// All replica groups this physical instance belongs to, joined as "a,b,c".
String replicaDataSourceSelectorList = String.join(",", replicaSelectorRuntime.getReplicaNameListByInstanceName(NAME));
// Payload order mirrors the addColumnInfo declarations above:
// LIMIT_SESSION_COUNT = configured maxCon (-1 when no config found),
// REPLICA = replicaDataSourceSelectorList.
resultSetBuilder.addObjectRowPayload(Arrays.asList(NAME, TYPE, READABLE, SESSION_COUNT, WEIGHT, ALIVE, MASTER, e.map(i -> i.getMaxCon()).orElse(-1), replicaDataSourceSelectorList, SLAVE_THRESHOLD, IS_HEARTBEAT_TIMEOUT, HB_ERROR_COUNT, HB_LAST_SWITCH_TIME, HB_MAX_RETRY, IS_CHECKING, MIN_SWITCH_TIME_INTERVAL, HEARTBEAT_TIMEOUT, SYNC_DS_STATUS, HB_DS_STATUS, IS_SLAVE_BEHIND_MASTER, LAST_SEND_QUERY_TIME, LAST_RECEIVED_QUERY_TIME));
}
}
RowBaseIterator rowBaseIterator = resultSetBuilder.build();
return rowBaseIterator;
}
Aggregations