Example 1 with SchemaConfig

Use of io.mycat.config.model.SchemaConfig in project Mycat-Server by MyCATApache.

From class ClearSlow, method schema().

public static void schema(ManagerConnection c, String name) {
    MycatConfig conf = MycatServer.getInstance().getConfig();
    SchemaConfig schema = conf.getSchemas().get(name);
    if (schema != null) {
        // the per-datasource SQL recorder cleanup below is disabled in the source:
        //            Map<String, MySQLDataNode> dataNodes = conf.getDataNodes();
        //            for (String n : schema.getAllDataNodes()) {
        //                MySQLDataNode dn = dataNodes.get(n);
        //                MySQLDataSource ds = null;
        //                if (dn != null && (ds = dn.getSource()) != null) {
        //                    ds.getSqlRecorder().clear();
        //                }
        //            }
        c.write(c.writeToBuffer(OkPacket.OK, c.allocate()));
    } else {
        c.writeErrMessage(ErrorCode.ER_YES, "Invalid Schema:" + name);
    }
}
Also used: SchemaConfig(io.mycat.config.model.SchemaConfig), MycatConfig(io.mycat.config.MycatConfig)
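The commented-out block in schema() shows the intended cleanup: walk the schema's data nodes and clear per-datasource state. Below is a minimal sketch of that walk against the config API seen in these examples, assuming schema.getAllDataNodes() (from the disabled code) and the PhysicalDBNode lookup used in Example 2; the per-node cleanup is left as a placeholder because the MySQLDataNode/MySQLDataSource types referenced above are no longer part of the config.

MycatConfig conf = MycatServer.getInstance().getConfig();
SchemaConfig schema = conf.getSchemas().get(name);
if (schema != null) {
    // resolve each data node name to its PhysicalDBNode, as Example 2 does
    Map<String, PhysicalDBNode> dataNodes = conf.getDataNodes();
    for (String n : schema.getAllDataNodes()) {
        PhysicalDBNode dn = dataNodes.get(n);
        if (dn != null) {
            // per-node cleanup would go here (placeholder)
        }
    }
}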

Example 2 with SchemaConfig

Use of io.mycat.config.model.SchemaConfig in project Mycat-Server by MyCATApache.

From class MySQLTableStructureDetector, method run().

@Override
public void run() {
    for (SchemaConfig schema : MycatServer.getInstance().getConfig().getSchemas().values()) {
        for (TableConfig table : schema.getTables().values()) {
            for (String dataNode : table.getDataNodes()) {
                table.getReentrantReadWriteLock().writeLock().lock();
                try {
                    // swap in a fresh structure map while holding the write lock
                    ConcurrentHashMap<String, List<String>> map = new ConcurrentHashMap<>();
                    table.setDataNodeTableStructureSQLMap(map);
                } finally {
                    table.getReentrantReadWriteLock().writeLock().unlock();
                }
                OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler(MYSQL_SHOW_CREATE_TABLE_COLMS, new MySQLTableStructureListener(dataNode, table));
                resultHandler.setMark("Table Structure");
                PhysicalDBNode dn = MycatServer.getInstance().getConfig().getDataNodes().get(dataNode);
                SQLJob sqlJob = new SQLJob(sqlPrefix + table.getName(), dn.getDatabase(), resultHandler, dn.getDbPool().getSource());
                sqlJob.run();
            }
        }
    }
}
Also used: OneRawSQLQueryResultHandler(io.mycat.sqlengine.OneRawSQLQueryResultHandler), PhysicalDBNode(io.mycat.backend.datasource.PhysicalDBNode), SchemaConfig(io.mycat.config.model.SchemaConfig), SQLJob(io.mycat.sqlengine.SQLJob), TableConfig(io.mycat.config.model.TableConfig), List(java.util.List), LinkedList(java.util.LinkedList), ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
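The locking in run() follows the standard acquire-before-try idiom from java.util.concurrent. Here is a self-contained sketch of that idiom; the class and field names are illustrative only.

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockIdiom {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private Object state;

    public void reset(Object newState) {
        // acquire before the try so the finally never unlocks a lock that was never taken
        lock.writeLock().lock();
        try {
            state = newState;
        } finally {
            lock.writeLock().unlock();
        }
    }
}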

Example 3 with SchemaConfig

Use of io.mycat.config.model.SchemaConfig in project Mycat-Server by MyCATApache.

From class ShowDatabase, method execute().

public static void execute(ManagerConnection c) {
    ByteBuffer buffer = c.allocate();
    // write header
    buffer = header.write(buffer, c, true);
    // write fields
    for (FieldPacket field : fields) {
        buffer = field.write(buffer, c, true);
    }
    // write eof
    buffer = eof.write(buffer, c, true);
    // write rows
    byte packetId = eof.packetId;
    Map<String, SchemaConfig> schemas = MycatServer.getInstance().getConfig().getSchemas();
    for (String name : new TreeSet<String>(schemas.keySet())) {
        RowDataPacket row = new RowDataPacket(FIELD_COUNT);
        row.add(StringUtil.encode(name, c.getCharset()));
        row.packetId = ++packetId;
        buffer = row.write(buffer, c, true);
    }
    // write lastEof
    EOFPacket lastEof = new EOFPacket();
    lastEof.packetId = ++packetId;
    buffer = lastEof.write(buffer, c, true);
    // write buffer
    c.write(buffer);
}
Also used: SchemaConfig(io.mycat.config.model.SchemaConfig), TreeSet(java.util.TreeSet), RowDataPacket(io.mycat.net.mysql.RowDataPacket), EOFPacket(io.mycat.net.mysql.EOFPacket), ByteBuffer(java.nio.ByteBuffer), FieldPacket(io.mycat.net.mysql.FieldPacket)
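execute() relies on static header, fields, eof, and FIELD_COUNT members initialized elsewhere in ShowDatabase. Below is a hedged sketch of what that setup typically looks like in Mycat's manager responses, assuming the PacketUtil.getHeader/getField helpers and Fields constants used by sibling response classes; treat those helper names as assumptions and verify them against the source.

private static final int FIELD_COUNT = 1;
private static final ResultSetHeaderPacket header = PacketUtil.getHeader(FIELD_COUNT);
private static final FieldPacket[] fields = new FieldPacket[FIELD_COUNT];
private static final EOFPacket eof = new EOFPacket();
static {
    byte packetId = 0;
    header.packetId = ++packetId;
    // a single column holding the schema name
    fields[0] = PacketUtil.getField("DATABASE", Fields.FIELD_TYPE_VAR_STRING);
    fields[0].packetId = ++packetId;
    eof.packetId = ++packetId;
}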

Example 4 with SchemaConfig

Use of io.mycat.config.model.SchemaConfig in project Mycat-Server by MyCATApache.

From class RollbackConfig, method rollback().

private static boolean rollback() {
    MycatConfig conf = MycatServer.getInstance().getConfig();
    Map<String, UserConfig> users = conf.getBackupUsers();
    Map<String, SchemaConfig> schemas = conf.getBackupSchemas();
    Map<String, PhysicalDBNode> dataNodes = conf.getBackupDataNodes();
    Map<String, PhysicalDBPool> dataHosts = conf.getBackupDataHosts();
    MycatCluster cluster = conf.getBackupCluster();
    FirewallConfig firewall = conf.getBackupFirewall();
    // check that the config can be rolled back
    if (!conf.canRollback()) {
        return false;
    }
    // re-initialize the pools that exist in the backup config
    boolean rollbackStatus = true;
    Map<String, PhysicalDBPool> cNodes = conf.getDataHosts();
    for (PhysicalDBPool dn : dataHosts.values()) {
        dn.init(dn.getActivedIndex());
        if (!dn.isInitSuccess()) {
            rollbackStatus = false;
            break;
        }
    }
    // if initialization failed, clean up the resources that were already initialized
    if (!rollbackStatus) {
        for (PhysicalDBPool dn : dataHosts.values()) {
            dn.clearDataSources("rollback config");
            dn.stopHeartbeat();
        }
        return false;
    }
    // apply the rollback
    conf.rollback(users, schemas, dataNodes, dataHosts, cluster, firewall);
    // dispose of the old resources
    for (PhysicalDBPool dn : cNodes.values()) {
        dn.clearDataSources("clear old config");
        dn.stopHeartbeat();
    }
    // clear the cache
    MycatServer.getInstance().getCacheService().clearCache();
    return true;
}
Also used: PhysicalDBNode(io.mycat.backend.datasource.PhysicalDBNode), SchemaConfig(io.mycat.config.model.SchemaConfig), MycatCluster(io.mycat.config.MycatCluster), PhysicalDBPool(io.mycat.backend.datasource.PhysicalDBPool), MycatConfig(io.mycat.config.MycatConfig), UserConfig(io.mycat.config.model.UserConfig), FirewallConfig(io.mycat.config.model.FirewallConfig)
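rollback() is all-or-nothing: every backup pool must initialize, and a single failure tears down whatever was started. The following is a hypothetical helper (not in the source) that isolates this pattern using only the PhysicalDBPool calls shown above.

static boolean initAllOrCleanup(Collection<PhysicalDBPool> pools) {
    for (PhysicalDBPool pool : pools) {
        pool.init(pool.getActivedIndex());
        if (!pool.isInitSuccess()) {
            // one failure: stop and release everything that was started
            for (PhysicalDBPool p : pools) {
                p.clearDataSources("rollback config");
                p.stopHeartbeat();
            }
            return false;
        }
    }
    return true;
}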

Example 5 with SchemaConfig

Use of io.mycat.config.model.SchemaConfig in project Mycat-Server by MyCATApache.

From class MigrateHandler, method handle().

public static void handle(String stmt, ServerConnection c) {
    Map<String, String> map = parse(stmt);
    String table = map.get("table");
    String add = map.get("add");
    if (table == null) {
        writeErrMessage(c, "table cannot be null");
        return;
    }
    if (add == null) {
        writeErrMessage(c, "add cannot be null");
        return;
    }
    String taskID = getUUID();
    try {
        SchemaConfig schemaConfig = MycatServer.getInstance().getConfig().getSchemas().get(c.getSchema());
        TableConfig tableConfig = schemaConfig.getTables().get(table.toUpperCase());
        if (tableConfig == null) {
            writeErrMessage(c, "table: " + table + " is not found in schema " + c.getSchema());
            return;
        }
        AbstractPartitionAlgorithm algorithm = tableConfig.getRule().getRuleAlgorithm();
        if (!(algorithm instanceof PartitionByCRC32PreSlot)) {
            writeErrMessage(c, "table: " + table + " rule is not PartitionByCRC32PreSlot");
            return;
        }
        Map<Integer, List<Range>> integerListMap = ((PartitionByCRC32PreSlot) algorithm).getRangeMap();
        integerListMap = (Map<Integer, List<Range>>) ObjectUtil.copyObject(integerListMap);
        ArrayList<String> oldDataNodes = tableConfig.getDataNodes();
        List<String> newDataNodes = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(add);
        Map<String, List<MigrateTask>> tasks = MigrateUtils.balanceExpand(table, integerListMap, oldDataNodes, newDataNodes, PartitionByCRC32PreSlot.DEFAULT_SLOTS_NUM);
        CuratorTransactionFinal transactionFinal = null;
        String taskBase = ZKUtils.getZKBasePath() + "migrate/" + c.getSchema();
        String taskPath = taskBase + "/" + taskID;
        CuratorFramework client = ZKUtils.getConnection();
        // validation: refuse to continue if a previous migration task for the same table is still unfinished
        if (client.checkExists().forPath(taskBase) != null) {
            List<String> childTaskList = client.getChildren().forPath(taskBase);
            for (String child : childTaskList) {
                TaskNode taskNode = JSON.parseObject(client.getData().forPath(taskBase + "/" + child), TaskNode.class);
                if (taskNode.getSchema().equalsIgnoreCase(c.getSchema()) && table.equalsIgnoreCase(taskNode.getTable()) && taskNode.getStatus() < 5) {
                    writeErrMessage(c, "table: " + table + " previous migrate task is still running; only one migrate task per table may run at a time");
                    return;
                }
            }
        }
        client.create().creatingParentsIfNeeded().forPath(taskPath);
        TaskNode taskNode = new TaskNode();
        taskNode.setSchema(c.getSchema());
        taskNode.setSql(stmt);
        taskNode.setTable(table);
        taskNode.setAdd(add);
        taskNode.setStatus(0);
        Map<String, Integer> fromNodeSlaveIdMap = new HashMap<>();
        List<MigrateTask> allTaskList = new ArrayList<>();
        for (Map.Entry<String, List<MigrateTask>> entry : tasks.entrySet()) {
            String key = entry.getKey();
            List<MigrateTask> value = entry.getValue();
            for (MigrateTask migrateTask : value) {
                migrateTask.setSchema(c.getSchema());
                // allocate one slave id per dataHost; the follow-up tasks that simulate a slave node also need only one per dataHost
                String dataHost = getDataHostNameFromNode(migrateTask.getFrom());
                if (fromNodeSlaveIdMap.containsKey(dataHost)) {
                    migrateTask.setSlaveId(fromNodeSlaveIdMap.get(dataHost));
                } else {
                    migrateTask.setSlaveId(getSlaveIdFromZKForDataNode(migrateTask.getFrom()));
                    fromNodeSlaveIdMap.put(dataHost, migrateTask.getSlaveId());
                }
            }
            allTaskList.addAll(value);
        }
        transactionFinal = client.inTransaction().setData().forPath(taskPath, JSON.toJSONBytes(taskNode)).and();
        // merge into dataHost-level tasks
        Map<String, List<MigrateTask>> dataHostMigrateMap = mergerTaskForDataHost(allTaskList);
        for (Map.Entry<String, List<MigrateTask>> entry : dataHostMigrateMap.entrySet()) {
            String key = entry.getKey();
            List<MigrateTask> value = entry.getValue();
            String path = taskPath + "/" + key;
            transactionFinal = transactionFinal.create().forPath(path, JSON.toJSONBytes(value)).and();
        }
        transactionFinal.commit();
    } catch (Exception e) {
        LOGGER.error("migrate error", e);
        writeErrMessage(c, "migrate error:" + e);
        return;
    }
    writePackToClient(c, taskID);
    LOGGER.info("task {} started at {}", taskID, new Date());
}
Also used: AbstractPartitionAlgorithm(io.mycat.route.function.AbstractPartitionAlgorithm), TaskNode(io.mycat.migrate.TaskNode), SchemaConfig(io.mycat.config.model.SchemaConfig), MigrateTask(io.mycat.migrate.MigrateTask), CuratorFramework(org.apache.curator.framework.CuratorFramework), CuratorTransactionFinal(org.apache.curator.framework.api.transaction.CuratorTransactionFinal), TableConfig(io.mycat.config.model.TableConfig), PartitionByCRC32PreSlot(io.mycat.route.function.PartitionByCRC32PreSlot)
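handle() records each migration as a TaskNode under ZKUtils.getZKBasePath() + "migrate/" + schema + "/" + taskID and inspects the existing children before starting a new task. Below is a hedged sketch (the helper name is hypothetical) that reuses the same Curator and fastjson calls to list the TaskNodes recorded for a schema.

public static List<TaskNode> listMigrateTasks(String schema) throws Exception {
    String taskBase = ZKUtils.getZKBasePath() + "migrate/" + schema;
    CuratorFramework client = ZKUtils.getConnection();
    List<TaskNode> result = new ArrayList<>();
    // the base path exists only after at least one task has been created
    if (client.checkExists().forPath(taskBase) != null) {
        for (String child : client.getChildren().forPath(taskBase)) {
            byte[] data = client.getData().forPath(taskBase + "/" + child);
            result.add(JSON.parseObject(data, TaskNode.class));
        }
    }
    return result;
}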

Aggregations

SchemaConfig (io.mycat.config.model.SchemaConfig): 79
SystemConfig (io.mycat.config.model.SystemConfig): 54
RouteResultset (io.mycat.route.RouteResultset): 34
Test (org.junit.Test): 31
TableConfig (io.mycat.config.model.TableConfig): 12
MycatConfig (io.mycat.config.MycatConfig): 9
RouteResultsetNode (io.mycat.route.RouteResultsetNode): 7
SQLNonTransientException (java.sql.SQLNonTransientException): 7
PhysicalDBNode (io.mycat.backend.datasource.PhysicalDBNode): 6
RowDataPacket (io.mycat.net.mysql.RowDataPacket): 6
SQLStatement (com.alibaba.druid.sql.ast.SQLStatement): 5
MySqlStatementParser (com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser): 5
CacheService (io.mycat.cache.CacheService): 5
MycatCluster (io.mycat.config.MycatCluster): 5
UserConfig (io.mycat.config.model.UserConfig): 5
ArrayList (java.util.ArrayList): 5
NoSuchElementException (java.util.NoSuchElementException): 5
EOFPacket (io.mycat.net.mysql.EOFPacket): 4
FieldPacket (io.mycat.net.mysql.FieldPacket): 4
RouteService (io.mycat.route.RouteService): 4