use of io.mycat.config.model.SchemaConfig in project Mycat-Server by MyCATApache.
the class ClearSlow method schema.
public static void schema(ManagerConnection c, String name) {
    MycatConfig conf = MycatServer.getInstance().getConfig();
    SchemaConfig schema = conf.getSchemas().get(name);
    if (schema != null) {
        // Map<String, MySQLDataNode> dataNodes = conf.getDataNodes();
        // for (String n : schema.getAllDataNodes()) {
        //     MySQLDataNode dn = dataNodes.get(n);
        //     MySQLDataSource ds = null;
        //     if (dn != null && (ds = dn.getSource()) != null) {
        //         ds.getSqlRecorder().clear();
        //     }
        // }
        c.write(c.writeToBuffer(OkPacket.OK, c.allocate()));
    } else {
        c.writeErrMessage(ErrorCode.ER_YES, "Invalid Schema:" + name);
    }
}
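The handler's only real work is a config lookup keyed by schema name: it replies with an OK packet when the schema is configured and with an error packet otherwise. Below is a minimal, self-contained sketch of that lookup-and-respond shape; SchemaLookupSketch, its SCHEMAS map, and clearSlow are hypothetical stand-ins for MycatServer.getInstance().getConfig().getSchemas() and the ManagerConnection replies, not Mycat API.

import java.util.HashMap;
import java.util.Map;

public class SchemaLookupSketch {
    // hypothetical stand-in for MycatServer.getInstance().getConfig().getSchemas()
    private static final Map<String, Object> SCHEMAS = new HashMap<>();

    static {
        SCHEMAS.put("TESTDB", new Object());
    }

    // same shape as ClearSlow.schema: reply OK only for configured schemas
    static String clearSlow(String name) {
        return SCHEMAS.get(name) != null ? "OK" : "Invalid Schema:" + name;
    }

    public static void main(String[] args) {
        System.out.println(clearSlow("TESTDB")); // OK
        System.out.println(clearSlow("NOSUCH")); // Invalid Schema:NOSUCH
    }
}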
use of io.mycat.config.model.SchemaConfig in project Mycat-Server by MyCATApache.
the class MySQLTableStructureDetector method run.
@Override
public void run() {
    for (SchemaConfig schema : MycatServer.getInstance().getConfig().getSchemas().values()) {
        for (TableConfig table : schema.getTables().values()) {
            for (String dataNode : table.getDataNodes()) {
                try {
                    table.getReentrantReadWriteLock().writeLock().lock();
                    ConcurrentHashMap<String, List<String>> map = new ConcurrentHashMap<>();
                    table.setDataNodeTableStructureSQLMap(map);
                } finally {
                    table.getReentrantReadWriteLock().writeLock().unlock();
                }
                OneRawSQLQueryResultHandler resultHandler = new OneRawSQLQueryResultHandler(MYSQL_SHOW_CREATE_TABLE_COLMS, new MySQLTableStructureListener(dataNode, table));
                resultHandler.setMark("Table Structure");
                PhysicalDBNode dn = MycatServer.getInstance().getConfig().getDataNodes().get(dataNode);
                SQLJob sqlJob = new SQLJob(sqlPrefix + table.getName(), dn.getDatabase(), resultHandler, dn.getDbPool().getSource());
                sqlJob.run();
            }
        }
    }
}
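The try/finally around the write lock guards the swap of the per-table structure map, so concurrent readers never observe a half-reset map; the SHOW CREATE TABLE job then repopulates it per data node. A minimal sketch of that lock-guarded swap pattern follows; StructureMapHolder and its method names are illustrative, not Mycat classes.

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class StructureMapHolder {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private ConcurrentHashMap<String, List<String>> structureMap = new ConcurrentHashMap<>();

    // replace the whole map under the write lock, as the detector does,
    // so readers holding the read lock never see a half-reset structure
    void reset() {
        lock.writeLock().lock();
        try {
            structureMap = new ConcurrentHashMap<>();
        } finally {
            lock.writeLock().unlock();
        }
    }

    List<String> structureFor(String dataNode) {
        lock.readLock().lock();
        try {
            return structureMap.get(dataNode);
        } finally {
            lock.readLock().unlock();
        }
    }

    public static void main(String[] args) {
        StructureMapHolder holder = new StructureMapHolder();
        holder.structureMap.put("dn1", Arrays.asList("CREATE TABLE t (id INT)"));
        System.out.println(holder.structureFor("dn1")); // [CREATE TABLE t (id INT)]
        holder.reset();
        System.out.println(holder.structureFor("dn1")); // null
    }
}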
use of io.mycat.config.model.SchemaConfig in project Mycat-Server by MyCATApache.
the class ShowDatabase method execute.
public static void execute(ManagerConnection c) {
    ByteBuffer buffer = c.allocate();
    // write header
    buffer = header.write(buffer, c, true);
    // write fields
    for (FieldPacket field : fields) {
        buffer = field.write(buffer, c, true);
    }
    // write eof
    buffer = eof.write(buffer, c, true);
    // write rows
    byte packetId = eof.packetId;
    Map<String, SchemaConfig> schemas = MycatServer.getInstance().getConfig().getSchemas();
    for (String name : new TreeSet<String>(schemas.keySet())) {
        RowDataPacket row = new RowDataPacket(FIELD_COUNT);
        row.add(StringUtil.encode(name, c.getCharset()));
        row.packetId = ++packetId;
        buffer = row.write(buffer, c, true);
    }
    // write lastEof
    EOFPacket lastEof = new EOFPacket();
    lastEof.packetId = ++packetId;
    buffer = lastEof.write(buffer, c, true);
    // write buffer
    c.write(buffer);
}
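The method streams a standard MySQL text result set: a header packet, one field packet per column, an EOF, one row per schema name in sorted order, and a terminating EOF, with each packet's packetId one greater than its predecessor. Here is a runnable sketch of just that sequence-number bookkeeping; payloads are elided and FIELD_COUNT is 1, as in the snippet, while the schema names are made up for the demo.

import java.util.Map;
import java.util.TreeSet;

public class PacketSequenceSketch {
    public static void main(String[] args) {
        Map<String, Integer> schemas = Map.of("db2", 2, "db1", 1);
        byte packetId = 1; // header packet
        System.out.println("header  -> packetId " + packetId);
        packetId++;        // one field packet (FIELD_COUNT = 1)
        System.out.println("field   -> packetId " + packetId);
        packetId++;        // EOF closing the field list
        System.out.println("eof     -> packetId " + packetId);
        for (String name : new TreeSet<>(schemas.keySet())) {
            packetId++;    // one row per schema, in sorted order
            System.out.println("row " + name + " -> packetId " + packetId);
        }
        packetId++;        // terminating EOF
        System.out.println("lastEof -> packetId " + packetId);
    }
}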
use of io.mycat.config.model.SchemaConfig in project Mycat-Server by MyCATApache.
the class RollbackConfig method rollback.
private static boolean rollback() {
    MycatConfig conf = MycatServer.getInstance().getConfig();
    Map<String, UserConfig> users = conf.getBackupUsers();
    Map<String, SchemaConfig> schemas = conf.getBackupSchemas();
    Map<String, PhysicalDBNode> dataNodes = conf.getBackupDataNodes();
    Map<String, PhysicalDBPool> dataHosts = conf.getBackupDataHosts();
    MycatCluster cluster = conf.getBackupCluster();
    FirewallConfig firewall = conf.getBackupFirewall();
    // check whether the config can be rolled back
    if (!conf.canRollback()) {
        return false;
    }
    // initialise the pools from the backup config
    boolean rollbackStatus = true;
    Map<String, PhysicalDBPool> cNodes = conf.getDataHosts();
    for (PhysicalDBPool dn : dataHosts.values()) {
        dn.init(dn.getActivedIndex());
        if (!dn.isInitSuccess()) {
            rollbackStatus = false;
            break;
        }
    }
    // if initialisation failed, clean up the resources that were already initialised
    if (!rollbackStatus) {
        for (PhysicalDBPool dn : dataHosts.values()) {
            dn.clearDataSources("rollbackup config");
            dn.stopHeartbeat();
        }
        return false;
    }
    // apply the rollback
    conf.rollback(users, schemas, dataNodes, dataHosts, cluster, firewall);
    // dispose of the old resources
    for (PhysicalDBPool dn : cNodes.values()) {
        dn.clearDataSources("clear old config ");
        dn.stopHeartbeat();
    }
    // clear the cache
    MycatServer.getInstance().getCacheService().clearCache();
    return true;
}
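The rollback follows an init-then-swap-then-cleanup discipline: bring up the backup pools first, abort and tear down the partially initialised backups on failure, and retire the old pools only after the config swap has succeeded. A generic sketch of that discipline, assuming a hypothetical Pool interface in place of PhysicalDBPool:

import java.util.List;

public class RollbackSketch {
    interface Pool {
        boolean init();
        void shutdown(String reason);
    }

    static boolean rollback(List<Pool> backup, List<Pool> current) {
        boolean ok = true;
        for (Pool p : backup) {
            if (!p.init()) {
                ok = false;
                break;
            }
        }
        if (!ok) {
            // undo the partial initialisation before giving up
            for (Pool p : backup) {
                p.shutdown("rollback config");
            }
            return false;
        }
        // the config swap (conf.rollback(...) in the real code) would happen here
        for (Pool p : current) {
            p.shutdown("clear old config");
        }
        return true;
    }

    public static void main(String[] args) {
        Pool good = new Pool() {
            public boolean init() { return true; }
            public void shutdown(String reason) { System.out.println("shutdown: " + reason); }
        };
        System.out.println(rollback(List.of(good), List.of(good))); // shuts down the old pool, prints true
    }
}

Shutting down the old pools only after the swap means a failed rollback leaves the running configuration untouched.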
use of io.mycat.config.model.SchemaConfig in project Mycat-Server by MyCATApache.
the class MigrateHandler method handle.
public static void handle(String stmt, ServerConnection c) {
    Map<String, String> map = parse(stmt);
    String table = map.get("table");
    String add = map.get("add");
    if (table == null) {
        writeErrMessage(c, "table cannot be null");
        return;
    }
    if (add == null) {
        writeErrMessage(c, "add cannot be null");
        return;
    }
    String taskID = getUUID();
    try {
        SchemaConfig schemaConfig = MycatServer.getInstance().getConfig().getSchemas().get(c.getSchema());
        TableConfig tableConfig = schemaConfig.getTables().get(table.toUpperCase());
        AbstractPartitionAlgorithm algorithm = tableConfig.getRule().getRuleAlgorithm();
        if (!(algorithm instanceof PartitionByCRC32PreSlot)) {
            writeErrMessage(c, "table: " + table + " rule is not PartitionByCRC32PreSlot");
            return;
        }
        Map<Integer, List<Range>> integerListMap = ((PartitionByCRC32PreSlot) algorithm).getRangeMap();
        integerListMap = (Map<Integer, List<Range>>) ObjectUtil.copyObject(integerListMap);
        ArrayList<String> oldDataNodes = tableConfig.getDataNodes();
        List<String> newDataNodes = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(add);
        Map<String, List<MigrateTask>> tasks = MigrateUtils.balanceExpand(table, integerListMap, oldDataNodes, newDataNodes, PartitionByCRC32PreSlot.DEFAULT_SLOTS_NUM);
        CuratorTransactionFinal transactionFinal = null;
        String taskBase = ZKUtils.getZKBasePath() + "migrate/" + c.getSchema();
        String taskPath = taskBase + "/" + taskID;
        CuratorFramework client = ZKUtils.getConnection();
        // validate: if a previous migration task for the same table is still unfinished, refuse to start another
        if (client.checkExists().forPath(taskBase) != null) {
            List<String> childTaskList = client.getChildren().forPath(taskBase);
            for (String child : childTaskList) {
                TaskNode taskNode = JSON.parseObject(ZKUtils.getConnection().getData().forPath(taskBase + "/" + child), TaskNode.class);
                if (taskNode.getSchema().equalsIgnoreCase(c.getSchema()) && table.equalsIgnoreCase(taskNode.getTable()) && taskNode.getStatus() < 5) {
                    writeErrMessage(c, "table: " + table + " previous migrate task is still running; a table may have only one task at a time");
                    return;
                }
            }
        }
        client.create().creatingParentsIfNeeded().forPath(taskPath);
        TaskNode taskNode = new TaskNode();
        taskNode.setSchema(c.getSchema());
        taskNode.setSql(stmt);
        taskNode.setTable(table);
        taskNode.setAdd(add);
        taskNode.setStatus(0);
        Map<String, Integer> fromNodeSlaveIdMap = new HashMap<>();
        List<MigrateTask> allTaskList = new ArrayList<>();
        for (Map.Entry<String, List<MigrateTask>> entry : tasks.entrySet()) {
            List<MigrateTask> value = entry.getValue();
            for (MigrateTask migrateTask : value) {
                migrateTask.setSchema(c.getSchema());
                // only one slaveId needs to be allocated per dataHost: later task execution
                // simulates one slave node per dataHost
                String dataHost = getDataHostNameFromNode(migrateTask.getFrom());
                if (fromNodeSlaveIdMap.containsKey(dataHost)) {
                    migrateTask.setSlaveId(fromNodeSlaveIdMap.get(dataHost));
                } else {
                    migrateTask.setSlaveId(getSlaveIdFromZKForDataNode(migrateTask.getFrom()));
                    fromNodeSlaveIdMap.put(dataHost, migrateTask.getSlaveId());
                }
            }
            allTaskList.addAll(value);
        }
        transactionFinal = client.inTransaction().setData().forPath(taskPath, JSON.toJSONBytes(taskNode)).and();
        // merge into dataHost-level tasks
        Map<String, List<MigrateTask>> dataHostMigrateMap = mergerTaskForDataHost(allTaskList);
        for (Map.Entry<String, List<MigrateTask>> entry : dataHostMigrateMap.entrySet()) {
            String key = entry.getKey();
            List<MigrateTask> value = entry.getValue();
            String path = taskPath + "/" + key;
            transactionFinal = transactionFinal.create().forPath(path, JSON.toJSONBytes(value)).and();
        }
        transactionFinal.commit();
    } catch (Exception e) {
        LOGGER.error("migrate error", e);
        writeErrMessage(c, "migrate error:" + e);
        return;
    }
    writePackToClient(c, taskID);
    LOGGER.info("task start at {}", new Date());
}
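mergerTaskForDataHost is not shown in this snippet; judging by its name and how its result is written (one ZooKeeper child znode per key), it plausibly groups the flat task list by the dataHost that owns each task's source node. A hedged sketch of that grouping with simplified types (the real method works on MigrateTask objects, not strings):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class TaskGroupingSketch {
    // group each task under the dataHost that owns its source node
    static Map<String, List<String>> groupByDataHost(Map<String, String> taskToHost) {
        Map<String, List<String>> byHost = new HashMap<>();
        for (Map.Entry<String, String> e : taskToHost.entrySet()) {
            byHost.computeIfAbsent(e.getValue(), h -> new ArrayList<>()).add(e.getKey());
        }
        return byHost;
    }

    public static void main(String[] args) {
        Map<String, String> taskToHost = Map.of("task1", "host1", "task2", "host1", "task3", "host2");
        System.out.println(groupByDataHost(taskToHost)); // e.g. {host1=[task1, task2], host2=[task3]}
    }
}

Writing the task node and all per-dataHost children in a single Curator transaction, as the handler does, means watchers never observe a task with only part of its batches registered.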