use of io.mycat.config.model.TableConfig in project Mycat_plus by coderczp.
the class ExplainHandler method isMycatSeq.
private static boolean isMycatSeq(String stmt, SchemaConfig schema) {
    if (pattern.matcher(stmt).find()) {
        return true;
    }
    SQLStatementParser parser = new MySqlStatementParser(stmt);
    MySqlInsertStatement statement = (MySqlInsertStatement) parser.parseStatement();
    String tableName = statement.getTableName().getSimpleName();
    TableConfig tableConfig = schema.getTables().get(tableName.toUpperCase());
    if (tableConfig == null) {
        return false;
    }
    if (tableConfig.isAutoIncrement()) {
        boolean isHasIdInSql = false;
        String primaryKey = tableConfig.getPrimaryKey();
        List<SQLExpr> columns = statement.getColumns();
        for (SQLExpr column : columns) {
            String columnName = column.toString();
            if (primaryKey.equalsIgnoreCase(columnName)) {
                isHasIdInSql = true;
                break;
            }
        }
        if (!isHasIdInSql) {
            return true;
        }
    }
    return false;
}
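The TableConfig checks above can be read in isolation: look the table up by its upper-cased name, then consult isAutoIncrement() and getPrimaryKey(). The following is a minimal sketch of that check, assuming the same SchemaConfig/TableConfig API; the class name SequenceCheckSketch, the helper needsGeneratedId and the insertColumns parameter are hypothetical and not part of Mycat_plus.

import java.util.List;

import io.mycat.config.model.SchemaConfig;
import io.mycat.config.model.TableConfig;

// Hypothetical helper: returns true when the target table is auto-increment
// and none of the INSERT's column names matches its configured primary key,
// i.e. Mycat itself would have to generate the id.
public final class SequenceCheckSketch {

    public static boolean needsGeneratedId(SchemaConfig schema, String tableName, List<String> insertColumns) {
        // Table names are stored upper-cased in the schema's table map
        TableConfig tableConfig = schema.getTables().get(tableName.toUpperCase());
        if (tableConfig == null || !tableConfig.isAutoIncrement()) {
            return false;
        }
        String primaryKey = tableConfig.getPrimaryKey();
        for (String column : insertColumns) {
            if (primaryKey.equalsIgnoreCase(column)) {
                // The statement already supplies the id column
                return false;
            }
        }
        return true;
    }
}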
use of io.mycat.config.model.TableConfig in project Mycat_plus by coderczp.
the class MigrateHandler method handle.
public static void handle(String stmt, ServerConnection c) {
    Map<String, String> map = parse(stmt);
    String table = map.get("table");
    String add = map.get("add");
    if (table == null) {
        writeErrMessage(c, "table cannot be null");
        return;
    }
    if (add == null) {
        writeErrMessage(c, "add cannot be null");
        return;
    }
    String taskID = getUUID();
    try {
        SchemaConfig schemaConfig = MycatServer.getInstance().getConfig().getSchemas().get(c.getSchema());
        TableConfig tableConfig = schemaConfig.getTables().get(table.toUpperCase());
        AbstractPartitionAlgorithm algorithm = tableConfig.getRule().getRuleAlgorithm();
        if (!(algorithm instanceof PartitionByCRC32PreSlot)) {
            writeErrMessage(c, "table: " + table + " rule is not be PartitionByCRC32PreSlot");
            return;
        }
        Map<Integer, List<Range>> integerListMap = ((PartitionByCRC32PreSlot) algorithm).getRangeMap();
        integerListMap = (Map<Integer, List<Range>>) ObjectUtil.copyObject(integerListMap);
        ArrayList<String> oldDataNodes = tableConfig.getDataNodes();
        List<String> newDataNodes = Splitter.on(",").omitEmptyStrings().trimResults().splitToList(add);
        Map<String, List<MigrateTask>> tasks = MigrateUtils.balanceExpand(table, integerListMap, oldDataNodes, newDataNodes, PartitionByCRC32PreSlot.DEFAULT_SLOTS_NUM);
        CuratorTransactionFinal transactionFinal = null;
        String taskBase = ZKUtils.getZKBasePath() + "migrate/" + c.getSchema();
        String taskPath = taskBase + "/" + taskID;
        CuratorFramework client = ZKUtils.getConnection();
        // If a previous migration task for the same table has not finished, refuse to start another one
        if (client.checkExists().forPath(taskBase) != null) {
            List<String> childTaskList = client.getChildren().forPath(taskBase);
            for (String child : childTaskList) {
                TaskNode taskNode = JSON.parseObject(ZKUtils.getConnection().getData().forPath(taskBase + "/" + child), TaskNode.class);
                if (taskNode.getSchema().equalsIgnoreCase(c.getSchema()) && table.equalsIgnoreCase(taskNode.getTable()) && taskNode.getStatus() < 5) {
                    writeErrMessage(c, "table: " + table + " previous migrate task is still running,on the same time one table only one task");
                    return;
                }
            }
        }
        client.create().creatingParentsIfNeeded().forPath(taskPath);
        TaskNode taskNode = new TaskNode();
        taskNode.setSchema(c.getSchema());
        taskNode.setSql(stmt);
        taskNode.setTable(table);
        taskNode.setAdd(add);
        taskNode.setStatus(0);
        Map<String, Integer> fromNodeSlaveIdMap = new HashMap<>();
        List<MigrateTask> allTaskList = new ArrayList<>();
        for (Map.Entry<String, List<MigrateTask>> entry : tasks.entrySet()) {
            String key = entry.getKey();
            List<MigrateTask> value = entry.getValue();
            for (MigrateTask migrateTask : value) {
                migrateTask.setSchema(c.getSchema());
                // Only one slave id needs to be allocated per dataHost; later task execution simulates a single slave node per dataHost
                String dataHost = getDataHostNameFromNode(migrateTask.getFrom());
                if (fromNodeSlaveIdMap.containsKey(dataHost)) {
                    migrateTask.setSlaveId(fromNodeSlaveIdMap.get(dataHost));
                } else {
                    migrateTask.setSlaveId(getSlaveIdFromZKForDataNode(migrateTask.getFrom()));
                    fromNodeSlaveIdMap.put(dataHost, migrateTask.getSlaveId());
                }
            }
            allTaskList.addAll(value);
        }
        transactionFinal = client.inTransaction().setData().forPath(taskPath, JSON.toJSONBytes(taskNode)).and();
        // Merge the tasks into dataHost-level tasks
        Map<String, List<MigrateTask>> dataHostMigrateMap = mergerTaskForDataHost(allTaskList);
        for (Map.Entry<String, List<MigrateTask>> entry : dataHostMigrateMap.entrySet()) {
            String key = entry.getKey();
            List<MigrateTask> value = entry.getValue();
            String path = taskPath + "/" + key;
            transactionFinal = transactionFinal.create().forPath(path, JSON.toJSONBytes(value)).and();
        }
        transactionFinal.commit();
    } catch (Exception e) {
        LOGGER.error("migrate error", e);
        writeErrMessage(c, "migrate error:" + e);
        return;
    }
    writePackToClient(c, taskID);
    LOGGER.info("task start", new Date());
}
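The ZooKeeper write at the end of handle relies on a single Curator transaction, so the TaskNode payload and its per-dataHost child nodes become visible together. Below is a minimal sketch of that pattern, assuming an already-connected CuratorFramework client; MigrateTaskZkSketch, writeTaskAtomically and their parameters are hypothetical names used only for illustration.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.api.transaction.CuratorTransactionFinal;

// Sketch of the transactional write pattern used above: the task node's data
// and its per-dataHost children are committed to ZooKeeper in one transaction,
// so observers never see a half-written migration task. Paths and payloads are
// placeholders.
public final class MigrateTaskZkSketch {

    public static void writeTaskAtomically(CuratorFramework client, String taskPath,
                                           byte[] taskNodeJson,
                                           java.util.Map<String, byte[]> dataHostTaskJson) throws Exception {
        // Start the transaction by setting the task node's own payload
        CuratorTransactionFinal tx = client.inTransaction()
                .setData().forPath(taskPath, taskNodeJson).and();
        // Add one child node per dataHost-level task list
        for (java.util.Map.Entry<String, byte[]> entry : dataHostTaskJson.entrySet()) {
            tx = tx.create().forPath(taskPath + "/" + entry.getKey(), entry.getValue()).and();
        }
        // Nothing is visible in ZooKeeper until commit() succeeds
        tx.commit();
    }
}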
use of io.mycat.config.model.TableConfig in project Mycat_plus by coderczp.
the class GlobalTableUtil method reGetColumnsForTable.
/**
 * Re-fetch the column list of the table.
 * @param tableName
 */
private static void reGetColumnsForTable(String tableName) {
    MycatConfig config = MycatServer.getInstance().getConfig();
    if (globalTableMap != null && globalTableMap.get(tableName.toUpperCase()) != null) {
        TableConfig tableConfig = globalTableMap.get(tableName.toUpperCase());
        // Skip if consistencyCheck is still running
        if (tableConfig == null || isInnerColumnCheckFinished != 1) {
            return;
        }
        String nodeName = tableConfig.getDataNodes().get(0);
        Map<String, PhysicalDBNode> map = config.getDataNodes();
        for (String k2 : map.keySet()) {
            PhysicalDBNode dBnode = map.get(k2);
            if (nodeName.equals(dBnode.getName())) {
                PhysicalDBPool pool = dBnode.getDbPool();
                List<PhysicalDatasource> dsList = (List<PhysicalDatasource>) pool.genAllDataSources();
                for (PhysicalDatasource ds : dsList) {
                    if (ds instanceof MySQLDataSource) {
                        MySQLDataSource mds = (MySQLDataSource) dsList.get(0);
                        MySQLConsistencyChecker checker = new MySQLConsistencyChecker(mds, tableConfig.getName());
                        checker.checkInnerColumnExist();
                        // Running once is enough; unlike consistencyCheck, it does not need to run against every db
                        return;
                    }
                }
            }
        }
    }
}
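The lookup above walks from the TableConfig's first data node to a concrete MySQL data source before running the inner-column check. The following is a sketch of that resolution step on its own, assuming the Mycat-Server package layout for PhysicalDBNode, PhysicalDatasource and MySQLDataSource; DataSourceLookupSketch and firstMySQLDataSource are hypothetical names.

import java.util.List;
import java.util.Map;

import io.mycat.backend.datasource.PhysicalDBNode;
import io.mycat.backend.datasource.PhysicalDatasource;
import io.mycat.backend.mysql.nio.MySQLDataSource;
import io.mycat.config.MycatConfig;
import io.mycat.config.model.TableConfig;

// Hypothetical helper mirroring the lookup above: take the table's first data
// node, find the matching PhysicalDBNode in the server config, and return the
// first MySQL data source of its pool (or null if none is found).
public final class DataSourceLookupSketch {

    public static MySQLDataSource firstMySQLDataSource(MycatConfig config, TableConfig tableConfig) {
        String nodeName = tableConfig.getDataNodes().get(0);
        Map<String, PhysicalDBNode> dataNodes = config.getDataNodes();
        for (PhysicalDBNode dbNode : dataNodes.values()) {
            if (!nodeName.equals(dbNode.getName())) {
                continue;
            }
            List<PhysicalDatasource> dsList = (List<PhysicalDatasource>) dbNode.getDbPool().genAllDataSources();
            for (PhysicalDatasource ds : dsList) {
                if (ds instanceof MySQLDataSource) {
                    return (MySQLDataSource) ds;
                }
            }
        }
        return null;
    }
}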
use of io.mycat.config.model.TableConfig in project Mycat_plus by coderczp.
the class GlobalTableUtil method getGlobalTable.
private static void getGlobalTable() {
    MycatConfig config = MycatServer.getInstance().getConfig();
    Map<String, SchemaConfig> schemaMap = config.getSchemas();
    SchemaConfig schemaMconfig = null;
    for (String key : schemaMap.keySet()) {
        if (schemaMap.get(key) != null) {
            schemaMconfig = schemaMap.get(key);
            Map<String, TableConfig> tableMap = schemaMconfig.getTables();
            if (tableMap != null) {
                for (String k : tableMap.keySet()) {
                    TableConfig table = tableMap.get(k);
                    if (table != null && table.isGlobalTable()) {
                        globalTableMap.put(table.getName().toUpperCase(), table);
                    }
                }
            }
        }
    }
}
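Because getGlobalTable keys the map by the upper-cased table name, any later lookup has to normalize case the same way. A hypothetical usage sketch follows; GlobalTableLookupSketch and isGlobalTable are illustrative names, not project code.

import java.util.Map;

import io.mycat.config.model.TableConfig;

// Hypothetical usage of the map populated by getGlobalTable(): keys are
// upper-cased table names, so lookups must upper-case the input as well.
public final class GlobalTableLookupSketch {

    public static boolean isGlobalTable(Map<String, TableConfig> globalTableMap, String tableName) {
        TableConfig table = globalTableMap.get(tableName.toUpperCase());
        return table != null && table.isGlobalTable();
    }
}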
use of io.mycat.config.model.TableConfig in project Mycat_plus by coderczp.
the class BatchInsertSequence method route.
@Override
public void route(SystemConfig sysConfig, SchemaConfig schema, int sqlType, String realSQL, String charset, ServerConnection sc, LayerCachePool cachePool) {
    int rs = ServerParse.parse(realSQL);
    this.sqltype = rs & 0xff;
    this.sysConfig = sysConfig;
    this.schema = schema;
    this.charset = charset;
    this.sc = sc;
    this.cachePool = cachePool;
    try {
        MySqlStatementParser parser = new MySqlStatementParser(realSQL);
        SQLStatement statement = parser.parseStatement();
        MySqlInsertStatement insert = (MySqlInsertStatement) statement;
        if (insert.getValuesList() != null) {
            String tableName = StringUtil.getTableName(realSQL).toUpperCase();
            TableConfig tableConfig = schema.getTables().get(tableName);
            // Get the table's primary key column
            String primaryKey = tableConfig.getPrimaryKey();
            SQLIdentifierExpr sqlIdentifierExpr = new SQLIdentifierExpr();
            sqlIdentifierExpr.setName(primaryKey);
            insert.getColumns().add(sqlIdentifierExpr);
            if (sequenceHandler == null) {
                int seqHandlerType = MycatServer.getInstance().getConfig().getSystem().getSequnceHandlerType();
                switch (seqHandlerType) {
                    case SystemConfig.SEQUENCEHANDLER_MYSQLDB:
                        sequenceHandler = IncrSequenceMySQLHandler.getInstance();
                        break;
                    case SystemConfig.SEQUENCEHANDLER_LOCALFILE:
                        sequenceHandler = IncrSequencePropHandler.getInstance();
                        break;
                    case SystemConfig.SEQUENCEHANDLER_LOCAL_TIME:
                        sequenceHandler = IncrSequenceTimeHandler.getInstance();
                        break;
                    case SystemConfig.SEQUENCEHANDLER_ZK_DISTRIBUTED:
                        sequenceHandler = DistributedSequenceHandler.getInstance(MycatServer.getInstance().getConfig().getSystem());
                        break;
                    case SystemConfig.SEQUENCEHANDLER_ZK_GLOBAL_INCREMENT:
                        sequenceHandler = IncrSequenceZKHandler.getInstance();
                        break;
                    default:
                        throw new java.lang.IllegalArgumentException("Invalid sequnce handler type " + seqHandlerType);
                }
            }
            for (ValuesClause vc : insert.getValuesList()) {
                SQLIntegerExpr sqlIntegerExpr = new SQLIntegerExpr();
                long value = sequenceHandler.nextId(tableName.toUpperCase());
                // Insert the generated sequence value into this row
                sqlIntegerExpr.setNumber(value);
                vc.addValue(sqlIntegerExpr);
            }
            String insertSql = insert.toString();
            this.executeSql = insertSql;
        }
    } catch (Exception e) {
        LOGGER.error("BatchInsertSequence.route(......)", e);
    }
}
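The core of route is a Druid AST rewrite: append the primary key column to the parsed INSERT, then append one generated id to every VALUES clause. The sketch below shows that rewrite on its own, assuming the same Druid parser API; InsertSequenceRewriteSketch and appendIds are hypothetical, and the primary key name and id values are placeholders for what TableConfig.getPrimaryKey() and the configured sequence handler would supply.

import java.util.Iterator;
import java.util.List;

import com.alibaba.druid.sql.ast.expr.SQLIdentifierExpr;
import com.alibaba.druid.sql.ast.expr.SQLIntegerExpr;
import com.alibaba.druid.sql.ast.statement.SQLInsertStatement.ValuesClause;
import com.alibaba.druid.sql.dialect.mysql.ast.statement.MySqlInsertStatement;
import com.alibaba.druid.sql.dialect.mysql.parser.MySqlStatementParser;

// Standalone sketch of the Druid AST rewrite performed above: parse a batch
// INSERT, append the primary key column, and append one generated id per row,
// then regenerate the SQL text.
public final class InsertSequenceRewriteSketch {

    public static String appendIds(String insertSql, String primaryKey, List<Long> generatedIds) {
        MySqlStatementParser parser = new MySqlStatementParser(insertSql);
        MySqlInsertStatement insert = (MySqlInsertStatement) parser.parseStatement();
        // Add the id column to the column list
        insert.getColumns().add(new SQLIdentifierExpr(primaryKey));
        // Add one id value to each VALUES(...) clause
        Iterator<Long> ids = generatedIds.iterator();
        for (ValuesClause vc : insert.getValuesList()) {
            SQLIntegerExpr idExpr = new SQLIntegerExpr();
            idExpr.setNumber(ids.next());
            vc.addValue(idExpr);
        }
        // Regenerate the rewritten SQL text
        return insert.toString();
    }
}

For example, rewriting INSERT INTO t (name) VALUES ('a'), ('b') with primaryKey "id" and ids [1, 2] yields an INSERT whose column list ends with id and whose two value rows end with 1 and 2 respectively.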